-rw-r--r-- | Documentation/crypto/async-tx-api.txt | 96 | ||||
-rw-r--r-- | Documentation/dmaengine.txt | 1 | ||||
-rw-r--r-- | Documentation/kernel-parameters.txt | 46 | ||||
-rw-r--r-- | Documentation/powerpc/dts-bindings/4xx/ndfc.txt | 39 | ||||
-rw-r--r-- | arch/arm/mach-pxa/corgi.c | 54 | ||||
-rw-r--r-- | arch/arm/mach-pxa/poodle.c | 51 | ||||
-rw-r--r-- | arch/arm/mach-pxa/spitz.c | 77 | ||||
-rw-r--r-- | arch/avr32/mach-at32ap/at32ap700x.c | 15 | ||||
-rw-r--r-- | arch/ia64/include/asm/acpi-ext.h | 1 | ||||
-rw-r--r-- | arch/ia64/include/asm/sn/acpi.h | 2 | ||||
-rw-r--r-- | arch/ia64/kernel/acpi.c | 1 | ||||
-rw-r--r-- | arch/ia64/sn/kernel/io_acpi_init.c | 103 | ||||
-rw-r--r-- | arch/ia64/sn/kernel/io_common.c | 5 | ||||
-rw-r--r-- | arch/parisc/Makefile | 2 | ||||
-rw-r--r-- | arch/parisc/include/asm/Kbuild | 1 | ||||
-rw-r--r-- | arch/parisc/include/asm/byteorder.h | 77 | ||||
-rw-r--r-- | arch/parisc/include/asm/checksum.h | 2 | ||||
-rw-r--r-- | arch/parisc/include/asm/io.h | 12 | ||||
-rw-r--r-- | arch/parisc/include/asm/mmu_context.h | 13 | ||||
-rw-r--r-- | arch/parisc/include/asm/processor.h | 4 | ||||
-rw-r--r-- | arch/parisc/include/asm/swab.h | 66 | ||||
-rw-r--r-- | arch/parisc/include/asm/uaccess.h | 2 | ||||
-rw-r--r-- | arch/parisc/kernel/drivers.c | 40 | ||||
-rw-r--r-- | arch/parisc/kernel/hpmc.S | 8 | ||||
-rw-r--r-- | arch/parisc/kernel/irq.c | 11 | ||||
-rw-r--r-- | arch/parisc/kernel/pdc_cons.c | 2 | ||||
-rw-r--r-- | arch/parisc/kernel/perf.c | 4 | ||||
-rw-r--r-- | arch/parisc/kernel/processor.c | 68 | ||||
-rw-r--r-- | arch/parisc/kernel/setup.c | 11 | ||||
-rw-r--r-- | arch/parisc/kernel/smp.c | 32 | ||||
-rw-r--r-- | arch/parisc/kernel/time.c | 4 | ||||
-rw-r--r-- | arch/parisc/kernel/topology.c | 4 | ||||
-rw-r--r-- | arch/parisc/kernel/traps.c | 9 | ||||
-rw-r--r-- | arch/parisc/kernel/unwind.c | 2 | ||||
-rw-r--r-- | arch/parisc/lib/iomap.c | 2 | ||||
-rw-r--r-- | arch/parisc/lib/memcpy.c | 2 | ||||
-rw-r--r-- | arch/parisc/mm/fault.c | 58 | ||||
-rw-r--r-- | arch/powerpc/include/asm/cell-pmu.h | 2 | ||||
-rw-r--r-- | arch/powerpc/include/asm/oprofile_impl.h | 6 | ||||
-rw-r--r-- | arch/powerpc/oprofile/cell/pr_util.h | 11 | ||||
-rw-r--r-- | arch/powerpc/oprofile/cell/spu_profiler.c | 56 | ||||
-rw-r--r-- | arch/powerpc/oprofile/common.c | 22 | ||||
-rw-r--r-- | arch/powerpc/oprofile/op_model_cell.c | 748 | ||||
-rw-r--r-- | arch/x86/include/asm/bitops.h | 2 | ||||
-rw-r--r-- | arch/x86/kernel/acpi/boot.c | 17 | ||||
-rw-r--r-- | arch/x86/kernel/acpi/cstate.c | 4 | ||||
-rw-r--r-- | arch/x86/kernel/acpi/sleep.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/e820.c | 21 | ||||
-rw-r--r-- | arch/x86/kernel/early-quirks.c | 22 | ||||
-rw-r--r-- | arch/x86/oprofile/op_model_amd.c | 149 | ||||
-rw-r--r-- | crypto/async_tx/async_tx.c | 350 | ||||
-rw-r--r-- | drivers/Kconfig | 2 | ||||
-rw-r--r-- | drivers/Makefile | 1 | ||||
-rw-r--r-- | drivers/acpi/Kconfig | 84 | ||||
-rw-r--r-- | drivers/acpi/Makefile | 25 | ||||
-rw-r--r-- | drivers/acpi/acpica/Makefile | 44 | ||||
-rw-r--r-- | drivers/acpi/acpica/accommon.h | 63 | ||||
-rw-r--r-- | drivers/acpi/acpica/acconfig.h (renamed from include/acpi/acconfig.h) | 8 | ||||
-rw-r--r-- | drivers/acpi/acpica/acdebug.h (renamed from include/acpi/acdebug.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/acdispat.h (renamed from include/acpi/acdispat.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/acevents.h (renamed from include/acpi/acevents.h) | 6 | ||||
-rw-r--r-- | drivers/acpi/acpica/acglobal.h (renamed from include/acpi/acglobal.h) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/achware.h (renamed from include/acpi/achware.h) | 22 | ||||
-rw-r--r-- | drivers/acpi/acpica/acinterp.h (renamed from include/acpi/acinterp.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/aclocal.h (renamed from include/acpi/aclocal.h) | 86 | ||||
-rw-r--r-- | drivers/acpi/acpica/acmacros.h (renamed from include/acpi/acmacros.h) | 122 | ||||
-rw-r--r-- | drivers/acpi/acpica/acnamesp.h (renamed from include/acpi/acnamesp.h) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/acobject.h (renamed from include/acpi/acobject.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/acopcode.h (renamed from include/acpi/acopcode.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/acparser.h (renamed from include/acpi/acparser.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/acpredef.h (renamed from include/acpi/acpredef.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/acresrc.h (renamed from include/acpi/acresrc.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/acstruct.h (renamed from include/acpi/acstruct.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/actables.h (renamed from include/acpi/actables.h) | 2 | ||||
-rw-r--r-- | drivers/acpi/acpica/acutils.h (renamed from include/acpi/acutils.h) | 36 | ||||
-rw-r--r-- | drivers/acpi/acpica/amlcode.h (renamed from include/acpi/amlcode.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/amlresrc.h (renamed from include/acpi/amlresrc.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/acpica/dsfield.c (renamed from drivers/acpi/dispatcher/dsfield.c) | 11 | ||||
-rw-r--r-- | drivers/acpi/acpica/dsinit.c (renamed from drivers/acpi/dispatcher/dsinit.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/dsmethod.c (renamed from drivers/acpi/dispatcher/dsmethod.c) | 14 | ||||
-rw-r--r-- | drivers/acpi/acpica/dsmthdat.c (renamed from drivers/acpi/dispatcher/dsmthdat.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/dsobject.c (renamed from drivers/acpi/dispatcher/dsobject.c) | 11 | ||||
-rw-r--r-- | drivers/acpi/acpica/dsopcode.c (renamed from drivers/acpi/dispatcher/dsopcode.c) | 66 | ||||
-rw-r--r-- | drivers/acpi/acpica/dsutils.c (renamed from drivers/acpi/dispatcher/dsutils.c) | 13 | ||||
-rw-r--r-- | drivers/acpi/acpica/dswexec.c (renamed from drivers/acpi/dispatcher/dswexec.c) | 13 | ||||
-rw-r--r-- | drivers/acpi/acpica/dswload.c (renamed from drivers/acpi/dispatcher/dswload.c) | 13 | ||||
-rw-r--r-- | drivers/acpi/acpica/dswscope.c (renamed from drivers/acpi/dispatcher/dswscope.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/dswstate.c (renamed from drivers/acpi/dispatcher/dswstate.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/evevent.c (renamed from drivers/acpi/events/evevent.c) | 17 | ||||
-rw-r--r-- | drivers/acpi/acpica/evgpe.c (renamed from drivers/acpi/events/evgpe.c) | 53 | ||||
-rw-r--r-- | drivers/acpi/acpica/evgpeblk.c (renamed from drivers/acpi/events/evgpeblk.c) | 82 | ||||
-rw-r--r-- | drivers/acpi/acpica/evmisc.c (renamed from drivers/acpi/events/evmisc.c) | 62 | ||||
-rw-r--r-- | drivers/acpi/acpica/evregion.c (renamed from drivers/acpi/events/evregion.c) | 140 | ||||
-rw-r--r-- | drivers/acpi/acpica/evrgnini.c (renamed from drivers/acpi/events/evrgnini.c) | 46 | ||||
-rw-r--r-- | drivers/acpi/acpica/evsci.c (renamed from drivers/acpi/events/evsci.c) | 13 | ||||
-rw-r--r-- | drivers/acpi/acpica/evxface.c (renamed from drivers/acpi/events/evxface.c) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/evxfevnt.c (renamed from drivers/acpi/events/evxfevnt.c) | 170 | ||||
-rw-r--r-- | drivers/acpi/acpica/evxfregn.c (renamed from drivers/acpi/events/evxfregn.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/exconfig.c (renamed from drivers/acpi/executer/exconfig.c) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/exconvrt.c (renamed from drivers/acpi/executer/exconvrt.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/excreate.c (renamed from drivers/acpi/executer/excreate.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/exdump.c (renamed from drivers/acpi/executer/exdump.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/exfield.c (renamed from drivers/acpi/executer/exfield.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/exfldio.c (renamed from drivers/acpi/executer/exfldio.c) | 20 | ||||
-rw-r--r-- | drivers/acpi/acpica/exmisc.c (renamed from drivers/acpi/executer/exmisc.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/exmutex.c (renamed from drivers/acpi/executer/exmutex.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/exnames.c (renamed from drivers/acpi/executer/exnames.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/exoparg1.c (renamed from drivers/acpi/executer/exoparg1.c) | 11 | ||||
-rw-r--r-- | drivers/acpi/acpica/exoparg2.c (renamed from drivers/acpi/executer/exoparg2.c) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/exoparg3.c (renamed from drivers/acpi/executer/exoparg3.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/exoparg6.c (renamed from drivers/acpi/executer/exoparg6.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/exprep.c (renamed from drivers/acpi/executer/exprep.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/exregion.c (renamed from drivers/acpi/executer/exregion.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/exresnte.c (renamed from drivers/acpi/executer/exresnte.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/exresolv.c (renamed from drivers/acpi/executer/exresolv.c) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/exresop.c (renamed from drivers/acpi/executer/exresop.c) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/exstore.c (renamed from drivers/acpi/executer/exstore.c) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/exstoren.c (renamed from drivers/acpi/executer/exstoren.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/exstorob.c (renamed from drivers/acpi/executer/exstorob.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/exsystem.c (renamed from drivers/acpi/executer/exsystem.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/exutils.c (renamed from drivers/acpi/executer/exutils.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/hwacpi.c (renamed from drivers/acpi/hardware/hwacpi.c) | 1 | ||||
-rw-r--r-- | drivers/acpi/acpica/hwgpe.c (renamed from drivers/acpi/hardware/hwgpe.c) | 78 | ||||
-rw-r--r-- | drivers/acpi/acpica/hwregs.c | 353 | ||||
-rw-r--r-- | drivers/acpi/acpica/hwsleep.c (renamed from drivers/acpi/hardware/hwsleep.c) | 76 | ||||
-rw-r--r-- | drivers/acpi/acpica/hwtimer.c (renamed from drivers/acpi/hardware/hwtimer.c) | 1 | ||||
-rw-r--r-- | drivers/acpi/acpica/hwxface.c (renamed from drivers/acpi/hardware/hwregs.c) | 744 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsaccess.c (renamed from drivers/acpi/namespace/nsaccess.c) | 18 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsalloc.c (renamed from drivers/acpi/namespace/nsalloc.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsdump.c (renamed from drivers/acpi/namespace/nsdump.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsdumpdv.c (renamed from drivers/acpi/namespace/nsdumpdv.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/nseval.c (renamed from drivers/acpi/namespace/nseval.c) | 77 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsinit.c (renamed from drivers/acpi/namespace/nsinit.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsload.c (renamed from drivers/acpi/namespace/nsload.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsnames.c (renamed from drivers/acpi/namespace/nsnames.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsobject.c (renamed from drivers/acpi/namespace/nsobject.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsparse.c (renamed from drivers/acpi/namespace/nsparse.c) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/nspredef.c (renamed from drivers/acpi/namespace/nspredef.c) | 261 | ||||
-rw-r--r-- | drivers/acpi/acpica/nssearch.c (renamed from drivers/acpi/namespace/nssearch.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsutils.c (renamed from drivers/acpi/namespace/nsutils.c) | 15 | ||||
-rw-r--r-- | drivers/acpi/acpica/nswalk.c (renamed from drivers/acpi/namespace/nswalk.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsxfeval.c (renamed from drivers/acpi/namespace/nsxfeval.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsxfname.c (renamed from drivers/acpi/namespace/nsxfname.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/nsxfobj.c (renamed from drivers/acpi/namespace/nsxfobj.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/psargs.c (renamed from drivers/acpi/parser/psargs.c) | 9 | ||||
-rw-r--r-- | drivers/acpi/acpica/psloop.c (renamed from drivers/acpi/parser/psloop.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/psopcode.c (renamed from drivers/acpi/parser/psopcode.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/psparse.c (renamed from drivers/acpi/parser/psparse.c) | 23 | ||||
-rw-r--r-- | drivers/acpi/acpica/psscope.c (renamed from drivers/acpi/parser/psscope.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/pstree.c (renamed from drivers/acpi/parser/pstree.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/psutils.c (renamed from drivers/acpi/parser/psutils.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/pswalk.c (renamed from drivers/acpi/parser/pswalk.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/psxface.c (renamed from drivers/acpi/parser/psxface.c) | 40 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsaddr.c (renamed from drivers/acpi/resources/rsaddr.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/rscalc.c (renamed from drivers/acpi/resources/rscalc.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/rscreate.c (renamed from drivers/acpi/resources/rscreate.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsdump.c (renamed from drivers/acpi/resources/rsdump.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsinfo.c (renamed from drivers/acpi/resources/rsinfo.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsio.c (renamed from drivers/acpi/resources/rsio.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsirq.c (renamed from drivers/acpi/resources/rsirq.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/rslist.c (renamed from drivers/acpi/resources/rslist.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsmemory.c (renamed from drivers/acpi/resources/rsmemory.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsmisc.c (renamed from drivers/acpi/resources/rsmisc.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsutils.c (renamed from drivers/acpi/resources/rsutils.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/rsxface.c (renamed from drivers/acpi/resources/rsxface.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/tbfadt.c (renamed from drivers/acpi/tables/tbfadt.c) | 252 | ||||
-rw-r--r-- | drivers/acpi/acpica/tbfind.c (renamed from drivers/acpi/tables/tbfind.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/tbinstal.c (renamed from drivers/acpi/tables/tbinstal.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/tbutils.c (renamed from drivers/acpi/tables/tbutils.c) | 30 | ||||
-rw-r--r-- | drivers/acpi/acpica/tbxface.c (renamed from drivers/acpi/tables/tbxface.c) | 5 | ||||
-rw-r--r-- | drivers/acpi/acpica/tbxfroot.c (renamed from drivers/acpi/tables/tbxfroot.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/utalloc.c (renamed from drivers/acpi/utilities/utalloc.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/utcopy.c (renamed from drivers/acpi/utilities/utcopy.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/utdebug.c (renamed from drivers/acpi/utilities/utdebug.c) | 95 | ||||
-rw-r--r-- | drivers/acpi/acpica/utdelete.c (renamed from drivers/acpi/utilities/utdelete.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/uteval.c (renamed from drivers/acpi/utilities/uteval.c) | 11 | ||||
-rw-r--r-- | drivers/acpi/acpica/utglobal.c (renamed from drivers/acpi/utilities/utglobal.c) | 12 | ||||
-rw-r--r-- | drivers/acpi/acpica/utinit.c (renamed from drivers/acpi/utilities/utinit.c) | 7 | ||||
-rw-r--r-- | drivers/acpi/acpica/utmath.c (renamed from drivers/acpi/utilities/utmath.c) | 1 | ||||
-rw-r--r-- | drivers/acpi/acpica/utmisc.c (renamed from drivers/acpi/utilities/utmisc.c) | 23 | ||||
-rw-r--r-- | drivers/acpi/acpica/utmutex.c (renamed from drivers/acpi/utilities/utmutex.c) | 1 | ||||
-rw-r--r-- | drivers/acpi/acpica/utobject.c (renamed from drivers/acpi/utilities/utobject.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/utresrc.c (renamed from drivers/acpi/utilities/utresrc.c) | 3 | ||||
-rw-r--r-- | drivers/acpi/acpica/utstate.c (renamed from drivers/acpi/utilities/utstate.c) | 1 | ||||
-rw-r--r-- | drivers/acpi/acpica/utxface.c (renamed from drivers/acpi/utilities/utxface.c) | 18 | ||||
-rw-r--r-- | drivers/acpi/battery.c | 5 | ||||
-rw-r--r-- | drivers/acpi/cm_sbs.c | 3 | ||||
-rw-r--r-- | drivers/acpi/debug.c | 1 | ||||
-rw-r--r-- | drivers/acpi/dispatcher/Makefile | 9 | ||||
-rw-r--r-- | drivers/acpi/ec.c | 57 | ||||
-rw-r--r-- | drivers/acpi/events/Makefile | 9 | ||||
-rw-r--r-- | drivers/acpi/executer/Makefile | 10 | ||||
-rw-r--r-- | drivers/acpi/hardware/Makefile | 9 | ||||
-rw-r--r-- | drivers/acpi/main.c (renamed from drivers/acpi/sleep/main.c) | 79 | ||||
-rw-r--r-- | drivers/acpi/namespace/Makefile | 12 | ||||
-rw-r--r-- | drivers/acpi/numa.c | 1 | ||||
-rw-r--r-- | drivers/acpi/osl.c | 4 | ||||
-rw-r--r-- | drivers/acpi/parser/Makefile | 8 | ||||
-rw-r--r-- | drivers/acpi/pci_bind.c | 90 | ||||
-rw-r--r-- | drivers/acpi/pci_irq.c | 472 | ||||
-rw-r--r-- | drivers/acpi/pci_link.c | 6 | ||||
-rw-r--r-- | drivers/acpi/power.c | 6 | ||||
-rw-r--r-- | drivers/acpi/proc.c (renamed from drivers/acpi/sleep/proc.c) | 65 | ||||
-rw-r--r-- | drivers/acpi/reboot.c | 2 | ||||
-rw-r--r-- | drivers/acpi/resources/Makefile | 10 | ||||
-rw-r--r-- | drivers/acpi/sbshc.c | 1 | ||||
-rw-r--r-- | drivers/acpi/scan.c | 1 | ||||
-rw-r--r-- | drivers/acpi/sleep.h (renamed from drivers/acpi/sleep/sleep.h) | 0 | ||||
-rw-r--r-- | drivers/acpi/sleep/Makefile | 5 | ||||
-rw-r--r-- | drivers/acpi/system.c | 63 | ||||
-rw-r--r-- | drivers/acpi/tables/Makefile | 7 | ||||
-rw-r--r-- | drivers/acpi/utilities/Makefile | 9 | ||||
-rw-r--r-- | drivers/acpi/utilities/utcache.c | 314 | ||||
-rw-r--r-- | drivers/acpi/video.c | 20 | ||||
-rw-r--r-- | drivers/acpi/video_detect.c | 4 | ||||
-rw-r--r-- | drivers/acpi/wakeup.c (renamed from drivers/acpi/sleep/wakeup.c) | 6 | ||||
-rw-r--r-- | drivers/ata/libata-acpi.c | 6 | ||||
-rw-r--r-- | drivers/ata/libata-core.c | 16 | ||||
-rw-r--r-- | drivers/ata/libata-sff.c | 24 | ||||
-rw-r--r-- | drivers/ata/pata_acpi.c | 6 | ||||
-rw-r--r-- | drivers/char/tpm/tpm_bios.c | 2 | ||||
-rw-r--r-- | drivers/cpuidle/governors/menu.c | 10 | ||||
-rw-r--r-- | drivers/dca/dca-core.c | 2 | ||||
-rw-r--r-- | drivers/dma/Kconfig | 2 | ||||
-rw-r--r-- | drivers/dma/dmaengine.c | 778 | ||||
-rw-r--r-- | drivers/dma/dmatest.c | 129 | ||||
-rw-r--r-- | drivers/dma/dw_dmac.c | 119 | ||||
-rw-r--r-- | drivers/dma/fsldma.c | 5 | ||||
-rw-r--r-- | drivers/dma/ioat.c | 92 | ||||
-rw-r--r-- | drivers/dma/ioat_dma.c | 18 | ||||
-rw-r--r-- | drivers/dma/iop-adma.c | 30 | ||||
-rw-r--r-- | drivers/dma/mv_xor.c | 11 | ||||
-rw-r--r-- | drivers/ide/ide-acpi.c | 6 | ||||
-rw-r--r-- | drivers/misc/Kconfig | 284 | ||||
-rw-r--r-- | drivers/misc/Makefile | 13 | ||||
-rw-r--r-- | drivers/mmc/host/atmel-mci.c | 103 | ||||
-rw-r--r-- | drivers/mtd/Kconfig | 10 | ||||
-rw-r--r-- | drivers/mtd/Makefile | 2 | ||||
-rw-r--r-- | drivers/mtd/chips/cfi_cmdset_0001.c | 12 | ||||
-rw-r--r-- | drivers/mtd/chips/cfi_cmdset_0002.c | 18 | ||||
-rw-r--r-- | drivers/mtd/chips/cfi_cmdset_0020.c | 14 | ||||
-rw-r--r-- | drivers/mtd/chips/fwh_lock.h | 4 | ||||
-rw-r--r-- | drivers/mtd/devices/lart.c | 6 | ||||
-rw-r--r-- | drivers/mtd/devices/m25p80.c | 41 | ||||
-rw-r--r-- | drivers/mtd/devices/mtd_dataflash.c | 24 | ||||
-rw-r--r-- | drivers/mtd/ftl.c | 100 | ||||
-rw-r--r-- | drivers/mtd/inftlcore.c | 2 | ||||
-rw-r--r-- | drivers/mtd/inftlmount.c | 4 | ||||
-rw-r--r-- | drivers/mtd/lpddr/Kconfig | 22 | ||||
-rw-r--r-- | drivers/mtd/lpddr/Makefile | 6 | ||||
-rw-r--r-- | drivers/mtd/lpddr/lpddr_cmds.c | 796 | ||||
-rw-r--r-- | drivers/mtd/lpddr/qinfo_probe.c | 255 | ||||
-rw-r--r-- | drivers/mtd/maps/Kconfig | 21 | ||||
-rw-r--r-- | drivers/mtd/maps/alchemy-flash.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/amd76xrom.c | 4 | ||||
-rw-r--r-- | drivers/mtd/maps/cfi_flagadm.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/ck804xrom.c | 4 | ||||
-rw-r--r-- | drivers/mtd/maps/dbox2-flash.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/edb7312.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/esb2rom.c | 4 | ||||
-rw-r--r-- | drivers/mtd/maps/fortunet.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/h720x-flash.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/ichxrom.c | 4 | ||||
-rw-r--r-- | drivers/mtd/maps/impa7.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/ipaq-flash.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/mbx860.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/nettel.c | 9 | ||||
-rw-r--r-- | drivers/mtd/maps/octagon-5066.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/physmap.c | 41 | ||||
-rw-r--r-- | drivers/mtd/maps/pmcmsp-flash.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/redwood.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/rpxlite.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/sbc8240.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/scb2_flash.c | 8 | ||||
-rw-r--r-- | drivers/mtd/maps/sharpsl-flash.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/tqm8xxl.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/uclinux.c | 4 | ||||
-rw-r--r-- | drivers/mtd/maps/vmax301.c | 2 | ||||
-rw-r--r-- | drivers/mtd/maps/wr_sbc82xx_flash.c | 2 | ||||
-rw-r--r-- | drivers/mtd/mtdchar.c | 6 | ||||
-rw-r--r-- | drivers/mtd/mtdconcat.c | 35 | ||||
-rw-r--r-- | drivers/mtd/mtdcore.c | 16 | ||||
-rw-r--r-- | drivers/mtd/mtdoops.c | 9 | ||||
-rw-r--r-- | drivers/mtd/mtdpart.c | 34 | ||||
-rw-r--r-- | drivers/mtd/nand/Kconfig | 7 | ||||
-rw-r--r-- | drivers/mtd/nand/alauda.c | 6 | ||||
-rw-r--r-- | drivers/mtd/nand/cafe_nand.c | 7 | ||||
-rw-r--r-- | drivers/mtd/nand/fsl_elbc_nand.c | 4 | ||||
-rw-r--r-- | drivers/mtd/nand/nand_base.c | 25 | ||||
-rw-r--r-- | drivers/mtd/nand/nand_bbt.c | 31 | ||||
-rw-r--r-- | drivers/mtd/nand/nandsim.c | 339 | ||||
-rw-r--r-- | drivers/mtd/nand/ndfc.c | 269 | ||||
-rw-r--r-- | drivers/mtd/nand/pxa3xx_nand.c | 6 | ||||
-rw-r--r-- | drivers/mtd/nand/sharpsl.c | 247 | ||||
-rw-r--r-- | drivers/mtd/nftlcore.c | 2 | ||||
-rw-r--r-- | drivers/mtd/nftlmount.c | 4 | ||||
-rw-r--r-- | drivers/mtd/onenand/onenand_base.c | 8 | ||||
-rw-r--r-- | drivers/mtd/rfd_ftl.c | 29 | ||||
-rw-r--r-- | drivers/mtd/ssfdc.c | 7 | ||||
-rw-r--r-- | drivers/mtd/tests/Makefile | 7 | ||||
-rw-r--r-- | drivers/mtd/tests/mtd_oobtest.c | 742 | ||||
-rw-r--r-- | drivers/mtd/tests/mtd_pagetest.c | 632 | ||||
-rw-r--r-- | drivers/mtd/tests/mtd_readtest.c | 253 | ||||
-rw-r--r-- | drivers/mtd/tests/mtd_speedtest.c | 502 | ||||
-rw-r--r-- | drivers/mtd/tests/mtd_stresstest.c | 330 | ||||
-rw-r--r-- | drivers/mtd/tests/mtd_subpagetest.c | 525 | ||||
-rw-r--r-- | drivers/mtd/tests/mtd_torturetest.c | 530 | ||||
-rw-r--r-- | drivers/mtd/ubi/build.c | 2 | ||||
-rw-r--r-- | drivers/mtd/ubi/gluebi.c | 17 | ||||
-rw-r--r-- | drivers/oprofile/buffer_sync.c | 188 | ||||
-rw-r--r-- | drivers/oprofile/cpu_buffer.c | 316 | ||||
-rw-r--r-- | drivers/oprofile/cpu_buffer.h | 89 | ||||
-rw-r--r-- | drivers/oprofile/event_buffer.c | 4 | ||||
-rw-r--r-- | drivers/oprofile/oprof.c | 4 | ||||
-rw-r--r-- | drivers/oprofile/oprof.h | 8 | ||||
-rw-r--r-- | drivers/oprofile/oprofile_files.c | 27 | ||||
-rw-r--r-- | drivers/parisc/asp.c | 3 | ||||
-rw-r--r-- | drivers/parisc/ccio-dma.c | 4 | ||||
-rw-r--r-- | drivers/parisc/dino.c | 4 | ||||
-rw-r--r-- | drivers/parisc/hppb.c | 2 | ||||
-rw-r--r-- | drivers/parisc/lasi.c | 5 | ||||
-rw-r--r-- | drivers/parisc/lba_pci.c | 2 | ||||
-rw-r--r-- | drivers/parisc/sba_iommu.c | 9 | ||||
-rw-r--r-- | drivers/parisc/wax.c | 3 | ||||
-rw-r--r-- | drivers/pci/hotplug/acpi_pcihp.c | 1 | ||||
-rw-r--r-- | drivers/pci/hotplug/pciehp.h | 1 | ||||
-rw-r--r-- | drivers/pci/pci-acpi.c | 2 | ||||
-rw-r--r-- | drivers/platform/Kconfig | 5 | ||||
-rw-r--r-- | drivers/platform/Makefile | 5 | ||||
-rw-r--r-- | drivers/platform/x86/Kconfig | 375 | ||||
-rw-r--r-- | drivers/platform/x86/Makefile | 19 | ||||
-rw-r--r-- | drivers/platform/x86/acer-wmi.c (renamed from drivers/misc/acer-wmi.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/asus-laptop.c (renamed from drivers/misc/asus-laptop.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/asus_acpi.c (renamed from drivers/acpi/asus_acpi.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/compal-laptop.c (renamed from drivers/misc/compal-laptop.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/eeepc-laptop.c (renamed from drivers/misc/eeepc-laptop.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/fujitsu-laptop.c (renamed from drivers/misc/fujitsu-laptop.c) | 419 | ||||
-rw-r--r-- | drivers/platform/x86/hp-wmi.c (renamed from drivers/misc/hp-wmi.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/intel_menlow.c (renamed from drivers/misc/intel_menlow.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/msi-laptop.c (renamed from drivers/misc/msi-laptop.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/panasonic-laptop.c (renamed from drivers/misc/panasonic-laptop.c) | 22 | ||||
-rw-r--r-- | drivers/platform/x86/sony-laptop.c (renamed from drivers/misc/sony-laptop.c) | 15 | ||||
-rw-r--r-- | drivers/platform/x86/tc1100-wmi.c (renamed from drivers/misc/tc1100-wmi.c) | 1 | ||||
-rw-r--r-- | drivers/platform/x86/thinkpad_acpi.c (renamed from drivers/misc/thinkpad_acpi.c) | 1 | ||||
-rw-r--r-- | drivers/platform/x86/toshiba_acpi.c (renamed from drivers/acpi/toshiba_acpi.c) | 0 | ||||
-rw-r--r-- | drivers/platform/x86/wmi.c (renamed from drivers/acpi/wmi.c) | 0 | ||||
-rw-r--r-- | drivers/pnp/pnpacpi/core.c | 1 | ||||
-rw-r--r-- | drivers/rtc/rtc-parisc.c | 3 | ||||
-rw-r--r-- | fs/jffs2/compr_rubin.c | 120 | ||||
-rw-r--r-- | fs/jffs2/erase.c | 5 | ||||
-rw-r--r-- | include/acpi/acdisasm.h | 445 | ||||
-rw-r--r-- | include/acpi/acexcep.h | 6 | ||||
-rw-r--r-- | include/acpi/acoutput.h | 103 | ||||
-rw-r--r-- | include/acpi/acpi.h | 31 | ||||
-rw-r--r-- | include/acpi/acpiosxf.h | 13 | ||||
-rw-r--r-- | include/acpi/acpixf.h | 100 | ||||
-rw-r--r-- | include/acpi/acrestyp.h | 405 | ||||
-rw-r--r-- | include/acpi/actbl.h | 25 | ||||
-rw-r--r-- | include/acpi/actbl1.h | 2 | ||||
-rw-r--r-- | include/acpi/actypes.h | 557 | ||||
-rw-r--r-- | include/acpi/platform/acenv.h | 45 | ||||
-rw-r--r-- | include/acpi/platform/aclinux.h | 4 | ||||
-rw-r--r-- | include/linux/acpi.h | 17 | ||||
-rw-r--r-- | include/linux/async_tx.h | 17 | ||||
-rw-r--r-- | include/linux/atmel-mci.h | 6 | ||||
-rw-r--r-- | include/linux/dmaengine.h | 181 | ||||
-rw-r--r-- | include/linux/dw_dmac.h | 31 | ||||
-rw-r--r-- | include/linux/mtd/cfi.h | 1 | ||||
-rw-r--r-- | include/linux/mtd/ftl.h | 38 | ||||
-rw-r--r-- | include/linux/mtd/map.h | 1 | ||||
-rw-r--r-- | include/linux/mtd/mtd.h | 75 | ||||
-rw-r--r-- | include/linux/mtd/nand.h | 7 | ||||
-rw-r--r-- | include/linux/mtd/partitions.h | 6 | ||||
-rw-r--r-- | include/linux/mtd/pfow.h | 159 | ||||
-rw-r--r-- | include/linux/mtd/physmap.h | 1 | ||||
-rw-r--r-- | include/linux/mtd/qinfo.h | 91 | ||||
-rw-r--r-- | include/linux/mtd/sharpsl.h | 20 | ||||
-rw-r--r-- | include/linux/netdevice.h | 3 | ||||
-rw-r--r-- | include/linux/oprofile.h | 18 | ||||
-rw-r--r-- | include/linux/pci_hotplug.h | 1 | ||||
-rw-r--r-- | include/linux/suspend.h | 13 | ||||
-rw-r--r-- | include/net/netdma.h | 11 | ||||
-rw-r--r-- | kernel/cred.c | 2 | ||||
-rw-r--r-- | kernel/power/disk.c | 6 | ||||
-rw-r--r-- | kernel/power/snapshot.c | 370 | ||||
-rw-r--r-- | kernel/power/swsusp.c | 122 | ||||
-rw-r--r-- | kernel/trace/ring_buffer.c | 8 | ||||
-rw-r--r-- | net/core/dev.c | 149 | ||||
-rw-r--r-- | net/ipv4/tcp.c | 5 | ||||
-rw-r--r-- | net/ipv4/tcp_input.c | 2 | ||||
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 2 | ||||
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 2 |
392 files changed, 12961 insertions, 6922 deletions
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index c1e9545c59bd..9f59fcbf5d82 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -13,9 +13,9 @@ | |||
13 | 3.6 Constraints | 13 | 3.6 Constraints |
14 | 3.7 Example | 14 | 3.7 Example |
15 | 15 | ||
16 | 4 DRIVER DEVELOPER NOTES | 16 | 4 DMAENGINE DRIVER DEVELOPER NOTES |
17 | 4.1 Conformance points | 17 | 4.1 Conformance points |
18 | 4.2 "My application needs finer control of hardware channels" | 18 | 4.2 "My application needs exclusive control of hardware channels" |
19 | 19 | ||
20 | 5 SOURCE | 20 | 5 SOURCE |
21 | 21 | ||
@@ -150,6 +150,7 @@ ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more | |||
150 | implementation examples. | 150 | implementation examples. |
151 | 151 | ||
152 | 4 DRIVER DEVELOPMENT NOTES | 152 | 4 DRIVER DEVELOPMENT NOTES |
153 | |||
153 | 4.1 Conformance points: | 154 | 4.1 Conformance points: |
154 | There are a few conformance points required in dmaengine drivers to | 155 | There are a few conformance points required in dmaengine drivers to |
155 | accommodate assumptions made by applications using the async_tx API: | 156 | accommodate assumptions made by applications using the async_tx API: |
@@ -158,58 +159,49 @@ accommodate assumptions made by applications using the async_tx API: | |||
158 | 3/ Use async_tx_run_dependencies() in the descriptor clean up path to | 159 | 3/ Use async_tx_run_dependencies() in the descriptor clean up path to |
159 | handle submission of dependent operations | 160 | handle submission of dependent operations |
160 | 161 | ||
161 | 4.2 "My application needs finer control of hardware channels" | 162 | 4.2 "My application needs exclusive control of hardware channels" |
162 | This requirement seems to arise from cases where a DMA engine driver is | 163 | Primarily this requirement arises from cases where a DMA engine driver |
163 | trying to support device-to-memory DMA. The dmaengine and async_tx | 164 | is being used to support device-to-memory operations. A channel that is |
164 | implementations were designed for offloading memory-to-memory | 165 | performing these operations cannot, for many platform specific reasons, |
165 | operations; however, there are some capabilities of the dmaengine layer | 166 | be shared. For these cases the dma_request_channel() interface is |
166 | that can be used for platform-specific channel management. | 167 | provided. |
167 | Platform-specific constraints can be handled by registering the | 168 | |
168 | application as a 'dma_client' and implementing a 'dma_event_callback' to | 169 | The interface is: |
169 | apply a filter to the available channels in the system. Before showing | 170 | struct dma_chan *dma_request_channel(dma_cap_mask_t mask, |
170 | how to implement a custom dma_event callback some background of | 171 | dma_filter_fn filter_fn, |
171 | dmaengine's client support is required. | 172 | void *filter_param); |
172 | 173 | ||
173 | The following routines in dmaengine support multiple clients requesting | 174 | Where dma_filter_fn is defined as: |
174 | use of a channel: | 175 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); |
175 | - dma_async_client_register(struct dma_client *client) | 176 | |
176 | - dma_async_client_chan_request(struct dma_client *client) | 177 | When the optional 'filter_fn' parameter is set to NULL |
177 | 178 | dma_request_channel simply returns the first channel that satisfies the | |
178 | dma_async_client_register takes a pointer to an initialized dma_client | 179 | capability mask. Otherwise, when the mask parameter is insufficient for |
179 | structure. It expects that the 'event_callback' and 'cap_mask' fields | 180 | specifying the necessary channel, the filter_fn routine can be used to |
180 | are already initialized. | 181 | disposition the available channels in the system. The filter_fn routine |
181 | 182 | is called once for each free channel in the system. Upon seeing a | |
182 | dma_async_client_chan_request triggers dmaengine to notify the client of | 183 | suitable channel filter_fn returns DMA_ACK which flags that channel to |
183 | all channels that satisfy the capability mask. It is up to the client's | 184 | be the return value from dma_request_channel. A channel allocated via |
184 | event_callback routine to track how many channels the client needs and | 185 | this interface is exclusive to the caller, until dma_release_channel() |
185 | how many it is currently using. The dma_event_callback routine returns a | 186 | is called. |
186 | dma_state_client code to let dmaengine know the status of the | 187 | |
187 | allocation. | 188 | The DMA_PRIVATE capability flag is used to tag dma devices that should |
188 | 189 | not be used by the general-purpose allocator. It can be set at | |
189 | Below is the example of how to extend this functionality for | 190 | initialization time if it is known that a channel will always be |
190 | platform-specific filtering of the available channels beyond the | 191 | private. Alternatively, it is set when dma_request_channel() finds an |
191 | standard capability mask: | 192 | unused "public" channel. |
192 | 193 | ||
193 | static enum dma_state_client | 194 | A couple caveats to note when implementing a driver and consumer: |
194 | my_dma_client_callback(struct dma_client *client, | 195 | 1/ Once a channel has been privately allocated it will no longer be |
195 | struct dma_chan *chan, enum dma_state state) | 196 | considered by the general-purpose allocator even after a call to |
196 | { | 197 | dma_release_channel(). |
197 | struct dma_device *dma_dev; | 198 | 2/ Since capabilities are specified at the device level a dma_device |
198 | struct my_platform_specific_dma *plat_dma_dev; | 199 | with multiple channels will either have all channels public, or all |
199 | 200 | channels private. | |
200 | dma_dev = chan->device; | ||
201 | plat_dma_dev = container_of(dma_dev, | ||
202 | struct my_platform_specific_dma, | ||
203 | dma_dev); | ||
204 | |||
205 | if (!plat_dma_dev->platform_specific_capability) | ||
206 | return DMA_DUP; | ||
207 | |||
208 | . . . | ||
209 | } | ||
210 | 201 | ||
211 | 5 SOURCE | 202 | 5 SOURCE |
212 | include/linux/dmaengine.h: core header file for DMA drivers and clients | 203 | |
204 | include/linux/dmaengine.h: core header file for DMA drivers and api users | ||
213 | drivers/dma/dmaengine.c: offload engine channel management routines | 205 | drivers/dma/dmaengine.c: offload engine channel management routines |
214 | drivers/dma/: location for offload engine drivers | 206 | drivers/dma/: location for offload engine drivers |
215 | include/linux/async_tx.h: core header file for the async_tx api | 207 | include/linux/async_tx.h: core header file for the async_tx api |
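A minimal consumer of the dma_request_channel() interface documented in the hunk above could look like the sketch below. This is illustrative only: my_filter() and wanted_dev are hypothetical names, while dma_cap_zero(), dma_cap_set(), dma_request_channel() and dma_release_channel() are the helpers named in the text and declared in include/linux/dmaengine.h.

	/* return true only for channels provided by the DMA device we want */
	static bool my_filter(struct dma_chan *chan, void *filter_param)
	{
		struct device *wanted_dev = filter_param;	/* hypothetical cookie */

		return chan->device->dev == wanted_dev;
	}

	...
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* exclusively allocate a memcpy-capable channel from 'wanted_dev' */
		chan = dma_request_channel(mask, my_filter, wanted_dev);
		if (chan) {
			/* ... issue device-to-memory operations on 'chan' ... */
			dma_release_channel(chan);
		}

A channel obtained this way is exclusive to the caller until dma_release_channel() is called; per caveat 1/ above it is not handed back to the general-purpose allocator afterwards.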
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
new file mode 100644
index 000000000000..0c1c2f63c0a9
--- /dev/null
+++ b/Documentation/dmaengine.txt
@@ -0,0 +1 @@ | |||
See Documentation/crypto/async-tx-api.txt | |||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fb849020aea9..fcc48bf722a8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -141,6 +141,7 @@ and is between 256 and 4096 characters. It is defined in the file | |||
141 | ht -- run only enough ACPI to enable Hyper Threading | 141 | ht -- run only enough ACPI to enable Hyper Threading |
142 | strict -- Be less tolerant of platforms that are not | 142 | strict -- Be less tolerant of platforms that are not |
143 | strictly ACPI specification compliant. | 143 | strictly ACPI specification compliant. |
144 | rsdt -- prefer RSDT over (default) XSDT | ||
144 | 145 | ||
145 | See also Documentation/power/pm.txt, pci=noacpi | 146 | See also Documentation/power/pm.txt, pci=noacpi |
146 | 147 | ||
@@ -151,16 +152,20 @@ and is between 256 and 4096 characters. It is defined in the file | |||
151 | default: 0 | 152 | default: 0 |
152 | 153 | ||
153 | acpi_sleep= [HW,ACPI] Sleep options | 154 | acpi_sleep= [HW,ACPI] Sleep options |
154 | Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, old_ordering } | 155 | Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, |
155 | See Documentation/power/video.txt for s3_bios and s3_mode. | 156 | old_ordering, s4_nonvs } |
157 | See Documentation/power/video.txt for information on | ||
158 | s3_bios and s3_mode. | ||
156 | s3_beep is for debugging; it makes the PC's speaker beep | 159 | s3_beep is for debugging; it makes the PC's speaker beep |
157 | as soon as the kernel's real-mode entry point is called. | 160 | as soon as the kernel's real-mode entry point is called. |
158 | s4_nohwsig prevents ACPI hardware signature from being | 161 | s4_nohwsig prevents ACPI hardware signature from being |
159 | used during resume from hibernation. | 162 | used during resume from hibernation. |
160 | old_ordering causes the ACPI 1.0 ordering of the _PTS | 163 | old_ordering causes the ACPI 1.0 ordering of the _PTS |
161 | control method, wrt putting devices into low power | 164 | control method, with respect to putting devices into |
162 | states, to be enforced (the ACPI 2.0 ordering of _PTS is | 165 | low power states, to be enforced (the ACPI 2.0 ordering |
163 | used by default). | 166 | of _PTS is used by default). |
167 | s4_nonvs prevents the kernel from saving/restoring the | ||
168 | ACPI NVS memory during hibernation. | ||
164 | 169 | ||
165 | acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode | 170 | acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode |
166 | Format: { level | edge | high | low } | 171 | Format: { level | edge | high | low } |
@@ -195,7 +200,7 @@ and is between 256 and 4096 characters. It is defined in the file | |||
195 | acpi_skip_timer_override [HW,ACPI] | 200 | acpi_skip_timer_override [HW,ACPI] |
196 | Recognize and ignore IRQ0/pin2 Interrupt Override. | 201 | Recognize and ignore IRQ0/pin2 Interrupt Override. |
197 | For broken nForce2 BIOS resulting in XT-PIC timer. | 202 | For broken nForce2 BIOS resulting in XT-PIC timer. |
198 | acpi_use_timer_override [HW,ACPI} | 203 | acpi_use_timer_override [HW,ACPI] |
199 | Use timer override. For some broken Nvidia NF5 boards | 204 | Use timer override. For some broken Nvidia NF5 boards |
200 | that require a timer override, but don't have | 205 | that require a timer override, but don't have |
201 | HPET | 206 | HPET |
@@ -878,17 +883,19 @@ and is between 256 and 4096 characters. It is defined in the file | |||
878 | See Documentation/ide/ide.txt. | 883 | See Documentation/ide/ide.txt. |
879 | 884 | ||
880 | idle= [X86] | 885 | idle= [X86] |
881 | Format: idle=poll or idle=mwait, idle=halt, idle=nomwait | 886 | Format: idle=poll, idle=mwait, idle=halt, idle=nomwait |
882 | Poll forces a polling idle loop that can slightly improves the performance | 887 | Poll forces a polling idle loop that can slightly |
883 | of waking up a idle CPU, but will use a lot of power and make the system | 888 | improve the performance of waking up a idle CPU, but |
884 | run hot. Not recommended. | 889 | will use a lot of power and make the system run hot. |
885 | idle=mwait. On systems which support MONITOR/MWAIT but the kernel chose | 890 | Not recommended. |
886 | to not use it because it doesn't save as much power as a normal idle | 891 | idle=mwait: On systems which support MONITOR/MWAIT but |
887 | loop use the MONITOR/MWAIT idle loop anyways. Performance should be the same | 892 | the kernel chose to not use it because it doesn't save |
888 | as idle=poll. | 893 | as much power as a normal idle loop, use the |
889 | idle=halt. Halt is forced to be used for CPU idle. | 894 | MONITOR/MWAIT idle loop anyways. Performance should be |
895 | the same as idle=poll. | ||
896 | idle=halt: Halt is forced to be used for CPU idle. | ||
890 | In such case C2/C3 won't be used again. | 897 | In such case C2/C3 won't be used again. |
891 | idle=nomwait. Disable mwait for CPU C-states | 898 | idle=nomwait: Disable mwait for CPU C-states |
892 | 899 | ||
893 | ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem | 900 | ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem |
894 | Claim all unknown PCI IDE storage controllers. | 901 | Claim all unknown PCI IDE storage controllers. |
@@ -1074,8 +1081,8 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1074 | lapic [X86-32,APIC] Enable the local APIC even if BIOS | 1081 | lapic [X86-32,APIC] Enable the local APIC even if BIOS |
1075 | disabled it. | 1082 | disabled it. |
1076 | 1083 | ||
1077 | lapic_timer_c2_ok [X86-32,x86-64,APIC] trust the local apic timer in | 1084 | lapic_timer_c2_ok [X86-32,x86-64,APIC] trust the local apic timer |
1078 | C2 power state. | 1085 | in C2 power state. |
1079 | 1086 | ||
1080 | libata.dma= [LIBATA] DMA control | 1087 | libata.dma= [LIBATA] DMA control |
1081 | libata.dma=0 Disable all PATA and SATA DMA | 1088 | libata.dma=0 Disable all PATA and SATA DMA |
@@ -2303,7 +2310,8 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2303 | 2310 | ||
2304 | thermal.psv= [HW,ACPI] | 2311 | thermal.psv= [HW,ACPI] |
2305 | -1: disable all passive trip points | 2312 | -1: disable all passive trip points |
2306 | <degrees C>: override all passive trip points to this value | 2313 | <degrees C>: override all passive trip points to this |
2314 | value | ||
2307 | 2315 | ||
2308 | thermal.tzp= [HW,ACPI] | 2316 | thermal.tzp= [HW,ACPI] |
2309 | Specify global default ACPI thermal zone polling rate | 2317 | Specify global default ACPI thermal zone polling rate |
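The options touched above are ordinary kernel command-line parameters. A hypothetical combination (not taken from the patch) that prefers the RSDT over the XSDT, re-POSTs video through the BIOS on resume and skips the ACPI NVS save/restore during hibernation would be:

	acpi=rsdt acpi_sleep=s3_bios,s4_nonvs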
diff --git a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt b/Documentation/powerpc/dts-bindings/4xx/ndfc.txt
new file mode 100644
index 000000000000..869f0b5f16e8
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/ndfc.txt
@@ -0,0 +1,39 @@ | |||
1 | AMCC NDFC (NanD Flash Controller) | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : "ibm,ndfc". | ||
5 | - reg : should specify chip select and size used for the chip (0x2000). | ||
6 | |||
7 | Optional properties: | ||
8 | - ccr : NDFC config and control register value (default 0). | ||
9 | - bank-settings : NDFC bank configuration register value (default 0). | ||
10 | |||
11 | Notes: | ||
12 | - partition(s) - follows the OF MTD standard for partitions | ||
13 | |||
14 | Example: | ||
15 | |||
16 | ndfc@1,0 { | ||
17 | compatible = "ibm,ndfc"; | ||
18 | reg = <0x00000001 0x00000000 0x00002000>; | ||
19 | ccr = <0x00001000>; | ||
20 | bank-settings = <0x80002222>; | ||
21 | #address-cells = <1>; | ||
22 | #size-cells = <1>; | ||
23 | |||
24 | nand { | ||
25 | #address-cells = <1>; | ||
26 | #size-cells = <1>; | ||
27 | |||
28 | partition@0 { | ||
29 | label = "kernel"; | ||
30 | reg = <0x00000000 0x00200000>; | ||
31 | }; | ||
32 | partition@200000 { | ||
33 | label = "root"; | ||
34 | reg = <0x00200000 0x03E00000>; | ||
35 | }; | ||
36 | }; | ||
37 | }; | ||
38 | |||
39 | |||
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index c5e28a46b292..a8d91b6c136b 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/spi/spi.h> | 27 | #include <linux/spi/spi.h> |
28 | #include <linux/spi/ads7846.h> | 28 | #include <linux/spi/ads7846.h> |
29 | #include <linux/spi/corgi_lcd.h> | 29 | #include <linux/spi/corgi_lcd.h> |
30 | #include <linux/mtd/sharpsl.h> | ||
30 | #include <video/w100fb.h> | 31 | #include <video/w100fb.h> |
31 | 32 | ||
32 | #include <asm/setup.h> | 33 | #include <asm/setup.h> |
@@ -542,6 +543,55 @@ err_free_1: | |||
542 | static inline void corgi_init_spi(void) {} | 543 | static inline void corgi_init_spi(void) {} |
543 | #endif | 544 | #endif |
544 | 545 | ||
546 | static struct mtd_partition sharpsl_nand_partitions[] = { | ||
547 | { | ||
548 | .name = "System Area", | ||
549 | .offset = 0, | ||
550 | .size = 7 * 1024 * 1024, | ||
551 | }, | ||
552 | { | ||
553 | .name = "Root Filesystem", | ||
554 | .offset = 7 * 1024 * 1024, | ||
555 | .size = 25 * 1024 * 1024, | ||
556 | }, | ||
557 | { | ||
558 | .name = "Home Filesystem", | ||
559 | .offset = MTDPART_OFS_APPEND, | ||
560 | .size = MTDPART_SIZ_FULL, | ||
561 | }, | ||
562 | }; | ||
563 | |||
564 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; | ||
565 | |||
566 | static struct nand_bbt_descr sharpsl_bbt = { | ||
567 | .options = 0, | ||
568 | .offs = 4, | ||
569 | .len = 2, | ||
570 | .pattern = scan_ff_pattern | ||
571 | }; | ||
572 | |||
573 | static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = { | ||
574 | .badblock_pattern = &sharpsl_bbt, | ||
575 | .partitions = sharpsl_nand_partitions, | ||
576 | .nr_partitions = ARRAY_SIZE(sharpsl_nand_partitions), | ||
577 | }; | ||
578 | |||
579 | static struct resource sharpsl_nand_resources[] = { | ||
580 | { | ||
581 | .start = 0x0C000000, | ||
582 | .end = 0x0C000FFF, | ||
583 | .flags = IORESOURCE_MEM, | ||
584 | }, | ||
585 | }; | ||
586 | |||
587 | static struct platform_device sharpsl_nand_device = { | ||
588 | .name = "sharpsl-nand", | ||
589 | .id = -1, | ||
590 | .resource = sharpsl_nand_resources, | ||
591 | .num_resources = ARRAY_SIZE(sharpsl_nand_resources), | ||
592 | .dev.platform_data = &sharpsl_nand_platform_data, | ||
593 | }; | ||
594 | |||
545 | static struct mtd_partition sharpsl_rom_parts[] = { | 595 | static struct mtd_partition sharpsl_rom_parts[] = { |
546 | { | 596 | { |
547 | .name ="Boot PROM Filesystem", | 597 | .name ="Boot PROM Filesystem", |
@@ -577,6 +627,7 @@ static struct platform_device *devices[] __initdata = { | |||
577 | &corgifb_device, | 627 | &corgifb_device, |
578 | &corgikbd_device, | 628 | &corgikbd_device, |
579 | &corgiled_device, | 629 | &corgiled_device, |
630 | &sharpsl_nand_device, | ||
580 | &sharpsl_rom_device, | 631 | &sharpsl_rom_device, |
581 | }; | 632 | }; |
582 | 633 | ||
@@ -617,6 +668,9 @@ static void __init corgi_init(void) | |||
617 | 668 | ||
618 | platform_scoop_config = &corgi_pcmcia_config; | 669 | platform_scoop_config = &corgi_pcmcia_config; |
619 | 670 | ||
671 | if (machine_is_husky()) | ||
672 | sharpsl_nand_partitions[1].size = 53 * 1024 * 1024; | ||
673 | |||
620 | platform_add_devices(devices, ARRAY_SIZE(devices)); | 674 | platform_add_devices(devices, ARRAY_SIZE(devices)); |
621 | } | 675 | } |
622 | 676 | ||
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index ae88855bf974..f9093beba752 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
26 | #include <linux/spi/ads7846.h> | 26 | #include <linux/spi/ads7846.h> |
27 | #include <linux/mtd/sharpsl.h> | ||
27 | 28 | ||
28 | #include <mach/hardware.h> | 29 | #include <mach/hardware.h> |
29 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
@@ -414,6 +415,55 @@ static struct pxafb_mach_info poodle_fb_info = { | |||
414 | .lcd_conn = LCD_COLOR_TFT_16BPP, | 415 | .lcd_conn = LCD_COLOR_TFT_16BPP, |
415 | }; | 416 | }; |
416 | 417 | ||
418 | static struct mtd_partition sharpsl_nand_partitions[] = { | ||
419 | { | ||
420 | .name = "System Area", | ||
421 | .offset = 0, | ||
422 | .size = 7 * 1024 * 1024, | ||
423 | }, | ||
424 | { | ||
425 | .name = "Root Filesystem", | ||
426 | .offset = 7 * 1024 * 1024, | ||
427 | .size = 22 * 1024 * 1024, | ||
428 | }, | ||
429 | { | ||
430 | .name = "Home Filesystem", | ||
431 | .offset = MTDPART_OFS_APPEND, | ||
432 | .size = MTDPART_SIZ_FULL, | ||
433 | }, | ||
434 | }; | ||
435 | |||
436 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; | ||
437 | |||
438 | static struct nand_bbt_descr sharpsl_bbt = { | ||
439 | .options = 0, | ||
440 | .offs = 4, | ||
441 | .len = 2, | ||
442 | .pattern = scan_ff_pattern | ||
443 | }; | ||
444 | |||
445 | static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = { | ||
446 | .badblock_pattern = &sharpsl_bbt, | ||
447 | .partitions = sharpsl_nand_partitions, | ||
448 | .nr_partitions = ARRAY_SIZE(sharpsl_nand_partitions), | ||
449 | }; | ||
450 | |||
451 | static struct resource sharpsl_nand_resources[] = { | ||
452 | { | ||
453 | .start = 0x0C000000, | ||
454 | .end = 0x0C000FFF, | ||
455 | .flags = IORESOURCE_MEM, | ||
456 | }, | ||
457 | }; | ||
458 | |||
459 | static struct platform_device sharpsl_nand_device = { | ||
460 | .name = "sharpsl-nand", | ||
461 | .id = -1, | ||
462 | .resource = sharpsl_nand_resources, | ||
463 | .num_resources = ARRAY_SIZE(sharpsl_nand_resources), | ||
464 | .dev.platform_data = &sharpsl_nand_platform_data, | ||
465 | }; | ||
466 | |||
417 | static struct mtd_partition sharpsl_rom_parts[] = { | 467 | static struct mtd_partition sharpsl_rom_parts[] = { |
418 | { | 468 | { |
419 | .name ="Boot PROM Filesystem", | 469 | .name ="Boot PROM Filesystem", |
@@ -447,6 +497,7 @@ static struct platform_device sharpsl_rom_device = { | |||
447 | static struct platform_device *devices[] __initdata = { | 497 | static struct platform_device *devices[] __initdata = { |
448 | &poodle_locomo_device, | 498 | &poodle_locomo_device, |
449 | &poodle_scoop_device, | 499 | &poodle_scoop_device, |
500 | &sharpsl_nand_device, | ||
450 | &sharpsl_rom_device, | 501 | &sharpsl_rom_device, |
451 | }; | 502 | }; |
452 | 503 | ||
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 7299d87a1cb3..6d447c9ce8ab 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/spi/spi.h> | 31 | #include <linux/spi/spi.h> |
32 | #include <linux/spi/ads7846.h> | 32 | #include <linux/spi/ads7846.h> |
33 | #include <linux/spi/corgi_lcd.h> | 33 | #include <linux/spi/corgi_lcd.h> |
34 | #include <linux/mtd/sharpsl.h> | ||
34 | 35 | ||
35 | #include <asm/setup.h> | 36 | #include <asm/setup.h> |
36 | #include <asm/memory.h> | 37 | #include <asm/memory.h> |
@@ -613,6 +614,54 @@ static struct pxafb_mach_info spitz_pxafb_info = { | |||
613 | .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_ALTERNATE_MAPPING, | 614 | .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_ALTERNATE_MAPPING, |
614 | }; | 615 | }; |
615 | 616 | ||
617 | static struct mtd_partition sharpsl_nand_partitions[] = { | ||
618 | { | ||
619 | .name = "System Area", | ||
620 | .offset = 0, | ||
621 | .size = 7 * 1024 * 1024, | ||
622 | }, | ||
623 | { | ||
624 | .name = "Root Filesystem", | ||
625 | .offset = 7 * 1024 * 1024, | ||
626 | }, | ||
627 | { | ||
628 | .name = "Home Filesystem", | ||
629 | .offset = MTDPART_OFS_APPEND, | ||
630 | .size = MTDPART_SIZ_FULL, | ||
631 | }, | ||
632 | }; | ||
633 | |||
634 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; | ||
635 | |||
636 | static struct nand_bbt_descr sharpsl_bbt = { | ||
637 | .options = 0, | ||
638 | .offs = 4, | ||
639 | .len = 2, | ||
640 | .pattern = scan_ff_pattern | ||
641 | }; | ||
642 | |||
643 | static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = { | ||
644 | .badblock_pattern = &sharpsl_bbt, | ||
645 | .partitions = sharpsl_nand_partitions, | ||
646 | .nr_partitions = ARRAY_SIZE(sharpsl_nand_partitions), | ||
647 | }; | ||
648 | |||
649 | static struct resource sharpsl_nand_resources[] = { | ||
650 | { | ||
651 | .start = 0x0C000000, | ||
652 | .end = 0x0C000FFF, | ||
653 | .flags = IORESOURCE_MEM, | ||
654 | }, | ||
655 | }; | ||
656 | |||
657 | static struct platform_device sharpsl_nand_device = { | ||
658 | .name = "sharpsl-nand", | ||
659 | .id = -1, | ||
660 | .resource = sharpsl_nand_resources, | ||
661 | .num_resources = ARRAY_SIZE(sharpsl_nand_resources), | ||
662 | .dev.platform_data = &sharpsl_nand_platform_data, | ||
663 | }; | ||
664 | |||
616 | 665 | ||
617 | static struct mtd_partition sharpsl_rom_parts[] = { | 666 | static struct mtd_partition sharpsl_rom_parts[] = { |
618 | { | 667 | { |
@@ -648,6 +697,7 @@ static struct platform_device *devices[] __initdata = { | |||
648 | &spitzscoop_device, | 697 | &spitzscoop_device, |
649 | &spitzkbd_device, | 698 | &spitzkbd_device, |
650 | &spitzled_device, | 699 | &spitzled_device, |
700 | &sharpsl_nand_device, | ||
651 | &sharpsl_rom_device, | 701 | &sharpsl_rom_device, |
652 | }; | 702 | }; |
653 | 703 | ||
@@ -671,6 +721,14 @@ static void __init common_init(void) | |||
671 | pm_power_off = spitz_poweroff; | 721 | pm_power_off = spitz_poweroff; |
672 | arm_pm_restart = spitz_restart; | 722 | arm_pm_restart = spitz_restart; |
673 | 723 | ||
724 | if (machine_is_spitz()) { | ||
725 | sharpsl_nand_partitions[1].size = 5 * 1024 * 1024; | ||
726 | } else if (machine_is_akita()) { | ||
727 | sharpsl_nand_partitions[1].size = 58 * 1024 * 1024; | ||
728 | } else if (machine_is_borzoi()) { | ||
729 | sharpsl_nand_partitions[1].size = 32 * 1024 * 1024; | ||
730 | } | ||
731 | |||
674 | PMCR = 0x00; | 732 | PMCR = 0x00; |
675 | 733 | ||
676 | /* Stop 3.6MHz and drive HIGH to PCMCIA and CS */ | 734 | /* Stop 3.6MHz and drive HIGH to PCMCIA and CS */ |
@@ -715,10 +773,29 @@ static struct i2c_board_info akita_i2c_board_info[] = { | |||
715 | }, | 773 | }, |
716 | }; | 774 | }; |
717 | 775 | ||
776 | static struct nand_bbt_descr sharpsl_akita_bbt = { | ||
777 | .options = 0, | ||
778 | .offs = 4, | ||
779 | .len = 1, | ||
780 | .pattern = scan_ff_pattern | ||
781 | }; | ||
782 | |||
783 | static struct nand_ecclayout akita_oobinfo = { | ||
784 | .eccbytes = 24, | ||
785 | .eccpos = { | ||
786 | 0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11, | ||
787 | 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23, | ||
788 | 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37}, | ||
789 | .oobfree = {{0x08, 0x09}} | ||
790 | }; | ||
791 | |||
718 | static void __init akita_init(void) | 792 | static void __init akita_init(void) |
719 | { | 793 | { |
720 | spitz_ficp_platform_data.transceiver_mode = akita_irda_transceiver_mode; | 794 | spitz_ficp_platform_data.transceiver_mode = akita_irda_transceiver_mode; |
721 | 795 | ||
796 | sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt; | ||
797 | sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo; | ||
798 | |||
722 | /* We just pretend the second element of the array doesn't exist */ | 799 | /* We just pretend the second element of the array doesn't exist */ |
723 | spitz_pcmcia_config.num_devs = 1; | 800 | spitz_pcmcia_config.num_devs = 1; |
724 | platform_scoop_config = &spitz_pcmcia_config; | 801 | platform_scoop_config = &spitz_pcmcia_config; |
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index ea7bc1e8562b..3fbfd1e32a9e 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1305,7 +1305,7 @@ struct platform_device *__init | |||
1305 | at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | 1305 | at32_add_device_mci(unsigned int id, struct mci_platform_data *data) |
1306 | { | 1306 | { |
1307 | struct platform_device *pdev; | 1307 | struct platform_device *pdev; |
1308 | struct dw_dma_slave *dws; | 1308 | struct dw_dma_slave *dws = &data->dma_slave; |
1309 | u32 pioa_mask; | 1309 | u32 pioa_mask; |
1310 | u32 piob_mask; | 1310 | u32 piob_mask; |
1311 | 1311 | ||
@@ -1324,22 +1324,13 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | |||
1324 | ARRAY_SIZE(atmel_mci0_resource))) | 1324 | ARRAY_SIZE(atmel_mci0_resource))) |
1325 | goto fail; | 1325 | goto fail; |
1326 | 1326 | ||
1327 | if (data->dma_slave) | 1327 | dws->dma_dev = &dw_dmac0_device.dev; |
1328 | dws = kmemdup(to_dw_dma_slave(data->dma_slave), | 1328 | dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; |
1329 | sizeof(struct dw_dma_slave), GFP_KERNEL); | ||
1330 | else | ||
1331 | dws = kzalloc(sizeof(struct dw_dma_slave), GFP_KERNEL); | ||
1332 | |||
1333 | dws->slave.dev = &pdev->dev; | ||
1334 | dws->slave.dma_dev = &dw_dmac0_device.dev; | ||
1335 | dws->slave.reg_width = DMA_SLAVE_WIDTH_32BIT; | ||
1336 | dws->cfg_hi = (DWC_CFGH_SRC_PER(0) | 1329 | dws->cfg_hi = (DWC_CFGH_SRC_PER(0) |
1337 | | DWC_CFGH_DST_PER(1)); | 1330 | | DWC_CFGH_DST_PER(1)); |
1338 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | 1331 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL |
1339 | | DWC_CFGL_HS_SRC_POL); | 1332 | | DWC_CFGL_HS_SRC_POL); |
1340 | 1333 | ||
1341 | data->dma_slave = &dws->slave; | ||
1342 | |||
1343 | if (platform_device_add_data(pdev, data, | 1334 | if (platform_device_add_data(pdev, data, |
1344 | sizeof(struct mci_platform_data))) | 1335 | sizeof(struct mci_platform_data))) |
1345 | goto fail; | 1336 | goto fail; |
diff --git a/arch/ia64/include/asm/acpi-ext.h b/arch/ia64/include/asm/acpi-ext.h
index 734d137dda6e..7f8362b379eb 100644
--- a/arch/ia64/include/asm/acpi-ext.h
+++ b/arch/ia64/include/asm/acpi-ext.h
@@ -14,7 +14,6 @@ | |||
14 | #define _ASM_IA64_ACPI_EXT_H | 14 | #define _ASM_IA64_ACPI_EXT_H |
15 | 15 | ||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <acpi/actypes.h> | ||
18 | 17 | ||
19 | extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length); | 18 | extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length); |
20 | 19 | ||
diff --git a/arch/ia64/include/asm/sn/acpi.h b/arch/ia64/include/asm/sn/acpi.h
index 9ce2801cbd57..fd480db25565 100644
--- a/arch/ia64/include/asm/sn/acpi.h
+++ b/arch/ia64/include/asm/sn/acpi.h
@@ -9,8 +9,6 @@ | |||
9 | #ifndef _ASM_IA64_SN_ACPI_H | 9 | #ifndef _ASM_IA64_SN_ACPI_H |
10 | #define _ASM_IA64_SN_ACPI_H | 10 | #define _ASM_IA64_SN_ACPI_H |
11 | 11 | ||
12 | #include "acpi/acglobal.h" | ||
13 | |||
14 | extern int sn_acpi_rev; | 12 | extern int sn_acpi_rev; |
15 | #define SN_ACPI_BASE_SUPPORT() (sn_acpi_rev >= 0x20101) | 13 | #define SN_ACPI_BASE_SUPPORT() (sn_acpi_rev >= 0x20101) |
16 | 14 | ||
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 0553648b7595..d541671caf4a 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -65,6 +65,7 @@ EXPORT_SYMBOL(pm_idle); | |||
65 | void (*pm_power_off) (void); | 65 | void (*pm_power_off) (void); |
66 | EXPORT_SYMBOL(pm_power_off); | 66 | EXPORT_SYMBOL(pm_power_off); |
67 | 67 | ||
68 | u32 acpi_rsdt_forced; | ||
68 | unsigned int acpi_cpei_override; | 69 | unsigned int acpi_cpei_override; |
69 | unsigned int acpi_cpei_phys_cpuid; | 70 | unsigned int acpi_cpei_phys_cpuid; |
70 | 71 | ||
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index bc610a6c7851..c5a214026a77 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <asm/sn/sn_sal.h> | 13 | #include <asm/sn/sn_sal.h> |
14 | #include "xtalk/hubdev.h" | 14 | #include "xtalk/hubdev.h" |
15 | #include <linux/acpi.h> | 15 | #include <linux/acpi.h> |
16 | #include <acpi/acnamesp.h> | ||
17 | 16 | ||
18 | 17 | ||
19 | /* | 18 | /* |
@@ -64,6 +63,7 @@ static acpi_status __init | |||
64 | sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret) | 63 | sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret) |
65 | { | 64 | { |
66 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 65 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
66 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
67 | u64 addr; | 67 | u64 addr; |
68 | struct hubdev_info *hubdev; | 68 | struct hubdev_info *hubdev; |
69 | struct hubdev_info *hubdev_ptr; | 69 | struct hubdev_info *hubdev_ptr; |
@@ -77,11 +77,12 @@ sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret) | |||
77 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, | 77 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, |
78 | &sn_uuid, &buffer); | 78 | &sn_uuid, &buffer); |
79 | if (ACPI_FAILURE(status)) { | 79 | if (ACPI_FAILURE(status)) { |
80 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
80 | printk(KERN_ERR | 81 | printk(KERN_ERR |
81 | "sn_acpi_hubdev_init: acpi_get_vendor_resource() " | 82 | "sn_acpi_hubdev_init: acpi_get_vendor_resource() " |
82 | "(0x%x) failed for: ", status); | 83 | "(0x%x) failed for: %s\n", status, |
83 | acpi_ns_print_node_pathname(handle, NULL); | 84 | (char *)name_buffer.pointer); |
84 | printk("\n"); | 85 | kfree(name_buffer.pointer); |
85 | return AE_OK; /* Continue walking namespace */ | 86 | return AE_OK; /* Continue walking namespace */ |
86 | } | 87 | } |
87 | 88 | ||
@@ -89,11 +90,12 @@ sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret) | |||
89 | vendor = &resource->data.vendor_typed; | 90 | vendor = &resource->data.vendor_typed; |
90 | if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != | 91 | if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != |
91 | sizeof(struct hubdev_info *)) { | 92 | sizeof(struct hubdev_info *)) { |
93 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
92 | printk(KERN_ERR | 94 | printk(KERN_ERR |
93 | "sn_acpi_hubdev_init: Invalid vendor data length: %d for: ", | 95 | "sn_acpi_hubdev_init: Invalid vendor data length: " |
94 | vendor->byte_length); | 96 | "%d for: %s\n", |
95 | acpi_ns_print_node_pathname(handle, NULL); | 97 | vendor->byte_length, (char *)name_buffer.pointer); |
96 | printk("\n"); | 98 | kfree(name_buffer.pointer); |
97 | goto exit; | 99 | goto exit; |
98 | } | 100 | } |
99 | 101 | ||
@@ -120,6 +122,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus) | |||
120 | { | 122 | { |
121 | u64 addr; | 123 | u64 addr; |
122 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 124 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
125 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
123 | acpi_handle handle; | 126 | acpi_handle handle; |
124 | struct pcibus_bussoft *prom_bussoft_ptr; | 127 | struct pcibus_bussoft *prom_bussoft_ptr; |
125 | struct acpi_resource *resource; | 128 | struct acpi_resource *resource; |
@@ -131,11 +134,11 @@ sn_get_bussoft_ptr(struct pci_bus *bus) | |||
131 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, | 134 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, |
132 | &sn_uuid, &buffer); | 135 | &sn_uuid, &buffer); |
133 | if (ACPI_FAILURE(status)) { | 136 | if (ACPI_FAILURE(status)) { |
137 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
134 | printk(KERN_ERR "%s: " | 138 | printk(KERN_ERR "%s: " |
135 | "acpi_get_vendor_resource() failed (0x%x) for: ", | 139 | "acpi_get_vendor_resource() failed (0x%x) for: %s\n", |
136 | __func__, status); | 140 | __func__, status, (char *)name_buffer.pointer); |
137 | acpi_ns_print_node_pathname(handle, NULL); | 141 | kfree(name_buffer.pointer); |
138 | printk("\n"); | ||
139 | return NULL; | 142 | return NULL; |
140 | } | 143 | } |
141 | resource = buffer.pointer; | 144 | resource = buffer.pointer; |
@@ -168,6 +171,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, | |||
168 | { | 171 | { |
169 | u64 addr; | 172 | u64 addr; |
170 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 173 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
174 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
171 | struct sn_irq_info *irq_info, *irq_info_prom; | 175 | struct sn_irq_info *irq_info, *irq_info_prom; |
172 | struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr; | 176 | struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr; |
173 | struct acpi_resource *resource; | 177 | struct acpi_resource *resource; |
@@ -182,11 +186,11 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, | |||
182 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, | 186 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, |
183 | &sn_uuid, &buffer); | 187 | &sn_uuid, &buffer); |
184 | if (ACPI_FAILURE(status)) { | 188 | if (ACPI_FAILURE(status)) { |
189 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
185 | printk(KERN_ERR | 190 | printk(KERN_ERR |
186 | "%s: acpi_get_vendor_resource() failed (0x%x) for: ", | 191 | "%s: acpi_get_vendor_resource() failed (0x%x) for: %s\n", |
187 | __func__, status); | 192 | __func__, status, (char *)name_buffer.pointer); |
188 | acpi_ns_print_node_pathname(handle, NULL); | 193 | kfree(name_buffer.pointer); |
189 | printk("\n"); | ||
190 | return 1; | 194 | return 1; |
191 | } | 195 | } |
192 | 196 | ||
@@ -194,11 +198,12 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, | |||
194 | vendor = &resource->data.vendor_typed; | 198 | vendor = &resource->data.vendor_typed; |
195 | if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != | 199 | if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != |
196 | sizeof(struct pci_devdev_info *)) { | 200 | sizeof(struct pci_devdev_info *)) { |
201 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
197 | printk(KERN_ERR | 202 | printk(KERN_ERR |
198 | "%s: Invalid vendor data length: %d for: ", | 203 | "%s: Invalid vendor data length: %d for: %s\n", |
199 | __func__, vendor->byte_length); | 204 | __func__, vendor->byte_length, |
200 | acpi_ns_print_node_pathname(handle, NULL); | 205 | (char *)name_buffer.pointer); |
201 | printk("\n"); | 206 | kfree(name_buffer.pointer); |
202 | ret = 1; | 207 | ret = 1; |
203 | goto exit; | 208 | goto exit; |
204 | } | 209 | } |
@@ -239,6 +244,9 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) | |||
239 | acpi_handle parent; | 244 | acpi_handle parent; |
240 | int slot; | 245 | int slot; |
241 | acpi_status status; | 246 | acpi_status status; |
247 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
248 | |||
249 | acpi_get_name(device_handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
242 | 250 | ||
243 | /* | 251 | /* |
244 | * Do an upward search to find the root bus device, and | 252 | * Do an upward search to find the root bus device, and |
@@ -249,9 +257,8 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) | |||
249 | status = acpi_get_parent(child, &parent); | 257 | status = acpi_get_parent(child, &parent); |
250 | if (ACPI_FAILURE(status)) { | 258 | if (ACPI_FAILURE(status)) { |
251 | printk(KERN_ERR "%s: acpi_get_parent() failed " | 259 | printk(KERN_ERR "%s: acpi_get_parent() failed " |
252 | "(0x%x) for: ", __func__, status); | 260 | "(0x%x) for: %s\n", __func__, status, |
253 | acpi_ns_print_node_pathname(child, NULL); | 261 | (char *)name_buffer.pointer); |
254 | printk("\n"); | ||
255 | panic("%s: Unable to find host devfn\n", __func__); | 262 | panic("%s: Unable to find host devfn\n", __func__); |
256 | } | 263 | } |
257 | if (parent == rootbus_handle) | 264 | if (parent == rootbus_handle) |
@@ -259,22 +266,20 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) | |||
259 | child = parent; | 266 | child = parent; |
260 | } | 267 | } |
261 | if (!child) { | 268 | if (!child) { |
262 | printk(KERN_ERR "%s: Unable to find root bus for: ", | 269 | printk(KERN_ERR "%s: Unable to find root bus for: %s\n", |
263 | __func__); | 270 | __func__, (char *)name_buffer.pointer); |
264 | acpi_ns_print_node_pathname(device_handle, NULL); | ||
265 | printk("\n"); | ||
266 | BUG(); | 271 | BUG(); |
267 | } | 272 | } |
268 | 273 | ||
269 | status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr); | 274 | status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr); |
270 | if (ACPI_FAILURE(status)) { | 275 | if (ACPI_FAILURE(status)) { |
271 | printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ", | 276 | printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: %s\n", |
272 | __func__, status); | 277 | __func__, status, (char *)name_buffer.pointer); |
273 | acpi_ns_print_node_pathname(child, NULL); | ||
274 | printk("\n"); | ||
275 | panic("%s: Unable to find host devfn\n", __func__); | 278 | panic("%s: Unable to find host devfn\n", __func__); |
276 | } | 279 | } |
277 | 280 | ||
281 | kfree(name_buffer.pointer); | ||
282 | |||
278 | slot = (adr >> 16) & 0xffff; | 283 | slot = (adr >> 16) & 0xffff; |
279 | function = adr & 0xffff; | 284 | function = adr & 0xffff; |
280 | devfn = PCI_DEVFN(slot, function); | 285 | devfn = PCI_DEVFN(slot, function); |
@@ -300,27 +305,28 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
300 | int function; | 305 | int function; |
301 | int slot; | 306 | int slot; |
302 | struct sn_pcidev_match *info = context; | 307 | struct sn_pcidev_match *info = context; |
308 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
303 | 309 | ||
304 | status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, | 310 | status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, |
305 | &adr); | 311 | &adr); |
306 | if (ACPI_SUCCESS(status)) { | 312 | if (ACPI_SUCCESS(status)) { |
307 | status = acpi_get_parent(handle, &parent); | 313 | status = acpi_get_parent(handle, &parent); |
308 | if (ACPI_FAILURE(status)) { | 314 | if (ACPI_FAILURE(status)) { |
315 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
309 | printk(KERN_ERR | 316 | printk(KERN_ERR |
310 | "%s: acpi_get_parent() failed (0x%x) for: ", | 317 | "%s: acpi_get_parent() failed (0x%x) for: %s\n", |
311 | __func__, status); | 318 | __func__, status, (char *)name_buffer.pointer); |
312 | acpi_ns_print_node_pathname(handle, NULL); | 319 | kfree(name_buffer.pointer); |
313 | printk("\n"); | ||
314 | return AE_OK; | 320 | return AE_OK; |
315 | } | 321 | } |
316 | status = acpi_evaluate_integer(parent, METHOD_NAME__BBN, | 322 | status = acpi_evaluate_integer(parent, METHOD_NAME__BBN, |
317 | NULL, &bbn); | 323 | NULL, &bbn); |
318 | if (ACPI_FAILURE(status)) { | 324 | if (ACPI_FAILURE(status)) { |
325 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
319 | printk(KERN_ERR | 326 | printk(KERN_ERR |
320 | "%s: Failed to find _BBN in parent of: ", | 327 | "%s: Failed to find _BBN in parent of: %s\n", |
321 | __func__); | 328 | __func__, (char *)name_buffer.pointer); |
322 | acpi_ns_print_node_pathname(handle, NULL); | 329 | kfree(name_buffer.pointer); |
323 | printk("\n"); | ||
324 | return AE_OK; | 330 | return AE_OK; |
325 | } | 331 | } |
326 | 332 | ||
@@ -350,24 +356,27 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, | |||
350 | acpi_handle rootbus_handle; | 356 | acpi_handle rootbus_handle; |
351 | unsigned long long segment; | 357 | unsigned long long segment; |
352 | acpi_status status; | 358 | acpi_status status; |
359 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
353 | 360 | ||
354 | rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle; | 361 | rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle; |
355 | status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL, | 362 | status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL, |
356 | &segment); | 363 | &segment); |
357 | if (ACPI_SUCCESS(status)) { | 364 | if (ACPI_SUCCESS(status)) { |
358 | if (segment != pci_domain_nr(dev)) { | 365 | if (segment != pci_domain_nr(dev)) { |
366 | acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME, | ||
367 | &name_buffer); | ||
359 | printk(KERN_ERR | 368 | printk(KERN_ERR |
360 | "%s: Segment number mismatch, 0x%llx vs 0x%x for: ", | 369 | "%s: Segment number mismatch, 0x%llx vs 0x%x for: %s\n", |
361 | __func__, segment, pci_domain_nr(dev)); | 370 | __func__, segment, pci_domain_nr(dev), |
362 | acpi_ns_print_node_pathname(rootbus_handle, NULL); | 371 | (char *)name_buffer.pointer); |
363 | printk("\n"); | 372 | kfree(name_buffer.pointer); |
364 | return 1; | 373 | return 1; |
365 | } | 374 | } |
366 | } else { | 375 | } else { |
367 | printk(KERN_ERR "%s: Unable to get __SEG from: ", | 376 | acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME, &name_buffer); |
368 | __func__); | 377 | printk(KERN_ERR "%s: Unable to get __SEG from: %s\n", |
369 | acpi_ns_print_node_pathname(rootbus_handle, NULL); | 378 | __func__, (char *)name_buffer.pointer); |
370 | printk("\n"); | 379 | kfree(name_buffer.pointer); |
371 | return 1; | 380 | return 1; |
372 | } | 381 | } |
373 | 382 | ||
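Every hunk in this file makes the same substitution: the ACPICA-internal acpi_ns_print_node_pathname() (whose header, acpi/acnamesp.h, is no longer exported to the rest of the kernel) is replaced by the public acpi_get_name() interface. The recurring pattern, pulled out as a standalone sketch (the helper name is ours, not part of the patch):

    /* Sketch of the pathname-reporting pattern used throughout this file:
     * let ACPICA allocate the buffer, print it, then free it. */
    static void sn_report_handle(acpi_handle handle, const char *msg)
    {
    	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };

    	acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
    	printk(KERN_ERR "%s: %s\n", msg, (char *)name_buffer.pointer);
    	kfree(name_buffer.pointer);
    }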
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 8a924a5661dd..0d4ffa4da1da 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/acpi.h> | 26 | #include <linux/acpi.h> |
27 | #include <asm/sn/sn2/sn_hwperf.h> | 27 | #include <asm/sn/sn2/sn_hwperf.h> |
28 | #include <asm/sn/acpi.h> | 28 | #include <asm/sn/acpi.h> |
29 | #include "acpi/acglobal.h" | ||
30 | 29 | ||
31 | extern void sn_init_cpei_timer(void); | 30 | extern void sn_init_cpei_timer(void); |
32 | extern void register_sn_procfs(void); | 31 | extern void register_sn_procfs(void); |
@@ -473,7 +472,7 @@ sn_io_early_init(void) | |||
473 | { | 472 | { |
474 | struct acpi_table_header *header = NULL; | 473 | struct acpi_table_header *header = NULL; |
475 | 474 | ||
476 | acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header); | 475 | acpi_get_table(ACPI_SIG_DSDT, 1, &header); |
477 | BUG_ON(header == NULL); | 476 | BUG_ON(header == NULL); |
478 | sn_acpi_rev = header->oem_revision; | 477 | sn_acpi_rev = header->oem_revision; |
479 | } | 478 | } |
@@ -505,7 +504,7 @@ sn_io_early_init(void) | |||
505 | 504 | ||
506 | { | 505 | { |
507 | struct acpi_table_header *header; | 506 | struct acpi_table_header *header; |
508 | (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header); | 507 | (void)acpi_get_table(ACPI_SIG_DSDT, 1, &header); |
509 | printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", | 508 | printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", |
510 | header->oem_revision); | 509 | header->oem_revision); |
511 | } | 510 | } |
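acpi_get_table_by_index() and ACPI_TABLE_INDEX_DSDT were likewise ACPICA internals, so the DSDT is now looked up by signature. A hedged sketch of the same lookup with the status check that the second hunk discards via the (void) cast:

    struct acpi_table_header *header = NULL;
    acpi_status status;

    status = acpi_get_table(ACPI_SIG_DSDT, 1, &header);
    if (ACPI_FAILURE(status) || !header)
    	return;		/* error handling is illustrative, not from the patch */
    printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", header->oem_revision);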
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 5ddad7bd60ac..0d428278356d 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
@@ -77,7 +77,7 @@ libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` | |||
77 | 77 | ||
78 | drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ | 78 | drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ |
79 | 79 | ||
80 | PALO := $(shell if which palo; then : ; \ | 80 | PALO := $(shell if (which palo 2>&1); then : ; \ |
81 | elif [ -x /sbin/palo ]; then echo /sbin/palo; \ | 81 | elif [ -x /sbin/palo ]; then echo /sbin/palo; \ |
82 | fi) | 82 | fi) |
83 | 83 | ||
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index f88b252e419c..2121d99f8364 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild | |||
@@ -1,3 +1,4 @@ | |||
1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | unifdef-y += pdc.h | 3 | unifdef-y += pdc.h |
4 | unifdef-y += swab.h | ||
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h index db148313de5d..da66029c4cb2 100644 --- a/arch/parisc/include/asm/byteorder.h +++ b/arch/parisc/include/asm/byteorder.h | |||
@@ -1,82 +1,7 @@ | |||
1 | #ifndef _PARISC_BYTEORDER_H | 1 | #ifndef _PARISC_BYTEORDER_H |
2 | #define _PARISC_BYTEORDER_H | 2 | #define _PARISC_BYTEORDER_H |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/swab.h> |
5 | #include <linux/compiler.h> | ||
6 | |||
7 | #ifdef __GNUC__ | ||
8 | |||
9 | static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) | ||
10 | { | ||
11 | __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */ | ||
12 | "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */ | ||
13 | : "=r" (x) | ||
14 | : "0" (x)); | ||
15 | return x; | ||
16 | } | ||
17 | |||
18 | static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x) | ||
19 | { | ||
20 | __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */ | ||
21 | "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */ | ||
22 | "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */ | ||
23 | : "=r" (x) | ||
24 | : "0" (x)); | ||
25 | return x; | ||
26 | } | ||
27 | |||
28 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | ||
29 | { | ||
30 | unsigned int temp; | ||
31 | __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */ | ||
32 | "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */ | ||
33 | "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */ | ||
34 | : "=r" (x), "=&r" (temp) | ||
35 | : "0" (x)); | ||
36 | return x; | ||
37 | } | ||
38 | |||
39 | |||
40 | #if BITS_PER_LONG > 32 | ||
41 | /* | ||
42 | ** From "PA-RISC 2.0 Architecture", HP Professional Books. | ||
43 | ** See Appendix I page 8 , "Endian Byte Swapping". | ||
44 | ** | ||
45 | ** Pretty cool algorithm: (* == zero'd bits) | ||
46 | ** PERMH 01234567 -> 67452301 into %0 | ||
47 | ** HSHL 67452301 -> 7*5*3*1* into %1 | ||
48 | ** HSHR 67452301 -> *6*4*2*0 into %0 | ||
49 | ** OR %0 | %1 -> 76543210 into %0 (all done!) | ||
50 | */ | ||
51 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) { | ||
52 | __u64 temp; | ||
53 | __asm__("permh,3210 %0, %0\n\t" | ||
54 | "hshl %0, 8, %1\n\t" | ||
55 | "hshr,u %0, 8, %0\n\t" | ||
56 | "or %1, %0, %0" | ||
57 | : "=r" (x), "=&r" (temp) | ||
58 | : "0" (x)); | ||
59 | return x; | ||
60 | } | ||
61 | #define __arch__swab64(x) ___arch__swab64(x) | ||
62 | #define __BYTEORDER_HAS_U64__ | ||
63 | #elif !defined(__STRICT_ANSI__) | ||
64 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) | ||
65 | { | ||
66 | __u32 t1 = ___arch__swab32((__u32) x); | ||
67 | __u32 t2 = ___arch__swab32((__u32) (x >> 32)); | ||
68 | return (((__u64) t1 << 32) | t2); | ||
69 | } | ||
70 | #define __arch__swab64(x) ___arch__swab64(x) | ||
71 | #define __BYTEORDER_HAS_U64__ | ||
72 | #endif | ||
73 | |||
74 | #define __arch__swab16(x) ___arch__swab16(x) | ||
75 | #define __arch__swab24(x) ___arch__swab24(x) | ||
76 | #define __arch__swab32(x) ___arch__swab32(x) | ||
77 | |||
78 | #endif /* __GNUC__ */ | ||
79 | |||
80 | #include <linux/byteorder/big_endian.h> | 5 | #include <linux/byteorder/big_endian.h> |
81 | 6 | ||
82 | #endif /* _PARISC_BYTEORDER_H */ | 7 | #endif /* _PARISC_BYTEORDER_H */ |
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h index e9639ccc3fce..c84b2fcb18a9 100644 --- a/arch/parisc/include/asm/checksum.h +++ b/arch/parisc/include/asm/checksum.h | |||
@@ -182,7 +182,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | |||
182 | #endif | 182 | #endif |
183 | : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len) | 183 | : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len) |
184 | : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto) | 184 | : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto) |
185 | : "r19", "r20", "r21", "r22"); | 185 | : "r19", "r20", "r21", "r22", "memory"); |
186 | return csum_fold(sum); | 186 | return csum_fold(sum); |
187 | } | 187 | } |
188 | 188 | ||
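The only change to csum_ipv6_magic() is the added "memory" clobber: the asm block reads the IPv6 addresses through the saddr/daddr pointers, so the compiler must flush any pending stores to those buffers before the asm runs and must not cache their contents across it. A minimal illustration of the same rule (not the checksum asm itself; the single load below is just an example):

    static inline unsigned int load_first_word(const unsigned int *buf)
    {
    	unsigned int val;

    	/* Without "memory", GCC may keep an earlier store to *buf
    	 * in a register and the asm would read stale data. */
    	__asm__("ldw 0(%1), %0" : "=r" (val) : "r" (buf) : "memory");
    	return val;
    }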
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 55ddb1842107..d3031d1f9d03 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h | |||
@@ -4,12 +4,6 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/pgtable.h> | 5 | #include <asm/pgtable.h> |
6 | 6 | ||
7 | extern unsigned long parisc_vmerge_boundary; | ||
8 | extern unsigned long parisc_vmerge_max_size; | ||
9 | |||
10 | #define BIO_VMERGE_BOUNDARY parisc_vmerge_boundary | ||
11 | #define BIO_VMERGE_MAX_SIZE parisc_vmerge_max_size | ||
12 | |||
13 | #define virt_to_phys(a) ((unsigned long)__pa(a)) | 7 | #define virt_to_phys(a) ((unsigned long)__pa(a)) |
14 | #define phys_to_virt(a) __va(a) | 8 | #define phys_to_virt(a) __va(a) |
15 | #define virt_to_bus virt_to_phys | 9 | #define virt_to_bus virt_to_phys |
@@ -182,9 +176,9 @@ static inline void __raw_writeq(unsigned long long b, volatile void __iomem *add | |||
182 | 176 | ||
183 | /* readb can never be const, so use __fswab instead of le*_to_cpu */ | 177 | /* readb can never be const, so use __fswab instead of le*_to_cpu */ |
184 | #define readb(addr) __raw_readb(addr) | 178 | #define readb(addr) __raw_readb(addr) |
185 | #define readw(addr) __fswab16(__raw_readw(addr)) | 179 | #define readw(addr) le16_to_cpu(__raw_readw(addr)) |
186 | #define readl(addr) __fswab32(__raw_readl(addr)) | 180 | #define readl(addr) le32_to_cpu(__raw_readl(addr)) |
187 | #define readq(addr) __fswab64(__raw_readq(addr)) | 181 | #define readq(addr) le64_to_cpu(__raw_readq(addr)) |
188 | #define writeb(b, addr) __raw_writeb(b, addr) | 182 | #define writeb(b, addr) __raw_writeb(b, addr) |
189 | #define writew(b, addr) __raw_writew(cpu_to_le16(b), addr) | 183 | #define writew(b, addr) __raw_writew(cpu_to_le16(b), addr) |
190 | #define writel(b, addr) __raw_writel(cpu_to_le32(b), addr) | 184 | #define writel(b, addr) __raw_writel(cpu_to_le32(b), addr) |
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 85856c74ad1d..354b2aca990e 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h | |||
@@ -34,16 +34,21 @@ destroy_context(struct mm_struct *mm) | |||
34 | mm->context = 0; | 34 | mm->context = 0; |
35 | } | 35 | } |
36 | 36 | ||
37 | static inline void load_context(mm_context_t context) | 37 | static inline unsigned long __space_to_prot(mm_context_t context) |
38 | { | 38 | { |
39 | mtsp(context, 3); | ||
40 | #if SPACEID_SHIFT == 0 | 39 | #if SPACEID_SHIFT == 0 |
41 | mtctl(context << 1,8); | 40 | return context << 1; |
42 | #else | 41 | #else |
43 | mtctl(context >> (SPACEID_SHIFT - 1),8); | 42 | return context >> (SPACEID_SHIFT - 1); |
44 | #endif | 43 | #endif |
45 | } | 44 | } |
46 | 45 | ||
46 | static inline void load_context(mm_context_t context) | ||
47 | { | ||
48 | mtsp(context, 3); | ||
49 | mtctl(__space_to_prot(context), 8); | ||
50 | } | ||
51 | |||
47 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) | 52 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) |
48 | { | 53 | { |
49 | 54 | ||
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 3c9d34844c83..9d64df8754ba 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
18 | #include <asm/types.h> | 18 | #include <asm/types.h> |
19 | #include <asm/system.h> | 19 | #include <asm/system.h> |
20 | #include <asm/percpu.h> | ||
20 | #endif /* __ASSEMBLY__ */ | 21 | #endif /* __ASSEMBLY__ */ |
21 | 22 | ||
22 | #define KERNEL_STACK_SIZE (4*PAGE_SIZE) | 23 | #define KERNEL_STACK_SIZE (4*PAGE_SIZE) |
@@ -109,8 +110,7 @@ struct cpuinfo_parisc { | |||
109 | }; | 110 | }; |
110 | 111 | ||
111 | extern struct system_cpuinfo_parisc boot_cpu_data; | 112 | extern struct system_cpuinfo_parisc boot_cpu_data; |
112 | extern struct cpuinfo_parisc cpu_data[NR_CPUS]; | 113 | DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data); |
113 | #define current_cpu_data cpu_data[smp_processor_id()] | ||
114 | 114 | ||
115 | #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) | 115 | #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) |
116 | 116 | ||
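cpu_data changes from a flat NR_CPUS array into a per-cpu variable; the remainder of this series converts every cpu_data[n] access accordingly (irq.c, perf.c, processor.c, setup.c, smp.c, time.c and topology.c below). The shape of the conversion as a generic sketch, with the old form shown only in the comment:

    /* Declared in asm/processor.h, defined once in kernel/processor.c: */
    DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
    DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);

    static unsigned long example_txn_addr(int cpu)
    {
    	/* replaces the old cpu_data[cpu].txn_addr array access */
    	return per_cpu(cpu_data, cpu).txn_addr;
    }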
diff --git a/arch/parisc/include/asm/swab.h b/arch/parisc/include/asm/swab.h new file mode 100644 index 000000000000..3ff16c5a3358 --- /dev/null +++ b/arch/parisc/include/asm/swab.h | |||
@@ -0,0 +1,66 @@ | |||
1 | #ifndef _PARISC_SWAB_H | ||
2 | #define _PARISC_SWAB_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
7 | #define __SWAB_64_THRU_32__ | ||
8 | |||
9 | static inline __attribute_const__ __u16 __arch_swab16(__u16 x) | ||
10 | { | ||
11 | __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */ | ||
12 | "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */ | ||
13 | : "=r" (x) | ||
14 | : "0" (x)); | ||
15 | return x; | ||
16 | } | ||
17 | #define __arch_swab16 __arch_swab16 | ||
18 | |||
19 | static inline __attribute_const__ __u32 __arch_swab24(__u32 x) | ||
20 | { | ||
21 | __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */ | ||
22 | "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */ | ||
23 | "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */ | ||
24 | : "=r" (x) | ||
25 | : "0" (x)); | ||
26 | return x; | ||
27 | } | ||
28 | |||
29 | static inline __attribute_const__ __u32 __arch_swab32(__u32 x) | ||
30 | { | ||
31 | unsigned int temp; | ||
32 | __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */ | ||
33 | "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */ | ||
34 | "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */ | ||
35 | : "=r" (x), "=&r" (temp) | ||
36 | : "0" (x)); | ||
37 | return x; | ||
38 | } | ||
39 | #define __arch_swab32 __arch_swab32 | ||
40 | |||
41 | #if BITS_PER_LONG > 32 | ||
42 | /* | ||
43 | ** From "PA-RISC 2.0 Architecture", HP Professional Books. | ||
44 | ** See Appendix I page 8 , "Endian Byte Swapping". | ||
45 | ** | ||
46 | ** Pretty cool algorithm: (* == zero'd bits) | ||
47 | ** PERMH 01234567 -> 67452301 into %0 | ||
48 | ** HSHL 67452301 -> 7*5*3*1* into %1 | ||
49 | ** HSHR 67452301 -> *6*4*2*0 into %0 | ||
50 | ** OR %0 | %1 -> 76543210 into %0 (all done!) | ||
51 | */ | ||
52 | static inline __attribute_const__ __u64 __arch_swab64(__u64 x) | ||
53 | { | ||
54 | __u64 temp; | ||
55 | __asm__("permh,3210 %0, %0\n\t" | ||
56 | "hshl %0, 8, %1\n\t" | ||
57 | "hshr,u %0, 8, %0\n\t" | ||
58 | "or %1, %0, %0" | ||
59 | : "=r" (x), "=&r" (temp) | ||
60 | : "0" (x)); | ||
61 | return x; | ||
62 | } | ||
63 | #define __arch_swab64 __arch_swab64 | ||
64 | #endif /* BITS_PER_LONG > 32 */ | ||
65 | |||
66 | #endif /* _PARISC_SWAB_H */ | ||
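The new asm/swab.h only supplies the primitives; the generic linux/swab.h picks them up because each one is published under the __arch_swab* name, and __SWAB_64_THRU_32__ tells it to synthesize swab64() from two 32-bit swaps when __arch_swab64 is absent (the 32-bit PA case). Roughly how the generic side consumes the override (simplified sketch, not the literal header):

    static inline __u32 __fswab32(__u32 val)
    {
    #ifdef __arch_swab32
    	return __arch_swab32(val);	/* the shd/dep version defined above */
    #else
    	return ___constant_swab32(val);	/* portable shift-and-mask fallback */
    #endif
    }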
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 4878b9501f24..1c6dbb6f6e56 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h | |||
@@ -241,4 +241,6 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo | |||
241 | #define __copy_to_user_inatomic __copy_to_user | 241 | #define __copy_to_user_inatomic __copy_to_user |
242 | #define __copy_from_user_inatomic __copy_from_user | 242 | #define __copy_from_user_inatomic __copy_from_user |
243 | 243 | ||
244 | int fixup_exception(struct pt_regs *regs); | ||
245 | |||
244 | #endif /* __PARISC_UACCESS_H */ | 246 | #endif /* __PARISC_UACCESS_H */ |
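The new prototype is for fixup_exception(), added in arch/parisc/mm/fault.c later in this patch; it resolves a faulting kernel address against the exception table and, when a fixup is registered, rewrites the interrupted instruction queue so the trap handler can resume there instead of dying. A condensed sketch of that logic (the full version, including the delay-slot handling and per-cpu fault bookkeeping, is in the fault.c hunk below):

    static int resume_at_fixup(struct pt_regs *regs)
    {
    	const struct exception_table_entry *fix;

    	fix = search_exception_tables(regs->iaoq[0]);
    	if (!fix)
    		return 0;
    	regs->iaoq[0] = fix->fixup & ~3;
    	regs->iaoq[1] = regs->iaoq[0] + 4;	/* skip, don't take, a branch */
    	return 1;
    }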
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 884b7ce16a3b..994bcd980909 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c | |||
@@ -549,6 +549,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv) | |||
549 | return match_device(to_parisc_driver(drv), to_parisc_device(dev)); | 549 | return match_device(to_parisc_driver(drv), to_parisc_device(dev)); |
550 | } | 550 | } |
551 | 551 | ||
552 | static ssize_t make_modalias(struct device *dev, char *buf) | ||
553 | { | ||
554 | const struct parisc_device *padev = to_parisc_device(dev); | ||
555 | const struct parisc_device_id *id = &padev->id; | ||
556 | |||
557 | return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n", | ||
558 | (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev, | ||
559 | (u32)id->sversion); | ||
560 | } | ||
561 | |||
562 | static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
563 | { | ||
564 | const struct parisc_device *padev; | ||
565 | char modalias[40]; | ||
566 | |||
567 | if (!dev) | ||
568 | return -ENODEV; | ||
569 | |||
570 | padev = to_parisc_device(dev); | ||
571 | if (!padev) | ||
572 | return -ENODEV; | ||
573 | |||
574 | if (add_uevent_var(env, "PARISC_NAME=%s", padev->name)) | ||
575 | return -ENOMEM; | ||
576 | |||
577 | make_modalias(dev, modalias); | ||
578 | if (add_uevent_var(env, "MODALIAS=%s", modalias)) | ||
579 | return -ENOMEM; | ||
580 | |||
581 | return 0; | ||
582 | } | ||
583 | |||
552 | #define pa_dev_attr(name, field, format_string) \ | 584 | #define pa_dev_attr(name, field, format_string) \ |
553 | static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \ | 585 | static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \ |
554 | { \ | 586 | { \ |
@@ -566,12 +598,7 @@ pa_dev_attr_id(sversion, "0x%05x\n"); | |||
566 | 598 | ||
567 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) | 599 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) |
568 | { | 600 | { |
569 | struct parisc_device *padev = to_parisc_device(dev); | 601 | return make_modalias(dev, buf); |
570 | struct parisc_device_id *id = &padev->id; | ||
571 | |||
572 | return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n", | ||
573 | (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev, | ||
574 | (u32)id->sversion); | ||
575 | } | 602 | } |
576 | 603 | ||
577 | static struct device_attribute parisc_device_attrs[] = { | 604 | static struct device_attribute parisc_device_attrs[] = { |
@@ -587,6 +614,7 @@ static struct device_attribute parisc_device_attrs[] = { | |||
587 | struct bus_type parisc_bus_type = { | 614 | struct bus_type parisc_bus_type = { |
588 | .name = "parisc", | 615 | .name = "parisc", |
589 | .match = parisc_generic_match, | 616 | .match = parisc_generic_match, |
617 | .uevent = parisc_uevent, | ||
590 | .dev_attrs = parisc_device_attrs, | 618 | .dev_attrs = parisc_device_attrs, |
591 | .probe = parisc_driver_probe, | 619 | .probe = parisc_driver_probe, |
592 | .remove = parisc_driver_remove, | 620 | .remove = parisc_driver_remove, |
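parisc_uevent() reports the same parisc:t..hv..rev..sv.. string as the modalias attribute, which is what lets udev and modprobe autoload a matching driver. A hypothetical driver-side counterpart might look like this (the table name and ID values are invented for illustration, not taken from any driver in this patch):

    static struct parisc_device_id example_ids[] = {
    	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00012 },
    	{ 0, }
    };
    MODULE_DEVICE_TABLE(parisc, example_ids);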
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index 2cbf13b3ef11..5595a2f31181 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S | |||
@@ -80,6 +80,7 @@ END(hpmc_pim_data) | |||
80 | 80 | ||
81 | .import intr_save, code | 81 | .import intr_save, code |
82 | ENTRY(os_hpmc) | 82 | ENTRY(os_hpmc) |
83 | .os_hpmc: | ||
83 | 84 | ||
84 | /* | 85 | /* |
85 | * registers modified: | 86 | * registers modified: |
@@ -295,5 +296,10 @@ os_hpmc_6: | |||
295 | b . | 296 | b . |
296 | nop | 297 | nop |
297 | ENDPROC(os_hpmc) | 298 | ENDPROC(os_hpmc) |
298 | ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */ | 299 | .os_hpmc_end: |
299 | nop | 300 | nop |
301 | .data | ||
302 | .align 4 | ||
303 | .export os_hpmc_size | ||
304 | os_hpmc_size: | ||
305 | .word .os_hpmc_end-.os_hpmc | ||
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 4cea935e2f99..ac2c822928c7 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -298,7 +298,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu) | |||
298 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); | 298 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); |
299 | #endif | 299 | #endif |
300 | 300 | ||
301 | return cpu_data[cpu].txn_addr; | 301 | return per_cpu(cpu_data, cpu).txn_addr; |
302 | } | 302 | } |
303 | 303 | ||
304 | 304 | ||
@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq) | |||
309 | next_cpu++; /* assign to "next" CPU we want this bugger on */ | 309 | next_cpu++; /* assign to "next" CPU we want this bugger on */ |
310 | 310 | ||
311 | /* validate entry */ | 311 | /* validate entry */ |
312 | while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr || | 312 | while ((next_cpu < NR_CPUS) && |
313 | !cpu_online(next_cpu))) | 313 | (!per_cpu(cpu_data, next_cpu).txn_addr || |
314 | !cpu_online(next_cpu))) | ||
314 | next_cpu++; | 315 | next_cpu++; |
315 | 316 | ||
316 | if (next_cpu >= NR_CPUS) | 317 | if (next_cpu >= NR_CPUS) |
@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
359 | printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", | 360 | printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", |
360 | irq, smp_processor_id(), cpu); | 361 | irq, smp_processor_id(), cpu); |
361 | gsc_writel(irq + CPU_IRQ_BASE, | 362 | gsc_writel(irq + CPU_IRQ_BASE, |
362 | cpu_data[cpu].hpa); | 363 | per_cpu(cpu_data, cpu).hpa); |
363 | goto set_out; | 364 | goto set_out; |
364 | } | 365 | } |
365 | #endif | 366 | #endif |
@@ -421,5 +422,5 @@ void __init init_IRQ(void) | |||
421 | 422 | ||
422 | void ack_bad_irq(unsigned int irq) | 423 | void ack_bad_irq(unsigned int irq) |
423 | { | 424 | { |
424 | printk("unexpected IRQ %d\n", irq); | 425 | printk(KERN_WARNING "unexpected IRQ %d\n", irq); |
425 | } | 426 | } |
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index ccb68090781e..1ff366cb9685 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c | |||
@@ -52,7 +52,7 @@ | |||
52 | #include <linux/tty.h> | 52 | #include <linux/tty.h> |
53 | #include <asm/pdc.h> /* for iodc_call() proto and friends */ | 53 | #include <asm/pdc.h> /* for iodc_call() proto and friends */ |
54 | 54 | ||
55 | static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED; | 55 | static DEFINE_SPINLOCK(pdc_console_lock); |
56 | 56 | ||
57 | static void pdc_console_write(struct console *co, const char *s, unsigned count) | 57 | static void pdc_console_write(struct console *co, const char *s, unsigned count) |
58 | { | 58 | { |
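SPIN_LOCK_UNLOCKED as a static initializer is deprecated (it predates lockdep's per-lock class keys); DEFINE_SPINLOCK is the current idiom for locks with static storage. For contrast, the runtime form remains the right choice for locks embedded in other objects (illustrative sketch):

    /* static storage: what the hunk above converts to */
    static DEFINE_SPINLOCK(pdc_console_lock);

    /* embedded in a structure: initialize at runtime instead */
    struct example_port {
    	spinlock_t lock;
    };

    static void example_port_init(struct example_port *p)
    {
    	spin_lock_init(&p->lock);
    }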
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index f696f57faa15..75099efb3bf3 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c | |||
@@ -541,9 +541,9 @@ static int __init perf_init(void) | |||
541 | spin_lock_init(&perf_lock); | 541 | spin_lock_init(&perf_lock); |
542 | 542 | ||
543 | /* TODO: this only lets us access the first cpu.. what to do for SMP? */ | 543 | /* TODO: this only lets us access the first cpu.. what to do for SMP? */ |
544 | cpu_device = cpu_data[0].dev; | 544 | cpu_device = per_cpu(cpu_data, 0).dev; |
545 | printk("Performance monitoring counters enabled for %s\n", | 545 | printk("Performance monitoring counters enabled for %s\n", |
546 | cpu_data[0].dev->name); | 546 | per_cpu(cpu_data, 0).dev->name); |
547 | 547 | ||
548 | return 0; | 548 | return 0; |
549 | } | 549 | } |
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 370086fb8333..ecb609342feb 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * Initial setup-routines for HP 9000 based hardware. | 3 | * Initial setup-routines for HP 9000 based hardware. |
4 | * | 4 | * |
5 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | 5 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds |
6 | * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de> | 6 | * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de> |
7 | * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf) | 7 | * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf) |
8 | * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net> | 8 | * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net> |
9 | * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org> | 9 | * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org> |
@@ -46,7 +46,7 @@ | |||
46 | struct system_cpuinfo_parisc boot_cpu_data __read_mostly; | 46 | struct system_cpuinfo_parisc boot_cpu_data __read_mostly; |
47 | EXPORT_SYMBOL(boot_cpu_data); | 47 | EXPORT_SYMBOL(boot_cpu_data); |
48 | 48 | ||
49 | struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly; | 49 | DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data); |
50 | 50 | ||
51 | extern int update_cr16_clocksource(void); /* from time.c */ | 51 | extern int update_cr16_clocksource(void); /* from time.c */ |
52 | 52 | ||
@@ -69,6 +69,23 @@ extern int update_cr16_clocksource(void); /* from time.c */ | |||
69 | */ | 69 | */ |
70 | 70 | ||
71 | /** | 71 | /** |
72 | * init_cpu_profiler - enable/setup per cpu profiling hooks. | ||
73 | * @cpunum: The processor instance. | ||
74 | * | ||
75 | * FIXME: doesn't do much yet... | ||
76 | */ | ||
77 | static void __cpuinit | ||
78 | init_percpu_prof(unsigned long cpunum) | ||
79 | { | ||
80 | struct cpuinfo_parisc *p; | ||
81 | |||
82 | p = &per_cpu(cpu_data, cpunum); | ||
83 | p->prof_counter = 1; | ||
84 | p->prof_multiplier = 1; | ||
85 | } | ||
86 | |||
87 | |||
88 | /** | ||
72 | * processor_probe - Determine if processor driver should claim this device. | 89 | * processor_probe - Determine if processor driver should claim this device. |
73 | * @dev: The device which has been found. | 90 | * @dev: The device which has been found. |
74 | * | 91 | * |
@@ -147,7 +164,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev) | |||
147 | } | 164 | } |
148 | #endif | 165 | #endif |
149 | 166 | ||
150 | p = &cpu_data[cpuid]; | 167 | p = &per_cpu(cpu_data, cpuid); |
151 | boot_cpu_data.cpu_count++; | 168 | boot_cpu_data.cpu_count++; |
152 | 169 | ||
153 | /* initialize counters - CPU 0 gets it_value set in time_init() */ | 170 | /* initialize counters - CPU 0 gets it_value set in time_init() */ |
@@ -162,12 +179,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev) | |||
162 | #ifdef CONFIG_SMP | 179 | #ifdef CONFIG_SMP |
163 | /* | 180 | /* |
164 | ** FIXME: review if any other initialization is clobbered | 181 | ** FIXME: review if any other initialization is clobbered |
165 | ** for boot_cpu by the above memset(). | 182 | ** for boot_cpu by the above memset(). |
166 | */ | 183 | */ |
167 | 184 | init_percpu_prof(cpuid); | |
168 | /* stolen from init_percpu_prof() */ | ||
169 | cpu_data[cpuid].prof_counter = 1; | ||
170 | cpu_data[cpuid].prof_multiplier = 1; | ||
171 | #endif | 185 | #endif |
172 | 186 | ||
173 | /* | 187 | /* |
@@ -261,19 +275,6 @@ void __init collect_boot_cpu_data(void) | |||
261 | } | 275 | } |
262 | 276 | ||
263 | 277 | ||
264 | /** | ||
265 | * init_cpu_profiler - enable/setup per cpu profiling hooks. | ||
266 | * @cpunum: The processor instance. | ||
267 | * | ||
268 | * FIXME: doesn't do much yet... | ||
269 | */ | ||
270 | static inline void __init | ||
271 | init_percpu_prof(int cpunum) | ||
272 | { | ||
273 | cpu_data[cpunum].prof_counter = 1; | ||
274 | cpu_data[cpunum].prof_multiplier = 1; | ||
275 | } | ||
276 | |||
277 | 278 | ||
278 | /** | 279 | /** |
279 | * init_per_cpu - Handle individual processor initializations. | 280 | * init_per_cpu - Handle individual processor initializations. |
@@ -293,7 +294,7 @@ init_percpu_prof(int cpunum) | |||
293 | * | 294 | * |
294 | * o Enable CPU profiling hooks. | 295 | * o Enable CPU profiling hooks. |
295 | */ | 296 | */ |
296 | int __init init_per_cpu(int cpunum) | 297 | int __cpuinit init_per_cpu(int cpunum) |
297 | { | 298 | { |
298 | int ret; | 299 | int ret; |
299 | struct pdc_coproc_cfg coproc_cfg; | 300 | struct pdc_coproc_cfg coproc_cfg; |
@@ -307,8 +308,8 @@ int __init init_per_cpu(int cpunum) | |||
307 | /* FWIW, FP rev/model is a more accurate way to determine | 308 | /* FWIW, FP rev/model is a more accurate way to determine |
308 | ** CPU type. CPU rev/model has some ambiguous cases. | 309 | ** CPU type. CPU rev/model has some ambiguous cases. |
309 | */ | 310 | */ |
310 | cpu_data[cpunum].fp_rev = coproc_cfg.revision; | 311 | per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; |
311 | cpu_data[cpunum].fp_model = coproc_cfg.model; | 312 | per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; |
312 | 313 | ||
313 | printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", | 314 | printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", |
314 | cpunum, coproc_cfg.revision, coproc_cfg.model); | 315 | cpunum, coproc_cfg.revision, coproc_cfg.model); |
@@ -344,16 +345,17 @@ int __init init_per_cpu(int cpunum) | |||
344 | int | 345 | int |
345 | show_cpuinfo (struct seq_file *m, void *v) | 346 | show_cpuinfo (struct seq_file *m, void *v) |
346 | { | 347 | { |
347 | int n; | 348 | unsigned long cpu; |
348 | 349 | ||
349 | for(n=0; n<boot_cpu_data.cpu_count; n++) { | 350 | for_each_online_cpu(cpu) { |
351 | const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); | ||
350 | #ifdef CONFIG_SMP | 352 | #ifdef CONFIG_SMP |
351 | if (0 == cpu_data[n].hpa) | 353 | if (0 == cpuinfo->hpa) |
352 | continue; | 354 | continue; |
353 | #endif | 355 | #endif |
354 | seq_printf(m, "processor\t: %d\n" | 356 | seq_printf(m, "processor\t: %lu\n" |
355 | "cpu family\t: PA-RISC %s\n", | 357 | "cpu family\t: PA-RISC %s\n", |
356 | n, boot_cpu_data.family_name); | 358 | cpu, boot_cpu_data.family_name); |
357 | 359 | ||
358 | seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name ); | 360 | seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name ); |
359 | 361 | ||
@@ -365,8 +367,8 @@ show_cpuinfo (struct seq_file *m, void *v) | |||
365 | seq_printf(m, "model\t\t: %s\n" | 367 | seq_printf(m, "model\t\t: %s\n" |
366 | "model name\t: %s\n", | 368 | "model name\t: %s\n", |
367 | boot_cpu_data.pdc.sys_model_name, | 369 | boot_cpu_data.pdc.sys_model_name, |
368 | cpu_data[n].dev ? | 370 | cpuinfo->dev ? |
369 | cpu_data[n].dev->name : "Unknown" ); | 371 | cpuinfo->dev->name : "Unknown"); |
370 | 372 | ||
371 | seq_printf(m, "hversion\t: 0x%08x\n" | 373 | seq_printf(m, "hversion\t: 0x%08x\n" |
372 | "sversion\t: 0x%08x\n", | 374 | "sversion\t: 0x%08x\n", |
@@ -377,8 +379,8 @@ show_cpuinfo (struct seq_file *m, void *v) | |||
377 | show_cache_info(m); | 379 | show_cache_info(m); |
378 | 380 | ||
379 | seq_printf(m, "bogomips\t: %lu.%02lu\n", | 381 | seq_printf(m, "bogomips\t: %lu.%02lu\n", |
380 | cpu_data[n].loops_per_jiffy / (500000 / HZ), | 382 | cpuinfo->loops_per_jiffy / (500000 / HZ), |
381 | (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100); | 383 | (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100); |
382 | 384 | ||
383 | seq_printf(m, "software id\t: %ld\n\n", | 385 | seq_printf(m, "software id\t: %ld\n\n", |
384 | boot_cpu_data.pdc.model.sw_id); | 386 | boot_cpu_data.pdc.model.sw_id); |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 7d27853ff8c8..82131ca8e05c 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
@@ -58,11 +58,6 @@ int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */ | |||
58 | EXPORT_SYMBOL(parisc_bus_is_phys); | 58 | EXPORT_SYMBOL(parisc_bus_is_phys); |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | /* This sets the vmerge boundary and size, it's here because it has to | ||
62 | * be available on all platforms (zero means no-virtual merging) */ | ||
63 | unsigned long parisc_vmerge_boundary = 0; | ||
64 | unsigned long parisc_vmerge_max_size = 0; | ||
65 | |||
66 | void __init setup_cmdline(char **cmdline_p) | 61 | void __init setup_cmdline(char **cmdline_p) |
67 | { | 62 | { |
68 | extern unsigned int boot_args[]; | 63 | extern unsigned int boot_args[]; |
@@ -321,7 +316,7 @@ static int __init parisc_init(void) | |||
321 | 316 | ||
322 | processor_init(); | 317 | processor_init(); |
323 | printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n", | 318 | printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n", |
324 | boot_cpu_data.cpu_count, | 319 | num_present_cpus(), |
325 | boot_cpu_data.cpu_name, | 320 | boot_cpu_data.cpu_name, |
326 | boot_cpu_data.cpu_hz / 1000000, | 321 | boot_cpu_data.cpu_hz / 1000000, |
327 | boot_cpu_data.cpu_hz % 1000000 ); | 322 | boot_cpu_data.cpu_hz % 1000000 ); |
@@ -387,8 +382,8 @@ void start_parisc(void) | |||
387 | if (ret >= 0 && coproc_cfg.ccr_functional) { | 382 | if (ret >= 0 && coproc_cfg.ccr_functional) { |
388 | mtctl(coproc_cfg.ccr_functional, 10); | 383 | mtctl(coproc_cfg.ccr_functional, 10); |
389 | 384 | ||
390 | cpu_data[cpunum].fp_rev = coproc_cfg.revision; | 385 | per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; |
391 | cpu_data[cpunum].fp_model = coproc_cfg.model; | 386 | per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; |
392 | 387 | ||
393 | asm volatile ("fstd %fr0,8(%sp)"); | 388 | asm volatile ("fstd %fr0,8(%sp)"); |
394 | } else { | 389 | } else { |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 80bc000523fa..9995d7ed5819 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -56,16 +56,17 @@ static int smp_debug_lvl = 0; | |||
56 | if (lvl >= smp_debug_lvl) \ | 56 | if (lvl >= smp_debug_lvl) \ |
57 | printk(printargs); | 57 | printk(printargs); |
58 | #else | 58 | #else |
59 | #define smp_debug(lvl, ...) | 59 | #define smp_debug(lvl, ...) do { } while(0) |
60 | #endif /* DEBUG_SMP */ | 60 | #endif /* DEBUG_SMP */ |
61 | 61 | ||
62 | DEFINE_SPINLOCK(smp_lock); | 62 | DEFINE_SPINLOCK(smp_lock); |
63 | 63 | ||
64 | volatile struct task_struct *smp_init_current_idle_task; | 64 | volatile struct task_struct *smp_init_current_idle_task; |
65 | 65 | ||
66 | static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */ | 66 | /* track which CPU is booting */ |
67 | static volatile int cpu_now_booting __cpuinitdata; | ||
67 | 68 | ||
68 | static int parisc_max_cpus __read_mostly = 1; | 69 | static int parisc_max_cpus __cpuinitdata = 1; |
69 | 70 | ||
70 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; | 71 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; |
71 | 72 | ||
@@ -123,7 +124,7 @@ irqreturn_t | |||
123 | ipi_interrupt(int irq, void *dev_id) | 124 | ipi_interrupt(int irq, void *dev_id) |
124 | { | 125 | { |
125 | int this_cpu = smp_processor_id(); | 126 | int this_cpu = smp_processor_id(); |
126 | struct cpuinfo_parisc *p = &cpu_data[this_cpu]; | 127 | struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); |
127 | unsigned long ops; | 128 | unsigned long ops; |
128 | unsigned long flags; | 129 | unsigned long flags; |
129 | 130 | ||
@@ -202,13 +203,13 @@ ipi_interrupt(int irq, void *dev_id) | |||
202 | static inline void | 203 | static inline void |
203 | ipi_send(int cpu, enum ipi_message_type op) | 204 | ipi_send(int cpu, enum ipi_message_type op) |
204 | { | 205 | { |
205 | struct cpuinfo_parisc *p = &cpu_data[cpu]; | 206 | struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); |
206 | spinlock_t *lock = &per_cpu(ipi_lock, cpu); | 207 | spinlock_t *lock = &per_cpu(ipi_lock, cpu); |
207 | unsigned long flags; | 208 | unsigned long flags; |
208 | 209 | ||
209 | spin_lock_irqsave(lock, flags); | 210 | spin_lock_irqsave(lock, flags); |
210 | p->pending_ipi |= 1 << op; | 211 | p->pending_ipi |= 1 << op; |
211 | gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa); | 212 | gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa); |
212 | spin_unlock_irqrestore(lock, flags); | 213 | spin_unlock_irqrestore(lock, flags); |
213 | } | 214 | } |
214 | 215 | ||
@@ -224,10 +225,7 @@ send_IPI_mask(cpumask_t mask, enum ipi_message_type op) | |||
224 | static inline void | 225 | static inline void |
225 | send_IPI_single(int dest_cpu, enum ipi_message_type op) | 226 | send_IPI_single(int dest_cpu, enum ipi_message_type op) |
226 | { | 227 | { |
227 | if (dest_cpu == NO_PROC_ID) { | 228 | BUG_ON(dest_cpu == NO_PROC_ID); |
228 | BUG(); | ||
229 | return; | ||
230 | } | ||
231 | 229 | ||
232 | ipi_send(dest_cpu, op); | 230 | ipi_send(dest_cpu, op); |
233 | } | 231 | } |
@@ -309,8 +307,7 @@ smp_cpu_init(int cpunum) | |||
309 | /* Initialise the idle task for this CPU */ | 307 | /* Initialise the idle task for this CPU */ |
310 | atomic_inc(&init_mm.mm_count); | 308 | atomic_inc(&init_mm.mm_count); |
311 | current->active_mm = &init_mm; | 309 | current->active_mm = &init_mm; |
312 | if(current->mm) | 310 | BUG_ON(current->mm); |
313 | BUG(); | ||
314 | enter_lazy_tlb(&init_mm, current); | 311 | enter_lazy_tlb(&init_mm, current); |
315 | 312 | ||
316 | init_IRQ(); /* make sure no IRQs are enabled or pending */ | 313 | init_IRQ(); /* make sure no IRQs are enabled or pending */ |
@@ -345,6 +342,7 @@ void __init smp_callin(void) | |||
345 | */ | 342 | */ |
346 | int __cpuinit smp_boot_one_cpu(int cpuid) | 343 | int __cpuinit smp_boot_one_cpu(int cpuid) |
347 | { | 344 | { |
345 | const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); | ||
348 | struct task_struct *idle; | 346 | struct task_struct *idle; |
349 | long timeout; | 347 | long timeout; |
350 | 348 | ||
@@ -376,7 +374,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid) | |||
376 | smp_init_current_idle_task = idle ; | 374 | smp_init_current_idle_task = idle ; |
377 | mb(); | 375 | mb(); |
378 | 376 | ||
379 | printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa); | 377 | printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa); |
380 | 378 | ||
381 | /* | 379 | /* |
382 | ** This gets PDC to release the CPU from a very tight loop. | 380 | ** This gets PDC to release the CPU from a very tight loop. |
@@ -387,7 +385,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid) | |||
387 | ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the | 385 | ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the |
388 | ** contents of memory are valid." | 386 | ** contents of memory are valid." |
389 | */ | 387 | */ |
390 | gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa); | 388 | gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa); |
391 | mb(); | 389 | mb(); |
392 | 390 | ||
393 | /* | 391 | /* |
@@ -419,12 +417,12 @@ alive: | |||
419 | return 0; | 417 | return 0; |
420 | } | 418 | } |
421 | 419 | ||
422 | void __devinit smp_prepare_boot_cpu(void) | 420 | void __init smp_prepare_boot_cpu(void) |
423 | { | 421 | { |
424 | int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */ | 422 | int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; |
425 | 423 | ||
426 | /* Setup BSP mappings */ | 424 | /* Setup BSP mappings */ |
427 | printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor); | 425 | printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor); |
428 | 426 | ||
429 | cpu_set(bootstrap_processor, cpu_online_map); | 427 | cpu_set(bootstrap_processor, cpu_online_map); |
430 | cpu_set(bootstrap_processor, cpu_present_map); | 428 | cpu_set(bootstrap_processor, cpu_present_map); |
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 4d09203bc693..9d46c43a4152 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -60,7 +60,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
60 | unsigned long cycles_elapsed, ticks_elapsed; | 60 | unsigned long cycles_elapsed, ticks_elapsed; |
61 | unsigned long cycles_remainder; | 61 | unsigned long cycles_remainder; |
62 | unsigned int cpu = smp_processor_id(); | 62 | unsigned int cpu = smp_processor_id(); |
63 | struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu]; | 63 | struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); |
64 | 64 | ||
65 | /* gcc can optimize for "read-only" case with a local clocktick */ | 65 | /* gcc can optimize for "read-only" case with a local clocktick */ |
66 | unsigned long cpt = clocktick; | 66 | unsigned long cpt = clocktick; |
@@ -213,7 +213,7 @@ void __init start_cpu_itimer(void) | |||
213 | 213 | ||
214 | mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ | 214 | mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ |
215 | 215 | ||
216 | cpu_data[cpu].it_value = next_tick; | 216 | per_cpu(cpu_data, cpu).it_value = next_tick; |
217 | } | 217 | } |
218 | 218 | ||
219 | struct platform_device rtc_parisc_dev = { | 219 | struct platform_device rtc_parisc_dev = { |
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c index d71cb018a21e..f5159381fdd6 100644 --- a/arch/parisc/kernel/topology.c +++ b/arch/parisc/kernel/topology.c | |||
@@ -22,14 +22,14 @@ | |||
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <linux/cache.h> | 23 | #include <linux/cache.h> |
24 | 24 | ||
25 | static struct cpu cpu_devices[NR_CPUS] __read_mostly; | 25 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
26 | 26 | ||
27 | static int __init topology_init(void) | 27 | static int __init topology_init(void) |
28 | { | 28 | { |
29 | int num; | 29 | int num; |
30 | 30 | ||
31 | for_each_present_cpu(num) { | 31 | for_each_present_cpu(num) { |
32 | register_cpu(&cpu_devices[num], num); | 32 | register_cpu(&per_cpu(cpu_devices, num), num); |
33 | } | 33 | } |
34 | return 0; | 34 | return 0; |
35 | } | 35 | } |
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 4c771cd580ec..ba658d2086f7 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c | |||
@@ -745,6 +745,10 @@ void handle_interruption(int code, struct pt_regs *regs) | |||
745 | /* Fall Through */ | 745 | /* Fall Through */ |
746 | case 27: | 746 | case 27: |
747 | /* Data memory protection ID trap */ | 747 | /* Data memory protection ID trap */ |
748 | if (code == 27 && !user_mode(regs) && | ||
749 | fixup_exception(regs)) | ||
750 | return; | ||
751 | |||
748 | die_if_kernel("Protection id trap", regs, code); | 752 | die_if_kernel("Protection id trap", regs, code); |
749 | si.si_code = SEGV_MAPERR; | 753 | si.si_code = SEGV_MAPERR; |
750 | si.si_signo = SIGSEGV; | 754 | si.si_signo = SIGSEGV; |
@@ -821,8 +825,8 @@ void handle_interruption(int code, struct pt_regs *regs) | |||
821 | 825 | ||
822 | int __init check_ivt(void *iva) | 826 | int __init check_ivt(void *iva) |
823 | { | 827 | { |
828 | extern u32 os_hpmc_size; | ||
824 | extern const u32 os_hpmc[]; | 829 | extern const u32 os_hpmc[]; |
825 | extern const u32 os_hpmc_end[]; | ||
826 | 830 | ||
827 | int i; | 831 | int i; |
828 | u32 check = 0; | 832 | u32 check = 0; |
@@ -839,8 +843,7 @@ int __init check_ivt(void *iva) | |||
839 | *ivap++ = 0; | 843 | *ivap++ = 0; |
840 | 844 | ||
841 | /* Compute Checksum for HPMC handler */ | 845 | /* Compute Checksum for HPMC handler */ |
842 | 846 | length = os_hpmc_size; | |
843 | length = os_hpmc_end - os_hpmc; | ||
844 | ivap[7] = length; | 847 | ivap[7] = length; |
845 | 848 | ||
846 | hpmcp = (u32 *)os_hpmc; | 849 | hpmcp = (u32 *)os_hpmc; |
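With the handler size exported from hpmc.S as the data word os_hpmc_size, check_ivt() no longer needs the os_hpmc_end label to compute the HPMC checksum. Paraphrased context for the hunk above (the summation loop sits outside the diff and is reconstructed here as an assumption):

    length = os_hpmc_size;		/* was: os_hpmc_end - os_hpmc */
    ivap[7] = length;
    hpmcp = (u32 *)os_hpmc;
    for (i = 0; i < length / 4; i++)	/* assumed loop body */
    	check += *hpmcp++;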
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 6773c582e457..69dad5a850a8 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c | |||
@@ -372,7 +372,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct | |||
372 | struct pt_regs *r = &t->thread.regs; | 372 | struct pt_regs *r = &t->thread.regs; |
373 | struct pt_regs *r2; | 373 | struct pt_regs *r2; |
374 | 374 | ||
375 | r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL); | 375 | r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC); |
376 | if (!r2) | 376 | if (!r2) |
377 | return; | 377 | return; |
378 | *r2 = *r; | 378 | *r2 = *r; |
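
The unwind.c hunk switches the allocation to GFP_ATOMIC because the unwinder can run from contexts that must not sleep; GFP_ATOMIC never blocks but fails more readily, so the existing NULL check carries the weight. A hedged sketch of the same pattern, with a hypothetical helper name:

#include <linux/slab.h>
#include <asm/ptrace.h>

/* Sketch only: copy a register frame without sleeping.  GFP_ATOMIC is
 * safe under spinlocks or in interrupt context, but the allocation can
 * fail and the caller must tolerate a NULL return. */
static struct pt_regs *copy_regs_atomic(const struct pt_regs *src)
{
        struct pt_regs *copy = kmalloc(sizeof(*copy), GFP_ATOMIC);

        if (!copy)
                return NULL;
        *copy = *src;
        return copy;
}
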
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index 9abed07db7fc..5069e8b2ca71 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c | |||
@@ -261,7 +261,7 @@ static const struct iomap_ops iomem_ops = { | |||
261 | iomem_write32r, | 261 | iomem_write32r, |
262 | }; | 262 | }; |
263 | 263 | ||
264 | const struct iomap_ops *iomap_ops[8] = { | 264 | static const struct iomap_ops *iomap_ops[8] = { |
265 | [0] = &ioport_ops, | 265 | [0] = &ioport_ops, |
266 | [7] = &iomem_ops | 266 | [7] = &iomem_ops |
267 | }; | 267 | }; |
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index 2d68431fc22e..bbda909c866e 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c | |||
@@ -275,7 +275,7 @@ handle_store_error: | |||
275 | 275 | ||
276 | 276 | ||
277 | /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ | 277 | /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ |
278 | unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) | 278 | static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) |
279 | { | 279 | { |
280 | register unsigned long src, dst, t1, t2, t3; | 280 | register unsigned long src, dst, t1, t2, t3; |
281 | register unsigned char *pcs, *pcd; | 281 | register unsigned char *pcs, *pcd; |
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index b2e3e9a8cece..92c7fa4ecc3f 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c | |||
@@ -139,13 +139,41 @@ parisc_acctyp(unsigned long code, unsigned int inst) | |||
139 | } | 139 | } |
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | int fixup_exception(struct pt_regs *regs) | ||
143 | { | ||
144 | const struct exception_table_entry *fix; | ||
145 | |||
146 | fix = search_exception_tables(regs->iaoq[0]); | ||
147 | if (fix) { | ||
148 | struct exception_data *d; | ||
149 | d = &__get_cpu_var(exception_data); | ||
150 | d->fault_ip = regs->iaoq[0]; | ||
151 | d->fault_space = regs->isr; | ||
152 | d->fault_addr = regs->ior; | ||
153 | |||
154 | regs->iaoq[0] = ((fix->fixup) & ~3); | ||
155 | /* | ||
156 | * NOTE: In some cases the faulting instruction | ||
157 | * may be in the delay slot of a branch. We | ||
158 | * don't want to take the branch, so we don't | ||
159 | * increment iaoq[1], instead we set it to be | ||
160 | * iaoq[0]+4, and clear the B bit in the PSW | ||
161 | */ | ||
162 | regs->iaoq[1] = regs->iaoq[0] + 4; | ||
163 | regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */ | ||
164 | |||
165 | return 1; | ||
166 | } | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
142 | void do_page_fault(struct pt_regs *regs, unsigned long code, | 171 | void do_page_fault(struct pt_regs *regs, unsigned long code, |
143 | unsigned long address) | 172 | unsigned long address) |
144 | { | 173 | { |
145 | struct vm_area_struct *vma, *prev_vma; | 174 | struct vm_area_struct *vma, *prev_vma; |
146 | struct task_struct *tsk = current; | 175 | struct task_struct *tsk = current; |
147 | struct mm_struct *mm = tsk->mm; | 176 | struct mm_struct *mm = tsk->mm; |
148 | const struct exception_table_entry *fix; | ||
149 | unsigned long acc_type; | 177 | unsigned long acc_type; |
150 | int fault; | 178 | int fault; |
151 | 179 | ||
@@ -229,32 +257,8 @@ bad_area: | |||
229 | 257 | ||
230 | no_context: | 258 | no_context: |
231 | 259 | ||
232 | if (!user_mode(regs)) { | 260 | if (!user_mode(regs) && fixup_exception(regs)) { |
233 | fix = search_exception_tables(regs->iaoq[0]); | 261 | return; |
234 | |||
235 | if (fix) { | ||
236 | struct exception_data *d; | ||
237 | |||
238 | d = &__get_cpu_var(exception_data); | ||
239 | d->fault_ip = regs->iaoq[0]; | ||
240 | d->fault_space = regs->isr; | ||
241 | d->fault_addr = regs->ior; | ||
242 | |||
243 | regs->iaoq[0] = ((fix->fixup) & ~3); | ||
244 | |||
245 | /* | ||
246 | * NOTE: In some cases the faulting instruction | ||
247 | * may be in the delay slot of a branch. We | ||
248 | * don't want to take the branch, so we don't | ||
249 | * increment iaoq[1], instead we set it to be | ||
250 | * iaoq[0]+4, and clear the B bit in the PSW | ||
251 | */ | ||
252 | |||
253 | regs->iaoq[1] = regs->iaoq[0] + 4; | ||
254 | regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */ | ||
255 | |||
256 | return; | ||
257 | } | ||
258 | } | 262 | } |
259 | 263 | ||
260 | parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); | 264 | parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); |
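
The fault.c hunk above factors the exception-table lookup into fixup_exception() so that other trap paths (such as the protection ID trap in traps.c) can reuse it. A sketch of the calling convention it establishes; the helper name below is illustrative only:

#include <asm/ptrace.h>

/* Provided by arch/parisc/mm/fault.c in the hunk above. */
extern int fixup_exception(struct pt_regs *regs);

/* A kernel-mode trap path first asks the exception table for a fixup and
 * only treats the fault as fatal when none is registered. */
static int try_kernel_fixup(struct pt_regs *regs)
{
        if (!user_mode(regs) && fixup_exception(regs))
                return 1;       /* execution resumes at fix->fixup */

        return 0;               /* caller falls back to die_if_kernel()/SIGSEGV */
}
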
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h index 8066eede3a0c..b4b7338ad79e 100644 --- a/arch/powerpc/include/asm/cell-pmu.h +++ b/arch/powerpc/include/asm/cell-pmu.h | |||
@@ -37,9 +37,11 @@ | |||
37 | #define CBE_PM_STOP_AT_MAX 0x40000000 | 37 | #define CBE_PM_STOP_AT_MAX 0x40000000 |
38 | #define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) | 38 | #define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) |
39 | #define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) | 39 | #define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) |
40 | #define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17) | ||
40 | #define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) | 41 | #define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) |
41 | #define CBE_PM_FREEZE_ALL_CTRS 0x00100000 | 42 | #define CBE_PM_FREEZE_ALL_CTRS 0x00100000 |
42 | #define CBE_PM_ENABLE_EXT_TRACE 0x00008000 | 43 | #define CBE_PM_ENABLE_EXT_TRACE 0x00008000 |
44 | #define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9) | ||
43 | 45 | ||
44 | /* Macros for the trace_address register. */ | 46 | /* Macros for the trace_address register. */ |
45 | #define CBE_PM_TRACE_BUF_FULL 0x00000800 | 47 | #define CBE_PM_TRACE_BUF_FULL 0x00000800 |
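
The two macros added to cell-pmu.h are plain bit-field encoders for the pm_control register, consumed later by write_pm_cntrl(). A hedged sketch of how a control word could be assembled from them; the field values are illustrative, not taken from the patch:

#include <linux/types.h>
#include <asm/cell-pmu.h>

static u32 build_pm_control_sketch(void)
{
        u32 val = 0;

        val |= CBE_PM_TRACE_MODE_SET(2);        /* occurrence trace mode */
        val |= CBE_PM_TRACE_BUF_OVFLW(1);       /* let the trace buffer wrap */
        val |= CBE_PM_SPU_ADDR_TRACE_SET(0x1);  /* route the SPU PC via the debug bus */
        val |= CBE_PM_FREEZE_ALL_CTRS;

        return val;
}
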
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h index 95035c602ba6..639dc96077ab 100644 --- a/arch/powerpc/include/asm/oprofile_impl.h +++ b/arch/powerpc/include/asm/oprofile_impl.h | |||
@@ -32,6 +32,12 @@ struct op_system_config { | |||
32 | unsigned long mmcr0; | 32 | unsigned long mmcr0; |
33 | unsigned long mmcr1; | 33 | unsigned long mmcr1; |
34 | unsigned long mmcra; | 34 | unsigned long mmcra; |
35 | #ifdef CONFIG_OPROFILE_CELL | ||
36 | /* Register for oprofile user tool to check cell kernel profiling | ||
37 | * support. | ||
38 | */ | ||
39 | unsigned long cell_support; | ||
40 | #endif | ||
35 | #endif | 41 | #endif |
36 | unsigned long enable_kernel; | 42 | unsigned long enable_kernel; |
37 | unsigned long enable_user; | 43 | unsigned long enable_user; |
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h index dfdbffa06818..964b93974d89 100644 --- a/arch/powerpc/oprofile/cell/pr_util.h +++ b/arch/powerpc/oprofile/cell/pr_util.h | |||
@@ -30,6 +30,10 @@ | |||
30 | extern struct delayed_work spu_work; | 30 | extern struct delayed_work spu_work; |
31 | extern int spu_prof_running; | 31 | extern int spu_prof_running; |
32 | 32 | ||
33 | #define TRACE_ARRAY_SIZE 1024 | ||
34 | |||
35 | extern spinlock_t oprof_spu_smpl_arry_lck; | ||
36 | |||
33 | struct spu_overlay_info { /* map of sections within an SPU overlay */ | 37 | struct spu_overlay_info { /* map of sections within an SPU overlay */ |
34 | unsigned int vma; /* SPU virtual memory address from elf */ | 38 | unsigned int vma; /* SPU virtual memory address from elf */ |
35 | unsigned int size; /* size of section from elf */ | 39 | unsigned int size; /* size of section from elf */ |
@@ -89,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map); | |||
89 | * Entry point for SPU profiling. | 93 | * Entry point for SPU profiling. |
90 | * cycles_reset is the SPU_CYCLES count value specified by the user. | 94 | * cycles_reset is the SPU_CYCLES count value specified by the user. |
91 | */ | 95 | */ |
92 | int start_spu_profiling(unsigned int cycles_reset); | 96 | int start_spu_profiling_cycles(unsigned int cycles_reset); |
93 | 97 | void start_spu_profiling_events(void); | |
94 | void stop_spu_profiling(void); | ||
95 | 98 | ||
99 | void stop_spu_profiling_cycles(void); | ||
100 | void stop_spu_profiling_events(void); | ||
96 | 101 | ||
97 | /* add the necessary profiling hooks */ | 102 | /* add the necessary profiling hooks */ |
98 | int spu_sync_start(void); | 103 | int spu_sync_start(void); |
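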
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c index 83faa958b9d4..9305ddaac512 100644 --- a/arch/powerpc/oprofile/cell/spu_profiler.c +++ b/arch/powerpc/oprofile/cell/spu_profiler.c | |||
@@ -18,11 +18,21 @@ | |||
18 | #include <asm/cell-pmu.h> | 18 | #include <asm/cell-pmu.h> |
19 | #include "pr_util.h" | 19 | #include "pr_util.h" |
20 | 20 | ||
21 | #define TRACE_ARRAY_SIZE 1024 | ||
22 | #define SCALE_SHIFT 14 | 21 | #define SCALE_SHIFT 14 |
23 | 22 | ||
24 | static u32 *samples; | 23 | static u32 *samples; |
25 | 24 | ||
25 | /* spu_prof_running is a flag used to indicate if spu profiling is enabled | ||
26 | * or not. It is set by the routines start_spu_profiling_cycles() and | ||
27 | * start_spu_profiling_events(). The flag is cleared by the routines | ||
28 | * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These | ||
29 | * routines are called via global_start() and global_stop() which are called in | ||
30 | * op_powerpc_start() and op_powerpc_stop(). These routines are called once | ||
31 | * per system as a result of the user starting/stopping oprofile. Hence, only | ||
32 | * one CPU per user at a time will be changing the value of spu_prof_running. | ||
33 | * In general, OProfile does not protect against multiple users trying to run | ||
34 | * OProfile at a time. | ||
35 | */ | ||
26 | int spu_prof_running; | 36 | int spu_prof_running; |
27 | static unsigned int profiling_interval; | 37 | static unsigned int profiling_interval; |
28 | 38 | ||
@@ -31,8 +41,8 @@ static unsigned int profiling_interval; | |||
31 | 41 | ||
32 | #define SPU_PC_MASK 0xFFFF | 42 | #define SPU_PC_MASK 0xFFFF |
33 | 43 | ||
34 | static DEFINE_SPINLOCK(sample_array_lock); | 44 | DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck); |
35 | unsigned long sample_array_lock_flags; | 45 | unsigned long oprof_spu_smpl_arry_lck_flags; |
36 | 46 | ||
37 | void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) | 47 | void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) |
38 | { | 48 | { |
@@ -145,13 +155,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
145 | * sample array must be loaded and then processed for a given | 155 | * sample array must be loaded and then processed for a given |
146 | * cpu. The sample array is not per cpu. | 156 | * cpu. The sample array is not per cpu. |
147 | */ | 157 | */ |
148 | spin_lock_irqsave(&sample_array_lock, | 158 | spin_lock_irqsave(&oprof_spu_smpl_arry_lck, |
149 | sample_array_lock_flags); | 159 | oprof_spu_smpl_arry_lck_flags); |
150 | num_samples = cell_spu_pc_collection(cpu); | 160 | num_samples = cell_spu_pc_collection(cpu); |
151 | 161 | ||
152 | if (num_samples == 0) { | 162 | if (num_samples == 0) { |
153 | spin_unlock_irqrestore(&sample_array_lock, | 163 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, |
154 | sample_array_lock_flags); | 164 | oprof_spu_smpl_arry_lck_flags); |
155 | continue; | 165 | continue; |
156 | } | 166 | } |
157 | 167 | ||
@@ -162,8 +172,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
162 | num_samples); | 172 | num_samples); |
163 | } | 173 | } |
164 | 174 | ||
165 | spin_unlock_irqrestore(&sample_array_lock, | 175 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, |
166 | sample_array_lock_flags); | 176 | oprof_spu_smpl_arry_lck_flags); |
167 | 177 | ||
168 | } | 178 | } |
169 | smp_wmb(); /* ensure spu event buffer updates are written */ | 179 | smp_wmb(); /* ensure spu event buffer updates are written */ |
@@ -182,13 +192,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
182 | 192 | ||
183 | static struct hrtimer timer; | 193 | static struct hrtimer timer; |
184 | /* | 194 | /* |
185 | * Entry point for SPU profiling. | 195 | * Entry point for SPU cycle profiling. |
186 | * NOTE: SPU profiling is done system-wide, not per-CPU. | 196 | * NOTE: SPU profiling is done system-wide, not per-CPU. |
187 | * | 197 | * |
188 | * cycles_reset is the count value specified by the user when | 198 | * cycles_reset is the count value specified by the user when |
189 | * setting up OProfile to count SPU_CYCLES. | 199 | * setting up OProfile to count SPU_CYCLES. |
190 | */ | 200 | */ |
191 | int start_spu_profiling(unsigned int cycles_reset) | 201 | int start_spu_profiling_cycles(unsigned int cycles_reset) |
192 | { | 202 | { |
193 | ktime_t kt; | 203 | ktime_t kt; |
194 | 204 | ||
@@ -212,10 +222,30 @@ int start_spu_profiling(unsigned int cycles_reset) | |||
212 | return 0; | 222 | return 0; |
213 | } | 223 | } |
214 | 224 | ||
215 | void stop_spu_profiling(void) | 225 | /* |
226 | * Entry point for SPU event profiling. | ||
227 | * NOTE: SPU profiling is done system-wide, not per-CPU. | ||
228 | * | ||
229 | * The SPU event and count value are configured earlier by | ||
230 | * cell_reg_setup_spu_events(); no arguments are needed here. | ||
231 | */ | ||
232 | void start_spu_profiling_events(void) | ||
233 | { | ||
234 | spu_prof_running = 1; | ||
235 | schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE); | ||
236 | |||
237 | return; | ||
238 | } | ||
239 | |||
240 | void stop_spu_profiling_cycles(void) | ||
216 | { | 241 | { |
217 | spu_prof_running = 0; | 242 | spu_prof_running = 0; |
218 | hrtimer_cancel(&timer); | 243 | hrtimer_cancel(&timer); |
219 | kfree(samples); | 244 | kfree(samples); |
220 | pr_debug("SPU_PROF: stop_spu_profiling issued\n"); | 245 | pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n"); |
246 | } | ||
247 | |||
248 | void stop_spu_profiling_events(void) | ||
249 | { | ||
250 | spu_prof_running = 0; | ||
221 | } | 251 | } |
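
spu_profiler.c drives cycle-based sampling from an hrtimer whose callback (profile_spus) re-arms itself each period. A minimal, self-contained sketch of that periodic pattern with hypothetical names; the real code additionally holds oprof_spu_smpl_arry_lck around the shared sample array:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer sample_timer;
static ktime_t sample_period;

static enum hrtimer_restart sample_fn(struct hrtimer *timer)
{
        /* ... collect samples and feed them to oprofile here ... */

        hrtimer_forward_now(timer, sample_period);      /* push expiry forward */
        return HRTIMER_RESTART;                         /* keep firing periodically */
}

static void start_sample_timer(unsigned long period_ns)
{
        sample_period = ktime_set(0, period_ns);
        hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sample_timer.function = sample_fn;
        hrtimer_start(&sample_timer, sample_period, HRTIMER_MODE_REL);
}
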
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index 17807acb05d9..21f16edf6c8d 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c | |||
@@ -132,6 +132,28 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) | |||
132 | oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); | 132 | oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); |
133 | oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); | 133 | oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); |
134 | oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); | 134 | oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); |
135 | #ifdef CONFIG_OPROFILE_CELL | ||
136 | /* create a file the user tool can check to see what level of profiling | ||
137 | * support exists with this kernel. Initialize the bit mask to indicate | ||
138 | * what support the kernel has: | ||
139 | * bit 0 - Supports SPU event profiling in addition to PPU | ||
140 | * event and cycles; and SPU cycle profiling | ||
141 | * bits 1-31 - Currently unused. | ||
142 | * | ||
143 | * If the file does not exist, then the kernel only supports SPU | ||
144 | * cycle profiling, PPU event and cycle profiling. | ||
145 | */ | ||
146 | oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support); | ||
147 | sys.cell_support = 0x1; /* Note, the user OProfile tool must check | ||
148 | * that this bit is set before attempting to | ||
149 | * use SPU event profiling. Older kernels | ||
150 | * will not have this file, hence the user | ||
151 | * tool is not allowed to do SPU event | ||
152 | * profiling on older kernels. Older kernels | ||
153 | * will accept SPU events but collected data | ||
154 | * is garbage. | ||
155 | */ | ||
156 | #endif | ||
135 | #endif | 157 | #endif |
136 | 158 | ||
137 | for (i = 0; i < model->num_counters; ++i) { | 159 | for (i = 0; i < model->num_counters; ++i) { |
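
As the comment block explains, user space is expected to read the new cell_support file and test bit 0 before requesting SPU event profiling. A hypothetical user-space check, assuming oprofilefs is mounted at the conventional /dev/oprofile location (the path is not dictated by this patch):

#include <stdio.h>

static int kernel_supports_spu_events(void)
{
        unsigned long mask = 0;
        FILE *f = fopen("/dev/oprofile/cell_support", "r");

        if (!f)
                return 0;       /* file absent: only SPU cycle/PPU profiling */

        if (fscanf(f, "%lu", &mask) != 1)
                mask = 0;
        fclose(f);

        return (mask & 0x1) != 0;       /* bit 0: SPU event profiling supported */
}
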
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 25a4ec2514a3..ae06c6236d9c 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c | |||
@@ -40,14 +40,15 @@ | |||
40 | #include "../platforms/cell/interrupt.h" | 40 | #include "../platforms/cell/interrupt.h" |
41 | #include "cell/pr_util.h" | 41 | #include "cell/pr_util.h" |
42 | 42 | ||
43 | static void cell_global_stop_spu(void); | 43 | #define PPU_PROFILING 0 |
44 | #define SPU_PROFILING_CYCLES 1 | ||
45 | #define SPU_PROFILING_EVENTS 2 | ||
44 | 46 | ||
45 | /* | 47 | #define SPU_EVENT_NUM_START 4100 |
46 | * spu_cycle_reset is the number of cycles between samples. | 48 | #define SPU_EVENT_NUM_STOP 4399 |
47 | * This variable is used for SPU profiling and should ONLY be set | 49 | #define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */ |
48 | * at the beginning of cell_reg_setup; otherwise, it's read-only. | 50 | #define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */ |
49 | */ | 51 | #define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */ |
50 | static unsigned int spu_cycle_reset; | ||
51 | 52 | ||
52 | #define NUM_SPUS_PER_NODE 8 | 53 | #define NUM_SPUS_PER_NODE 8 |
53 | #define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */ | 54 | #define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */ |
@@ -66,6 +67,21 @@ static unsigned int spu_cycle_reset; | |||
66 | 67 | ||
67 | #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ | 68 | #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ |
68 | 69 | ||
70 | /* Minimum HW interval timer setting to send a value to the trace buffer is 10 cycles. | ||
71 | * To configure the counter to send a value every N cycles, set the counter to | ||
72 | * 2^32 - 1 - N. | ||
73 | */ | ||
74 | #define NUM_INTERVAL_CYC 0xFFFFFFFF - 10 | ||
75 | |||
76 | /* | ||
77 | * spu_cycle_reset is the number of cycles between samples. | ||
78 | * This variable is used for SPU profiling and should ONLY be set | ||
79 | * at the beginning of cell_reg_setup; otherwise, it's read-only. | ||
80 | */ | ||
81 | static unsigned int spu_cycle_reset; | ||
82 | static unsigned int profiling_mode; | ||
83 | static int spu_evnt_phys_spu_indx; | ||
84 | |||
69 | struct pmc_cntrl_data { | 85 | struct pmc_cntrl_data { |
70 | unsigned long vcntr; | 86 | unsigned long vcntr; |
71 | unsigned long evnts; | 87 | unsigned long evnts; |
@@ -105,6 +121,8 @@ struct pm_cntrl { | |||
105 | u16 trace_mode; | 121 | u16 trace_mode; |
106 | u16 freeze; | 122 | u16 freeze; |
107 | u16 count_mode; | 123 | u16 count_mode; |
124 | u16 spu_addr_trace; | ||
125 | u8 trace_buf_ovflw; | ||
108 | }; | 126 | }; |
109 | 127 | ||
110 | static struct { | 128 | static struct { |
@@ -122,7 +140,7 @@ static struct { | |||
122 | #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) | 140 | #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) |
123 | 141 | ||
124 | static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); | 142 | static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); |
125 | 143 | static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE]; | |
126 | static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; | 144 | static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; |
127 | 145 | ||
128 | /* | 146 | /* |
@@ -152,6 +170,7 @@ static u32 hdw_thread; | |||
152 | 170 | ||
153 | static u32 virt_cntr_inter_mask; | 171 | static u32 virt_cntr_inter_mask; |
154 | static struct timer_list timer_virt_cntr; | 172 | static struct timer_list timer_virt_cntr; |
173 | static struct timer_list timer_spu_event_swap; | ||
155 | 174 | ||
156 | /* | 175 | /* |
157 | * pm_signal needs to be global since it is initialized in | 176 | * pm_signal needs to be global since it is initialized in |
@@ -165,7 +184,7 @@ static int spu_rtas_token; /* token for SPU cycle profiling */ | |||
165 | static u32 reset_value[NR_PHYS_CTRS]; | 184 | static u32 reset_value[NR_PHYS_CTRS]; |
166 | static int num_counters; | 185 | static int num_counters; |
167 | static int oprofile_running; | 186 | static int oprofile_running; |
168 | static DEFINE_SPINLOCK(virt_cntr_lock); | 187 | static DEFINE_SPINLOCK(cntr_lock); |
169 | 188 | ||
170 | static u32 ctr_enabled; | 189 | static u32 ctr_enabled; |
171 | 190 | ||
@@ -336,13 +355,13 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) | |||
336 | for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) { | 355 | for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) { |
337 | if (bus_word & (1 << i)) { | 356 | if (bus_word & (1 << i)) { |
338 | pm_regs.debug_bus_control |= | 357 | pm_regs.debug_bus_control |= |
339 | (bus_type << (30 - (2 * i))); | 358 | (bus_type << (30 - (2 * i))); |
340 | 359 | ||
341 | for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { | 360 | for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { |
342 | if (input_bus[j] == 0xff) { | 361 | if (input_bus[j] == 0xff) { |
343 | input_bus[j] = i; | 362 | input_bus[j] = i; |
344 | pm_regs.group_control |= | 363 | pm_regs.group_control |= |
345 | (i << (30 - (2 * j))); | 364 | (i << (30 - (2 * j))); |
346 | 365 | ||
347 | break; | 366 | break; |
348 | } | 367 | } |
@@ -367,12 +386,16 @@ static void write_pm_cntrl(int cpu) | |||
367 | if (pm_regs.pm_cntrl.stop_at_max == 1) | 386 | if (pm_regs.pm_cntrl.stop_at_max == 1) |
368 | val |= CBE_PM_STOP_AT_MAX; | 387 | val |= CBE_PM_STOP_AT_MAX; |
369 | 388 | ||
370 | if (pm_regs.pm_cntrl.trace_mode == 1) | 389 | if (pm_regs.pm_cntrl.trace_mode != 0) |
371 | val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); | 390 | val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); |
372 | 391 | ||
392 | if (pm_regs.pm_cntrl.trace_buf_ovflw == 1) | ||
393 | val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw); | ||
373 | if (pm_regs.pm_cntrl.freeze == 1) | 394 | if (pm_regs.pm_cntrl.freeze == 1) |
374 | val |= CBE_PM_FREEZE_ALL_CTRS; | 395 | val |= CBE_PM_FREEZE_ALL_CTRS; |
375 | 396 | ||
397 | val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace); | ||
398 | |||
376 | /* | 399 | /* |
377 | * Routine set_count_mode must be called previously to set | 400 | * Routine set_count_mode must be called previously to set |
378 | * the count mode based on the user selection of user and kernel. | 401 | * the count mode based on the user selection of user and kernel. |
@@ -441,7 +464,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
441 | * not both playing with the counters on the same node. | 464 | * not both playing with the counters on the same node. |
442 | */ | 465 | */ |
443 | 466 | ||
444 | spin_lock_irqsave(&virt_cntr_lock, flags); | 467 | spin_lock_irqsave(&cntr_lock, flags); |
445 | 468 | ||
446 | prev_hdw_thread = hdw_thread; | 469 | prev_hdw_thread = hdw_thread; |
447 | 470 | ||
@@ -480,7 +503,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
480 | cbe_disable_pm_interrupts(cpu); | 503 | cbe_disable_pm_interrupts(cpu); |
481 | for (i = 0; i < num_counters; i++) { | 504 | for (i = 0; i < num_counters; i++) { |
482 | per_cpu(pmc_values, cpu + prev_hdw_thread)[i] | 505 | per_cpu(pmc_values, cpu + prev_hdw_thread)[i] |
483 | = cbe_read_ctr(cpu, i); | 506 | = cbe_read_ctr(cpu, i); |
484 | 507 | ||
485 | if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] | 508 | if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] |
486 | == 0xFFFFFFFF) | 509 | == 0xFFFFFFFF) |
@@ -527,7 +550,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
527 | cbe_enable_pm(cpu); | 550 | cbe_enable_pm(cpu); |
528 | } | 551 | } |
529 | 552 | ||
530 | spin_unlock_irqrestore(&virt_cntr_lock, flags); | 553 | spin_unlock_irqrestore(&cntr_lock, flags); |
531 | 554 | ||
532 | mod_timer(&timer_virt_cntr, jiffies + HZ / 10); | 555 | mod_timer(&timer_virt_cntr, jiffies + HZ / 10); |
533 | } | 556 | } |
@@ -541,38 +564,146 @@ static void start_virt_cntrs(void) | |||
541 | add_timer(&timer_virt_cntr); | 564 | add_timer(&timer_virt_cntr); |
542 | } | 565 | } |
543 | 566 | ||
544 | /* This function is called once for all cpus combined */ | 567 | static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr, |
545 | static int cell_reg_setup(struct op_counter_config *ctr, | ||
546 | struct op_system_config *sys, int num_ctrs) | 568 | struct op_system_config *sys, int num_ctrs) |
547 | { | 569 | { |
548 | int i, j, cpu; | 570 | spu_cycle_reset = ctr[0].count; |
549 | spu_cycle_reset = 0; | ||
550 | 571 | ||
551 | if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { | 572 | /* |
552 | spu_cycle_reset = ctr[0].count; | 573 | * Each node will need to make the rtas call to start |
574 | * and stop SPU profiling. Get the token once and store it. | ||
575 | */ | ||
576 | spu_rtas_token = rtas_token("ibm,cbe-spu-perftools"); | ||
577 | |||
578 | if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) { | ||
579 | printk(KERN_ERR | ||
580 | "%s: rtas token ibm,cbe-spu-perftools unknown\n", | ||
581 | __func__); | ||
582 | return -EIO; | ||
583 | } | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | /* Unfortunately, the hardware will only support event profiling | ||
588 | * on one SPU per node at a time. Therefore, we must time slice | ||
589 | * the profiling across all SPUs in the node. Note, we do this | ||
590 | * in parallel for each node. The following routine is called | ||
591 | * periodically based on kernel timer to switch which SPU is | ||
592 | * being monitored in a round-robin fashion. | ||
593 | */ | ||
594 | static void spu_evnt_swap(unsigned long data) | ||
595 | { | ||
596 | int node; | ||
597 | int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx; | ||
598 | unsigned long flags; | ||
599 | int cpu; | ||
600 | int ret; | ||
601 | u32 interrupt_mask; | ||
602 | |||
603 | |||
604 | /* enable interrupts on cntr 0 */ | ||
605 | interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0); | ||
606 | |||
607 | hdw_thread = 0; | ||
608 | |||
609 | /* Make sure spu event interrupt handler and spu event swap | ||
610 | * don't access the counters simultaneously. | ||
611 | */ | ||
612 | spin_lock_irqsave(&cntr_lock, flags); | ||
613 | |||
614 | cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx; | ||
615 | |||
616 | if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE) | ||
617 | spu_evnt_phys_spu_indx = 0; | ||
618 | |||
619 | pm_signal[0].sub_unit = spu_evnt_phys_spu_indx; | ||
620 | pm_signal[1].sub_unit = spu_evnt_phys_spu_indx; | ||
621 | pm_signal[2].sub_unit = spu_evnt_phys_spu_indx; | ||
622 | |||
623 | /* switch the SPU being profiled on each node */ | ||
624 | for_each_online_cpu(cpu) { | ||
625 | if (cbe_get_hw_thread_id(cpu)) | ||
626 | continue; | ||
627 | |||
628 | node = cbe_cpu_to_node(cpu); | ||
629 | cur_phys_spu = (node * NUM_SPUS_PER_NODE) | ||
630 | + cur_spu_evnt_phys_spu_indx; | ||
631 | nxt_phys_spu = (node * NUM_SPUS_PER_NODE) | ||
632 | + spu_evnt_phys_spu_indx; | ||
553 | 633 | ||
554 | /* | 634 | /* |
555 | * Each node will need to make the rtas call to start | 635 | * stop counters, save counter values, restore counts |
556 | * and stop SPU profiling. Get the token once and store it. | 636 | * for previous physical SPU |
557 | */ | 637 | */ |
558 | spu_rtas_token = rtas_token("ibm,cbe-spu-perftools"); | 638 | cbe_disable_pm(cpu); |
639 | cbe_disable_pm_interrupts(cpu); | ||
559 | 640 | ||
560 | if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) { | 641 | spu_pm_cnt[cur_phys_spu] |
561 | printk(KERN_ERR | 642 | = cbe_read_ctr(cpu, 0); |
562 | "%s: rtas token ibm,cbe-spu-perftools unknown\n", | 643 | |
563 | __func__); | 644 | /* restore previous count for the next spu to sample */ |
564 | return -EIO; | 645 | /* NOTE, hardware issue, counter will not start if the |
565 | } | 646 | * counter value is at max (0xFFFFFFFF). |
647 | */ | ||
648 | if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF) | ||
649 | cbe_write_ctr(cpu, 0, 0xFFFFFFF0); | ||
650 | else | ||
651 | cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]); | ||
652 | |||
653 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
654 | |||
655 | /* setup the debug bus to measure the one event and | ||
656 | * the two events to route the next SPU's PC on | ||
657 | * the debug bus | ||
658 | */ | ||
659 | ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3); | ||
660 | if (ret) | ||
661 | printk(KERN_ERR "%s: pm_rtas_activate_signals failed, " | ||
662 | "SPU event swap\n", __func__); | ||
663 | |||
664 | /* clear the trace buffer, don't want to take PC for | ||
665 | * previous SPU*/ | ||
666 | cbe_write_pm(cpu, trace_address, 0); | ||
667 | |||
668 | enable_ctr(cpu, 0, pm_regs.pm07_cntrl); | ||
669 | |||
670 | /* Enable interrupts on the CPU thread that is starting */ | ||
671 | cbe_enable_pm_interrupts(cpu, hdw_thread, | ||
672 | interrupt_mask); | ||
673 | cbe_enable_pm(cpu); | ||
566 | } | 674 | } |
567 | 675 | ||
568 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | 676 | spin_unlock_irqrestore(&cntr_lock, flags); |
569 | 677 | ||
678 | /* swap approximately every 0.1 seconds */ | ||
679 | mod_timer(&timer_spu_event_swap, jiffies + HZ / 25); | ||
680 | } | ||
681 | |||
682 | static void start_spu_event_swap(void) | ||
683 | { | ||
684 | init_timer(&timer_spu_event_swap); | ||
685 | timer_spu_event_swap.function = spu_evnt_swap; | ||
686 | timer_spu_event_swap.data = 0UL; | ||
687 | timer_spu_event_swap.expires = jiffies + HZ / 25; | ||
688 | add_timer(&timer_spu_event_swap); | ||
689 | } | ||
690 | |||
691 | static int cell_reg_setup_spu_events(struct op_counter_config *ctr, | ||
692 | struct op_system_config *sys, int num_ctrs) | ||
693 | { | ||
694 | int i; | ||
695 | |||
696 | /* routine is called once for all nodes */ | ||
697 | |||
698 | spu_evnt_phys_spu_indx = 0; | ||
570 | /* | 699 | /* |
571 | * For all events excetp PPU CYCLEs, each node will need to make | 700 | * For all events except PPU CYCLEs, each node will need to make |
572 | * the rtas cbe-perftools call to setup and reset the debug bus. | 701 | * the rtas cbe-perftools call to setup and reset the debug bus. |
573 | * Make the token lookup call once and store it in the global | 702 | * Make the token lookup call once and store it in the global |
574 | * variable pm_rtas_token. | 703 | * variable pm_rtas_token. |
575 | */ | 704 | */ |
705 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | ||
706 | |||
576 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { | 707 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { |
577 | printk(KERN_ERR | 708 | printk(KERN_ERR |
578 | "%s: rtas token ibm,cbe-perftools unknown\n", | 709 | "%s: rtas token ibm,cbe-perftools unknown\n", |
@@ -580,6 +711,58 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
580 | return -EIO; | 711 | return -EIO; |
581 | } | 712 | } |
582 | 713 | ||
714 | /* setup the pm_control register settings, | ||
715 | * settings will be written per node by the | ||
716 | * cell_cpu_setup() function. | ||
717 | */ | ||
718 | pm_regs.pm_cntrl.trace_buf_ovflw = 1; | ||
719 | |||
720 | /* Use the occurrence trace mode to have SPU PC saved | ||
721 | * to the trace buffer. Occurrence data in trace buffer | ||
722 | * is not used. Bit 2 must be set to store SPU addresses. | ||
723 | */ | ||
724 | pm_regs.pm_cntrl.trace_mode = 2; | ||
725 | |||
726 | pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus | ||
727 | event 2 & 3 */ | ||
728 | |||
729 | /* setup the debug bus event array with the SPU PC routing events. | ||
730 | * Note, pm_signal[0] will be filled in by set_pm_event() call below. | ||
731 | */ | ||
732 | pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100; | ||
733 | pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A); | ||
734 | pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100; | ||
735 | pm_signal[1].sub_unit = spu_evnt_phys_spu_indx; | ||
736 | |||
737 | pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100; | ||
738 | pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B); | ||
739 | pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100; | ||
740 | pm_signal[2].sub_unit = spu_evnt_phys_spu_indx; | ||
741 | |||
742 | /* Set the user selected spu event to profile on, | ||
743 | * note, only one SPU profiling event is supported | ||
744 | */ | ||
745 | num_counters = 1; /* Only support one SPU event at a time */ | ||
746 | set_pm_event(0, ctr[0].event, ctr[0].unit_mask); | ||
747 | |||
748 | reset_value[0] = 0xFFFFFFFF - ctr[0].count; | ||
749 | |||
750 | /* global, used by cell_cpu_setup */ | ||
751 | ctr_enabled |= 1; | ||
752 | |||
753 | /* Initialize the count for each SPU to the reset value */ | ||
754 | for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++) | ||
755 | spu_pm_cnt[i] = reset_value[0]; | ||
756 | |||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | static int cell_reg_setup_ppu(struct op_counter_config *ctr, | ||
761 | struct op_system_config *sys, int num_ctrs) | ||
762 | { | ||
763 | /* routine is called once for all nodes */ | ||
764 | int i, j, cpu; | ||
765 | |||
583 | num_counters = num_ctrs; | 766 | num_counters = num_ctrs; |
584 | 767 | ||
585 | if (unlikely(num_ctrs > NR_PHYS_CTRS)) { | 768 | if (unlikely(num_ctrs > NR_PHYS_CTRS)) { |
@@ -589,14 +772,6 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
589 | __func__); | 772 | __func__); |
590 | return -EIO; | 773 | return -EIO; |
591 | } | 774 | } |
592 | pm_regs.group_control = 0; | ||
593 | pm_regs.debug_bus_control = 0; | ||
594 | |||
595 | /* setup the pm_control register */ | ||
596 | memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl)); | ||
597 | pm_regs.pm_cntrl.stop_at_max = 1; | ||
598 | pm_regs.pm_cntrl.trace_mode = 0; | ||
599 | pm_regs.pm_cntrl.freeze = 1; | ||
600 | 775 | ||
601 | set_count_mode(sys->enable_kernel, sys->enable_user); | 776 | set_count_mode(sys->enable_kernel, sys->enable_user); |
602 | 777 | ||
@@ -665,6 +840,63 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
665 | } | 840 | } |
666 | 841 | ||
667 | 842 | ||
843 | /* This function is called once for all cpus combined */ | ||
844 | static int cell_reg_setup(struct op_counter_config *ctr, | ||
845 | struct op_system_config *sys, int num_ctrs) | ||
846 | { | ||
847 | int ret=0; | ||
848 | spu_cycle_reset = 0; | ||
849 | |||
850 | /* initialize the spu_addr_trace value, will be reset if | ||
851 | * doing spu event profiling. | ||
852 | */ | ||
853 | pm_regs.group_control = 0; | ||
854 | pm_regs.debug_bus_control = 0; | ||
855 | pm_regs.pm_cntrl.stop_at_max = 1; | ||
856 | pm_regs.pm_cntrl.trace_mode = 0; | ||
857 | pm_regs.pm_cntrl.freeze = 1; | ||
858 | pm_regs.pm_cntrl.trace_buf_ovflw = 0; | ||
859 | pm_regs.pm_cntrl.spu_addr_trace = 0; | ||
860 | |||
861 | /* | ||
862 | * For all events except PPU CYCLEs, each node will need to make | ||
863 | * the rtas cbe-perftools call to setup and reset the debug bus. | ||
864 | * Make the token lookup call once and store it in the global | ||
865 | * variable pm_rtas_token. | ||
866 | */ | ||
867 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | ||
868 | |||
869 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { | ||
870 | printk(KERN_ERR | ||
871 | "%s: rtas token ibm,cbe-perftools unknown\n", | ||
872 | __func__); | ||
873 | return -EIO; | ||
874 | } | ||
875 | |||
876 | if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { | ||
877 | profiling_mode = SPU_PROFILING_CYCLES; | ||
878 | ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs); | ||
879 | } else if ((ctr[0].event >= SPU_EVENT_NUM_START) && | ||
880 | (ctr[0].event <= SPU_EVENT_NUM_STOP)) { | ||
881 | profiling_mode = SPU_PROFILING_EVENTS; | ||
882 | spu_cycle_reset = ctr[0].count; | ||
883 | |||
884 | /* for SPU event profiling, need to setup the | ||
885 | * pm_signal array with the events to route the | ||
886 | * SPU PC before making the FW call. Note, only | ||
887 | * one SPU event for profiling can be specified | ||
888 | * at a time. | ||
889 | */ | ||
890 | cell_reg_setup_spu_events(ctr, sys, num_ctrs); | ||
891 | } else { | ||
892 | profiling_mode = PPU_PROFILING; | ||
893 | ret = cell_reg_setup_ppu(ctr, sys, num_ctrs); | ||
894 | } | ||
895 | |||
896 | return ret; | ||
897 | } | ||
898 | |||
899 | |||
668 | 900 | ||
669 | /* This function is called once for each cpu */ | 901 | /* This function is called once for each cpu */ |
670 | static int cell_cpu_setup(struct op_counter_config *cntr) | 902 | static int cell_cpu_setup(struct op_counter_config *cntr) |
@@ -672,8 +904,13 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
672 | u32 cpu = smp_processor_id(); | 904 | u32 cpu = smp_processor_id(); |
673 | u32 num_enabled = 0; | 905 | u32 num_enabled = 0; |
674 | int i; | 906 | int i; |
907 | int ret; | ||
675 | 908 | ||
676 | if (spu_cycle_reset) | 909 | /* Cycle based SPU profiling does not use the performance |
910 | * counters. The trace array is configured to collect | ||
911 | * the data. | ||
912 | */ | ||
913 | if (profiling_mode == SPU_PROFILING_CYCLES) | ||
677 | return 0; | 914 | return 0; |
678 | 915 | ||
679 | /* There is one performance monitor per processor chip (i.e. node), | 916 | /* There is one performance monitor per processor chip (i.e. node), |
@@ -686,7 +923,6 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
686 | cbe_disable_pm(cpu); | 923 | cbe_disable_pm(cpu); |
687 | cbe_disable_pm_interrupts(cpu); | 924 | cbe_disable_pm_interrupts(cpu); |
688 | 925 | ||
689 | cbe_write_pm(cpu, pm_interval, 0); | ||
690 | cbe_write_pm(cpu, pm_start_stop, 0); | 926 | cbe_write_pm(cpu, pm_start_stop, 0); |
691 | cbe_write_pm(cpu, group_control, pm_regs.group_control); | 927 | cbe_write_pm(cpu, group_control, pm_regs.group_control); |
692 | cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); | 928 | cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); |
@@ -703,7 +939,20 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
703 | * The pm_rtas_activate_signals will return -EIO if the FW | 939 | * The pm_rtas_activate_signals will return -EIO if the FW |
704 | * call failed. | 940 | * call failed. |
705 | */ | 941 | */ |
706 | return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled); | 942 | if (profiling_mode == SPU_PROFILING_EVENTS) { |
943 | /* For SPU event profiling also need to setup the | ||
944 | * pm interval timer | ||
945 | */ | ||
946 | ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), | ||
947 | num_enabled+2); | ||
948 | /* store PC from debug bus to Trace buffer as often | ||
949 | * as possible (every 10 cycles) | ||
950 | */ | ||
951 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); | ||
952 | return ret; | ||
953 | } else | ||
954 | return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), | ||
955 | num_enabled); | ||
707 | } | 956 | } |
708 | 957 | ||
709 | #define ENTRIES 303 | 958 | #define ENTRIES 303 |
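
For SPU event profiling the hunk above programs pm_interval with NUM_INTERVAL_CYC so the SPU PC is written to the trace buffer as often as the hardware allows. Following the 2^32 - 1 - N convention stated in the earlier comment, a small illustrative encoder (not part of the patch):

#include <linux/types.h>

/* Encode an interval of 'cycles' for the pm_interval register using the
 * 2^32 - 1 - N convention described above; the hardware minimum for
 * trace-buffer writes is 10 cycles. */
static u32 pm_interval_for_cycles(u32 cycles)
{
        return 0xFFFFFFFFu - cycles;
}
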
@@ -885,7 +1134,122 @@ static struct notifier_block cpu_freq_notifier_block = { | |||
885 | }; | 1134 | }; |
886 | #endif | 1135 | #endif |
887 | 1136 | ||
888 | static int cell_global_start_spu(struct op_counter_config *ctr) | 1137 | /* |
1138 | * Note the generic OProfile stop calls do not support returning | ||
1139 | * an error on stop. Hence, will not return an error if the FW | ||
1140 | * calls fail on stop. Failure to reset the debug bus is not an issue. | ||
1141 | * Failure to disable the SPU profiling is not an issue. The FW calls | ||
1142 | * to enable the performance counters and debug bus will work even if | ||
1143 | * the hardware was not cleanly reset. | ||
1144 | */ | ||
1145 | static void cell_global_stop_spu_cycles(void) | ||
1146 | { | ||
1147 | int subfunc, rtn_value; | ||
1148 | unsigned int lfsr_value; | ||
1149 | int cpu; | ||
1150 | |||
1151 | oprofile_running = 0; | ||
1152 | smp_wmb(); | ||
1153 | |||
1154 | #ifdef CONFIG_CPU_FREQ | ||
1155 | cpufreq_unregister_notifier(&cpu_freq_notifier_block, | ||
1156 | CPUFREQ_TRANSITION_NOTIFIER); | ||
1157 | #endif | ||
1158 | |||
1159 | for_each_online_cpu(cpu) { | ||
1160 | if (cbe_get_hw_thread_id(cpu)) | ||
1161 | continue; | ||
1162 | |||
1163 | subfunc = 3; /* | ||
1164 | * 2 - activate SPU tracing, | ||
1165 | * 3 - deactivate | ||
1166 | */ | ||
1167 | lfsr_value = 0x8f100000; | ||
1168 | |||
1169 | rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL, | ||
1170 | subfunc, cbe_cpu_to_node(cpu), | ||
1171 | lfsr_value); | ||
1172 | |||
1173 | if (unlikely(rtn_value != 0)) { | ||
1174 | printk(KERN_ERR | ||
1175 | "%s: rtas call ibm,cbe-spu-perftools " \ | ||
1176 | "failed, return = %d\n", | ||
1177 | __func__, rtn_value); | ||
1178 | } | ||
1179 | |||
1180 | /* Deactivate the signals */ | ||
1181 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
1182 | } | ||
1183 | |||
1184 | stop_spu_profiling_cycles(); | ||
1185 | } | ||
1186 | |||
1187 | static void cell_global_stop_spu_events(void) | ||
1188 | { | ||
1189 | int cpu; | ||
1190 | oprofile_running = 0; | ||
1191 | |||
1192 | stop_spu_profiling_events(); | ||
1193 | smp_wmb(); | ||
1194 | |||
1195 | for_each_online_cpu(cpu) { | ||
1196 | if (cbe_get_hw_thread_id(cpu)) | ||
1197 | continue; | ||
1198 | |||
1199 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | ||
1200 | /* Stop the counters */ | ||
1201 | cbe_disable_pm(cpu); | ||
1202 | cbe_write_pm07_control(cpu, 0, 0); | ||
1203 | |||
1204 | /* Deactivate the signals */ | ||
1205 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
1206 | |||
1207 | /* Deactivate interrupts */ | ||
1208 | cbe_disable_pm_interrupts(cpu); | ||
1209 | } | ||
1210 | del_timer_sync(&timer_spu_event_swap); | ||
1211 | } | ||
1212 | |||
1213 | static void cell_global_stop_ppu(void) | ||
1214 | { | ||
1215 | int cpu; | ||
1216 | |||
1217 | /* | ||
1218 | * This routine will be called once for the system. | ||
1219 | * There is one performance monitor per node, so we | ||
1220 | * only need to perform this function once per node. | ||
1221 | */ | ||
1222 | del_timer_sync(&timer_virt_cntr); | ||
1223 | oprofile_running = 0; | ||
1224 | smp_wmb(); | ||
1225 | |||
1226 | for_each_online_cpu(cpu) { | ||
1227 | if (cbe_get_hw_thread_id(cpu)) | ||
1228 | continue; | ||
1229 | |||
1230 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | ||
1231 | /* Stop the counters */ | ||
1232 | cbe_disable_pm(cpu); | ||
1233 | |||
1234 | /* Deactivate the signals */ | ||
1235 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
1236 | |||
1237 | /* Deactivate interrupts */ | ||
1238 | cbe_disable_pm_interrupts(cpu); | ||
1239 | } | ||
1240 | } | ||
1241 | |||
1242 | static void cell_global_stop(void) | ||
1243 | { | ||
1244 | if (profiling_mode == PPU_PROFILING) | ||
1245 | cell_global_stop_ppu(); | ||
1246 | else if (profiling_mode == SPU_PROFILING_EVENTS) | ||
1247 | cell_global_stop_spu_events(); | ||
1248 | else | ||
1249 | cell_global_stop_spu_cycles(); | ||
1250 | } | ||
1251 | |||
1252 | static int cell_global_start_spu_cycles(struct op_counter_config *ctr) | ||
889 | { | 1253 | { |
890 | int subfunc; | 1254 | int subfunc; |
891 | unsigned int lfsr_value; | 1255 | unsigned int lfsr_value; |
@@ -951,18 +1315,18 @@ static int cell_global_start_spu(struct op_counter_config *ctr) | |||
951 | 1315 | ||
952 | /* start profiling */ | 1316 | /* start profiling */ |
953 | ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc, | 1317 | ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc, |
954 | cbe_cpu_to_node(cpu), lfsr_value); | 1318 | cbe_cpu_to_node(cpu), lfsr_value); |
955 | 1319 | ||
956 | if (unlikely(ret != 0)) { | 1320 | if (unlikely(ret != 0)) { |
957 | printk(KERN_ERR | 1321 | printk(KERN_ERR |
958 | "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n", | 1322 | "%s: rtas call ibm,cbe-spu-perftools failed, " \ |
959 | __func__, ret); | 1323 | "return = %d\n", __func__, ret); |
960 | rtas_error = -EIO; | 1324 | rtas_error = -EIO; |
961 | goto out; | 1325 | goto out; |
962 | } | 1326 | } |
963 | } | 1327 | } |
964 | 1328 | ||
965 | rtas_error = start_spu_profiling(spu_cycle_reset); | 1329 | rtas_error = start_spu_profiling_cycles(spu_cycle_reset); |
966 | if (rtas_error) | 1330 | if (rtas_error) |
967 | goto out_stop; | 1331 | goto out_stop; |
968 | 1332 | ||
@@ -970,11 +1334,74 @@ static int cell_global_start_spu(struct op_counter_config *ctr) | |||
970 | return 0; | 1334 | return 0; |
971 | 1335 | ||
972 | out_stop: | 1336 | out_stop: |
973 | cell_global_stop_spu(); /* clean up the PMU/debug bus */ | 1337 | cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */ |
974 | out: | 1338 | out: |
975 | return rtas_error; | 1339 | return rtas_error; |
976 | } | 1340 | } |
977 | 1341 | ||
1342 | static int cell_global_start_spu_events(struct op_counter_config *ctr) | ||
1343 | { | ||
1344 | int cpu; | ||
1345 | u32 interrupt_mask = 0; | ||
1346 | int rtn = 0; | ||
1347 | |||
1348 | hdw_thread = 0; | ||
1349 | |||
1350 | /* spu event profiling, uses the performance counters to generate | ||
1351 | * an interrupt. The hardware is setup to store the SPU program | ||
1352 | * counter into the trace array. The occurrence mode is used to | ||
1353 | * enable storing data to the trace buffer. The bits are set | ||
1354 | * to send/store the SPU address in the trace buffer. The debug | ||
1355 | * bus must be setup to route the SPU program counter onto the | ||
1356 | * debug bus. The occurrence data in the trace buffer is not used. | ||
1357 | */ | ||
1358 | |||
1359 | /* This routine gets called once for the system. | ||
1360 | * There is one performance monitor per node, so we | ||
1361 | * only need to perform this function once per node. | ||
1362 | */ | ||
1363 | |||
1364 | for_each_online_cpu(cpu) { | ||
1365 | if (cbe_get_hw_thread_id(cpu)) | ||
1366 | continue; | ||
1367 | |||
1368 | /* | ||
1369 | * Setup SPU event-based profiling. | ||
1370 | * Set perf_mon_control bit 0 to a zero before | ||
1371 | * enabling spu collection hardware. | ||
1372 | * | ||
1373 | * Only support one SPU event on one SPU per node. | ||
1374 | */ | ||
1375 | if (ctr_enabled & 1) { | ||
1376 | cbe_write_ctr(cpu, 0, reset_value[0]); | ||
1377 | enable_ctr(cpu, 0, pm_regs.pm07_cntrl); | ||
1378 | interrupt_mask |= | ||
1379 | CBE_PM_CTR_OVERFLOW_INTR(0); | ||
1380 | } else { | ||
1381 | /* Disable counter */ | ||
1382 | cbe_write_pm07_control(cpu, 0, 0); | ||
1383 | } | ||
1384 | |||
1385 | cbe_get_and_clear_pm_interrupts(cpu); | ||
1386 | cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask); | ||
1387 | cbe_enable_pm(cpu); | ||
1388 | |||
1389 | /* clear the trace buffer */ | ||
1390 | cbe_write_pm(cpu, trace_address, 0); | ||
1391 | } | ||
1392 | |||
1393 | /* Start the timer to time slice collecting the event profile | ||
1394 | * on each of the SPUs. Note, can collect profile on one SPU | ||
1395 | * per node at a time. | ||
1396 | */ | ||
1397 | start_spu_event_swap(); | ||
1398 | start_spu_profiling_events(); | ||
1399 | oprofile_running = 1; | ||
1400 | smp_wmb(); | ||
1401 | |||
1402 | return rtn; | ||
1403 | } | ||
1404 | |||
978 | static int cell_global_start_ppu(struct op_counter_config *ctr) | 1405 | static int cell_global_start_ppu(struct op_counter_config *ctr) |
979 | { | 1406 | { |
980 | u32 cpu, i; | 1407 | u32 cpu, i; |
@@ -994,8 +1421,7 @@ static int cell_global_start_ppu(struct op_counter_config *ctr) | |||
994 | if (ctr_enabled & (1 << i)) { | 1421 | if (ctr_enabled & (1 << i)) { |
995 | cbe_write_ctr(cpu, i, reset_value[i]); | 1422 | cbe_write_ctr(cpu, i, reset_value[i]); |
996 | enable_ctr(cpu, i, pm_regs.pm07_cntrl); | 1423 | enable_ctr(cpu, i, pm_regs.pm07_cntrl); |
997 | interrupt_mask |= | 1424 | interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i); |
998 | CBE_PM_CTR_OVERFLOW_INTR(i); | ||
999 | } else { | 1425 | } else { |
1000 | /* Disable counter */ | 1426 | /* Disable counter */ |
1001 | cbe_write_pm07_control(cpu, i, 0); | 1427 | cbe_write_pm07_control(cpu, i, 0); |
@@ -1024,99 +1450,162 @@ static int cell_global_start_ppu(struct op_counter_config *ctr) | |||
1024 | 1450 | ||
1025 | static int cell_global_start(struct op_counter_config *ctr) | 1451 | static int cell_global_start(struct op_counter_config *ctr) |
1026 | { | 1452 | { |
1027 | if (spu_cycle_reset) | 1453 | if (profiling_mode == SPU_PROFILING_CYCLES) |
1028 | return cell_global_start_spu(ctr); | 1454 | return cell_global_start_spu_cycles(ctr); |
1455 | else if (profiling_mode == SPU_PROFILING_EVENTS) | ||
1456 | return cell_global_start_spu_events(ctr); | ||
1029 | else | 1457 | else |
1030 | return cell_global_start_ppu(ctr); | 1458 | return cell_global_start_ppu(ctr); |
1031 | } | 1459 | } |
1032 | 1460 | ||
1033 | /* | 1461 | |
1034 | * Note the generic OProfile stop calls do not support returning | 1462 | /* The SPU interrupt handler |
1035 | * an error on stop. Hence, will not return an error if the FW | 1463 | * |
1036 | * calls fail on stop. Failure to reset the debug bus is not an issue. | 1464 | * SPU event profiling works as follows: |
1037 | * Failure to disable the SPU profiling is not an issue. The FW calls | 1465 | * The pm_signal[0] holds the one SPU event to be measured. It is routed on |
1038 | * to enable the performance counters and debug bus will work even if | 1466 | * the debug bus using word 0 or 1. The value of pm_signal[1] and |
1039 | * the hardware was not cleanly reset. | 1467 | * pm_signal[2] contain the necessary events to route the SPU program |
1468 | * counter for the selected SPU onto the debug bus using words 2 and 3. | ||
1469 | * The pm_interval register is setup to write the SPU PC value into the | ||
1470 | * trace buffer at the maximum rate possible. The trace buffer is configured | ||
1471 | * to store the PCs, wrapping when it is full. The performance counter is | ||
1472 | * initialized to the max hardware count minus the number of events, N, between | ||
1473 | * samples. Once the N events have occurred, a HW counter overflow occurs | ||
1474 | * causing the generation of a HW counter interrupt which also stops the | ||
1475 | * writing of the SPU PC values to the trace buffer. Hence the last PC | ||
1476 | * written to the trace buffer is the SPU PC that we want. Unfortunately, | ||
1477 | * we have to read from the beginning of the trace buffer to get to the | ||
1478 | * last value written. We just hope the PPU has nothing better to do than | ||
1479 | * service this interrupt. The PC for the specific SPU being profiled is | ||
1480 | * extracted from the trace buffer, processed and stored. The trace buffer | ||
1481 | * is cleared, interrupts are cleared, the counter is reset to max - N. | ||
1482 | * A kernel timer is used to periodically call the routine spu_evnt_swap() | ||
1483 | * to switch to the next physical SPU in the node to profile in round-robin | ||
1484 | * order. This way data is collected for all SPUs on the node. It does mean | ||
1485 | * that we need to use a relatively small value of N to ensure enough samples | ||
1486 | * on each SPU are collected; each SPU is being profiled 1/8 of the time. | ||
1487 | * It may also be necessary to use a longer sample collection period. | ||
1040 | */ | 1488 | */ |
1041 | static void cell_global_stop_spu(void) | 1489 | static void cell_handle_interrupt_spu(struct pt_regs *regs, |
1490 | struct op_counter_config *ctr) | ||
1042 | { | 1491 | { |
1043 | int subfunc, rtn_value; | 1492 | u32 cpu, cpu_tmp; |
1044 | unsigned int lfsr_value; | 1493 | u64 trace_entry; |
1045 | int cpu; | 1494 | u32 interrupt_mask; |
1495 | u64 trace_buffer[2]; | ||
1496 | u64 last_trace_buffer; | ||
1497 | u32 sample; | ||
1498 | u32 trace_addr; | ||
1499 | unsigned long sample_array_lock_flags; | ||
1500 | int spu_num; | ||
1501 | unsigned long flags; | ||
1046 | 1502 | ||
1047 | oprofile_running = 0; | 1503 | /* Make sure spu event interrupt handler and spu event swap |
1504 | * don't access the counters simultaneously. | ||
1505 | */ | ||
1506 | cpu = smp_processor_id(); | ||
1507 | spin_lock_irqsave(&cntr_lock, flags); | ||
1048 | 1508 | ||
1049 | #ifdef CONFIG_CPU_FREQ | 1509 | cpu_tmp = cpu; |
1050 | cpufreq_unregister_notifier(&cpu_freq_notifier_block, | 1510 | cbe_disable_pm(cpu); |
1051 | CPUFREQ_TRANSITION_NOTIFIER); | ||
1052 | #endif | ||
1053 | 1511 | ||
1054 | for_each_online_cpu(cpu) { | 1512 | interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu); |
1055 | if (cbe_get_hw_thread_id(cpu)) | ||
1056 | continue; | ||
1057 | 1513 | ||
1058 | subfunc = 3; /* | 1514 | sample = 0xABCDEF; |
1059 | * 2 - activate SPU tracing, | 1515 | trace_entry = 0xfedcba; |
1060 | * 3 - deactivate | 1516 | last_trace_buffer = 0xdeadbeaf; |
1061 | */ | ||
1062 | lfsr_value = 0x8f100000; | ||
1063 | 1517 | ||
1064 | rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL, | 1518 | if ((oprofile_running == 1) && (interrupt_mask != 0)) { |
1065 | subfunc, cbe_cpu_to_node(cpu), | 1519 | /* disable writes to trace buff */ |
1066 | lfsr_value); | 1520 | cbe_write_pm(cpu, pm_interval, 0); |
1067 | 1521 | ||
1068 | if (unlikely(rtn_value != 0)) { | 1522 | /* only have one perf cntr being used, cntr 0 */ |
1069 | printk(KERN_ERR | 1523 | if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0)) |
1070 | "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n", | 1524 | && ctr[0].enabled) |
1071 | __func__, rtn_value); | 1525 | /* The SPU PC values will be read |
1526 | * from the trace buffer, reset counter | ||
1527 | */ | ||
1528 | |||
1529 | cbe_write_ctr(cpu, 0, reset_value[0]); | ||
1530 | |||
1531 | trace_addr = cbe_read_pm(cpu, trace_address); | ||
1532 | |||
1533 | while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) { | ||
1534 | /* There is data in the trace buffer to process | ||
1535 | * Read the buffer until you get to the last | ||
1536 | * entry. This is the value we want. | ||
1537 | */ | ||
1538 | |||
1539 | cbe_read_trace_buffer(cpu, trace_buffer); | ||
1540 | trace_addr = cbe_read_pm(cpu, trace_address); | ||
1072 | } | 1541 | } |
1073 | 1542 | ||
1074 | /* Deactivate the signals */ | 1543 | /* SPU Address 16 bit count format for 128 bit |
1075 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | 1544 | * HW trace buffer is used for the SPU PC storage |
1076 | } | 1545 | * HDR bits 0:15 |
1546 | * SPU Addr 0 bits 16:31 | ||
1547 | * SPU Addr 1 bits 32:47 | ||
1548 | * unused bits 48:127 | ||
1549 | * | ||
1550 | * HDR: bit4 = 1 SPU Address 0 valid | ||
1551 | * HDR: bit5 = 1 SPU Address 1 valid | ||
1552 | * - unfortunately, the valid bits don't seem to work | ||
1553 | * | ||
1554 | * Note trace_buffer[0] holds bits 0:63 of the HW | ||
1555 | * trace buffer, trace_buffer[1] holds bits 64:127 | ||
1556 | */ | ||
1077 | 1557 | ||
1078 | stop_spu_profiling(); | 1558 | trace_entry = trace_buffer[0] |
1079 | } | 1559 | & 0x00000000FFFF0000; |
1080 | 1560 | ||
1081 | static void cell_global_stop_ppu(void) | 1561 | /* only top 16 of the 18 bit SPU PC address |
1082 | { | 1562 | * is stored in trace buffer, hence shift right |
1083 | int cpu; | 1563 | * by 16 -2 bits */ |
1564 | sample = trace_entry >> 14; | ||
1565 | last_trace_buffer = trace_buffer[0]; | ||
1084 | 1566 | ||
1085 | /* | 1567 | spu_num = spu_evnt_phys_spu_indx |
1086 | * This routine will be called once for the system. | 1568 | + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE); |
1087 | * There is one performance monitor per node, so we | ||
1088 | * only need to perform this function once per node. | ||
1089 | */ | ||
1090 | del_timer_sync(&timer_virt_cntr); | ||
1091 | oprofile_running = 0; | ||
1092 | smp_wmb(); | ||
1093 | 1569 | ||
1094 | for_each_online_cpu(cpu) { | 1570 | /* make sure only one process at a time is calling |
1095 | if (cbe_get_hw_thread_id(cpu)) | 1571 | * spu_sync_buffer() |
1096 | continue; | 1572 | */ |
1573 | spin_lock_irqsave(&oprof_spu_smpl_arry_lck, | ||
1574 | sample_array_lock_flags); | ||
1575 | spu_sync_buffer(spu_num, &sample, 1); | ||
1576 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, | ||
1577 | sample_array_lock_flags); | ||
1097 | 1578 | ||
1098 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | 1579 | smp_wmb(); /* ensure spu event buffer updates are written |
1099 | /* Stop the counters */ | 1580 | * don't want events intermingled... */ |
1100 | cbe_disable_pm(cpu); | ||
1101 | 1581 | ||
1102 | /* Deactivate the signals */ | 1582 | /* The counters were frozen by the interrupt. |
1103 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | 1583 | * Reenable the interrupt and restart the counters. |
1584 | */ | ||
1585 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); | ||
1586 | cbe_enable_pm_interrupts(cpu, hdw_thread, | ||
1587 | virt_cntr_inter_mask); | ||
1104 | 1588 | ||
1105 | /* Deactivate interrupts */ | 1589 | /* clear the trace buffer, re-enable writes to trace buff */ |
1106 | cbe_disable_pm_interrupts(cpu); | 1590 | cbe_write_pm(cpu, trace_address, 0); |
1107 | } | 1591 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); |
1108 | } | ||
1109 | 1592 | ||
1110 | static void cell_global_stop(void) | 1593 | /* The writes to the various performance counters only writes |
1111 | { | 1594 | * to a latch. The new values (interrupt setting bits, reset |
1112 | if (spu_cycle_reset) | 1595 | * counter value etc.) are not copied to the actual registers |
1113 | cell_global_stop_spu(); | 1596 | * until the performance monitor is enabled. In order to get |
1114 | cell_global_stop_ppu(); | 1597 | * this to work as desired, the performance monitor needs to |
1115 | cell_global_stop_ppu(); | 1598 | * be disabled while writing to the latches. This is a |
1599 | * HW design issue. | ||
1600 | */ | ||
1601 | write_pm_cntrl(cpu); | ||
1602 | cbe_enable_pm(cpu); | ||
1603 | } | ||
1604 | spin_unlock_irqrestore(&cntr_lock, flags); | ||
1116 | } | 1605 | } |
1117 | 1606 | ||
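The hunk above recovers an SPU program counter from the hardware trace buffer: a 16-bit field is masked out of the first 64-bit trace word and shifted right by 16 - 2 = 14, so the stored top 16 bits of the 18-bit, word-aligned SPU PC end up at bit 2 with the low two bits implied zero. A minimal standalone sketch of that mask-and-shift (field position taken from the comment in the patch, sample value hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: recover a word-aligned 18-bit SPU PC from the 16-bit
 * field held in bits 16..31 (LSB = 0) of the first 64-bit trace-buffer
 * word, mirroring the mask-and-shift in cell_handle_interrupt_spu(). */
static uint64_t spu_pc_from_trace(uint64_t trace_word0)
{
    uint64_t field = trace_word0 & 0x00000000FFFF0000ULL;  /* 16-bit PC field */

    /* The field is the top 16 bits of an 18-bit PC, so shifting right by
     * 16 - 2 = 14 places it at bits 2..17; bits 0..1 stay zero. */
    return field >> 14;
}

int main(void)
{
    uint64_t word0 = 0x0000000012340000ULL;  /* hypothetical trace entry */

    printf("SPU PC sample = 0x%llx\n",
           (unsigned long long)spu_pc_from_trace(word0));
    return 0;
}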
1118 | static void cell_handle_interrupt(struct pt_regs *regs, | 1607 | static void cell_handle_interrupt_ppu(struct pt_regs *regs, |
1119 | struct op_counter_config *ctr) | 1608 | struct op_counter_config *ctr) |
1120 | { | 1609 | { |
1121 | u32 cpu; | 1610 | u32 cpu; |
1122 | u64 pc; | 1611 | u64 pc; |
@@ -1132,7 +1621,7 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
1132 | * routine are not running at the same time. See the | 1621 | * routine are not running at the same time. See the |
1133 | * cell_virtual_cntr() routine for additional comments. | 1622 | * cell_virtual_cntr() routine for additional comments. |
1134 | */ | 1623 | */ |
1135 | spin_lock_irqsave(&virt_cntr_lock, flags); | 1624 | spin_lock_irqsave(&cntr_lock, flags); |
1136 | 1625 | ||
1137 | /* | 1626 | /* |
1138 | * Need to disable and reenable the performance counters | 1627 | * Need to disable and reenable the performance counters |
@@ -1185,7 +1674,16 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
1185 | */ | 1674 | */ |
1186 | cbe_enable_pm(cpu); | 1675 | cbe_enable_pm(cpu); |
1187 | } | 1676 | } |
1188 | spin_unlock_irqrestore(&virt_cntr_lock, flags); | 1677 | spin_unlock_irqrestore(&cntr_lock, flags); |
1678 | } | ||
1679 | |||
1680 | static void cell_handle_interrupt(struct pt_regs *regs, | ||
1681 | struct op_counter_config *ctr) | ||
1682 | { | ||
1683 | if (profiling_mode == PPU_PROFILING) | ||
1684 | cell_handle_interrupt_ppu(regs, ctr); | ||
1685 | else | ||
1686 | cell_handle_interrupt_spu(regs, ctr); | ||
1189 | } | 1687 | } |
1190 | 1688 | ||
1191 | /* | 1689 | /* |
@@ -1195,7 +1693,8 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
1195 | */ | 1693 | */ |
1196 | static int cell_sync_start(void) | 1694 | static int cell_sync_start(void) |
1197 | { | 1695 | { |
1198 | if (spu_cycle_reset) | 1696 | if ((profiling_mode == SPU_PROFILING_CYCLES) || |
1697 | (profiling_mode == SPU_PROFILING_EVENTS)) | ||
1199 | return spu_sync_start(); | 1698 | return spu_sync_start(); |
1200 | else | 1699 | else |
1201 | return DO_GENERIC_SYNC; | 1700 | return DO_GENERIC_SYNC; |
@@ -1203,7 +1702,8 @@ static int cell_sync_start(void) | |||
1203 | 1702 | ||
1204 | static int cell_sync_stop(void) | 1703 | static int cell_sync_stop(void) |
1205 | { | 1704 | { |
1206 | if (spu_cycle_reset) | 1705 | if ((profiling_mode == SPU_PROFILING_CYCLES) || |
1706 | (profiling_mode == SPU_PROFILING_EVENTS)) | ||
1207 | return spu_sync_stop(); | 1707 | return spu_sync_stop(); |
1208 | else | 1708 | else |
1209 | return 1; | 1709 | return 1; |
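cell_sync_start() and cell_sync_stop() above repeat the same two-way mode test. A tiny standalone sketch of that predicate; only the names mirror the patch, the enum values are illustrative:

#include <stdio.h>
#include <stdbool.h>

enum profiling_mode_demo { PPU_PROFILING, SPU_PROFILING_CYCLES, SPU_PROFILING_EVENTS };

/* True whenever either SPU profiling mode is selected, which is the
 * condition both sync hooks now check before calling spu_sync_*(). */
static bool spu_profiling_selected(enum profiling_mode_demo mode)
{
    return mode == SPU_PROFILING_CYCLES || mode == SPU_PROFILING_EVENTS;
}

int main(void)
{
    printf("%d %d %d\n",
           spu_profiling_selected(PPU_PROFILING),
           spu_profiling_selected(SPU_PROFILING_CYCLES),
           spu_profiling_selected(SPU_PROFILING_EVENTS));  /* 0 1 1 */
    return 0;
}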
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 9fa9dcdf344b..e02a359d2aa5 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -300,7 +300,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
300 | return oldbit; | 300 | return oldbit; |
301 | } | 301 | } |
302 | 302 | ||
303 | static inline int constant_test_bit(int nr, const volatile unsigned long *addr) | 303 | static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
304 | { | 304 | { |
305 | return ((1UL << (nr % BITS_PER_LONG)) & | 305 | return ((1UL << (nr % BITS_PER_LONG)) & |
306 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; | 306 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; |
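The one-line change above makes the bit index unsigned, so the nr % BITS_PER_LONG and nr / BITS_PER_LONG arithmetic can never see a negative operand. A standalone re-creation of the same word/bit math:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Same computation as constant_test_bit(): pick the word holding bit nr,
 * then test the bit within that word. */
static int test_bit_demo(unsigned int nr, const unsigned long *addr)
{
    return ((1UL << (nr % BITS_PER_LONG)) &
            (addr[nr / BITS_PER_LONG])) != 0;
}

int main(void)
{
    unsigned long map[2] = { 0, 0 };

    map[1] |= 1UL << 3;  /* set bit number BITS_PER_LONG + 3 */
    printf("%d\n", test_bit_demo(BITS_PER_LONG + 3, map));  /* 1 */
    printf("%d\n", test_bit_demo(5, map));                  /* 0 */
    return 0;
}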
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 29dc0c89d4af..d37593c2f438 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | static int __initdata acpi_force = 0; | 49 | static int __initdata acpi_force = 0; |
50 | 50 | u32 acpi_rsdt_forced; | |
51 | #ifdef CONFIG_ACPI | 51 | #ifdef CONFIG_ACPI |
52 | int acpi_disabled = 0; | 52 | int acpi_disabled = 0; |
53 | #else | 53 | #else |
@@ -1374,6 +1374,17 @@ static void __init acpi_process_madt(void) | |||
1374 | "Invalid BIOS MADT, disabling ACPI\n"); | 1374 | "Invalid BIOS MADT, disabling ACPI\n"); |
1375 | disable_acpi(); | 1375 | disable_acpi(); |
1376 | } | 1376 | } |
1377 | } else { | ||
1378 | /* | ||
1379 | * ACPI found no MADT, and so ACPI wants UP PIC mode. | ||
1380 | * In the event an MPS table was found, forget it. | ||
1381 | * Boot with "acpi=off" to use MPS on such a system. | ||
1382 | */ | ||
1383 | if (smp_found_config) { | ||
1384 | printk(KERN_WARNING PREFIX | ||
1385 | "No APIC-table, disabling MPS\n"); | ||
1386 | smp_found_config = 0; | ||
1387 | } | ||
1377 | } | 1388 | } |
1378 | 1389 | ||
1379 | /* | 1390 | /* |
@@ -1809,6 +1820,10 @@ static int __init parse_acpi(char *arg) | |||
1809 | disable_acpi(); | 1820 | disable_acpi(); |
1810 | acpi_ht = 1; | 1821 | acpi_ht = 1; |
1811 | } | 1822 | } |
1823 | /* acpi=rsdt use RSDT instead of XSDT */ | ||
1824 | else if (strcmp(arg, "rsdt") == 0) { | ||
1825 | acpi_rsdt_forced = 1; | ||
1826 | } | ||
1812 | /* "acpi=noirq" disables ACPI interrupt routing */ | 1827 | /* "acpi=noirq" disables ACPI interrupt routing */ |
1813 | else if (strcmp(arg, "noirq") == 0) { | 1828 | else if (strcmp(arg, "noirq") == 0) { |
1814 | acpi_noirq_set(); | 1829 | acpi_noirq_set(); |
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index c2502eb9aa83..a4805b3b4095 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c | |||
@@ -56,6 +56,7 @@ static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ | |||
56 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | 56 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; |
57 | 57 | ||
58 | #define MWAIT_SUBSTATE_MASK (0xf) | 58 | #define MWAIT_SUBSTATE_MASK (0xf) |
59 | #define MWAIT_CSTATE_MASK (0xf) | ||
59 | #define MWAIT_SUBSTATE_SIZE (4) | 60 | #define MWAIT_SUBSTATE_SIZE (4) |
60 | 61 | ||
61 | #define CPUID_MWAIT_LEAF (5) | 62 | #define CPUID_MWAIT_LEAF (5) |
@@ -98,7 +99,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, | |||
98 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); | 99 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); |
99 | 100 | ||
100 | /* Check whether this particular cx_type (in CST) is supported or not */ | 101 | /* Check whether this particular cx_type (in CST) is supported or not */ |
101 | cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1; | 102 | cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) & |
103 | MWAIT_CSTATE_MASK) + 1; | ||
102 | edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); | 104 | edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); |
103 | num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; | 105 | num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; |
104 | 106 | ||
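The extra masking above matters because the C-state type is a 4-bit field of the _CST FFH address (bits 7:4, as the shift and mask imply); without MWAIT_CSTATE_MASK, higher address bits would push the CPUID leaf 5 EDX lookup onto the wrong 4-bit sub-state field. A standalone sketch of the corrected computation with hypothetical input values:

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK  0xf
#define MWAIT_CSTATE_MASK    0xf
#define MWAIT_SUBSTATE_SIZE  4

/* Mirror of the fixed cstate.c logic: derive the C-state index from the
 * _CST FFH address, then read the matching 4-bit sub-state count out of
 * CPUID leaf 5 EDX. */
static unsigned int num_substates(unsigned int cst_address, unsigned int edx)
{
    unsigned int cstate_type =
        ((cst_address >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
    unsigned int edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);

    return edx_part & MWAIT_SUBSTATE_MASK;
}

int main(void)
{
    /* Hypothetical: address 0x20 and an EDX value advertising two
     * sub-states in the corresponding field. */
    printf("sub-states: %u\n", num_substates(0x20, 0x00002000));  /* 2 */
    return 0;
}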
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 806b4e9051b4..707c1f6f95fa 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -159,6 +159,8 @@ static int __init acpi_sleep_setup(char *str) | |||
159 | #endif | 159 | #endif |
160 | if (strncmp(str, "old_ordering", 12) == 0) | 160 | if (strncmp(str, "old_ordering", 12) == 0) |
161 | acpi_old_suspend_ordering(); | 161 | acpi_old_suspend_ordering(); |
162 | if (strncmp(str, "s4_nonvs", 8) == 0) | ||
163 | acpi_s4_no_nvs(); | ||
162 | str = strchr(str, ','); | 164 | str = strchr(str, ','); |
163 | if (str != NULL) | 165 | if (str != NULL) |
164 | str += strspn(str, ", \t"); | 166 | str += strspn(str, ", \t"); |
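The new s4_nonvs keyword slots into acpi_sleep_setup()'s scanner, which walks a comma-separated option string with strchr()/strspn(). A standalone sketch of that loop shape, with printf() standing in for the real acpi_old_suspend_ordering()/acpi_s4_no_nvs() calls:

#include <stdio.h>
#include <string.h>

/* Walk a comma/space-separated option list and act on known keywords,
 * the same scanning pattern the hunk above extends. */
static void parse_acpi_sleep_demo(char *str)
{
    while (str != NULL && *str != '\0') {
        if (strncmp(str, "old_ordering", 12) == 0)
            printf("old suspend ordering selected\n");
        if (strncmp(str, "s4_nonvs", 8) == 0)
            printf("ACPI NVS area will not be saved over S4\n");
        str = strchr(str, ',');
        if (str != NULL)
            str += strspn(str, ", \t");
    }
}

int main(void)
{
    char opts[] = "old_ordering,s4_nonvs";

    parse_acpi_sleep_demo(opts);
    return 0;
}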
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 65a13943e098..e85826829cf2 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -665,6 +665,27 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn) | |||
665 | } | 665 | } |
666 | #endif | 666 | #endif |
667 | 667 | ||
668 | #ifdef CONFIG_HIBERNATION | ||
669 | /** | ||
670 | * Mark ACPI NVS memory region, so that we can save/restore it during | ||
671 | * hibernation and the subsequent resume. | ||
672 | */ | ||
673 | static int __init e820_mark_nvs_memory(void) | ||
674 | { | ||
675 | int i; | ||
676 | |||
677 | for (i = 0; i < e820.nr_map; i++) { | ||
678 | struct e820entry *ei = &e820.map[i]; | ||
679 | |||
680 | if (ei->type == E820_NVS) | ||
681 | hibernate_nvs_register(ei->addr, ei->size); | ||
682 | } | ||
683 | |||
684 | return 0; | ||
685 | } | ||
686 | core_initcall(e820_mark_nvs_memory); | ||
687 | #endif | ||
688 | |||
668 | /* | 689 | /* |
669 | * Early reserved memory areas. | 690 | * Early reserved memory areas. |
670 | */ | 691 | */ |
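The new initcall simply walks the firmware e820 map and hands every E820_NVS range to hibernate_nvs_register() so it can be saved and restored around hibernation. A standalone sketch of the same walk over a toy map; the E820_NVS value and the sample ranges are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define E820_NVS_DEMO 4  /* assumed to match the kernel's E820_NVS type code */

struct e820entry_demo {
    uint64_t addr;
    uint64_t size;
    uint32_t type;
};

/* Stand-in for e820_mark_nvs_memory(): report every ACPI NVS range that
 * would be registered for save/restore. */
static void mark_nvs_demo(const struct e820entry_demo *map, int n)
{
    int i;

    for (i = 0; i < n; i++)
        if (map[i].type == E820_NVS_DEMO)
            printf("register NVS region 0x%llx, size 0x%llx\n",
                   (unsigned long long)map[i].addr,
                   (unsigned long long)map[i].size);
}

int main(void)
{
    struct e820entry_demo map[] = {
        { 0x00000000, 0x0009f000, 1 },             /* usable RAM */
        { 0xbf780000, 0x00020000, E820_NVS_DEMO }, /* ACPI NVS */
    };

    mark_nvs_demo(map, 2);
    return 0;
}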
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 744aa7fc49d5..76b8cd953dee 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -201,6 +201,12 @@ struct chipset { | |||
201 | void (*f)(int num, int slot, int func); | 201 | void (*f)(int num, int slot, int func); |
202 | }; | 202 | }; |
203 | 203 | ||
204 | /* | ||
205 | * Only works for devices on the root bus. If you add any devices | ||
206 | * not on bus 0 readd another loop level in early_quirks(). But | ||
207 | * be careful because at least the Nvidia quirk here relies on | ||
208 | * only matching on bus 0. | ||
209 | */ | ||
204 | static struct chipset early_qrk[] __initdata = { | 210 | static struct chipset early_qrk[] __initdata = { |
205 | { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, | 211 | { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, |
206 | PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs }, | 212 | PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs }, |
@@ -267,17 +273,17 @@ static int __init check_dev_quirk(int num, int slot, int func) | |||
267 | 273 | ||
268 | void __init early_quirks(void) | 274 | void __init early_quirks(void) |
269 | { | 275 | { |
270 | int num, slot, func; | 276 | int slot, func; |
271 | 277 | ||
272 | if (!early_pci_allowed()) | 278 | if (!early_pci_allowed()) |
273 | return; | 279 | return; |
274 | 280 | ||
275 | /* Poor man's PCI discovery */ | 281 | /* Poor man's PCI discovery */ |
276 | for (num = 0; num < 32; num++) | 282 | /* Only scan the root bus */ |
277 | for (slot = 0; slot < 32; slot++) | 283 | for (slot = 0; slot < 32; slot++) |
278 | for (func = 0; func < 8; func++) { | 284 | for (func = 0; func < 8; func++) { |
279 | /* Only probe function 0 on single fn devices */ | 285 | /* Only probe function 0 on single fn devices */ |
280 | if (check_dev_quirk(num, slot, func)) | 286 | if (check_dev_quirk(0, slot, func)) |
281 | break; | 287 | break; |
282 | } | 288 | } |
283 | } | 289 | } |
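With the outer bus loop gone, early_quirks() only ever probes bus 0, which is what the new comment warns about: a quirk device off the root bus will never be matched. A standalone sketch of the remaining scan shape, with a stub in place of check_dev_quirk():

#include <stdio.h>

/* Stub for check_dev_quirk(): a non-zero return means "single-function
 * device, skip this slot's remaining functions". Slot 2 on bus 0 is
 * pretended to be such a device. */
static int probe_stub(int num, int slot, int func)
{
    return (num == 0 && slot == 2 && func == 0) ? 1 : 0;
}

int main(void)
{
    int slot, func;

    /* Bus number is pinned to 0: only the root bus is scanned. */
    for (slot = 0; slot < 32; slot++)
        for (func = 0; func < 8; func++) {
            if (probe_stub(0, slot, func))
                break;
        }

    printf("root-bus scan done\n");
    return 0;
}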
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 98658f25f542..8fdf06e4edf9 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * @file op_model_amd.c | 2 | * @file op_model_amd.c |
3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations | 3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations |
4 | * | 4 | * |
5 | * @remark Copyright 2002-2008 OProfile authors | 5 | * @remark Copyright 2002-2009 OProfile authors |
6 | * @remark Read the file COPYING | 6 | * @remark Read the file COPYING |
7 | * | 7 | * |
8 | * @author John Levon | 8 | * @author John Levon |
@@ -10,7 +10,7 @@ | |||
10 | * @author Graydon Hoare | 10 | * @author Graydon Hoare |
11 | * @author Robert Richter <robert.richter@amd.com> | 11 | * @author Robert Richter <robert.richter@amd.com> |
12 | * @author Barry Kasindorf | 12 | * @author Barry Kasindorf |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/oprofile.h> | 15 | #include <linux/oprofile.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
@@ -60,53 +60,10 @@ static unsigned long reset_value[NUM_COUNTERS]; | |||
60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ | 60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ |
61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ | 61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ |
62 | 62 | ||
63 | /* Codes used in cpu_buffer.c */ | 63 | #define IBS_FETCH_SIZE 6 |
64 | /* This produces duplicate code, need to be fixed */ | 64 | #define IBS_OP_SIZE 12 |
65 | #define IBS_FETCH_BEGIN 3 | ||
66 | #define IBS_OP_BEGIN 4 | ||
67 | |||
68 | /* | ||
69 | * The function interface needs to be fixed, something like add | ||
70 | * data. Should then be added to linux/oprofile.h. | ||
71 | */ | ||
72 | extern void | ||
73 | oprofile_add_ibs_sample(struct pt_regs * const regs, | ||
74 | unsigned int * const ibs_sample, int ibs_code); | ||
75 | |||
76 | struct ibs_fetch_sample { | ||
77 | /* MSRC001_1031 IBS Fetch Linear Address Register */ | ||
78 | unsigned int ibs_fetch_lin_addr_low; | ||
79 | unsigned int ibs_fetch_lin_addr_high; | ||
80 | /* MSRC001_1030 IBS Fetch Control Register */ | ||
81 | unsigned int ibs_fetch_ctl_low; | ||
82 | unsigned int ibs_fetch_ctl_high; | ||
83 | /* MSRC001_1032 IBS Fetch Physical Address Register */ | ||
84 | unsigned int ibs_fetch_phys_addr_low; | ||
85 | unsigned int ibs_fetch_phys_addr_high; | ||
86 | }; | ||
87 | |||
88 | struct ibs_op_sample { | ||
89 | /* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */ | ||
90 | unsigned int ibs_op_rip_low; | ||
91 | unsigned int ibs_op_rip_high; | ||
92 | /* MSRC001_1035 IBS Op Data Register */ | ||
93 | unsigned int ibs_op_data1_low; | ||
94 | unsigned int ibs_op_data1_high; | ||
95 | /* MSRC001_1036 IBS Op Data 2 Register */ | ||
96 | unsigned int ibs_op_data2_low; | ||
97 | unsigned int ibs_op_data2_high; | ||
98 | /* MSRC001_1037 IBS Op Data 3 Register */ | ||
99 | unsigned int ibs_op_data3_low; | ||
100 | unsigned int ibs_op_data3_high; | ||
101 | /* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */ | ||
102 | unsigned int ibs_dc_linear_low; | ||
103 | unsigned int ibs_dc_linear_high; | ||
104 | /* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */ | ||
105 | unsigned int ibs_dc_phys_low; | ||
106 | unsigned int ibs_dc_phys_high; | ||
107 | }; | ||
108 | 65 | ||
109 | static int ibs_allowed; /* AMD Family10h and later */ | 66 | static int has_ibs; /* AMD Family10h and later */ |
110 | 67 | ||
111 | struct op_ibs_config { | 68 | struct op_ibs_config { |
112 | unsigned long op_enabled; | 69 | unsigned long op_enabled; |
@@ -197,31 +154,29 @@ static inline int | |||
197 | op_amd_handle_ibs(struct pt_regs * const regs, | 154 | op_amd_handle_ibs(struct pt_regs * const regs, |
198 | struct op_msrs const * const msrs) | 155 | struct op_msrs const * const msrs) |
199 | { | 156 | { |
200 | unsigned int low, high; | 157 | u32 low, high; |
201 | struct ibs_fetch_sample ibs_fetch; | 158 | u64 msr; |
202 | struct ibs_op_sample ibs_op; | 159 | struct op_entry entry; |
203 | 160 | ||
204 | if (!ibs_allowed) | 161 | if (!has_ibs) |
205 | return 1; | 162 | return 1; |
206 | 163 | ||
207 | if (ibs_config.fetch_enabled) { | 164 | if (ibs_config.fetch_enabled) { |
208 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 165 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
209 | if (high & IBS_FETCH_HIGH_VALID_BIT) { | 166 | if (high & IBS_FETCH_HIGH_VALID_BIT) { |
210 | ibs_fetch.ibs_fetch_ctl_high = high; | 167 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); |
211 | ibs_fetch.ibs_fetch_ctl_low = low; | 168 | oprofile_write_reserve(&entry, regs, msr, |
212 | rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high); | 169 | IBS_FETCH_CODE, IBS_FETCH_SIZE); |
213 | ibs_fetch.ibs_fetch_lin_addr_high = high; | 170 | oprofile_add_data(&entry, (u32)msr); |
214 | ibs_fetch.ibs_fetch_lin_addr_low = low; | 171 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
215 | rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high); | 172 | oprofile_add_data(&entry, low); |
216 | ibs_fetch.ibs_fetch_phys_addr_high = high; | 173 | oprofile_add_data(&entry, high); |
217 | ibs_fetch.ibs_fetch_phys_addr_low = low; | 174 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); |
218 | 175 | oprofile_add_data(&entry, (u32)msr); | |
219 | oprofile_add_ibs_sample(regs, | 176 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
220 | (unsigned int *)&ibs_fetch, | 177 | oprofile_write_commit(&entry); |
221 | IBS_FETCH_BEGIN); | ||
222 | 178 | ||
223 | /* reenable the IRQ */ | 179 | /* reenable the IRQ */ |
224 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
225 | high &= ~IBS_FETCH_HIGH_VALID_BIT; | 180 | high &= ~IBS_FETCH_HIGH_VALID_BIT; |
226 | high |= IBS_FETCH_HIGH_ENABLE; | 181 | high |= IBS_FETCH_HIGH_ENABLE; |
227 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; | 182 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; |
@@ -232,30 +187,29 @@ op_amd_handle_ibs(struct pt_regs * const regs, | |||
232 | if (ibs_config.op_enabled) { | 187 | if (ibs_config.op_enabled) { |
233 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | 188 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); |
234 | if (low & IBS_OP_LOW_VALID_BIT) { | 189 | if (low & IBS_OP_LOW_VALID_BIT) { |
235 | rdmsr(MSR_AMD64_IBSOPRIP, low, high); | 190 | rdmsrl(MSR_AMD64_IBSOPRIP, msr); |
236 | ibs_op.ibs_op_rip_low = low; | 191 | oprofile_write_reserve(&entry, regs, msr, |
237 | ibs_op.ibs_op_rip_high = high; | 192 | IBS_OP_CODE, IBS_OP_SIZE); |
238 | rdmsr(MSR_AMD64_IBSOPDATA, low, high); | 193 | oprofile_add_data(&entry, (u32)msr); |
239 | ibs_op.ibs_op_data1_low = low; | 194 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
240 | ibs_op.ibs_op_data1_high = high; | 195 | rdmsrl(MSR_AMD64_IBSOPDATA, msr); |
241 | rdmsr(MSR_AMD64_IBSOPDATA2, low, high); | 196 | oprofile_add_data(&entry, (u32)msr); |
242 | ibs_op.ibs_op_data2_low = low; | 197 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
243 | ibs_op.ibs_op_data2_high = high; | 198 | rdmsrl(MSR_AMD64_IBSOPDATA2, msr); |
244 | rdmsr(MSR_AMD64_IBSOPDATA3, low, high); | 199 | oprofile_add_data(&entry, (u32)msr); |
245 | ibs_op.ibs_op_data3_low = low; | 200 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
246 | ibs_op.ibs_op_data3_high = high; | 201 | rdmsrl(MSR_AMD64_IBSOPDATA3, msr); |
247 | rdmsr(MSR_AMD64_IBSDCLINAD, low, high); | 202 | oprofile_add_data(&entry, (u32)msr); |
248 | ibs_op.ibs_dc_linear_low = low; | 203 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
249 | ibs_op.ibs_dc_linear_high = high; | 204 | rdmsrl(MSR_AMD64_IBSDCLINAD, msr); |
250 | rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high); | 205 | oprofile_add_data(&entry, (u32)msr); |
251 | ibs_op.ibs_dc_phys_low = low; | 206 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
252 | ibs_op.ibs_dc_phys_high = high; | 207 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); |
208 | oprofile_add_data(&entry, (u32)msr); | ||
209 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
210 | oprofile_write_commit(&entry); | ||
253 | 211 | ||
254 | /* reenable the IRQ */ | 212 | /* reenable the IRQ */ |
255 | oprofile_add_ibs_sample(regs, | ||
256 | (unsigned int *)&ibs_op, | ||
257 | IBS_OP_BEGIN); | ||
258 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
259 | high = 0; | 213 | high = 0; |
260 | low &= ~IBS_OP_LOW_VALID_BIT; | 214 | low &= ~IBS_OP_LOW_VALID_BIT; |
261 | low |= IBS_OP_LOW_ENABLE; | 215 | low |= IBS_OP_LOW_ENABLE; |
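The rewrite above drops the fixed ibs_fetch_sample/ibs_op_sample structs in favour of oprofile's reserve/add/commit interface: reserve room for a sample with a given code and 32-bit word count, append each MSR as two words, then commit. A kernel-context sketch of that pattern (not a standalone program; the helper name and the two-word payload are illustrative):

#include <linux/oprofile.h>

/* Append one 64-bit payload as a two-word extended sample, the same
 * reserve/add/commit sequence the IBS handlers now use. */
static void demo_add_two_word_sample(struct pt_regs *regs, u64 ip,
                                     u64 payload, unsigned long code)
{
    struct op_entry entry;

    oprofile_write_reserve(&entry, regs, ip, code, 2);
    oprofile_add_data(&entry, (u32)payload);
    oprofile_add_data(&entry, (u32)(payload >> 32));
    oprofile_write_commit(&entry);
}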
@@ -305,14 +259,14 @@ static void op_amd_start(struct op_msrs const * const msrs) | |||
305 | } | 259 | } |
306 | 260 | ||
307 | #ifdef CONFIG_OPROFILE_IBS | 261 | #ifdef CONFIG_OPROFILE_IBS |
308 | if (ibs_allowed && ibs_config.fetch_enabled) { | 262 | if (has_ibs && ibs_config.fetch_enabled) { |
309 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | 263 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; |
310 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ | 264 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ |
311 | + IBS_FETCH_HIGH_ENABLE; | 265 | + IBS_FETCH_HIGH_ENABLE; |
312 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 266 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
313 | } | 267 | } |
314 | 268 | ||
315 | if (ibs_allowed && ibs_config.op_enabled) { | 269 | if (has_ibs && ibs_config.op_enabled) { |
316 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) | 270 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) |
317 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ | 271 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ |
318 | + IBS_OP_LOW_ENABLE; | 272 | + IBS_OP_LOW_ENABLE; |
@@ -341,14 +295,14 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
341 | } | 295 | } |
342 | 296 | ||
343 | #ifdef CONFIG_OPROFILE_IBS | 297 | #ifdef CONFIG_OPROFILE_IBS |
344 | if (ibs_allowed && ibs_config.fetch_enabled) { | 298 | if (has_ibs && ibs_config.fetch_enabled) { |
345 | /* clear max count and enable */ | 299 | /* clear max count and enable */ |
346 | low = 0; | 300 | low = 0; |
347 | high = 0; | 301 | high = 0; |
348 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 302 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
349 | } | 303 | } |
350 | 304 | ||
351 | if (ibs_allowed && ibs_config.op_enabled) { | 305 | if (has_ibs && ibs_config.op_enabled) { |
352 | /* clear max count and enable */ | 306 | /* clear max count and enable */ |
353 | low = 0; | 307 | low = 0; |
354 | high = 0; | 308 | high = 0; |
@@ -409,6 +363,7 @@ static int init_ibs_nmi(void) | |||
409 | | IBSCTL_LVTOFFSETVAL); | 363 | | IBSCTL_LVTOFFSETVAL); |
410 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | 364 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); |
411 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { | 365 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { |
366 | pci_dev_put(cpu_cfg); | ||
412 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | 367 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " |
413 | "IBSCTL = 0x%08x", value); | 368 | "IBSCTL = 0x%08x", value); |
414 | return 1; | 369 | return 1; |
@@ -436,20 +391,20 @@ static int init_ibs_nmi(void) | |||
436 | /* uninitialize the APIC for the IBS interrupts if needed */ | 391 | /* uninitialize the APIC for the IBS interrupts if needed */ |
437 | static void clear_ibs_nmi(void) | 392 | static void clear_ibs_nmi(void) |
438 | { | 393 | { |
439 | if (ibs_allowed) | 394 | if (has_ibs) |
440 | on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); | 395 | on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); |
441 | } | 396 | } |
442 | 397 | ||
443 | /* initialize the APIC for the IBS interrupts if available */ | 398 | /* initialize the APIC for the IBS interrupts if available */ |
444 | static void ibs_init(void) | 399 | static void ibs_init(void) |
445 | { | 400 | { |
446 | ibs_allowed = boot_cpu_has(X86_FEATURE_IBS); | 401 | has_ibs = boot_cpu_has(X86_FEATURE_IBS); |
447 | 402 | ||
448 | if (!ibs_allowed) | 403 | if (!has_ibs) |
449 | return; | 404 | return; |
450 | 405 | ||
451 | if (init_ibs_nmi()) { | 406 | if (init_ibs_nmi()) { |
452 | ibs_allowed = 0; | 407 | has_ibs = 0; |
453 | return; | 408 | return; |
454 | } | 409 | } |
455 | 410 | ||
@@ -458,7 +413,7 @@ static void ibs_init(void) | |||
458 | 413 | ||
459 | static void ibs_exit(void) | 414 | static void ibs_exit(void) |
460 | { | 415 | { |
461 | if (!ibs_allowed) | 416 | if (!has_ibs) |
462 | return; | 417 | return; |
463 | 418 | ||
464 | clear_ibs_nmi(); | 419 | clear_ibs_nmi(); |
@@ -478,7 +433,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root) | |||
478 | if (ret) | 433 | if (ret) |
479 | return ret; | 434 | return ret; |
480 | 435 | ||
481 | if (!ibs_allowed) | 436 | if (!has_ibs) |
482 | return ret; | 437 | return ret; |
483 | 438 | ||
484 | /* model specific files */ | 439 | /* model specific files */ |
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index dcbf1be149f3..f21147f3626a 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c | |||
@@ -28,351 +28,18 @@ | |||
28 | #include <linux/async_tx.h> | 28 | #include <linux/async_tx.h> |
29 | 29 | ||
30 | #ifdef CONFIG_DMA_ENGINE | 30 | #ifdef CONFIG_DMA_ENGINE |
31 | static enum dma_state_client | 31 | static int __init async_tx_init(void) |
32 | dma_channel_add_remove(struct dma_client *client, | ||
33 | struct dma_chan *chan, enum dma_state state); | ||
34 | |||
35 | static struct dma_client async_tx_dma = { | ||
36 | .event_callback = dma_channel_add_remove, | ||
37 | /* .cap_mask == 0 defaults to all channels */ | ||
38 | }; | ||
39 | |||
40 | /** | ||
41 | * dma_cap_mask_all - enable iteration over all operation types | ||
42 | */ | ||
43 | static dma_cap_mask_t dma_cap_mask_all; | ||
44 | |||
45 | /** | ||
46 | * chan_ref_percpu - tracks channel allocations per core/opertion | ||
47 | */ | ||
48 | struct chan_ref_percpu { | ||
49 | struct dma_chan_ref *ref; | ||
50 | }; | ||
51 | |||
52 | static int channel_table_initialized; | ||
53 | static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END]; | ||
54 | |||
55 | /** | ||
56 | * async_tx_lock - protect modification of async_tx_master_list and serialize | ||
57 | * rebalance operations | ||
58 | */ | ||
59 | static spinlock_t async_tx_lock; | ||
60 | |||
61 | static LIST_HEAD(async_tx_master_list); | ||
62 | |||
63 | /* async_tx_issue_pending_all - start all transactions on all channels */ | ||
64 | void async_tx_issue_pending_all(void) | ||
65 | { | ||
66 | struct dma_chan_ref *ref; | ||
67 | |||
68 | rcu_read_lock(); | ||
69 | list_for_each_entry_rcu(ref, &async_tx_master_list, node) | ||
70 | ref->chan->device->device_issue_pending(ref->chan); | ||
71 | rcu_read_unlock(); | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(async_tx_issue_pending_all); | ||
74 | |||
75 | /* dma_wait_for_async_tx - spin wait for a transcation to complete | ||
76 | * @tx: transaction to wait on | ||
77 | */ | ||
78 | enum dma_status | ||
79 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
80 | { | ||
81 | enum dma_status status; | ||
82 | struct dma_async_tx_descriptor *iter; | ||
83 | struct dma_async_tx_descriptor *parent; | ||
84 | |||
85 | if (!tx) | ||
86 | return DMA_SUCCESS; | ||
87 | |||
88 | /* poll through the dependency chain, return when tx is complete */ | ||
89 | do { | ||
90 | iter = tx; | ||
91 | |||
92 | /* find the root of the unsubmitted dependency chain */ | ||
93 | do { | ||
94 | parent = iter->parent; | ||
95 | if (!parent) | ||
96 | break; | ||
97 | else | ||
98 | iter = parent; | ||
99 | } while (parent); | ||
100 | |||
101 | /* there is a small window for ->parent == NULL and | ||
102 | * ->cookie == -EBUSY | ||
103 | */ | ||
104 | while (iter->cookie == -EBUSY) | ||
105 | cpu_relax(); | ||
106 | |||
107 | status = dma_sync_wait(iter->chan, iter->cookie); | ||
108 | } while (status == DMA_IN_PROGRESS || (iter != tx)); | ||
109 | |||
110 | return status; | ||
111 | } | ||
112 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); | ||
113 | |||
114 | /* async_tx_run_dependencies - helper routine for dma drivers to process | ||
115 | * (start) dependent operations on their target channel | ||
116 | * @tx: transaction with dependencies | ||
117 | */ | ||
118 | void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx) | ||
119 | { | ||
120 | struct dma_async_tx_descriptor *dep = tx->next; | ||
121 | struct dma_async_tx_descriptor *dep_next; | ||
122 | struct dma_chan *chan; | ||
123 | |||
124 | if (!dep) | ||
125 | return; | ||
126 | |||
127 | chan = dep->chan; | ||
128 | |||
129 | /* keep submitting up until a channel switch is detected | ||
130 | * in that case we will be called again as a result of | ||
131 | * processing the interrupt from async_tx_channel_switch | ||
132 | */ | ||
133 | for (; dep; dep = dep_next) { | ||
134 | spin_lock_bh(&dep->lock); | ||
135 | dep->parent = NULL; | ||
136 | dep_next = dep->next; | ||
137 | if (dep_next && dep_next->chan == chan) | ||
138 | dep->next = NULL; /* ->next will be submitted */ | ||
139 | else | ||
140 | dep_next = NULL; /* submit current dep and terminate */ | ||
141 | spin_unlock_bh(&dep->lock); | ||
142 | |||
143 | dep->tx_submit(dep); | ||
144 | } | ||
145 | |||
146 | chan->device->device_issue_pending(chan); | ||
147 | } | ||
148 | EXPORT_SYMBOL_GPL(async_tx_run_dependencies); | ||
149 | |||
150 | static void | ||
151 | free_dma_chan_ref(struct rcu_head *rcu) | ||
152 | { | ||
153 | struct dma_chan_ref *ref; | ||
154 | ref = container_of(rcu, struct dma_chan_ref, rcu); | ||
155 | kfree(ref); | ||
156 | } | ||
157 | |||
158 | static void | ||
159 | init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan) | ||
160 | { | ||
161 | INIT_LIST_HEAD(&ref->node); | ||
162 | INIT_RCU_HEAD(&ref->rcu); | ||
163 | ref->chan = chan; | ||
164 | atomic_set(&ref->count, 0); | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * get_chan_ref_by_cap - returns the nth channel of the given capability | ||
169 | * defaults to returning the channel with the desired capability and the | ||
170 | * lowest reference count if the index can not be satisfied | ||
171 | * @cap: capability to match | ||
172 | * @index: nth channel desired, passing -1 has the effect of forcing the | ||
173 | * default return value | ||
174 | */ | ||
175 | static struct dma_chan_ref * | ||
176 | get_chan_ref_by_cap(enum dma_transaction_type cap, int index) | ||
177 | { | ||
178 | struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref; | ||
179 | |||
180 | rcu_read_lock(); | ||
181 | list_for_each_entry_rcu(ref, &async_tx_master_list, node) | ||
182 | if (dma_has_cap(cap, ref->chan->device->cap_mask)) { | ||
183 | if (!min_ref) | ||
184 | min_ref = ref; | ||
185 | else if (atomic_read(&ref->count) < | ||
186 | atomic_read(&min_ref->count)) | ||
187 | min_ref = ref; | ||
188 | |||
189 | if (index-- == 0) { | ||
190 | ret_ref = ref; | ||
191 | break; | ||
192 | } | ||
193 | } | ||
194 | rcu_read_unlock(); | ||
195 | |||
196 | if (!ret_ref) | ||
197 | ret_ref = min_ref; | ||
198 | |||
199 | if (ret_ref) | ||
200 | atomic_inc(&ret_ref->count); | ||
201 | |||
202 | return ret_ref; | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * async_tx_rebalance - redistribute the available channels, optimize | ||
207 | * for cpu isolation in the SMP case, and opertaion isolation in the | ||
208 | * uniprocessor case | ||
209 | */ | ||
210 | static void async_tx_rebalance(void) | ||
211 | { | ||
212 | int cpu, cap, cpu_idx = 0; | ||
213 | unsigned long flags; | ||
214 | |||
215 | if (!channel_table_initialized) | ||
216 | return; | ||
217 | |||
218 | spin_lock_irqsave(&async_tx_lock, flags); | ||
219 | |||
220 | /* undo the last distribution */ | ||
221 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
222 | for_each_possible_cpu(cpu) { | ||
223 | struct dma_chan_ref *ref = | ||
224 | per_cpu_ptr(channel_table[cap], cpu)->ref; | ||
225 | if (ref) { | ||
226 | atomic_set(&ref->count, 0); | ||
227 | per_cpu_ptr(channel_table[cap], cpu)->ref = | ||
228 | NULL; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
233 | for_each_online_cpu(cpu) { | ||
234 | struct dma_chan_ref *new; | ||
235 | if (NR_CPUS > 1) | ||
236 | new = get_chan_ref_by_cap(cap, cpu_idx++); | ||
237 | else | ||
238 | new = get_chan_ref_by_cap(cap, -1); | ||
239 | |||
240 | per_cpu_ptr(channel_table[cap], cpu)->ref = new; | ||
241 | } | ||
242 | |||
243 | spin_unlock_irqrestore(&async_tx_lock, flags); | ||
244 | } | ||
245 | |||
246 | static enum dma_state_client | ||
247 | dma_channel_add_remove(struct dma_client *client, | ||
248 | struct dma_chan *chan, enum dma_state state) | ||
249 | { | ||
250 | unsigned long found, flags; | ||
251 | struct dma_chan_ref *master_ref, *ref; | ||
252 | enum dma_state_client ack = DMA_DUP; /* default: take no action */ | ||
253 | |||
254 | switch (state) { | ||
255 | case DMA_RESOURCE_AVAILABLE: | ||
256 | found = 0; | ||
257 | rcu_read_lock(); | ||
258 | list_for_each_entry_rcu(ref, &async_tx_master_list, node) | ||
259 | if (ref->chan == chan) { | ||
260 | found = 1; | ||
261 | break; | ||
262 | } | ||
263 | rcu_read_unlock(); | ||
264 | |||
265 | pr_debug("async_tx: dma resource available [%s]\n", | ||
266 | found ? "old" : "new"); | ||
267 | |||
268 | if (!found) | ||
269 | ack = DMA_ACK; | ||
270 | else | ||
271 | break; | ||
272 | |||
273 | /* add the channel to the generic management list */ | ||
274 | master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL); | ||
275 | if (master_ref) { | ||
276 | /* keep a reference until async_tx is unloaded */ | ||
277 | dma_chan_get(chan); | ||
278 | init_dma_chan_ref(master_ref, chan); | ||
279 | spin_lock_irqsave(&async_tx_lock, flags); | ||
280 | list_add_tail_rcu(&master_ref->node, | ||
281 | &async_tx_master_list); | ||
282 | spin_unlock_irqrestore(&async_tx_lock, | ||
283 | flags); | ||
284 | } else { | ||
285 | printk(KERN_WARNING "async_tx: unable to create" | ||
286 | " new master entry in response to" | ||
287 | " a DMA_RESOURCE_ADDED event" | ||
288 | " (-ENOMEM)\n"); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | async_tx_rebalance(); | ||
293 | break; | ||
294 | case DMA_RESOURCE_REMOVED: | ||
295 | found = 0; | ||
296 | spin_lock_irqsave(&async_tx_lock, flags); | ||
297 | list_for_each_entry(ref, &async_tx_master_list, node) | ||
298 | if (ref->chan == chan) { | ||
299 | /* permit backing devices to go away */ | ||
300 | dma_chan_put(ref->chan); | ||
301 | list_del_rcu(&ref->node); | ||
302 | call_rcu(&ref->rcu, free_dma_chan_ref); | ||
303 | found = 1; | ||
304 | break; | ||
305 | } | ||
306 | spin_unlock_irqrestore(&async_tx_lock, flags); | ||
307 | |||
308 | pr_debug("async_tx: dma resource removed [%s]\n", | ||
309 | found ? "ours" : "not ours"); | ||
310 | |||
311 | if (found) | ||
312 | ack = DMA_ACK; | ||
313 | else | ||
314 | break; | ||
315 | |||
316 | async_tx_rebalance(); | ||
317 | break; | ||
318 | case DMA_RESOURCE_SUSPEND: | ||
319 | case DMA_RESOURCE_RESUME: | ||
320 | printk(KERN_WARNING "async_tx: does not support dma channel" | ||
321 | " suspend/resume\n"); | ||
322 | break; | ||
323 | default: | ||
324 | BUG(); | ||
325 | } | ||
326 | |||
327 | return ack; | ||
328 | } | ||
329 | |||
330 | static int __init | ||
331 | async_tx_init(void) | ||
332 | { | 32 | { |
333 | enum dma_transaction_type cap; | 33 | dmaengine_get(); |
334 | |||
335 | spin_lock_init(&async_tx_lock); | ||
336 | bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); | ||
337 | |||
338 | /* an interrupt will never be an explicit operation type. | ||
339 | * clearing this bit prevents allocation to a slot in 'channel_table' | ||
340 | */ | ||
341 | clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); | ||
342 | |||
343 | for_each_dma_cap_mask(cap, dma_cap_mask_all) { | ||
344 | channel_table[cap] = alloc_percpu(struct chan_ref_percpu); | ||
345 | if (!channel_table[cap]) | ||
346 | goto err; | ||
347 | } | ||
348 | |||
349 | channel_table_initialized = 1; | ||
350 | dma_async_client_register(&async_tx_dma); | ||
351 | dma_async_client_chan_request(&async_tx_dma); | ||
352 | 34 | ||
353 | printk(KERN_INFO "async_tx: api initialized (async)\n"); | 35 | printk(KERN_INFO "async_tx: api initialized (async)\n"); |
354 | 36 | ||
355 | return 0; | 37 | return 0; |
356 | err: | ||
357 | printk(KERN_ERR "async_tx: initialization failure\n"); | ||
358 | |||
359 | while (--cap >= 0) | ||
360 | free_percpu(channel_table[cap]); | ||
361 | |||
362 | return 1; | ||
363 | } | 38 | } |
364 | 39 | ||
365 | static void __exit async_tx_exit(void) | 40 | static void __exit async_tx_exit(void) |
366 | { | 41 | { |
367 | enum dma_transaction_type cap; | 42 | dmaengine_put(); |
368 | |||
369 | channel_table_initialized = 0; | ||
370 | |||
371 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
372 | if (channel_table[cap]) | ||
373 | free_percpu(channel_table[cap]); | ||
374 | |||
375 | dma_async_client_unregister(&async_tx_dma); | ||
376 | } | 43 | } |
377 | 44 | ||
378 | /** | 45 | /** |
@@ -387,16 +54,9 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | |||
387 | { | 54 | { |
388 | /* see if we can keep the chain on one channel */ | 55 | /* see if we can keep the chain on one channel */ |
389 | if (depend_tx && | 56 | if (depend_tx && |
390 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) | 57 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) |
391 | return depend_tx->chan; | 58 | return depend_tx->chan; |
392 | else if (likely(channel_table_initialized)) { | 59 | return dma_find_channel(tx_type); |
393 | struct dma_chan_ref *ref; | ||
394 | int cpu = get_cpu(); | ||
395 | ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref; | ||
396 | put_cpu(); | ||
397 | return ref ? ref->chan : NULL; | ||
398 | } else | ||
399 | return NULL; | ||
400 | } | 60 | } |
401 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); | 61 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); |
402 | #else | 62 | #else |
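After this rewrite async_tx no longer registers a dma_client of its own: it pins the dmaengine core with dmaengine_get() at init, releases it with dmaengine_put() at exit, and resolves a channel per operation type through dma_find_channel(). A kernel-context sketch of a client module following the same pattern (not standalone; module name and messages are illustrative):

#include <linux/module.h>
#include <linux/dmaengine.h>

/* Hold a reference on the dmaengine core for the module's lifetime and
 * look channels up by capability instead of tracking them by hand. */
static int __init demo_client_init(void)
{
    struct dma_chan *chan;

    dmaengine_get();
    chan = dma_find_channel(DMA_MEMCPY);  /* NULL if no capable channel */
    pr_info("demo: memcpy channel %s\n", chan ? "available" : "missing");
    return 0;
}

static void __exit demo_client_exit(void)
{
    dmaengine_put();
}

module_init(demo_client_init);
module_exit(demo_client_exit);
MODULE_LICENSE("GPL");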
diff --git a/drivers/Kconfig b/drivers/Kconfig index 2f557f570ade..00cf9553f740 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig | |||
@@ -107,4 +107,6 @@ source "drivers/uio/Kconfig" | |||
107 | source "drivers/xen/Kconfig" | 107 | source "drivers/xen/Kconfig" |
108 | 108 | ||
109 | source "drivers/staging/Kconfig" | 109 | source "drivers/staging/Kconfig" |
110 | |||
111 | source "drivers/platform/Kconfig" | ||
110 | endmenu | 112 | endmenu |
diff --git a/drivers/Makefile b/drivers/Makefile index 6326f4dbbdab..c1bf41737936 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -105,3 +105,4 @@ obj-$(CONFIG_OF) += of/ | |||
105 | obj-$(CONFIG_SSB) += ssb/ | 105 | obj-$(CONFIG_SSB) += ssb/ |
106 | obj-$(CONFIG_VIRTIO) += virtio/ | 106 | obj-$(CONFIG_VIRTIO) += virtio/ |
107 | obj-$(CONFIG_STAGING) += staging/ | 107 | obj-$(CONFIG_STAGING) += staging/ |
108 | obj-y += platform/ | ||
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index b0243fd55ac0..d7f9839ba264 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -196,90 +196,6 @@ config ACPI_NUMA | |||
196 | depends on (X86 || IA64) | 196 | depends on (X86 || IA64) |
197 | default y if IA64_GENERIC || IA64_SGI_SN2 | 197 | default y if IA64_GENERIC || IA64_SGI_SN2 |
198 | 198 | ||
199 | config ACPI_WMI | ||
200 | tristate "WMI (EXPERIMENTAL)" | ||
201 | depends on X86 | ||
202 | depends on EXPERIMENTAL | ||
203 | help | ||
204 | This driver adds support for the ACPI-WMI (Windows Management | ||
205 | Instrumentation) mapper device (PNP0C14) found on some systems. | ||
206 | |||
207 | ACPI-WMI is a proprietary extension to ACPI to expose parts of the | ||
208 | ACPI firmware to userspace - this is done through various vendor | ||
209 | defined methods and data blocks in a PNP0C14 device, which are then | ||
210 | made available for userspace to call. | ||
211 | |||
212 | The implementation of this in Linux currently only exposes this to | ||
213 | other kernel space drivers. | ||
214 | |||
215 | This driver is a required dependency to build the firmware specific | ||
216 | drivers needed on many machines, including Acer and HP laptops. | ||
217 | |||
218 | It is safe to enable this driver even if your DSDT doesn't define | ||
219 | any ACPI-WMI devices. | ||
220 | |||
221 | config ACPI_ASUS | ||
222 | tristate "ASUS/Medion Laptop Extras" | ||
223 | depends on X86 | ||
224 | select BACKLIGHT_CLASS_DEVICE | ||
225 | ---help--- | ||
226 | This driver provides support for extra features of ACPI-compatible | ||
227 | ASUS laptops. As some of Medion laptops are made by ASUS, it may also | ||
228 | support some Medion laptops (such as 9675 for example). It makes all | ||
229 | the extra buttons generate standard ACPI events that go through | ||
230 | /proc/acpi/events, and (on some models) adds support for changing the | ||
231 | display brightness and output, switching the LCD backlight on and off, | ||
232 | and most importantly, allows you to blink those fancy LEDs intended | ||
233 | for reporting mail and wireless status. | ||
234 | |||
235 | Note: display switching code is currently considered EXPERIMENTAL, | ||
236 | toying with these values may even lock your machine. | ||
237 | |||
238 | All settings are changed via /proc/acpi/asus directory entries. Owner | ||
239 | and group for these entries can be set with asus_uid and asus_gid | ||
240 | parameters. | ||
241 | |||
242 | More information and a userspace daemon for handling the extra buttons | ||
243 | at <http://sourceforge.net/projects/acpi4asus/>. | ||
244 | |||
245 | If you have an ACPI-compatible ASUS laptop, say Y or M here. This | ||
246 | driver is still under development, so if your laptop is unsupported or | ||
247 | something works not quite as expected, please use the mailing list | ||
248 | available on the above page (acpi4asus-user@lists.sourceforge.net). | ||
249 | |||
250 | NOTE: This driver is deprecated and will probably be removed soon, | ||
251 | use asus-laptop instead. | ||
252 | |||
253 | config ACPI_TOSHIBA | ||
254 | tristate "Toshiba Laptop Extras" | ||
255 | depends on X86 && INPUT | ||
256 | select INPUT_POLLDEV | ||
257 | select NET | ||
258 | select RFKILL | ||
259 | select BACKLIGHT_CLASS_DEVICE | ||
260 | ---help--- | ||
261 | This driver adds support for access to certain system settings | ||
262 | on "legacy free" Toshiba laptops. These laptops can be recognized by | ||
263 | their lack of a BIOS setup menu and APM support. | ||
264 | |||
265 | On these machines, all system configuration is handled through the | ||
266 | ACPI. This driver is required for access to controls not covered | ||
267 | by the general ACPI drivers, such as LCD brightness, video output, | ||
268 | etc. | ||
269 | |||
270 | This driver differs from the non-ACPI Toshiba laptop driver (located | ||
271 | under "Processor type and features") in several aspects. | ||
272 | Configuration is accessed by reading and writing text files in the | ||
273 | /proc tree instead of by program interface to /dev. Furthermore, no | ||
274 | power management functions are exposed, as those are handled by the | ||
275 | general ACPI drivers. | ||
276 | |||
277 | More information about this driver is available at | ||
278 | <http://memebeam.org/toys/ToshibaAcpiDriver>. | ||
279 | |||
280 | If you have a legacy free Toshiba laptop (such as the Libretto L1 | ||
281 | series), say Y. | ||
282 | |||
283 | config ACPI_CUSTOM_DSDT_FILE | 199 | config ACPI_CUSTOM_DSDT_FILE |
284 | string "Custom DSDT Table file to include" | 200 | string "Custom DSDT Table file to include" |
285 | default "" | 201 | default "" |
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 3c0c93300f12..d80f4cc2e0da 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
@@ -2,15 +2,8 @@ | |||
2 | # Makefile for the Linux ACPI interpreter | 2 | # Makefile for the Linux ACPI interpreter |
3 | # | 3 | # |
4 | 4 | ||
5 | export ACPI_CFLAGS | 5 | ccflags-y := -Os |
6 | 6 | ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT | |
7 | ACPI_CFLAGS := -Os | ||
8 | |||
9 | ifdef CONFIG_ACPI_DEBUG | ||
10 | ACPI_CFLAGS += -DACPI_DEBUG_OUTPUT | ||
11 | endif | ||
12 | |||
13 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
14 | 7 | ||
15 | # | 8 | # |
16 | # ACPI Boot-Time Table Parsing | 9 | # ACPI Boot-Time Table Parsing |
@@ -22,9 +15,13 @@ obj-$(CONFIG_X86) += blacklist.o | |||
22 | # ACPI Core Subsystem (Interpreter) | 15 | # ACPI Core Subsystem (Interpreter) |
23 | # | 16 | # |
24 | obj-y += osl.o utils.o reboot.o\ | 17 | obj-y += osl.o utils.o reboot.o\ |
25 | dispatcher/ events/ executer/ hardware/ \ | 18 | acpica/ |
26 | namespace/ parser/ resources/ tables/ \ | 19 | |
27 | utilities/ | 20 | # sleep related files |
21 | obj-y += wakeup.o | ||
22 | obj-y += main.o | ||
23 | obj-$(CONFIG_ACPI_SLEEP) += proc.o | ||
24 | |||
28 | 25 | ||
29 | # | 26 | # |
30 | # ACPI Bus and Device Drivers | 27 | # ACPI Bus and Device Drivers |
@@ -35,7 +32,6 @@ ifdef CONFIG_CPU_FREQ | |||
35 | processor-objs += processor_perflib.o | 32 | processor-objs += processor_perflib.o |
36 | endif | 33 | endif |
37 | 34 | ||
38 | obj-y += sleep/ | ||
39 | obj-y += bus.o glue.o | 35 | obj-y += bus.o glue.o |
40 | obj-y += scan.o | 36 | obj-y += scan.o |
41 | # Keep EC driver first. Initialization of others depend on it. | 37 | # Keep EC driver first. Initialization of others depend on it. |
@@ -59,9 +55,6 @@ obj-y += power.o | |||
59 | obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o | 55 | obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o |
60 | obj-$(CONFIG_ACPI_DEBUG) += debug.o | 56 | obj-$(CONFIG_ACPI_DEBUG) += debug.o |
61 | obj-$(CONFIG_ACPI_NUMA) += numa.o | 57 | obj-$(CONFIG_ACPI_NUMA) += numa.o |
62 | obj-$(CONFIG_ACPI_WMI) += wmi.o | ||
63 | obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o | ||
64 | obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o | ||
65 | obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o | 58 | obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o |
66 | obj-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o | 59 | obj-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o |
67 | obj-$(CONFIG_ACPI_SBS) += sbshc.o | 60 | obj-$(CONFIG_ACPI_SBS) += sbshc.o |
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile new file mode 100644 index 000000000000..3f23298ee3fd --- /dev/null +++ b/drivers/acpi/acpica/Makefile | |||
@@ -0,0 +1,44 @@ | |||
1 | # | ||
2 | # Makefile for ACPICA Core interpreter | ||
3 | # | ||
4 | |||
5 | ccflags-y := -Os | ||
6 | ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT | ||
7 | |||
8 | obj-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ | ||
9 | dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \ | ||
10 | dsinit.o | ||
11 | |||
12 | obj-y += evevent.o evregion.o evsci.o evxfevnt.o \ | ||
13 | evmisc.o evrgnini.o evxface.o evxfregn.o \ | ||
14 | evgpe.o evgpeblk.o | ||
15 | |||
16 | obj-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ | ||
17 | exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ | ||
18 | excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \ | ||
19 | exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o | ||
20 | |||
21 | obj-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o | ||
22 | |||
23 | obj-$(ACPI_FUTURE_USAGE) += hwtimer.o | ||
24 | |||
25 | obj-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \ | ||
26 | nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \ | ||
27 | nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \ | ||
28 | nsparse.o nspredef.o | ||
29 | |||
30 | obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o | ||
31 | |||
32 | obj-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \ | ||
33 | psopcode.o psscope.o psutils.o psxface.o | ||
34 | |||
35 | obj-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \ | ||
36 | rscalc.o rsirq.o rsmemory.o rsutils.o | ||
37 | |||
38 | obj-$(ACPI_FUTURE_USAGE) += rsdump.o | ||
39 | |||
40 | obj-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o | ||
41 | |||
42 | obj-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \ | ||
43 | utcopy.o utdelete.o utglobal.o utmath.o utobject.o \ | ||
44 | utstate.o utmutex.o utobject.o utresrc.o | ||
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h new file mode 100644 index 000000000000..3b20786cbb0d --- /dev/null +++ b/drivers/acpi/acpica/accommon.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Name: accommon.h - Common include files for generation of ACPICA source | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #ifndef __ACCOMMON_H__ | ||
45 | #define __ACCOMMON_H__ | ||
46 | |||
47 | /* | ||
48 | * Common set of includes for all ACPICA source files. | ||
49 | * We put them here because we don't want to duplicate them | ||
50 | * in the source code again and again. | ||
51 | * | ||
52 | * Note: The order of these include files is important. | ||
53 | */ | ||
54 | #include "acconfig.h" /* Global configuration constants */ | ||
55 | #include "acmacros.h" /* C macros */ | ||
56 | #include "aclocal.h" /* Internal data types */ | ||
57 | #include "acobject.h" /* ACPI internal object */ | ||
58 | #include "acstruct.h" /* Common structures */ | ||
59 | #include "acglobal.h" /* All global variables */ | ||
60 | #include "achware.h" /* Hardware defines and interfaces */ | ||
61 | #include "acutils.h" /* Utility interfaces */ | ||
62 | |||
63 | #endif /* __ACCOMMON_H__ */ | ||
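accommon.h exists so every ACPICA source file can pull in the core-private headers in one place and in the required order. A sketch of what the top of an ACPICA .c file looks like after the split; the third include is just an example of a module-specific header:

#include <acpi/acpi.h>   /* public ACPICA interfaces */
#include "accommon.h"    /* core-private common set, order handled here */
#include "acnamesp.h"    /* example: only needed by namespace code */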
diff --git a/include/acpi/acconfig.h b/drivers/acpi/acpica/acconfig.h index 29feee27f0ea..e6777fb883d2 100644 --- a/include/acpi/acconfig.h +++ b/drivers/acpi/acpica/acconfig.h | |||
@@ -61,10 +61,6 @@ | |||
61 | * | 61 | * |
62 | */ | 62 | */ |
63 | 63 | ||
64 | /* Current ACPICA subsystem version in YYYYMMDD format */ | ||
65 | |||
66 | #define ACPI_CA_VERSION 0x20080926 | ||
67 | |||
68 | /* | 64 | /* |
69 | * OS name, used for the _OS object. The _OS object is essentially obsolete, | 65 | * OS name, used for the _OS object. The _OS object is essentially obsolete, |
70 | * but there is a large base of ASL/AML code in existing machines that check | 66 | * but there is a large base of ASL/AML code in existing machines that check |
@@ -119,6 +115,10 @@ | |||
119 | 115 | ||
120 | #define ACPI_ROOT_TABLE_SIZE_INCREMENT 4 | 116 | #define ACPI_ROOT_TABLE_SIZE_INCREMENT 4 |
121 | 117 | ||
118 | /* Maximum number of While() loop iterations before forced abort */ | ||
119 | |||
120 | #define ACPI_MAX_LOOP_ITERATIONS 0xFFFF | ||
121 | |||
122 | /****************************************************************************** | 122 | /****************************************************************************** |
123 | * | 123 | * |
124 | * ACPI Specification constants (Do not change unless the specification changes) | 124 | * ACPI Specification constants (Do not change unless the specification changes) |
diff --git a/include/acpi/acdebug.h b/drivers/acpi/acpica/acdebug.h index 62c59df3b86c..62c59df3b86c 100644 --- a/include/acpi/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h | |||
diff --git a/include/acpi/acdispat.h b/drivers/acpi/acpica/acdispat.h index 6291904be01e..6291904be01e 100644 --- a/include/acpi/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h | |||
diff --git a/include/acpi/acevents.h b/drivers/acpi/acpica/acevents.h index d5d099bf349c..07e20135f01b 100644 --- a/include/acpi/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
@@ -93,11 +93,13 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | |||
93 | */ | 93 | */ |
94 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); | 94 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); |
95 | 95 | ||
96 | acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback); | 96 | acpi_status |
97 | acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); | ||
97 | 98 | ||
98 | acpi_status | 99 | acpi_status |
99 | acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 100 | acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
100 | struct acpi_gpe_block_info *gpe_block); | 101 | struct acpi_gpe_block_info *gpe_block, |
102 | void *context); | ||
101 | 103 | ||
102 | acpi_status | 104 | acpi_status |
103 | acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | 105 | acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, |
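The prototype changes above thread a caller-supplied void *context through the GPE-list walkers, so every acpi_gpe_callback now takes it as a third argument. A sketch of a conforming callback and a matching walk call (ACPICA-internal context; the counting body is illustrative):

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

/* Count GPE blocks; the context pointer carries the caller's counter. */
static acpi_status
demo_count_gpe_blocks(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                      struct acpi_gpe_block_info *gpe_block, void *context)
{
    u32 *count = context;

    (*count)++;
    return (AE_OK);
}

/* Caller side:
 *     u32 blocks = 0;
 *     (void)acpi_ev_walk_gpe_list(demo_count_gpe_blocks, &blocks);
 */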
diff --git a/include/acpi/acglobal.h b/drivers/acpi/acpica/acglobal.h index 15dda46b70d1..ddb40f5c68fc 100644 --- a/include/acpi/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
@@ -102,6 +102,12 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE); | |||
102 | */ | 102 | */ |
103 | ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE); | 103 | ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE); |
104 | 104 | ||
105 | /* | ||
106 | * Optionally use default values for the ACPI register widths. Set this to | ||
107 | * TRUE to use the defaults, if an FADT contains incorrect widths/lengths. | ||
108 | */ | ||
109 | ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE); | ||
110 | |||
105 | /***************************************************************************** | 111 | /***************************************************************************** |
106 | * | 112 | * |
107 | * Debug support | 113 | * Debug support |
@@ -140,7 +146,7 @@ ACPI_EXTERN u32 acpi_gbl_trace_flags; | |||
140 | */ | 146 | */ |
141 | ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list; | 147 | ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list; |
142 | ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT; | 148 | ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT; |
143 | extern u8 acpi_gbl_permanent_mmap; | 149 | ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS; |
144 | 150 | ||
145 | /* These addresses are calculated from FADT address values */ | 151 | /* These addresses are calculated from FADT address values */ |
146 | 152 | ||
@@ -326,6 +332,7 @@ ACPI_EXTERN struct acpi_fixed_event_handler | |||
326 | ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; | 332 | ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; |
327 | ACPI_EXTERN struct acpi_gpe_block_info | 333 | ACPI_EXTERN struct acpi_gpe_block_info |
328 | *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; | 334 | *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; |
335 | ACPI_EXTERN u32 acpi_current_gpe_count; | ||
329 | 336 | ||
330 | /***************************************************************************** | 337 | /***************************************************************************** |
331 | * | 338 | * |
diff --git a/include/acpi/achware.h b/drivers/acpi/acpica/achware.h index 97a72b193276..58c69dc49ab4 100644 --- a/include/acpi/achware.h +++ b/drivers/acpi/acpica/achware.h | |||
@@ -44,11 +44,7 @@ | |||
44 | #ifndef __ACHWARE_H__ | 44 | #ifndef __ACHWARE_H__ |
45 | #define __ACHWARE_H__ | 45 | #define __ACHWARE_H__ |
46 | 46 | ||
47 | /* PM Timer ticks per second (HZ) */ | 47 | /* Values for the _SST predefined method */ |
48 | |||
49 | #define PM_TIMER_FREQUENCY 3579545 | ||
50 | |||
51 | /* Values for the _SST reserved method */ | ||
52 | 48 | ||
53 | #define ACPI_SST_INDICATOR_OFF 0 | 49 | #define ACPI_SST_INDICATOR_OFF 0 |
54 | #define ACPI_SST_WORKING 1 | 50 | #define ACPI_SST_WORKING 1 |
@@ -56,8 +52,6 @@ | |||
56 | #define ACPI_SST_SLEEPING 3 | 52 | #define ACPI_SST_SLEEPING 3 |
57 | #define ACPI_SST_SLEEP_CONTEXT 4 | 53 | #define ACPI_SST_SLEEP_CONTEXT 4 |
58 | 54 | ||
59 | /* Prototypes */ | ||
60 | |||
61 | /* | 55 | /* |
62 | * hwacpi - high level functions | 56 | * hwacpi - high level functions |
63 | */ | 57 | */ |
@@ -75,13 +69,6 @@ acpi_hw_register_read(u32 register_id, u32 * return_value); | |||
75 | 69 | ||
76 | acpi_status acpi_hw_register_write(u32 register_id, u32 value); | 70 | acpi_status acpi_hw_register_write(u32 register_id, u32 value); |
77 | 71 | ||
78 | acpi_status | ||
79 | acpi_hw_low_level_read(u32 width, | ||
80 | u32 * value, struct acpi_generic_address *reg); | ||
81 | |||
82 | acpi_status | ||
83 | acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address *reg); | ||
84 | |||
85 | acpi_status acpi_hw_clear_acpi_status(void); | 72 | acpi_status acpi_hw_clear_acpi_status(void); |
86 | 73 | ||
87 | /* | 74 | /* |
@@ -94,13 +81,13 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info); | |||
94 | 81 | ||
95 | acpi_status | 82 | acpi_status |
96 | acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 83 | acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
97 | struct acpi_gpe_block_info *gpe_block); | 84 | struct acpi_gpe_block_info *gpe_block, void *context); |
98 | 85 | ||
99 | acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info); | 86 | acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info); |
100 | 87 | ||
101 | acpi_status | 88 | acpi_status |
102 | acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 89 | acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
103 | struct acpi_gpe_block_info *gpe_block); | 90 | struct acpi_gpe_block_info *gpe_block, void *context); |
104 | 91 | ||
105 | acpi_status | 92 | acpi_status |
106 | acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, | 93 | acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, |
@@ -114,7 +101,8 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void); | |||
114 | 101 | ||
115 | acpi_status | 102 | acpi_status |
116 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 103 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
117 | struct acpi_gpe_block_info *gpe_block); | 104 | struct acpi_gpe_block_info *gpe_block, |
105 | void *context); | ||
118 | 106 | ||
119 | #ifdef ACPI_FUTURE_USAGE | 107 | #ifdef ACPI_FUTURE_USAGE |
120 | /* | 108 | /* |
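Note on the achware.h hunks above: with the PM-timer constant and the low-level register accessors removed, the remaining GPE block helpers gain the same (xrupt_info, gpe_block, context) shape as the acpi_gpe_callback typedef introduced in aclocal.h below, so they can be handed directly to acpi_ev_walk_gpe_list(). A sketch of the resulting call pattern; passing NULL as the context is an assumption, since these particular helpers do not need it:

acpi_status status;

/* Disable every GPE block, then re-enable only the runtime GPEs. */
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
if (ACPI_SUCCESS(status)) {
        status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block, NULL);
}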
diff --git a/include/acpi/acinterp.h b/drivers/acpi/acpica/acinterp.h index e8db7a3143a5..e8db7a3143a5 100644 --- a/include/acpi/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h | |||
diff --git a/include/acpi/aclocal.h b/drivers/acpi/acpica/aclocal.h index ecab527cf78e..492d02761bb7 100644 --- a/include/acpi/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -46,8 +46,6 @@ | |||
46 | 46 | ||
47 | /* acpisrc:struct_defs -- for acpisrc conversion */ | 47 | /* acpisrc:struct_defs -- for acpisrc conversion */ |
48 | 48 | ||
49 | #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ | ||
50 | #define ACPI_DO_NOT_WAIT 0 | ||
51 | #define ACPI_SERIALIZED 0xFF | 49 | #define ACPI_SERIALIZED 0xFF |
52 | 50 | ||
53 | typedef u32 acpi_mutex_handle; | 51 | typedef u32 acpi_mutex_handle; |
@@ -120,11 +118,6 @@ static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = { | |||
120 | #define ACPI_MAX_LOCK 1 | 118 | #define ACPI_MAX_LOCK 1 |
121 | #define ACPI_NUM_LOCK ACPI_MAX_LOCK+1 | 119 | #define ACPI_NUM_LOCK ACPI_MAX_LOCK+1 |
122 | 120 | ||
123 | /* Owner IDs are used to track namespace nodes for selective deletion */ | ||
124 | |||
125 | typedef u8 acpi_owner_id; | ||
126 | #define ACPI_OWNER_ID_MAX 0xFF | ||
127 | |||
128 | /* This Thread ID means that the mutex is not in use (unlocked) */ | 121 | /* This Thread ID means that the mutex is not in use (unlocked) */ |
129 | 122 | ||
130 | #define ACPI_MUTEX_NOT_ACQUIRED (acpi_thread_id) 0 | 123 | #define ACPI_MUTEX_NOT_ACQUIRED (acpi_thread_id) 0 |
@@ -165,11 +158,6 @@ typedef enum { | |||
165 | ACPI_IMODE_EXECUTE = 0x03 | 158 | ACPI_IMODE_EXECUTE = 0x03 |
166 | } acpi_interpreter_mode; | 159 | } acpi_interpreter_mode; |
167 | 160 | ||
168 | union acpi_name_union { | ||
169 | u32 integer; | ||
170 | char ascii[4]; | ||
171 | }; | ||
172 | |||
173 | /* | 161 | /* |
174 | * The Namespace Node describes a named object that appears in the AML. | 162 | * The Namespace Node describes a named object that appears in the AML. |
175 | * descriptor_type is used to differentiate between internal descriptors. | 163 | * descriptor_type is used to differentiate between internal descriptors. |
@@ -216,26 +204,6 @@ struct acpi_namespace_node { | |||
216 | #define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */ | 204 | #define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */ |
217 | #define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */ | 205 | #define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */ |
218 | 206 | ||
219 | /* | ||
220 | * ACPI Table Descriptor. One per ACPI table | ||
221 | */ | ||
222 | struct acpi_table_desc { | ||
223 | acpi_physical_address address; | ||
224 | struct acpi_table_header *pointer; | ||
225 | u32 length; /* Length fixed at 32 bits */ | ||
226 | union acpi_name_union signature; | ||
227 | acpi_owner_id owner_id; | ||
228 | u8 flags; | ||
229 | }; | ||
230 | |||
231 | /* Flags for above */ | ||
232 | |||
233 | #define ACPI_TABLE_ORIGIN_UNKNOWN (0) | ||
234 | #define ACPI_TABLE_ORIGIN_MAPPED (1) | ||
235 | #define ACPI_TABLE_ORIGIN_ALLOCATED (2) | ||
236 | #define ACPI_TABLE_ORIGIN_MASK (3) | ||
237 | #define ACPI_TABLE_IS_LOADED (4) | ||
238 | |||
239 | /* One internal RSDT for table management */ | 207 | /* One internal RSDT for table management */ |
240 | 208 | ||
241 | struct acpi_internal_rsdt { | 209 | struct acpi_internal_rsdt { |
@@ -266,15 +234,6 @@ struct acpi_ns_search_data { | |||
266 | struct acpi_namespace_node *node; | 234 | struct acpi_namespace_node *node; |
267 | }; | 235 | }; |
268 | 236 | ||
269 | /* | ||
270 | * Predefined Namespace items | ||
271 | */ | ||
272 | struct acpi_predefined_names { | ||
273 | char *name; | ||
274 | u8 type; | ||
275 | char *val; | ||
276 | }; | ||
277 | |||
278 | /* Object types used during package copies */ | 237 | /* Object types used during package copies */ |
279 | 238 | ||
280 | #define ACPI_COPY_TYPE_SIMPLE 0 | 239 | #define ACPI_COPY_TYPE_SIMPLE 0 |
@@ -487,10 +446,15 @@ struct acpi_gpe_walk_info { | |||
487 | struct acpi_gpe_block_info *gpe_block; | 446 | struct acpi_gpe_block_info *gpe_block; |
488 | }; | 447 | }; |
489 | 448 | ||
490 | typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info * | 449 | struct acpi_gpe_device_info { |
491 | gpe_xrupt_info, | 450 | u32 index; |
492 | struct acpi_gpe_block_info * | 451 | u32 next_block_base_index; |
493 | gpe_block); | 452 | acpi_status status; |
453 | struct acpi_namespace_node *gpe_device; | ||
454 | }; | ||
455 | |||
456 | typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
457 | struct acpi_gpe_block_info *gpe_block, void *context); | ||
494 | 458 | ||
495 | /* Information about each particular fixed event */ | 459 | /* Information about each particular fixed event */ |
496 | 460 | ||
@@ -566,6 +530,7 @@ struct acpi_control_state { | |||
566 | union acpi_parse_object *predicate_op; | 530 | union acpi_parse_object *predicate_op; |
567 | u8 *aml_predicate_start; /* Start of if/while predicate */ | 531 | u8 *aml_predicate_start; /* Start of if/while predicate */ |
568 | u8 *package_end; /* End of if/while block */ | 532 | u8 *package_end; /* End of if/while block */ |
533 | u32 loop_count; /* While() loop counter */ | ||
569 | }; | 534 | }; |
570 | 535 | ||
571 | /* | 536 | /* |
@@ -671,6 +636,12 @@ union acpi_parse_value { | |||
671 | union acpi_parse_object *arg; /* arguments and contained ops */ | 636 | union acpi_parse_object *arg; /* arguments and contained ops */ |
672 | }; | 637 | }; |
673 | 638 | ||
639 | #ifdef ACPI_DISASSEMBLER | ||
640 | #define ACPI_DISASM_ONLY_MEMBERS(a) a; | ||
641 | #else | ||
642 | #define ACPI_DISASM_ONLY_MEMBERS(a) | ||
643 | #endif | ||
644 | |||
674 | #define ACPI_PARSE_COMMON \ | 645 | #define ACPI_PARSE_COMMON \ |
675 | union acpi_parse_object *parent; /* Parent op */\ | 646 | union acpi_parse_object *parent; /* Parent op */\ |
676 | u8 descriptor_type; /* To differentiate various internal objs */\ | 647 | u8 descriptor_type; /* To differentiate various internal objs */\ |
@@ -790,9 +761,6 @@ struct acpi_parse_state { | |||
790 | * | 761 | * |
791 | ****************************************************************************/ | 762 | ****************************************************************************/ |
792 | 763 | ||
793 | #define PCI_ROOT_HID_STRING "PNP0A03" | ||
794 | #define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" | ||
795 | |||
796 | struct acpi_bit_register_info { | 764 | struct acpi_bit_register_info { |
797 | u8 parent_register; | 765 | u8 parent_register; |
798 | u8 bit_position; | 766 | u8 bit_position; |
@@ -1019,26 +987,4 @@ struct acpi_debug_mem_block { | |||
1019 | #define ACPI_MEM_LIST_MAX 1 | 987 | #define ACPI_MEM_LIST_MAX 1 |
1020 | #define ACPI_NUM_MEM_LISTS 2 | 988 | #define ACPI_NUM_MEM_LISTS 2 |
1021 | 989 | ||
1022 | struct acpi_memory_list { | ||
1023 | char *list_name; | ||
1024 | void *list_head; | ||
1025 | u16 object_size; | ||
1026 | u16 max_depth; | ||
1027 | u16 current_depth; | ||
1028 | u16 link_offset; | ||
1029 | |||
1030 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
1031 | |||
1032 | /* Statistics for debug memory tracking only */ | ||
1033 | |||
1034 | u32 total_allocated; | ||
1035 | u32 total_freed; | ||
1036 | u32 max_occupied; | ||
1037 | u32 total_size; | ||
1038 | u32 current_total_size; | ||
1039 | u32 requests; | ||
1040 | u32 hits; | ||
1041 | #endif | ||
1042 | }; | ||
1043 | |||
1044 | #endif /* __ACLOCAL_H__ */ | 990 | #endif /* __ACLOCAL_H__ */ |
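Note on the aclocal.h hunks above: the new struct acpi_gpe_device_info and the context argument added to acpi_gpe_callback are exercised by the evgpeblk.c changes further down, and the new loop_count member of struct acpi_control_state is used by the While() guard in dsopcode.c. ACPI_DISASM_ONLY_MEMBERS moves here from acmacros.h, evidently so it sits next to the ACPI_PARSE_COMMON definition that uses it (the relevant member is outside the visible context). For reference, the macro compiles a structure member in or out; the member name below is illustrative:

struct example_parse_op {
        u8 descriptor_type;
        ACPI_DISASM_ONLY_MEMBERS(char *aml_op_name)
        /* With ACPI_DISASSEMBLER defined this expands to "char *aml_op_name;";
         * otherwise it expands to nothing and the member disappears. */
};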
diff --git a/include/acpi/acmacros.h b/drivers/acpi/acpica/acmacros.h index 1954c9d1d012..9c127e8e2d6d 100644 --- a/include/acpi/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h | |||
@@ -45,23 +45,6 @@ | |||
45 | #define __ACMACROS_H__ | 45 | #define __ACMACROS_H__ |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Data manipulation macros | ||
49 | */ | ||
50 | #define ACPI_LOWORD(l) ((u16)(u32)(l)) | ||
51 | #define ACPI_HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF)) | ||
52 | #define ACPI_LOBYTE(l) ((u8)(u16)(l)) | ||
53 | #define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF)) | ||
54 | |||
55 | #define ACPI_SET_BIT(target,bit) ((target) |= (bit)) | ||
56 | #define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) | ||
57 | #define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) | ||
58 | #define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) | ||
59 | |||
60 | /* Size calculation */ | ||
61 | |||
62 | #define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) | ||
63 | |||
64 | /* | ||
65 | * Extract data using a pointer. Any more than a byte and we | 48 | * Extract data using a pointer. Any more than a byte and we |
66 | * get into potential aligment issues -- see the STORE macros below. | 49 | * get into potential aligment issues -- see the STORE macros below. |
67 | * Use with care. | 50 | * Use with care. |
@@ -76,39 +59,6 @@ | |||
76 | #define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr) | 59 | #define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr) |
77 | 60 | ||
78 | /* | 61 | /* |
79 | * Pointer manipulation | ||
80 | */ | ||
81 | #define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) | ||
82 | #define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) | ||
83 | #define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) | ||
84 | #define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))) | ||
85 | |||
86 | /* Pointer/Integer type conversions */ | ||
87 | |||
88 | #define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL, (acpi_size) i) | ||
89 | #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL) | ||
90 | #define ACPI_OFFSET(d, f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) NULL) | ||
91 | #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) | ||
92 | #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) | ||
93 | |||
94 | #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED | ||
95 | #define ACPI_COMPARE_NAME(a, b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) | ||
96 | #else | ||
97 | #define ACPI_COMPARE_NAME(a, b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) | ||
98 | #endif | ||
99 | |||
100 | /* | ||
101 | * Full 64-bit integer must be available on both 32-bit and 64-bit platforms | ||
102 | */ | ||
103 | struct acpi_integer_overlay { | ||
104 | u32 lo_dword; | ||
105 | u32 hi_dword; | ||
106 | }; | ||
107 | |||
108 | #define ACPI_LODWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword) | ||
109 | #define ACPI_HIDWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword) | ||
110 | |||
111 | /* | ||
112 | * printf() format helpers | 62 | * printf() format helpers |
113 | */ | 63 | */ |
114 | 64 | ||
@@ -209,7 +159,7 @@ struct acpi_integer_overlay { | |||
209 | /* | 159 | /* |
210 | * The hardware does not support unaligned transfers. We must move the | 160 | * The hardware does not support unaligned transfers. We must move the |
211 | * data one byte at a time. These macros work whether the source or | 161 | * data one byte at a time. These macros work whether the source or |
212 | * the destination (or both) is/are unaligned. (Little-endian move) | 162 | * the destination (or both) is/are unaligned. (Little-endian move) |
213 | */ | 163 | */ |
214 | 164 | ||
215 | /* 16-bit source, 16/32/64 destination */ | 165 | /* 16-bit source, 16/32/64 destination */ |
@@ -357,12 +307,6 @@ struct acpi_integer_overlay { | |||
357 | {(u32)(Pargs), (u32)(Iargs), (u32)(flags), obj_type, class, type} | 307 | {(u32)(Pargs), (u32)(Iargs), (u32)(flags), obj_type, class, type} |
358 | #endif | 308 | #endif |
359 | 309 | ||
360 | #ifdef ACPI_DISASSEMBLER | ||
361 | #define ACPI_DISASM_ONLY_MEMBERS(a) a; | ||
362 | #else | ||
363 | #define ACPI_DISASM_ONLY_MEMBERS(a) | ||
364 | #endif | ||
365 | |||
366 | #define ARG_TYPE_WIDTH 5 | 310 | #define ARG_TYPE_WIDTH 5 |
367 | #define ARG_1(x) ((u32)(x)) | 311 | #define ARG_1(x) ((u32)(x)) |
368 | #define ARG_2(x) ((u32)(x) << (1 * ARG_TYPE_WIDTH)) | 312 | #define ARG_2(x) ((u32)(x) << (1 * ARG_TYPE_WIDTH)) |
@@ -388,32 +332,16 @@ struct acpi_integer_overlay { | |||
388 | #define GET_CURRENT_ARG_TYPE(list) (list & ((u32) 0x1F)) | 332 | #define GET_CURRENT_ARG_TYPE(list) (list & ((u32) 0x1F)) |
389 | #define INCREMENT_ARG_LIST(list) (list >>= ((u32) ARG_TYPE_WIDTH)) | 333 | #define INCREMENT_ARG_LIST(list) (list >>= ((u32) ARG_TYPE_WIDTH)) |
390 | 334 | ||
391 | #if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES) | ||
392 | /* | ||
393 | * Module name is include in both debug and non-debug versions primarily for | ||
394 | * error messages. The __FILE__ macro is not very useful for this, because it | ||
395 | * often includes the entire pathname to the module | ||
396 | */ | ||
397 | #define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name; | ||
398 | #else | ||
399 | #define ACPI_MODULE_NAME(name) | ||
400 | #endif | ||
401 | |||
402 | /* | 335 | /* |
403 | * Ascii error messages can be configured out | 336 | * Ascii error messages can be configured out |
404 | */ | 337 | */ |
405 | #ifndef ACPI_NO_ERROR_MESSAGES | 338 | #ifndef ACPI_NO_ERROR_MESSAGES |
406 | #define AE_INFO _acpi_module_name, __LINE__ | ||
407 | 339 | ||
408 | /* | 340 | /* |
409 | * Error reporting. Callers module and line number are inserted by AE_INFO, | 341 | * Error reporting. Callers module and line number are inserted by AE_INFO, |
410 | * the plist contains a set of parens to allow variable-length lists. | 342 | * the plist contains a set of parens to allow variable-length lists. |
411 | * These macros are used for both the debug and non-debug versions of the code. | 343 | * These macros are used for both the debug and non-debug versions of the code. |
412 | */ | 344 | */ |
413 | #define ACPI_INFO(plist) acpi_ut_info plist | ||
414 | #define ACPI_WARNING(plist) acpi_ut_warning plist | ||
415 | #define ACPI_EXCEPTION(plist) acpi_ut_exception plist | ||
416 | #define ACPI_ERROR(plist) acpi_ut_error plist | ||
417 | #define ACPI_ERROR_NAMESPACE(s, e) acpi_ns_report_error (AE_INFO, s, e); | 345 | #define ACPI_ERROR_NAMESPACE(s, e) acpi_ns_report_error (AE_INFO, s, e); |
418 | #define ACPI_ERROR_METHOD(s, n, p, e) acpi_ns_report_method_error (AE_INFO, s, n, p, e); | 346 | #define ACPI_ERROR_METHOD(s, n, p, e) acpi_ns_report_method_error (AE_INFO, s, n, p, e); |
419 | 347 | ||
@@ -421,13 +349,9 @@ struct acpi_integer_overlay { | |||
421 | 349 | ||
422 | /* No error messages */ | 350 | /* No error messages */ |
423 | 351 | ||
424 | #define ACPI_INFO(plist) | ||
425 | #define ACPI_WARNING(plist) | ||
426 | #define ACPI_EXCEPTION(plist) | ||
427 | #define ACPI_ERROR(plist) | ||
428 | #define ACPI_ERROR_NAMESPACE(s, e) | 352 | #define ACPI_ERROR_NAMESPACE(s, e) |
429 | #define ACPI_ERROR_METHOD(s, n, p, e) | 353 | #define ACPI_ERROR_METHOD(s, n, p, e) |
430 | #endif | 354 | #endif /* ACPI_NO_ERROR_MESSAGES */ |
431 | 355 | ||
432 | /* | 356 | /* |
433 | * Debug macros that are conditionally compiled | 357 | * Debug macros that are conditionally compiled |
@@ -435,36 +359,8 @@ struct acpi_integer_overlay { | |||
435 | #ifdef ACPI_DEBUG_OUTPUT | 359 | #ifdef ACPI_DEBUG_OUTPUT |
436 | 360 | ||
437 | /* | 361 | /* |
438 | * Common parameters used for debug output functions: | ||
439 | * line number, function name, module(file) name, component ID | ||
440 | */ | ||
441 | #define ACPI_DEBUG_PARAMETERS __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT | ||
442 | |||
443 | /* | ||
444 | * Function entry tracing | 362 | * Function entry tracing |
445 | */ | 363 | */ |
446 | |||
447 | /* | ||
448 | * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header, | ||
449 | * define it now. This is the case where there the compiler does not support | ||
450 | * a __func__ macro or equivalent. | ||
451 | */ | ||
452 | #ifndef ACPI_GET_FUNCTION_NAME | ||
453 | #define ACPI_GET_FUNCTION_NAME _acpi_function_name | ||
454 | /* | ||
455 | * The Name parameter should be the procedure name as a quoted string. | ||
456 | * The function name is also used by the function exit macros below. | ||
457 | * Note: (const char) is used to be compatible with the debug interfaces | ||
458 | * and macros such as __func__. | ||
459 | */ | ||
460 | #define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name; | ||
461 | |||
462 | #else | ||
463 | /* Compiler supports __func__ (or equivalent) -- Ignore this macro */ | ||
464 | |||
465 | #define ACPI_FUNCTION_NAME(name) | ||
466 | #endif | ||
467 | |||
468 | #ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE | 364 | #ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE |
469 | 365 | ||
470 | #define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ | 366 | #define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ |
@@ -584,15 +480,6 @@ struct acpi_integer_overlay { | |||
584 | #define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) | 480 | #define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) |
585 | #define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) | 481 | #define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) |
586 | 482 | ||
587 | /* | ||
588 | * Master debug print macros | ||
589 | * Print iff: | ||
590 | * 1) Debug print for the current component is enabled | ||
591 | * 2) Debug error level or trace level for the print statement is enabled | ||
592 | */ | ||
593 | #define ACPI_DEBUG_PRINT(plist) acpi_ut_debug_print plist | ||
594 | #define ACPI_DEBUG_PRINT_RAW(plist) acpi_ut_debug_print_raw plist | ||
595 | |||
596 | #else | 483 | #else |
597 | /* | 484 | /* |
598 | * This is the non-debug case -- make everything go away, | 485 | * This is the non-debug case -- make everything go away, |
@@ -603,7 +490,6 @@ struct acpi_integer_overlay { | |||
603 | 490 | ||
604 | #define ACPI_DEBUG_DEFINE(a) do { } while(0) | 491 | #define ACPI_DEBUG_DEFINE(a) do { } while(0) |
605 | #define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0) | 492 | #define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0) |
606 | #define ACPI_FUNCTION_NAME(a) do { } while(0) | ||
607 | #define ACPI_FUNCTION_TRACE(a) do { } while(0) | 493 | #define ACPI_FUNCTION_TRACE(a) do { } while(0) |
608 | #define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0) | 494 | #define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0) |
609 | #define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0) | 495 | #define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0) |
@@ -619,8 +505,6 @@ struct acpi_integer_overlay { | |||
619 | #define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0) | 505 | #define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0) |
620 | #define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0) | 506 | #define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0) |
621 | #define ACPI_DUMP_BUFFER(a, b) do { } while(0) | 507 | #define ACPI_DUMP_BUFFER(a, b) do { } while(0) |
622 | #define ACPI_DEBUG_PRINT(pl) do { } while(0) | ||
623 | #define ACPI_DEBUG_PRINT_RAW(pl) do { } while(0) | ||
624 | 508 | ||
625 | #define return_VOID return | 509 | #define return_VOID return |
626 | #define return_ACPI_STATUS(s) return(s) | 510 | #define return_ACPI_STATUS(s) return(s) |
@@ -629,7 +513,7 @@ struct acpi_integer_overlay { | |||
629 | #define return_UINT32(s) return(s) | 513 | #define return_UINT32(s) return(s) |
630 | #define return_PTR(s) return(s) | 514 | #define return_PTR(s) return(s) |
631 | 515 | ||
632 | #endif | 516 | #endif /* ACPI_DEBUG_OUTPUT */ |
633 | 517 | ||
634 | /* | 518 | /* |
635 | * Some code only gets executed when the debugger is built in. | 519 | * Some code only gets executed when the debugger is built in. |
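Note on the acmacros.h hunks above: the data-manipulation, pointer-manipulation, module-name and debug-print macros deleted here are relocated rather than dropped (presumably into the public ACPICA headers as part of this include/acpi to drivers/acpi/acpica split), so callers do not change. As a quick reminder of what the byte/word extractors shown above produce:

u32 value  = 0x12345678;
u16 low    = ACPI_LOWORD(value);        /* 0x5678 */
u16 high   = ACPI_HIWORD(value);        /* 0x1234 */
u8  lobyte = ACPI_LOBYTE(low);          /* 0x78   */
u8  hibyte = ACPI_HIBYTE(low);          /* 0x56   */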
diff --git a/include/acpi/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index db4e6f677855..46cb5b46d280 100644 --- a/include/acpi/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h | |||
@@ -182,7 +182,9 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info); | |||
182 | */ | 182 | */ |
183 | acpi_status | 183 | acpi_status |
184 | acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | 184 | acpi_ns_check_predefined_names(struct acpi_namespace_node *node, |
185 | union acpi_operand_object *return_object); | 185 | u32 user_param_count, |
186 | acpi_status return_status, | ||
187 | union acpi_operand_object **return_object); | ||
186 | 188 | ||
187 | const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct | 189 | const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct |
188 | acpi_namespace_node | 190 | acpi_namespace_node |
@@ -191,6 +193,7 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct | |||
191 | void | 193 | void |
192 | acpi_ns_check_parameter_count(char *pathname, | 194 | acpi_ns_check_parameter_count(char *pathname, |
193 | struct acpi_namespace_node *node, | 195 | struct acpi_namespace_node *node, |
196 | u32 user_param_count, | ||
194 | const union acpi_predefined_info *info); | 197 | const union acpi_predefined_info *info); |
195 | 198 | ||
196 | /* | 199 | /* |
diff --git a/include/acpi/acobject.h b/drivers/acpi/acpica/acobject.h index eb6f038b03d9..eb6f038b03d9 100644 --- a/include/acpi/acobject.h +++ b/drivers/acpi/acpica/acobject.h | |||
diff --git a/include/acpi/acopcode.h b/drivers/acpi/acpica/acopcode.h index dfdf63327885..dfdf63327885 100644 --- a/include/acpi/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h | |||
diff --git a/include/acpi/acparser.h b/drivers/acpi/acpica/acparser.h index 23ee0fbf5619..23ee0fbf5619 100644 --- a/include/acpi/acparser.h +++ b/drivers/acpi/acpica/acparser.h | |||
diff --git a/include/acpi/acpredef.h b/drivers/acpi/acpica/acpredef.h index 16a9ca9a66e4..16a9ca9a66e4 100644 --- a/include/acpi/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h | |||
diff --git a/include/acpi/acresrc.h b/drivers/acpi/acpica/acresrc.h index eef5bd7a59fa..eef5bd7a59fa 100644 --- a/include/acpi/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h | |||
diff --git a/include/acpi/acstruct.h b/drivers/acpi/acpica/acstruct.h index 7980a26bad35..7980a26bad35 100644 --- a/include/acpi/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h | |||
diff --git a/include/acpi/actables.h b/drivers/acpi/acpica/actables.h index 0cbe1b9ab522..7ce6e33c7f78 100644 --- a/include/acpi/actables.h +++ b/drivers/acpi/acpica/actables.h | |||
@@ -94,6 +94,8 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded); | |||
94 | /* | 94 | /* |
95 | * tbutils - table manager utilities | 95 | * tbutils - table manager utilities |
96 | */ | 96 | */ |
97 | acpi_status acpi_tb_initialize_facs(void); | ||
98 | |||
97 | u8 acpi_tb_tables_loaded(void); | 99 | u8 acpi_tb_tables_loaded(void); |
98 | 100 | ||
99 | void | 101 | void |
diff --git a/include/acpi/acutils.h b/drivers/acpi/acpica/acutils.h index d8307b2987e3..80d8813484fe 100644 --- a/include/acpi/acutils.h +++ b/drivers/acpi/acpica/acutils.h | |||
@@ -297,42 +297,6 @@ void acpi_ut_report_info(char *module_name, u32 line_number); | |||
297 | 297 | ||
298 | void acpi_ut_report_warning(char *module_name, u32 line_number); | 298 | void acpi_ut_report_warning(char *module_name, u32 line_number); |
299 | 299 | ||
300 | /* Error and message reporting interfaces */ | ||
301 | |||
302 | void ACPI_INTERNAL_VAR_XFACE | ||
303 | acpi_ut_debug_print(u32 requested_debug_level, | ||
304 | u32 line_number, | ||
305 | const char *function_name, | ||
306 | const char *module_name, | ||
307 | u32 component_id, | ||
308 | const char *format, ...) ACPI_PRINTF_LIKE(6); | ||
309 | |||
310 | void ACPI_INTERNAL_VAR_XFACE | ||
311 | acpi_ut_debug_print_raw(u32 requested_debug_level, | ||
312 | u32 line_number, | ||
313 | const char *function_name, | ||
314 | const char *module_name, | ||
315 | u32 component_id, | ||
316 | const char *format, ...) ACPI_PRINTF_LIKE(6); | ||
317 | |||
318 | void ACPI_INTERNAL_VAR_XFACE | ||
319 | acpi_ut_error(const char *module_name, | ||
320 | u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3); | ||
321 | |||
322 | void ACPI_INTERNAL_VAR_XFACE | ||
323 | acpi_ut_exception(const char *module_name, | ||
324 | u32 line_number, | ||
325 | acpi_status status, | ||
326 | const char *format, ...) ACPI_PRINTF_LIKE(4); | ||
327 | |||
328 | void ACPI_INTERNAL_VAR_XFACE | ||
329 | acpi_ut_warning(const char *module_name, | ||
330 | u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3); | ||
331 | |||
332 | void ACPI_INTERNAL_VAR_XFACE | ||
333 | acpi_ut_info(const char *module_name, | ||
334 | u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3); | ||
335 | |||
336 | /* | 300 | /* |
337 | * utdelete - Object deletion and reference counts | 301 | * utdelete - Object deletion and reference counts |
338 | */ | 302 | */ |
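Note on the acutils.h hunk above: the acpi_ut_debug_print/error/warning/exception/info prototypes leave this private header together with the ACPI_ERROR/ACPI_WARNING/ACPI_DEBUG_PRINT wrappers removed from acmacros.h earlier; the calling convention itself is unchanged. The double parentheses pass the whole variable-length list as a single macro argument, and AE_INFO supplies the module name and line number. The message text below is illustrative:

/* Requires ACPI_MODULE_NAME("example") near the top of the file. */
ACPI_ERROR((AE_INFO, "Unknown GPE number 0x%X", gpe_number));
ACPI_EXCEPTION((AE_INFO, status, "Could not enable GPE 0x%X", gpe_number));

/* With error messages enabled, the first line expands to:
 *   acpi_ut_error(_acpi_module_name, __LINE__, "Unknown GPE number 0x%X", gpe_number);
 * with ACPI_NO_ERROR_MESSAGES defined, both statements compile to nothing. */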
diff --git a/include/acpi/amlcode.h b/drivers/acpi/acpica/amlcode.h index ff851c5df698..ff851c5df698 100644 --- a/include/acpi/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h | |||
diff --git a/include/acpi/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index 7b070e42b7c5..7b070e42b7c5 100644 --- a/include/acpi/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h | |||
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/acpica/dsfield.c index f988a5e7d2b4..53e27bc5a734 100644 --- a/drivers/acpi/dispatcher/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c | |||
@@ -42,11 +42,12 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/amlcode.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acdispat.h> | 46 | #include "amlcode.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acdispat.h" |
48 | #include <acpi/acnamesp.h> | 48 | #include "acinterp.h" |
49 | #include <acpi/acparser.h> | 49 | #include "acnamesp.h" |
50 | #include "acparser.h" | ||
50 | 51 | ||
51 | #define _COMPONENT ACPI_DISPATCHER | 52 | #define _COMPONENT ACPI_DISPATCHER |
52 | ACPI_MODULE_NAME("dsfield") | 53 | ACPI_MODULE_NAME("dsfield") |
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/acpica/dsinit.c index 949f7c75029e..eb144b13d8fa 100644 --- a/drivers/acpi/dispatcher/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c | |||
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acdispat.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acdispat.h" |
47 | #include <acpi/actables.h> | 47 | #include "acnamesp.h" |
48 | #include "actables.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_DISPATCHER | 50 | #define _COMPONENT ACPI_DISPATCHER |
50 | ACPI_MODULE_NAME("dsinit") | 51 | ACPI_MODULE_NAME("dsinit") |
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 279a5a60a0dd..14b8b8ed8023 100644 --- a/drivers/acpi/dispatcher/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c | |||
@@ -42,11 +42,14 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/amlcode.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acdispat.h> | 46 | #include "amlcode.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acdispat.h" |
48 | #include <acpi/acnamesp.h> | 48 | #include "acinterp.h" |
49 | #include "acnamesp.h" | ||
50 | #ifdef ACPI_DISASSEMBLER | ||
49 | #include <acpi/acdisasm.h> | 51 | #include <acpi/acdisasm.h> |
52 | #endif | ||
50 | 53 | ||
51 | #define _COMPONENT ACPI_DISPATCHER | 54 | #define _COMPONENT ACPI_DISPATCHER |
52 | ACPI_MODULE_NAME("dsmethod") | 55 | ACPI_MODULE_NAME("dsmethod") |
@@ -412,6 +415,9 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, | |||
412 | 415 | ||
413 | if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) { | 416 | if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) { |
414 | status = obj_desc->method.implementation(next_walk_state); | 417 | status = obj_desc->method.implementation(next_walk_state); |
418 | if (status == AE_OK) { | ||
419 | status = AE_CTRL_TERMINATE; | ||
420 | } | ||
415 | } | 421 | } |
416 | 422 | ||
417 | return_ACPI_STATUS(status); | 423 | return_ACPI_STATUS(status); |
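Note on the dsmethod.c hunks above: besides switching to the local headers (and pulling in acdisasm.h only for disassembler builds), this hunk changes how an internal-only method reports success: AE_OK from the implementation is now converted to AE_CTRL_TERMINATE, telling the dispatcher that the call is complete and there is no AML body to execute. A sketch of the implementation side; the handler below is hypothetical, and the usual consumer of this path is ACPICA's built-in _OSI support, though that is not shown in this diff:

static acpi_status acpi_ex_example_internal_method(struct acpi_walk_state *walk_state)
{
        /* Build the method's return object on the walk state, then simply
         * return AE_OK; acpi_ds_call_control_method() now maps that to
         * AE_CTRL_TERMINATE so the pseudo method call ends cleanly. */
        return (AE_OK);
}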
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index d03f81bd1bcb..da0f5468184c 100644 --- a/drivers/acpi/dispatcher/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c | |||
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acdispat.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acdispat.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acnamesp.h" |
48 | #include "acinterp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_DISPATCHER | 50 | #define _COMPONENT ACPI_DISPATCHER |
50 | ACPI_MODULE_NAME("dsmthdat") | 51 | ACPI_MODULE_NAME("dsmthdat") |
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/acpica/dsobject.c index 4f08e599d07e..15c628e6aa00 100644 --- a/drivers/acpi/dispatcher/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c | |||
@@ -42,11 +42,12 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acparser.h" |
47 | #include <acpi/acdispat.h> | 47 | #include "amlcode.h" |
48 | #include <acpi/acnamesp.h> | 48 | #include "acdispat.h" |
49 | #include <acpi/acinterp.h> | 49 | #include "acnamesp.h" |
50 | #include "acinterp.h" | ||
50 | 51 | ||
51 | #define _COMPONENT ACPI_DISPATCHER | 52 | #define _COMPONENT ACPI_DISPATCHER |
52 | ACPI_MODULE_NAME("dsobject") | 53 | ACPI_MODULE_NAME("dsobject") |
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 69fae5905bb8..0c3b4dd60e8a 100644 --- a/drivers/acpi/dispatcher/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c | |||
@@ -43,13 +43,14 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acparser.h> | 46 | #include "accommon.h" |
47 | #include <acpi/amlcode.h> | 47 | #include "acparser.h" |
48 | #include <acpi/acdispat.h> | 48 | #include "amlcode.h" |
49 | #include <acpi/acinterp.h> | 49 | #include "acdispat.h" |
50 | #include <acpi/acnamesp.h> | 50 | #include "acinterp.h" |
51 | #include <acpi/acevents.h> | 51 | #include "acnamesp.h" |
52 | #include <acpi/actables.h> | 52 | #include "acevents.h" |
53 | #include "actables.h" | ||
53 | 54 | ||
54 | #define _COMPONENT ACPI_DISPATCHER | 55 | #define _COMPONENT ACPI_DISPATCHER |
55 | ACPI_MODULE_NAME("dsopcode") | 56 | ACPI_MODULE_NAME("dsopcode") |
@@ -1140,10 +1141,29 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state, | |||
1140 | op->common.aml_opcode, walk_state)); | 1141 | op->common.aml_opcode, walk_state)); |
1141 | 1142 | ||
1142 | switch (op->common.aml_opcode) { | 1143 | switch (op->common.aml_opcode) { |
1143 | case AML_IF_OP: | ||
1144 | case AML_WHILE_OP: | 1144 | case AML_WHILE_OP: |
1145 | 1145 | ||
1146 | /* | 1146 | /* |
1147 | * If this is an additional iteration of a while loop, continue. | ||
1148 | * There is no need to allocate a new control state. | ||
1149 | */ | ||
1150 | if (walk_state->control_state) { | ||
1151 | if (walk_state->control_state->control.aml_predicate_start | ||
1152 | == (walk_state->parser_state.aml - 1)) { | ||
1153 | |||
1154 | /* Reset the state to start-of-loop */ | ||
1155 | |||
1156 | walk_state->control_state->common.state = | ||
1157 | ACPI_CONTROL_CONDITIONAL_EXECUTING; | ||
1158 | break; | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | /*lint -fallthrough */ | ||
1163 | |||
1164 | case AML_IF_OP: | ||
1165 | |||
1166 | /* | ||
1147 | * IF/WHILE: Create a new control state to manage these | 1167 | * IF/WHILE: Create a new control state to manage these |
1148 | * constructs. We need to manage these as a stack, in order | 1168 | * constructs. We need to manage these as a stack, in order |
1149 | * to handle nesting. | 1169 | * to handle nesting. |
@@ -1243,13 +1263,36 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state, | |||
1243 | 1263 | ||
1244 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op)); | 1264 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op)); |
1245 | 1265 | ||
1246 | if (walk_state->control_state->common.value) { | 1266 | control_state = walk_state->control_state; |
1267 | if (control_state->common.value) { | ||
1247 | 1268 | ||
1248 | /* Predicate was true, go back and evaluate it again! */ | 1269 | /* Predicate was true, the body of the loop was just executed */ |
1249 | 1270 | ||
1271 | /* | ||
1272 | * This loop counter mechanism allows the interpreter to escape | ||
1273 | * possibly infinite loops. This can occur in poorly written AML | ||
1274 | * when the hardware does not respond within a while loop and the | ||
1275 | * loop does not implement a timeout. | ||
1276 | */ | ||
1277 | control_state->control.loop_count++; | ||
1278 | if (control_state->control.loop_count > | ||
1279 | ACPI_MAX_LOOP_ITERATIONS) { | ||
1280 | status = AE_AML_INFINITE_LOOP; | ||
1281 | break; | ||
1282 | } | ||
1283 | |||
1284 | /* | ||
1285 | * Go back and evaluate the predicate and maybe execute the loop | ||
1286 | * another time | ||
1287 | */ | ||
1250 | status = AE_CTRL_PENDING; | 1288 | status = AE_CTRL_PENDING; |
1289 | walk_state->aml_last_while = | ||
1290 | control_state->control.aml_predicate_start; | ||
1291 | break; | ||
1251 | } | 1292 | } |
1252 | 1293 | ||
1294 | /* Predicate was false, terminate this while loop */ | ||
1295 | |||
1253 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, | 1296 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, |
1254 | "[WHILE_OP] termination! Op=%p\n", op)); | 1297 | "[WHILE_OP] termination! Op=%p\n", op)); |
1255 | 1298 | ||
@@ -1257,9 +1300,6 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state, | |||
1257 | 1300 | ||
1258 | control_state = | 1301 | control_state = |
1259 | acpi_ut_pop_generic_state(&walk_state->control_state); | 1302 | acpi_ut_pop_generic_state(&walk_state->control_state); |
1260 | |||
1261 | walk_state->aml_last_while = | ||
1262 | control_state->control.aml_predicate_start; | ||
1263 | acpi_ut_delete_generic_state(control_state); | 1303 | acpi_ut_delete_generic_state(control_state); |
1264 | break; | 1304 | break; |
1265 | 1305 | ||
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/acpica/dsutils.c index b398982f0d8b..dabc23a46176 100644 --- a/drivers/acpi/dispatcher/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c | |||
@@ -42,12 +42,13 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acparser.h" |
47 | #include <acpi/acdispat.h> | 47 | #include "amlcode.h" |
48 | #include <acpi/acinterp.h> | 48 | #include "acdispat.h" |
49 | #include <acpi/acnamesp.h> | 49 | #include "acinterp.h" |
50 | #include <acpi/acdebug.h> | 50 | #include "acnamesp.h" |
51 | #include "acdebug.h" | ||
51 | 52 | ||
52 | #define _COMPONENT ACPI_DISPATCHER | 53 | #define _COMPONENT ACPI_DISPATCHER |
53 | ACPI_MODULE_NAME("dsutils") | 54 | ACPI_MODULE_NAME("dsutils") |
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/acpica/dswexec.c index 396fe12078cd..350e6656bc89 100644 --- a/drivers/acpi/dispatcher/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c | |||
@@ -43,12 +43,13 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acparser.h> | 46 | #include "accommon.h" |
47 | #include <acpi/amlcode.h> | 47 | #include "acparser.h" |
48 | #include <acpi/acdispat.h> | 48 | #include "amlcode.h" |
49 | #include <acpi/acinterp.h> | 49 | #include "acdispat.h" |
50 | #include <acpi/acnamesp.h> | 50 | #include "acinterp.h" |
51 | #include <acpi/acdebug.h> | 51 | #include "acnamesp.h" |
52 | #include "acdebug.h" | ||
52 | 53 | ||
53 | #define _COMPONENT ACPI_DISPATCHER | 54 | #define _COMPONENT ACPI_DISPATCHER |
54 | ACPI_MODULE_NAME("dswexec") | 55 | ACPI_MODULE_NAME("dswexec") |
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/acpica/dswload.c index dff7a3e445a8..3023ceaa8d54 100644 --- a/drivers/acpi/dispatcher/dswload.c +++ b/drivers/acpi/acpica/dswload.c | |||
@@ -42,12 +42,13 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acparser.h" |
47 | #include <acpi/acdispat.h> | 47 | #include "amlcode.h" |
48 | #include <acpi/acinterp.h> | 48 | #include "acdispat.h" |
49 | #include <acpi/acnamesp.h> | 49 | #include "acinterp.h" |
50 | #include <acpi/acevents.h> | 50 | #include "acnamesp.h" |
51 | #include "acevents.h" | ||
51 | 52 | ||
52 | #ifdef ACPI_ASL_COMPILER | 53 | #ifdef ACPI_ASL_COMPILER |
53 | #include <acpi/acdisasm.h> | 54 | #include <acpi/acdisasm.h> |
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/acpica/dswscope.c index 9e6073265873..908645e72f03 100644 --- a/drivers/acpi/dispatcher/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acdispat.h> | 45 | #include "accommon.h" |
46 | #include "acdispat.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_DISPATCHER | 48 | #define _COMPONENT ACPI_DISPATCHER |
48 | ACPI_MODULE_NAME("dswscope") | 49 | ACPI_MODULE_NAME("dswscope") |
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/acpica/dswstate.c index b00d4af791aa..40f92bf7dce5 100644 --- a/drivers/acpi/dispatcher/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c | |||
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acdispat.h> | 46 | #include "acparser.h" |
47 | #include <acpi/acnamesp.h> | 47 | #include "acdispat.h" |
48 | #include "acnamesp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_DISPATCHER | 50 | #define _COMPONENT ACPI_DISPATCHER |
50 | ACPI_MODULE_NAME("dswstate") | 51 | ACPI_MODULE_NAME("dswstate") |
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/acpica/evevent.c index c56c5c6ea77b..803edd9e3f6a 100644 --- a/drivers/acpi/events/evevent.c +++ b/drivers/acpi/acpica/evevent.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acevents.h> | 45 | #include "accommon.h" |
46 | #include "acevents.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_EVENTS | 48 | #define _COMPONENT ACPI_EVENTS |
48 | ACPI_MODULE_NAME("evevent") | 49 | ACPI_MODULE_NAME("evevent") |
@@ -72,8 +73,8 @@ acpi_status acpi_ev_initialize_events(void) | |||
72 | 73 | ||
73 | /* | 74 | /* |
74 | * Initialize the Fixed and General Purpose Events. This is done prior to | 75 | * Initialize the Fixed and General Purpose Events. This is done prior to |
75 | * enabling SCIs to prevent interrupts from occurring before the handlers are | 76 | * enabling SCIs to prevent interrupts from occurring before the handlers |
76 | * installed. | 77 | * are installed. |
77 | */ | 78 | */ |
78 | status = acpi_ev_fixed_event_initialize(); | 79 | status = acpi_ev_fixed_event_initialize(); |
79 | if (ACPI_FAILURE(status)) { | 80 | if (ACPI_FAILURE(status)) { |
@@ -192,8 +193,8 @@ static acpi_status acpi_ev_fixed_event_initialize(void) | |||
192 | acpi_status status; | 193 | acpi_status status; |
193 | 194 | ||
194 | /* | 195 | /* |
195 | * Initialize the structure that keeps track of fixed event handlers | 196 | * Initialize the structure that keeps track of fixed event handlers and |
196 | * and enable the fixed events. | 197 | * enable the fixed events. |
197 | */ | 198 | */ |
198 | for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { | 199 | for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { |
199 | acpi_gbl_fixed_event_handlers[i].handler = NULL; | 200 | acpi_gbl_fixed_event_handlers[i].handler = NULL; |
@@ -237,7 +238,7 @@ u32 acpi_ev_fixed_event_detect(void) | |||
237 | 238 | ||
238 | /* | 239 | /* |
239 | * Read the fixed feature status and enable registers, as all the cases | 240 | * Read the fixed feature status and enable registers, as all the cases |
240 | * depend on their values. Ignore errors here. | 241 | * depend on their values. Ignore errors here. |
241 | */ | 242 | */ |
242 | (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); | 243 | (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); |
243 | (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); | 244 | (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); |
@@ -291,8 +292,8 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event) | |||
291 | status_register_id, 1); | 292 | status_register_id, 1); |
292 | 293 | ||
293 | /* | 294 | /* |
294 | * Make sure we've got a handler. If not, report an error. | 295 | * Make sure we've got a handler. If not, report an error. The event is |
295 | * The event is disabled to prevent further interrupts. | 296 | * disabled to prevent further interrupts. |
296 | */ | 297 | */ |
297 | if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { | 298 | if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { |
298 | (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. | 299 | (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. |
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/acpica/evgpe.c index f45c74fe745e..f345ced36477 100644 --- a/drivers/acpi/events/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acevents.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acevents.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EVENTS | 49 | #define _COMPONENT ACPI_EVENTS |
49 | ACPI_MODULE_NAME("evgpe") | 50 | ACPI_MODULE_NAME("evgpe") |
@@ -125,7 +126,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | |||
125 | (1 << | 126 | (1 << |
126 | (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); | 127 | (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); |
127 | 128 | ||
128 | /* 1) Disable case. Simply clear all enable bits */ | 129 | /* 1) Disable case. Simply clear all enable bits */ |
129 | 130 | ||
130 | if (type == ACPI_GPE_DISABLE) { | 131 | if (type == ACPI_GPE_DISABLE) { |
131 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | 132 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, |
@@ -134,7 +135,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | |||
134 | return_ACPI_STATUS(AE_OK); | 135 | return_ACPI_STATUS(AE_OK); |
135 | } | 136 | } |
136 | 137 | ||
137 | /* 2) Enable case. Set/Clear the appropriate enable bits */ | 138 | /* 2) Enable case. Set/Clear the appropriate enable bits */ |
138 | 139 | ||
139 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | 140 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { |
140 | case ACPI_GPE_TYPE_WAKE: | 141 | case ACPI_GPE_TYPE_WAKE: |
@@ -295,7 +296,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
295 | * | 296 | * |
296 | * FUNCTION: acpi_ev_get_gpe_event_info | 297 | * FUNCTION: acpi_ev_get_gpe_event_info |
297 | * | 298 | * |
298 | * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 | 299 | * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 |
299 | * gpe_number - Raw GPE number | 300 | * gpe_number - Raw GPE number |
300 | * | 301 | * |
301 | * RETURN: A GPE event_info struct. NULL if not a valid GPE | 302 | * RETURN: A GPE event_info struct. NULL if not a valid GPE |
@@ -372,7 +373,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | |||
372 | * | 373 | * |
373 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED | 374 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED |
374 | * | 375 | * |
375 | * DESCRIPTION: Detect if any GP events have occurred. This function is | 376 | * DESCRIPTION: Detect if any GP events have occurred. This function is |
376 | * executed at interrupt level. | 377 | * executed at interrupt level. |
377 | * | 378 | * |
378 | ******************************************************************************/ | 379 | ******************************************************************************/ |
@@ -400,8 +401,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
400 | 401 | ||
401 | /* | 402 | /* |
402 | * We need to obtain the GPE lock for both the data structs and registers | 403 | * We need to obtain the GPE lock for both the data structs and registers |
403 | * Note: Not necessary to obtain the hardware lock, since the GPE registers | 404 | * Note: Not necessary to obtain the hardware lock, since the GPE |
404 | * are owned by the gpe_lock. | 405 | * registers are owned by the gpe_lock. |
405 | */ | 406 | */ |
406 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 407 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
407 | 408 | ||
@@ -410,9 +411,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
410 | gpe_block = gpe_xrupt_list->gpe_block_list_head; | 411 | gpe_block = gpe_xrupt_list->gpe_block_list_head; |
411 | while (gpe_block) { | 412 | while (gpe_block) { |
412 | /* | 413 | /* |
413 | * Read all of the 8-bit GPE status and enable registers | 414 | * Read all of the 8-bit GPE status and enable registers in this GPE |
414 | * in this GPE block, saving all of them. | 415 | * block, saving all of them. Find all currently active GP events. |
415 | * Find all currently active GP events. | ||
416 | */ | 416 | */ |
417 | for (i = 0; i < gpe_block->register_count; i++) { | 417 | for (i = 0; i < gpe_block->register_count; i++) { |
418 | 418 | ||
@@ -423,10 +423,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
423 | /* Read the Status Register */ | 423 | /* Read the Status Register */ |
424 | 424 | ||
425 | status = | 425 | status = |
426 | acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, | 426 | acpi_read(&status_reg, |
427 | &status_reg, | 427 | &gpe_register_info->status_address); |
428 | &gpe_register_info-> | ||
429 | status_address); | ||
430 | if (ACPI_FAILURE(status)) { | 428 | if (ACPI_FAILURE(status)) { |
431 | goto unlock_and_exit; | 429 | goto unlock_and_exit; |
432 | } | 430 | } |
@@ -434,10 +432,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
434 | /* Read the Enable Register */ | 432 | /* Read the Enable Register */ |
435 | 433 | ||
436 | status = | 434 | status = |
437 | acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, | 435 | acpi_read(&enable_reg, |
438 | &enable_reg, | 436 | &gpe_register_info->enable_address); |
439 | &gpe_register_info-> | ||
440 | enable_address); | ||
441 | if (ACPI_FAILURE(status)) { | 437 | if (ACPI_FAILURE(status)) { |
442 | goto unlock_and_exit; | 438 | goto unlock_and_exit; |
443 | } | 439 | } |
@@ -527,8 +523,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
527 | (void)acpi_ev_enable_gpe(gpe_event_info, FALSE); | 523 | (void)acpi_ev_enable_gpe(gpe_event_info, FALSE); |
528 | 524 | ||
529 | /* | 525 | /* |
530 | * Take a snapshot of the GPE info for this level - we copy the | 526 | * Take a snapshot of the GPE info for this level - we copy the info to |
531 | * info to prevent a race condition with remove_handler/remove_block. | 527 | * prevent a race condition with remove_handler/remove_block. |
532 | */ | 528 | */ |
533 | ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, | 529 | ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, |
534 | sizeof(struct acpi_gpe_event_info)); | 530 | sizeof(struct acpi_gpe_event_info)); |
@@ -539,8 +535,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
539 | } | 535 | } |
540 | 536 | ||
541 | /* | 537 | /* |
542 | * Must check for control method type dispatch one more | 538 | * Must check for control method type dispatch one more time to avoid a |
543 | * time to avoid race with ev_gpe_install_handler | 539 | * race with ev_gpe_install_handler |
544 | */ | 540 | */ |
545 | if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == | 541 | if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == |
546 | ACPI_GPE_DISPATCH_METHOD) { | 542 | ACPI_GPE_DISPATCH_METHOD) { |
@@ -584,8 +580,8 @@ static void acpi_ev_asynch_enable_gpe(void *context) | |||
584 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | 580 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == |
585 | ACPI_GPE_LEVEL_TRIGGERED) { | 581 | ACPI_GPE_LEVEL_TRIGGERED) { |
586 | /* | 582 | /* |
587 | * GPE is level-triggered, we clear the GPE status bit after | 583 | * GPE is level-triggered, we clear the GPE status bit after handling |
588 | * handling the event. | 584 | * the event. |
589 | */ | 585 | */ |
590 | status = acpi_hw_clear_gpe(gpe_event_info); | 586 | status = acpi_hw_clear_gpe(gpe_event_info); |
591 | if (ACPI_FAILURE(status)) { | 587 | if (ACPI_FAILURE(status)) { |
@@ -624,7 +620,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
624 | acpi_os_gpe_count(gpe_number); | 620 | acpi_os_gpe_count(gpe_number); |
625 | 621 | ||
626 | /* | 622 | /* |
627 | * If edge-triggered, clear the GPE status bit now. Note that | 623 | * If edge-triggered, clear the GPE status bit now. Note that |
628 | * level-triggered events are cleared after the GPE is serviced. | 624 | * level-triggered events are cleared after the GPE is serviced. |
629 | */ | 625 | */ |
630 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | 626 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == |
@@ -650,7 +646,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
650 | 646 | ||
651 | /* | 647 | /* |
652 | * Invoke the installed handler (at interrupt level) | 648 | * Invoke the installed handler (at interrupt level) |
653 | * Ignore return status for now. TBD: leave GPE disabled on error? | 649 | * Ignore return status for now. |
650 | * TBD: leave GPE disabled on error? | ||
654 | */ | 651 | */ |
655 | (void)gpe_event_info->dispatch.handler->address(gpe_event_info-> | 652 | (void)gpe_event_info->dispatch.handler->address(gpe_event_info-> |
656 | dispatch. | 653 | dispatch. |
@@ -708,7 +705,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
708 | gpe_number)); | 705 | gpe_number)); |
709 | 706 | ||
710 | /* | 707 | /* |
711 | * Disable the GPE. The GPE will remain disabled until the ACPI | 708 | * Disable the GPE. The GPE will remain disabled until the ACPICA |
712 | * Core Subsystem is restarted, or a handler is installed. | 709 | * Core Subsystem is restarted, or a handler is installed. |
713 | */ | 710 | */ |
714 | status = acpi_ev_disable_gpe(gpe_event_info); | 711 | status = acpi_ev_disable_gpe(gpe_event_info); |
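Note on the evgpe.c hunks above: most of this file's delta is comment re-wrapping; the functional change is that GPE status and enable registers are now read through the public acpi_read() interface instead of the removed acpi_hw_low_level_read(), with the access width taken from the struct acpi_generic_address descriptor rather than passed explicitly. Side by side:

/* Before */
status = acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &status_reg,
                                &gpe_register_info->status_address);

/* After: the width comes from the GAS descriptor itself */
status = acpi_read(&status_reg, &gpe_register_info->status_address);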
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 73c058e2f5c2..484cc0565d5b 100644 --- a/drivers/acpi/events/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acevents.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acevents.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EVENTS | 49 | #define _COMPONENT ACPI_EVENTS |
49 | ACPI_MODULE_NAME("evgpeblk") | 50 | ACPI_MODULE_NAME("evgpeblk") |
@@ -124,6 +125,7 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | |||
124 | * FUNCTION: acpi_ev_walk_gpe_list | 125 | * FUNCTION: acpi_ev_walk_gpe_list |
125 | * | 126 | * |
126 | * PARAMETERS: gpe_walk_callback - Routine called for each GPE block | 127 | * PARAMETERS: gpe_walk_callback - Routine called for each GPE block |
128 | * Context - Value passed to callback | ||
127 | * | 129 | * |
128 | * RETURN: Status | 130 | * RETURN: Status |
129 | * | 131 | * |
@@ -131,7 +133,8 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | |||
131 | * | 133 | * |
132 | ******************************************************************************/ | 134 | ******************************************************************************/ |
133 | 135 | ||
134 | acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback) | 136 | acpi_status |
137 | acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context) | ||
135 | { | 138 | { |
136 | struct acpi_gpe_block_info *gpe_block; | 139 | struct acpi_gpe_block_info *gpe_block; |
137 | struct acpi_gpe_xrupt_info *gpe_xrupt_info; | 140 | struct acpi_gpe_xrupt_info *gpe_xrupt_info; |
@@ -154,8 +157,13 @@ acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback) | |||
154 | 157 | ||
155 | /* One callback per GPE block */ | 158 | /* One callback per GPE block */ |
156 | 159 | ||
157 | status = gpe_walk_callback(gpe_xrupt_info, gpe_block); | 160 | status = |
161 | gpe_walk_callback(gpe_xrupt_info, gpe_block, | ||
162 | context); | ||
158 | if (ACPI_FAILURE(status)) { | 163 | if (ACPI_FAILURE(status)) { |
164 | if (status == AE_CTRL_END) { /* Callback abort */ | ||
165 | status = AE_OK; | ||
166 | } | ||
159 | goto unlock_and_exit; | 167 | goto unlock_and_exit; |
160 | } | 168 | } |
161 | 169 | ||
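Note on the walk-list hunks above: acpi_ev_walk_gpe_list() now forwards the caller's context to every callback and treats AE_CTRL_END as a deliberate early stop, returning AE_OK rather than an error. A hedged sketch of a caller using both, in the spirit of the new struct acpi_gpe_device_info lookup; the callback body, the target_index variable and the gpe_block->node field are assumptions, not code from this patch:

static acpi_status
acpi_ev_example_match_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                            struct acpi_gpe_block_info *gpe_block, void *context)
{
        struct acpi_gpe_device_info *info = context;
        u32 gpe_count = gpe_block->register_count * 8;

        if (info->index < info->next_block_base_index + gpe_count) {
                info->gpe_device = gpe_block->node;     /* field name assumed */
                info->status = AE_OK;
                return (AE_CTRL_END);   /* stop walking; the walker reports AE_OK */
        }

        info->next_block_base_index += gpe_count;
        return (AE_OK);                 /* not this block, keep walking */
}

/* Caller side: */
acpi_status status;
struct acpi_gpe_device_info info = { 0 };

info.index = target_index;              /* GPE index being looked up (assumed) */
info.status = AE_NOT_EXIST;
status = acpi_ev_walk_gpe_list(acpi_ev_example_match_block, &info);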
@@ -186,7 +194,8 @@ acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback) | |||
186 | 194 | ||
187 | acpi_status | 195 | acpi_status |
188 | acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 196 | acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
189 | struct acpi_gpe_block_info *gpe_block) | 197 | struct acpi_gpe_block_info *gpe_block, |
198 | void *context) | ||
190 | { | 199 | { |
191 | struct acpi_gpe_event_info *gpe_event_info; | 200 | struct acpi_gpe_event_info *gpe_event_info; |
192 | u32 i; | 201 | u32 i; |
@@ -309,17 +318,17 @@ acpi_ev_save_method_info(acpi_handle obj_handle, | |||
309 | (gpe_block->block_base_number + | 318 | (gpe_block->block_base_number + |
310 | (gpe_block->register_count * 8)))) { | 319 | (gpe_block->register_count * 8)))) { |
311 | /* | 320 | /* |
312 | * Not valid for this GPE block, just ignore it | 321 | * Not valid for this GPE block, just ignore it. However, it may be |
313 | * However, it may be valid for a different GPE block, since GPE0 and GPE1 | 322 | * valid for a different GPE block, since GPE0 and GPE1 methods both |
314 | * methods both appear under \_GPE. | 323 | * appear under \_GPE. |
315 | */ | 324 | */ |
316 | return_ACPI_STATUS(AE_OK); | 325 | return_ACPI_STATUS(AE_OK); |
317 | } | 326 | } |
318 | 327 | ||
319 | /* | 328 | /* |
320 | * Now we can add this information to the gpe_event_info block | 329 | * Now we can add this information to the gpe_event_info block for use |
321 | * for use during dispatch of this GPE. Default type is RUNTIME, although | 330 | * during dispatch of this GPE. Default type is RUNTIME, although this may |
322 | * this may change when the _PRW methods are executed later. | 331 | * change when the _PRW methods are executed later. |
323 | */ | 332 | */ |
324 | gpe_event_info = | 333 | gpe_event_info = |
325 | &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; | 334 | &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; |
@@ -394,8 +403,8 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | |||
394 | gpe_block = gpe_info->gpe_block; | 403 | gpe_block = gpe_info->gpe_block; |
395 | 404 | ||
396 | /* | 405 | /* |
397 | * The _PRW object must return a package, we are only interested | 406 | * The _PRW object must return a package, we are only interested in the |
398 | * in the first element | 407 | * first element |
399 | */ | 408 | */ |
400 | obj_desc = pkg_desc->package.elements[0]; | 409 | obj_desc = pkg_desc->package.elements[0]; |
401 | 410 | ||
@@ -434,7 +443,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | |||
434 | /* | 443 | /* |
435 | * Is this GPE within this block? | 444 | * Is this GPE within this block? |
436 | * | 445 | * |
437 | * TRUE iff these conditions are true: | 446 | * TRUE if and only if these conditions are true: |
438 | * 1) The GPE devices match. | 447 | * 1) The GPE devices match. |
439 | * 2) The GPE index(number) is within the range of the Gpe Block | 448 | * 2) The GPE index(number) is within the range of the Gpe Block |
440 | * associated with the GPE device. | 449 | * associated with the GPE device. |
@@ -457,6 +466,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | |||
457 | if (ACPI_FAILURE(status)) { | 466 | if (ACPI_FAILURE(status)) { |
458 | goto cleanup; | 467 | goto cleanup; |
459 | } | 468 | } |
469 | |||
460 | status = | 470 | status = |
461 | acpi_ev_update_gpe_enable_masks(gpe_event_info, | 471 | acpi_ev_update_gpe_enable_masks(gpe_event_info, |
462 | ACPI_GPE_DISABLE); | 472 | ACPI_GPE_DISABLE); |
@@ -476,9 +486,9 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | |||
476 | * RETURN: A GPE interrupt block | 486 | * RETURN: A GPE interrupt block |
477 | * | 487 | * |
478 | * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt | 488 | * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt |
479 | * block per unique interrupt level used for GPEs. | 489 | * block per unique interrupt level used for GPEs. Should be |
480 | * Should be called only when the GPE lists are semaphore locked | 490 | * called only when the GPE lists are semaphore locked and not |
481 | * and not subject to change. | 491 | * subject to change. |
482 | * | 492 | * |
483 | ******************************************************************************/ | 493 | ******************************************************************************/ |
484 | 494 | ||
@@ -608,8 +618,9 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt) | |||
608 | * | 618 | * |
609 | * FUNCTION: acpi_ev_install_gpe_block | 619 | * FUNCTION: acpi_ev_install_gpe_block |
610 | * | 620 | * |
611 | * PARAMETERS: gpe_block - New GPE block | 621 | * PARAMETERS: gpe_block - New GPE block |
612 | * interrupt_number - Xrupt to be associated with this GPE block | 622 | * interrupt_number - Xrupt to be associated with this |
623 | * GPE block | ||
613 | * | 624 | * |
614 | * RETURN: Status | 625 | * RETURN: Status |
615 | * | 626 | * |
@@ -666,7 +677,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, | |||
666 | * | 677 | * |
667 | * FUNCTION: acpi_ev_delete_gpe_block | 678 | * FUNCTION: acpi_ev_delete_gpe_block |
668 | * | 679 | * |
669 | * PARAMETERS: gpe_block - Existing GPE block | 680 | * PARAMETERS: gpe_block - Existing GPE block |
670 | * | 681 | * |
671 | * RETURN: Status | 682 | * RETURN: Status |
672 | * | 683 | * |
@@ -688,7 +699,8 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) | |||
688 | 699 | ||
689 | /* Disable all GPEs in this block */ | 700 | /* Disable all GPEs in this block */ |
690 | 701 | ||
691 | status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block); | 702 | status = |
703 | acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL); | ||
692 | 704 | ||
693 | if (!gpe_block->previous && !gpe_block->next) { | 705 | if (!gpe_block->previous && !gpe_block->next) { |
694 | 706 | ||
@@ -715,6 +727,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) | |||
715 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 727 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
716 | } | 728 | } |
717 | 729 | ||
730 | acpi_current_gpe_count -= | ||
731 | gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH; | ||
732 | |||
718 | /* Free the gpe_block */ | 733 | /* Free the gpe_block */ |
719 | 734 | ||
720 | ACPI_FREE(gpe_block->register_info); | 735 | ACPI_FREE(gpe_block->register_info); |
@@ -786,9 +801,9 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) | |||
786 | 801 | ||
787 | /* | 802 | /* |
788 | * Initialize the GPE Register and Event structures. A goal of these | 803 | * Initialize the GPE Register and Event structures. A goal of these |
789 | * tables is to hide the fact that there are two separate GPE register sets | 804 | * tables is to hide the fact that there are two separate GPE register |
790 | * in a given GPE hardware block, the status registers occupy the first half, | 805 | * sets in a given GPE hardware block, the status registers occupy the |
791 | * and the enable registers occupy the second half. | 806 | * first half, and the enable registers occupy the second half. |
792 | */ | 807 | */ |
793 | this_register = gpe_register_info; | 808 | this_register = gpe_register_info; |
794 | this_event = gpe_event_info; | 809 | this_event = gpe_event_info; |
@@ -816,10 +831,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) | |||
816 | ACPI_GPE_REGISTER_WIDTH; | 831 | ACPI_GPE_REGISTER_WIDTH; |
817 | this_register->enable_address.bit_width = | 832 | this_register->enable_address.bit_width = |
818 | ACPI_GPE_REGISTER_WIDTH; | 833 | ACPI_GPE_REGISTER_WIDTH; |
819 | this_register->status_address.bit_offset = | 834 | this_register->status_address.bit_offset = 0; |
820 | ACPI_GPE_REGISTER_WIDTH; | 835 | this_register->enable_address.bit_offset = 0; |
821 | this_register->enable_address.bit_offset = | ||
822 | ACPI_GPE_REGISTER_WIDTH; | ||
823 | 836 | ||
824 | /* Init the event_info for each GPE within this register */ | 837 | /* Init the event_info for each GPE within this register */ |
825 | 838 | ||
@@ -832,18 +845,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) | |||
832 | 845 | ||
833 | /* Disable all GPEs within this register */ | 846 | /* Disable all GPEs within this register */ |
834 | 847 | ||
835 | status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00, | 848 | status = acpi_write(0x00, &this_register->enable_address); |
836 | &this_register-> | ||
837 | enable_address); | ||
838 | if (ACPI_FAILURE(status)) { | 849 | if (ACPI_FAILURE(status)) { |
839 | goto error_exit; | 850 | goto error_exit; |
840 | } | 851 | } |
841 | 852 | ||
842 | /* Clear any pending GPE events within this register */ | 853 | /* Clear any pending GPE events within this register */ |
843 | 854 | ||
844 | status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF, | 855 | status = acpi_write(0xFF, &this_register->status_address); |
845 | &this_register-> | ||
846 | status_address); | ||
847 | if (ACPI_FAILURE(status)) { | 856 | if (ACPI_FAILURE(status)) { |
848 | goto error_exit; | 857 | goto error_exit; |
849 | } | 858 | } |
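
Two cleanups appear in the hunks above: the status/enable bit_offset fields are now initialized to 0 instead of ACPI_GPE_REGISTER_WIDTH, and the raw acpi_hw_low_level_write(width, value, address) calls become acpi_write(value, address), which can drop the explicit width because the generic address structure already records it. The sketch below is a minimal stand-alone model of that second point, assuming only what the hunk shows; the struct and function names are illustrative, not the ACPICA API.

#include <stdint.h>
#include <stdio.h>

struct generic_address {
        uint8_t  bit_width;             /* e.g. 8 bits per GPE register */
        uint64_t address;
};

static int example_write(uint64_t value, struct generic_address *reg)
{
        /* A real implementation would issue an MMIO or port write here */
        printf("write 0x%llx (%u bits) to 0x%llx\n",
               (unsigned long long)value, reg->bit_width,
               (unsigned long long)reg->address);
        return 0;
}

int main(void)
{
        struct generic_address enable = { 8, 0x1000 };
        struct generic_address status = { 8, 0x1004 };

        example_write(0x00, &enable);   /* disable all GPEs in the register */
        example_write(0xFF, &status);   /* clear any pending GPE events */
        return 0;
}
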
@@ -956,6 +965,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
956 | gpe_device->name.ascii, gpe_block->register_count, | 965 | gpe_device->name.ascii, gpe_block->register_count, |
957 | interrupt_number)); | 966 | interrupt_number)); |
958 | 967 | ||
968 | /* Update global count of currently available GPEs */ | ||
969 | |||
970 | acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH; | ||
959 | return_ACPI_STATUS(AE_OK); | 971 | return_ACPI_STATUS(AE_OK); |
960 | } | 972 | } |
961 | 973 | ||
@@ -1055,7 +1067,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | |||
1055 | 1067 | ||
1056 | /* Enable all valid runtime GPEs found above */ | 1068 | /* Enable all valid runtime GPEs found above */ |
1057 | 1069 | ||
1058 | status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block); | 1070 | status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL); |
1059 | if (ACPI_FAILURE(status)) { | 1071 | if (ACPI_FAILURE(status)) { |
1060 | ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p", | 1072 | ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p", |
1061 | gpe_block)); | 1073 | gpe_block)); |
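
This file also introduces the acpi_current_gpe_count bookkeeping seen in the hunks above: the count grows by register_count * ACPI_GPE_REGISTER_WIDTH when a GPE block is created and shrinks by the same amount when one is deleted, and the new acpi_get_gpe_device() later range-checks indexes against it. A toy stand-alone model of that accounting, with names that are illustrative rather than the kernel symbols:

#include <stdio.h>

#define GPE_REGISTER_WIDTH 8            /* GPEs per GPE register */

static unsigned int current_gpe_count;

static void gpe_block_created(unsigned int register_count)
{
        current_gpe_count += register_count * GPE_REGISTER_WIDTH;
}

static void gpe_block_deleted(unsigned int register_count)
{
        current_gpe_count -= register_count * GPE_REGISTER_WIDTH;
}

int main(void)
{
        gpe_block_created(2);           /* e.g. a FADT GPE block, 2 registers */
        gpe_block_created(1);           /* a GPE block device */
        gpe_block_deleted(1);
        printf("available GPEs: %u\n", current_gpe_count);     /* prints 16 */
        return 0;
}
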
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 1d5670be729a..5f893057bcc6 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -42,18 +42,15 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acevents.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acevents.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acnamesp.h" |
48 | #include "acinterp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EVENTS | 50 | #define _COMPONENT ACPI_EVENTS |
50 | ACPI_MODULE_NAME("evmisc") | 51 | ACPI_MODULE_NAME("evmisc") |
51 | 52 | ||
52 | /* Pointer to FACS needed for the Global Lock */ | ||
53 | static struct acpi_table_facs *facs = NULL; | ||
54 | |||
55 | /* Local prototypes */ | 53 | /* Local prototypes */ |
56 | |||
57 | static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); | 54 | static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); |
58 | 55 | ||
59 | static u32 acpi_ev_global_lock_handler(void *context); | 56 | static u32 acpi_ev_global_lock_handler(void *context); |
@@ -152,7 +149,9 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node, | |||
152 | break; | 149 | break; |
153 | 150 | ||
154 | default: | 151 | default: |
152 | |||
155 | /* All other types are not supported */ | 153 | /* All other types are not supported */ |
154 | |||
156 | return (AE_TYPE); | 155 | return (AE_TYPE); |
157 | } | 156 | } |
158 | } | 157 | } |
@@ -193,9 +192,8 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node, | |||
193 | acpi_ut_delete_generic_state(notify_info); | 192 | acpi_ut_delete_generic_state(notify_info); |
194 | } | 193 | } |
195 | } else { | 194 | } else { |
196 | /* | 195 | /* There is no notify handler (per-device or system) for this device */ |
197 | * There is no notify handler (per-device or system) for this device. | 196 | |
198 | */ | ||
199 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 197 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
200 | "No notify handler for Notify (%4.4s, %X) node %p\n", | 198 | "No notify handler for Notify (%4.4s, %X) node %p\n", |
201 | acpi_ut_get_node_name(node), notify_value, | 199 | acpi_ut_get_node_name(node), notify_value, |
@@ -229,9 +227,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) | |||
229 | ACPI_FUNCTION_ENTRY(); | 227 | ACPI_FUNCTION_ENTRY(); |
230 | 228 | ||
231 | /* | 229 | /* |
232 | * We will invoke a global notify handler if installed. | 230 | * We will invoke a global notify handler if installed. This is done |
233 | * This is done _before_ we invoke the per-device handler attached | 231 | * _before_ we invoke the per-device handler attached to the device. |
234 | * to the device. | ||
235 | */ | 232 | */ |
236 | if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) { | 233 | if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) { |
237 | 234 | ||
@@ -299,7 +296,7 @@ static u32 acpi_ev_global_lock_handler(void *context) | |||
299 | * If we don't get it now, it will be marked pending and we will | 296 | * If we don't get it now, it will be marked pending and we will |
300 | * take another interrupt when it becomes free. | 297 | * take another interrupt when it becomes free. |
301 | */ | 298 | */ |
302 | ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired); | 299 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); |
303 | if (acquired) { | 300 | if (acquired) { |
304 | 301 | ||
305 | /* Got the lock, now wake all threads waiting for it */ | 302 | /* Got the lock, now wake all threads waiting for it */ |
@@ -336,34 +333,27 @@ acpi_status acpi_ev_init_global_lock_handler(void) | |||
336 | 333 | ||
337 | ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); | 334 | ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); |
338 | 335 | ||
339 | status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, | 336 | /* Attempt installation of the global lock handler */ |
340 | ACPI_CAST_INDIRECT_PTR(struct | ||
341 | acpi_table_header, | ||
342 | &facs)); | ||
343 | if (ACPI_FAILURE(status)) { | ||
344 | return_ACPI_STATUS(status); | ||
345 | } | ||
346 | 337 | ||
347 | acpi_gbl_global_lock_present = TRUE; | ||
348 | status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, | 338 | status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, |
349 | acpi_ev_global_lock_handler, | 339 | acpi_ev_global_lock_handler, |
350 | NULL); | 340 | NULL); |
351 | 341 | ||
352 | /* | 342 | /* |
353 | * If the global lock does not exist on this platform, the attempt | 343 | * If the global lock does not exist on this platform, the attempt to |
354 | * to enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick) | 344 | * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick). |
355 | * Map to AE_OK, but mark global lock as not present. | 345 | * Map to AE_OK, but mark global lock as not present. Any attempt to |
356 | * Any attempt to actually use the global lock will be flagged | 346 | * actually use the global lock will be flagged with an error. |
357 | * with an error. | ||
358 | */ | 347 | */ |
359 | if (status == AE_NO_HARDWARE_RESPONSE) { | 348 | if (status == AE_NO_HARDWARE_RESPONSE) { |
360 | ACPI_ERROR((AE_INFO, | 349 | ACPI_ERROR((AE_INFO, |
361 | "No response from Global Lock hardware, disabling lock")); | 350 | "No response from Global Lock hardware, disabling lock")); |
362 | 351 | ||
363 | acpi_gbl_global_lock_present = FALSE; | 352 | acpi_gbl_global_lock_present = FALSE; |
364 | status = AE_OK; | 353 | return_ACPI_STATUS(AE_OK); |
365 | } | 354 | } |
366 | 355 | ||
356 | acpi_gbl_global_lock_present = TRUE; | ||
367 | return_ACPI_STATUS(status); | 357 | return_ACPI_STATUS(status); |
368 | } | 358 | } |
369 | 359 | ||
@@ -462,8 +452,8 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout) | |||
462 | } | 452 | } |
463 | 453 | ||
464 | /* | 454 | /* |
465 | * Make sure that a global lock actually exists. If not, just treat | 455 | * Make sure that a global lock actually exists. If not, just treat the |
466 | * the lock as a standard mutex. | 456 | * lock as a standard mutex. |
467 | */ | 457 | */ |
468 | if (!acpi_gbl_global_lock_present) { | 458 | if (!acpi_gbl_global_lock_present) { |
469 | acpi_gbl_global_lock_acquired = TRUE; | 459 | acpi_gbl_global_lock_acquired = TRUE; |
@@ -472,7 +462,7 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout) | |||
472 | 462 | ||
473 | /* Attempt to acquire the actual hardware lock */ | 463 | /* Attempt to acquire the actual hardware lock */ |
474 | 464 | ||
475 | ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired); | 465 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); |
476 | if (acquired) { | 466 | if (acquired) { |
477 | 467 | ||
478 | /* We got the lock */ | 468 | /* We got the lock */ |
@@ -536,7 +526,7 @@ acpi_status acpi_ev_release_global_lock(void) | |||
536 | 526 | ||
537 | /* Allow any thread to release the lock */ | 527 | /* Allow any thread to release the lock */ |
538 | 528 | ||
539 | ACPI_RELEASE_GLOBAL_LOCK(facs, pending); | 529 | ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending); |
540 | 530 | ||
541 | /* | 531 | /* |
542 | * If the pending bit was set, we must write GBL_RLS to the control | 532 | * If the pending bit was set, we must write GBL_RLS to the control |
@@ -582,8 +572,8 @@ void acpi_ev_terminate(void) | |||
582 | 572 | ||
583 | if (acpi_gbl_events_initialized) { | 573 | if (acpi_gbl_events_initialized) { |
584 | /* | 574 | /* |
585 | * Disable all event-related functionality. | 575 | * Disable all event-related functionality. In all cases, on error, |
586 | * In all cases, on error, print a message but obviously we don't abort. | 576 | * print a message but obviously we don't abort. |
587 | */ | 577 | */ |
588 | 578 | ||
589 | /* Disable all fixed events */ | 579 | /* Disable all fixed events */ |
@@ -599,7 +589,7 @@ void acpi_ev_terminate(void) | |||
599 | 589 | ||
600 | /* Disable all GPEs in all GPE blocks */ | 590 | /* Disable all GPEs in all GPE blocks */ |
601 | 591 | ||
602 | status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block); | 592 | status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); |
603 | 593 | ||
604 | /* Remove SCI handler */ | 594 | /* Remove SCI handler */ |
605 | 595 | ||
@@ -617,7 +607,7 @@ void acpi_ev_terminate(void) | |||
617 | 607 | ||
618 | /* Deallocate all handler objects installed within GPE info structs */ | 608 | /* Deallocate all handler objects installed within GPE info structs */ |
619 | 609 | ||
620 | status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers); | 610 | status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL); |
621 | 611 | ||
622 | /* Return to original mode if necessary */ | 612 | /* Return to original mode if necessary */ |
623 | 613 | ||
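
The evmisc.c changes above drop the file-local FACS pointer in favour of the global acpi_gbl_FACS and reorder the global-lock init so that acpi_gbl_global_lock_present is only set once the fixed-event handler install succeeds; a missing-hardware response is downgraded to success with the lock marked absent. The stand-alone sketch below models that control flow with illustrative status values, not the real ACPICA definitions.

#include <stdbool.h>
#include <stdio.h>

#define AE_OK                    0
#define AE_NO_HARDWARE_RESPONSE  9      /* illustrative value only */

static bool global_lock_present;

/* Pretend handler installation; change the return value to exercise both paths */
static int install_fixed_event_handler(void)
{
        return AE_OK;
}

static int init_global_lock_handler(void)
{
        int status = install_fixed_event_handler();

        if (status == AE_NO_HARDWARE_RESPONSE) {
                fprintf(stderr, "no Global Lock hardware, disabling lock\n");
                global_lock_present = false;
                return AE_OK;
        }

        /* Only mark the lock present once the handler is actually installed */
        global_lock_present = true;
        return status;
}

int main(void)
{
        int status = init_global_lock_handler();

        printf("status %d, lock present: %d\n", status, global_lock_present);
        return status;
}
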
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/acpica/evregion.c
index 236fbd1ca438..665c0887ab4d 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -42,22 +42,15 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acevents.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acevents.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acnamesp.h" |
48 | #include "acinterp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EVENTS | 50 | #define _COMPONENT ACPI_EVENTS |
50 | ACPI_MODULE_NAME("evregion") | 51 | ACPI_MODULE_NAME("evregion") |
51 | #define ACPI_NUM_DEFAULT_SPACES 4 | ||
52 | static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = { | ||
53 | ACPI_ADR_SPACE_SYSTEM_MEMORY, | ||
54 | ACPI_ADR_SPACE_SYSTEM_IO, | ||
55 | ACPI_ADR_SPACE_PCI_CONFIG, | ||
56 | ACPI_ADR_SPACE_DATA_TABLE | ||
57 | }; | ||
58 | 52 | ||
59 | /* Local prototypes */ | 53 | /* Local prototypes */ |
60 | |||
61 | static acpi_status | 54 | static acpi_status |
62 | acpi_ev_reg_run(acpi_handle obj_handle, | 55 | acpi_ev_reg_run(acpi_handle obj_handle, |
63 | u32 level, void *context, void **return_value); | 56 | u32 level, void *context, void **return_value); |
@@ -66,6 +59,17 @@ static acpi_status | |||
66 | acpi_ev_install_handler(acpi_handle obj_handle, | 59 | acpi_ev_install_handler(acpi_handle obj_handle, |
67 | u32 level, void *context, void **return_value); | 60 | u32 level, void *context, void **return_value); |
68 | 61 | ||
62 | /* These are the address spaces that will get default handlers */ | ||
63 | |||
64 | #define ACPI_NUM_DEFAULT_SPACES 4 | ||
65 | |||
66 | static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = { | ||
67 | ACPI_ADR_SPACE_SYSTEM_MEMORY, | ||
68 | ACPI_ADR_SPACE_SYSTEM_IO, | ||
69 | ACPI_ADR_SPACE_PCI_CONFIG, | ||
70 | ACPI_ADR_SPACE_DATA_TABLE | ||
71 | }; | ||
72 | |||
69 | /******************************************************************************* | 73 | /******************************************************************************* |
70 | * | 74 | * |
71 | * FUNCTION: acpi_ev_install_region_handlers | 75 | * FUNCTION: acpi_ev_install_region_handlers |
@@ -91,18 +95,19 @@ acpi_status acpi_ev_install_region_handlers(void) | |||
91 | } | 95 | } |
92 | 96 | ||
93 | /* | 97 | /* |
94 | * All address spaces (PCI Config, EC, SMBus) are scope dependent | 98 | * All address spaces (PCI Config, EC, SMBus) are scope dependent and |
95 | * and registration must occur for a specific device. | 99 | * registration must occur for a specific device. |
96 | * | 100 | * |
97 | * In the case of the system memory and IO address spaces there is currently | 101 | * In the case of the system memory and IO address spaces there is |
98 | * no device associated with the address space. For these we use the root. | 102 | * currently no device associated with the address space. For these we |
103 | * use the root. | ||
99 | * | 104 | * |
100 | * We install the default PCI config space handler at the root so | 105 | * We install the default PCI config space handler at the root so that |
101 | * that this space is immediately available even though the we have | 106 | * this space is immediately available even though the we have not |
102 | * not enumerated all the PCI Root Buses yet. This is to conform | 107 | * enumerated all the PCI Root Buses yet. This is to conform to the ACPI |
103 | * to the ACPI specification which states that the PCI config | 108 | * specification which states that the PCI config space must be always |
104 | * space must be always available -- even though we are nowhere | 109 | * available -- even though we are nowhere near ready to find the PCI root |
105 | * near ready to find the PCI root buses at this point. | 110 | * buses at this point. |
106 | * | 111 | * |
107 | * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler | 112 | * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler |
108 | * has already been installed (via acpi_install_address_space_handler). | 113 | * has already been installed (via acpi_install_address_space_handler). |
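
With the default-spaces table relocated as shown above, acpi_ev_install_region_handlers() still does the same job: walk the four default address spaces and install a default handler for each at the namespace root, treating "already exists" as success when the OS installed one earlier. A simplified stand-alone sketch of that loop follows; all names here are illustrative stand-ins, not the ACPICA code.

#include <stdio.h>

#define AE_OK              0
#define AE_ALREADY_EXISTS  7            /* illustrative value only */
#define NUM_DEFAULT_SPACES 4

enum address_space {
        SPACE_SYSTEM_MEMORY,
        SPACE_SYSTEM_IO,
        SPACE_PCI_CONFIG,
        SPACE_DATA_TABLE,
};

static const enum address_space default_spaces[NUM_DEFAULT_SPACES] = {
        SPACE_SYSTEM_MEMORY,
        SPACE_SYSTEM_IO,
        SPACE_PCI_CONFIG,
        SPACE_DATA_TABLE,
};

/* Pretend installer; a real one attaches a handler object to the root node */
static int install_space_handler(enum address_space space)
{
        printf("installing default handler for space %d\n", (int)space);
        return AE_OK;
}

int main(void)
{
        int i;

        for (i = 0; i < NUM_DEFAULT_SPACES; i++) {
                int status = install_space_handler(default_spaces[i]);

                /* A handler already installed by the host OS is not an error */
                if (status != AE_OK && status != AE_ALREADY_EXISTS)
                        return status;
        }
        return 0;
}
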
@@ -160,12 +165,11 @@ acpi_status acpi_ev_initialize_op_regions(void) | |||
160 | return_ACPI_STATUS(status); | 165 | return_ACPI_STATUS(status); |
161 | } | 166 | } |
162 | 167 | ||
163 | /* | 168 | /* Run the _REG methods for op_regions in each default address space */ |
164 | * Run the _REG methods for op_regions in each default address space | ||
165 | */ | ||
166 | for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) { | ||
167 | 169 | ||
168 | /* TBD: Make sure handler is the DEFAULT handler, otherwise | 170 | for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) { |
171 | /* | ||
172 | * TBD: Make sure handler is the DEFAULT handler, otherwise | ||
169 | * _REG will have already been run. | 173 | * _REG will have already been run. |
170 | */ | 174 | */ |
171 | status = acpi_ev_execute_reg_methods(acpi_gbl_root_node, | 175 | status = acpi_ev_execute_reg_methods(acpi_gbl_root_node, |
@@ -318,13 +322,13 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
318 | } | 322 | } |
319 | 323 | ||
320 | /* | 324 | /* |
321 | * It may be the case that the region has never been initialized | 325 | * It may be the case that the region has never been initialized. |
322 | * Some types of regions require special init code | 326 | * Some types of regions require special init code |
323 | */ | 327 | */ |
324 | if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { | 328 | if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { |
325 | /* | 329 | |
326 | * This region has not been initialized yet, do it | 330 | /* This region has not been initialized yet, do it */ |
327 | */ | 331 | |
328 | region_setup = handler_desc->address_space.setup; | 332 | region_setup = handler_desc->address_space.setup; |
329 | if (!region_setup) { | 333 | if (!region_setup) { |
330 | 334 | ||
@@ -339,9 +343,9 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
339 | } | 343 | } |
340 | 344 | ||
341 | /* | 345 | /* |
342 | * We must exit the interpreter because the region | 346 | * We must exit the interpreter because the region setup will |
343 | * setup will potentially execute control methods | 347 | * potentially execute control methods (for example, the _REG method |
344 | * (e.g., _REG method for this region) | 348 | * for this region) |
345 | */ | 349 | */ |
346 | acpi_ex_exit_interpreter(); | 350 | acpi_ex_exit_interpreter(); |
347 | 351 | ||
@@ -364,9 +368,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
364 | return_ACPI_STATUS(status); | 368 | return_ACPI_STATUS(status); |
365 | } | 369 | } |
366 | 370 | ||
367 | /* | 371 | /* Region initialization may have been completed by region_setup */ |
368 | * Region initialization may have been completed by region_setup | 372 | |
369 | */ | ||
370 | if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { | 373 | if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { |
371 | region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; | 374 | region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; |
372 | 375 | ||
@@ -521,8 +524,8 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj, | |||
521 | } | 524 | } |
522 | 525 | ||
523 | /* | 526 | /* |
524 | * If the region has been activated, call the setup handler | 527 | * If the region has been activated, call the setup handler with |
525 | * with the deactivate notification | 528 | * the deactivate notification |
526 | */ | 529 | */ |
527 | if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { | 530 | if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { |
528 | region_setup = handler_obj->address_space.setup; | 531 | region_setup = handler_obj->address_space.setup; |
@@ -668,8 +671,8 @@ acpi_ev_install_handler(acpi_handle obj_handle, | |||
668 | } | 671 | } |
669 | 672 | ||
670 | /* | 673 | /* |
671 | * We only care about regions.and objects | 674 | * We only care about regions and objects that are allowed to have |
672 | * that are allowed to have address space handlers | 675 | * address space handlers |
673 | */ | 676 | */ |
674 | if ((node->type != ACPI_TYPE_DEVICE) && | 677 | if ((node->type != ACPI_TYPE_DEVICE) && |
675 | (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { | 678 | (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { |
@@ -710,9 +713,9 @@ acpi_ev_install_handler(acpi_handle obj_handle, | |||
710 | /* | 713 | /* |
711 | * Since the object we found it on was a device, then it | 714 | * Since the object we found it on was a device, then it |
712 | * means that someone has already installed a handler for | 715 | * means that someone has already installed a handler for |
713 | * the branch of the namespace from this device on. Just | 716 | * the branch of the namespace from this device on. Just |
714 | * bail out telling the walk routine to not traverse this | 717 | * bail out telling the walk routine to not traverse this |
715 | * branch. This preserves the scoping rule for handlers. | 718 | * branch. This preserves the scoping rule for handlers. |
716 | */ | 719 | */ |
717 | return (AE_CTRL_DEPTH); | 720 | return (AE_CTRL_DEPTH); |
718 | } | 721 | } |
@@ -723,9 +726,8 @@ acpi_ev_install_handler(acpi_handle obj_handle, | |||
723 | } | 726 | } |
724 | 727 | ||
725 | /* | 728 | /* |
726 | * As long as the device didn't have a handler for this | 729 | * As long as the device didn't have a handler for this space we |
727 | * space we don't care about it. We just ignore it and | 730 | * don't care about it. We just ignore it and proceed. |
728 | * proceed. | ||
729 | */ | 731 | */ |
730 | return (AE_OK); | 732 | return (AE_OK); |
731 | } | 733 | } |
@@ -733,16 +735,14 @@ acpi_ev_install_handler(acpi_handle obj_handle, | |||
733 | /* Object is a Region */ | 735 | /* Object is a Region */ |
734 | 736 | ||
735 | if (obj_desc->region.space_id != handler_obj->address_space.space_id) { | 737 | if (obj_desc->region.space_id != handler_obj->address_space.space_id) { |
736 | /* | 738 | |
737 | * This region is for a different address space | 739 | /* This region is for a different address space, just ignore it */ |
738 | * -- just ignore it | 740 | |
739 | */ | ||
740 | return (AE_OK); | 741 | return (AE_OK); |
741 | } | 742 | } |
742 | 743 | ||
743 | /* | 744 | /* |
744 | * Now we have a region and it is for the handler's address | 745 | * Now we have a region and it is for the handler's address space type. |
745 | * space type. | ||
746 | * | 746 | * |
747 | * First disconnect region for any previous handler (if any) | 747 | * First disconnect region for any previous handler (if any) |
748 | */ | 748 | */ |
@@ -786,9 +786,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, | |||
786 | ACPI_FUNCTION_TRACE(ev_install_space_handler); | 786 | ACPI_FUNCTION_TRACE(ev_install_space_handler); |
787 | 787 | ||
788 | /* | 788 | /* |
789 | * This registration is valid for only the types below | 789 | * This registration is valid for only the types below and the root. This |
790 | * and the root. This is where the default handlers | 790 | * is where the default handlers get placed. |
791 | * get placed. | ||
792 | */ | 791 | */ |
793 | if ((node->type != ACPI_TYPE_DEVICE) && | 792 | if ((node->type != ACPI_TYPE_DEVICE) && |
794 | (node->type != ACPI_TYPE_PROCESSOR) && | 793 | (node->type != ACPI_TYPE_PROCESSOR) && |
@@ -848,8 +847,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, | |||
848 | obj_desc = acpi_ns_get_attached_object(node); | 847 | obj_desc = acpi_ns_get_attached_object(node); |
849 | if (obj_desc) { | 848 | if (obj_desc) { |
850 | /* | 849 | /* |
851 | * The attached device object already exists. | 850 | * The attached device object already exists. Make sure the handler |
852 | * Make sure the handler is not already installed. | 851 | * is not already installed. |
853 | */ | 852 | */ |
854 | handler_obj = obj_desc->device.handler; | 853 | handler_obj = obj_desc->device.handler; |
855 | 854 | ||
@@ -864,8 +863,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, | |||
864 | handler) { | 863 | handler) { |
865 | /* | 864 | /* |
866 | * It is (relatively) OK to attempt to install the SAME | 865 | * It is (relatively) OK to attempt to install the SAME |
867 | * handler twice. This can easily happen | 866 | * handler twice. This can easily happen with the |
868 | * with PCI_Config space. | 867 | * PCI_Config space. |
869 | */ | 868 | */ |
870 | status = AE_SAME_HANDLER; | 869 | status = AE_SAME_HANDLER; |
871 | goto unlock_and_exit; | 870 | goto unlock_and_exit; |
@@ -925,9 +924,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, | |||
925 | /* | 924 | /* |
926 | * Install the handler | 925 | * Install the handler |
927 | * | 926 | * |
928 | * At this point there is no existing handler. | 927 | * At this point there is no existing handler. Just allocate the object |
929 | * Just allocate the object for the handler and link it | 928 | * for the handler and link it into the list. |
930 | * into the list. | ||
931 | */ | 929 | */ |
932 | handler_obj = | 930 | handler_obj = |
933 | acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER); | 931 | acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER); |
@@ -1000,11 +998,10 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, | |||
1000 | ACPI_FUNCTION_TRACE(ev_execute_reg_methods); | 998 | ACPI_FUNCTION_TRACE(ev_execute_reg_methods); |
1001 | 999 | ||
1002 | /* | 1000 | /* |
1003 | * Run all _REG methods for all Operation Regions for this | 1001 | * Run all _REG methods for all Operation Regions for this space ID. This |
1004 | * space ID. This is a separate walk in order to handle any | 1002 | * is a separate walk in order to handle any interdependencies between |
1005 | * interdependencies between regions and _REG methods. (i.e. handlers | 1003 | * regions and _REG methods. (i.e. handlers must be installed for all |
1006 | * must be installed for all regions of this Space ID before we | 1004 | * regions of this Space ID before we can run any _REG methods) |
1007 | * can run any _REG methods) | ||
1008 | */ | 1005 | */ |
1009 | status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, | 1006 | status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, |
1010 | ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, | 1007 | ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, |
@@ -1042,8 +1039,8 @@ acpi_ev_reg_run(acpi_handle obj_handle, | |||
1042 | } | 1039 | } |
1043 | 1040 | ||
1044 | /* | 1041 | /* |
1045 | * We only care about regions and objects | 1042 | * We only care about regions and objects that are allowed to have address |
1046 | * that are allowed to have address space handlers | 1043 | * space handlers |
1047 | */ | 1044 | */ |
1048 | if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { | 1045 | if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { |
1049 | return (AE_OK); | 1046 | return (AE_OK); |
@@ -1062,10 +1059,9 @@ acpi_ev_reg_run(acpi_handle obj_handle, | |||
1062 | /* Object is a Region */ | 1059 | /* Object is a Region */ |
1063 | 1060 | ||
1064 | if (obj_desc->region.space_id != space_id) { | 1061 | if (obj_desc->region.space_id != space_id) { |
1065 | /* | 1062 | |
1066 | * This region is for a different address space | 1063 | /* This region is for a different address space, just ignore it */ |
1067 | * -- just ignore it | 1064 | |
1068 | */ | ||
1069 | return (AE_OK); | 1065 | return (AE_OK); |
1070 | } | 1066 | } |
1071 | 1067 | ||
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 6b94b38df07d..f3f1fb45c3dc 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acevents.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acevents.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EVENTS | 49 | #define _COMPONENT ACPI_EVENTS |
49 | ACPI_MODULE_NAME("evrgnini") | 50 | ACPI_MODULE_NAME("evrgnini") |
@@ -233,9 +234,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, | |||
233 | if (ACPI_FAILURE(status)) { | 234 | if (ACPI_FAILURE(status)) { |
234 | if (status == AE_SAME_HANDLER) { | 235 | if (status == AE_SAME_HANDLER) { |
235 | /* | 236 | /* |
236 | * It is OK if the handler is already installed on the root | 237 | * It is OK if the handler is already installed on the |
237 | * bridge. Still need to return a context object for the | 238 | * root bridge. Still need to return a context object |
238 | * new PCI_Config operation region, however. | 239 | * for the new PCI_Config operation region, however. |
239 | */ | 240 | */ |
240 | status = AE_OK; | 241 | status = AE_OK; |
241 | } else { | 242 | } else { |
@@ -272,8 +273,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, | |||
272 | } | 273 | } |
273 | 274 | ||
274 | /* | 275 | /* |
275 | * For PCI_Config space access, we need the segment, bus, | 276 | * For PCI_Config space access, we need the segment, bus, device and |
276 | * device and function numbers. Acquire them here. | 277 | * function numbers. Acquire them here. |
277 | * | 278 | * |
278 | * Find the parent device object. (This allows the operation region to be | 279 | * Find the parent device object. (This allows the operation region to be |
279 | * within a subscope under the device, such as a control method.) | 280 | * within a subscope under the device, such as a control method.) |
@@ -289,16 +290,16 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, | |||
289 | } | 290 | } |
290 | 291 | ||
291 | /* | 292 | /* |
292 | * Get the PCI device and function numbers from the _ADR object | 293 | * Get the PCI device and function numbers from the _ADR object contained |
293 | * contained in the parent's scope. | 294 | * in the parent's scope. |
294 | */ | 295 | */ |
295 | status = | 296 | status = |
296 | acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node, | 297 | acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node, |
297 | &pci_value); | 298 | &pci_value); |
298 | 299 | ||
299 | /* | 300 | /* |
300 | * The default is zero, and since the allocation above zeroed | 301 | * The default is zero, and since the allocation above zeroed the data, |
301 | * the data, just do nothing on failure. | 302 | * just do nothing on failure. |
302 | */ | 303 | */ |
303 | if (ACPI_SUCCESS(status)) { | 304 | if (ACPI_SUCCESS(status)) { |
304 | pci_id->device = ACPI_HIWORD(ACPI_LODWORD(pci_value)); | 305 | pci_id->device = ACPI_HIWORD(ACPI_LODWORD(pci_value)); |
@@ -382,9 +383,8 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) | |||
382 | struct acpi_compatible_id_list *cid; | 383 | struct acpi_compatible_id_list *cid; |
383 | u32 i; | 384 | u32 i; |
384 | 385 | ||
385 | /* | 386 | /* Get the _HID and check for a PCI Root Bridge */ |
386 | * Get the _HID and check for a PCI Root Bridge | 387 | |
387 | */ | ||
388 | status = acpi_ut_execute_HID(node, &hid); | 388 | status = acpi_ut_execute_HID(node, &hid); |
389 | if (ACPI_FAILURE(status)) { | 389 | if (ACPI_FAILURE(status)) { |
390 | return (FALSE); | 390 | return (FALSE); |
@@ -394,10 +394,8 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) | |||
394 | return (TRUE); | 394 | return (TRUE); |
395 | } | 395 | } |
396 | 396 | ||
397 | /* | 397 | /* The _HID did not match. Get the _CID and check for a PCI Root Bridge */ |
398 | * The _HID did not match. | 398 | |
399 | * Get the _CID and check for a PCI Root Bridge | ||
400 | */ | ||
401 | status = acpi_ut_execute_CID(node, &cid); | 399 | status = acpi_ut_execute_CID(node, &cid); |
402 | if (ACPI_FAILURE(status)) { | 400 | if (ACPI_FAILURE(status)) { |
403 | return (FALSE); | 401 | return (FALSE); |
@@ -516,9 +514,9 @@ acpi_ev_default_region_setup(acpi_handle handle, | |||
516 | * Get the appropriate address space handler for a newly | 514 | * Get the appropriate address space handler for a newly |
517 | * created region. | 515 | * created region. |
518 | * | 516 | * |
519 | * This also performs address space specific initialization. For | 517 | * This also performs address space specific initialization. For |
520 | * example, PCI regions must have an _ADR object that contains | 518 | * example, PCI regions must have an _ADR object that contains |
521 | * a PCI address in the scope of the definition. This address is | 519 | * a PCI address in the scope of the definition. This address is |
522 | * required to perform an access to PCI config space. | 520 | * required to perform an access to PCI config space. |
523 | * | 521 | * |
524 | * MUTEX: Interpreter should be unlocked, because we may run the _REG | 522 | * MUTEX: Interpreter should be unlocked, because we may run the _REG |
@@ -572,7 +570,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, | |||
572 | if (ACPI_SUCCESS(status)) { | 570 | if (ACPI_SUCCESS(status)) { |
573 | /* | 571 | /* |
574 | * The _REG method is optional and there can be only one per region | 572 | * The _REG method is optional and there can be only one per region |
575 | * definition. This will be executed when the handler is attached | 573 | * definition. This will be executed when the handler is attached |
576 | * or removed | 574 | * or removed |
577 | */ | 575 | */ |
578 | region_obj2->extra.method_REG = method_node; | 576 | region_obj2->extra.method_REG = method_node; |
@@ -670,10 +668,8 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, | |||
670 | } | 668 | } |
671 | } | 669 | } |
672 | 670 | ||
673 | /* | 671 | /* This node does not have the handler we need; Pop up one level */ |
674 | * This node does not have the handler we need; | 672 | |
675 | * Pop up one level | ||
676 | */ | ||
677 | node = acpi_ns_get_parent_node(node); | 673 | node = acpi_ns_get_parent_node(node); |
678 | } | 674 | } |
679 | 675 | ||
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/acpica/evsci.c
index 2a8b77877610..567b356c85af 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acevents.h> | 46 | #include "accommon.h" |
47 | #include "acevents.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EVENTS | 49 | #define _COMPONENT ACPI_EVENTS |
49 | ACPI_MODULE_NAME("evsci") | 50 | ACPI_MODULE_NAME("evsci") |
@@ -115,10 +116,8 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context) | |||
115 | * if this interrupt handler is installed, ACPI is enabled. | 116 | * if this interrupt handler is installed, ACPI is enabled. |
116 | */ | 117 | */ |
117 | 118 | ||
118 | /* | 119 | /* GPEs: Check for and dispatch any GPEs that have occurred */ |
119 | * GPEs: | 120 | |
120 | * Check for and dispatch any GPEs that have occurred | ||
121 | */ | ||
122 | interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); | 121 | interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); |
123 | 122 | ||
124 | return_UINT32(interrupt_handled); | 123 | return_UINT32(interrupt_handled); |
@@ -158,11 +157,11 @@ u32 acpi_ev_install_sci_handler(void) | |||
158 | * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not | 157 | * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not |
159 | * installed to begin with | 158 | * installed to begin with |
160 | * | 159 | * |
161 | * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be | 160 | * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be |
162 | * taken. | 161 | * taken. |
163 | * | 162 | * |
164 | * Note: It doesn't seem important to disable all events or set the event | 163 | * Note: It doesn't seem important to disable all events or set the event |
165 | * enable registers to their original values. The OS should disable | 164 | * enable registers to their original values. The OS should disable |
166 | * the SCI interrupt level when the handler is removed, so no more | 165 | * the SCI interrupt level when the handler is removed, so no more |
167 | * events will come in. | 166 | * events will come in. |
168 | * | 167 | * |
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/acpica/evxface.c
index 94a6efe020be..3aca9010a11e 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acevents.h> | 46 | #include "acnamesp.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acevents.h" |
48 | #include "acinterp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EVENTS | 50 | #define _COMPONENT ACPI_EVENTS |
50 | ACPI_MODULE_NAME("evxface") | 51 | ACPI_MODULE_NAME("evxface") |
@@ -267,7 +268,7 @@ acpi_install_notify_handler(acpi_handle device, | |||
267 | /* | 268 | /* |
268 | * Root Object: | 269 | * Root Object: |
269 | * Registering a notify handler on the root object indicates that the | 270 | * Registering a notify handler on the root object indicates that the |
270 | * caller wishes to receive notifications for all objects. Note that | 271 | * caller wishes to receive notifications for all objects. Note that |
271 | * only one <external> global handler can be registered (per notify type). | 272 | * only one <external> global handler can be registered (per notify type). |
272 | */ | 273 | */ |
273 | if (device == ACPI_ROOT_OBJECT) { | 274 | if (device == ACPI_ROOT_OBJECT) { |
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 41554f736b68..35485e4b60a6 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -42,13 +42,19 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acevents.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acevents.h" |
47 | #include <acpi/actables.h> | 47 | #include "acnamesp.h" |
48 | #include "actables.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EVENTS | 50 | #define _COMPONENT ACPI_EVENTS |
50 | ACPI_MODULE_NAME("evxfevnt") | 51 | ACPI_MODULE_NAME("evxfevnt") |
51 | 52 | ||
53 | /* Local prototypes */ | ||
54 | acpi_status | ||
55 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
56 | struct acpi_gpe_block_info *gpe_block, void *context); | ||
57 | |||
52 | /******************************************************************************* | 58 | /******************************************************************************* |
53 | * | 59 | * |
54 | * FUNCTION: acpi_enable | 60 | * FUNCTION: acpi_enable |
@@ -60,6 +66,7 @@ ACPI_MODULE_NAME("evxfevnt") | |||
60 | * DESCRIPTION: Transfers the system into ACPI mode. | 66 | * DESCRIPTION: Transfers the system into ACPI mode. |
61 | * | 67 | * |
62 | ******************************************************************************/ | 68 | ******************************************************************************/ |
69 | |||
63 | acpi_status acpi_enable(void) | 70 | acpi_status acpi_enable(void) |
64 | { | 71 | { |
65 | acpi_status status = AE_OK; | 72 | acpi_status status = AE_OK; |
@@ -161,8 +168,8 @@ acpi_status acpi_enable_event(u32 event, u32 flags) | |||
161 | } | 168 | } |
162 | 169 | ||
163 | /* | 170 | /* |
164 | * Enable the requested fixed event (by writing a one to the | 171 | * Enable the requested fixed event (by writing a one to the enable |
165 | * enable register bit) | 172 | * register bit) |
166 | */ | 173 | */ |
167 | status = | 174 | status = |
168 | acpi_set_register(acpi_gbl_fixed_event_info[event]. | 175 | acpi_set_register(acpi_gbl_fixed_event_info[event]. |
@@ -343,8 +350,8 @@ acpi_status acpi_disable_event(u32 event, u32 flags) | |||
343 | } | 350 | } |
344 | 351 | ||
345 | /* | 352 | /* |
346 | * Disable the requested fixed event (by writing a zero to the | 353 | * Disable the requested fixed event (by writing a zero to the enable |
347 | * enable register bit) | 354 | * register bit) |
348 | */ | 355 | */ |
349 | status = | 356 | status = |
350 | acpi_set_register(acpi_gbl_fixed_event_info[event]. | 357 | acpi_set_register(acpi_gbl_fixed_event_info[event]. |
@@ -396,8 +403,8 @@ acpi_status acpi_clear_event(u32 event) | |||
396 | } | 403 | } |
397 | 404 | ||
398 | /* | 405 | /* |
399 | * Clear the requested fixed event (By writing a one to the | 406 | * Clear the requested fixed event (By writing a one to the status |
400 | * status register bit) | 407 | * register bit) |
401 | */ | 408 | */ |
402 | status = | 409 | status = |
403 | acpi_set_register(acpi_gbl_fixed_event_info[event]. | 410 | acpi_set_register(acpi_gbl_fixed_event_info[event]. |
@@ -717,3 +724,148 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | |||
717 | } | 724 | } |
718 | 725 | ||
719 | ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) | 726 | ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) |
727 | |||
728 | /******************************************************************************* | ||
729 | * | ||
730 | * FUNCTION: acpi_get_gpe_device | ||
731 | * | ||
732 | * PARAMETERS: Index - System GPE index (0-current_gpe_count) | ||
733 | * gpe_device - Where the parent GPE Device is returned | ||
734 | * | ||
735 | * RETURN: Status | ||
736 | * | ||
737 | * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL | ||
738 | * gpe device indicates that the gpe number is contained in one of | ||
739 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. | ||
740 | * | ||
741 | ******************************************************************************/ | ||
742 | acpi_status | ||
743 | acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) | ||
744 | { | ||
745 | struct acpi_gpe_device_info info; | ||
746 | acpi_status status; | ||
747 | |||
748 | ACPI_FUNCTION_TRACE(acpi_get_gpe_device); | ||
749 | |||
750 | if (!gpe_device) { | ||
751 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
752 | } | ||
753 | |||
754 | if (index >= acpi_current_gpe_count) { | ||
755 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
756 | } | ||
757 | |||
758 | /* Setup and walk the GPE list */ | ||
759 | |||
760 | info.index = index; | ||
761 | info.status = AE_NOT_EXIST; | ||
762 | info.gpe_device = NULL; | ||
763 | info.next_block_base_index = 0; | ||
764 | |||
765 | status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); | ||
766 | if (ACPI_FAILURE(status)) { | ||
767 | return_ACPI_STATUS(status); | ||
768 | } | ||
769 | |||
770 | *gpe_device = info.gpe_device; | ||
771 | return_ACPI_STATUS(info.status); | ||
772 | } | ||
773 | |||
774 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) | ||
775 | |||
776 | /******************************************************************************* | ||
777 | * | ||
778 | * FUNCTION: acpi_ev_get_gpe_device | ||
779 | * | ||
780 | * PARAMETERS: GPE_WALK_CALLBACK | ||
781 | * | ||
782 | * RETURN: Status | ||
783 | * | ||
784 | * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE | ||
785 | * block device. NULL if the GPE is one of the FADT-defined GPEs. | ||
786 | * | ||
787 | ******************************************************************************/ | ||
788 | acpi_status | ||
789 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
790 | struct acpi_gpe_block_info *gpe_block, void *context) | ||
791 | { | ||
792 | struct acpi_gpe_device_info *info = context; | ||
793 | |||
794 | /* Increment Index by the number of GPEs in this block */ | ||
795 | |||
796 | info->next_block_base_index += | ||
797 | (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH); | ||
798 | |||
799 | if (info->index < info->next_block_base_index) { | ||
800 | /* | ||
801 | * The GPE index is within this block, get the node. Leave the node | ||
802 | * NULL for the FADT-defined GPEs | ||
803 | */ | ||
804 | if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { | ||
805 | info->gpe_device = gpe_block->node; | ||
806 | } | ||
807 | |||
808 | info->status = AE_OK; | ||
809 | return (AE_CTRL_END); | ||
810 | } | ||
811 | |||
812 | return (AE_OK); | ||
813 | } | ||
814 | |||
815 | /****************************************************************************** | ||
816 | * | ||
817 | * FUNCTION: acpi_disable_all_gpes | ||
818 | * | ||
819 | * PARAMETERS: None | ||
820 | * | ||
821 | * RETURN: Status | ||
822 | * | ||
823 | * DESCRIPTION: Disable and clear all GPEs in all GPE blocks | ||
824 | * | ||
825 | ******************************************************************************/ | ||
826 | |||
827 | acpi_status acpi_disable_all_gpes(void) | ||
828 | { | ||
829 | acpi_status status; | ||
830 | |||
831 | ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); | ||
832 | |||
833 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
834 | if (ACPI_FAILURE(status)) { | ||
835 | return_ACPI_STATUS(status); | ||
836 | } | ||
837 | |||
838 | status = acpi_hw_disable_all_gpes(); | ||
839 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
840 | |||
841 | return_ACPI_STATUS(status); | ||
842 | } | ||
843 | |||
844 | /****************************************************************************** | ||
845 | * | ||
846 | * FUNCTION: acpi_enable_all_runtime_gpes | ||
847 | * | ||
848 | * PARAMETERS: None | ||
849 | * | ||
850 | * RETURN: Status | ||
851 | * | ||
852 | * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks | ||
853 | * | ||
854 | ******************************************************************************/ | ||
855 | |||
856 | acpi_status acpi_enable_all_runtime_gpes(void) | ||
857 | { | ||
858 | acpi_status status; | ||
859 | |||
860 | ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); | ||
861 | |||
862 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
863 | if (ACPI_FAILURE(status)) { | ||
864 | return_ACPI_STATUS(status); | ||
865 | } | ||
866 | |||
867 | status = acpi_hw_enable_all_runtime_gpes(); | ||
868 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
869 | |||
870 | return_ACPI_STATUS(status); | ||
871 | } | ||
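
The new acpi_get_gpe_device()/acpi_ev_get_gpe_device() pair above maps a flat system GPE index onto the GPE block that owns it by accumulating next_block_base_index block by block; a NULL owning device means the index falls within one of the FADT-defined blocks. The sketch below models just that matching step with illustrative types, not the kernel structures.

#include <stdio.h>

#define GPE_REGISTER_WIDTH 8            /* GPEs per register */

struct gpe_block {
        const char *device_name;        /* NULL for the FADT GPE0/GPE1 blocks */
        unsigned int register_count;
        struct gpe_block *next;
};

/* Returns the owning block, or NULL when the index is out of range */
static struct gpe_block *block_for_index(struct gpe_block *head,
                                         unsigned int index)
{
        unsigned int next_block_base_index = 0;

        for (; head; head = head->next) {
                next_block_base_index +=
                        head->register_count * GPE_REGISTER_WIDTH;
                if (index < next_block_base_index)
                        return head;
        }
        return NULL;
}

int main(void)
{
        struct gpe_block dev_block = { "\\_SB.GPED", 1, NULL };
        struct gpe_block fadt_block = { NULL, 2, &dev_block }; /* GPEs 0-15 */
        unsigned int indexes[] = { 3, 20, 99 };
        unsigned int i;

        for (i = 0; i < sizeof(indexes) / sizeof(indexes[0]); i++) {
                struct gpe_block *block =
                        block_for_index(&fadt_block, indexes[i]);

                if (!block)
                        printf("index %u: no such GPE\n", indexes[i]);
                else
                        printf("index %u: %s\n", indexes[i],
                               block->device_name ? block->device_name
                                                  : "FADT GPE block");
        }
        return 0;
}
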
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index e8750807e57d..479e7a3721be 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -43,8 +43,9 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acevents.h> | 47 | #include "acnamesp.h" |
48 | #include "acevents.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EVENTS | 50 | #define _COMPONENT ACPI_EVENTS |
50 | ACPI_MODULE_NAME("evxfregn") | 51 | ACPI_MODULE_NAME("evxfregn") |
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 74da6fa52ef1..932bbc26aa04 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -42,10 +42,11 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acinterp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acinterp.h" |
47 | #include <acpi/actables.h> | 47 | #include "acnamesp.h" |
48 | #include <acpi/acdispat.h> | 48 | #include "actables.h" |
49 | #include "acdispat.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exconfig") | 52 | ACPI_MODULE_NAME("exconfig") |
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 1d1f35adddde..0be10188316e 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acinterp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acinterp.h" |
47 | #include "amlcode.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EXECUTER | 49 | #define _COMPONENT ACPI_EXECUTER |
49 | ACPI_MODULE_NAME("exconvrt") | 50 | ACPI_MODULE_NAME("exconvrt") |
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/acpica/excreate.c
index ad09696d5069..a57ad2564ab0 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acinterp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acinterp.h" |
47 | #include <acpi/acnamesp.h> | 47 | #include "amlcode.h" |
48 | #include "acnamesp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EXECUTER | 50 | #define _COMPONENT ACPI_EXECUTER |
50 | ACPI_MODULE_NAME("excreate") | 51 | ACPI_MODULE_NAME("excreate") |
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/acpica/exdump.c
index d087a7d28aa5..aa313574b0df 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acinterp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acinterp.h" |
47 | #include <acpi/acnamesp.h> | 47 | #include "amlcode.h" |
48 | #include "acnamesp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EXECUTER | 50 | #define _COMPONENT ACPI_EXECUTER |
50 | ACPI_MODULE_NAME("exdump") | 51 | ACPI_MODULE_NAME("exdump") |
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/acpica/exfield.c
index 3e440d84226a..a352d0233857 100644
--- a/drivers/acpi/executer/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acdispat.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acinterp.h> | 46 | #include "acdispat.h" |
47 | #include "acinterp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EXECUTER | 49 | #define _COMPONENT ACPI_EXECUTER |
49 | ACPI_MODULE_NAME("exfield") | 50 | ACPI_MODULE_NAME("exfield") |
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/acpica/exfldio.c index 9ff9d1f4615d..ef58ac4e687b 100644 --- a/drivers/acpi/executer/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c | |||
@@ -42,10 +42,11 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acinterp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acinterp.h" |
47 | #include <acpi/acevents.h> | 47 | #include "amlcode.h" |
48 | #include <acpi/acdispat.h> | 48 | #include "acevents.h" |
49 | #include "acdispat.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exfldio") | 52 | ACPI_MODULE_NAME("exfldio") |
@@ -498,14 +499,13 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, | |||
498 | return_ACPI_STATUS(status); | 499 | return_ACPI_STATUS(status); |
499 | } | 500 | } |
500 | 501 | ||
501 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | ||
502 | "I/O to Data Register: ValuePtr %p\n", | ||
503 | value)); | ||
504 | |||
505 | if (read_write == ACPI_READ) { | 502 | if (read_write == ACPI_READ) { |
506 | 503 | ||
507 | /* Read the datum from the data_register */ | 504 | /* Read the datum from the data_register */ |
508 | 505 | ||
506 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | ||
507 | "Read from Data Register\n")); | ||
508 | |||
509 | status = | 509 | status = |
510 | acpi_ex_extract_from_field(obj_desc->index_field. | 510 | acpi_ex_extract_from_field(obj_desc->index_field. |
511 | data_obj, value, | 511 | data_obj, value, |
@@ -513,6 +513,10 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, | |||
513 | } else { | 513 | } else { |
514 | /* Write the datum to the data_register */ | 514 | /* Write the datum to the data_register */ |
515 | 515 | ||
516 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | ||
517 | "Write to Data Register: Value %8.8X%8.8X\n", | ||
518 | ACPI_FORMAT_UINT64(*value))); | ||
519 | |||
516 | status = | 520 | status = |
517 | acpi_ex_insert_into_field(obj_desc->index_field. | 521 | acpi_ex_insert_into_field(obj_desc->index_field. |
518 | data_obj, value, | 522 | data_obj, value, |
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/acpica/exmisc.c index efb191340059..6b0747ac683b 100644 --- a/drivers/acpi/executer/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c | |||
@@ -43,9 +43,10 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/amlcode.h> | 47 | #include "acinterp.h" |
48 | #include <acpi/amlresrc.h> | 48 | #include "amlcode.h" |
49 | #include "amlresrc.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exmisc") | 52 | ACPI_MODULE_NAME("exmisc") |
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/acpica/exmutex.c index a8bf3d713e28..d301c1f363ef 100644 --- a/drivers/acpi/executer/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c | |||
@@ -43,8 +43,9 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acevents.h> | 47 | #include "acinterp.h" |
48 | #include "acevents.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EXECUTER | 50 | #define _COMPONENT ACPI_EXECUTER |
50 | ACPI_MODULE_NAME("exmutex") | 51 | ACPI_MODULE_NAME("exmutex") |
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/acpica/exnames.c index 817e67be3697..ffdae122d94a 100644 --- a/drivers/acpi/executer/exnames.c +++ b/drivers/acpi/acpica/exnames.c | |||
@@ -43,8 +43,9 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/amlcode.h> | 47 | #include "acinterp.h" |
48 | #include "amlcode.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_EXECUTER | 50 | #define _COMPONENT ACPI_EXECUTER |
50 | ACPI_MODULE_NAME("exnames") | 51 | ACPI_MODULE_NAME("exnames") |
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index f622f9eac8a1..b530480cc7d5 100644 --- a/drivers/acpi/executer/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c | |||
@@ -43,11 +43,12 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acparser.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acdispat.h> | 47 | #include "acparser.h" |
48 | #include <acpi/acinterp.h> | 48 | #include "acdispat.h" |
49 | #include <acpi/amlcode.h> | 49 | #include "acinterp.h" |
50 | #include <acpi/acnamesp.h> | 50 | #include "amlcode.h" |
51 | #include "acnamesp.h" | ||
51 | 52 | ||
52 | #define _COMPONENT ACPI_EXECUTER | 53 | #define _COMPONENT ACPI_EXECUTER |
53 | ACPI_MODULE_NAME("exoparg1") | 54 | ACPI_MODULE_NAME("exoparg1") |
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 368def5dffce..0b4f513ca885 100644 --- a/drivers/acpi/executer/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c | |||
@@ -42,10 +42,11 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acinterp.h> | 46 | #include "acparser.h" |
47 | #include <acpi/acevents.h> | 47 | #include "acinterp.h" |
48 | #include <acpi/amlcode.h> | 48 | #include "acevents.h" |
49 | #include "amlcode.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exoparg2") | 52 | ACPI_MODULE_NAME("exoparg2") |
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 9cb4197681af..c6520bbf882b 100644 --- a/drivers/acpi/executer/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c | |||
@@ -43,9 +43,10 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acparser.h> | 47 | #include "acinterp.h" |
48 | #include <acpi/amlcode.h> | 48 | #include "acparser.h" |
49 | #include "amlcode.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exoparg3") | 52 | ACPI_MODULE_NAME("exoparg3") |
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 67d48737af53..ae43f7670a6c 100644 --- a/drivers/acpi/executer/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c | |||
@@ -43,9 +43,10 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acparser.h> | 47 | #include "acinterp.h" |
48 | #include <acpi/amlcode.h> | 48 | #include "acparser.h" |
49 | #include "amlcode.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exoparg6") | 52 | ACPI_MODULE_NAME("exoparg6") |
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/acpica/exprep.c index a7dc87ecee37..a226f74d4a5c 100644 --- a/drivers/acpi/executer/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
@@ -43,9 +43,10 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/amlcode.h> | 47 | #include "acinterp.h" |
48 | #include <acpi/acnamesp.h> | 48 | #include "amlcode.h" |
49 | #include "acnamesp.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exprep") | 52 | ACPI_MODULE_NAME("exprep") |
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/acpica/exregion.c index 7a41c409ae4d..76ec8ff903b8 100644 --- a/drivers/acpi/executer/exregion.c +++ b/drivers/acpi/acpica/exregion.c | |||
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include "acinterp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EXECUTER | 49 | #define _COMPONENT ACPI_EXECUTER |
49 | ACPI_MODULE_NAME("exregion") | 50 | ACPI_MODULE_NAME("exregion") |
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/acpica/exresnte.c index 423ad3635f3d..a063a74006f6 100644 --- a/drivers/acpi/executer/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c | |||
@@ -43,9 +43,10 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acdispat.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acdispat.h" |
48 | #include <acpi/acnamesp.h> | 48 | #include "acinterp.h" |
49 | #include "acnamesp.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exresnte") | 52 | ACPI_MODULE_NAME("exresnte") |
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/acpica/exresolv.c index 60e8c47128e9..f6105a6d6126 100644 --- a/drivers/acpi/executer/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c | |||
@@ -43,10 +43,11 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/amlcode.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acdispat.h> | 47 | #include "amlcode.h" |
48 | #include <acpi/acinterp.h> | 48 | #include "acdispat.h" |
49 | #include <acpi/acnamesp.h> | 49 | #include "acinterp.h" |
50 | #include "acnamesp.h" | ||
50 | 51 | ||
51 | #define _COMPONENT ACPI_EXECUTER | 52 | #define _COMPONENT ACPI_EXECUTER |
52 | ACPI_MODULE_NAME("exresolv") | 53 | ACPI_MODULE_NAME("exresolv") |
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/acpica/exresop.c index 0bb82593da72..3c3802764bfb 100644 --- a/drivers/acpi/executer/exresop.c +++ b/drivers/acpi/acpica/exresop.c | |||
@@ -43,10 +43,11 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/amlcode.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acparser.h> | 47 | #include "amlcode.h" |
48 | #include <acpi/acinterp.h> | 48 | #include "acparser.h" |
49 | #include <acpi/acnamesp.h> | 49 | #include "acinterp.h" |
50 | #include "acnamesp.h" | ||
50 | 51 | ||
51 | #define _COMPONENT ACPI_EXECUTER | 52 | #define _COMPONENT ACPI_EXECUTER |
52 | ACPI_MODULE_NAME("exresop") | 53 | ACPI_MODULE_NAME("exresop") |
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/acpica/exstore.c index 1c118ba78adb..e35e9b4f6a4e 100644 --- a/drivers/acpi/executer/exstore.c +++ b/drivers/acpi/acpica/exstore.c | |||
@@ -43,10 +43,11 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acdispat.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acdispat.h" |
48 | #include <acpi/amlcode.h> | 48 | #include "acinterp.h" |
49 | #include <acpi/acnamesp.h> | 49 | #include "amlcode.h" |
50 | #include "acnamesp.h" | ||
50 | 51 | ||
51 | #define _COMPONENT ACPI_EXECUTER | 52 | #define _COMPONENT ACPI_EXECUTER |
52 | ACPI_MODULE_NAME("exstore") | 53 | ACPI_MODULE_NAME("exstore") |
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/acpica/exstoren.c index eef61a00803e..145d15305f70 100644 --- a/drivers/acpi/executer/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c | |||
@@ -44,8 +44,9 @@ | |||
44 | */ | 44 | */ |
45 | 45 | ||
46 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
47 | #include <acpi/acinterp.h> | 47 | #include "accommon.h" |
48 | #include <acpi/amlcode.h> | 48 | #include "acinterp.h" |
49 | #include "amlcode.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_EXECUTER | 51 | #define _COMPONENT ACPI_EXECUTER |
51 | ACPI_MODULE_NAME("exstoren") | 52 | ACPI_MODULE_NAME("exstoren") |
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/acpica/exstorob.c index 9a75ff09fb0c..67340cc70142 100644 --- a/drivers/acpi/executer/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c | |||
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include "acinterp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EXECUTER | 49 | #define _COMPONENT ACPI_EXECUTER |
49 | ACPI_MODULE_NAME("exstorob") | 50 | ACPI_MODULE_NAME("exstorob") |
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/acpica/exsystem.c index 68990f1df371..3d00b9357233 100644 --- a/drivers/acpi/executer/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c | |||
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acinterp.h> | 46 | #include "accommon.h" |
47 | #include "acinterp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_EXECUTER | 49 | #define _COMPONENT ACPI_EXECUTER |
49 | ACPI_MODULE_NAME("exsystem") | 50 | ACPI_MODULE_NAME("exsystem") |
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/acpica/exutils.c index 86c03880b523..32b85d68e756 100644 --- a/drivers/acpi/executer/exutils.c +++ b/drivers/acpi/acpica/exutils.c | |||
@@ -59,8 +59,9 @@ | |||
59 | #define DEFINE_AML_GLOBALS | 59 | #define DEFINE_AML_GLOBALS |
60 | 60 | ||
61 | #include <acpi/acpi.h> | 61 | #include <acpi/acpi.h> |
62 | #include <acpi/acinterp.h> | 62 | #include "accommon.h" |
63 | #include <acpi/amlcode.h> | 63 | #include "acinterp.h" |
64 | #include "amlcode.h" | ||
64 | 65 | ||
65 | #define _COMPONENT ACPI_EXECUTER | 66 | #define _COMPONENT ACPI_EXECUTER |
66 | ACPI_MODULE_NAME("exutils") | 67 | ACPI_MODULE_NAME("exutils") |
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 816894ea839e..a9d4fea4167f 100644 --- a/drivers/acpi/hardware/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c | |||
@@ -43,6 +43,7 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_HARDWARE | 48 | #define _COMPONENT ACPI_HARDWARE |
48 | ACPI_MODULE_NAME("hwacpi") | 49 | ACPI_MODULE_NAME("hwacpi") |
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 0b80db9d9197..2013b66745d2 100644 --- a/drivers/acpi/hardware/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c | |||
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acevents.h> | 46 | #include "accommon.h" |
47 | #include "acevents.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_HARDWARE | 49 | #define _COMPONENT ACPI_HARDWARE |
49 | ACPI_MODULE_NAME("hwgpe") | 50 | ACPI_MODULE_NAME("hwgpe") |
@@ -51,7 +52,8 @@ ACPI_MODULE_NAME("hwgpe") | |||
51 | /* Local prototypes */ | 52 | /* Local prototypes */ |
52 | static acpi_status | 53 | static acpi_status |
53 | acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 54 | acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
54 | struct acpi_gpe_block_info *gpe_block); | 55 | struct acpi_gpe_block_info *gpe_block, |
56 | void *context); | ||
55 | 57 | ||
56 | /****************************************************************************** | 58 | /****************************************************************************** |
57 | * | 59 | * |
@@ -80,8 +82,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
80 | 82 | ||
81 | /* Get current value of the enable register that contains this GPE */ | 83 | /* Get current value of the enable register that contains this GPE */ |
82 | 84 | ||
83 | status = acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &enable_mask, | 85 | status = acpi_read(&enable_mask, &gpe_register_info->enable_address); |
84 | &gpe_register_info->enable_address); | ||
85 | if (ACPI_FAILURE(status)) { | 86 | if (ACPI_FAILURE(status)) { |
86 | return (status); | 87 | return (status); |
87 | } | 88 | } |
@@ -95,9 +96,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
95 | 96 | ||
96 | /* Write the updated enable mask */ | 97 | /* Write the updated enable mask */ |
97 | 98 | ||
98 | status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, enable_mask, | 99 | status = acpi_write(enable_mask, &gpe_register_info->enable_address); |
99 | &gpe_register_info->enable_address); | ||
100 | |||
101 | return (status); | 100 | return (status); |
102 | } | 101 | } |
103 | 102 | ||
@@ -132,8 +131,8 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info) | |||
132 | 131 | ||
133 | /* Write the entire GPE (runtime) enable register */ | 132 | /* Write the entire GPE (runtime) enable register */ |
134 | 133 | ||
135 | status = acpi_hw_low_level_write(8, gpe_register_info->enable_for_run, | 134 | status = acpi_write(gpe_register_info->enable_for_run, |
136 | &gpe_register_info->enable_address); | 135 | &gpe_register_info->enable_address); |
137 | 136 | ||
138 | return (status); | 137 | return (status); |
139 | } | 138 | } |
@@ -166,9 +165,8 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) | |||
166 | * Write a one to the appropriate bit in the status register to | 165 | * Write a one to the appropriate bit in the status register to |
167 | * clear this GPE. | 166 | * clear this GPE. |
168 | */ | 167 | */ |
169 | status = acpi_hw_low_level_write(8, register_bit, | 168 | status = acpi_write(register_bit, |
170 | &gpe_event_info->register_info-> | 169 | &gpe_event_info->register_info->status_address); |
171 | status_address); | ||
172 | 170 | ||
173 | return (status); | 171 | return (status); |
174 | } | 172 | } |
@@ -227,9 +225,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, | |||
227 | 225 | ||
228 | /* GPE currently active (status bit == 1)? */ | 226 | /* GPE currently active (status bit == 1)? */ |
229 | 227 | ||
230 | status = | 228 | status = acpi_read(&in_byte, &gpe_register_info->status_address); |
231 | acpi_hw_low_level_read(8, &in_byte, | ||
232 | &gpe_register_info->status_address); | ||
233 | if (ACPI_FAILURE(status)) { | 229 | if (ACPI_FAILURE(status)) { |
234 | goto unlock_and_exit; | 230 | goto unlock_and_exit; |
235 | } | 231 | } |
@@ -260,8 +256,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, | |||
260 | ******************************************************************************/ | 256 | ******************************************************************************/ |
261 | 257 | ||
262 | acpi_status | 258 | acpi_status |
263 | acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | 259 | acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
264 | struct acpi_gpe_block_info * gpe_block) | 260 | struct acpi_gpe_block_info *gpe_block, void *context) |
265 | { | 261 | { |
266 | u32 i; | 262 | u32 i; |
267 | acpi_status status; | 263 | acpi_status status; |
@@ -272,9 +268,9 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | |||
272 | 268 | ||
273 | /* Disable all GPEs in this register */ | 269 | /* Disable all GPEs in this register */ |
274 | 270 | ||
275 | status = acpi_hw_low_level_write(8, 0x00, | 271 | status = |
276 | &gpe_block->register_info[i]. | 272 | acpi_write(0x00, |
277 | enable_address); | 273 | &gpe_block->register_info[i].enable_address); |
278 | if (ACPI_FAILURE(status)) { | 274 | if (ACPI_FAILURE(status)) { |
279 | return (status); | 275 | return (status); |
280 | } | 276 | } |
@@ -297,8 +293,8 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | |||
297 | ******************************************************************************/ | 293 | ******************************************************************************/ |
298 | 294 | ||
299 | acpi_status | 295 | acpi_status |
300 | acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | 296 | acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
301 | struct acpi_gpe_block_info * gpe_block) | 297 | struct acpi_gpe_block_info *gpe_block, void *context) |
302 | { | 298 | { |
303 | u32 i; | 299 | u32 i; |
304 | acpi_status status; | 300 | acpi_status status; |
@@ -309,9 +305,9 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | |||
309 | 305 | ||
310 | /* Clear status on all GPEs in this register */ | 306 | /* Clear status on all GPEs in this register */ |
311 | 307 | ||
312 | status = acpi_hw_low_level_write(8, 0xFF, | 308 | status = |
313 | &gpe_block->register_info[i]. | 309 | acpi_write(0xFF, |
314 | status_address); | 310 | &gpe_block->register_info[i].status_address); |
315 | if (ACPI_FAILURE(status)) { | 311 | if (ACPI_FAILURE(status)) { |
316 | return (status); | 312 | return (status); |
317 | } | 313 | } |
@@ -335,8 +331,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | |||
335 | ******************************************************************************/ | 331 | ******************************************************************************/ |
336 | 332 | ||
337 | acpi_status | 333 | acpi_status |
338 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | 334 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
339 | struct acpi_gpe_block_info * gpe_block) | 335 | struct acpi_gpe_block_info *gpe_block, void *context) |
340 | { | 336 | { |
341 | u32 i; | 337 | u32 i; |
342 | acpi_status status; | 338 | acpi_status status; |
@@ -352,12 +348,9 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | |||
352 | 348 | ||
353 | /* Enable all "runtime" GPEs in this register */ | 349 | /* Enable all "runtime" GPEs in this register */ |
354 | 350 | ||
355 | status = | 351 | status = acpi_write(gpe_block->register_info[i].enable_for_run, |
356 | acpi_hw_low_level_write(8, | 352 | &gpe_block->register_info[i]. |
357 | gpe_block->register_info[i]. | 353 | enable_address); |
358 | enable_for_run, | ||
359 | &gpe_block->register_info[i]. | ||
360 | enable_address); | ||
361 | if (ACPI_FAILURE(status)) { | 354 | if (ACPI_FAILURE(status)) { |
362 | return (status); | 355 | return (status); |
363 | } | 356 | } |
@@ -382,7 +375,8 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | |||
382 | 375 | ||
383 | static acpi_status | 376 | static acpi_status |
384 | acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 377 | acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
385 | struct acpi_gpe_block_info *gpe_block) | 378 | struct acpi_gpe_block_info *gpe_block, |
379 | void *context) | ||
386 | { | 380 | { |
387 | u32 i; | 381 | u32 i; |
388 | acpi_status status; | 382 | acpi_status status; |
@@ -396,11 +390,9 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
396 | 390 | ||
397 | /* Enable all "wake" GPEs in this register */ | 391 | /* Enable all "wake" GPEs in this register */ |
398 | 392 | ||
399 | status = acpi_hw_low_level_write(8, | 393 | status = acpi_write(gpe_block->register_info[i].enable_for_wake, |
400 | gpe_block->register_info[i]. | 394 | &gpe_block->register_info[i]. |
401 | enable_for_wake, | 395 | enable_address); |
402 | &gpe_block->register_info[i]. | ||
403 | enable_address); | ||
404 | if (ACPI_FAILURE(status)) { | 396 | if (ACPI_FAILURE(status)) { |
405 | return (status); | 397 | return (status); |
406 | } | 398 | } |
@@ -427,8 +419,8 @@ acpi_status acpi_hw_disable_all_gpes(void) | |||
427 | 419 | ||
428 | ACPI_FUNCTION_TRACE(hw_disable_all_gpes); | 420 | ACPI_FUNCTION_TRACE(hw_disable_all_gpes); |
429 | 421 | ||
430 | status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block); | 422 | status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); |
431 | status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block); | 423 | status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); |
432 | return_ACPI_STATUS(status); | 424 | return_ACPI_STATUS(status); |
433 | } | 425 | } |
434 | 426 | ||
@@ -450,7 +442,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void) | |||
450 | 442 | ||
451 | ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes); | 443 | ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes); |
452 | 444 | ||
453 | status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block); | 445 | status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block, NULL); |
454 | return_ACPI_STATUS(status); | 446 | return_ACPI_STATUS(status); |
455 | } | 447 | } |
456 | 448 | ||
@@ -472,6 +464,6 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void) | |||
472 | 464 | ||
473 | ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes); | 465 | ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes); |
474 | 466 | ||
475 | status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block); | 467 | status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL); |
476 | return_ACPI_STATUS(status); | 468 | return_ACPI_STATUS(status); |
477 | } | 469 | } |
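The hwgpe.c hunks above make two related changes: every per-GPE-block handler (disable, clear, enable-runtime, enable-wakeup) gains a trailing `void *context` argument so that `acpi_ev_walk_gpe_list()` can hand caller state through to its callback (the callers here simply pass NULL), and the fixed-width `acpi_hw_low_level_read()`/`acpi_hw_low_level_write()` helpers are replaced by the generic `acpi_read()`/`acpi_write()` that take a GAS pointer. Below is a minimal stand-alone model of the new walk/callback shape only, using simplified stand-in types rather than the real `struct acpi_gpe_xrupt_info`/`struct acpi_gpe_block_info`:

```c
/*
 * Stand-alone model of the new callback shape (simplified types; the real
 * ones are struct acpi_gpe_xrupt_info / struct acpi_gpe_block_info and
 * acpi_status in the ACPICA headers).
 */
#include <stdio.h>

struct gpe_block {
	const char *name;
	struct gpe_block *next;
};

/* Each block handler now receives an opaque context as its last argument. */
typedef int (*gpe_block_callback)(struct gpe_block *block, void *context);

static int walk_gpe_list(struct gpe_block *head,
			 gpe_block_callback callback, void *context)
{
	struct gpe_block *block;
	int status;

	for (block = head; block; block = block->next) {
		status = callback(block, context);
		if (status)
			return status;	/* stop on first failure */
	}
	return 0;
}

static int disable_block(struct gpe_block *block, void *context)
{
	(void)context;			/* callers that need no state pass NULL */
	printf("disable %s\n", block->name);
	return 0;
}

int main(void)
{
	struct gpe_block gpe1 = { "GPE1", NULL };
	struct gpe_block gpe0 = { "GPE0", &gpe1 };

	return walk_gpe_list(&gpe0, disable_block, NULL);
}
```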
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c new file mode 100644 index 000000000000..4dc43b018517 --- /dev/null +++ b/drivers/acpi/acpica/hwregs.c | |||
@@ -0,0 +1,353 @@ | |||
1 | |||
2 | /******************************************************************************* | ||
3 | * | ||
4 | * Module Name: hwregs - Read/write access functions for the various ACPI | ||
5 | * control and status registers. | ||
6 | * | ||
7 | ******************************************************************************/ | ||
8 | |||
9 | /* | ||
10 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
11 | * All rights reserved. | ||
12 | * | ||
13 | * Redistribution and use in source and binary forms, with or without | ||
14 | * modification, are permitted provided that the following conditions | ||
15 | * are met: | ||
16 | * 1. Redistributions of source code must retain the above copyright | ||
17 | * notice, this list of conditions, and the following disclaimer, | ||
18 | * without modification. | ||
19 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
20 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
21 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
22 | * including a substantially similar Disclaimer requirement for further | ||
23 | * binary redistribution. | ||
24 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
25 | * of any contributors may be used to endorse or promote products derived | ||
26 | * from this software without specific prior written permission. | ||
27 | * | ||
28 | * Alternatively, this software may be distributed under the terms of the | ||
29 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
30 | * Software Foundation. | ||
31 | * | ||
32 | * NO WARRANTY | ||
33 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
34 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
35 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
36 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
37 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
38 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
39 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
40 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
41 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
42 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
43 | * POSSIBILITY OF SUCH DAMAGES. | ||
44 | */ | ||
45 | |||
46 | #include <acpi/acpi.h> | ||
47 | #include "accommon.h" | ||
48 | #include "acnamesp.h" | ||
49 | #include "acevents.h" | ||
50 | |||
51 | #define _COMPONENT ACPI_HARDWARE | ||
52 | ACPI_MODULE_NAME("hwregs") | ||
53 | |||
54 | /******************************************************************************* | ||
55 | * | ||
56 | * FUNCTION: acpi_hw_clear_acpi_status | ||
57 | * | ||
58 | * PARAMETERS: None | ||
59 | * | ||
60 | * RETURN: Status | ||
61 | * | ||
62 | * DESCRIPTION: Clears all fixed and general purpose status bits | ||
63 | * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED | ||
64 | * | ||
65 | ******************************************************************************/ | ||
66 | acpi_status acpi_hw_clear_acpi_status(void) | ||
67 | { | ||
68 | acpi_status status; | ||
69 | acpi_cpu_flags lock_flags = 0; | ||
70 | |||
71 | ACPI_FUNCTION_TRACE(hw_clear_acpi_status); | ||
72 | |||
73 | ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n", | ||
74 | ACPI_BITMASK_ALL_FIXED_STATUS, | ||
75 | (u16) acpi_gbl_FADT.xpm1a_event_block.address)); | ||
76 | |||
77 | lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); | ||
78 | |||
79 | status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, | ||
80 | ACPI_BITMASK_ALL_FIXED_STATUS); | ||
81 | if (ACPI_FAILURE(status)) { | ||
82 | goto unlock_and_exit; | ||
83 | } | ||
84 | |||
85 | /* Clear the fixed events */ | ||
86 | |||
87 | if (acpi_gbl_FADT.xpm1b_event_block.address) { | ||
88 | status = acpi_write(ACPI_BITMASK_ALL_FIXED_STATUS, | ||
89 | &acpi_gbl_FADT.xpm1b_event_block); | ||
90 | if (ACPI_FAILURE(status)) { | ||
91 | goto unlock_and_exit; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | /* Clear the GPE Bits in all GPE registers in all GPE blocks */ | ||
96 | |||
97 | status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); | ||
98 | |||
99 | unlock_and_exit: | ||
100 | acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); | ||
101 | return_ACPI_STATUS(status); | ||
102 | } | ||
103 | |||
104 | /******************************************************************************* | ||
105 | * | ||
106 | * FUNCTION: acpi_hw_get_register_bit_mask | ||
107 | * | ||
108 | * PARAMETERS: register_id - Index of ACPI Register to access | ||
109 | * | ||
110 | * RETURN: The bitmask to be used when accessing the register | ||
111 | * | ||
112 | * DESCRIPTION: Map register_id into a register bitmask. | ||
113 | * | ||
114 | ******************************************************************************/ | ||
115 | |||
116 | struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id) | ||
117 | { | ||
118 | ACPI_FUNCTION_ENTRY(); | ||
119 | |||
120 | if (register_id > ACPI_BITREG_MAX) { | ||
121 | ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X", | ||
122 | register_id)); | ||
123 | return (NULL); | ||
124 | } | ||
125 | |||
126 | return (&acpi_gbl_bit_register_info[register_id]); | ||
127 | } | ||
128 | |||
129 | /****************************************************************************** | ||
130 | * | ||
131 | * FUNCTION: acpi_hw_register_read | ||
132 | * | ||
133 | * PARAMETERS: register_id - ACPI Register ID | ||
134 | * return_value - Where the register value is returned | ||
135 | * | ||
136 | * RETURN: Status and the value read. | ||
137 | * | ||
138 | * DESCRIPTION: Read from the specified ACPI register | ||
139 | * | ||
140 | ******************************************************************************/ | ||
141 | acpi_status | ||
142 | acpi_hw_register_read(u32 register_id, u32 * return_value) | ||
143 | { | ||
144 | u32 value1 = 0; | ||
145 | u32 value2 = 0; | ||
146 | acpi_status status; | ||
147 | |||
148 | ACPI_FUNCTION_TRACE(hw_register_read); | ||
149 | |||
150 | switch (register_id) { | ||
151 | case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ | ||
152 | |||
153 | status = acpi_read(&value1, &acpi_gbl_FADT.xpm1a_event_block); | ||
154 | if (ACPI_FAILURE(status)) { | ||
155 | goto exit; | ||
156 | } | ||
157 | |||
158 | /* PM1B is optional */ | ||
159 | |||
160 | status = acpi_read(&value2, &acpi_gbl_FADT.xpm1b_event_block); | ||
161 | value1 |= value2; | ||
162 | break; | ||
163 | |||
164 | case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ | ||
165 | |||
166 | status = acpi_read(&value1, &acpi_gbl_xpm1a_enable); | ||
167 | if (ACPI_FAILURE(status)) { | ||
168 | goto exit; | ||
169 | } | ||
170 | |||
171 | /* PM1B is optional */ | ||
172 | |||
173 | status = acpi_read(&value2, &acpi_gbl_xpm1b_enable); | ||
174 | value1 |= value2; | ||
175 | break; | ||
176 | |||
177 | case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ | ||
178 | |||
179 | status = acpi_read(&value1, &acpi_gbl_FADT.xpm1a_control_block); | ||
180 | if (ACPI_FAILURE(status)) { | ||
181 | goto exit; | ||
182 | } | ||
183 | |||
184 | status = acpi_read(&value2, &acpi_gbl_FADT.xpm1b_control_block); | ||
185 | value1 |= value2; | ||
186 | break; | ||
187 | |||
188 | case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ | ||
189 | |||
190 | status = acpi_read(&value1, &acpi_gbl_FADT.xpm2_control_block); | ||
191 | break; | ||
192 | |||
193 | case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ | ||
194 | |||
195 | status = acpi_read(&value1, &acpi_gbl_FADT.xpm_timer_block); | ||
196 | break; | ||
197 | |||
198 | case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ | ||
199 | |||
200 | status = | ||
201 | acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8); | ||
202 | break; | ||
203 | |||
204 | default: | ||
205 | ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id)); | ||
206 | status = AE_BAD_PARAMETER; | ||
207 | break; | ||
208 | } | ||
209 | |||
210 | exit: | ||
211 | |||
212 | if (ACPI_SUCCESS(status)) { | ||
213 | *return_value = value1; | ||
214 | } | ||
215 | |||
216 | return_ACPI_STATUS(status); | ||
217 | } | ||
218 | |||
219 | /****************************************************************************** | ||
220 | * | ||
221 | * FUNCTION: acpi_hw_register_write | ||
222 | * | ||
223 | * PARAMETERS: register_id - ACPI Register ID | ||
224 | * Value - The value to write | ||
225 | * | ||
226 | * RETURN: Status | ||
227 | * | ||
228 | * DESCRIPTION: Write to the specified ACPI register | ||
229 | * | ||
230 | * NOTE: In accordance with the ACPI specification, this function automatically | ||
231 | * preserves the value of the following bits, meaning that these bits cannot be | ||
232 | * changed via this interface: | ||
233 | * | ||
234 | * PM1_CONTROL[0] = SCI_EN | ||
235 | * PM1_CONTROL[9] | ||
236 | * PM1_STATUS[11] | ||
237 | * | ||
238 | * ACPI References: | ||
239 | * 1) Hardware Ignored Bits: When software writes to a register with ignored | ||
240 | * bit fields, it preserves the ignored bit fields | ||
241 | * 2) SCI_EN: OSPM always preserves this bit position | ||
242 | * | ||
243 | ******************************************************************************/ | ||
244 | |||
245 | acpi_status acpi_hw_register_write(u32 register_id, u32 value) | ||
246 | { | ||
247 | acpi_status status; | ||
248 | u32 read_value; | ||
249 | |||
250 | ACPI_FUNCTION_TRACE(hw_register_write); | ||
251 | |||
252 | switch (register_id) { | ||
253 | case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ | ||
254 | |||
255 | /* Perform a read first to preserve certain bits (per ACPI spec) */ | ||
256 | |||
257 | status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, | ||
258 | &read_value); | ||
259 | if (ACPI_FAILURE(status)) { | ||
260 | goto exit; | ||
261 | } | ||
262 | |||
263 | /* Insert the bits to be preserved */ | ||
264 | |||
265 | ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS, | ||
266 | read_value); | ||
267 | |||
268 | /* Now we can write the data */ | ||
269 | |||
270 | status = acpi_write(value, &acpi_gbl_FADT.xpm1a_event_block); | ||
271 | if (ACPI_FAILURE(status)) { | ||
272 | goto exit; | ||
273 | } | ||
274 | |||
275 | /* PM1B is optional */ | ||
276 | |||
277 | status = acpi_write(value, &acpi_gbl_FADT.xpm1b_event_block); | ||
278 | break; | ||
279 | |||
280 | case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ | ||
281 | |||
282 | status = acpi_write(value, &acpi_gbl_xpm1a_enable); | ||
283 | if (ACPI_FAILURE(status)) { | ||
284 | goto exit; | ||
285 | } | ||
286 | |||
287 | /* PM1B is optional */ | ||
288 | |||
289 | status = acpi_write(value, &acpi_gbl_xpm1b_enable); | ||
290 | break; | ||
291 | |||
292 | case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ | ||
293 | |||
294 | /* | ||
295 | * Perform a read first to preserve certain bits (per ACPI spec) | ||
296 | */ | ||
297 | status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, | ||
298 | &read_value); | ||
299 | if (ACPI_FAILURE(status)) { | ||
300 | goto exit; | ||
301 | } | ||
302 | |||
303 | /* Insert the bits to be preserved */ | ||
304 | |||
305 | ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS, | ||
306 | read_value); | ||
307 | |||
308 | /* Now we can write the data */ | ||
309 | |||
310 | status = acpi_write(value, &acpi_gbl_FADT.xpm1a_control_block); | ||
311 | if (ACPI_FAILURE(status)) { | ||
312 | goto exit; | ||
313 | } | ||
314 | |||
315 | status = acpi_write(value, &acpi_gbl_FADT.xpm1b_control_block); | ||
316 | break; | ||
317 | |||
318 | case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */ | ||
319 | |||
320 | status = acpi_write(value, &acpi_gbl_FADT.xpm1a_control_block); | ||
321 | break; | ||
322 | |||
323 | case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */ | ||
324 | |||
325 | status = acpi_write(value, &acpi_gbl_FADT.xpm1b_control_block); | ||
326 | break; | ||
327 | |||
328 | case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ | ||
329 | |||
330 | status = acpi_write(value, &acpi_gbl_FADT.xpm2_control_block); | ||
331 | break; | ||
332 | |||
333 | case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ | ||
334 | |||
335 | status = acpi_write(value, &acpi_gbl_FADT.xpm_timer_block); | ||
336 | break; | ||
337 | |||
338 | case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ | ||
339 | |||
340 | /* SMI_CMD is currently always in IO space */ | ||
341 | |||
342 | status = | ||
343 | acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8); | ||
344 | break; | ||
345 | |||
346 | default: | ||
347 | status = AE_BAD_PARAMETER; | ||
348 | break; | ||
349 | } | ||
350 | |||
351 | exit: | ||
352 | return_ACPI_STATUS(status); | ||
353 | } | ||
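In the new hwregs.c above, `acpi_hw_register_write()` preserves spec-mandated bit positions in PM1_STATUS and PM1_CONTROL by reading the register first and folding the hardware value back in under a mask (`ACPI_INSERT_BITS`), so those bits can never be changed through this interface. A small stand-alone model of that insert-under-mask step follows; the mask value is illustrative, not the real ACPI_PM1_CONTROL_PRESERVED_BITS:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Keep the bits selected by preserved_mask from the current hardware value,
 * take everything else from the caller's value -- the same effect as
 * ACPI_INSERT_BITS(value, mask, read_value) in the hunk above.
 */
static uint32_t insert_preserved_bits(uint32_t value, uint32_t preserved_mask,
				      uint32_t read_value)
{
	return (value & ~preserved_mask) | (read_value & preserved_mask);
}

int main(void)
{
	uint32_t read_value = 0x0201;	/* pretend register read-back */
	uint32_t value      = 0x2000;	/* what the caller asked to write */
	uint32_t mask       = 0x0201;	/* illustrative "preserved" positions */

	/* Prints 0x2201: preserved bits kept, requested bit added */
	printf("written value: 0x%04X\n",
	       (unsigned int)insert_preserved_bits(value, mask, read_value));
	return 0;
}
```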
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index 25dccdf179b9..a2af2a4f2f26 100644 --- a/drivers/acpi/hardware/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c | |||
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/actables.h> | 46 | #include "accommon.h" |
47 | #include "actables.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_HARDWARE | 49 | #define _COMPONENT ACPI_HARDWARE |
49 | ACPI_MODULE_NAME("hwsleep") | 50 | ACPI_MODULE_NAME("hwsleep") |
@@ -52,31 +53,19 @@ ACPI_MODULE_NAME("hwsleep") | |||
52 | * | 53 | * |
53 | * FUNCTION: acpi_set_firmware_waking_vector | 54 | * FUNCTION: acpi_set_firmware_waking_vector |
54 | * | 55 | * |
55 | * PARAMETERS: physical_address - Physical address of ACPI real mode | 56 | * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode |
56 | * entry point. | 57 | * entry point. |
57 | * | 58 | * |
58 | * RETURN: Status | 59 | * RETURN: Status |
59 | * | 60 | * |
60 | * DESCRIPTION: Access function for the firmware_waking_vector field in FACS | 61 | * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS |
61 | * | 62 | * |
62 | ******************************************************************************/ | 63 | ******************************************************************************/ |
63 | acpi_status | 64 | acpi_status |
64 | acpi_set_firmware_waking_vector(acpi_physical_address physical_address) | 65 | acpi_set_firmware_waking_vector(u32 physical_address) |
65 | { | 66 | { |
66 | struct acpi_table_facs *facs; | ||
67 | acpi_status status; | ||
68 | |||
69 | ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector); | 67 | ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector); |
70 | 68 | ||
71 | /* Get the FACS */ | ||
72 | |||
73 | status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, | ||
74 | ACPI_CAST_INDIRECT_PTR(struct | ||
75 | acpi_table_header, | ||
76 | &facs)); | ||
77 | if (ACPI_FAILURE(status)) { | ||
78 | return_ACPI_STATUS(status); | ||
79 | } | ||
80 | 69 | ||
81 | /* | 70 | /* |
82 | * According to the ACPI specification 2.0c and later, the 64-bit | 71 | * According to the ACPI specification 2.0c and later, the 64-bit |
@@ -85,10 +74,16 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address) | |||
85 | * Protected Mode. Some systems (for example HP dv5-1004nr) are known | 74 | * Protected Mode. Some systems (for example HP dv5-1004nr) are known |
86 | * to fail to resume if the 64-bit vector is used. | 75 | * to fail to resume if the 64-bit vector is used. |
87 | */ | 76 | */ |
88 | if (facs->version >= 1) | ||
89 | facs->xfirmware_waking_vector = 0; | ||
90 | 77 | ||
91 | facs->firmware_waking_vector = (u32)physical_address; | 78 | /* Set the 32-bit vector */ |
79 | |||
80 | acpi_gbl_FACS->firmware_waking_vector = physical_address; | ||
81 | |||
82 | /* Clear the 64-bit vector if it exists */ | ||
83 | |||
84 | if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) { | ||
85 | acpi_gbl_FACS->xfirmware_waking_vector = 0; | ||
86 | } | ||
92 | 87 | ||
93 | return_ACPI_STATUS(AE_OK); | 88 | return_ACPI_STATUS(AE_OK); |
94 | } | 89 | } |
@@ -97,48 +92,39 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector) | |||
97 | 92 | ||
98 | /******************************************************************************* | 93 | /******************************************************************************* |
99 | * | 94 | * |
100 | * FUNCTION: acpi_get_firmware_waking_vector | 95 | * FUNCTION: acpi_set_firmware_waking_vector64 |
101 | * | 96 | * |
102 | * PARAMETERS: *physical_address - Where the contents of | 97 | * PARAMETERS: physical_address - 64-bit physical address of ACPI protected |
103 | * the firmware_waking_vector field of | 98 | * mode entry point. |
104 | * the FACS will be returned. | ||
105 | * | 99 | * |
106 | * RETURN: Status, vector | 100 | * RETURN: Status |
107 | * | 101 | * |
108 | * DESCRIPTION: Access function for the firmware_waking_vector field in FACS | 102 | * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if |
103 | * it exists in the table. | ||
109 | * | 104 | * |
110 | ******************************************************************************/ | 105 | ******************************************************************************/ |
111 | #ifdef ACPI_FUTURE_USAGE | ||
112 | acpi_status | 106 | acpi_status |
113 | acpi_get_firmware_waking_vector(acpi_physical_address * physical_address) | 107 | acpi_set_firmware_waking_vector64(u64 physical_address) |
114 | { | 108 | { |
115 | struct acpi_table_facs *facs; | 109 | ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64); |
116 | acpi_status status; | ||
117 | 110 | ||
118 | ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector); | ||
119 | |||
120 | if (!physical_address) { | ||
121 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
122 | } | ||
123 | 111 | ||
124 | /* Get the FACS */ | 112 | /* Determine if the 64-bit vector actually exists */ |
125 | 113 | ||
126 | status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, | 114 | if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) { |
127 | ACPI_CAST_INDIRECT_PTR(struct | 115 | return_ACPI_STATUS(AE_NOT_EXIST); |
128 | acpi_table_header, | ||
129 | &facs)); | ||
130 | if (ACPI_FAILURE(status)) { | ||
131 | return_ACPI_STATUS(status); | ||
132 | } | 116 | } |
133 | 117 | ||
134 | /* Get the vector */ | 118 | /* Clear 32-bit vector, set the 64-bit X_ vector */ |
135 | *physical_address = (acpi_physical_address)facs->firmware_waking_vector; | 119 | |
120 | acpi_gbl_FACS->firmware_waking_vector = 0; | ||
121 | acpi_gbl_FACS->xfirmware_waking_vector = physical_address; | ||
136 | 122 | ||
137 | return_ACPI_STATUS(AE_OK); | 123 | return_ACPI_STATUS(AE_OK); |
138 | } | 124 | } |
139 | 125 | ||
140 | ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector) | 126 | ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64) |
141 | #endif | 127 | |
142 | /******************************************************************************* | 128 | /******************************************************************************* |
143 | * | 129 | * |
144 | * FUNCTION: acpi_enter_sleep_state_prep | 130 | * FUNCTION: acpi_enter_sleep_state_prep |
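The hwsleep.c hunk above splits the old FACS lookup-and-store into two entry points that operate on the cached `acpi_gbl_FACS`: `acpi_set_firmware_waking_vector()` sets the 32-bit real-mode vector and clears the 64-bit X_ field when the table carries one, while the new `acpi_set_firmware_waking_vector64()` does the reverse and returns AE_NOT_EXIST on a short or pre-V1 FACS. A hedged usage sketch of how a caller might pick between them; `set_wakeup_vector()` and its parameters are hypothetical and not part of the patch:

```c
#include <acpi/acpi.h>

/* Hypothetical caller: prefer the 64-bit vector, fall back to 32-bit. */
static acpi_status set_wakeup_vector(u32 wake_addr32, u64 wake_addr64)
{
	acpi_status status;

	if (wake_addr64) {
		/* Fails with AE_NOT_EXIST if the FACS has no X_ field */
		status = acpi_set_firmware_waking_vector64(wake_addr64);
		if (ACPI_SUCCESS(status))
			return status;
	}

	/* 32-bit real-mode vector; clears the 64-bit field when present */
	return acpi_set_firmware_waking_vector(wake_addr32);
}
```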
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index b53d575491b9..b7f522c8f023 100644 --- a/drivers/acpi/hardware/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c | |||
@@ -43,6 +43,7 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_HARDWARE | 48 | #define _COMPONENT ACPI_HARDWARE |
48 | ACPI_MODULE_NAME("hwtimer") | 49 | ACPI_MODULE_NAME("hwtimer") |
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/acpica/hwxface.c index ddf792adcf96..ae597c0ab53f 100644 --- a/drivers/acpi/hardware/hwregs.c +++ b/drivers/acpi/acpica/hwxface.c | |||
@@ -1,10 +1,9 @@ | |||
1 | 1 | ||
2 | /******************************************************************************* | 2 | /****************************************************************************** |
3 | * | 3 | * |
4 | * Module Name: hwregs - Read/write access functions for the various ACPI | 4 | * Module Name: hwxface - Public ACPICA hardware interfaces |
5 | * control and status registers. | ||
6 | * | 5 | * |
7 | ******************************************************************************/ | 6 | *****************************************************************************/ |
8 | 7 | ||
9 | /* | 8 | /* |
10 | * Copyright (C) 2000 - 2008, Intel Corp. | 9 | * Copyright (C) 2000 - 2008, Intel Corp. |
@@ -44,209 +43,208 @@ | |||
44 | */ | 43 | */ |
45 | 44 | ||
46 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
47 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
48 | #include <acpi/acevents.h> | 47 | #include "acnamesp.h" |
49 | 48 | ||
50 | #define _COMPONENT ACPI_HARDWARE | 49 | #define _COMPONENT ACPI_HARDWARE |
51 | ACPI_MODULE_NAME("hwregs") | 50 | ACPI_MODULE_NAME("hwxface") |
52 | 51 | ||
53 | /******************************************************************************* | 52 | /****************************************************************************** |
54 | * | 53 | * |
55 | * FUNCTION: acpi_hw_clear_acpi_status | 54 | * FUNCTION: acpi_reset |
56 | * | 55 | * |
57 | * PARAMETERS: None | 56 | * PARAMETERS: None |
58 | * | 57 | * |
59 | * RETURN: None | 58 | * RETURN: Status |
60 | * | 59 | * |
61 | * DESCRIPTION: Clears all fixed and general purpose status bits | 60 | * DESCRIPTION: Set reset register in memory or IO space. Note: Does not |
62 | * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED | 61 | * support reset register in PCI config space, this must be |
62 | * handled separately. | ||
63 | * | 63 | * |
64 | ******************************************************************************/ | 64 | ******************************************************************************/ |
65 | acpi_status acpi_hw_clear_acpi_status(void) | 65 | acpi_status acpi_reset(void) |
66 | { | 66 | { |
67 | struct acpi_generic_address *reset_reg; | ||
67 | acpi_status status; | 68 | acpi_status status; |
68 | acpi_cpu_flags lock_flags = 0; | ||
69 | 69 | ||
70 | ACPI_FUNCTION_TRACE(hw_clear_acpi_status); | 70 | ACPI_FUNCTION_TRACE(acpi_reset); |
71 | 71 | ||
72 | ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n", | 72 | reset_reg = &acpi_gbl_FADT.reset_register; |
73 | ACPI_BITMASK_ALL_FIXED_STATUS, | ||
74 | (u16) acpi_gbl_FADT.xpm1a_event_block.address)); | ||
75 | 73 | ||
76 | lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); | 74 | /* Check if the reset register is supported */ |
77 | 75 | ||
78 | status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, | 76 | if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) || |
79 | ACPI_BITMASK_ALL_FIXED_STATUS); | 77 | !reset_reg->address) { |
80 | if (ACPI_FAILURE(status)) { | 78 | return_ACPI_STATUS(AE_NOT_EXIST); |
81 | goto unlock_and_exit; | ||
82 | } | 79 | } |
83 | 80 | ||
84 | /* Clear the fixed events */ | 81 | /* Write the reset value to the reset register */ |
85 | |||
86 | if (acpi_gbl_FADT.xpm1b_event_block.address) { | ||
87 | status = | ||
88 | acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS, | ||
89 | &acpi_gbl_FADT.xpm1b_event_block); | ||
90 | if (ACPI_FAILURE(status)) { | ||
91 | goto unlock_and_exit; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | /* Clear the GPE Bits in all GPE registers in all GPE blocks */ | ||
96 | |||
97 | status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block); | ||
98 | 82 | ||
99 | unlock_and_exit: | 83 | status = acpi_write(acpi_gbl_FADT.reset_value, reset_reg); |
100 | acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); | ||
101 | return_ACPI_STATUS(status); | 84 | return_ACPI_STATUS(status); |
102 | } | 85 | } |
103 | 86 | ||
104 | /******************************************************************************* | 87 | ACPI_EXPORT_SYMBOL(acpi_reset) |
88 | |||
89 | /****************************************************************************** | ||
105 | * | 90 | * |
106 | * FUNCTION: acpi_get_sleep_type_data | 91 | * FUNCTION: acpi_read |
107 | * | 92 | * |
108 | * PARAMETERS: sleep_state - Numeric sleep state | 93 | * PARAMETERS: Value - Where the value is returned |
109 | * *sleep_type_a - Where SLP_TYPa is returned | 94 | * Reg - GAS register structure |
110 | * *sleep_type_b - Where SLP_TYPb is returned | ||
111 | * | 95 | * |
112 | * RETURN: Status - ACPI status | 96 | * RETURN: Status |
113 | * | 97 | * |
114 | * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep | 98 | * DESCRIPTION: Read from either memory or IO space. |
115 | * state. | ||
116 | * | 99 | * |
117 | ******************************************************************************/ | 100 | ******************************************************************************/ |
118 | 101 | acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg) | |
119 | acpi_status | ||
120 | acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b) | ||
121 | { | 102 | { |
122 | acpi_status status = AE_OK; | 103 | u32 width; |
123 | struct acpi_evaluate_info *info; | 104 | u64 address; |
124 | 105 | acpi_status status; | |
125 | ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data); | ||
126 | |||
127 | /* Validate parameters */ | ||
128 | |||
129 | if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) { | ||
130 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
131 | } | ||
132 | 106 | ||
133 | /* Allocate the evaluation information block */ | 107 | ACPI_FUNCTION_NAME(acpi_read); |
134 | 108 | ||
135 | info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); | 109 | /* |
136 | if (!info) { | 110 | * Must have a valid pointer to a GAS structure, and |
137 | return_ACPI_STATUS(AE_NO_MEMORY); | 111 | * a non-zero address within. However, don't return an error |
112 | * because the PM1A/B code must not fail if B isn't present. | ||
113 | */ | ||
114 | if (!reg) { | ||
115 | return (AE_OK); | ||
138 | } | 116 | } |
139 | 117 | ||
140 | info->pathname = | 118 | /* Get a local copy of the address. Handles possible alignment issues */ |
141 | ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]); | ||
142 | |||
143 | /* Evaluate the namespace object containing the values for this state */ | ||
144 | |||
145 | status = acpi_ns_evaluate(info); | ||
146 | if (ACPI_FAILURE(status)) { | ||
147 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
148 | "%s while evaluating SleepState [%s]\n", | ||
149 | acpi_format_exception(status), | ||
150 | info->pathname)); | ||
151 | 119 | ||
152 | goto cleanup; | 120 | ACPI_MOVE_64_TO_64(&address, ®->address); |
121 | if (!address) { | ||
122 | return (AE_OK); | ||
153 | } | 123 | } |
154 | 124 | ||
155 | /* Must have a return object */ | 125 | /* Supported widths are 8/16/32 */ |
156 | 126 | ||
157 | if (!info->return_object) { | 127 | width = reg->bit_width; |
158 | ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]", | 128 | if ((width != 8) && (width != 16) && (width != 32)) { |
159 | info->pathname)); | 129 | return (AE_SUPPORT); |
160 | status = AE_NOT_EXIST; | ||
161 | } | 130 | } |
162 | 131 | ||
163 | /* It must be of type Package */ | 132 | /* Initialize entire 32-bit return value to zero */ |
164 | 133 | ||
165 | else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) { | 134 | *value = 0; |
166 | ACPI_ERROR((AE_INFO, | ||
167 | "Sleep State return object is not a Package")); | ||
168 | status = AE_AML_OPERAND_TYPE; | ||
169 | } | ||
170 | 135 | ||
171 | /* | 136 | /* |
172 | * The package must have at least two elements. NOTE (March 2005): This | 137 | * Two address spaces supported: Memory or IO. |
173 | * goes against the current ACPI spec which defines this object as a | 138 | * PCI_Config is not supported here because the GAS struct is insufficient |
174 | * package with one encoded DWORD element. However, existing practice | ||
175 | * by BIOS vendors seems to be to have 2 or more elements, at least | ||
176 | * one per sleep type (A/B). | ||
177 | */ | 139 | */ |
178 | else if (info->return_object->package.count < 2) { | 140 | switch (reg->space_id) { |
179 | ACPI_ERROR((AE_INFO, | 141 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: |
180 | "Sleep State return package does not have at least two elements")); | ||
181 | status = AE_AML_NO_OPERAND; | ||
182 | } | ||
183 | 142 | ||
184 | /* The first two elements must both be of type Integer */ | 143 | status = acpi_os_read_memory((acpi_physical_address) address, |
144 | value, width); | ||
145 | break; | ||
185 | 146 | ||
186 | else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0]) | 147 | case ACPI_ADR_SPACE_SYSTEM_IO: |
187 | != ACPI_TYPE_INTEGER) || | ||
188 | (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1]) | ||
189 | != ACPI_TYPE_INTEGER)) { | ||
190 | ACPI_ERROR((AE_INFO, | ||
191 | "Sleep State return package elements are not both Integers (%s, %s)", | ||
192 | acpi_ut_get_object_type_name(info->return_object-> | ||
193 | package.elements[0]), | ||
194 | acpi_ut_get_object_type_name(info->return_object-> | ||
195 | package.elements[1]))); | ||
196 | status = AE_AML_OPERAND_TYPE; | ||
197 | } else { | ||
198 | /* Valid _Sx_ package size, type, and value */ | ||
199 | 148 | ||
200 | *sleep_type_a = (u8) | 149 | status = |
201 | (info->return_object->package.elements[0])->integer.value; | 150 | acpi_os_read_port((acpi_io_address) address, value, width); |
202 | *sleep_type_b = (u8) | 151 | break; |
203 | (info->return_object->package.elements[1])->integer.value; | ||
204 | } | ||
205 | 152 | ||
206 | if (ACPI_FAILURE(status)) { | 153 | default: |
207 | ACPI_EXCEPTION((AE_INFO, status, | 154 | ACPI_ERROR((AE_INFO, |
208 | "While evaluating SleepState [%s], bad Sleep object %p type %s", | 155 | "Unsupported address space: %X", reg->space_id)); |
209 | info->pathname, info->return_object, | 156 | return (AE_BAD_PARAMETER); |
210 | acpi_ut_get_object_type_name(info-> | ||
211 | return_object))); | ||
212 | } | 157 | } |
213 | 158 | ||
214 | acpi_ut_remove_reference(info->return_object); | 159 | ACPI_DEBUG_PRINT((ACPI_DB_IO, |
160 | "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", | ||
161 | *value, width, ACPI_FORMAT_UINT64(address), | ||
162 | acpi_ut_get_region_name(reg->space_id))); | ||
215 | 163 | ||
216 | cleanup: | 164 | return (status); |
217 | ACPI_FREE(info); | ||
218 | return_ACPI_STATUS(status); | ||
219 | } | 165 | } |
220 | 166 | ||
221 | ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data) | 167 | ACPI_EXPORT_SYMBOL(acpi_read) |
222 | 168 | ||
223 | /******************************************************************************* | 169 | /****************************************************************************** |
224 | * | 170 | * |
225 | * FUNCTION: acpi_hw_get_register_bit_mask | 171 | * FUNCTION: acpi_write |
226 | * | 172 | * |
227 | * PARAMETERS: register_id - Index of ACPI Register to access | 173 | * PARAMETERS: Value - To be written |
174 | * Reg - GAS register structure | ||
228 | * | 175 | * |
229 | * RETURN: The bitmask to be used when accessing the register | 176 | * RETURN: Status |
230 | * | 177 | * |
231 | * DESCRIPTION: Map register_id into a register bitmask. | 178 | * DESCRIPTION: Write to either memory or IO space. |
232 | * | 179 | * |
233 | ******************************************************************************/ | 180 | ******************************************************************************/ |
234 | struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id) | 181 | acpi_status acpi_write(u32 value, struct acpi_generic_address *reg) |
235 | { | 182 | { |
236 | ACPI_FUNCTION_ENTRY(); | 183 | u32 width; |
184 | u64 address; | ||
185 | acpi_status status; | ||
237 | 186 | ||
238 | if (register_id > ACPI_BITREG_MAX) { | 187 | ACPI_FUNCTION_NAME(acpi_write); |
239 | ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X", | 188 | |
240 | register_id)); | 189 | /* |
241 | return (NULL); | 190 | * Must have a valid pointer to a GAS structure, and |
191 | * a non-zero address within. However, don't return an error | ||
192 | * because the PM1A/B code must not fail if B isn't present. | ||
193 | */ | ||
194 | if (!reg) { | ||
195 | return (AE_OK); | ||
242 | } | 196 | } |
243 | 197 | ||
244 | return (&acpi_gbl_bit_register_info[register_id]); | 198 | /* Get a local copy of the address. Handles possible alignment issues */ |
199 | |||
200 | ACPI_MOVE_64_TO_64(&address, ®->address); | ||
201 | if (!address) { | ||
202 | return (AE_OK); | ||
203 | } | ||
204 | |||
205 | /* Supported widths are 8/16/32 */ | ||
206 | |||
207 | width = reg->bit_width; | ||
208 | if ((width != 8) && (width != 16) && (width != 32)) { | ||
209 | return (AE_SUPPORT); | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Two address spaces supported: Memory or IO. | ||
214 | * PCI_Config is not supported here because the GAS struct is insufficient | ||
215 | */ | ||
216 | switch (reg->space_id) { | ||
217 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: | ||
218 | |||
219 | status = acpi_os_write_memory((acpi_physical_address) address, | ||
220 | value, width); | ||
221 | break; | ||
222 | |||
223 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
224 | |||
225 | status = acpi_os_write_port((acpi_io_address) address, value, | ||
226 | width); | ||
227 | break; | ||
228 | |||
229 | default: | ||
230 | ACPI_ERROR((AE_INFO, | ||
231 | "Unsupported address space: %X", reg->space_id)); | ||
232 | return (AE_BAD_PARAMETER); | ||
233 | } | ||
234 | |||
235 | ACPI_DEBUG_PRINT((ACPI_DB_IO, | ||
236 | "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", | ||
237 | value, width, ACPI_FORMAT_UINT64(address), | ||
238 | acpi_ut_get_region_name(reg->space_id))); | ||
239 | |||
240 | return (status); | ||
245 | } | 241 | } |
246 | 242 | ||
243 | ACPI_EXPORT_SYMBOL(acpi_write) | ||
244 | |||
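For orientation, a caller-side sketch of the acpi_read()/acpi_write() pair exported above. It assumes the read counterpart has the signature acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg) (only its tail is visible in this hunk); the helper name and the read-modify-write use are illustrative, not part of the commit.

#include <acpi/acpi.h>

/* Sketch: read-modify-write a register described by a GAS structure.
 * The access width is taken from reg->bit_width (8/16/32 only). */
static acpi_status example_gas_set_bits(struct acpi_generic_address *reg,
					u32 bits)
{
	u32 value;
	acpi_status status;

	status = acpi_read(&value, reg);
	if (ACPI_FAILURE(status)) {
		return status;
	}

	return acpi_write(value | bits, reg);
}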
247 | /******************************************************************************* | 245 | /******************************************************************************* |
248 | * | 246 | * |
249 | * FUNCTION: acpi_get_register | 247 | * FUNCTION: acpi_get_register_unlocked |
250 | * | 248 | * |
251 | * PARAMETERS: register_id - ID of ACPI bit_register to access | 249 | * PARAMETERS: register_id - ID of ACPI bit_register to access |
252 | * return_value - Value that was read from the register | 250 | * return_value - Value that was read from the register |
@@ -254,17 +252,16 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id) | |||
254 | * RETURN: Status and the value read from specified Register. Value | 252 | * RETURN: Status and the value read from specified Register. Value |
255 | * returned is normalized to bit0 (is shifted all the way right) | 253 | * returned is normalized to bit0 (is shifted all the way right) |
256 | * | 254 | * |
257 | * DESCRIPTION: ACPI bit_register read function. | 255 | * DESCRIPTION: ACPI bit_register read function. Does not acquire the HW lock. |
258 | * | 256 | * |
259 | ******************************************************************************/ | 257 | ******************************************************************************/ |
260 | 258 | acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value) | |
261 | acpi_status acpi_get_register_unlocked(u32 register_id, u32 * return_value) | ||
262 | { | 259 | { |
263 | u32 register_value = 0; | 260 | u32 register_value = 0; |
264 | struct acpi_bit_register_info *bit_reg_info; | 261 | struct acpi_bit_register_info *bit_reg_info; |
265 | acpi_status status; | 262 | acpi_status status; |
266 | 263 | ||
267 | ACPI_FUNCTION_TRACE(acpi_get_register); | 264 | ACPI_FUNCTION_TRACE(acpi_get_register_unlocked); |
268 | 265 | ||
269 | /* Get the info structure corresponding to the requested ACPI Register */ | 266 | /* Get the info structure corresponding to the requested ACPI Register */ |
270 | 267 | ||
@@ -296,14 +293,31 @@ acpi_status acpi_get_register_unlocked(u32 register_id, u32 * return_value) | |||
296 | return_ACPI_STATUS(status); | 293 | return_ACPI_STATUS(status); |
297 | } | 294 | } |
298 | 295 | ||
299 | acpi_status acpi_get_register(u32 register_id, u32 * return_value) | 296 | ACPI_EXPORT_SYMBOL(acpi_get_register_unlocked) |
297 | |||
298 | /******************************************************************************* | ||
299 | * | ||
300 | * FUNCTION: acpi_get_register | ||
301 | * | ||
302 | * PARAMETERS: register_id - ID of ACPI bit_register to access | ||
303 | * return_value - Value that was read from the register | ||
304 | * | ||
305 | * RETURN: Status and the value read from specified Register. Value | ||
306 | * returned is normalized to bit0 (is shifted all the way right) | ||
307 | * | ||
308 | * DESCRIPTION: ACPI bit_register read function. | ||
309 | * | ||
310 | ******************************************************************************/ | ||
311 | acpi_status acpi_get_register(u32 register_id, u32 *return_value) | ||
300 | { | 312 | { |
301 | acpi_status status; | 313 | acpi_status status; |
302 | acpi_cpu_flags flags; | 314 | acpi_cpu_flags flags; |
315 | |||
303 | flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); | 316 | flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); |
304 | status = acpi_get_register_unlocked(register_id, return_value); | 317 | status = acpi_get_register_unlocked(register_id, return_value); |
305 | acpi_os_release_lock(acpi_gbl_hardware_lock, flags); | 318 | acpi_os_release_lock(acpi_gbl_hardware_lock, flags); |
306 | return status; | 319 | |
320 | return (status); | ||
307 | } | 321 | } |
308 | 322 | ||
309 | ACPI_EXPORT_SYMBOL(acpi_get_register) | 323 | ACPI_EXPORT_SYMBOL(acpi_get_register) |
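The new _unlocked variant exists for callers that already hold acpi_gbl_hardware_lock and need several bit-register accesses in one critical section; everyone else keeps using acpi_get_register(), which takes the lock itself, as the wrapper above shows. A hedged sketch of both call styles follows; the helper names are hypothetical and ACPI_BITREG_POWER_BUTTON_STATUS is used purely as an example register ID.

#include <acpi/acpi.h>

/* Sketch: ordinary caller, locking handled inside acpi_get_register() */
static acpi_status example_read_pwrbtn(u32 *value)
{
	return acpi_get_register(ACPI_BITREG_POWER_BUTTON_STATUS, value);
}

/* Sketch: caller doing several accesses under one hold of the HW lock */
static acpi_status example_read_pwrbtn_batched(u32 *value)
{
	acpi_cpu_flags flags;
	acpi_status status;

	flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
	status = acpi_get_register_unlocked(ACPI_BITREG_POWER_BUTTON_STATUS,
					    value);
	/* ... further bit-register work under the same lock ... */
	acpi_os_release_lock(acpi_gbl_hardware_lock, flags);

	return status;
}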
@@ -370,8 +384,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value) | |||
370 | bit_reg_info-> | 384 | bit_reg_info-> |
371 | access_bit_mask); | 385 | access_bit_mask); |
372 | if (value) { | 386 | if (value) { |
373 | status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, | 387 | status = |
374 | (u16) value); | 388 | acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, |
389 | (u16) value); | ||
375 | register_value = 0; | 390 | register_value = 0; |
376 | } | 391 | } |
377 | break; | 392 | break; |
@@ -459,399 +474,120 @@ acpi_status acpi_set_register(u32 register_id, u32 value) | |||
459 | 474 | ||
460 | ACPI_EXPORT_SYMBOL(acpi_set_register) | 475 | ACPI_EXPORT_SYMBOL(acpi_set_register) |
461 | 476 | ||
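On the write side, acpi_set_register() performs the locked read-modify-write shown in the hunk above, so acknowledging a write-1-to-clear PM1 status bit is a one-liner. The helper name and the choice of bit-register ID are illustrative only.

#include <acpi/acpi.h>

/* Sketch: acknowledge (clear) a PM1 status bit; these bits clear on writing 1 */
static acpi_status example_ack_power_button(void)
{
	return acpi_set_register(ACPI_BITREG_POWER_BUTTON_STATUS, 1);
}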
462 | /****************************************************************************** | 477 | /******************************************************************************* |
463 | * | 478 | * |
464 | * FUNCTION: acpi_hw_register_read | 479 | * FUNCTION: acpi_get_sleep_type_data |
465 | * | 480 | * |
466 | * PARAMETERS: register_id - ACPI Register ID | 481 | * PARAMETERS: sleep_state - Numeric sleep state |
467 | * return_value - Where the register value is returned | 482 | * *sleep_type_a - Where SLP_TYPa is returned |
483 | * *sleep_type_b - Where SLP_TYPb is returned | ||
468 | * | 484 | * |
469 | * RETURN: Status and the value read. | 485 | * RETURN: Status - ACPI status |
470 | * | 486 | * |
471 | * DESCRIPTION: Read from the specified ACPI register | 487 | * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep |
488 | * state. | ||
472 | * | 489 | * |
473 | ******************************************************************************/ | 490 | ******************************************************************************/ |
474 | acpi_status | 491 | acpi_status |
475 | acpi_hw_register_read(u32 register_id, u32 * return_value) | 492 | acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) |
476 | { | 493 | { |
477 | u32 value1 = 0; | 494 | acpi_status status = AE_OK; |
478 | u32 value2 = 0; | 495 | struct acpi_evaluate_info *info; |
479 | acpi_status status; | ||
480 | |||
481 | ACPI_FUNCTION_TRACE(hw_register_read); | ||
482 | |||
483 | switch (register_id) { | ||
484 | case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ | ||
485 | |||
486 | status = | ||
487 | acpi_hw_low_level_read(16, &value1, | ||
488 | &acpi_gbl_FADT.xpm1a_event_block); | ||
489 | if (ACPI_FAILURE(status)) { | ||
490 | goto exit; | ||
491 | } | ||
492 | |||
493 | /* PM1B is optional */ | ||
494 | |||
495 | status = | ||
496 | acpi_hw_low_level_read(16, &value2, | ||
497 | &acpi_gbl_FADT.xpm1b_event_block); | ||
498 | value1 |= value2; | ||
499 | break; | ||
500 | |||
501 | case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ | ||
502 | |||
503 | status = | ||
504 | acpi_hw_low_level_read(16, &value1, &acpi_gbl_xpm1a_enable); | ||
505 | if (ACPI_FAILURE(status)) { | ||
506 | goto exit; | ||
507 | } | ||
508 | |||
509 | /* PM1B is optional */ | ||
510 | |||
511 | status = | ||
512 | acpi_hw_low_level_read(16, &value2, &acpi_gbl_xpm1b_enable); | ||
513 | value1 |= value2; | ||
514 | break; | ||
515 | |||
516 | case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ | ||
517 | |||
518 | status = | ||
519 | acpi_hw_low_level_read(16, &value1, | ||
520 | &acpi_gbl_FADT.xpm1a_control_block); | ||
521 | if (ACPI_FAILURE(status)) { | ||
522 | goto exit; | ||
523 | } | ||
524 | |||
525 | status = | ||
526 | acpi_hw_low_level_read(16, &value2, | ||
527 | &acpi_gbl_FADT.xpm1b_control_block); | ||
528 | value1 |= value2; | ||
529 | break; | ||
530 | |||
531 | case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ | ||
532 | |||
533 | status = | ||
534 | acpi_hw_low_level_read(8, &value1, | ||
535 | &acpi_gbl_FADT.xpm2_control_block); | ||
536 | break; | ||
537 | |||
538 | case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ | ||
539 | |||
540 | status = | ||
541 | acpi_hw_low_level_read(32, &value1, | ||
542 | &acpi_gbl_FADT.xpm_timer_block); | ||
543 | break; | ||
544 | 496 | ||
545 | case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ | 497 | ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data); |
546 | 498 | ||
547 | status = | 499 | /* Validate parameters */ |
548 | acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8); | ||
549 | break; | ||
550 | 500 | ||
551 | default: | 501 | if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) { |
552 | ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id)); | 502 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
553 | status = AE_BAD_PARAMETER; | ||
554 | break; | ||
555 | } | 503 | } |
556 | 504 | ||
557 | exit: | 505 | /* Allocate the evaluation information block */ |
558 | 506 | ||
559 | if (ACPI_SUCCESS(status)) { | 507 | info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); |
560 | *return_value = value1; | 508 | if (!info) { |
509 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
561 | } | 510 | } |
562 | 511 | ||
563 | return_ACPI_STATUS(status); | 512 | info->pathname = |
564 | } | 513 | ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]); |
565 | |||
566 | /****************************************************************************** | ||
567 | * | ||
568 | * FUNCTION: acpi_hw_register_write | ||
569 | * | ||
570 | * PARAMETERS: register_id - ACPI Register ID | ||
571 | * Value - The value to write | ||
572 | * | ||
573 | * RETURN: Status | ||
574 | * | ||
575 | * DESCRIPTION: Write to the specified ACPI register | ||
576 | * | ||
577 | * NOTE: In accordance with the ACPI specification, this function automatically | ||
578 | * preserves the value of the following bits, meaning that these bits cannot be | ||
579 | * changed via this interface: | ||
580 | * | ||
581 | * PM1_CONTROL[0] = SCI_EN | ||
582 | * PM1_CONTROL[9] | ||
583 | * PM1_STATUS[11] | ||
584 | * | ||
585 | * ACPI References: | ||
586 | * 1) Hardware Ignored Bits: When software writes to a register with ignored | ||
587 | * bit fields, it preserves the ignored bit fields | ||
588 | * 2) SCI_EN: OSPM always preserves this bit position | ||
589 | * | ||
590 | ******************************************************************************/ | ||
591 | |||
592 | acpi_status acpi_hw_register_write(u32 register_id, u32 value) | ||
593 | { | ||
594 | acpi_status status; | ||
595 | u32 read_value; | ||
596 | |||
597 | ACPI_FUNCTION_TRACE(hw_register_write); | ||
598 | |||
599 | switch (register_id) { | ||
600 | case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ | ||
601 | |||
602 | /* Perform a read first to preserve certain bits (per ACPI spec) */ | ||
603 | |||
604 | status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, | ||
605 | &read_value); | ||
606 | if (ACPI_FAILURE(status)) { | ||
607 | goto exit; | ||
608 | } | ||
609 | |||
610 | /* Insert the bits to be preserved */ | ||
611 | |||
612 | ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS, | ||
613 | read_value); | ||
614 | |||
615 | /* Now we can write the data */ | ||
616 | |||
617 | status = | ||
618 | acpi_hw_low_level_write(16, value, | ||
619 | &acpi_gbl_FADT.xpm1a_event_block); | ||
620 | if (ACPI_FAILURE(status)) { | ||
621 | goto exit; | ||
622 | } | ||
623 | |||
624 | /* PM1B is optional */ | ||
625 | |||
626 | status = | ||
627 | acpi_hw_low_level_write(16, value, | ||
628 | &acpi_gbl_FADT.xpm1b_event_block); | ||
629 | break; | ||
630 | |||
631 | case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ | ||
632 | |||
633 | status = | ||
634 | acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1a_enable); | ||
635 | if (ACPI_FAILURE(status)) { | ||
636 | goto exit; | ||
637 | } | ||
638 | |||
639 | /* PM1B is optional */ | ||
640 | |||
641 | status = | ||
642 | acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1b_enable); | ||
643 | break; | ||
644 | |||
645 | case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ | ||
646 | |||
647 | /* | ||
648 | * Perform a read first to preserve certain bits (per ACPI spec) | ||
649 | */ | ||
650 | status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, | ||
651 | &read_value); | ||
652 | if (ACPI_FAILURE(status)) { | ||
653 | goto exit; | ||
654 | } | ||
655 | |||
656 | /* Insert the bits to be preserved */ | ||
657 | |||
658 | ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS, | ||
659 | read_value); | ||
660 | |||
661 | /* Now we can write the data */ | ||
662 | |||
663 | status = | ||
664 | acpi_hw_low_level_write(16, value, | ||
665 | &acpi_gbl_FADT.xpm1a_control_block); | ||
666 | if (ACPI_FAILURE(status)) { | ||
667 | goto exit; | ||
668 | } | ||
669 | |||
670 | status = | ||
671 | acpi_hw_low_level_write(16, value, | ||
672 | &acpi_gbl_FADT.xpm1b_control_block); | ||
673 | break; | ||
674 | |||
675 | case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */ | ||
676 | |||
677 | status = | ||
678 | acpi_hw_low_level_write(16, value, | ||
679 | &acpi_gbl_FADT.xpm1a_control_block); | ||
680 | break; | ||
681 | |||
682 | case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */ | ||
683 | |||
684 | status = | ||
685 | acpi_hw_low_level_write(16, value, | ||
686 | &acpi_gbl_FADT.xpm1b_control_block); | ||
687 | break; | ||
688 | |||
689 | case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ | ||
690 | |||
691 | status = | ||
692 | acpi_hw_low_level_write(8, value, | ||
693 | &acpi_gbl_FADT.xpm2_control_block); | ||
694 | break; | ||
695 | |||
696 | case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ | ||
697 | |||
698 | status = | ||
699 | acpi_hw_low_level_write(32, value, | ||
700 | &acpi_gbl_FADT.xpm_timer_block); | ||
701 | break; | ||
702 | |||
703 | case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ | ||
704 | 514 | ||
705 | /* SMI_CMD is currently always in IO space */ | 515 | /* Evaluate the namespace object containing the values for this state */ |
706 | 516 | ||
707 | status = | 517 | status = acpi_ns_evaluate(info); |
708 | acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8); | 518 | if (ACPI_FAILURE(status)) { |
709 | break; | 519 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, |
520 | "%s while evaluating SleepState [%s]\n", | ||
521 | acpi_format_exception(status), | ||
522 | info->pathname)); | ||
710 | 523 | ||
711 | default: | 524 | goto cleanup; |
712 | status = AE_BAD_PARAMETER; | ||
713 | break; | ||
714 | } | 525 | } |
715 | 526 | ||
716 | exit: | 527 | /* Must have a return object */ |
717 | return_ACPI_STATUS(status); | ||
718 | } | ||
719 | |||
720 | /****************************************************************************** | ||
721 | * | ||
722 | * FUNCTION: acpi_hw_low_level_read | ||
723 | * | ||
724 | * PARAMETERS: Width - 8, 16, or 32 | ||
725 | * Value - Where the value is returned | ||
726 | * Reg - GAS register structure | ||
727 | * | ||
728 | * RETURN: Status | ||
729 | * | ||
730 | * DESCRIPTION: Read from either memory or IO space. | ||
731 | * | ||
732 | ******************************************************************************/ | ||
733 | |||
734 | acpi_status | ||
735 | acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg) | ||
736 | { | ||
737 | u64 address; | ||
738 | acpi_status status; | ||
739 | |||
740 | ACPI_FUNCTION_NAME(hw_low_level_read); | ||
741 | 528 | ||
742 | /* | 529 | if (!info->return_object) { |
743 | * Must have a valid pointer to a GAS structure, and | 530 | ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]", |
744 | * a non-zero address within. However, don't return an error | 531 | info->pathname)); |
745 | * because the PM1A/B code must not fail if B isn't present. | 532 | status = AE_NOT_EXIST; |
746 | */ | ||
747 | if (!reg) { | ||
748 | return (AE_OK); | ||
749 | } | 533 | } |
750 | 534 | ||
751 | /* Get a local copy of the address. Handles possible alignment issues */ | 535 | /* It must be of type Package */ |
752 | 536 | ||
753 | ACPI_MOVE_64_TO_64(&address, ®->address); | 537 | else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) { |
754 | if (!address) { | 538 | ACPI_ERROR((AE_INFO, |
755 | return (AE_OK); | 539 | "Sleep State return object is not a Package")); |
540 | status = AE_AML_OPERAND_TYPE; | ||
756 | } | 541 | } |
757 | *value = 0; | ||
758 | 542 | ||
759 | /* | 543 | /* |
760 | * Two address spaces supported: Memory or IO. | 544 | * The package must have at least two elements. NOTE (March 2005): This |
761 | * PCI_Config is not supported here because the GAS struct is insufficient | 545 | * goes against the current ACPI spec which defines this object as a |
546 | * package with one encoded DWORD element. However, existing practice | ||
547 | * by BIOS vendors seems to be to have 2 or more elements, at least | ||
548 | * one per sleep type (A/B). | ||
762 | */ | 549 | */ |
763 | switch (reg->space_id) { | 550 | else if (info->return_object->package.count < 2) { |
764 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: | ||
765 | |||
766 | status = acpi_os_read_memory((acpi_physical_address) address, | ||
767 | value, width); | ||
768 | break; | ||
769 | |||
770 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
771 | |||
772 | status = | ||
773 | acpi_os_read_port((acpi_io_address) address, value, width); | ||
774 | break; | ||
775 | |||
776 | default: | ||
777 | ACPI_ERROR((AE_INFO, | 551 | ACPI_ERROR((AE_INFO, |
778 | "Unsupported address space: %X", reg->space_id)); | 552 | "Sleep State return package does not have at least two elements")); |
779 | return (AE_BAD_PARAMETER); | 553 | status = AE_AML_NO_OPERAND; |
780 | } | 554 | } |
781 | 555 | ||
782 | ACPI_DEBUG_PRINT((ACPI_DB_IO, | 556 | /* The first two elements must both be of type Integer */ |
783 | "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", | ||
784 | *value, width, ACPI_FORMAT_UINT64(address), | ||
785 | acpi_ut_get_region_name(reg->space_id))); | ||
786 | |||
787 | return (status); | ||
788 | } | ||
789 | |||
790 | /****************************************************************************** | ||
791 | * | ||
792 | * FUNCTION: acpi_hw_low_level_write | ||
793 | * | ||
794 | * PARAMETERS: Width - 8, 16, or 32 | ||
795 | * Value - To be written | ||
796 | * Reg - GAS register structure | ||
797 | * | ||
798 | * RETURN: Status | ||
799 | * | ||
800 | * DESCRIPTION: Write to either memory or IO space. | ||
801 | * | ||
802 | ******************************************************************************/ | ||
803 | |||
804 | acpi_status | ||
805 | acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg) | ||
806 | { | ||
807 | u64 address; | ||
808 | acpi_status status; | ||
809 | |||
810 | ACPI_FUNCTION_NAME(hw_low_level_write); | ||
811 | |||
812 | /* | ||
813 | * Must have a valid pointer to a GAS structure, and | ||
814 | * a non-zero address within. However, don't return an error | ||
815 | * because the PM1A/B code must not fail if B isn't present. | ||
816 | */ | ||
817 | if (!reg) { | ||
818 | return (AE_OK); | ||
819 | } | ||
820 | 557 | ||
821 | /* Get a local copy of the address. Handles possible alignment issues */ | 558 | else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0]) |
559 | != ACPI_TYPE_INTEGER) || | ||
560 | (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1]) | ||
561 | != ACPI_TYPE_INTEGER)) { | ||
562 | ACPI_ERROR((AE_INFO, | ||
563 | "Sleep State return package elements are not both Integers (%s, %s)", | ||
564 | acpi_ut_get_object_type_name(info->return_object-> | ||
565 | package.elements[0]), | ||
566 | acpi_ut_get_object_type_name(info->return_object-> | ||
567 | package.elements[1]))); | ||
568 | status = AE_AML_OPERAND_TYPE; | ||
569 | } else { | ||
570 | /* Valid _Sx_ package size, type, and value */ | ||
822 | 571 | ||
823 | ACPI_MOVE_64_TO_64(&address, ®->address); | 572 | *sleep_type_a = (u8) |
824 | if (!address) { | 573 | (info->return_object->package.elements[0])->integer.value; |
825 | return (AE_OK); | 574 | *sleep_type_b = (u8) |
575 | (info->return_object->package.elements[1])->integer.value; | ||
826 | } | 576 | } |
827 | 577 | ||
828 | /* | 578 | if (ACPI_FAILURE(status)) { |
829 | * Two address spaces supported: Memory or IO. | 579 | ACPI_EXCEPTION((AE_INFO, status, |
830 | * PCI_Config is not supported here because the GAS struct is insufficient | 580 | "While evaluating SleepState [%s], bad Sleep object %p type %s", |
831 | */ | 581 | info->pathname, info->return_object, |
832 | switch (reg->space_id) { | 582 | acpi_ut_get_object_type_name(info-> |
833 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: | 583 | return_object))); |
834 | |||
835 | status = acpi_os_write_memory((acpi_physical_address) address, | ||
836 | value, width); | ||
837 | break; | ||
838 | |||
839 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
840 | |||
841 | status = acpi_os_write_port((acpi_io_address) address, value, | ||
842 | width); | ||
843 | break; | ||
844 | |||
845 | default: | ||
846 | ACPI_ERROR((AE_INFO, | ||
847 | "Unsupported address space: %X", reg->space_id)); | ||
848 | return (AE_BAD_PARAMETER); | ||
849 | } | 584 | } |
850 | 585 | ||
851 | ACPI_DEBUG_PRINT((ACPI_DB_IO, | 586 | acpi_ut_remove_reference(info->return_object); |
852 | "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", | ||
853 | value, width, ACPI_FORMAT_UINT64(address), | ||
854 | acpi_ut_get_region_name(reg->space_id))); | ||
855 | 587 | ||
856 | return (status); | 588 | cleanup: |
589 | ACPI_FREE(info); | ||
590 | return_ACPI_STATUS(status); | ||
857 | } | 591 | } |
592 | |||
593 | ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data) | ||
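A caller-side sketch of acpi_get_sleep_type_data() as reworked above: the _Sx_ name is evaluated and the two SLP_TYP values come back as bytes. The helper name and the use of S3 are illustrative, not taken from the patch.

#include <acpi/acpi.h>

/* Sketch: fetch the SLP_TYPa/SLP_TYPb values needed to enter S3 */
static acpi_status example_query_s3_types(void)
{
	u8 type_a;
	u8 type_b;
	acpi_status status;

	status = acpi_get_sleep_type_data(ACPI_STATE_S3, &type_a, &type_b);
	if (ACPI_FAILURE(status)) {
		return status;	/* e.g. _S3_ missing or a malformed package */
	}

	/* type_a/type_b would then be written into the PM1 control SLP_TYP fields */
	return AE_OK;
}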
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index c39a7f68b889..88303ebe924c 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/amlcode.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "amlcode.h" |
47 | #include <acpi/acdispat.h> | 47 | #include "acnamesp.h" |
48 | #include "acdispat.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_NAMESPACE | 50 | #define _COMPONENT ACPI_NAMESPACE |
50 | ACPI_MODULE_NAME("nsaccess") | 51 | ACPI_MODULE_NAME("nsaccess") |
@@ -165,12 +166,9 @@ acpi_status acpi_ns_root_initialize(void) | |||
165 | 166 | ||
166 | obj_desc->method.method_flags = | 167 | obj_desc->method.method_flags = |
167 | AML_METHOD_INTERNAL_ONLY; | 168 | AML_METHOD_INTERNAL_ONLY; |
168 | |||
169 | #ifndef ACPI_DUMP_APP | ||
170 | obj_desc->method.implementation = | 169 | obj_desc->method.implementation = |
171 | acpi_ut_osi_implementation; | 170 | acpi_ut_osi_implementation; |
172 | #endif | 171 | #endif |
173 | #endif | ||
174 | break; | 172 | break; |
175 | 173 | ||
176 | case ACPI_TYPE_INTEGER: | 174 | case ACPI_TYPE_INTEGER: |
@@ -521,11 +519,11 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, | |||
521 | } | 519 | } |
522 | 520 | ||
523 | /* | 521 | /* |
524 | * Search namespace for each segment of the name. Loop through and | 522 | * Search namespace for each segment of the name. Loop through and |
525 | * verify (or add to the namespace) each name segment. | 523 | * verify (or add to the namespace) each name segment. |
526 | * | 524 | * |
527 | * The object type is significant only at the last name | 525 | * The object type is significant only at the last name |
528 | * segment. (We don't care about the types along the path, only | 526 | * segment. (We don't care about the types along the path, only |
529 | * the type of the final target object.) | 527 | * the type of the final target object.) |
530 | */ | 528 | */ |
531 | this_search_type = ACPI_TYPE_ANY; | 529 | this_search_type = ACPI_TYPE_ANY; |
@@ -591,6 +589,10 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, | |||
591 | * segments). | 589 | * segments). |
592 | */ | 590 | */ |
593 | if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) { | 591 | if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) { |
592 | if (!this_node->object) { | ||
593 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
594 | } | ||
595 | |||
594 | if (acpi_ns_opens_scope | 596 | if (acpi_ns_opens_scope |
595 | (((struct acpi_namespace_node *)this_node-> | 597 | (((struct acpi_namespace_node *)this_node-> |
596 | object)->type)) { | 598 | object)->type)) { |
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 3a1740ac2edc..f976d848fe82 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include "acnamesp.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_NAMESPACE | 48 | #define _COMPONENT ACPI_NAMESPACE |
48 | ACPI_MODULE_NAME("nsalloc") | 49 | ACPI_MODULE_NAME("nsalloc") |
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/acpica/nsdump.c
index cc0ae39440e4..0da33c8e9ba2 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include "acnamesp.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_NAMESPACE | 48 | #define _COMPONENT ACPI_NAMESPACE |
48 | ACPI_MODULE_NAME("nsdump") | 49 | ACPI_MODULE_NAME("nsdump") |
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 428f50fde11a..41994fe7fbb8 100644
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -42,6 +42,7 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include "accommon.h" | ||
45 | 46 | ||
46 | /* TBD: This entire module is apparently obsolete and should be removed */ | 47 | /* TBD: This entire module is apparently obsolete and should be removed */ |
47 | 48 | ||
@@ -49,7 +50,7 @@ | |||
49 | ACPI_MODULE_NAME("nsdumpdv") | 50 | ACPI_MODULE_NAME("nsdumpdv") |
50 | #ifdef ACPI_OBSOLETE_FUNCTIONS | 51 | #ifdef ACPI_OBSOLETE_FUNCTIONS |
51 | #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) | 52 | #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) |
52 | #include <acpi/acnamesp.h> | 53 | #include "acnamesp.h" |
53 | /******************************************************************************* | 54 | /******************************************************************************* |
54 | * | 55 | * |
55 | * FUNCTION: acpi_ns_dump_one_device | 56 | * FUNCTION: acpi_ns_dump_one_device |
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/acpica/nseval.c
index 4cdf03ac2b46..0f3d5f9b5966 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acinterp.h> | 46 | #include "acparser.h" |
47 | #include <acpi/acnamesp.h> | 47 | #include "acinterp.h" |
48 | #include "acnamesp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_NAMESPACE | 50 | #define _COMPONENT ACPI_NAMESPACE |
50 | ACPI_MODULE_NAME("nseval") | 51 | ACPI_MODULE_NAME("nseval") |
@@ -89,6 +90,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info) | |||
89 | /* Initialize the return value to an invalid object */ | 90 | /* Initialize the return value to an invalid object */ |
90 | 91 | ||
91 | info->return_object = NULL; | 92 | info->return_object = NULL; |
93 | info->param_count = 0; | ||
92 | 94 | ||
93 | /* | 95 | /* |
94 | * Get the actual namespace node for the target object. Handles these cases: | 96 | * Get the actual namespace node for the target object. Handles these cases: |
@@ -141,41 +143,17 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info) | |||
141 | return_ACPI_STATUS(AE_NULL_OBJECT); | 143 | return_ACPI_STATUS(AE_NULL_OBJECT); |
142 | } | 144 | } |
143 | 145 | ||
144 | /* | 146 | /* Count the number of arguments being passed to the method */ |
145 | * Calculate the number of arguments being passed to the method | ||
146 | */ | ||
147 | 147 | ||
148 | info->param_count = 0; | ||
149 | if (info->parameters) { | 148 | if (info->parameters) { |
150 | while (info->parameters[info->param_count]) | 149 | while (info->parameters[info->param_count]) { |
150 | if (info->param_count > ACPI_METHOD_MAX_ARG) { | ||
151 | return_ACPI_STATUS(AE_LIMIT); | ||
152 | } | ||
151 | info->param_count++; | 153 | info->param_count++; |
154 | } | ||
152 | } | 155 | } |
153 | 156 | ||
154 | /* | ||
155 | * Warning if too few or too many arguments have been passed by the | ||
156 | * caller. We don't want to abort here with an error because an | ||
157 | * incorrect number of arguments may not cause the method to fail. | ||
158 | * However, the method will fail if there are too few arguments passed | ||
159 | * and the method attempts to use one of the missing ones. | ||
160 | */ | ||
161 | |||
162 | if (info->param_count < info->obj_desc->method.param_count) { | ||
163 | ACPI_WARNING((AE_INFO, | ||
164 | "Insufficient arguments - " | ||
165 | "method [%4.4s] needs %d, found %d", | ||
166 | acpi_ut_get_node_name(info->resolved_node), | ||
167 | info->obj_desc->method.param_count, | ||
168 | info->param_count)); | ||
169 | } else if (info->param_count > | ||
170 | info->obj_desc->method.param_count) { | ||
171 | ACPI_WARNING((AE_INFO, | ||
172 | "Excess arguments - " | ||
173 | "method [%4.4s] needs %d, found %d", | ||
174 | acpi_ut_get_node_name(info-> | ||
175 | resolved_node), | ||
176 | info->obj_desc->method.param_count, | ||
177 | info->param_count)); | ||
178 | } | ||
179 | 157 | ||
180 | ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:", | 158 | ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:", |
181 | ACPI_LV_INFO, _COMPONENT); | 159 | ACPI_LV_INFO, _COMPONENT); |
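The hunk above replaces the old warning-only argument handling with a hard bound that returns AE_LIMIT when a parameter list does not terminate. A standalone sketch of the same pattern, with a hypothetical EXAMPLE_MAX_ARGS standing in for ACPI_METHOD_MAX_ARG:

#include <stddef.h>

#define EXAMPLE_MAX_ARGS 7	/* stand-in for ACPI_METHOD_MAX_ARG */

/* Sketch: count a NULL-terminated argument array, rejecting runaway lists
 * instead of merely warning about them. */
static int example_count_args(void *const *params, unsigned int *count)
{
	*count = 0;
	if (!params) {
		return 0;
	}
	while (params[*count]) {
		if (*count > EXAMPLE_MAX_ARGS) {
			return -1;	/* ACPICA returns AE_LIMIT here */
		}
		(*count)++;
	}
	return 0;
}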
@@ -264,32 +242,13 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info) | |||
264 | } | 242 | } |
265 | } | 243 | } |
266 | 244 | ||
267 | /* Validation of return values for ACPI-predefined methods and objects */ | 245 | /* |
268 | 246 | * Check input argument count against the ASL-defined count for a method. | |
269 | if ((status == AE_OK) || (status == AE_CTRL_RETURN_VALUE)) { | 247 | * Also check predefined names: argument count and return value against |
270 | /* | 248 | * the ACPI specification. Some incorrect return value types are repaired. |
271 | * If this is the first evaluation, check the return value. This | 249 | */ |
272 | * ensures that any warnings will only be emitted during the very | 250 | (void)acpi_ns_check_predefined_names(node, info->param_count, |
273 | * first evaluation of the object. | 251 | status, &info->return_object); |
274 | */ | ||
275 | if (!(node->flags & ANOBJ_EVALUATED)) { | ||
276 | /* | ||
277 | * Check for a predefined ACPI name. If found, validate the | ||
278 | * returned object. | ||
279 | * | ||
280 | * Note: Ignore return status for now, emit warnings if there are | ||
281 | * problems with the returned object. May change later to abort | ||
282 | * the method on invalid return object. | ||
283 | */ | ||
284 | (void)acpi_ns_check_predefined_names(node, | ||
285 | info-> | ||
286 | return_object); | ||
287 | } | ||
288 | |||
289 | /* Mark the node as having been evaluated */ | ||
290 | |||
291 | node->flags |= ANOBJ_EVALUATED; | ||
292 | } | ||
293 | 252 | ||
294 | /* Check if there is a return value that must be dealt with */ | 253 | /* Check if there is a return value that must be dealt with */ |
295 | 254 | ||
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/acpica/nsinit.c
index e4c57510d798..13501cb81863 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acdispat.h> | 46 | #include "acnamesp.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acdispat.h" |
48 | #include "acinterp.h" | ||
48 | #include <linux/nmi.h> | 49 | #include <linux/nmi.h> |
49 | 50 | ||
50 | #define _COMPONENT ACPI_NAMESPACE | 51 | #define _COMPONENT ACPI_NAMESPACE |
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/acpica/nsload.c
index a4a412b7c029..a0ba9e12379e 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acdispat.h> | 46 | #include "acnamesp.h" |
47 | #include <acpi/actables.h> | 47 | #include "acdispat.h" |
48 | #include "actables.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_NAMESPACE | 50 | #define _COMPONENT ACPI_NAMESPACE |
50 | ACPI_MODULE_NAME("nsload") | 51 | ACPI_MODULE_NAME("nsload") |
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 42a39a7c96e9..ae3dc10a7e81 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/amlcode.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "amlcode.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_NAMESPACE | 49 | #define _COMPONENT ACPI_NAMESPACE |
49 | ACPI_MODULE_NAME("nsnames") | 50 | ACPI_MODULE_NAME("nsnames") |
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 15fe09e24f71..08a97a57f8f9 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_NAMESPACE | 49 | #define _COMPONENT ACPI_NAMESPACE |
49 | ACPI_MODULE_NAME("nsobject") | 50 | ACPI_MODULE_NAME("nsobject") |
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/acpica/nsparse.c
index a82271a9dbb3..b9e8d0070b6f 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -42,10 +42,11 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acparser.h> | 46 | #include "acnamesp.h" |
47 | #include <acpi/acdispat.h> | 47 | #include "acparser.h" |
48 | #include <acpi/actables.h> | 48 | #include "acdispat.h" |
49 | #include "actables.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_NAMESPACE | 51 | #define _COMPONENT ACPI_NAMESPACE |
51 | ACPI_MODULE_NAME("nsparse") | 52 | ACPI_MODULE_NAME("nsparse") |
diff --git a/drivers/acpi/namespace/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 0f17cf0898c9..452703290d35 100644
--- a/drivers/acpi/namespace/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -43,8 +43,9 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acpredef.h> | 47 | #include "acnamesp.h" |
48 | #include "acpredef.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_NAMESPACE | 50 | #define _COMPONENT ACPI_NAMESPACE |
50 | ACPI_MODULE_NAME("nspredef") | 51 | ACPI_MODULE_NAME("nspredef") |
@@ -72,7 +73,7 @@ ACPI_MODULE_NAME("nspredef") | |||
72 | /* Local prototypes */ | 73 | /* Local prototypes */ |
73 | static acpi_status | 74 | static acpi_status |
74 | acpi_ns_check_package(char *pathname, | 75 | acpi_ns_check_package(char *pathname, |
75 | union acpi_operand_object *return_object, | 76 | union acpi_operand_object **return_object_ptr, |
76 | const union acpi_predefined_info *predefined); | 77 | const union acpi_predefined_info *predefined); |
77 | 78 | ||
78 | static acpi_status | 79 | static acpi_status |
@@ -82,13 +83,18 @@ acpi_ns_check_package_elements(char *pathname, | |||
82 | 83 | ||
83 | static acpi_status | 84 | static acpi_status |
84 | acpi_ns_check_object_type(char *pathname, | 85 | acpi_ns_check_object_type(char *pathname, |
85 | union acpi_operand_object *return_object, | 86 | union acpi_operand_object **return_object_ptr, |
86 | u32 expected_btypes, u32 package_index); | 87 | u32 expected_btypes, u32 package_index); |
87 | 88 | ||
88 | static acpi_status | 89 | static acpi_status |
89 | acpi_ns_check_reference(char *pathname, | 90 | acpi_ns_check_reference(char *pathname, |
90 | union acpi_operand_object *return_object); | 91 | union acpi_operand_object *return_object); |
91 | 92 | ||
93 | static acpi_status | ||
94 | acpi_ns_repair_object(u32 expected_btypes, | ||
95 | u32 package_index, | ||
96 | union acpi_operand_object **return_object_ptr); | ||
97 | |||
92 | /* | 98 | /* |
93 | * Names for the types that can be returned by the predefined objects. | 99 | * Names for the types that can be returned by the predefined objects. |
94 | * Used for warning messages. Must be in the same order as the ACPI_RTYPEs | 100 | * Used for warning messages. Must be in the same order as the ACPI_RTYPEs |
@@ -108,8 +114,8 @@ static const char *acpi_rtype_names[] = { | |||
108 | * FUNCTION: acpi_ns_check_predefined_names | 114 | * FUNCTION: acpi_ns_check_predefined_names |
109 | * | 115 | * |
110 | * PARAMETERS: Node - Namespace node for the method/object | 116 | * PARAMETERS: Node - Namespace node for the method/object |
111 | * return_object - Object returned from the evaluation of this | 117 | * return_object_ptr - Pointer to the object returned from the |
112 | * method/object | 118 | * evaluation of a method or object |
113 | * | 119 | * |
114 | * RETURN: Status | 120 | * RETURN: Status |
115 | * | 121 | * |
@@ -119,8 +125,11 @@ static const char *acpi_rtype_names[] = { | |||
119 | 125 | ||
120 | acpi_status | 126 | acpi_status |
121 | acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | 127 | acpi_ns_check_predefined_names(struct acpi_namespace_node *node, |
122 | union acpi_operand_object *return_object) | 128 | u32 user_param_count, |
129 | acpi_status return_status, | ||
130 | union acpi_operand_object **return_object_ptr) | ||
123 | { | 131 | { |
132 | union acpi_operand_object *return_object = *return_object_ptr; | ||
124 | acpi_status status = AE_OK; | 133 | acpi_status status = AE_OK; |
125 | const union acpi_predefined_info *predefined; | 134 | const union acpi_predefined_info *predefined; |
126 | char *pathname; | 135 | char *pathname; |
@@ -128,12 +137,6 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | |||
128 | /* Match the name for this method/object against the predefined list */ | 137 | /* Match the name for this method/object against the predefined list */ |
129 | 138 | ||
130 | predefined = acpi_ns_check_for_predefined_name(node); | 139 | predefined = acpi_ns_check_for_predefined_name(node); |
131 | if (!predefined) { | ||
132 | |||
133 | /* Name was not one of the predefined names */ | ||
134 | |||
135 | return (AE_OK); | ||
136 | } | ||
137 | 140 | ||
138 | /* Get the full pathname to the object, for use in error messages */ | 141 | /* Get the full pathname to the object, for use in error messages */ |
139 | 142 | ||
@@ -143,10 +146,37 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | |||
143 | } | 146 | } |
144 | 147 | ||
145 | /* | 148 | /* |
146 | * Check that the parameter count for this method is in accordance | 149 | * Check that the parameter count for this method matches the ASL |
147 | * with the ACPI specification. | 150 | * definition. For predefined names, ensure that both the caller and |
151 | * the method itself are in accordance with the ACPI specification. | ||
148 | */ | 152 | */ |
149 | acpi_ns_check_parameter_count(pathname, node, predefined); | 153 | acpi_ns_check_parameter_count(pathname, node, user_param_count, |
154 | predefined); | ||
155 | |||
156 | /* If not a predefined name, we cannot validate the return object */ | ||
157 | |||
158 | if (!predefined) { | ||
159 | goto exit; | ||
160 | } | ||
161 | |||
162 | /* If the method failed, we cannot validate the return object */ | ||
163 | |||
164 | if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) { | ||
165 | goto exit; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * Only validate the return value on the first successful evaluation of | ||
170 | * the method. This ensures that any warnings will only be emitted during | ||
171 | * the very first evaluation of the method/object. | ||
172 | */ | ||
173 | if (node->flags & ANOBJ_EVALUATED) { | ||
174 | goto exit; | ||
175 | } | ||
176 | |||
177 | /* Mark the node as having been successfully evaluated */ | ||
178 | |||
179 | node->flags |= ANOBJ_EVALUATED; | ||
150 | 180 | ||
151 | /* | 181 | /* |
152 | * If there is no return value, check if we require a return value for | 182 | * If there is no return value, check if we require a return value for |
@@ -171,7 +201,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | |||
171 | * We have a return value, but if one wasn't expected, just exit, this is | 201 | * We have a return value, but if one wasn't expected, just exit, this is |
172 | * not a problem | 202 | * not a problem |
173 | * | 203 | * |
174 | * For example, if "Implicit return value" is enabled, methods will | 204 | * For example, if the "Implicit Return" feature is enabled, methods will |
175 | * always return a value | 205 | * always return a value |
176 | */ | 206 | */ |
177 | if (!predefined->info.expected_btypes) { | 207 | if (!predefined->info.expected_btypes) { |
@@ -182,7 +212,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | |||
182 | * Check that the type of the return object is what is expected for | 212 | * Check that the type of the return object is what is expected for |
183 | * this predefined name | 213 | * this predefined name |
184 | */ | 214 | */ |
185 | status = acpi_ns_check_object_type(pathname, return_object, | 215 | status = acpi_ns_check_object_type(pathname, return_object_ptr, |
186 | predefined->info.expected_btypes, | 216 | predefined->info.expected_btypes, |
187 | ACPI_NOT_PACKAGE); | 217 | ACPI_NOT_PACKAGE); |
188 | if (ACPI_FAILURE(status)) { | 218 | if (ACPI_FAILURE(status)) { |
@@ -193,11 +223,12 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | |||
193 | 223 | ||
194 | if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) { | 224 | if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) { |
195 | status = | 225 | status = |
196 | acpi_ns_check_package(pathname, return_object, predefined); | 226 | acpi_ns_check_package(pathname, return_object_ptr, |
227 | predefined); | ||
197 | } | 228 | } |
198 | 229 | ||
199 | exit: | 230 | exit: |
200 | if (pathname) { | 231 | if (pathname != predefined->info.name) { |
201 | ACPI_FREE(pathname); | 232 | ACPI_FREE(pathname); |
202 | } | 233 | } |
203 | 234 | ||
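The hunk above gates return-object validation on a per-node "already evaluated" flag, so the warnings fire only during the first successful evaluation. A condensed sketch of that warn-once pattern, using the same ANOBJ_EVALUATED flag; the surrounding helper is hypothetical.

/* Sketch: run validation (and its warnings) only once per namespace node */
static void example_validate_once(struct acpi_namespace_node *node)
{
	if (node->flags & ANOBJ_EVALUATED) {
		return;			/* checked on an earlier successful run */
	}
	node->flags |= ANOBJ_EVALUATED;	/* set before any ACPI_WARNING() output */

	/* ... return-type and package checks that may emit warnings ... */
}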
@@ -210,6 +241,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | |||
210 | * | 241 | * |
211 | * PARAMETERS: Pathname - Full pathname to the node (for error msgs) | 242 | * PARAMETERS: Pathname - Full pathname to the node (for error msgs) |
212 | * Node - Namespace node for the method/object | 243 | * Node - Namespace node for the method/object |
244 | * user_param_count - Number of args passed in by the caller | ||
213 | * Predefined - Pointer to entry in predefined name table | 245 | * Predefined - Pointer to entry in predefined name table |
214 | * | 246 | * |
215 | * RETURN: None | 247 | * RETURN: None |
@@ -223,32 +255,76 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, | |||
223 | void | 255 | void |
224 | acpi_ns_check_parameter_count(char *pathname, | 256 | acpi_ns_check_parameter_count(char *pathname, |
225 | struct acpi_namespace_node *node, | 257 | struct acpi_namespace_node *node, |
258 | u32 user_param_count, | ||
226 | const union acpi_predefined_info *predefined) | 259 | const union acpi_predefined_info *predefined) |
227 | { | 260 | { |
228 | u32 param_count; | 261 | u32 param_count; |
229 | u32 required_params_current; | 262 | u32 required_params_current; |
230 | u32 required_params_old; | 263 | u32 required_params_old; |
231 | 264 | ||
232 | /* | 265 | /* Methods have 0-7 parameters. All other types have zero. */ |
233 | * Check that the ASL-defined parameter count is what is expected for | 266 | |
234 | * this predefined name. | ||
235 | * | ||
236 | * Methods have 0-7 parameters. All other types have zero. | ||
237 | */ | ||
238 | param_count = 0; | 267 | param_count = 0; |
239 | if (node->type == ACPI_TYPE_METHOD) { | 268 | if (node->type == ACPI_TYPE_METHOD) { |
240 | param_count = node->object->method.param_count; | 269 | param_count = node->object->method.param_count; |
241 | } | 270 | } |
242 | 271 | ||
243 | /* Validate parameter count - allow two different legal counts (_SCP) */ | 272 | /* Argument count check for non-predefined methods/objects */ |
273 | |||
274 | if (!predefined) { | ||
275 | /* | ||
276 | * Warning if too few or too many arguments have been passed by the | ||
277 | * caller. An incorrect number of arguments may not cause the method | ||
278 | * to fail. However, the method will fail if there are too few | ||
279 | * arguments and the method attempts to use one of the missing ones. | ||
280 | */ | ||
281 | if (user_param_count < param_count) { | ||
282 | ACPI_WARNING((AE_INFO, | ||
283 | "%s: Insufficient arguments - needs %d, found %d", | ||
284 | pathname, param_count, user_param_count)); | ||
285 | } else if (user_param_count > param_count) { | ||
286 | ACPI_WARNING((AE_INFO, | ||
287 | "%s: Excess arguments - needs %d, found %d", | ||
288 | pathname, param_count, user_param_count)); | ||
289 | } | ||
290 | return; | ||
291 | } | ||
292 | |||
293 | /* Allow two different legal argument counts (_SCP, etc.) */ | ||
244 | 294 | ||
245 | required_params_current = predefined->info.param_count & 0x0F; | 295 | required_params_current = predefined->info.param_count & 0x0F; |
246 | required_params_old = predefined->info.param_count >> 4; | 296 | required_params_old = predefined->info.param_count >> 4; |
247 | 297 | ||
298 | if (user_param_count != ACPI_UINT32_MAX) { | ||
299 | |||
300 | /* Validate the user-supplied parameter count */ | ||
301 | |||
302 | if ((user_param_count != required_params_current) && | ||
303 | (user_param_count != required_params_old)) { | ||
304 | ACPI_WARNING((AE_INFO, | ||
305 | "%s: Parameter count mismatch - caller passed %d, ACPI requires %d", | ||
306 | pathname, user_param_count, | ||
307 | required_params_current)); | ||
308 | } | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * Only validate the argument count on the first successful evaluation of | ||
313 | * the method. This ensures that any warnings will only be emitted during | ||
314 | * the very first evaluation of the method/object. | ||
315 | */ | ||
316 | if (node->flags & ANOBJ_EVALUATED) { | ||
317 | return; | ||
318 | } | ||
319 | |||
320 | /* | ||
321 | * Check that the ASL-defined parameter count is what is expected for | ||
322 | * this predefined name. | ||
323 | */ | ||
248 | if ((param_count != required_params_current) && | 324 | if ((param_count != required_params_current) && |
249 | (param_count != required_params_old)) { | 325 | (param_count != required_params_old)) { |
250 | ACPI_WARNING((AE_INFO, | 326 | ACPI_WARNING((AE_INFO, |
251 | "%s: Parameter count mismatch - ASL declared %d, expected %d", | 327 | "%s: Parameter count mismatch - ASL declared %d, ACPI requires %d", |
252 | pathname, param_count, required_params_current)); | 328 | pathname, param_count, required_params_current)); |
253 | } | 329 | } |
254 | } | 330 | } |
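As the code above shows, the predefined-name table packs two legal argument counts into a single byte: the current ACPI-defined count in the low nibble and an older, still-tolerated count in the high nibble (the _SCP case mentioned in the comment). A tiny standalone sketch of that decode; under this scheme a name that once took 1 argument and now takes 3 would be encoded as 0x13 (values shown for illustration).

/* Sketch: unpack a predefined-name argument-count byte.
 * Low nibble = current required count, high nibble = legacy count
 * (0 when only one count is legal). */
static void example_unpack_param_count(unsigned char packed,
				       unsigned int *required_current,
				       unsigned int *required_old)
{
	*required_current = packed & 0x0F;
	*required_old = packed >> 4;
}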
@@ -307,8 +383,8 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct | |||
307 | * FUNCTION: acpi_ns_check_package | 383 | * FUNCTION: acpi_ns_check_package |
308 | * | 384 | * |
309 | * PARAMETERS: Pathname - Full pathname to the node (for error msgs) | 385 | * PARAMETERS: Pathname - Full pathname to the node (for error msgs) |
310 | * return_object - Object returned from the evaluation of a | 386 | * return_object_ptr - Pointer to the object returned from the |
311 | * method or object | 387 | * evaluation of a method or object |
312 | * Predefined - Pointer to entry in predefined name table | 388 | * Predefined - Pointer to entry in predefined name table |
313 | * | 389 | * |
314 | * RETURN: Status | 390 | * RETURN: Status |
@@ -320,9 +396,10 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct | |||
320 | 396 | ||
321 | static acpi_status | 397 | static acpi_status |
322 | acpi_ns_check_package(char *pathname, | 398 | acpi_ns_check_package(char *pathname, |
323 | union acpi_operand_object *return_object, | 399 | union acpi_operand_object **return_object_ptr, |
324 | const union acpi_predefined_info *predefined) | 400 | const union acpi_predefined_info *predefined) |
325 | { | 401 | { |
402 | union acpi_operand_object *return_object = *return_object_ptr; | ||
326 | const union acpi_predefined_info *package; | 403 | const union acpi_predefined_info *package; |
327 | union acpi_operand_object *sub_package; | 404 | union acpi_operand_object *sub_package; |
328 | union acpi_operand_object **elements; | 405 | union acpi_operand_object **elements; |
@@ -408,7 +485,7 @@ acpi_ns_check_package(char *pathname, | |||
408 | * elements must be of the same type | 485 | * elements must be of the same type |
409 | */ | 486 | */ |
410 | for (i = 0; i < count; i++) { | 487 | for (i = 0; i < count; i++) { |
411 | status = acpi_ns_check_object_type(pathname, *elements, | 488 | status = acpi_ns_check_object_type(pathname, elements, |
412 | package->ret_info. | 489 | package->ret_info. |
413 | object_type1, i); | 490 | object_type1, i); |
414 | if (ACPI_FAILURE(status)) { | 491 | if (ACPI_FAILURE(status)) { |
@@ -441,7 +518,7 @@ acpi_ns_check_package(char *pathname, | |||
441 | 518 | ||
442 | status = | 519 | status = |
443 | acpi_ns_check_object_type(pathname, | 520 | acpi_ns_check_object_type(pathname, |
444 | *elements, | 521 | elements, |
445 | package-> | 522 | package-> |
446 | ret_info3. | 523 | ret_info3. |
447 | object_type[i], | 524 | object_type[i], |
@@ -454,7 +531,7 @@ acpi_ns_check_package(char *pathname, | |||
454 | 531 | ||
455 | status = | 532 | status = |
456 | acpi_ns_check_object_type(pathname, | 533 | acpi_ns_check_object_type(pathname, |
457 | *elements, | 534 | elements, |
458 | package-> | 535 | package-> |
459 | ret_info3. | 536 | ret_info3. |
460 | tail_object_type, | 537 | tail_object_type, |
@@ -471,7 +548,7 @@ acpi_ns_check_package(char *pathname, | |||
471 | 548 | ||
472 | /* First element is the (Integer) count of sub-packages to follow */ | 549 | /* First element is the (Integer) count of sub-packages to follow */ |
473 | 550 | ||
474 | status = acpi_ns_check_object_type(pathname, *elements, | 551 | status = acpi_ns_check_object_type(pathname, elements, |
475 | ACPI_RTYPE_INTEGER, 0); | 552 | ACPI_RTYPE_INTEGER, 0); |
476 | if (ACPI_FAILURE(status)) { | 553 | if (ACPI_FAILURE(status)) { |
477 | return (status); | 554 | return (status); |
@@ -509,7 +586,7 @@ acpi_ns_check_package(char *pathname, | |||
509 | /* Each sub-object must be of type Package */ | 586 | /* Each sub-object must be of type Package */ |
510 | 587 | ||
511 | status = | 588 | status = |
512 | acpi_ns_check_object_type(pathname, sub_package, | 589 | acpi_ns_check_object_type(pathname, &sub_package, |
513 | ACPI_RTYPE_PACKAGE, i); | 590 | ACPI_RTYPE_PACKAGE, i); |
514 | if (ACPI_FAILURE(status)) { | 591 | if (ACPI_FAILURE(status)) { |
515 | return (status); | 592 | return (status); |
@@ -567,12 +644,8 @@ acpi_ns_check_package(char *pathname, | |||
567 | for (j = 0; j < expected_count; j++) { | 644 | for (j = 0; j < expected_count; j++) { |
568 | status = | 645 | status = |
569 | acpi_ns_check_object_type(pathname, | 646 | acpi_ns_check_object_type(pathname, |
570 | sub_elements | 647 | &sub_elements[j], |
571 | [j], | 648 | package->ret_info2.object_type[j], j); |
572 | package-> | ||
573 | ret_info2. | ||
574 | object_type | ||
575 | [j], j); | ||
576 | if (ACPI_FAILURE(status)) { | 649 | if (ACPI_FAILURE(status)) { |
577 | return (status); | 650 | return (status); |
578 | } | 651 | } |
@@ -611,7 +684,7 @@ acpi_ns_check_package(char *pathname, | |||
611 | 684 | ||
612 | status = | 685 | status = |
613 | acpi_ns_check_object_type(pathname, | 686 | acpi_ns_check_object_type(pathname, |
614 | *sub_elements, | 687 | sub_elements, |
615 | ACPI_RTYPE_INTEGER, | 688 | ACPI_RTYPE_INTEGER, |
616 | 0); | 689 | 0); |
617 | if (ACPI_FAILURE(status)) { | 690 | if (ACPI_FAILURE(status)) { |
@@ -708,7 +781,7 @@ acpi_ns_check_package_elements(char *pathname, | |||
708 | * The second group can have a count of zero. | 781 | * The second group can have a count of zero. |
709 | */ | 782 | */ |
710 | for (i = 0; i < count1; i++) { | 783 | for (i = 0; i < count1; i++) { |
711 | status = acpi_ns_check_object_type(pathname, *this_element, | 784 | status = acpi_ns_check_object_type(pathname, this_element, |
712 | type1, i); | 785 | type1, i); |
713 | if (ACPI_FAILURE(status)) { | 786 | if (ACPI_FAILURE(status)) { |
714 | return (status); | 787 | return (status); |
@@ -717,7 +790,7 @@ acpi_ns_check_package_elements(char *pathname, | |||
717 | } | 790 | } |
718 | 791 | ||
719 | for (i = 0; i < count2; i++) { | 792 | for (i = 0; i < count2; i++) { |
720 | status = acpi_ns_check_object_type(pathname, *this_element, | 793 | status = acpi_ns_check_object_type(pathname, this_element, |
721 | type2, (i + count1)); | 794 | type2, (i + count1)); |
722 | if (ACPI_FAILURE(status)) { | 795 | if (ACPI_FAILURE(status)) { |
723 | return (status); | 796 | return (status); |
@@ -733,8 +806,8 @@ acpi_ns_check_package_elements(char *pathname, | |||
733 | * FUNCTION: acpi_ns_check_object_type | 806 | * FUNCTION: acpi_ns_check_object_type |
734 | * | 807 | * |
735 | * PARAMETERS: Pathname - Full pathname to the node (for error msgs) | 808 | * PARAMETERS: Pathname - Full pathname to the node (for error msgs) |
736 | * return_object - Object return from the execution of this | 809 | * return_object_ptr - Pointer to the object returned from the |
737 | * method/object | 810 | * evaluation of a method or object |
738 | * expected_btypes - Bitmap of expected return type(s) | 811 | * expected_btypes - Bitmap of expected return type(s) |
739 | * package_index - Index of object within parent package (if | 812 | * package_index - Index of object within parent package (if |
740 | * applicable - ACPI_NOT_PACKAGE otherwise) | 813 | * applicable - ACPI_NOT_PACKAGE otherwise) |
@@ -748,9 +821,10 @@ acpi_ns_check_package_elements(char *pathname, | |||
748 | 821 | ||
749 | static acpi_status | 822 | static acpi_status |
750 | acpi_ns_check_object_type(char *pathname, | 823 | acpi_ns_check_object_type(char *pathname, |
751 | union acpi_operand_object *return_object, | 824 | union acpi_operand_object **return_object_ptr, |
752 | u32 expected_btypes, u32 package_index) | 825 | u32 expected_btypes, u32 package_index) |
753 | { | 826 | { |
827 | union acpi_operand_object *return_object = *return_object_ptr; | ||
754 | acpi_status status = AE_OK; | 828 | acpi_status status = AE_OK; |
755 | u32 return_btype; | 829 | u32 return_btype; |
756 | char type_buffer[48]; /* Room for 5 types */ | 830 | char type_buffer[48]; /* Room for 5 types */ |
@@ -814,6 +888,14 @@ acpi_ns_check_object_type(char *pathname, | |||
814 | /* Is the object one of the expected types? */ | 888 | /* Is the object one of the expected types? */ |
815 | 889 | ||
816 | if (!(return_btype & expected_btypes)) { | 890 | if (!(return_btype & expected_btypes)) { |
891 | |||
892 | /* Type mismatch -- attempt repair of the returned object */ | ||
893 | |||
894 | status = acpi_ns_repair_object(expected_btypes, package_index, | ||
895 | return_object_ptr); | ||
896 | if (ACPI_SUCCESS(status)) { | ||
897 | return (status); | ||
898 | } | ||
817 | goto type_error_exit; | 899 | goto type_error_exit; |
818 | } | 900 | } |
819 | 901 | ||
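The signature change above turns the return-object parameter into a pointer-to-pointer so that a repaired object can be installed in the caller's slot before the error path is taken. A minimal standalone sketch of that pattern, using a toy object model and a toy integer-to-string repair rather than the ACPICA operand objects:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy object model -- not the ACPICA operand object */
    enum obj_type { T_INTEGER, T_STRING };

    struct obj {
        enum obj_type type;
        long long integer;
        char *string;
    };

    /* Attempt a repair: a toy integer-to-string conversion stands in for
     * the real buffer-to-string case. The double pointer is what lets the
     * repaired object replace the caller's object in place. */
    static int repair_object(enum obj_type expected, struct obj **obj_ptr)
    {
        struct obj *old = *obj_ptr;
        struct obj *repaired;
        char buf[32];

        if (expected != T_STRING || old->type != T_INTEGER)
            return -1;                       /* cannot repair this combination */

        repaired = calloc(1, sizeof(*repaired));
        if (!repaired)
            return -1;

        snprintf(buf, sizeof(buf), "%lld", old->integer);
        repaired->type = T_STRING;
        repaired->string = strdup(buf);
        if (!repaired->string) {
            free(repaired);
            return -1;
        }

        free(old);
        *obj_ptr = repaired;                 /* caller now sees the new object */
        return 0;
    }

    /* Type check that tries the repair before reporting a mismatch */
    static int check_object_type(enum obj_type expected, struct obj **obj_ptr)
    {
        if ((*obj_ptr)->type == expected)
            return 0;
        if (repair_object(expected, obj_ptr) == 0)
            return 0;
        fprintf(stderr, "type mismatch, repair failed\n");
        return -1;
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        o->type = T_INTEGER;
        o->integer = 42;
        if (check_object_type(T_STRING, &o) == 0)
            printf("repaired to string: %s\n", o->string);   /* "42" */
        free(o->string);
        free(o);
        return 0;
    }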
@@ -898,3 +980,86 @@ acpi_ns_check_reference(char *pathname, | |||
898 | 980 | ||
899 | return (AE_AML_OPERAND_TYPE); | 981 | return (AE_AML_OPERAND_TYPE); |
900 | } | 982 | } |
983 | |||
984 | /******************************************************************************* | ||
985 | * | ||
986 | * FUNCTION: acpi_ns_repair_object | ||
987 | * | ||
988 | * PARAMETERS: Pathname - Full pathname to the node (for error msgs) | ||
989 | * package_index - Used to determine if target is in a package | ||
990 | * return_object_ptr - Pointer to the object returned from the | ||
991 | * evaluation of a method or object | ||
992 | * | ||
993 | * RETURN: Status. AE_OK if repair was successful. | ||
994 | * | ||
995 | * DESCRIPTION: Attempt to repair/convert a return object of a type that was | ||
996 | * not expected. | ||
997 | * | ||
998 | ******************************************************************************/ | ||
999 | |||
1000 | static acpi_status | ||
1001 | acpi_ns_repair_object(u32 expected_btypes, | ||
1002 | u32 package_index, | ||
1003 | union acpi_operand_object **return_object_ptr) | ||
1004 | { | ||
1005 | union acpi_operand_object *return_object = *return_object_ptr; | ||
1006 | union acpi_operand_object *new_object; | ||
1007 | acpi_size length; | ||
1008 | |||
1009 | switch (ACPI_GET_OBJECT_TYPE(return_object)) { | ||
1010 | case ACPI_TYPE_BUFFER: | ||
1011 | |||
1012 | if (!(expected_btypes & ACPI_RTYPE_STRING)) { | ||
1013 | return (AE_AML_OPERAND_TYPE); | ||
1014 | } | ||
1015 | |||
1016 | /* | ||
1017 | * Have a Buffer, expected a String - convert. Use a to_string | ||
1018 | * conversion; no transform is performed on the buffer data. The best | ||
1019 | * example of this is the _BIF method, where the string data from | ||
1020 | * the battery is often (incorrectly) returned as buffer object(s). | ||
1021 | */ | ||
1022 | length = 0; | ||
1023 | while ((length < return_object->buffer.length) && | ||
1024 | (return_object->buffer.pointer[length])) { | ||
1025 | length++; | ||
1026 | } | ||
1027 | |||
1028 | /* Allocate a new string object */ | ||
1029 | |||
1030 | new_object = acpi_ut_create_string_object(length); | ||
1031 | if (!new_object) { | ||
1032 | return (AE_NO_MEMORY); | ||
1033 | } | ||
1034 | |||
1035 | /* | ||
1036 | * Copy the raw buffer data with no transform. String is already NULL | ||
1037 | * terminated at Length+1. | ||
1038 | */ | ||
1039 | ACPI_MEMCPY(new_object->string.pointer, | ||
1040 | return_object->buffer.pointer, length); | ||
1041 | |||
1042 | /* Install the new return object */ | ||
1043 | |||
1044 | acpi_ut_remove_reference(return_object); | ||
1045 | *return_object_ptr = new_object; | ||
1046 | |||
1047 | /* | ||
1048 | * If the object is a package element, we need to: | ||
1049 | * 1. Decrement the reference count of the original object, it was | ||
1050 | * incremented when building the package | ||
1051 | * 2. Increment the reference count of the new object, it will be | ||
1052 | * decremented when releasing the package | ||
1053 | */ | ||
1054 | if (package_index != ACPI_NOT_PACKAGE) { | ||
1055 | acpi_ut_remove_reference(return_object); | ||
1056 | acpi_ut_add_reference(new_object); | ||
1057 | } | ||
1058 | return (AE_OK); | ||
1059 | |||
1060 | default: | ||
1061 | break; | ||
1062 | } | ||
1063 | |||
1064 | return (AE_AML_OPERAND_TYPE); | ||
1065 | } | ||
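The buffer-to-string repair above scans for a terminating NUL within the buffer length and copies the raw bytes unchanged. A standalone sketch of just that conversion, with an illustrative _BIF-style payload (not real firmware data):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Take bytes up to the first NUL or the end of the buffer, whichever
     * comes first, and copy them unchanged into a terminated string */
    static char *buffer_to_string(const unsigned char *buf, size_t buf_len)
    {
        size_t length = 0;
        char *str;

        while (length < buf_len && buf[length])
            length++;

        str = malloc(length + 1);
        if (!str)
            return NULL;

        memcpy(str, buf, length);
        str[length] = '\0';                 /* result is always terminated */
        return str;
    }

    int main(void)
    {
        /* Illustrative _BIF-style payload: string data returned as a buffer */
        const unsigned char model[] = { 'B', 'A', 'T', '1', '-', 'X', 0, 0, 0 };
        char *s = buffer_to_string(model, sizeof(model));

        printf("%s\n", s ? s : "(allocation failed)");
        free(s);
        return 0;
    }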
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/acpica/nssearch.c index a9a80bf811b3..6fea13f3f52d 100644 --- a/drivers/acpi/namespace/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include "acnamesp.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_NAMESPACE | 48 | #define _COMPONENT ACPI_NAMESPACE |
48 | ACPI_MODULE_NAME("nssearch") | 49 | ACPI_MODULE_NAME("nssearch") |
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/acpica/nsutils.c index b0817e1127b1..3e1149bf4aa5 100644 --- a/drivers/acpi/namespace/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c | |||
@@ -43,9 +43,10 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/amlcode.h> | 47 | #include "acnamesp.h" |
48 | #include <acpi/actables.h> | 48 | #include "amlcode.h" |
49 | #include "actables.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_NAMESPACE | 51 | #define _COMPONENT ACPI_NAMESPACE |
51 | ACPI_MODULE_NAME("nsutils") | 52 | ACPI_MODULE_NAME("nsutils") |
@@ -314,9 +315,15 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info) | |||
314 | * | 315 | * |
315 | * strlen() + 1 covers the first name_seg, which has no path separator | 316 | * strlen() + 1 covers the first name_seg, which has no path separator |
316 | */ | 317 | */ |
317 | if (acpi_ns_valid_root_prefix(next_external_char[0])) { | 318 | if (acpi_ns_valid_root_prefix(*next_external_char)) { |
318 | info->fully_qualified = TRUE; | 319 | info->fully_qualified = TRUE; |
319 | next_external_char++; | 320 | next_external_char++; |
321 | |||
322 | /* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */ | ||
323 | |||
324 | while (acpi_ns_valid_root_prefix(*next_external_char)) { | ||
325 | next_external_char++; | ||
326 | } | ||
320 | } else { | 327 | } else { |
321 | /* | 328 | /* |
322 | * Handle Carat prefixes | 329 | * Handle Carat prefixes |
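The new loop above tolerates namestrings that carry more than one leading root prefix. A small sketch of the same idea in plain C (the ACPI root prefix is the backslash character):

    #include <stdio.h>

    /* Consume one or more leading root prefixes ('\') in an ACPI-style
     * namestring, e.g. "\\_SB.PCI0.SBRG.EC0" -> "_SB.PCI0.SBRG.EC0" */
    static const char *skip_root_prefixes(const char *name)
    {
        while (*name == '\\')
            name++;
        return name;
    }

    int main(void)
    {
        /* The doubled escapes in the literal encode two leading backslashes */
        printf("%s\n", skip_root_prefixes("\\\\_SB.PCI0.SBRG.EC0"));
        return 0;
    }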
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/acpica/nswalk.c index 3c905ce26d7d..200895fa2728 100644 --- a/drivers/acpi/namespace/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include "acnamesp.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_NAMESPACE | 48 | #define _COMPONENT ACPI_NAMESPACE |
48 | ACPI_MODULE_NAME("nswalk") | 49 | ACPI_MODULE_NAME("nswalk") |
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index a085cc39c055..22a7171ac1ed 100644 --- a/drivers/acpi/namespace/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c | |||
@@ -43,8 +43,9 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acnamesp.h" |
48 | #include "acinterp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_NAMESPACE | 50 | #define _COMPONENT ACPI_NAMESPACE |
50 | ACPI_MODULE_NAME("nsxfeval") | 51 | ACPI_MODULE_NAME("nsxfeval") |
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 5efa4e7ddb0b..9589fea24997 100644 --- a/drivers/acpi/namespace/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c | |||
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_NAMESPACE | 49 | #define _COMPONENT ACPI_NAMESPACE |
49 | ACPI_MODULE_NAME("nsxfname") | 50 | ACPI_MODULE_NAME("nsxfname") |
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index 2b375ee80cef..1c7efc15225f 100644 --- a/drivers/acpi/namespace/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c | |||
@@ -43,7 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_NAMESPACE | 49 | #define _COMPONENT ACPI_NAMESPACE |
49 | ACPI_MODULE_NAME("nsxfobj") | 50 | ACPI_MODULE_NAME("nsxfobj") |
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/acpica/psargs.c index d830b29b85b1..b161f3544b51 100644 --- a/drivers/acpi/parser/psargs.c +++ b/drivers/acpi/acpica/psargs.c | |||
@@ -42,10 +42,11 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acparser.h" |
47 | #include <acpi/acnamesp.h> | 47 | #include "amlcode.h" |
48 | #include <acpi/acdispat.h> | 48 | #include "acnamesp.h" |
49 | #include "acdispat.h" | ||
49 | 50 | ||
50 | #define _COMPONENT ACPI_PARSER | 51 | #define _COMPONENT ACPI_PARSER |
51 | ACPI_MODULE_NAME("psargs") | 52 | ACPI_MODULE_NAME("psargs") |
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/acpica/psloop.c index 4647039a0d8a..c5f6ce19a401 100644 --- a/drivers/acpi/parser/psloop.c +++ b/drivers/acpi/acpica/psloop.c | |||
@@ -50,9 +50,10 @@ | |||
50 | */ | 50 | */ |
51 | 51 | ||
52 | #include <acpi/acpi.h> | 52 | #include <acpi/acpi.h> |
53 | #include <acpi/acparser.h> | 53 | #include "accommon.h" |
54 | #include <acpi/acdispat.h> | 54 | #include "acparser.h" |
55 | #include <acpi/amlcode.h> | 55 | #include "acdispat.h" |
56 | #include "amlcode.h" | ||
56 | 57 | ||
57 | #define _COMPONENT ACPI_PARSER | 58 | #define _COMPONENT ACPI_PARSER |
58 | ACPI_MODULE_NAME("psloop") | 59 | ACPI_MODULE_NAME("psloop") |
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/acpica/psopcode.c index f425ab30eae8..3bc3a60194d6 100644 --- a/drivers/acpi/parser/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c | |||
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acopcode.h> | 46 | #include "acparser.h" |
47 | #include <acpi/amlcode.h> | 47 | #include "acopcode.h" |
48 | #include "amlcode.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_PARSER | 50 | #define _COMPONENT ACPI_PARSER |
50 | ACPI_MODULE_NAME("psopcode") | 51 | ACPI_MODULE_NAME("psopcode") |
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/acpica/psparse.c index 68e932f215ea..70838e9b608c 100644 --- a/drivers/acpi/parser/psparse.c +++ b/drivers/acpi/acpica/psparse.c | |||
@@ -51,11 +51,12 @@ | |||
51 | */ | 51 | */ |
52 | 52 | ||
53 | #include <acpi/acpi.h> | 53 | #include <acpi/acpi.h> |
54 | #include <acpi/acparser.h> | 54 | #include "accommon.h" |
55 | #include <acpi/acdispat.h> | 55 | #include "acparser.h" |
56 | #include <acpi/amlcode.h> | 56 | #include "acdispat.h" |
57 | #include <acpi/acnamesp.h> | 57 | #include "amlcode.h" |
58 | #include <acpi/acinterp.h> | 58 | #include "acnamesp.h" |
59 | #include "acinterp.h" | ||
59 | 60 | ||
60 | #define _COMPONENT ACPI_PARSER | 61 | #define _COMPONENT ACPI_PARSER |
61 | ACPI_MODULE_NAME("psparse") | 62 | ACPI_MODULE_NAME("psparse") |
@@ -447,10 +448,22 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
447 | walk_state, walk_state->parser_state.aml, | 448 | walk_state, walk_state->parser_state.aml, |
448 | walk_state->parser_state.aml_size)); | 449 | walk_state->parser_state.aml_size)); |
449 | 450 | ||
451 | if (!walk_state->parser_state.aml) { | ||
452 | return_ACPI_STATUS(AE_NULL_OBJECT); | ||
453 | } | ||
454 | |||
450 | /* Create and initialize a new thread state */ | 455 | /* Create and initialize a new thread state */ |
451 | 456 | ||
452 | thread = acpi_ut_create_thread_state(); | 457 | thread = acpi_ut_create_thread_state(); |
453 | if (!thread) { | 458 | if (!thread) { |
459 | if (walk_state->method_desc) { | ||
460 | |||
461 | /* Executing a control method - additional cleanup */ | ||
462 | |||
463 | acpi_ds_terminate_control_method( | ||
464 | walk_state->method_desc, walk_state); | ||
465 | } | ||
466 | |||
454 | acpi_ds_delete_walk_state(walk_state); | 467 | acpi_ds_delete_walk_state(walk_state); |
455 | return_ACPI_STATUS(AE_NO_MEMORY); | 468 | return_ACPI_STATUS(AE_NO_MEMORY); |
456 | } | 469 | } |
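The hunk above adds two defensive paths: reject a null AML pointer before parsing, and give an executing control method its own teardown when the thread state cannot be allocated. A sketch of that error-handling shape with hypothetical stand-in types (not the ACPICA walk/thread state):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins -- not the ACPICA walk/thread state types */
    struct walk_state { const unsigned char *aml; void *method_desc; };
    struct thread_state { int unused; };

    static void terminate_control_method(void *method_desc) { (void)method_desc; }
    static void delete_walk_state(struct walk_state *ws) { (void)ws; }

    static int parse_aml(struct walk_state *ws)
    {
        struct thread_state *thread;

        /* Reject a null AML pointer up front instead of faulting later */
        if (!ws->aml)
            return -1;

        thread = malloc(sizeof(*thread));
        if (!thread) {
            /* An executing control method needs its own teardown in
             * addition to deleting the walk state */
            if (ws->method_desc)
                terminate_control_method(ws->method_desc);
            delete_walk_state(ws);
            return -1;
        }

        /* ... the parse loop would run here ... */
        free(thread);
        return 0;
    }

    int main(void)
    {
        struct walk_state ws = { NULL, NULL };

        if (parse_aml(&ws) != 0)
            printf("null AML rejected cleanly\n");
        return 0;
    }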
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/acpica/psscope.c index ee50e67c9443..2feca5ca9581 100644 --- a/drivers/acpi/parser/psscope.c +++ b/drivers/acpi/acpica/psscope.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include "acparser.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_PARSER | 48 | #define _COMPONENT ACPI_PARSER |
48 | ACPI_MODULE_NAME("psscope") | 49 | ACPI_MODULE_NAME("psscope") |
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/acpica/pstree.c index 1dd355ddd182..4d3389118ec3 100644 --- a/drivers/acpi/parser/pstree.c +++ b/drivers/acpi/acpica/pstree.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acparser.h" |
47 | #include "amlcode.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_PARSER | 49 | #define _COMPONENT ACPI_PARSER |
49 | ACPI_MODULE_NAME("pstree") | 50 | ACPI_MODULE_NAME("pstree") |
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/acpica/psutils.c index 7cf1f65cd5bb..e636e078ad3d 100644 --- a/drivers/acpi/parser/psutils.c +++ b/drivers/acpi/acpica/psutils.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/amlcode.h> | 46 | #include "acparser.h" |
47 | #include "amlcode.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_PARSER | 49 | #define _COMPONENT ACPI_PARSER |
49 | ACPI_MODULE_NAME("psutils") | 50 | ACPI_MODULE_NAME("psutils") |
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/acpica/pswalk.c index 8b86ad5a3201..78b8b791f2ae 100644 --- a/drivers/acpi/parser/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include "acparser.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_PARSER | 48 | #define _COMPONENT ACPI_PARSER |
48 | ACPI_MODULE_NAME("pswalk") | 49 | ACPI_MODULE_NAME("pswalk") |
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/acpica/psxface.c index 270469aae842..ff06032c0f06 100644 --- a/drivers/acpi/parser/psxface.c +++ b/drivers/acpi/acpica/psxface.c | |||
@@ -42,9 +42,11 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acparser.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acdispat.h> | 46 | #include "acparser.h" |
47 | #include <acpi/acinterp.h> | 47 | #include "acdispat.h" |
48 | #include "acinterp.h" | ||
49 | #include "amlcode.h" | ||
48 | 50 | ||
49 | #define _COMPONENT ACPI_PARSER | 51 | #define _COMPONENT ACPI_PARSER |
50 | ACPI_MODULE_NAME("psxface") | 52 | ACPI_MODULE_NAME("psxface") |
@@ -278,6 +280,38 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) | |||
278 | goto cleanup; | 280 | goto cleanup; |
279 | } | 281 | } |
280 | 282 | ||
283 | /* Invoke an internal method if necessary */ | ||
284 | |||
285 | if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) { | ||
286 | status = info->obj_desc->method.implementation(walk_state); | ||
287 | info->return_object = walk_state->return_desc; | ||
288 | |||
289 | /* Cleanup states */ | ||
290 | |||
291 | acpi_ds_scope_stack_clear(walk_state); | ||
292 | acpi_ps_cleanup_scope(&walk_state->parser_state); | ||
293 | acpi_ds_terminate_control_method(walk_state->method_desc, | ||
294 | walk_state); | ||
295 | acpi_ds_delete_walk_state(walk_state); | ||
296 | goto cleanup; | ||
297 | } | ||
298 | |||
299 | /* | ||
300 | * Start method evaluation with an implicit return of zero. | ||
301 | * This is done for Windows compatibility. | ||
302 | */ | ||
303 | if (acpi_gbl_enable_interpreter_slack) { | ||
304 | walk_state->implicit_return_obj = | ||
305 | acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); | ||
306 | if (!walk_state->implicit_return_obj) { | ||
307 | status = AE_NO_MEMORY; | ||
308 | acpi_ds_delete_walk_state(walk_state); | ||
309 | goto cleanup; | ||
310 | } | ||
311 | |||
312 | walk_state->implicit_return_obj->integer.value = 0; | ||
313 | } | ||
314 | |||
281 | /* Parse the AML */ | 315 | /* Parse the AML */ |
282 | 316 | ||
283 | status = acpi_ps_parse_aml(walk_state); | 317 | status = acpi_ps_parse_aml(walk_state); |
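The second addition above pre-creates an implicit return value of Integer 0 so that, when interpreter slack is enabled, a method that never executes an explicit Return still yields a value (Windows-compatible behavior). A sketch of the idea with a hypothetical return-value object:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical return-value object -- not the ACPICA operand object */
    struct value { long long integer; };

    struct method_ctx {
        struct value *explicit_return;   /* set by the method body, may stay NULL */
        struct value *implicit_return;   /* pre-created default */
    };

    /* Pre-create an Integer 0 so a method that never executes Return
     * still yields a value when slack mode is enabled */
    static int prepare_implicit_return(struct method_ctx *ctx)
    {
        ctx->implicit_return = calloc(1, sizeof(*ctx->implicit_return));
        if (!ctx->implicit_return)
            return -1;
        ctx->implicit_return->integer = 0;
        return 0;
    }

    static struct value *method_result(const struct method_ctx *ctx)
    {
        return ctx->explicit_return ? ctx->explicit_return
                                    : ctx->implicit_return;
    }

    int main(void)
    {
        struct method_ctx ctx = { NULL, NULL };

        if (prepare_implicit_return(&ctx))
            return 1;

        /* Method body ran and returned nothing explicitly */
        printf("result = %lld\n", method_result(&ctx)->integer);   /* 0 */
        free(ctx.implicit_return);
        return 0;
    }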
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/acpica/rsaddr.c index 7f96332822bf..1e437bfd8db5 100644 --- a/drivers/acpi/resources/rsaddr.c +++ b/drivers/acpi/acpica/rsaddr.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include "acresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_RESOURCES | 48 | #define _COMPONENT ACPI_RESOURCES |
48 | ACPI_MODULE_NAME("rsaddr") | 49 | ACPI_MODULE_NAME("rsaddr") |
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/acpica/rscalc.c index 8eaaecf92009..52865ee6bc77 100644 --- a/drivers/acpi/resources/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acresrc.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_RESOURCES | 49 | #define _COMPONENT ACPI_RESOURCES |
49 | ACPI_MODULE_NAME("rscalc") | 50 | ACPI_MODULE_NAME("rscalc") |
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/acpica/rscreate.c index 08b8d73e6ee5..61566b1a0616 100644 --- a/drivers/acpi/resources/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acresrc.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_RESOURCES | 49 | #define _COMPONENT ACPI_RESOURCES |
49 | ACPI_MODULE_NAME("rscreate") | 50 | ACPI_MODULE_NAME("rscreate") |
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/acpica/rsdump.c index 6bbbb7b8941a..3f0ca5a12d34 100644 --- a/drivers/acpi/resources/rsdump.c +++ b/drivers/acpi/acpica/rsdump.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include "acresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_RESOURCES | 48 | #define _COMPONENT ACPI_RESOURCES |
48 | ACPI_MODULE_NAME("rsdump") | 49 | ACPI_MODULE_NAME("rsdump") |
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/acpica/rsinfo.c index 3f0a1fedbe0e..77b25fdb459c 100644 --- a/drivers/acpi/resources/rsinfo.c +++ b/drivers/acpi/acpica/rsinfo.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include "acresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_RESOURCES | 48 | #define _COMPONENT ACPI_RESOURCES |
48 | ACPI_MODULE_NAME("rsinfo") | 49 | ACPI_MODULE_NAME("rsinfo") |
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/acpica/rsio.c index b66d42e7402e..35a49aa95609 100644 --- a/drivers/acpi/resources/rsio.c +++ b/drivers/acpi/acpica/rsio.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include "acresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_RESOURCES | 48 | #define _COMPONENT ACPI_RESOURCES |
48 | ACPI_MODULE_NAME("rsio") | 49 | ACPI_MODULE_NAME("rsio") |
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/acpica/rsirq.c index a8805efc0366..2e0256983aa6 100644 --- a/drivers/acpi/resources/rsirq.c +++ b/drivers/acpi/acpica/rsirq.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include "acresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_RESOURCES | 48 | #define _COMPONENT ACPI_RESOURCES |
48 | ACPI_MODULE_NAME("rsirq") | 49 | ACPI_MODULE_NAME("rsirq") |
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/acpica/rslist.c index b78c7e797a19..1b1dbc69f087 100644 --- a/drivers/acpi/resources/rslist.c +++ b/drivers/acpi/acpica/rslist.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include "acresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_RESOURCES | 48 | #define _COMPONENT ACPI_RESOURCES |
48 | ACPI_MODULE_NAME("rslist") | 49 | ACPI_MODULE_NAME("rslist") |
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/acpica/rsmemory.c index 63b21abd90bb..ddc76cebdc92 100644 --- a/drivers/acpi/resources/rsmemory.c +++ b/drivers/acpi/acpica/rsmemory.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include "acresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_RESOURCES | 48 | #define _COMPONENT ACPI_RESOURCES |
48 | ACPI_MODULE_NAME("rsmemory") | 49 | ACPI_MODULE_NAME("rsmemory") |
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/acpica/rsmisc.c index 96a6c0353255..5bc49a553284 100644 --- a/drivers/acpi/resources/rsmisc.c +++ b/drivers/acpi/acpica/rsmisc.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include "acresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_RESOURCES | 48 | #define _COMPONENT ACPI_RESOURCES |
48 | ACPI_MODULE_NAME("rsmisc") | 49 | ACPI_MODULE_NAME("rsmisc") |
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/acpica/rsutils.c index f7b3bcd59ba7..bc03d5966829 100644 --- a/drivers/acpi/resources/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acresrc.h> | 46 | #include "acnamesp.h" |
47 | #include "acresrc.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_RESOURCES | 49 | #define _COMPONENT ACPI_RESOURCES |
49 | ACPI_MODULE_NAME("rsutils") | 50 | ACPI_MODULE_NAME("rsutils") |
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/acpica/rsxface.c index f59f4c4e034c..69a2aa5b5d83 100644 --- a/drivers/acpi/resources/rsxface.c +++ b/drivers/acpi/acpica/rsxface.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acresrc.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acresrc.h" |
47 | #include "acnamesp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_RESOURCES | 49 | #define _COMPONENT ACPI_RESOURCES |
49 | ACPI_MODULE_NAME("rsxface") | 50 | ACPI_MODULE_NAME("rsxface") |
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 2817158fb6a1..3636e4f8fb73 100644 --- a/drivers/acpi/tables/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c | |||
@@ -42,15 +42,16 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/actables.h> | 45 | #include "accommon.h" |
46 | #include "actables.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_TABLES | 48 | #define _COMPONENT ACPI_TABLES |
48 | ACPI_MODULE_NAME("tbfadt") | 49 | ACPI_MODULE_NAME("tbfadt") |
49 | 50 | ||
50 | /* Local prototypes */ | 51 | /* Local prototypes */ |
51 | static void inline | 52 | static inline void |
52 | acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, | 53 | acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, |
53 | u8 byte_width, u64 address); | 54 | u8 space_id, u8 byte_width, u64 address); |
54 | 55 | ||
55 | static void acpi_tb_convert_fadt(void); | 56 | static void acpi_tb_convert_fadt(void); |
56 | 57 | ||
@@ -60,9 +61,10 @@ static void acpi_tb_validate_fadt(void); | |||
60 | 61 | ||
61 | typedef struct acpi_fadt_info { | 62 | typedef struct acpi_fadt_info { |
62 | char *name; | 63 | char *name; |
63 | u8 target; | 64 | u8 address64; |
64 | u8 source; | 65 | u8 address32; |
65 | u8 length; | 66 | u8 length; |
67 | u8 default_length; | ||
66 | u8 type; | 68 | u8 type; |
67 | 69 | ||
68 | } acpi_fadt_info; | 70 | } acpi_fadt_info; |
@@ -71,37 +73,61 @@ typedef struct acpi_fadt_info { | |||
71 | #define ACPI_FADT_SEPARATE_LENGTH 2 | 73 | #define ACPI_FADT_SEPARATE_LENGTH 2 |
72 | 74 | ||
73 | static struct acpi_fadt_info fadt_info_table[] = { | 75 | static struct acpi_fadt_info fadt_info_table[] = { |
74 | {"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block), | 76 | {"Pm1aEventBlock", |
77 | ACPI_FADT_OFFSET(xpm1a_event_block), | ||
75 | ACPI_FADT_OFFSET(pm1a_event_block), | 78 | ACPI_FADT_OFFSET(pm1a_event_block), |
76 | ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_REQUIRED}, | 79 | ACPI_FADT_OFFSET(pm1_event_length), |
80 | ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */ | ||
81 | ACPI_FADT_REQUIRED}, | ||
77 | 82 | ||
78 | {"Pm1bEventBlock", ACPI_FADT_OFFSET(xpm1b_event_block), | 83 | {"Pm1bEventBlock", |
84 | ACPI_FADT_OFFSET(xpm1b_event_block), | ||
79 | ACPI_FADT_OFFSET(pm1b_event_block), | 85 | ACPI_FADT_OFFSET(pm1b_event_block), |
80 | ACPI_FADT_OFFSET(pm1_event_length), 0}, | 86 | ACPI_FADT_OFFSET(pm1_event_length), |
87 | ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */ | ||
88 | 0}, | ||
81 | 89 | ||
82 | {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block), | 90 | {"Pm1aControlBlock", |
91 | ACPI_FADT_OFFSET(xpm1a_control_block), | ||
83 | ACPI_FADT_OFFSET(pm1a_control_block), | 92 | ACPI_FADT_OFFSET(pm1a_control_block), |
84 | ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_REQUIRED}, | 93 | ACPI_FADT_OFFSET(pm1_control_length), |
94 | ACPI_PM1_REGISTER_WIDTH, | ||
95 | ACPI_FADT_REQUIRED}, | ||
85 | 96 | ||
86 | {"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block), | 97 | {"Pm1bControlBlock", |
98 | ACPI_FADT_OFFSET(xpm1b_control_block), | ||
87 | ACPI_FADT_OFFSET(pm1b_control_block), | 99 | ACPI_FADT_OFFSET(pm1b_control_block), |
88 | ACPI_FADT_OFFSET(pm1_control_length), 0}, | 100 | ACPI_FADT_OFFSET(pm1_control_length), |
101 | ACPI_PM1_REGISTER_WIDTH, | ||
102 | 0}, | ||
89 | 103 | ||
90 | {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block), | 104 | {"Pm2ControlBlock", |
105 | ACPI_FADT_OFFSET(xpm2_control_block), | ||
91 | ACPI_FADT_OFFSET(pm2_control_block), | 106 | ACPI_FADT_OFFSET(pm2_control_block), |
92 | ACPI_FADT_OFFSET(pm2_control_length), ACPI_FADT_SEPARATE_LENGTH}, | 107 | ACPI_FADT_OFFSET(pm2_control_length), |
108 | ACPI_PM2_REGISTER_WIDTH, | ||
109 | ACPI_FADT_SEPARATE_LENGTH}, | ||
93 | 110 | ||
94 | {"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block), | 111 | {"PmTimerBlock", |
112 | ACPI_FADT_OFFSET(xpm_timer_block), | ||
95 | ACPI_FADT_OFFSET(pm_timer_block), | 113 | ACPI_FADT_OFFSET(pm_timer_block), |
96 | ACPI_FADT_OFFSET(pm_timer_length), ACPI_FADT_REQUIRED}, | 114 | ACPI_FADT_OFFSET(pm_timer_length), |
115 | ACPI_PM_TIMER_WIDTH, | ||
116 | ACPI_FADT_REQUIRED}, | ||
97 | 117 | ||
98 | {"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block), | 118 | {"Gpe0Block", |
119 | ACPI_FADT_OFFSET(xgpe0_block), | ||
99 | ACPI_FADT_OFFSET(gpe0_block), | 120 | ACPI_FADT_OFFSET(gpe0_block), |
100 | ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH}, | 121 | ACPI_FADT_OFFSET(gpe0_block_length), |
122 | 0, | ||
123 | ACPI_FADT_SEPARATE_LENGTH}, | ||
101 | 124 | ||
102 | {"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block), | 125 | {"Gpe1Block", |
126 | ACPI_FADT_OFFSET(xgpe1_block), | ||
103 | ACPI_FADT_OFFSET(gpe1_block), | 127 | ACPI_FADT_OFFSET(gpe1_block), |
104 | ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH} | 128 | ACPI_FADT_OFFSET(gpe1_block_length), |
129 | 0, | ||
130 | ACPI_FADT_SEPARATE_LENGTH} | ||
105 | }; | 131 | }; |
106 | 132 | ||
107 | #define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info)) | 133 | #define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info)) |
@@ -122,9 +148,9 @@ static struct acpi_fadt_info fadt_info_table[] = { | |||
122 | * | 148 | * |
123 | ******************************************************************************/ | 149 | ******************************************************************************/ |
124 | 150 | ||
125 | static void inline | 151 | static inline void |
126 | acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, | 152 | acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, |
127 | u8 byte_width, u64 address) | 153 | u8 space_id, u8 byte_width, u64 address) |
128 | { | 154 | { |
129 | 155 | ||
130 | /* | 156 | /* |
@@ -135,10 +161,10 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, | |||
135 | 161 | ||
136 | /* All other fields are byte-wide */ | 162 | /* All other fields are byte-wide */ |
137 | 163 | ||
138 | generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; | 164 | generic_address->space_id = space_id; |
139 | generic_address->bit_width = byte_width << 3; | 165 | generic_address->bit_width = (u8)ACPI_MUL_8(byte_width); |
140 | generic_address->bit_offset = 0; | 166 | generic_address->bit_offset = 0; |
141 | generic_address->access_width = 0; | 167 | generic_address->access_width = 0; /* Access width ANY */ |
142 | } | 168 | } |
143 | 169 | ||
144 | /******************************************************************************* | 170 | /******************************************************************************* |
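acpi_tb_init_generic_address now takes the address space explicitly and derives the bit width as bytes * 8 (ACPI_MUL_8). A standalone sketch of such a GAS initializer with illustrative field values:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified generic address structure -- field names follow the ACPI
     * GAS layout, but this is not the kernel definition */
    struct gas {
        uint8_t  space_id;
        uint8_t  bit_width;
        uint8_t  bit_offset;
        uint8_t  access_width;
        uint64_t address;
    };

    /* Build a GAS from a legacy register block: the caller supplies the
     * address space and the register length in bytes; the bit width is
     * bytes * 8, with zero bit offset and "any" access width */
    static void init_generic_address(struct gas *g, uint8_t space_id,
                                     uint8_t byte_width, uint64_t address)
    {
        g->space_id = space_id;
        g->bit_width = (uint8_t)(byte_width * 8);
        g->bit_offset = 0;
        g->access_width = 0;                 /* ANY */
        g->address = address;
    }

    int main(void)
    {
        struct gas pm_timer;

        /* Illustrative values: a 4-byte PM timer block at I/O port 0x408
         * (space 1 is SystemIO in the ACPI GAS encoding) */
        init_generic_address(&pm_timer, 1, 4, 0x408);
        printf("bit_width=%u address=0x%llx\n",
               pm_timer.bit_width, (unsigned long long)pm_timer.address);
        return 0;
    }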
@@ -225,7 +251,8 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) | |||
225 | */ | 251 | */ |
226 | if (length > sizeof(struct acpi_table_fadt)) { | 252 | if (length > sizeof(struct acpi_table_fadt)) { |
227 | ACPI_WARNING((AE_INFO, | 253 | ACPI_WARNING((AE_INFO, |
228 | "FADT (revision %u) is longer than ACPI 2.0 version, truncating length 0x%X to 0x%zX", | 254 | "FADT (revision %u) is longer than ACPI 2.0 version, " |
255 | "truncating length 0x%X to 0x%zX", | ||
229 | table->revision, (unsigned)length, | 256 | table->revision, (unsigned)length, |
230 | sizeof(struct acpi_table_fadt))); | 257 | sizeof(struct acpi_table_fadt))); |
231 | } | 258 | } |
@@ -244,7 +271,6 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) | |||
244 | * 2) Validate some of the important values within the FADT | 271 | * 2) Validate some of the important values within the FADT |
245 | */ | 272 | */ |
246 | acpi_tb_convert_fadt(); | 273 | acpi_tb_convert_fadt(); |
247 | acpi_tb_validate_fadt(); | ||
248 | } | 274 | } |
249 | 275 | ||
250 | /******************************************************************************* | 276 | /******************************************************************************* |
@@ -278,22 +304,36 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) | |||
278 | 304 | ||
279 | static void acpi_tb_convert_fadt(void) | 305 | static void acpi_tb_convert_fadt(void) |
280 | { | 306 | { |
281 | u8 pm1_register_length; | 307 | u8 pm1_register_bit_width; |
282 | struct acpi_generic_address *target; | 308 | u8 pm1_register_byte_width; |
309 | struct acpi_generic_address *target64; | ||
283 | u32 i; | 310 | u32 i; |
284 | 311 | ||
285 | /* Update the local FADT table header length */ | 312 | /* Update the local FADT table header length */ |
286 | 313 | ||
287 | acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt); | 314 | acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt); |
288 | 315 | ||
289 | /* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */ | 316 | /* |
290 | 317 | * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary. | |
318 | * Later code will always use the X 64-bit field. Also, check for an | ||
319 | * address mismatch between the 32-bit and 64-bit address fields | ||
320 | * (FIRMWARE_CTRL/X_FIRMWARE_CTRL, DSDT/X_DSDT) which would indicate | ||
321 | * the presence of two FACS or two DSDT tables. | ||
322 | */ | ||
291 | if (!acpi_gbl_FADT.Xfacs) { | 323 | if (!acpi_gbl_FADT.Xfacs) { |
292 | acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs; | 324 | acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs; |
325 | } else if (acpi_gbl_FADT.facs && | ||
326 | (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) { | ||
327 | ACPI_WARNING((AE_INFO, | ||
328 | "32/64 FACS address mismatch in FADT - two FACS tables!")); | ||
293 | } | 329 | } |
294 | 330 | ||
295 | if (!acpi_gbl_FADT.Xdsdt) { | 331 | if (!acpi_gbl_FADT.Xdsdt) { |
296 | acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt; | 332 | acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt; |
333 | } else if (acpi_gbl_FADT.dsdt && | ||
334 | (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) { | ||
335 | ACPI_WARNING((AE_INFO, | ||
336 | "32/64 DSDT address mismatch in FADT - two DSDT tables!")); | ||
297 | } | 337 | } |
298 | 338 | ||
299 | /* | 339 | /* |
@@ -312,18 +352,23 @@ static void acpi_tb_convert_fadt(void) | |||
312 | } | 352 | } |
313 | 353 | ||
314 | /* | 354 | /* |
315 | * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X" | 355 | * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X" |
316 | * generic address structures as necessary. | 356 | * generic address structures as necessary. Later code will always use |
357 | * the 64-bit address structures. | ||
317 | */ | 358 | */ |
318 | for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { | 359 | for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { |
319 | target = | 360 | target64 = |
320 | ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, | 361 | ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, |
321 | fadt_info_table[i].target); | 362 | fadt_info_table[i].address64); |
322 | 363 | ||
323 | /* Expand only if the X target is null */ | 364 | /* Expand only if the 64-bit X target is null */ |
324 | 365 | ||
325 | if (!target->address) { | 366 | if (!target64->address) { |
326 | acpi_tb_init_generic_address(target, | 367 | |
368 | /* The space_id is always I/O for the 32-bit legacy address fields */ | ||
369 | |||
370 | acpi_tb_init_generic_address(target64, | ||
371 | ACPI_ADR_SPACE_SYSTEM_IO, | ||
327 | *ACPI_ADD_PTR(u8, | 372 | *ACPI_ADD_PTR(u8, |
328 | &acpi_gbl_FADT, | 373 | &acpi_gbl_FADT, |
329 | fadt_info_table | 374 | fadt_info_table |
@@ -332,11 +377,64 @@ static void acpi_tb_convert_fadt(void) | |||
332 | &acpi_gbl_FADT, | 377 | &acpi_gbl_FADT, |
333 | fadt_info_table | 378 | fadt_info_table |
334 | [i]. | 379 | [i]. |
335 | source)); | 380 | address32)); |
381 | } | ||
382 | } | ||
383 | |||
384 | /* Validate FADT values now, before we make any changes */ | ||
385 | |||
386 | acpi_tb_validate_fadt(); | ||
387 | |||
388 | /* | ||
389 | * Optionally check all register lengths against the default values and | ||
390 | * update them if they are incorrect. | ||
391 | */ | ||
392 | if (acpi_gbl_use_default_register_widths) { | ||
393 | for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { | ||
394 | target64 = | ||
395 | ACPI_ADD_PTR(struct acpi_generic_address, | ||
396 | &acpi_gbl_FADT, | ||
397 | fadt_info_table[i].address64); | ||
398 | |||
399 | /* | ||
400 | * If a valid register (Address != 0) and the (default_length > 0) | ||
401 | * (Not a GPE register), then check the width against the default. | ||
402 | */ | ||
403 | if ((target64->address) && | ||
404 | (fadt_info_table[i].default_length > 0) && | ||
405 | (fadt_info_table[i].default_length != | ||
406 | target64->bit_width)) { | ||
407 | ACPI_WARNING((AE_INFO, | ||
408 | "Invalid length for %s: %d, using default %d", | ||
409 | fadt_info_table[i].name, | ||
410 | target64->bit_width, | ||
411 | fadt_info_table[i]. | ||
412 | default_length)); | ||
413 | |||
414 | /* Incorrect size, set width to the default */ | ||
415 | |||
416 | target64->bit_width = | ||
417 | fadt_info_table[i].default_length; | ||
418 | } | ||
336 | } | 419 | } |
337 | } | 420 | } |
338 | 421 | ||
339 | /* | 422 | /* |
423 | * Get the length of the individual PM1 registers (enable and status). | ||
424 | * Each register is defined to be (event block length / 2). | ||
425 | */ | ||
426 | pm1_register_bit_width = | ||
427 | (u8)ACPI_DIV_2(acpi_gbl_FADT.xpm1a_event_block.bit_width); | ||
428 | pm1_register_byte_width = (u8)ACPI_DIV_8(pm1_register_bit_width); | ||
429 | |||
430 | /* | ||
431 | * Adjust the lengths of the PM1 Event Blocks so that they can be used to | ||
432 | * access the PM1 status register(s); each register is (block width / 2). | ||
433 | */ | ||
434 | acpi_gbl_FADT.xpm1a_event_block.bit_width = pm1_register_bit_width; | ||
435 | acpi_gbl_FADT.xpm1b_event_block.bit_width = pm1_register_bit_width; | ||
436 | |||
437 | /* | ||
340 | * Calculate separate GAS structs for the PM1 Enable registers. | 438 | * Calculate separate GAS structs for the PM1 Enable registers. |
341 | * These addresses do not appear (directly) in the FADT, so it is | 439 | * These addresses do not appear (directly) in the FADT, so it is |
342 | * useful to calculate them once, here. | 440 | * useful to calculate them once, here. |
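The PM1 event block holds the status and enable registers back to back, so each register is half the block width and the enable register starts one register-width past the block base. A worked example of that arithmetic with illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative values, not taken from real firmware: a 4-byte
         * PM1a event block (status + enable) at I/O port 0x400 */
        uint8_t  pm1_event_block_bit_width = 32;
        uint64_t pm1a_event_block_address  = 0x400;

        /* Each PM1 register is half of the event block */
        uint8_t pm1_register_bit_width  = pm1_event_block_bit_width / 2;  /* 16 */
        uint8_t pm1_register_byte_width = pm1_register_bit_width / 8;     /*  2 */

        /* The status register sits at the block base; the enable register
         * immediately follows it */
        uint64_t pm1a_status_address = pm1a_event_block_address;
        uint64_t pm1a_enable_address = pm1a_event_block_address +
                                       pm1_register_byte_width;           /* 0x402 */

        printf("status @ 0x%llx, enable @ 0x%llx, width %u bits\n",
               (unsigned long long)pm1a_status_address,
               (unsigned long long)pm1a_enable_address,
               pm1_register_bit_width);
        return 0;
    }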
@@ -356,14 +454,14 @@ static void acpi_tb_convert_fadt(void) | |||
356 | " PM1_EVT_LEN (%u)\n", | 454 | " PM1_EVT_LEN (%u)\n", |
357 | acpi_gbl_FADT.xpm1a_event_block.bit_width, | 455 | acpi_gbl_FADT.xpm1a_event_block.bit_width, |
358 | acpi_gbl_FADT.pm1_event_length); | 456 | acpi_gbl_FADT.pm1_event_length); |
359 | pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length); | ||
360 | 457 | ||
361 | /* The PM1A register block is required */ | 458 | /* The PM1A register block is required */ |
362 | 459 | ||
363 | acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable, | 460 | acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable, |
364 | pm1_register_length, | 461 | acpi_gbl_FADT.xpm1a_event_block.space_id, |
462 | pm1_register_byte_width, | ||
365 | (acpi_gbl_FADT.xpm1a_event_block.address + | 463 | (acpi_gbl_FADT.xpm1a_event_block.address + |
366 | pm1_register_length)); | 464 | pm1_register_byte_width)); |
367 | /* Don't forget to copy space_id of the GAS */ | 465 | /* Don't forget to copy space_id of the GAS */ |
368 | acpi_gbl_xpm1a_enable.space_id = | 466 | acpi_gbl_xpm1a_enable.space_id = |
369 | acpi_gbl_FADT.xpm1a_event_block.space_id; | 467 | acpi_gbl_FADT.xpm1a_event_block.space_id; |
@@ -379,9 +477,10 @@ static void acpi_tb_convert_fadt(void) | |||
379 | acpi_gbl_FADT.xpm1b_event_block.bit_width, | 477 | acpi_gbl_FADT.xpm1b_event_block.bit_width, |
380 | acpi_gbl_FADT.pm1_event_length); | 478 | acpi_gbl_FADT.pm1_event_length); |
381 | acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, | 479 | acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, |
382 | pm1_register_length, | 480 | acpi_gbl_FADT.xpm1b_event_block.space_id, |
481 | pm1_register_byte_width, | ||
383 | (acpi_gbl_FADT.xpm1b_event_block. | 482 | (acpi_gbl_FADT.xpm1b_event_block. |
384 | address + pm1_register_length)); | 483 | address + pm1_register_byte_width)); |
385 | /* Don't forget to copy space_id of the GAS */ | 484 | /* Don't forget to copy space_id of the GAS */ |
386 | acpi_gbl_xpm1b_enable.space_id = | 485 | acpi_gbl_xpm1b_enable.space_id = |
387 | acpi_gbl_FADT.xpm1b_event_block.space_id; | 486 | acpi_gbl_FADT.xpm1b_event_block.space_id; |
@@ -411,26 +510,63 @@ static void acpi_tb_convert_fadt(void) | |||
411 | 510 | ||
412 | static void acpi_tb_validate_fadt(void) | 511 | static void acpi_tb_validate_fadt(void) |
413 | { | 512 | { |
513 | char *name; | ||
414 | u32 *address32; | 514 | u32 *address32; |
415 | struct acpi_generic_address *address64; | 515 | struct acpi_generic_address *address64; |
416 | u8 length; | 516 | u8 length; |
417 | u32 i; | 517 | u32 i; |
418 | 518 | ||
419 | /* Examine all of the 64-bit extended address fields (X fields) */ | 519 | /* |
520 | * Check for FACS and DSDT address mismatches. An address mismatch between | ||
521 | * the 32-bit and 64-bit address fields (FIRMWARE_CTRL/X_FIRMWARE_CTRL and | ||
522 | * DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables. | ||
523 | */ | ||
524 | if (acpi_gbl_FADT.facs && | ||
525 | (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) { | ||
526 | ACPI_WARNING((AE_INFO, | ||
527 | "32/64X FACS address mismatch in FADT - " | ||
528 | "two FACS tables! %8.8X/%8.8X%8.8X", | ||
529 | acpi_gbl_FADT.facs, | ||
530 | ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs))); | ||
531 | } | ||
420 | 532 | ||
421 | for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { | 533 | if (acpi_gbl_FADT.dsdt && |
534 | (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) { | ||
535 | ACPI_WARNING((AE_INFO, | ||
536 | "32/64X DSDT address mismatch in FADT - " | ||
537 | "two DSDT tables! %8.8X/%8.8X%8.8X", | ||
538 | acpi_gbl_FADT.dsdt, | ||
539 | ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt))); | ||
540 | } | ||
422 | 541 | ||
423 | /* Generate pointers to the 32-bit and 64-bit addresses and get the length */ | 542 | /* Examine all of the 64-bit extended address fields (X fields) */ |
424 | 543 | ||
425 | address64 = | 544 | for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { |
426 | ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, | 545 | /* |
427 | fadt_info_table[i].target); | 546 | * Generate pointers to the 32-bit and 64-bit addresses, get the |
547 | * register length (width), and the register name | ||
548 | */ | ||
549 | address64 = ACPI_ADD_PTR(struct acpi_generic_address, | ||
550 | &acpi_gbl_FADT, | ||
551 | fadt_info_table[i].address64); | ||
428 | address32 = | 552 | address32 = |
429 | ACPI_ADD_PTR(u32, &acpi_gbl_FADT, | 553 | ACPI_ADD_PTR(u32, &acpi_gbl_FADT, |
430 | fadt_info_table[i].source); | 554 | fadt_info_table[i].address32); |
431 | length = | 555 | length = |
432 | *ACPI_ADD_PTR(u8, &acpi_gbl_FADT, | 556 | *ACPI_ADD_PTR(u8, &acpi_gbl_FADT, |
433 | fadt_info_table[i].length); | 557 | fadt_info_table[i].length); |
558 | name = fadt_info_table[i].name; | ||
559 | |||
560 | /* | ||
561 | * For each extended field, check for length mismatch between the | ||
562 | * legacy length field and the corresponding 64-bit X length field. | ||
563 | */ | ||
564 | if (address64 && (address64->bit_width != ACPI_MUL_8(length))) { | ||
565 | ACPI_WARNING((AE_INFO, | ||
566 | "32/64X length mismatch in %s: %d/%d", | ||
567 | name, ACPI_MUL_8(length), | ||
568 | address64->bit_width)); | ||
569 | } | ||
434 | 570 | ||
435 | if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) { | 571 | if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) { |
436 | /* | 572 | /* |
@@ -439,8 +575,8 @@ static void acpi_tb_validate_fadt(void) | |||
439 | */ | 575 | */ |
440 | if (!address64->address || !length) { | 576 | if (!address64->address || !length) { |
441 | ACPI_ERROR((AE_INFO, | 577 | ACPI_ERROR((AE_INFO, |
442 | "Required field \"%s\" has zero address and/or length: %8.8X%8.8X/%X", | 578 | "Required field %s has zero address and/or length: %8.8X%8.8X/%X", |
443 | fadt_info_table[i].name, | 579 | name, |
444 | ACPI_FORMAT_UINT64(address64-> | 580 | ACPI_FORMAT_UINT64(address64-> |
445 | address), | 581 | address), |
446 | length)); | 582 | length)); |
@@ -453,8 +589,8 @@ static void acpi_tb_validate_fadt(void) | |||
453 | if ((address64->address && !length) | 589 | if ((address64->address && !length) |
454 | || (!address64->address && length)) { | 590 | || (!address64->address && length)) { |
455 | ACPI_WARNING((AE_INFO, | 591 | ACPI_WARNING((AE_INFO, |
456 | "Optional field \"%s\" has zero address or length: %8.8X%8.8X/%X", | 592 | "Optional field %s has zero address or length: %8.8X%8.8X/%X", |
457 | fadt_info_table[i].name, | 593 | name, |
458 | ACPI_FORMAT_UINT64(address64-> | 594 | ACPI_FORMAT_UINT64(address64-> |
459 | address), | 595 | address), |
460 | length)); | 596 | length)); |
@@ -466,8 +602,8 @@ static void acpi_tb_validate_fadt(void) | |||
466 | if (address64->address && *address32 && | 602 | if (address64->address && *address32 && |
467 | (address64->address != (u64) * address32)) { | 603 | (address64->address != (u64) * address32)) { |
468 | ACPI_ERROR((AE_INFO, | 604 | ACPI_ERROR((AE_INFO, |
469 | "32/64X address mismatch in \"%s\": [%8.8X] [%8.8X%8.8X], using 64X", | 605 | "32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 64X", |
470 | fadt_info_table[i].name, *address32, | 606 | name, *address32, |
471 | ACPI_FORMAT_UINT64(address64->address))); | 607 | ACPI_FORMAT_UINT64(address64->address))); |
472 | } | 608 | } |
473 | } | 609 | } |
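The validation added above compares each legacy 32-bit FADT address with its 64-bit X counterpart and warns on a mismatch, preferring the 64-bit value. A reduced sketch of that comparison (illustrative addresses only):

    #include <stdint.h>
    #include <stdio.h>

    /* Warn when a legacy 32-bit FADT address and its 64-bit "X" counterpart
     * are both non-zero but disagree; the 64-bit value is then preferred */
    static uint64_t pick_address(const char *name, uint32_t addr32, uint64_t addr64)
    {
        if (!addr64)
            return addr32;                   /* expand the legacy field */

        if (addr32 && addr64 != (uint64_t)addr32)
            fprintf(stderr,
                    "32/64X address mismatch in %s: %#x/%#llx, using 64X\n",
                    name, addr32, (unsigned long long)addr64);
        return addr64;
    }

    int main(void)
    {
        /* Illustrative addresses only */
        printf("%#llx\n", (unsigned long long)
               pick_address("Dsdt", 0xDFE8A000u, 0xDFE8A000ull));   /* agree   */
        printf("%#llx\n", (unsigned long long)
               pick_address("Dsdt", 0xDFE8A000u, 0xDFE8B000ull));   /* warning */
        return 0;
    }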
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/acpica/tbfind.c index 531584defbb8..1054dfd49207 100644 --- a/drivers/acpi/tables/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/actables.h> | 45 | #include "accommon.h" |
46 | #include "actables.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_TABLES | 48 | #define _COMPONENT ACPI_TABLES |
48 | ACPI_MODULE_NAME("tbfind") | 49 | ACPI_MODULE_NAME("tbfind") |
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 18747ce8dd2f..37374b21969d 100644 --- a/drivers/acpi/tables/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/actables.h> | 46 | #include "acnamesp.h" |
47 | #include "actables.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_TABLES | 49 | #define _COMPONENT ACPI_TABLES |
49 | ACPI_MODULE_NAME("tbinstal") | 50 | ACPI_MODULE_NAME("tbinstal") |
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/acpica/tbutils.c index 0cc92ef5236f..9684cc827930 100644 --- a/drivers/acpi/tables/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/actables.h> | 45 | #include "accommon.h" |
46 | #include "actables.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_TABLES | 48 | #define _COMPONENT ACPI_TABLES |
48 | ACPI_MODULE_NAME("tbutils") | 49 | ACPI_MODULE_NAME("tbutils") |
@@ -113,6 +114,30 @@ acpi_tb_check_xsdt(acpi_physical_address address) | |||
113 | 114 | ||
114 | /******************************************************************************* | 115 | /******************************************************************************* |
115 | * | 116 | * |
117 | * FUNCTION: acpi_tb_initialize_facs | ||
118 | * | ||
119 | * PARAMETERS: None | ||
120 | * | ||
121 | * RETURN: Status | ||
122 | * | ||
123 | * DESCRIPTION: Create a permanent mapping for the FACS and save it in a global | ||
124 | * for accessing the Global Lock and Firmware Waking Vector | ||
125 | * | ||
126 | ******************************************************************************/ | ||
127 | |||
128 | acpi_status acpi_tb_initialize_facs(void) | ||
129 | { | ||
130 | acpi_status status; | ||
131 | |||
132 | status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, | ||
133 | ACPI_CAST_INDIRECT_PTR(struct | ||
134 | acpi_table_header, | ||
135 | &acpi_gbl_FACS)); | ||
136 | return status; | ||
137 | } | ||
138 | |||
139 | /******************************************************************************* | ||
140 | * | ||
116 | * FUNCTION: acpi_tb_tables_loaded | 141 | * FUNCTION: acpi_tb_tables_loaded |
117 | * | 142 | * |
118 | * PARAMETERS: None | 143 | * PARAMETERS: None |
@@ -420,7 +445,8 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) | |||
420 | 445 | ||
421 | /* Differentiate between RSDT and XSDT root tables */ | 446 | /* Differentiate between RSDT and XSDT root tables */ |
422 | 447 | ||
423 | if (rsdp->revision > 1 && rsdp->xsdt_physical_address) { | 448 | if (rsdp->revision > 1 && rsdp->xsdt_physical_address |
449 | && !acpi_rsdt_forced) { | ||
424 | /* | 450 | /* |
425 | * Root table is an XSDT (64-bit physical addresses). We must use the | 451 | * Root table is an XSDT (64-bit physical addresses). We must use the |
426 | * XSDT if the revision is > 1 and the XSDT pointer is present, as per | 452 | * XSDT if the revision is > 1 and the XSDT pointer is present, as per |
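The modified condition above keeps using the RSDT when the user forces it, even if a revision 2+ RSDP advertises an XSDT. A sketch of the selection logic under those assumptions (rsdt_forced here is a plain flag standing in for the kernel's acpi_rsdt_forced variable):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Use the XSDT only when the RSDP revision is > 1, the XSDT pointer is
     * non-zero, and the RSDT has not been forced (e.g. by a boot option on
     * a system with a broken XSDT); otherwise fall back to the RSDT */
    static uint64_t choose_root_table(uint8_t rsdp_revision,
                                      uint32_t rsdt_address,
                                      uint64_t xsdt_address,
                                      bool rsdt_forced)
    {
        if (rsdp_revision > 1 && xsdt_address && !rsdt_forced)
            return xsdt_address;
        return rsdt_address;
    }

    int main(void)
    {
        /* Illustrative addresses only */
        printf("%#llx\n", (unsigned long long)
               choose_root_table(2, 0x7FFE0000u, 0x7FFE1000ull, false));  /* XSDT */
        printf("%#llx\n", (unsigned long long)
               choose_root_table(2, 0x7FFE0000u, 0x7FFE1000ull, true));   /* RSDT */
        return 0;
    }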
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/acpica/tbxface.c index fd7770aa1061..c3e841f3cde9 100644 --- a/drivers/acpi/tables/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c | |||
@@ -43,8 +43,9 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <acpi/acpi.h> | 45 | #include <acpi/acpi.h> |
46 | #include <acpi/acnamesp.h> | 46 | #include "accommon.h" |
47 | #include <acpi/actables.h> | 47 | #include "acnamesp.h" |
48 | #include "actables.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_TABLES | 50 | #define _COMPONENT ACPI_TABLES |
50 | ACPI_MODULE_NAME("tbxface") | 51 | ACPI_MODULE_NAME("tbxface") |
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 2d157e0f98d2..b7fc8dd43341 100644 --- a/drivers/acpi/tables/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/actables.h> | 45 | #include "accommon.h" |
46 | #include "actables.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_TABLES | 48 | #define _COMPONENT ACPI_TABLES |
48 | ACPI_MODULE_NAME("tbxfroot") | 49 | ACPI_MODULE_NAME("tbxfroot") |
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/acpica/utalloc.c index 241c535c1753..7580f6b3069e 100644 --- a/drivers/acpi/utilities/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acdebug.h> | 45 | #include "accommon.h" |
46 | #include "acdebug.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_UTILITIES | 48 | #define _COMPONENT ACPI_UTILITIES |
48 | ACPI_MODULE_NAME("utalloc") | 49 | ACPI_MODULE_NAME("utalloc") |
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/acpica/utcopy.c index 5b2f7c27b705..b0dcfd3c872a 100644 --- a/drivers/acpi/utilities/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include "acnamesp.h" | ||
46 | 47 | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_UTILITIES | 49 | #define _COMPONENT ACPI_UTILITIES |
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/acpica/utdebug.c index fd66ecb6741e..38821f53042c 100644 --- a/drivers/acpi/utilities/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c | |||
@@ -42,6 +42,7 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include "accommon.h" | ||
45 | 46 | ||
46 | #define _COMPONENT ACPI_UTILITIES | 47 | #define _COMPONENT ACPI_UTILITIES |
47 | ACPI_MODULE_NAME("utdebug") | 48 | ACPI_MODULE_NAME("utdebug") |
@@ -136,7 +137,7 @@ static const char *acpi_ut_trim_function_name(const char *function_name) | |||
136 | 137 | ||
137 | /******************************************************************************* | 138 | /******************************************************************************* |
138 | * | 139 | * |
139 | * FUNCTION: acpi_ut_debug_print | 140 | * FUNCTION: acpi_debug_print |
140 | * | 141 | * |
141 | * PARAMETERS: requested_debug_level - Requested debug print level | 142 | * PARAMETERS: requested_debug_level - Requested debug print level |
142 | * line_number - Caller's line number (for error output) | 143 | * line_number - Caller's line number (for error output) |
@@ -154,11 +155,11 @@ static const char *acpi_ut_trim_function_name(const char *function_name) | |||
154 | ******************************************************************************/ | 155 | ******************************************************************************/ |
155 | 156 | ||
156 | void ACPI_INTERNAL_VAR_XFACE | 157 | void ACPI_INTERNAL_VAR_XFACE |
157 | acpi_ut_debug_print(u32 requested_debug_level, | 158 | acpi_debug_print(u32 requested_debug_level, |
158 | u32 line_number, | 159 | u32 line_number, |
159 | const char *function_name, | 160 | const char *function_name, |
160 | const char *module_name, | 161 | const char *module_name, |
161 | u32 component_id, const char *format, ...) | 162 | u32 component_id, const char *format, ...) |
162 | { | 163 | { |
163 | acpi_thread_id thread_id; | 164 | acpi_thread_id thread_id; |
164 | va_list args; | 165 | va_list args; |
@@ -205,11 +206,11 @@ acpi_ut_debug_print(u32 requested_debug_level, | |||
205 | va_end(args); | 206 | va_end(args); |
206 | } | 207 | } |
207 | 208 | ||
208 | ACPI_EXPORT_SYMBOL(acpi_ut_debug_print) | 209 | ACPI_EXPORT_SYMBOL(acpi_debug_print) |
209 | 210 | ||
210 | /******************************************************************************* | 211 | /******************************************************************************* |
211 | * | 212 | * |
212 | * FUNCTION: acpi_ut_debug_print_raw | 213 | * FUNCTION: acpi_debug_print_raw |
213 | * | 214 | * |
214 | * PARAMETERS: requested_debug_level - Requested debug print level | 215 | * PARAMETERS: requested_debug_level - Requested debug print level |
215 | * line_number - Caller's line number | 216 | * line_number - Caller's line number |
@@ -226,11 +227,11 @@ ACPI_EXPORT_SYMBOL(acpi_ut_debug_print) | |||
226 | * | 227 | * |
227 | ******************************************************************************/ | 228 | ******************************************************************************/ |
228 | void ACPI_INTERNAL_VAR_XFACE | 229 | void ACPI_INTERNAL_VAR_XFACE |
229 | acpi_ut_debug_print_raw(u32 requested_debug_level, | 230 | acpi_debug_print_raw(u32 requested_debug_level, |
230 | u32 line_number, | 231 | u32 line_number, |
231 | const char *function_name, | 232 | const char *function_name, |
232 | const char *module_name, | 233 | const char *module_name, |
233 | u32 component_id, const char *format, ...) | 234 | u32 component_id, const char *format, ...) |
234 | { | 235 | { |
235 | va_list args; | 236 | va_list args; |
236 | 237 | ||
@@ -244,7 +245,7 @@ acpi_ut_debug_print_raw(u32 requested_debug_level, | |||
244 | va_end(args); | 245 | va_end(args); |
245 | } | 246 | } |
246 | 247 | ||
247 | ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw) | 248 | ACPI_EXPORT_SYMBOL(acpi_debug_print_raw) |
248 | 249 | ||
249 | /******************************************************************************* | 250 | /******************************************************************************* |
250 | * | 251 | * |
@@ -270,9 +271,9 @@ acpi_ut_trace(u32 line_number, | |||
270 | acpi_gbl_nesting_level++; | 271 | acpi_gbl_nesting_level++; |
271 | acpi_ut_track_stack_ptr(); | 272 | acpi_ut_track_stack_ptr(); |
272 | 273 | ||
273 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 274 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
274 | line_number, function_name, module_name, | 275 | line_number, function_name, module_name, component_id, |
275 | component_id, "%s\n", acpi_gbl_fn_entry_str); | 276 | "%s\n", acpi_gbl_fn_entry_str); |
276 | } | 277 | } |
277 | 278 | ||
278 | ACPI_EXPORT_SYMBOL(acpi_ut_trace) | 279 | ACPI_EXPORT_SYMBOL(acpi_ut_trace) |
@@ -301,10 +302,9 @@ acpi_ut_trace_ptr(u32 line_number, | |||
301 | acpi_gbl_nesting_level++; | 302 | acpi_gbl_nesting_level++; |
302 | acpi_ut_track_stack_ptr(); | 303 | acpi_ut_track_stack_ptr(); |
303 | 304 | ||
304 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 305 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
305 | line_number, function_name, module_name, | 306 | line_number, function_name, module_name, component_id, |
306 | component_id, "%s %p\n", acpi_gbl_fn_entry_str, | 307 | "%s %p\n", acpi_gbl_fn_entry_str, pointer); |
307 | pointer); | ||
308 | } | 308 | } |
309 | 309 | ||
310 | /******************************************************************************* | 310 | /******************************************************************************* |
@@ -333,10 +333,9 @@ acpi_ut_trace_str(u32 line_number, | |||
333 | acpi_gbl_nesting_level++; | 333 | acpi_gbl_nesting_level++; |
334 | acpi_ut_track_stack_ptr(); | 334 | acpi_ut_track_stack_ptr(); |
335 | 335 | ||
336 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 336 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
337 | line_number, function_name, module_name, | 337 | line_number, function_name, module_name, component_id, |
338 | component_id, "%s %s\n", acpi_gbl_fn_entry_str, | 338 | "%s %s\n", acpi_gbl_fn_entry_str, string); |
339 | string); | ||
340 | } | 339 | } |
341 | 340 | ||
342 | /******************************************************************************* | 341 | /******************************************************************************* |
@@ -365,10 +364,9 @@ acpi_ut_trace_u32(u32 line_number, | |||
365 | acpi_gbl_nesting_level++; | 364 | acpi_gbl_nesting_level++; |
366 | acpi_ut_track_stack_ptr(); | 365 | acpi_ut_track_stack_ptr(); |
367 | 366 | ||
368 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 367 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
369 | line_number, function_name, module_name, | 368 | line_number, function_name, module_name, component_id, |
370 | component_id, "%s %08X\n", acpi_gbl_fn_entry_str, | 369 | "%s %08X\n", acpi_gbl_fn_entry_str, integer); |
371 | integer); | ||
372 | } | 370 | } |
373 | 371 | ||
374 | /******************************************************************************* | 372 | /******************************************************************************* |
@@ -393,9 +391,9 @@ acpi_ut_exit(u32 line_number, | |||
393 | const char *module_name, u32 component_id) | 391 | const char *module_name, u32 component_id) |
394 | { | 392 | { |
395 | 393 | ||
396 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 394 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
397 | line_number, function_name, module_name, | 395 | line_number, function_name, module_name, component_id, |
398 | component_id, "%s\n", acpi_gbl_fn_exit_str); | 396 | "%s\n", acpi_gbl_fn_exit_str); |
399 | 397 | ||
400 | acpi_gbl_nesting_level--; | 398 | acpi_gbl_nesting_level--; |
401 | } | 399 | } |
@@ -426,17 +424,16 @@ acpi_ut_status_exit(u32 line_number, | |||
426 | { | 424 | { |
427 | 425 | ||
428 | if (ACPI_SUCCESS(status)) { | 426 | if (ACPI_SUCCESS(status)) { |
429 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 427 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
430 | line_number, function_name, module_name, | 428 | line_number, function_name, module_name, |
431 | component_id, "%s %s\n", | 429 | component_id, "%s %s\n", acpi_gbl_fn_exit_str, |
432 | acpi_gbl_fn_exit_str, | 430 | acpi_format_exception(status)); |
433 | acpi_format_exception(status)); | ||
434 | } else { | 431 | } else { |
435 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 432 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
436 | line_number, function_name, module_name, | 433 | line_number, function_name, module_name, |
437 | component_id, "%s ****Exception****: %s\n", | 434 | component_id, "%s ****Exception****: %s\n", |
438 | acpi_gbl_fn_exit_str, | 435 | acpi_gbl_fn_exit_str, |
439 | acpi_format_exception(status)); | 436 | acpi_format_exception(status)); |
440 | } | 437 | } |
441 | 438 | ||
442 | acpi_gbl_nesting_level--; | 439 | acpi_gbl_nesting_level--; |
@@ -467,10 +464,10 @@ acpi_ut_value_exit(u32 line_number, | |||
467 | u32 component_id, acpi_integer value) | 464 | u32 component_id, acpi_integer value) |
468 | { | 465 | { |
469 | 466 | ||
470 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 467 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
471 | line_number, function_name, module_name, | 468 | line_number, function_name, module_name, component_id, |
472 | component_id, "%s %8.8X%8.8X\n", | 469 | "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str, |
473 | acpi_gbl_fn_exit_str, ACPI_FORMAT_UINT64(value)); | 470 | ACPI_FORMAT_UINT64(value)); |
474 | 471 | ||
475 | acpi_gbl_nesting_level--; | 472 | acpi_gbl_nesting_level--; |
476 | } | 473 | } |
@@ -499,9 +496,9 @@ acpi_ut_ptr_exit(u32 line_number, | |||
499 | const char *module_name, u32 component_id, u8 *ptr) | 496 | const char *module_name, u32 component_id, u8 *ptr) |
500 | { | 497 | { |
501 | 498 | ||
502 | acpi_ut_debug_print(ACPI_LV_FUNCTIONS, | 499 | acpi_debug_print(ACPI_LV_FUNCTIONS, |
503 | line_number, function_name, module_name, | 500 | line_number, function_name, module_name, component_id, |
504 | component_id, "%s %p\n", acpi_gbl_fn_exit_str, ptr); | 501 | "%s %p\n", acpi_gbl_fn_exit_str, ptr); |
505 | 502 | ||
506 | acpi_gbl_nesting_level--; | 503 | acpi_gbl_nesting_level--; |
507 | } | 504 | } |
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/acpica/utdelete.c index d197c6b29e17..a0be9e39531e 100644 --- a/drivers/acpi/utilities/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c | |||
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acinterp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acinterp.h" |
47 | #include <acpi/acevents.h> | 47 | #include "acnamesp.h" |
48 | #include "acevents.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_UTILITIES | 50 | #define _COMPONENT ACPI_UTILITIES |
50 | ACPI_MODULE_NAME("utdelete") | 51 | ACPI_MODULE_NAME("utdelete") |
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/acpica/uteval.c index 352747e49c7a..da9450bc60f7 100644 --- a/drivers/acpi/utilities/uteval.c +++ b/drivers/acpi/acpica/uteval.c | |||
@@ -42,8 +42,9 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acinterp.h> | 46 | #include "acnamesp.h" |
47 | #include "acinterp.h" | ||
47 | 48 | ||
48 | #define _COMPONENT ACPI_UTILITIES | 49 | #define _COMPONENT ACPI_UTILITIES |
49 | ACPI_MODULE_NAME("uteval") | 50 | ACPI_MODULE_NAME("uteval") |
@@ -129,7 +130,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) | |||
129 | 130 | ||
130 | /* The interface is supported */ | 131 | /* The interface is supported */ |
131 | 132 | ||
132 | return_ACPI_STATUS(AE_CTRL_TERMINATE); | 133 | return_ACPI_STATUS(AE_OK); |
133 | } | 134 | } |
134 | } | 135 | } |
135 | 136 | ||
@@ -143,13 +144,13 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) | |||
143 | 144 | ||
144 | /* The interface is supported */ | 145 | /* The interface is supported */ |
145 | 146 | ||
146 | return_ACPI_STATUS(AE_CTRL_TERMINATE); | 147 | return_ACPI_STATUS(AE_OK); |
147 | } | 148 | } |
148 | 149 | ||
149 | /* The interface is not supported */ | 150 | /* The interface is not supported */ |
150 | 151 | ||
151 | return_desc->integer.value = 0; | 152 | return_desc->integer.value = 0; |
152 | return_ACPI_STATUS(AE_CTRL_TERMINATE); | 153 | return_ACPI_STATUS(AE_OK); |
153 | } | 154 | } |
154 | 155 | ||
155 | /******************************************************************************* | 156 | /******************************************************************************* |
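The uteval.c hunks above make acpi_ut_osi_implementation return AE_OK in every case; whether an interface string is supported is reported only through the Integer object that _OSI hands back to the AML caller. A minimal sketch of that convention, assuming the usual all-ones/zero encoding (this is not the in-tree function, which operates on a walk_state):

    #include <acpi/acpi.h>

    /* Sketch: _OSI signals support via its return Integer, not the status. */
    static acpi_status osi_result_sketch(u64 *return_value, int supported)
    {
            *return_value = supported ? 0xFFFFFFFFull : 0;  /* all ones = supported */
            return AE_OK;              /* AE_CTRL_TERMINATE is no longer used here */
    }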
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/acpica/utglobal.c index 17ed5ac840f7..a3ab9d9da299 100644 --- a/drivers/acpi/utilities/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c | |||
@@ -44,11 +44,11 @@ | |||
44 | #define DEFINE_ACPI_GLOBALS | 44 | #define DEFINE_ACPI_GLOBALS |
45 | 45 | ||
46 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
47 | #include <acpi/acnamesp.h> | 47 | #include "accommon.h" |
48 | #include "acnamesp.h" | ||
48 | 49 | ||
49 | ACPI_EXPORT_SYMBOL(acpi_gbl_FADT) | ||
50 | #define _COMPONENT ACPI_UTILITIES | 50 | #define _COMPONENT ACPI_UTILITIES |
51 | ACPI_MODULE_NAME("utglobal") | 51 | ACPI_MODULE_NAME("utglobal") |
52 | 52 | ||
53 | /******************************************************************************* | 53 | /******************************************************************************* |
54 | * | 54 | * |
@@ -352,7 +352,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = { | |||
352 | "PCI_Config", | 352 | "PCI_Config", |
353 | "EmbeddedControl", | 353 | "EmbeddedControl", |
354 | "SMBus", | 354 | "SMBus", |
355 | "CMOS", | 355 | "SystemCMOS", |
356 | "PCIBARTarget", | 356 | "PCIBARTarget", |
357 | "DataTable" | 357 | "DataTable" |
358 | }; | 358 | }; |
@@ -756,6 +756,7 @@ acpi_status acpi_ut_init_globals(void) | |||
756 | acpi_gbl_gpe_xrupt_list_head = NULL; | 756 | acpi_gbl_gpe_xrupt_list_head = NULL; |
757 | acpi_gbl_gpe_fadt_blocks[0] = NULL; | 757 | acpi_gbl_gpe_fadt_blocks[0] = NULL; |
758 | acpi_gbl_gpe_fadt_blocks[1] = NULL; | 758 | acpi_gbl_gpe_fadt_blocks[1] = NULL; |
759 | acpi_current_gpe_count = 0; | ||
759 | 760 | ||
760 | /* Global handlers */ | 761 | /* Global handlers */ |
761 | 762 | ||
@@ -771,6 +772,7 @@ acpi_status acpi_ut_init_globals(void) | |||
771 | acpi_gbl_global_lock_mutex = NULL; | 772 | acpi_gbl_global_lock_mutex = NULL; |
772 | acpi_gbl_global_lock_acquired = FALSE; | 773 | acpi_gbl_global_lock_acquired = FALSE; |
773 | acpi_gbl_global_lock_handle = 0; | 774 | acpi_gbl_global_lock_handle = 0; |
775 | acpi_gbl_global_lock_present = FALSE; | ||
774 | 776 | ||
775 | /* Miscellaneous variables */ | 777 | /* Miscellaneous variables */ |
776 | 778 | ||
@@ -815,5 +817,7 @@ acpi_status acpi_ut_init_globals(void) | |||
815 | return_ACPI_STATUS(AE_OK); | 817 | return_ACPI_STATUS(AE_OK); |
816 | } | 818 | } |
817 | 819 | ||
820 | ACPI_EXPORT_SYMBOL(acpi_gbl_FADT) | ||
818 | ACPI_EXPORT_SYMBOL(acpi_dbg_level) | 821 | ACPI_EXPORT_SYMBOL(acpi_dbg_level) |
819 | ACPI_EXPORT_SYMBOL(acpi_dbg_layer) | 822 | ACPI_EXPORT_SYMBOL(acpi_dbg_layer) |
823 | ACPI_EXPORT_SYMBOL(acpi_current_gpe_count) | ||
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/acpica/utinit.c index cae515fc02d3..a54ca84eb362 100644 --- a/drivers/acpi/utilities/utinit.c +++ b/drivers/acpi/acpica/utinit.c | |||
@@ -42,9 +42,10 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acevents.h> | 46 | #include "acnamesp.h" |
47 | #include <acpi/actables.h> | 47 | #include "acevents.h" |
48 | #include "actables.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_UTILITIES | 50 | #define _COMPONENT ACPI_UTILITIES |
50 | ACPI_MODULE_NAME("utinit") | 51 | ACPI_MODULE_NAME("utinit") |
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/acpica/utmath.c index c927324fdd26..c9f682d640ef 100644 --- a/drivers/acpi/utilities/utmath.c +++ b/drivers/acpi/acpica/utmath.c | |||
@@ -42,6 +42,7 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include "accommon.h" | ||
45 | 46 | ||
46 | #define _COMPONENT ACPI_UTILITIES | 47 | #define _COMPONENT ACPI_UTILITIES |
47 | ACPI_MODULE_NAME("utmath") | 48 | ACPI_MODULE_NAME("utmath") |
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/acpica/utmisc.c index 9089a158a874..c1f7f4e1a72d 100644 --- a/drivers/acpi/utilities/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c | |||
@@ -44,7 +44,8 @@ | |||
44 | #include <linux/module.h> | 44 | #include <linux/module.h> |
45 | 45 | ||
46 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
47 | #include <acpi/acnamesp.h> | 47 | #include "accommon.h" |
48 | #include "acnamesp.h" | ||
48 | 49 | ||
49 | #define _COMPONENT ACPI_UTILITIES | 50 | #define _COMPONENT ACPI_UTILITIES |
50 | ACPI_MODULE_NAME("utmisc") | 51 | ACPI_MODULE_NAME("utmisc") |
@@ -1016,7 +1017,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, | |||
1016 | 1017 | ||
1017 | /******************************************************************************* | 1018 | /******************************************************************************* |
1018 | * | 1019 | * |
1019 | * FUNCTION: acpi_ut_error, acpi_ut_warning, acpi_ut_info | 1020 | * FUNCTION: acpi_error, acpi_exception, acpi_warning, acpi_info |
1020 | * | 1021 | * |
1021 | * PARAMETERS: module_name - Caller's module name (for error output) | 1022 | * PARAMETERS: module_name - Caller's module name (for error output) |
1022 | * line_number - Caller's line number (for error output) | 1023 | * line_number - Caller's line number (for error output) |
@@ -1029,7 +1030,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, | |||
1029 | ******************************************************************************/ | 1030 | ******************************************************************************/ |
1030 | 1031 | ||
1031 | void ACPI_INTERNAL_VAR_XFACE | 1032 | void ACPI_INTERNAL_VAR_XFACE |
1032 | acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...) | 1033 | acpi_error(const char *module_name, u32 line_number, const char *format, ...) |
1033 | { | 1034 | { |
1034 | va_list args; | 1035 | va_list args; |
1035 | 1036 | ||
@@ -1042,8 +1043,8 @@ acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...) | |||
1042 | } | 1043 | } |
1043 | 1044 | ||
1044 | void ACPI_INTERNAL_VAR_XFACE | 1045 | void ACPI_INTERNAL_VAR_XFACE |
1045 | acpi_ut_exception(const char *module_name, | 1046 | acpi_exception(const char *module_name, |
1046 | u32 line_number, acpi_status status, const char *format, ...) | 1047 | u32 line_number, acpi_status status, const char *format, ...) |
1047 | { | 1048 | { |
1048 | va_list args; | 1049 | va_list args; |
1049 | 1050 | ||
@@ -1056,11 +1057,8 @@ acpi_ut_exception(const char *module_name, | |||
1056 | va_end(args); | 1057 | va_end(args); |
1057 | } | 1058 | } |
1058 | 1059 | ||
1059 | EXPORT_SYMBOL(acpi_ut_exception); | ||
1060 | |||
1061 | void ACPI_INTERNAL_VAR_XFACE | 1060 | void ACPI_INTERNAL_VAR_XFACE |
1062 | acpi_ut_warning(const char *module_name, | 1061 | acpi_warning(const char *module_name, u32 line_number, const char *format, ...) |
1063 | u32 line_number, const char *format, ...) | ||
1064 | { | 1062 | { |
1065 | va_list args; | 1063 | va_list args; |
1066 | 1064 | ||
@@ -1073,7 +1071,7 @@ acpi_ut_warning(const char *module_name, | |||
1073 | } | 1071 | } |
1074 | 1072 | ||
1075 | void ACPI_INTERNAL_VAR_XFACE | 1073 | void ACPI_INTERNAL_VAR_XFACE |
1076 | acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...) | 1074 | acpi_info(const char *module_name, u32 line_number, const char *format, ...) |
1077 | { | 1075 | { |
1078 | va_list args; | 1076 | va_list args; |
1079 | 1077 | ||
@@ -1088,3 +1086,8 @@ acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...) | |||
1088 | acpi_os_printf("\n"); | 1086 | acpi_os_printf("\n"); |
1089 | va_end(args); | 1087 | va_end(args); |
1090 | } | 1088 | } |
1089 | |||
1090 | ACPI_EXPORT_SYMBOL(acpi_error) | ||
1091 | ACPI_EXPORT_SYMBOL(acpi_exception) | ||
1092 | ACPI_EXPORT_SYMBOL(acpi_warning) | ||
1093 | ACPI_EXPORT_SYMBOL(acpi_info) | ||
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/acpica/utmutex.c index 7331dde9e1b3..14eb52c4d647 100644 --- a/drivers/acpi/utilities/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c | |||
@@ -42,6 +42,7 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include "accommon.h" | ||
45 | 46 | ||
46 | #define _COMPONENT ACPI_UTILITIES | 47 | #define _COMPONENT ACPI_UTILITIES |
47 | ACPI_MODULE_NAME("utmutex") | 48 | ACPI_MODULE_NAME("utmutex") |
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/acpica/utobject.c index 4bef3cfbaccb..fd5ea7543e5b 100644 --- a/drivers/acpi/utilities/utobject.c +++ b/drivers/acpi/acpica/utobject.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acnamesp.h> | 45 | #include "accommon.h" |
46 | #include "acnamesp.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_UTILITIES | 48 | #define _COMPONENT ACPI_UTILITIES |
48 | ACPI_MODULE_NAME("utobject") | 49 | ACPI_MODULE_NAME("utobject") |
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/acpica/utresrc.c index c3e3e1308edc..91b7c00236f4 100644 --- a/drivers/acpi/utilities/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c | |||
@@ -42,7 +42,8 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/amlresrc.h> | 45 | #include "accommon.h" |
46 | #include "amlresrc.h" | ||
46 | 47 | ||
47 | #define _COMPONENT ACPI_UTILITIES | 48 | #define _COMPONENT ACPI_UTILITIES |
48 | ACPI_MODULE_NAME("utresrc") | 49 | ACPI_MODULE_NAME("utresrc") |
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/acpica/utstate.c index 63a6d3d77d88..0440c958f5a4 100644 --- a/drivers/acpi/utilities/utstate.c +++ b/drivers/acpi/acpica/utstate.c | |||
@@ -42,6 +42,7 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include "accommon.h" | ||
45 | 46 | ||
46 | #define _COMPONENT ACPI_UTILITIES | 47 | #define _COMPONENT ACPI_UTILITIES |
47 | ACPI_MODULE_NAME("utstate") | 48 | ACPI_MODULE_NAME("utstate") |
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/acpica/utxface.c index c198a4d40583..078a22728c6b 100644 --- a/drivers/acpi/utilities/utxface.c +++ b/drivers/acpi/acpica/utxface.c | |||
@@ -42,9 +42,11 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include <acpi/acevents.h> | 45 | #include "accommon.h" |
46 | #include <acpi/acnamesp.h> | 46 | #include "acevents.h" |
47 | #include <acpi/acdebug.h> | 47 | #include "acnamesp.h" |
48 | #include "acdebug.h" | ||
49 | #include "actables.h" | ||
48 | 50 | ||
49 | #define _COMPONENT ACPI_UTILITIES | 51 | #define _COMPONENT ACPI_UTILITIES |
50 | ACPI_MODULE_NAME("utxface") | 52 | ACPI_MODULE_NAME("utxface") |
@@ -148,6 +150,16 @@ acpi_status acpi_enable_subsystem(u32 flags) | |||
148 | } | 150 | } |
149 | 151 | ||
150 | /* | 152 | /* |
153 | * Obtain a permanent mapping for the FACS. This is required for the | ||
154 | * Global Lock and the Firmware Waking Vector | ||
155 | */ | ||
156 | status = acpi_tb_initialize_facs(); | ||
157 | if (ACPI_FAILURE(status)) { | ||
158 | ACPI_WARNING((AE_INFO, "Could not map the FACS table")); | ||
159 | return_ACPI_STATUS(status); | ||
160 | } | ||
161 | |||
162 | /* | ||
151 | * Install the default op_region handlers. These are installed unless | 163 | * Install the default op_region handlers. These are installed unless |
152 | * other handlers have already been installed via the | 164 | * other handlers have already been installed via the |
153 | * install_address_space_handler interface. | 165 | * install_address_space_handler interface. |
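The acpi_enable_subsystem() hunk above maps the FACS once, early, because two externally visible services dereference fields that live inside that table. A minimal sketch of those two users, assuming the ACPICA prototypes of this era (acpi_set_firmware_waking_vector() still takes a physical address here):

    #include <acpi/acpi.h>

    /* Sketch: both calls below touch FACS fields, hence the early mapping. */
    static acpi_status facs_users_sketch(acpi_physical_address wake_vector)
    {
            u32 lock_handle;
            acpi_status status;

            /* The Firmware Waking Vector is a field of the FACS. */
            status = acpi_set_firmware_waking_vector(wake_vector);
            if (ACPI_FAILURE(status))
                    return status;

            /* The Global Lock structure is also embedded in the FACS. */
            status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &lock_handle);
            if (ACPI_SUCCESS(status))
                    acpi_release_global_lock(lock_handle);

            return status;
    }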
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 1423b0c0cd2e..65132f920459 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -471,7 +471,7 @@ static void sysfs_remove_battery(struct acpi_battery *battery) | |||
471 | 471 | ||
472 | static int acpi_battery_update(struct acpi_battery *battery) | 472 | static int acpi_battery_update(struct acpi_battery *battery) |
473 | { | 473 | { |
474 | int result; | 474 | int result, old_present = acpi_battery_present(battery); |
475 | result = acpi_battery_get_status(battery); | 475 | result = acpi_battery_get_status(battery); |
476 | if (result) | 476 | if (result) |
477 | return result; | 477 | return result; |
@@ -482,7 +482,8 @@ static int acpi_battery_update(struct acpi_battery *battery) | |||
482 | return 0; | 482 | return 0; |
483 | } | 483 | } |
484 | #endif | 484 | #endif |
485 | if (!battery->update_time) { | 485 | if (!battery->update_time || |
486 | old_present != acpi_battery_present(battery)) { | ||
486 | result = acpi_battery_get_info(battery); | 487 | result = acpi_battery_get_info(battery); |
487 | if (result) | 488 | if (result) |
488 | return result; | 489 | return result; |
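The battery.c change above re-reads the otherwise cached _BIF information whenever the reported presence flips, not only on the first update. The pattern, reduced to a sketch with illustrative names (batt_cache and get_info are not kernel symbols):

    /* Sketch of refresh-on-presence-change; names are illustrative only. */
    struct batt_cache {
            unsigned long update_time;      /* 0 => static info never read */
            int present;                    /* last observed presence      */
    };

    static int batt_refresh(struct batt_cache *c, int now_present,
                            int (*get_info)(struct batt_cache *))
    {
            int old_present = c->present;

            c->present = now_present;
            /* Re-read static info on first use or after a pack swap. */
            if (!c->update_time || old_present != now_present)
                    return get_info(c);
            return 0;
    }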
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c index 307963bd1043..332fe4b21708 100644 --- a/drivers/acpi/cm_sbs.c +++ b/drivers/acpi/cm_sbs.c | |||
@@ -27,9 +27,6 @@ | |||
27 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
28 | #include <acpi/acpi_bus.h> | 28 | #include <acpi/acpi_bus.h> |
29 | #include <acpi/acpi_drivers.h> | 29 | #include <acpi/acpi_drivers.h> |
30 | #include <acpi/acmacros.h> | ||
31 | #include <acpi/actypes.h> | ||
32 | #include <acpi/acutils.h> | ||
33 | 30 | ||
34 | ACPI_MODULE_NAME("cm_sbs"); | 31 | ACPI_MODULE_NAME("cm_sbs"); |
35 | #define ACPI_AC_CLASS "ac_adapter" | 32 | #define ACPI_AC_CLASS "ac_adapter" |
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c index c48396892008..20223cbd0d1c 100644 --- a/drivers/acpi/debug.c +++ b/drivers/acpi/debug.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/moduleparam.h> | 9 | #include <linux/moduleparam.h> |
10 | #include <asm/uaccess.h> | 10 | #include <asm/uaccess.h> |
11 | #include <acpi/acpi_drivers.h> | 11 | #include <acpi/acpi_drivers.h> |
12 | #include <acpi/acglobal.h> | ||
13 | 12 | ||
14 | #define _COMPONENT ACPI_SYSTEM_COMPONENT | 13 | #define _COMPONENT ACPI_SYSTEM_COMPONENT |
15 | ACPI_MODULE_NAME("debug"); | 14 | ACPI_MODULE_NAME("debug"); |
diff --git a/drivers/acpi/dispatcher/Makefile b/drivers/acpi/dispatcher/Makefile deleted file mode 100644 index eb7e602a83cd..000000000000 --- a/drivers/acpi/dispatcher/Makefile +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ | ||
6 | dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \ | ||
7 | dsinit.o | ||
8 | |||
9 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 30f3ef236ecb..8dfcbb8aff73 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <asm/io.h> | 42 | #include <asm/io.h> |
43 | #include <acpi/acpi_bus.h> | 43 | #include <acpi/acpi_bus.h> |
44 | #include <acpi/acpi_drivers.h> | 44 | #include <acpi/acpi_drivers.h> |
45 | #include <acpi/actypes.h> | ||
46 | 45 | ||
47 | #define ACPI_EC_CLASS "embedded_controller" | 46 | #define ACPI_EC_CLASS "embedded_controller" |
48 | #define ACPI_EC_DEVICE_NAME "Embedded Controller" | 47 | #define ACPI_EC_DEVICE_NAME "Embedded Controller" |
@@ -370,7 +369,7 @@ unlock: | |||
370 | * Note: samsung nv5000 doesn't work with ec burst mode. | 369 | * Note: samsung nv5000 doesn't work with ec burst mode. |
371 | * http://bugzilla.kernel.org/show_bug.cgi?id=4980 | 370 | * http://bugzilla.kernel.org/show_bug.cgi?id=4980 |
372 | */ | 371 | */ |
373 | int acpi_ec_burst_enable(struct acpi_ec *ec) | 372 | static int acpi_ec_burst_enable(struct acpi_ec *ec) |
374 | { | 373 | { |
375 | u8 d; | 374 | u8 d; |
376 | struct transaction t = {.command = ACPI_EC_BURST_ENABLE, | 375 | struct transaction t = {.command = ACPI_EC_BURST_ENABLE, |
@@ -380,7 +379,7 @@ int acpi_ec_burst_enable(struct acpi_ec *ec) | |||
380 | return acpi_ec_transaction(ec, &t, 0); | 379 | return acpi_ec_transaction(ec, &t, 0); |
381 | } | 380 | } |
382 | 381 | ||
383 | int acpi_ec_burst_disable(struct acpi_ec *ec) | 382 | static int acpi_ec_burst_disable(struct acpi_ec *ec) |
384 | { | 383 | { |
385 | struct transaction t = {.command = ACPI_EC_BURST_DISABLE, | 384 | struct transaction t = {.command = ACPI_EC_BURST_DISABLE, |
386 | .wdata = NULL, .rdata = NULL, | 385 | .wdata = NULL, .rdata = NULL, |
@@ -756,10 +755,15 @@ static acpi_status | |||
756 | acpi_ec_register_query_methods(acpi_handle handle, u32 level, | 755 | acpi_ec_register_query_methods(acpi_handle handle, u32 level, |
757 | void *context, void **return_value) | 756 | void *context, void **return_value) |
758 | { | 757 | { |
759 | struct acpi_namespace_node *node = handle; | 758 | char node_name[5]; |
759 | struct acpi_buffer buffer = { sizeof(node_name), node_name }; | ||
760 | struct acpi_ec *ec = context; | 760 | struct acpi_ec *ec = context; |
761 | int value = 0; | 761 | int value = 0; |
762 | if (sscanf(node->name.ascii, "_Q%x", &value) == 1) { | 762 | acpi_status status; |
763 | |||
764 | status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); | ||
765 | |||
766 | if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1) { | ||
763 | acpi_ec_add_query_handler(ec, value, handle, NULL, NULL); | 767 | acpi_ec_add_query_handler(ec, value, handle, NULL, NULL); |
764 | } | 768 | } |
765 | return AE_OK; | 769 | return AE_OK; |
@@ -978,9 +982,9 @@ static const struct acpi_device_id ec_device_ids[] = { | |||
978 | 982 | ||
979 | int __init acpi_ec_ecdt_probe(void) | 983 | int __init acpi_ec_ecdt_probe(void) |
980 | { | 984 | { |
981 | int ret; | ||
982 | acpi_status status; | 985 | acpi_status status; |
983 | struct acpi_table_ecdt *ecdt_ptr; | 986 | struct acpi_table_ecdt *ecdt_ptr; |
987 | acpi_handle dummy; | ||
984 | 988 | ||
985 | boot_ec = make_acpi_ec(); | 989 | boot_ec = make_acpi_ec(); |
986 | if (!boot_ec) | 990 | if (!boot_ec) |
@@ -1006,30 +1010,31 @@ int __init acpi_ec_ecdt_probe(void) | |||
1006 | boot_ec->gpe = ecdt_ptr->gpe; | 1010 | boot_ec->gpe = ecdt_ptr->gpe; |
1007 | boot_ec->handle = ACPI_ROOT_OBJECT; | 1011 | boot_ec->handle = ACPI_ROOT_OBJECT; |
1008 | acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); | 1012 | acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); |
1009 | } else { | 1013 | /* Add some basic check against completely broken table */ |
1010 | /* This workaround is needed only on some broken machines, | 1014 | if (boot_ec->data_addr != boot_ec->command_addr) |
1011 | * which require early EC, but fail to provide ECDT */ | 1015 | goto install; |
1012 | acpi_handle x; | 1016 | /* fall through */ |
1013 | printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); | ||
1014 | status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, | ||
1015 | boot_ec, NULL); | ||
1016 | /* Check that acpi_get_devices actually find something */ | ||
1017 | if (ACPI_FAILURE(status) || !boot_ec->handle) | ||
1018 | goto error; | ||
1019 | /* We really need to limit this workaround, the only ASUS, | ||
1020 | * which needs it, has fake EC._INI method, so use it as flag. | ||
1021 | * Keep boot_ec struct as it will be needed soon. | ||
1022 | */ | ||
1023 | if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x))) | ||
1024 | return -ENODEV; | ||
1025 | } | 1017 | } |
1026 | 1018 | /* This workaround is needed only on some broken machines, | |
1027 | ret = ec_install_handlers(boot_ec); | 1019 | * which require early EC, but fail to provide ECDT */ |
1028 | if (!ret) { | 1020 | printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); |
1021 | status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, | ||
1022 | boot_ec, NULL); | ||
1023 | /* Check that acpi_get_devices actually find something */ | ||
1024 | if (ACPI_FAILURE(status) || !boot_ec->handle) | ||
1025 | goto error; | ||
1026 | /* We really need to limit this workaround, the only ASUS, | ||
1027 | * which needs it, has fake EC._INI method, so use it as flag. | ||
1028 | * Keep boot_ec struct as it will be needed soon. | ||
1029 | */ | ||
1030 | if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &dummy))) | ||
1031 | return -ENODEV; | ||
1032 | install: | ||
1033 | if (!ec_install_handlers(boot_ec)) { | ||
1029 | first_ec = boot_ec; | 1034 | first_ec = boot_ec; |
1030 | return 0; | 1035 | return 0; |
1031 | } | 1036 | } |
1032 | error: | 1037 | error: |
1033 | kfree(boot_ec); | 1038 | kfree(boot_ec); |
1034 | boot_ec = NULL; | 1039 | boot_ec = NULL; |
1035 | return -ENODEV; | 1040 | return -ENODEV; |
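The acpi_ec_ecdt_probe() rework above keeps a single code path: a trivially sane ECDT (data and command registers at different addresses) jumps straight to handler installation, and anything else falls through to the DSDT lookup. The control flow, reduced to a self-contained sketch (the parameters stand in for the real table and namespace queries):

    /* Control-flow sketch of the probe above; only the structure matters. */
    static int ec_probe_sketch(int have_ecdt, unsigned long data_addr,
                               unsigned long command_addr, int found_in_dsdt)
    {
            if (have_ecdt) {
                    /* Basic check against a completely broken ECDT. */
                    if (data_addr != command_addr)
                            goto install;
                    /* fall through: ECDT looks bogus, consult the DSDT */
            }
            if (!found_in_dsdt)
                    return -ENODEV;
    install:
            return 0;       /* ec_install_handlers() runs at this point */
    }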
diff --git a/drivers/acpi/events/Makefile b/drivers/acpi/events/Makefile deleted file mode 100644 index d29f2ee449cc..000000000000 --- a/drivers/acpi/events/Makefile +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := evevent.o evregion.o evsci.o evxfevnt.o \ | ||
6 | evmisc.o evrgnini.o evxface.o evxfregn.o \ | ||
7 | evgpe.o evgpeblk.o | ||
8 | |||
9 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/executer/Makefile b/drivers/acpi/executer/Makefile deleted file mode 100644 index e09998aa012f..000000000000 --- a/drivers/acpi/executer/Makefile +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ | ||
6 | exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ | ||
7 | excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \ | ||
8 | exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o | ||
9 | |||
10 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/hardware/Makefile b/drivers/acpi/hardware/Makefile deleted file mode 100644 index 438ad373b9ad..000000000000 --- a/drivers/acpi/hardware/Makefile +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := hwacpi.o hwgpe.o hwregs.o hwsleep.o | ||
6 | |||
7 | obj-$(ACPI_FUTURE_USAGE) += hwtimer.o | ||
8 | |||
9 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/main.c index 28a691cc625e..7e3c609cbef2 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/main.c | |||
@@ -101,13 +101,26 @@ void __init acpi_old_suspend_ordering(void) | |||
101 | * cases. | 101 | * cases. |
102 | */ | 102 | */ |
103 | static bool set_sci_en_on_resume; | 103 | static bool set_sci_en_on_resume; |
104 | /* | ||
105 | * The ACPI specification wants us to save NVS memory regions during hibernation | ||
106 | * and to restore them during the subsequent resume. However, it is not certain | ||
107 | * if this mechanism is going to work on all machines, so we allow the user to | ||
108 | * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line | ||
109 | * option. | ||
110 | */ | ||
111 | static bool s4_no_nvs; | ||
112 | |||
113 | void __init acpi_s4_no_nvs(void) | ||
114 | { | ||
115 | s4_no_nvs = true; | ||
116 | } | ||
104 | 117 | ||
105 | /** | 118 | /** |
106 | * acpi_pm_disable_gpes - Disable the GPEs. | 119 | * acpi_pm_disable_gpes - Disable the GPEs. |
107 | */ | 120 | */ |
108 | static int acpi_pm_disable_gpes(void) | 121 | static int acpi_pm_disable_gpes(void) |
109 | { | 122 | { |
110 | acpi_hw_disable_all_gpes(); | 123 | acpi_disable_all_gpes(); |
111 | return 0; | 124 | return 0; |
112 | } | 125 | } |
113 | 126 | ||
@@ -135,7 +148,7 @@ static int acpi_pm_prepare(void) | |||
135 | int error = __acpi_pm_prepare(); | 148 | int error = __acpi_pm_prepare(); |
136 | 149 | ||
137 | if (!error) | 150 | if (!error) |
138 | acpi_hw_disable_all_gpes(); | 151 | acpi_disable_all_gpes(); |
139 | return error; | 152 | return error; |
140 | } | 153 | } |
141 | 154 | ||
@@ -267,7 +280,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) | |||
267 | * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. | 280 | * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. |
268 | * acpi_leave_sleep_state will reenable specific GPEs later | 281 | * acpi_leave_sleep_state will reenable specific GPEs later |
269 | */ | 282 | */ |
270 | acpi_hw_disable_all_gpes(); | 283 | acpi_disable_all_gpes(); |
271 | 284 | ||
272 | local_irq_restore(flags); | 285 | local_irq_restore(flags); |
273 | printk(KERN_DEBUG "Back to C!\n"); | 286 | printk(KERN_DEBUG "Back to C!\n"); |
@@ -394,9 +407,25 @@ void __init acpi_no_s4_hw_signature(void) | |||
394 | 407 | ||
395 | static int acpi_hibernation_begin(void) | 408 | static int acpi_hibernation_begin(void) |
396 | { | 409 | { |
397 | acpi_target_sleep_state = ACPI_STATE_S4; | 410 | int error; |
398 | acpi_sleep_tts_switch(acpi_target_sleep_state); | 411 | |
399 | return 0; | 412 | error = s4_no_nvs ? 0 : hibernate_nvs_alloc(); |
413 | if (!error) { | ||
414 | acpi_target_sleep_state = ACPI_STATE_S4; | ||
415 | acpi_sleep_tts_switch(acpi_target_sleep_state); | ||
416 | } | ||
417 | |||
418 | return error; | ||
419 | } | ||
420 | |||
421 | static int acpi_hibernation_pre_snapshot(void) | ||
422 | { | ||
423 | int error = acpi_pm_prepare(); | ||
424 | |||
425 | if (!error) | ||
426 | hibernate_nvs_save(); | ||
427 | |||
428 | return error; | ||
400 | } | 429 | } |
401 | 430 | ||
402 | static int acpi_hibernation_enter(void) | 431 | static int acpi_hibernation_enter(void) |
@@ -417,6 +446,12 @@ static int acpi_hibernation_enter(void) | |||
417 | return ACPI_SUCCESS(status) ? 0 : -EFAULT; | 446 | return ACPI_SUCCESS(status) ? 0 : -EFAULT; |
418 | } | 447 | } |
419 | 448 | ||
449 | static void acpi_hibernation_finish(void) | ||
450 | { | ||
451 | hibernate_nvs_free(); | ||
452 | acpi_pm_finish(); | ||
453 | } | ||
454 | |||
420 | static void acpi_hibernation_leave(void) | 455 | static void acpi_hibernation_leave(void) |
421 | { | 456 | { |
422 | /* | 457 | /* |
@@ -432,18 +467,20 @@ static void acpi_hibernation_leave(void) | |||
432 | "cannot resume!\n"); | 467 | "cannot resume!\n"); |
433 | panic("ACPI S4 hardware signature mismatch"); | 468 | panic("ACPI S4 hardware signature mismatch"); |
434 | } | 469 | } |
470 | /* Restore the NVS memory area */ | ||
471 | hibernate_nvs_restore(); | ||
435 | } | 472 | } |
436 | 473 | ||
437 | static void acpi_pm_enable_gpes(void) | 474 | static void acpi_pm_enable_gpes(void) |
438 | { | 475 | { |
439 | acpi_hw_enable_all_runtime_gpes(); | 476 | acpi_enable_all_runtime_gpes(); |
440 | } | 477 | } |
441 | 478 | ||
442 | static struct platform_hibernation_ops acpi_hibernation_ops = { | 479 | static struct platform_hibernation_ops acpi_hibernation_ops = { |
443 | .begin = acpi_hibernation_begin, | 480 | .begin = acpi_hibernation_begin, |
444 | .end = acpi_pm_end, | 481 | .end = acpi_pm_end, |
445 | .pre_snapshot = acpi_pm_prepare, | 482 | .pre_snapshot = acpi_hibernation_pre_snapshot, |
446 | .finish = acpi_pm_finish, | 483 | .finish = acpi_hibernation_finish, |
447 | .prepare = acpi_pm_prepare, | 484 | .prepare = acpi_pm_prepare, |
448 | .enter = acpi_hibernation_enter, | 485 | .enter = acpi_hibernation_enter, |
449 | .leave = acpi_hibernation_leave, | 486 | .leave = acpi_hibernation_leave, |
@@ -469,8 +506,22 @@ static int acpi_hibernation_begin_old(void) | |||
469 | 506 | ||
470 | error = acpi_sleep_prepare(ACPI_STATE_S4); | 507 | error = acpi_sleep_prepare(ACPI_STATE_S4); |
471 | 508 | ||
509 | if (!error) { | ||
510 | if (!s4_no_nvs) | ||
511 | error = hibernate_nvs_alloc(); | ||
512 | if (!error) | ||
513 | acpi_target_sleep_state = ACPI_STATE_S4; | ||
514 | } | ||
515 | return error; | ||
516 | } | ||
517 | |||
518 | static int acpi_hibernation_pre_snapshot_old(void) | ||
519 | { | ||
520 | int error = acpi_pm_disable_gpes(); | ||
521 | |||
472 | if (!error) | 522 | if (!error) |
473 | acpi_target_sleep_state = ACPI_STATE_S4; | 523 | hibernate_nvs_save(); |
524 | |||
474 | return error; | 525 | return error; |
475 | } | 526 | } |
476 | 527 | ||
@@ -481,8 +532,8 @@ static int acpi_hibernation_begin_old(void) | |||
481 | static struct platform_hibernation_ops acpi_hibernation_ops_old = { | 532 | static struct platform_hibernation_ops acpi_hibernation_ops_old = { |
482 | .begin = acpi_hibernation_begin_old, | 533 | .begin = acpi_hibernation_begin_old, |
483 | .end = acpi_pm_end, | 534 | .end = acpi_pm_end, |
484 | .pre_snapshot = acpi_pm_disable_gpes, | 535 | .pre_snapshot = acpi_hibernation_pre_snapshot_old, |
485 | .finish = acpi_pm_finish, | 536 | .finish = acpi_hibernation_finish, |
486 | .prepare = acpi_pm_disable_gpes, | 537 | .prepare = acpi_pm_disable_gpes, |
487 | .enter = acpi_hibernation_enter, | 538 | .enter = acpi_hibernation_enter, |
488 | .leave = acpi_hibernation_leave, | 539 | .leave = acpi_hibernation_leave, |
@@ -622,7 +673,7 @@ static void acpi_power_off_prepare(void) | |||
622 | { | 673 | { |
623 | /* Prepare to power off the system */ | 674 | /* Prepare to power off the system */ |
624 | acpi_sleep_prepare(ACPI_STATE_S5); | 675 | acpi_sleep_prepare(ACPI_STATE_S5); |
625 | acpi_hw_disable_all_gpes(); | 676 | acpi_disable_all_gpes(); |
626 | } | 677 | } |
627 | 678 | ||
628 | static void acpi_power_off(void) | 679 | static void acpi_power_off(void) |
@@ -671,7 +722,7 @@ int __init acpi_sleep_init(void) | |||
671 | sleep_states[ACPI_STATE_S4] = 1; | 722 | sleep_states[ACPI_STATE_S4] = 1; |
672 | printk(" S4"); | 723 | printk(" S4"); |
673 | if (!nosigcheck) { | 724 | if (!nosigcheck) { |
674 | acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, | 725 | acpi_get_table(ACPI_SIG_FACS, 1, |
675 | (struct acpi_table_header **)&facs); | 726 | (struct acpi_table_header **)&facs); |
676 | if (facs) | 727 | if (facs) |
677 | s4_hardware_signature = | 728 | s4_hardware_signature = |
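The sleep/main.c changes above thread the NVS backup through the hibernation callbacks: allocate in .begin (unless acpi_sleep=s4_nonvs), save in .pre_snapshot, restore in .leave, free in .finish. A compact summary of that ordering, written as a commented structure (the hook comments mirror the diff; hib_ops_sketch itself is illustrative):

    /* Ordering sketch for the NVS handling wired into the hibernation ops. */
    struct hib_ops_sketch {
            int  (*begin)(void);        /* hibernate_nvs_alloc() unless s4_nonvs        */
            int  (*pre_snapshot)(void); /* acpi_pm_prepare(), then hibernate_nvs_save() */
            void (*leave)(void);        /* hibernate_nvs_restore() right after resume   */
            void (*finish)(void);       /* hibernate_nvs_free(), then acpi_pm_finish()  */
    };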
diff --git a/drivers/acpi/namespace/Makefile b/drivers/acpi/namespace/Makefile deleted file mode 100644 index 371a2daf837f..000000000000 --- a/drivers/acpi/namespace/Makefile +++ /dev/null | |||
@@ -1,12 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := nsaccess.o nsload.o nssearch.o nsxfeval.o \ | ||
6 | nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \ | ||
7 | nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \ | ||
8 | nsparse.o nspredef.o | ||
9 | |||
10 | obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o | ||
11 | |||
12 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 25ceae9191ef..c5e292aab0e3 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/errno.h> | 29 | #include <linux/errno.h> |
30 | #include <linux/acpi.h> | 30 | #include <linux/acpi.h> |
31 | #include <acpi/acpi_bus.h> | 31 | #include <acpi/acpi_bus.h> |
32 | #include <acpi/acmacros.h> | ||
33 | 32 | ||
34 | #define ACPI_NUMA 0x80000000 | 33 | #define ACPI_NUMA 0x80000000 |
35 | #define _COMPONENT ACPI_NUMA | 34 | #define _COMPONENT ACPI_NUMA |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index c8111424dcb8..6729a4992f2b 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -726,7 +726,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, | |||
726 | 726 | ||
727 | dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); | 727 | dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); |
728 | if (!dpc) | 728 | if (!dpc) |
729 | return_ACPI_STATUS(AE_NO_MEMORY); | 729 | return AE_NO_MEMORY; |
730 | 730 | ||
731 | dpc->function = function; | 731 | dpc->function = function; |
732 | dpc->context = context; | 732 | dpc->context = context; |
@@ -747,7 +747,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, | |||
747 | status = AE_ERROR; | 747 | status = AE_ERROR; |
748 | kfree(dpc); | 748 | kfree(dpc); |
749 | } | 749 | } |
750 | return_ACPI_STATUS(status); | 750 | return status; |
751 | } | 751 | } |
752 | 752 | ||
753 | acpi_status acpi_os_execute(acpi_execute_type type, | 753 | acpi_status acpi_os_execute(acpi_execute_type type, |
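The osl.c hunk above drops return_ACPI_STATUS() from __acpi_os_execute(). That macro is only meaningful in functions that open with an ACPICA trace macro, which sets up the per-function debug context it prints; __acpi_os_execute() has no such entry, so a plain return is the right form. A minimal sketch of the pairing (with ACPI_DEBUG_OUTPUT disabled both forms compile down to a plain return):

    #include <acpi/acpi.h>

    static acpi_status traced_helper(void)
    {
            ACPI_FUNCTION_TRACE(traced_helper); /* sets up the trace context    */
            return_ACPI_STATUS(AE_OK);          /* logs function exit, returns  */
    }

    static acpi_status untraced_helper(void)
    {
            return AE_OK;                       /* no trace entry, plain return */
    }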
diff --git a/drivers/acpi/parser/Makefile b/drivers/acpi/parser/Makefile deleted file mode 100644 index db24ee09cf11..000000000000 --- a/drivers/acpi/parser/Makefile +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := psargs.o psparse.o psloop.o pstree.o pswalk.o \ | ||
6 | psopcode.o psscope.o psutils.o psxface.o | ||
7 | |||
8 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c index 4b252ea0e952..95650f83ce2e 100644 --- a/drivers/acpi/pci_bind.c +++ b/drivers/acpi/pci_bind.c | |||
@@ -99,7 +99,7 @@ acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id) | |||
99 | */ | 99 | */ |
100 | 100 | ||
101 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 101 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
102 | "Device %s has PCI address %02x:%02x:%02x.%02x\n", | 102 | "Device %s has PCI address %04x:%02x:%02x.%d\n", |
103 | acpi_device_bid(device), id->segment, id->bus, | 103 | acpi_device_bid(device), id->segment, id->bus, |
104 | id->device, id->function)); | 104 | id->device, id->function)); |
105 | 105 | ||
@@ -111,12 +111,11 @@ EXPORT_SYMBOL(acpi_get_pci_id); | |||
111 | int acpi_pci_bind(struct acpi_device *device) | 111 | int acpi_pci_bind(struct acpi_device *device) |
112 | { | 112 | { |
113 | int result = 0; | 113 | int result = 0; |
114 | acpi_status status = AE_OK; | 114 | acpi_status status; |
115 | struct acpi_pci_data *data = NULL; | 115 | struct acpi_pci_data *data; |
116 | struct acpi_pci_data *pdata = NULL; | 116 | struct acpi_pci_data *pdata; |
117 | char *pathname = NULL; | 117 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
118 | struct acpi_buffer buffer = { 0, NULL }; | 118 | acpi_handle handle; |
119 | acpi_handle handle = NULL; | ||
120 | struct pci_dev *dev; | 119 | struct pci_dev *dev; |
121 | struct pci_bus *bus; | 120 | struct pci_bus *bus; |
122 | 121 | ||
@@ -124,21 +123,18 @@ int acpi_pci_bind(struct acpi_device *device) | |||
124 | if (!device || !device->parent) | 123 | if (!device || !device->parent) |
125 | return -EINVAL; | 124 | return -EINVAL; |
126 | 125 | ||
127 | pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL); | ||
128 | if (!pathname) | ||
129 | return -ENOMEM; | ||
130 | buffer.length = ACPI_PATHNAME_MAX; | ||
131 | buffer.pointer = pathname; | ||
132 | |||
133 | data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); | 126 | data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); |
134 | if (!data) { | 127 | if (!data) |
135 | kfree(pathname); | ||
136 | return -ENOMEM; | 128 | return -ENOMEM; |
129 | |||
130 | status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); | ||
131 | if (ACPI_FAILURE(status)) { | ||
132 | kfree(data); | ||
133 | return -ENODEV; | ||
137 | } | 134 | } |
138 | 135 | ||
139 | acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); | ||
140 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n", | 136 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n", |
141 | pathname)); | 137 | (char *)buffer.pointer)); |
142 | 138 | ||
143 | /* | 139 | /* |
144 | * Segment & Bus | 140 | * Segment & Bus |
@@ -166,7 +162,7 @@ int acpi_pci_bind(struct acpi_device *device) | |||
166 | data->id.device = device->pnp.bus_address >> 16; | 162 | data->id.device = device->pnp.bus_address >> 16; |
167 | data->id.function = device->pnp.bus_address & 0xFFFF; | 163 | data->id.function = device->pnp.bus_address & 0xFFFF; |
168 | 164 | ||
169 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %02x:%02x:%02x.%02x\n", | 165 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %04x:%02x:%02x.%d\n", |
170 | data->id.segment, data->id.bus, data->id.device, | 166 | data->id.segment, data->id.bus, data->id.device, |
171 | data->id.function)); | 167 | data->id.function)); |
172 | 168 | ||
@@ -196,7 +192,7 @@ int acpi_pci_bind(struct acpi_device *device) | |||
196 | } | 192 | } |
197 | if (!data->dev) { | 193 | if (!data->dev) { |
198 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 194 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
199 | "Device %02x:%02x:%02x.%02x not present in PCI namespace\n", | 195 | "Device %04x:%02x:%02x.%d not present in PCI namespace\n", |
200 | data->id.segment, data->id.bus, | 196 | data->id.segment, data->id.bus, |
201 | data->id.device, data->id.function)); | 197 | data->id.device, data->id.function)); |
202 | result = -ENODEV; | 198 | result = -ENODEV; |
@@ -204,7 +200,7 @@ int acpi_pci_bind(struct acpi_device *device) | |||
204 | } | 200 | } |
205 | if (!data->dev->bus) { | 201 | if (!data->dev->bus) { |
206 | printk(KERN_ERR PREFIX | 202 | printk(KERN_ERR PREFIX |
207 | "Device %02x:%02x:%02x.%02x has invalid 'bus' field\n", | 203 | "Device %04x:%02x:%02x.%d has invalid 'bus' field\n", |
208 | data->id.segment, data->id.bus, | 204 | data->id.segment, data->id.bus, |
209 | data->id.device, data->id.function); | 205 | data->id.device, data->id.function); |
210 | result = -ENODEV; | 206 | result = -ENODEV; |
@@ -219,7 +215,7 @@ int acpi_pci_bind(struct acpi_device *device) | |||
219 | */ | 215 | */ |
220 | if (data->dev->subordinate) { | 216 | if (data->dev->subordinate) { |
221 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 217 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
222 | "Device %02x:%02x:%02x.%02x is a PCI bridge\n", | 218 | "Device %04x:%02x:%02x.%d is a PCI bridge\n", |
223 | data->id.segment, data->id.bus, | 219 | data->id.segment, data->id.bus, |
224 | data->id.device, data->id.function)); | 220 | data->id.device, data->id.function)); |
225 | data->bus = data->dev->subordinate; | 221 | data->bus = data->dev->subordinate; |
@@ -262,7 +258,7 @@ int acpi_pci_bind(struct acpi_device *device) | |||
262 | } | 258 | } |
263 | 259 | ||
264 | end: | 260 | end: |
265 | kfree(pathname); | 261 | kfree(buffer.pointer); |
266 | if (result) | 262 | if (result) |
267 | kfree(data); | 263 | kfree(data); |
268 | 264 | ||
@@ -272,25 +268,21 @@ int acpi_pci_bind(struct acpi_device *device) | |||
272 | static int acpi_pci_unbind(struct acpi_device *device) | 268 | static int acpi_pci_unbind(struct acpi_device *device) |
273 | { | 269 | { |
274 | int result = 0; | 270 | int result = 0; |
275 | acpi_status status = AE_OK; | 271 | acpi_status status; |
276 | struct acpi_pci_data *data = NULL; | 272 | struct acpi_pci_data *data; |
277 | char *pathname = NULL; | 273 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
278 | struct acpi_buffer buffer = { 0, NULL }; | ||
279 | 274 | ||
280 | 275 | ||
281 | if (!device || !device->parent) | 276 | if (!device || !device->parent) |
282 | return -EINVAL; | 277 | return -EINVAL; |
283 | 278 | ||
284 | pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL); | 279 | status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); |
285 | if (!pathname) | 280 | if (ACPI_FAILURE(status)) |
286 | return -ENOMEM; | 281 | return -ENODEV; |
287 | 282 | ||
288 | buffer.length = ACPI_PATHNAME_MAX; | ||
289 | buffer.pointer = pathname; | ||
290 | acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); | ||
291 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n", | 283 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n", |
292 | pathname)); | 284 | (char *) buffer.pointer)); |
293 | kfree(pathname); | 285 | kfree(buffer.pointer); |
294 | 286 | ||
295 | status = | 287 | status = |
296 | acpi_get_data(device->handle, acpi_pci_data_handler, | 288 | acpi_get_data(device->handle, acpi_pci_data_handler, |
@@ -322,50 +314,44 @@ acpi_pci_bind_root(struct acpi_device *device, | |||
322 | struct acpi_pci_id *id, struct pci_bus *bus) | 314 | struct acpi_pci_id *id, struct pci_bus *bus) |
323 | { | 315 | { |
324 | int result = 0; | 316 | int result = 0; |
325 | acpi_status status = AE_OK; | 317 | acpi_status status; |
326 | struct acpi_pci_data *data = NULL; | 318 | struct acpi_pci_data *data = NULL; |
327 | char *pathname = NULL; | 319 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
328 | struct acpi_buffer buffer = { 0, NULL }; | ||
329 | |||
330 | pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL); | ||
331 | if (!pathname) | ||
332 | return -ENOMEM; | ||
333 | |||
334 | buffer.length = ACPI_PATHNAME_MAX; | ||
335 | buffer.pointer = pathname; | ||
336 | 320 | ||
337 | if (!device || !id || !bus) { | 321 | if (!device || !id || !bus) { |
338 | kfree(pathname); | ||
339 | return -EINVAL; | 322 | return -EINVAL; |
340 | } | 323 | } |
341 | 324 | ||
342 | data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); | 325 | data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); |
343 | if (!data) { | 326 | if (!data) |
344 | kfree(pathname); | ||
345 | return -ENOMEM; | 327 | return -ENOMEM; |
346 | } | ||
347 | 328 | ||
348 | data->id = *id; | 329 | data->id = *id; |
349 | data->bus = bus; | 330 | data->bus = bus; |
350 | device->ops.bind = acpi_pci_bind; | 331 | device->ops.bind = acpi_pci_bind; |
351 | device->ops.unbind = acpi_pci_unbind; | 332 | device->ops.unbind = acpi_pci_unbind; |
352 | 333 | ||
353 | acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); | 334 | status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); |
335 | if (ACPI_FAILURE(status)) { | ||
336 | kfree (data); | ||
337 | return -ENODEV; | ||
338 | } | ||
354 | 339 | ||
355 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to " | 340 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to " |
356 | "%02x:%02x\n", pathname, id->segment, id->bus)); | 341 | "%04x:%02x\n", (char *)buffer.pointer, |
342 | id->segment, id->bus)); | ||
357 | 343 | ||
358 | status = acpi_attach_data(device->handle, acpi_pci_data_handler, data); | 344 | status = acpi_attach_data(device->handle, acpi_pci_data_handler, data); |
359 | if (ACPI_FAILURE(status)) { | 345 | if (ACPI_FAILURE(status)) { |
360 | ACPI_EXCEPTION((AE_INFO, status, | 346 | ACPI_EXCEPTION((AE_INFO, status, |
361 | "Unable to attach ACPI-PCI context to device %s", | 347 | "Unable to attach ACPI-PCI context to device %s", |
362 | pathname)); | 348 | (char *)buffer.pointer)); |
363 | result = -ENODEV; | 349 | result = -ENODEV; |
364 | goto end; | 350 | goto end; |
365 | } | 351 | } |
366 | 352 | ||
367 | end: | 353 | end: |
368 | kfree(pathname); | 354 | kfree(buffer.pointer); |
369 | if (result != 0) | 355 | if (result != 0) |
370 | kfree(data); | 356 | kfree(data); |
371 | 357 | ||
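Both pci_bind.c paths above swap a hand-rolled ACPI_PATHNAME_MAX buffer for one that ACPICA allocates itself. The pattern is small enough to show in full; a sketch under the usual kernel includes (print_acpi_path is an illustrative name, not an in-tree helper):

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <acpi/acpi.h>

    static int print_acpi_path(acpi_handle handle)
    {
            struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
            acpi_status status;

            status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
            if (ACPI_FAILURE(status))
                    return -ENODEV;

            printk(KERN_DEBUG "path: %s\n", (char *)buffer.pointer);
            kfree(buffer.pointer);  /* caller frees ACPICA's allocation */
            return 0;
    }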
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index bf79d83bdfbb..891bdf6679f3 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -4,6 +4,8 @@ | |||
4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> | 4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> |
5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | 5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
6 | * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> | 6 | * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> |
7 | * (c) Copyright 2008 Hewlett-Packard Development Company, L.P. | ||
8 | * Bjorn Helgaas <bjorn.helgaas@hp.com> | ||
7 | * | 9 | * |
8 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 10 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
9 | * | 11 | * |
@@ -41,29 +43,36 @@ | |||
41 | #define _COMPONENT ACPI_PCI_COMPONENT | 43 | #define _COMPONENT ACPI_PCI_COMPONENT |
42 | ACPI_MODULE_NAME("pci_irq"); | 44 | ACPI_MODULE_NAME("pci_irq"); |
43 | 45 | ||
44 | static struct acpi_prt_list acpi_prt; | 46 | struct acpi_prt_entry { |
47 | struct list_head list; | ||
48 | struct acpi_pci_id id; | ||
49 | u8 pin; | ||
50 | acpi_handle link; | ||
51 | u32 index; /* GSI, or link _CRS index */ | ||
52 | }; | ||
53 | |||
54 | static LIST_HEAD(acpi_prt_list); | ||
45 | static DEFINE_SPINLOCK(acpi_prt_lock); | 55 | static DEFINE_SPINLOCK(acpi_prt_lock); |
46 | 56 | ||
57 | static inline char pin_name(int pin) | ||
58 | { | ||
59 | return 'A' + pin - 1; | ||
60 | } | ||
61 | |||
47 | /* -------------------------------------------------------------------------- | 62 | /* -------------------------------------------------------------------------- |
48 | PCI IRQ Routing Table (PRT) Support | 63 | PCI IRQ Routing Table (PRT) Support |
49 | -------------------------------------------------------------------------- */ | 64 | -------------------------------------------------------------------------- */ |
50 | 65 | ||
51 | static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment, | 66 | static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(struct pci_dev *dev, |
52 | int bus, | 67 | int pin) |
53 | int device, int pin) | ||
54 | { | 68 | { |
55 | struct acpi_prt_entry *entry = NULL; | 69 | struct acpi_prt_entry *entry; |
56 | 70 | int segment = pci_domain_nr(dev->bus); | |
57 | if (!acpi_prt.count) | 71 | int bus = dev->bus->number; |
58 | return NULL; | 72 | int device = PCI_SLOT(dev->devfn); |
59 | 73 | ||
60 | /* | ||
61 | * Parse through all PRT entries looking for a match on the specified | ||
62 | * PCI device's segment, bus, device, and pin (don't care about func). | ||
63 | * | ||
64 | */ | ||
65 | spin_lock(&acpi_prt_lock); | 74 | spin_lock(&acpi_prt_lock); |
66 | list_for_each_entry(entry, &acpi_prt.entries, node) { | 75 | list_for_each_entry(entry, &acpi_prt_list, list) { |
67 | if ((segment == entry->id.segment) | 76 | if ((segment == entry->id.segment) |
68 | && (bus == entry->id.bus) | 77 | && (bus == entry->id.bus) |
69 | && (device == entry->id.device) | 78 | && (device == entry->id.device) |
@@ -72,7 +81,6 @@ static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment, | |||
72 | return entry; | 81 | return entry; |
73 | } | 82 | } |
74 | } | 83 | } |
75 | |||
76 | spin_unlock(&acpi_prt_lock); | 84 | spin_unlock(&acpi_prt_lock); |
77 | return NULL; | 85 | return NULL; |
78 | } | 86 | } |
@@ -124,25 +132,27 @@ struct prt_quirk { | |||
124 | char *actual_source; | 132 | char *actual_source; |
125 | }; | 133 | }; |
126 | 134 | ||
135 | #define PCI_INTX_PIN(c) (c - 'A' + 1) | ||
136 | |||
127 | /* | 137 | /* |
128 | * These systems have incorrect _PRT entries. The BIOS claims the PCI | 138 | * These systems have incorrect _PRT entries. The BIOS claims the PCI |
129 | * interrupt at the listed segment/bus/device/pin is connected to the first | 139 | * interrupt at the listed segment/bus/device/pin is connected to the first |
130 | * link device, but it is actually connected to the second. | 140 | * link device, but it is actually connected to the second. |
131 | */ | 141 | */ |
132 | static struct prt_quirk prt_quirks[] = { | 142 | static struct prt_quirk prt_quirks[] = { |
133 | { medion_md9580, 0, 0, 9, 'A', | 143 | { medion_md9580, 0, 0, 9, PCI_INTX_PIN('A'), |
134 | "\\_SB_.PCI0.ISA_.LNKA", | 144 | "\\_SB_.PCI0.ISA_.LNKA", |
135 | "\\_SB_.PCI0.ISA_.LNKB"}, | 145 | "\\_SB_.PCI0.ISA_.LNKB"}, |
136 | { dell_optiplex, 0, 0, 0xd, 'A', | 146 | { dell_optiplex, 0, 0, 0xd, PCI_INTX_PIN('A'), |
137 | "\\_SB_.LNKB", | 147 | "\\_SB_.LNKB", |
138 | "\\_SB_.LNKA"}, | 148 | "\\_SB_.LNKA"}, |
139 | { hp_t5710, 0, 0, 1, 'A', | 149 | { hp_t5710, 0, 0, 1, PCI_INTX_PIN('A'), |
140 | "\\_SB_.PCI0.LNK1", | 150 | "\\_SB_.PCI0.LNK1", |
141 | "\\_SB_.PCI0.LNK3"}, | 151 | "\\_SB_.PCI0.LNK3"}, |
142 | }; | 152 | }; |
143 | 153 | ||
144 | static void | 154 | static void do_prt_fixups(struct acpi_prt_entry *entry, |
145 | do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt) | 155 | struct acpi_pci_routing_table *prt) |
146 | { | 156 | { |
147 | int i; | 157 | int i; |
148 | struct prt_quirk *quirk; | 158 | struct prt_quirk *quirk; |
@@ -158,42 +168,43 @@ do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt) | |||
158 | entry->id.segment == quirk->segment && | 168 | entry->id.segment == quirk->segment && |
159 | entry->id.bus == quirk->bus && | 169 | entry->id.bus == quirk->bus && |
160 | entry->id.device == quirk->device && | 170 | entry->id.device == quirk->device && |
161 | entry->pin + 'A' == quirk->pin && | 171 | entry->pin == quirk->pin && |
162 | !strcmp(prt->source, quirk->source) && | 172 | !strcmp(prt->source, quirk->source) && |
163 | strlen(prt->source) >= strlen(quirk->actual_source)) { | 173 | strlen(prt->source) >= strlen(quirk->actual_source)) { |
164 | printk(KERN_WARNING PREFIX "firmware reports " | 174 | printk(KERN_WARNING PREFIX "firmware reports " |
165 | "%04x:%02x:%02x PCI INT %c connected to %s; " | 175 | "%04x:%02x:%02x PCI INT %c connected to %s; " |
166 | "changing to %s\n", | 176 | "changing to %s\n", |
167 | entry->id.segment, entry->id.bus, | 177 | entry->id.segment, entry->id.bus, |
168 | entry->id.device, 'A' + entry->pin, | 178 | entry->id.device, pin_name(entry->pin), |
169 | prt->source, quirk->actual_source); | 179 | prt->source, quirk->actual_source); |
170 | strcpy(prt->source, quirk->actual_source); | 180 | strcpy(prt->source, quirk->actual_source); |
171 | } | 181 | } |
172 | } | 182 | } |
173 | } | 183 | } |
174 | 184 | ||
175 | static int | 185 | static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus, |
176 | acpi_pci_irq_add_entry(acpi_handle handle, | 186 | struct acpi_pci_routing_table *prt) |
177 | int segment, int bus, struct acpi_pci_routing_table *prt) | ||
178 | { | 187 | { |
179 | struct acpi_prt_entry *entry = NULL; | 188 | struct acpi_prt_entry *entry; |
180 | |||
181 | |||
182 | if (!prt) | ||
183 | return -EINVAL; | ||
184 | 189 | ||
185 | entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL); | 190 | entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL); |
186 | if (!entry) | 191 | if (!entry) |
187 | return -ENOMEM; | 192 | return -ENOMEM; |
188 | 193 | ||
194 | /* | ||
195 | * Note that the _PRT uses 0=INTA, 1=INTB, etc, while PCI uses | ||
196 | * 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert | ||
197 | * it here. | ||
198 | */ | ||
189 | entry->id.segment = segment; | 199 | entry->id.segment = segment; |
190 | entry->id.bus = bus; | 200 | entry->id.bus = bus; |
191 | entry->id.device = (prt->address >> 16) & 0xFFFF; | 201 | entry->id.device = (prt->address >> 16) & 0xFFFF; |
192 | entry->id.function = prt->address & 0xFFFF; | 202 | entry->pin = prt->pin + 1; |
193 | entry->pin = prt->pin; | ||
194 | 203 | ||
195 | do_prt_fixups(entry, prt); | 204 | do_prt_fixups(entry, prt); |
196 | 205 | ||
206 | entry->index = prt->source_index; | ||
207 | |||
197 | /* | 208 | /* |
198 | * Type 1: Dynamic | 209 | * Type 1: Dynamic |
199 | * --------------- | 210 | * --------------- |
@@ -207,10 +218,9 @@ acpi_pci_irq_add_entry(acpi_handle handle, | |||
207 | * (e.g. exists somewhere 'below' this _PRT entry in the ACPI | 218 | * (e.g. exists somewhere 'below' this _PRT entry in the ACPI |
208 | * namespace). | 219 | * namespace). |
209 | */ | 220 | */ |
210 | if (prt->source[0]) { | 221 | if (prt->source[0]) |
211 | acpi_get_handle(handle, prt->source, &entry->link.handle); | 222 | acpi_get_handle(handle, prt->source, &entry->link); |
212 | entry->link.index = prt->source_index; | 223 | |
213 | } | ||
214 | /* | 224 | /* |
215 | * Type 2: Static | 225 | * Type 2: Static |
216 | * -------------- | 226 | * -------------- |
@@ -218,84 +228,38 @@ acpi_pci_irq_add_entry(acpi_handle handle, | |||
218 | * the IRQ value, which is hardwired to specific interrupt inputs on | 228 | * the IRQ value, which is hardwired to specific interrupt inputs on |
219 | * the interrupt controller. | 229 | * the interrupt controller. |
220 | */ | 230 | */ |
221 | else | ||
222 | entry->link.index = prt->source_index; | ||
223 | 231 | ||
224 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, | 232 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, |
225 | " %02X:%02X:%02X[%c] -> %s[%d]\n", | 233 | " %04x:%02x:%02x[%c] -> %s[%d]\n", |
226 | entry->id.segment, entry->id.bus, | 234 | entry->id.segment, entry->id.bus, |
227 | entry->id.device, ('A' + entry->pin), prt->source, | 235 | entry->id.device, pin_name(entry->pin), |
228 | entry->link.index)); | 236 | prt->source, entry->index)); |
229 | 237 | ||
230 | spin_lock(&acpi_prt_lock); | 238 | spin_lock(&acpi_prt_lock); |
231 | list_add_tail(&entry->node, &acpi_prt.entries); | 239 | list_add_tail(&entry->list, &acpi_prt_list); |
232 | acpi_prt.count++; | ||
233 | spin_unlock(&acpi_prt_lock); | 240 | spin_unlock(&acpi_prt_lock); |
234 | 241 | ||
235 | return 0; | 242 | return 0; |
236 | } | 243 | } |
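The encoding conversion described in the comment above can be summarized in two one-line helpers; the names here are illustrative, not part of the patch:

    /* _PRT encodes pin 0..3 (INTA..INTD) and address as (device << 16) | 0xffff;
     * PCI config space encodes pins 1..4 in PCI_INTERRUPT_PIN. */
    static inline int prt_pin_to_pci(u32 prt_pin)
    {
            return prt_pin + 1;                     /* 0..3 -> 1..4 */
    }

    static inline int prt_address_to_slot(u64 address)
    {
            return (address >> 16) & 0xffff;        /* low word is the function; 0xffff means "any" */
    }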
237 | 244 | ||
238 | static void | ||
239 | acpi_pci_irq_del_entry(int segment, int bus, struct acpi_prt_entry *entry) | ||
240 | { | ||
241 | if (segment == entry->id.segment && bus == entry->id.bus) { | ||
242 | acpi_prt.count--; | ||
243 | list_del(&entry->node); | ||
244 | kfree(entry); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus) | 245 | int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus) |
249 | { | 246 | { |
250 | acpi_status status = AE_OK; | 247 | acpi_status status; |
251 | char *pathname = NULL; | 248 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
252 | struct acpi_buffer buffer = { 0, NULL }; | 249 | struct acpi_pci_routing_table *entry; |
253 | struct acpi_pci_routing_table *prt = NULL; | ||
254 | struct acpi_pci_routing_table *entry = NULL; | ||
255 | static int first_time = 1; | ||
256 | |||
257 | |||
258 | pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL); | ||
259 | if (!pathname) | ||
260 | return -ENOMEM; | ||
261 | |||
262 | if (first_time) { | ||
263 | acpi_prt.count = 0; | ||
264 | INIT_LIST_HEAD(&acpi_prt.entries); | ||
265 | first_time = 0; | ||
266 | } | ||
267 | |||
268 | /* | ||
269 | * NOTE: We're given a 'handle' to the _PRT object's parent device | ||
270 | * (either a PCI root bridge or PCI-PCI bridge). | ||
271 | */ | ||
272 | 250 | ||
273 | buffer.length = ACPI_PATHNAME_MAX; | 251 | /* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */ |
274 | buffer.pointer = pathname; | 252 | status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); |
275 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | 253 | if (ACPI_FAILURE(status)) |
254 | return -ENODEV; | ||
276 | 255 | ||
277 | printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n", | 256 | printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n", |
278 | pathname); | 257 | (char *) buffer.pointer); |
279 | 258 | ||
280 | /* | 259 | kfree(buffer.pointer); |
281 | * Evaluate this _PRT and add its entries to our global list (acpi_prt). | ||
282 | */ | ||
283 | 260 | ||
284 | buffer.length = 0; | 261 | buffer.length = ACPI_ALLOCATE_BUFFER; |
285 | buffer.pointer = NULL; | 262 | buffer.pointer = NULL; |
286 | kfree(pathname); | ||
287 | status = acpi_get_irq_routing_table(handle, &buffer); | ||
288 | if (status != AE_BUFFER_OVERFLOW) { | ||
289 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRT [%s]", | ||
290 | acpi_format_exception(status))); | ||
291 | return -ENODEV; | ||
292 | } | ||
293 | |||
294 | prt = kzalloc(buffer.length, GFP_KERNEL); | ||
295 | if (!prt) { | ||
296 | return -ENOMEM; | ||
297 | } | ||
298 | buffer.pointer = prt; | ||
299 | 263 | ||
300 | status = acpi_get_irq_routing_table(handle, &buffer); | 264 | status = acpi_get_irq_routing_table(handle, &buffer); |
301 | if (ACPI_FAILURE(status)) { | 265 | if (ACPI_FAILURE(status)) { |
@@ -305,36 +269,30 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus) | |||
305 | return -ENODEV; | 269 | return -ENODEV; |
306 | } | 270 | } |
307 | 271 | ||
308 | entry = prt; | 272 | entry = buffer.pointer; |
309 | |||
310 | while (entry && (entry->length > 0)) { | 273 | while (entry && (entry->length > 0)) { |
311 | acpi_pci_irq_add_entry(handle, segment, bus, entry); | 274 | acpi_pci_irq_add_entry(handle, segment, bus, entry); |
312 | entry = (struct acpi_pci_routing_table *) | 275 | entry = (struct acpi_pci_routing_table *) |
313 | ((unsigned long)entry + entry->length); | 276 | ((unsigned long)entry + entry->length); |
314 | } | 277 | } |
315 | 278 | ||
316 | kfree(prt); | 279 | kfree(buffer.pointer); |
317 | |||
318 | return 0; | 280 | return 0; |
319 | } | 281 | } |
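acpi_pci_irq_add_prt() now relies on the ACPI_ALLOCATE_BUFFER idiom for both acpi_get_name() and acpi_get_irq_routing_table(): ACPICA sizes and allocates the output buffer, and the caller only frees it. A minimal sketch of walking a routing table obtained this way (illustrative function name):

    #include <linux/slab.h>
    #include <acpi/acpi.h>

    static void walk_prt(acpi_handle handle)
    {
            struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
            struct acpi_pci_routing_table *entry;

            if (ACPI_FAILURE(acpi_get_irq_routing_table(handle, &buffer)))
                    return;

            for (entry = buffer.pointer; entry->length > 0;
                 entry = (void *)entry + entry->length) {
                    /* consume one entry; the table ends with a zero-length record */
            }
            kfree(buffer.pointer);          /* allocated by ACPICA through the Linux OSL */
    }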
320 | 282 | ||
321 | void acpi_pci_irq_del_prt(int segment, int bus) | 283 | void acpi_pci_irq_del_prt(int segment, int bus) |
322 | { | 284 | { |
323 | struct list_head *node = NULL, *n = NULL; | 285 | struct acpi_prt_entry *entry, *tmp; |
324 | struct acpi_prt_entry *entry = NULL; | ||
325 | |||
326 | if (!acpi_prt.count) { | ||
327 | return; | ||
328 | } | ||
329 | 286 | ||
330 | printk(KERN_DEBUG | 287 | printk(KERN_DEBUG |
331 | "ACPI: Delete PCI Interrupt Routing Table for %x:%x\n", segment, | 288 | "ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n", |
332 | bus); | 289 | segment, bus); |
333 | spin_lock(&acpi_prt_lock); | 290 | spin_lock(&acpi_prt_lock); |
334 | list_for_each_safe(node, n, &acpi_prt.entries) { | 291 | list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) { |
335 | entry = list_entry(node, struct acpi_prt_entry, node); | 292 | if (segment == entry->id.segment && bus == entry->id.bus) { |
336 | 293 | list_del(&entry->list); | |
337 | acpi_pci_irq_del_entry(segment, bus, entry); | 294 | kfree(entry); |
295 | } | ||
338 | } | 296 | } |
339 | spin_unlock(&acpi_prt_lock); | 297 | spin_unlock(&acpi_prt_lock); |
340 | } | 298 | } |
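The deletion path above folds the old per-entry helper into a single pass with list_for_each_entry_safe(), which is required because entries are unlinked while iterating. The general pattern, as a self-contained sketch with a stand-in structure:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct prt_like {
            struct list_head list;
            int segment, bus;
    };

    static void drop_bus(struct list_head *head, int segment, int bus)
    {
            struct prt_like *e, *tmp;

            list_for_each_entry_safe(e, tmp, head, list) {
                    if (e->segment == segment && e->bus == bus) {
                            list_del(&e->list);     /* safe: 'tmp' already points past 'e' */
                            kfree(e);
                    }
            }
    }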
@@ -342,162 +300,26 @@ void acpi_pci_irq_del_prt(int segment, int bus) | |||
342 | /* -------------------------------------------------------------------------- | 300 | /* -------------------------------------------------------------------------- |
343 | PCI Interrupt Routing Support | 301 | PCI Interrupt Routing Support |
344 | -------------------------------------------------------------------------- */ | 302 | -------------------------------------------------------------------------- */ |
345 | typedef int (*irq_lookup_func) (struct acpi_prt_entry *, int *, int *, char **); | 303 | static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) |
346 | |||
347 | static int | ||
348 | acpi_pci_allocate_irq(struct acpi_prt_entry *entry, | ||
349 | int *triggering, int *polarity, char **link) | ||
350 | { | ||
351 | int irq; | ||
352 | |||
353 | |||
354 | if (entry->link.handle) { | ||
355 | irq = acpi_pci_link_allocate_irq(entry->link.handle, | ||
356 | entry->link.index, triggering, | ||
357 | polarity, link); | ||
358 | if (irq < 0) { | ||
359 | printk(KERN_WARNING PREFIX | ||
360 | "Invalid IRQ link routing entry\n"); | ||
361 | return -1; | ||
362 | } | ||
363 | } else { | ||
364 | irq = entry->link.index; | ||
365 | *triggering = ACPI_LEVEL_SENSITIVE; | ||
366 | *polarity = ACPI_ACTIVE_LOW; | ||
367 | } | ||
368 | |||
369 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found IRQ %d\n", irq)); | ||
370 | return irq; | ||
371 | } | ||
372 | |||
373 | static int | ||
374 | acpi_pci_free_irq(struct acpi_prt_entry *entry, | ||
375 | int *triggering, int *polarity, char **link) | ||
376 | { | ||
377 | int irq; | ||
378 | |||
379 | if (entry->link.handle) { | ||
380 | irq = acpi_pci_link_free_irq(entry->link.handle); | ||
381 | } else { | ||
382 | irq = entry->link.index; | ||
383 | } | ||
384 | return irq; | ||
385 | } | ||
386 | |||
387 | #ifdef CONFIG_X86_IO_APIC | ||
388 | extern int noioapicquirk; | ||
389 | |||
390 | static int bridge_has_boot_interrupt_variant(struct pci_bus *bus) | ||
391 | { | 304 | { |
392 | struct pci_bus *bus_it; | 305 | struct acpi_prt_entry *entry; |
393 | 306 | struct pci_dev *bridge; | |
394 | for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) { | 307 | u8 bridge_pin, orig_pin = pin; |
395 | if (!bus_it->self) | 308 | |
396 | return 0; | 309 | entry = acpi_pci_irq_find_prt_entry(dev, pin); |
397 | 310 | if (entry) { | |
398 | printk(KERN_INFO "vendor=%04x device=%04x\n", bus_it->self->vendor, | 311 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", |
399 | bus_it->self->device); | 312 | pci_name(dev), pin_name(pin))); |
400 | 313 | return entry; | |
401 | if (bus_it->self->irq_reroute_variant) | ||
402 | return bus_it->self->irq_reroute_variant; | ||
403 | } | ||
404 | return 0; | ||
405 | } | ||
406 | #endif /* CONFIG_X86_IO_APIC */ | ||
407 | |||
408 | /* | ||
409 | * acpi_pci_irq_lookup | ||
410 | * success: return IRQ >= 0 | ||
411 | * failure: return -1 | ||
412 | */ | ||
413 | static int | ||
414 | acpi_pci_irq_lookup(struct pci_bus *bus, | ||
415 | int device, | ||
416 | int pin, | ||
417 | int *triggering, | ||
418 | int *polarity, char **link, irq_lookup_func func) | ||
419 | { | ||
420 | struct acpi_prt_entry *entry = NULL; | ||
421 | int segment = pci_domain_nr(bus); | ||
422 | int bus_nr = bus->number; | ||
423 | int ret; | ||
424 | |||
425 | |||
426 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
427 | "Searching for PRT entry for %02x:%02x:%02x[%c]\n", | ||
428 | segment, bus_nr, device, ('A' + pin))); | ||
429 | |||
430 | entry = acpi_pci_irq_find_prt_entry(segment, bus_nr, device, pin); | ||
431 | if (!entry) { | ||
432 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PRT entry not found\n")); | ||
433 | return -1; | ||
434 | } | ||
435 | |||
436 | ret = func(entry, triggering, polarity, link); | ||
437 | |||
438 | #ifdef CONFIG_X86_IO_APIC | ||
439 | /* | ||
440 | * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the | ||
441 | * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel | ||
442 | * does during interrupt handling). When this INTx generation cannot be | ||
443 | * disabled, we reroute these interrupts to their legacy equivalent to | ||
444 | * get rid of spurious interrupts. | ||
445 | */ | ||
446 | if (!noioapicquirk) { | ||
447 | switch (bridge_has_boot_interrupt_variant(bus)) { | ||
448 | case 0: | ||
449 | /* no rerouting necessary */ | ||
450 | break; | ||
451 | |||
452 | case INTEL_IRQ_REROUTE_VARIANT: | ||
453 | /* | ||
454 | * Remap according to INTx routing table in 6700PXH | ||
455 | * specs, intel order number 302628-002, section | ||
456 | * 2.15.2. Other chipsets (80332, ...) have the same | ||
457 | * mapping and are handled here as well. | ||
458 | */ | ||
459 | printk(KERN_INFO "pci irq %d -> rerouted to legacy " | ||
460 | "irq %d\n", ret, (ret % 4) + 16); | ||
461 | ret = (ret % 4) + 16; | ||
462 | break; | ||
463 | |||
464 | default: | ||
465 | printk(KERN_INFO "not rerouting irq %d to legacy irq: " | ||
466 | "unknown mapping\n", ret); | ||
467 | break; | ||
468 | } | ||
469 | } | 314 | } |
470 | #endif /* CONFIG_X86_IO_APIC */ | ||
471 | |||
472 | return ret; | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * acpi_pci_irq_derive | ||
477 | * success: return IRQ >= 0 | ||
478 | * failure: return < 0 | ||
479 | */ | ||
480 | static int | ||
481 | acpi_pci_irq_derive(struct pci_dev *dev, | ||
482 | int pin, | ||
483 | int *triggering, | ||
484 | int *polarity, char **link, irq_lookup_func func) | ||
485 | { | ||
486 | struct pci_dev *bridge = dev; | ||
487 | int irq = -1; | ||
488 | u8 bridge_pin = 0, orig_pin = pin; | ||
489 | |||
490 | |||
491 | if (!dev) | ||
492 | return -EINVAL; | ||
493 | 315 | ||
494 | /* | 316 | /* |
495 | * Attempt to derive an IRQ for this device from a parent bridge's | 317 | * Attempt to derive an IRQ for this device from a parent bridge's |
496 | * PCI interrupt routing entry (e.g. yenta bridge and add-in card bridge). | 318 | * PCI interrupt routing entry (e.g. yenta bridge and add-in card bridge). |
497 | */ | 319 | */ |
498 | while (irq < 0 && bridge->bus->self) { | 320 | bridge = dev->bus->self; |
499 | pin = (pin + PCI_SLOT(bridge->devfn)) % 4; | 321 | while (bridge) { |
500 | bridge = bridge->bus->self; | 322 | pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; |
501 | 323 | ||
502 | if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) { | 324 | if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) { |
503 | /* PC card has the same IRQ as its cardbridge */ | 325 | /* PC card has the same IRQ as its cardbridge */ |
@@ -506,50 +328,40 @@ acpi_pci_irq_derive(struct pci_dev *dev, | |||
506 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 328 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
507 | "No interrupt pin configured for device %s\n", | 329 | "No interrupt pin configured for device %s\n", |
508 | pci_name(bridge))); | 330 | pci_name(bridge))); |
509 | return -1; | 331 | return NULL; |
510 | } | 332 | } |
511 | /* Pin is from 0 to 3 */ | ||
512 | bridge_pin--; | ||
513 | pin = bridge_pin; | 333 | pin = bridge_pin; |
514 | } | 334 | } |
515 | 335 | ||
516 | irq = acpi_pci_irq_lookup(bridge->bus, PCI_SLOT(bridge->devfn), | 336 | entry = acpi_pci_irq_find_prt_entry(bridge, pin); |
517 | pin, triggering, polarity, | 337 | if (entry) { |
518 | link, func); | 338 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
519 | } | 339 | "Derived GSI for %s INT %c from %s\n", |
340 | pci_name(dev), pin_name(orig_pin), | ||
341 | pci_name(bridge))); | ||
342 | return entry; | ||
343 | } | ||
520 | 344 | ||
521 | if (irq < 0) { | 345 | dev = bridge; |
522 | dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n", | 346 | bridge = dev->bus->self; |
523 | 'A' + orig_pin); | ||
524 | return -1; | ||
525 | } | 347 | } |
526 | 348 | ||
527 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Derive IRQ %d for device %s from %s\n", | 349 | dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n", |
528 | irq, pci_name(dev), pci_name(bridge))); | 350 | pin_name(orig_pin)); |
529 | 351 | return NULL; | |
530 | return irq; | ||
531 | } | 352 | } |
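When the device itself has no _PRT entry, the lookup above walks up through parent bridges, rotating the pin with the standard PCI swizzle but in the 1-based encoding this file now uses throughout. A sketch of that rotation (illustrative helper name):

    static inline u8 pci_swizzle_1based(u8 pin, unsigned int devfn)
    {
            return (((pin - 1) + PCI_SLOT(devfn)) % 4) + 1;
    }

    /* e.g. INTB (2) on a device in slot 3 shows up as INTA (1) on its bridge */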
532 | 353 | ||
533 | /* | ||
534 | * acpi_pci_irq_enable | ||
535 | * success: return 0 | ||
536 | * failure: return < 0 | ||
537 | */ | ||
538 | |||
539 | int acpi_pci_irq_enable(struct pci_dev *dev) | 354 | int acpi_pci_irq_enable(struct pci_dev *dev) |
540 | { | 355 | { |
541 | int irq = 0; | 356 | struct acpi_prt_entry *entry; |
542 | u8 pin = 0; | 357 | int gsi; |
358 | u8 pin; | ||
543 | int triggering = ACPI_LEVEL_SENSITIVE; | 359 | int triggering = ACPI_LEVEL_SENSITIVE; |
544 | int polarity = ACPI_ACTIVE_LOW; | 360 | int polarity = ACPI_ACTIVE_LOW; |
545 | char *link = NULL; | 361 | char *link = NULL; |
546 | char link_desc[16]; | 362 | char link_desc[16]; |
547 | int rc; | 363 | int rc; |
548 | 364 | ||
549 | |||
550 | if (!dev) | ||
551 | return -EINVAL; | ||
552 | |||
553 | pin = dev->pin; | 365 | pin = dev->pin; |
554 | if (!pin) { | 366 | if (!pin) { |
555 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 367 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
@@ -557,31 +369,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
557 | pci_name(dev))); | 369 | pci_name(dev))); |
558 | return 0; | 370 | return 0; |
559 | } | 371 | } |
560 | pin--; | ||
561 | |||
562 | if (!dev->bus) { | ||
563 | dev_err(&dev->dev, "invalid (NULL) 'bus' field\n"); | ||
564 | return -ENODEV; | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * First we check the PCI IRQ routing table (PRT) for an IRQ. PRT | ||
569 | * values override any BIOS-assigned IRQs set during boot. | ||
570 | */ | ||
571 | irq = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin, | ||
572 | &triggering, &polarity, &link, | ||
573 | acpi_pci_allocate_irq); | ||
574 | |||
575 | /* | ||
576 | * If no PRT entry was found, we'll try to derive an IRQ from the | ||
577 | * device's parent bridge. | ||
578 | */ | ||
579 | if (irq < 0) | ||
580 | irq = acpi_pci_irq_derive(dev, pin, &triggering, | ||
581 | &polarity, &link, | ||
582 | acpi_pci_allocate_irq); | ||
583 | 372 | ||
584 | if (irq < 0) { | 373 | entry = acpi_pci_irq_lookup(dev, pin); |
374 | if (!entry) { | ||
585 | /* | 375 | /* |
586 | * IDE legacy mode controller IRQs are magic. Why do compat | 376 | * IDE legacy mode controller IRQs are magic. Why do compat |
587 | * extensions always make such a nasty mess. | 377 | * extensions always make such a nasty mess. |
@@ -590,12 +380,24 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
590 | (dev->class & 0x05) == 0) | 380 | (dev->class & 0x05) == 0) |
591 | return 0; | 381 | return 0; |
592 | } | 382 | } |
383 | |||
384 | if (entry) { | ||
385 | if (entry->link) | ||
386 | gsi = acpi_pci_link_allocate_irq(entry->link, | ||
387 | entry->index, | ||
388 | &triggering, &polarity, | ||
389 | &link); | ||
390 | else | ||
391 | gsi = entry->index; | ||
392 | } else | ||
393 | gsi = -1; | ||
394 | |||
593 | /* | 395 | /* |
594 | * No IRQ known to the ACPI subsystem - maybe the BIOS / | 396 | * No IRQ known to the ACPI subsystem - maybe the BIOS / |
595 | * driver reported one, then use it. Exit in any case. | 397 | * driver reported one, then use it. Exit in any case. |
596 | */ | 398 | */ |
597 | if (irq < 0) { | 399 | if (gsi < 0) { |
598 | dev_warn(&dev->dev, "PCI INT %c: no GSI", 'A' + pin); | 400 | dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin)); |
599 | /* Interrupt Line values above 0xF are forbidden */ | 401 | /* Interrupt Line values above 0xF are forbidden */ |
600 | if (dev->irq > 0 && (dev->irq <= 0xF)) { | 402 | if (dev->irq > 0 && (dev->irq <= 0xF)) { |
601 | printk(" - using IRQ %d\n", dev->irq); | 403 | printk(" - using IRQ %d\n", dev->irq); |
@@ -608,10 +410,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
608 | } | 410 | } |
609 | } | 411 | } |
610 | 412 | ||
611 | rc = acpi_register_gsi(irq, triggering, polarity); | 413 | rc = acpi_register_gsi(gsi, triggering, polarity); |
612 | if (rc < 0) { | 414 | if (rc < 0) { |
613 | dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", | 415 | dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", |
614 | 'A' + pin); | 416 | pin_name(pin)); |
615 | return rc; | 417 | return rc; |
616 | } | 418 | } |
617 | dev->irq = rc; | 419 | dev->irq = rc; |
@@ -622,7 +424,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
622 | link_desc[0] = '\0'; | 424 | link_desc[0] = '\0'; |
623 | 425 | ||
624 | dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n", | 426 | dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n", |
625 | 'A' + pin, link_desc, irq, | 427 | pin_name(pin), link_desc, gsi, |
626 | (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", | 428 | (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", |
627 | (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); | 429 | (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); |
628 | 430 | ||
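With the table-driven lookup in place, enabling an interrupt reduces to two cases: a _PRT entry that names a link device, whose _CRS supplies the GSI, or a static entry whose index is the GSI itself. The GSI is then handed to acpi_register_gsi(), which returns the Linux IRQ number. A condensed sketch of that flow, using the signatures as they appear in this patch (the wrapper name is illustrative):

    static int resolve_and_register(struct acpi_prt_entry *entry)
    {
            int triggering = ACPI_LEVEL_SENSITIVE;
            int polarity = ACPI_ACTIVE_LOW;
            char *link = NULL;
            int gsi;

            if (entry->link)
                    gsi = acpi_pci_link_allocate_irq(entry->link, entry->index,
                                                     &triggering, &polarity, &link);
            else
                    gsi = entry->index;     /* hard-wired GSI; level/low defaults apply */

            if (gsi < 0)
                    return -ENODEV;
            return acpi_register_gsi(gsi, triggering, polarity);    /* Linux IRQ or error */
    }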
@@ -636,42 +438,28 @@ void __attribute__ ((weak)) acpi_unregister_gsi(u32 i) | |||
636 | 438 | ||
637 | void acpi_pci_irq_disable(struct pci_dev *dev) | 439 | void acpi_pci_irq_disable(struct pci_dev *dev) |
638 | { | 440 | { |
639 | int gsi = 0; | 441 | struct acpi_prt_entry *entry; |
640 | u8 pin = 0; | 442 | int gsi; |
641 | int triggering = ACPI_LEVEL_SENSITIVE; | 443 | u8 pin; |
642 | int polarity = ACPI_ACTIVE_LOW; | ||
643 | |||
644 | |||
645 | if (!dev || !dev->bus) | ||
646 | return; | ||
647 | 444 | ||
648 | pin = dev->pin; | 445 | pin = dev->pin; |
649 | if (!pin) | 446 | if (!pin) |
650 | return; | 447 | return; |
651 | pin--; | ||
652 | 448 | ||
653 | /* | 449 | entry = acpi_pci_irq_lookup(dev, pin); |
654 | * First we check the PCI IRQ routing table (PRT) for an IRQ. | 450 | if (!entry) |
655 | */ | ||
656 | gsi = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin, | ||
657 | &triggering, &polarity, NULL, | ||
658 | acpi_pci_free_irq); | ||
659 | /* | ||
660 | * If no PRT entry was found, we'll try to derive an IRQ from the | ||
661 | * device's parent bridge. | ||
662 | */ | ||
663 | if (gsi < 0) | ||
664 | gsi = acpi_pci_irq_derive(dev, pin, | ||
665 | &triggering, &polarity, NULL, | ||
666 | acpi_pci_free_irq); | ||
667 | if (gsi < 0) | ||
668 | return; | 451 | return; |
669 | 452 | ||
453 | if (entry->link) | ||
454 | gsi = acpi_pci_link_free_irq(entry->link); | ||
455 | else | ||
456 | gsi = entry->index; | ||
457 | |||
670 | /* | 458 | /* |
671 | * TBD: It might be worth clearing dev->irq by magic constant | 459 | * TBD: It might be worth clearing dev->irq by magic constant |
672 | * (e.g. PCI_UNDEFINED_IRQ). | 460 | * (e.g. PCI_UNDEFINED_IRQ). |
673 | */ | 461 | */ |
674 | 462 | ||
675 | dev_info(&dev->dev, "PCI INT %c disabled\n", 'A' + pin); | 463 | dev_info(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); |
676 | acpi_unregister_gsi(gsi); | 464 | acpi_unregister_gsi(gsi); |
677 | } | 465 | } |
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index e52ad91ce2dc..1c6e73c7865e 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c | |||
@@ -796,10 +796,6 @@ static int irqrouter_resume(struct sys_device *dev) | |||
796 | struct list_head *node = NULL; | 796 | struct list_head *node = NULL; |
797 | struct acpi_pci_link *link = NULL; | 797 | struct acpi_pci_link *link = NULL; |
798 | 798 | ||
799 | |||
800 | /* Make sure SCI is enabled again (Apple firmware bug?) */ | ||
801 | acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1); | ||
802 | |||
803 | list_for_each(node, &acpi_link.entries) { | 799 | list_for_each(node, &acpi_link.entries) { |
804 | link = list_entry(node, struct acpi_pci_link, node); | 800 | link = list_entry(node, struct acpi_pci_link, node); |
805 | if (!link) { | 801 | if (!link) { |
@@ -912,7 +908,7 @@ static int __init acpi_irq_nobalance_set(char *str) | |||
912 | 908 | ||
913 | __setup("acpi_irq_nobalance", acpi_irq_nobalance_set); | 909 | __setup("acpi_irq_nobalance", acpi_irq_nobalance_set); |
914 | 910 | ||
915 | int __init acpi_irq_balance_set(char *str) | 911 | static int __init acpi_irq_balance_set(char *str) |
916 | { | 912 | { |
917 | acpi_irq_balance = 1; | 913 | acpi_irq_balance = 1; |
918 | return 1; | 914 | return 1; |
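acpi_irq_balance_set() only ever runs through the __setup() hook, so it can lose external linkage. The general shape of an early boot-parameter handler looks like the sketch below; the parameter name is purely illustrative:

    #include <linux/init.h>

    static int __init example_flag_set(char *str)
    {
            /* parse 'str' from the kernel command line if the option takes a value */
            return 1;       /* non-zero tells the boot code the option was consumed */
    }
    __setup("example_flag", example_flag_set);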
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index bb7d50dd2818..c926e7d4a0d6 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
@@ -139,6 +139,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state) | |||
139 | { | 139 | { |
140 | acpi_status status = AE_OK; | 140 | acpi_status status = AE_OK; |
141 | unsigned long long sta = 0; | 141 | unsigned long long sta = 0; |
142 | char node_name[5]; | ||
143 | struct acpi_buffer buffer = { sizeof(node_name), node_name }; | ||
142 | 144 | ||
143 | 145 | ||
144 | if (!handle || !state) | 146 | if (!handle || !state) |
@@ -151,8 +153,10 @@ static int acpi_power_get_state(acpi_handle handle, int *state) | |||
151 | *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON: | 153 | *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON: |
152 | ACPI_POWER_RESOURCE_STATE_OFF; | 154 | ACPI_POWER_RESOURCE_STATE_OFF; |
153 | 155 | ||
156 | acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); | ||
157 | |||
154 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n", | 158 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n", |
155 | acpi_ut_get_node_name(handle), | 159 | node_name, |
156 | *state ? "on" : "off")); | 160 | *state ? "on" : "off")); |
157 | 161 | ||
158 | return 0; | 162 | return 0; |
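The debug print now uses the public acpi_get_name() with a caller-supplied four-character buffer instead of the ACPICA-internal acpi_ut_get_node_name(). A minimal sketch of that idiom (illustrative function name):

    static void show_node_name(acpi_handle handle)
    {
            char node_name[5];                              /* 4-char ACPI name plus NUL */
            struct acpi_buffer buffer = { sizeof(node_name), node_name };

            if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)))
                    pr_debug("node [%s]\n", node_name);
    }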
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/proc.c index 4dbc2271acf5..428c911dba08 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/proc.c | |||
@@ -28,8 +28,6 @@ static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset) | |||
28 | { | 28 | { |
29 | int i; | 29 | int i; |
30 | 30 | ||
31 | ACPI_FUNCTION_TRACE("acpi_system_sleep_seq_show"); | ||
32 | |||
33 | for (i = 0; i <= ACPI_STATE_S5; i++) { | 31 | for (i = 0; i <= ACPI_STATE_S5; i++) { |
34 | if (sleep_states[i]) { | 32 | if (sleep_states[i]) { |
35 | seq_printf(seq, "S%d ", i); | 33 | seq_printf(seq, "S%d ", i); |
@@ -86,49 +84,44 @@ acpi_system_write_sleep(struct file *file, | |||
86 | 84 | ||
87 | #ifdef HAVE_ACPI_LEGACY_ALARM | 85 | #ifdef HAVE_ACPI_LEGACY_ALARM |
88 | 86 | ||
87 | static u32 cmos_bcd_read(int offset, int rtc_control); | ||
88 | |||
89 | static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset) | 89 | static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset) |
90 | { | 90 | { |
91 | u32 sec, min, hr; | 91 | u32 sec, min, hr; |
92 | u32 day, mo, yr, cent = 0; | 92 | u32 day, mo, yr, cent = 0; |
93 | u32 today = 0; | ||
93 | unsigned char rtc_control = 0; | 94 | unsigned char rtc_control = 0; |
94 | unsigned long flags; | 95 | unsigned long flags; |
95 | 96 | ||
96 | ACPI_FUNCTION_TRACE("acpi_system_alarm_seq_show"); | ||
97 | |||
98 | spin_lock_irqsave(&rtc_lock, flags); | 97 | spin_lock_irqsave(&rtc_lock, flags); |
99 | 98 | ||
100 | sec = CMOS_READ(RTC_SECONDS_ALARM); | ||
101 | min = CMOS_READ(RTC_MINUTES_ALARM); | ||
102 | hr = CMOS_READ(RTC_HOURS_ALARM); | ||
103 | rtc_control = CMOS_READ(RTC_CONTROL); | 99 | rtc_control = CMOS_READ(RTC_CONTROL); |
100 | sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control); | ||
101 | min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control); | ||
102 | hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control); | ||
104 | 103 | ||
105 | /* If we ever get an FACP with proper values... */ | 104 | /* If we ever get an FACP with proper values... */ |
106 | if (acpi_gbl_FADT.day_alarm) | 105 | if (acpi_gbl_FADT.day_alarm) { |
107 | /* ACPI spec: only the low 6 bits are significant */ | 106 | /* ACPI spec: only the low 6 bits are significant */ |
108 | day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F; | 107 | day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F; |
109 | else | 108 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) |
110 | day = CMOS_READ(RTC_DAY_OF_MONTH); | 109 | day = bcd2bin(day); |
110 | } else | ||
111 | day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control); | ||
111 | if (acpi_gbl_FADT.month_alarm) | 112 | if (acpi_gbl_FADT.month_alarm) |
112 | mo = CMOS_READ(acpi_gbl_FADT.month_alarm); | 113 | mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control); |
113 | else | 114 | else { |
114 | mo = CMOS_READ(RTC_MONTH); | 115 | mo = cmos_bcd_read(RTC_MONTH, rtc_control); |
116 | today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control); | ||
117 | } | ||
115 | if (acpi_gbl_FADT.century) | 118 | if (acpi_gbl_FADT.century) |
116 | cent = CMOS_READ(acpi_gbl_FADT.century); | 119 | cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control); |
117 | 120 | ||
118 | yr = CMOS_READ(RTC_YEAR); | 121 | yr = cmos_bcd_read(RTC_YEAR, rtc_control); |
119 | 122 | ||
120 | spin_unlock_irqrestore(&rtc_lock, flags); | 123 | spin_unlock_irqrestore(&rtc_lock, flags); |
121 | 124 | ||
122 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { | ||
123 | sec = bcd2bin(sec); | ||
124 | min = bcd2bin(min); | ||
125 | hr = bcd2bin(hr); | ||
126 | day = bcd2bin(day); | ||
127 | mo = bcd2bin(mo); | ||
128 | yr = bcd2bin(yr); | ||
129 | cent = bcd2bin(cent); | ||
130 | } | ||
131 | |||
132 | /* we're trusting the FADT (see above) */ | 125 | /* we're trusting the FADT (see above) */ |
133 | if (!acpi_gbl_FADT.century) | 126 | if (!acpi_gbl_FADT.century) |
134 | /* If we're not trusting the FADT, we should at least make it | 127 | /* If we're not trusting the FADT, we should at least make it |
@@ -153,6 +146,20 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset) | |||
153 | else | 146 | else |
154 | yr += cent * 100; | 147 | yr += cent * 100; |
155 | 148 | ||
149 | /* | ||
150 | * Show correct dates for alarms up to a month into the future. | ||
151 | * This solves issues for nearly all situations with the common | ||
152 | * 30-day alarm clocks in PC hardware. | ||
153 | */ | ||
154 | if (day < today) { | ||
155 | if (mo < 12) { | ||
156 | mo += 1; | ||
157 | } else { | ||
158 | mo = 1; | ||
159 | yr += 1; | ||
160 | } | ||
161 | } | ||
162 | |||
156 | seq_printf(seq, "%4.4u-", yr); | 163 | seq_printf(seq, "%4.4u-", yr); |
157 | (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo); | 164 | (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo); |
158 | (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day); | 165 | (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day); |
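The repeated BCD handling is now centralized in the cmos_bcd_read() helper forward-declared above. Its definition sits outside this hunk; the likely shape, stated as an assumption, is a CMOS read followed by a conditional BCD-to-binary conversion, with callers holding rtc_lock:

    static u32 cmos_bcd_read(int offset, int rtc_control)
    {
            u32 val = CMOS_READ(offset);

            if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
                    val = bcd2bin(val);
            return val;
    }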
@@ -227,13 +234,11 @@ acpi_system_write_alarm(struct file *file, | |||
227 | int adjust = 0; | 234 | int adjust = 0; |
228 | unsigned char rtc_control = 0; | 235 | unsigned char rtc_control = 0; |
229 | 236 | ||
230 | ACPI_FUNCTION_TRACE("acpi_system_write_alarm"); | ||
231 | |||
232 | if (count > sizeof(alarm_string) - 1) | 237 | if (count > sizeof(alarm_string) - 1) |
233 | return_VALUE(-EINVAL); | 238 | return -EINVAL; |
234 | 239 | ||
235 | if (copy_from_user(alarm_string, buffer, count)) | 240 | if (copy_from_user(alarm_string, buffer, count)) |
236 | return_VALUE(-EFAULT); | 241 | return -EFAULT; |
237 | 242 | ||
238 | alarm_string[count] = '\0'; | 243 | alarm_string[count] = '\0'; |
239 | 244 | ||
@@ -334,7 +339,7 @@ acpi_system_write_alarm(struct file *file, | |||
334 | 339 | ||
335 | result = 0; | 340 | result = 0; |
336 | end: | 341 | end: |
337 | return_VALUE(result ? result : count); | 342 | return result ? result : count; |
338 | } | 343 | } |
339 | #endif /* HAVE_ACPI_LEGACY_ALARM */ | 344 | #endif /* HAVE_ACPI_LEGACY_ALARM */ |
340 | 345 | ||
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c index a6b662c00b67..93f91142d7ad 100644 --- a/drivers/acpi/reboot.c +++ b/drivers/acpi/reboot.c | |||
@@ -42,7 +42,7 @@ void acpi_reboot(void) | |||
42 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: | 42 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: |
43 | case ACPI_ADR_SPACE_SYSTEM_IO: | 43 | case ACPI_ADR_SPACE_SYSTEM_IO: |
44 | printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n"); | 44 | printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n"); |
45 | acpi_hw_low_level_write(8, reset_value, rr); | 45 | acpi_reset(); |
46 | break; | 46 | break; |
47 | } | 47 | } |
48 | /* Wait ten seconds */ | 48 | /* Wait ten seconds */ |
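The removed acpi_hw_low_level_write() was ACPICA-internal; the reboot path now goes through the exported acpi_reset(), which writes the FADT reset value to the FADT reset register. A hedged sketch of a caller (wrapper name is illustrative):

    #include <linux/kernel.h>
    #include <acpi/acpi.h>

    static void try_acpi_reset(void)
    {
            acpi_status status = acpi_reset();      /* FADT reset_value -> reset_register */

            if (ACPI_FAILURE(status))
                    printk(KERN_WARNING "ACPI reset failed: %s\n",
                           acpi_format_exception(status));
    }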
diff --git a/drivers/acpi/resources/Makefile b/drivers/acpi/resources/Makefile deleted file mode 100644 index 8de4f69dfa09..000000000000 --- a/drivers/acpi/resources/Makefile +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \ | ||
6 | rscalc.o rsirq.o rsmemory.o rsutils.o | ||
7 | |||
8 | obj-$(ACPI_FUTURE_USAGE) += rsdump.o | ||
9 | |||
10 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index e53e590252c0..0619734895b2 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | #include <acpi/acpi_bus.h> | 11 | #include <acpi/acpi_bus.h> |
12 | #include <acpi/acpi_drivers.h> | 12 | #include <acpi/acpi_drivers.h> |
13 | #include <acpi/actypes.h> | ||
14 | #include <linux/wait.h> | 13 | #include <linux/wait.h> |
15 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
16 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 39b7233c3485..c54d7b6c4066 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/kthread.h> | 10 | #include <linux/kthread.h> |
11 | 11 | ||
12 | #include <acpi/acpi_drivers.h> | 12 | #include <acpi/acpi_drivers.h> |
13 | #include <acpi/acinterp.h> /* for acpi_ex_eisa_id_to_string() */ | ||
14 | 13 | ||
15 | #define _COMPONENT ACPI_BUS_COMPONENT | 14 | #define _COMPONENT ACPI_BUS_COMPONENT |
16 | ACPI_MODULE_NAME("scan"); | 15 | ACPI_MODULE_NAME("scan"); |
diff --git a/drivers/acpi/sleep/sleep.h b/drivers/acpi/sleep.h index cfaf8f5b0a14..cfaf8f5b0a14 100644 --- a/drivers/acpi/sleep/sleep.h +++ b/drivers/acpi/sleep.h | |||
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile deleted file mode 100644 index f1fb888c2d29..000000000000 --- a/drivers/acpi/sleep/Makefile +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | obj-y := wakeup.o | ||
2 | obj-y += main.o | ||
3 | obj-$(CONFIG_ACPI_SLEEP) += proc.o | ||
4 | |||
5 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index 6e4107f82403..391d0358a592 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c | |||
@@ -192,65 +192,6 @@ static struct attribute_group interrupt_stats_attr_group = { | |||
192 | }; | 192 | }; |
193 | static struct kobj_attribute *counter_attrs; | 193 | static struct kobj_attribute *counter_attrs; |
194 | 194 | ||
195 | static int count_num_gpes(void) | ||
196 | { | ||
197 | int count = 0; | ||
198 | struct acpi_gpe_xrupt_info *gpe_xrupt_info; | ||
199 | struct acpi_gpe_block_info *gpe_block; | ||
200 | acpi_cpu_flags flags; | ||
201 | |||
202 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
203 | |||
204 | gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; | ||
205 | while (gpe_xrupt_info) { | ||
206 | gpe_block = gpe_xrupt_info->gpe_block_list_head; | ||
207 | while (gpe_block) { | ||
208 | count += gpe_block->register_count * | ||
209 | ACPI_GPE_REGISTER_WIDTH; | ||
210 | gpe_block = gpe_block->next; | ||
211 | } | ||
212 | gpe_xrupt_info = gpe_xrupt_info->next; | ||
213 | } | ||
214 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
215 | |||
216 | return count; | ||
217 | } | ||
218 | |||
219 | static int get_gpe_device(int index, acpi_handle *handle) | ||
220 | { | ||
221 | struct acpi_gpe_xrupt_info *gpe_xrupt_info; | ||
222 | struct acpi_gpe_block_info *gpe_block; | ||
223 | acpi_cpu_flags flags; | ||
224 | struct acpi_namespace_node *node; | ||
225 | |||
226 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
227 | |||
228 | gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; | ||
229 | while (gpe_xrupt_info) { | ||
230 | gpe_block = gpe_xrupt_info->gpe_block_list_head; | ||
231 | node = gpe_block->node; | ||
232 | while (gpe_block) { | ||
233 | index -= gpe_block->register_count * | ||
234 | ACPI_GPE_REGISTER_WIDTH; | ||
235 | if (index < 0) { | ||
236 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
237 | /* return NULL if it's FADT GPE */ | ||
238 | if (node->type != ACPI_TYPE_DEVICE) | ||
239 | *handle = NULL; | ||
240 | else | ||
241 | *handle = node; | ||
242 | return 0; | ||
243 | } | ||
244 | node = gpe_block->node; | ||
245 | gpe_block = gpe_block->next; | ||
246 | } | ||
247 | gpe_xrupt_info = gpe_xrupt_info->next; | ||
248 | } | ||
249 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
250 | |||
251 | return -ENODEV; | ||
252 | } | ||
253 | |||
254 | static void delete_gpe_attr_array(void) | 195 | static void delete_gpe_attr_array(void) |
255 | { | 196 | { |
256 | struct event_counter *tmp = all_counters; | 197 | struct event_counter *tmp = all_counters; |
@@ -309,7 +250,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle) | |||
309 | goto end; | 250 | goto end; |
310 | 251 | ||
311 | if (index < num_gpes) { | 252 | if (index < num_gpes) { |
312 | result = get_gpe_device(index, handle); | 253 | result = acpi_get_gpe_device(index, handle); |
313 | if (result) { | 254 | if (result) { |
314 | ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, | 255 | ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, |
315 | "Invalid GPE 0x%x\n", index)); | 256 | "Invalid GPE 0x%x\n", index)); |
@@ -436,7 +377,7 @@ void acpi_irq_stats_init(void) | |||
436 | if (all_counters) | 377 | if (all_counters) |
437 | return; | 378 | return; |
438 | 379 | ||
439 | num_gpes = count_num_gpes(); | 380 | num_gpes = acpi_current_gpe_count; |
440 | num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA; | 381 | num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA; |
441 | 382 | ||
442 | all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1), | 383 | all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1), |
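The local GPE walkers are replaced by exported ACPICA facilities: acpi_current_gpe_count gives the total number of GPEs, and acpi_get_gpe_device() maps an index back to its owning GPE block device (NULL for FADT-defined blocks). A small sketch of how they pair up, assuming acpi_get_gpe_device() follows the usual acpi_status convention:

    static void list_gpe_blocks(void)
    {
            u32 i;

            for (i = 0; i < acpi_current_gpe_count; i++) {
                    acpi_handle gpe_dev = NULL;

                    if (ACPI_FAILURE(acpi_get_gpe_device(i, &gpe_dev)))
                            break;
                    pr_debug("GPE %u belongs to %s GPE block\n", i,
                             gpe_dev ? "a device-defined" : "the FADT");
            }
    }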
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile deleted file mode 100644 index 7385efa61622..000000000000 --- a/drivers/acpi/tables/Makefile +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o | ||
6 | |||
7 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/utilities/Makefile b/drivers/acpi/utilities/Makefile deleted file mode 100644 index 88eff14c4894..000000000000 --- a/drivers/acpi/utilities/Makefile +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \ | ||
6 | utcopy.o utdelete.o utglobal.o utmath.o utobject.o \ | ||
7 | utstate.o utmutex.o utobject.o utcache.o utresrc.o | ||
8 | |||
9 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c deleted file mode 100644 index 245fa80cf600..000000000000 --- a/drivers/acpi/utilities/utcache.c +++ /dev/null | |||
@@ -1,314 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: utcache - local cache allocation routines | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | |||
46 | #define _COMPONENT ACPI_UTILITIES | ||
47 | ACPI_MODULE_NAME("utcache") | ||
48 | #ifdef ACPI_USE_LOCAL_CACHE | ||
49 | /******************************************************************************* | ||
50 | * | ||
51 | * FUNCTION: acpi_os_create_cache | ||
52 | * | ||
53 | * PARAMETERS: cache_name - Ascii name for the cache | ||
54 | * object_size - Size of each cached object | ||
55 | * max_depth - Maximum depth of the cache (in objects) | ||
56 | * return_cache - Where the new cache object is returned | ||
57 | * | ||
58 | * RETURN: Status | ||
59 | * | ||
60 | * DESCRIPTION: Create a cache object | ||
61 | * | ||
62 | ******************************************************************************/ | ||
63 | acpi_status | ||
64 | acpi_os_create_cache(char *cache_name, | ||
65 | u16 object_size, | ||
66 | u16 max_depth, struct acpi_memory_list ** return_cache) | ||
67 | { | ||
68 | struct acpi_memory_list *cache; | ||
69 | |||
70 | ACPI_FUNCTION_ENTRY(); | ||
71 | |||
72 | if (!cache_name || !return_cache || (object_size < 16)) { | ||
73 | return (AE_BAD_PARAMETER); | ||
74 | } | ||
75 | |||
76 | /* Create the cache object */ | ||
77 | |||
78 | cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); | ||
79 | if (!cache) { | ||
80 | return (AE_NO_MEMORY); | ||
81 | } | ||
82 | |||
83 | /* Populate the cache object and return it */ | ||
84 | |||
85 | ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); | ||
86 | cache->link_offset = 8; | ||
87 | cache->list_name = cache_name; | ||
88 | cache->object_size = object_size; | ||
89 | cache->max_depth = max_depth; | ||
90 | |||
91 | *return_cache = cache; | ||
92 | return (AE_OK); | ||
93 | } | ||
94 | |||
95 | /******************************************************************************* | ||
96 | * | ||
97 | * FUNCTION: acpi_os_purge_cache | ||
98 | * | ||
99 | * PARAMETERS: Cache - Handle to cache object | ||
100 | * | ||
101 | * RETURN: Status | ||
102 | * | ||
103 | * DESCRIPTION: Free all objects within the requested cache. | ||
104 | * | ||
105 | ******************************************************************************/ | ||
106 | |||
107 | acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache) | ||
108 | { | ||
109 | char *next; | ||
110 | |||
111 | ACPI_FUNCTION_ENTRY(); | ||
112 | |||
113 | if (!cache) { | ||
114 | return (AE_BAD_PARAMETER); | ||
115 | } | ||
116 | |||
117 | /* Walk the list of objects in this cache */ | ||
118 | |||
119 | while (cache->list_head) { | ||
120 | |||
121 | /* Delete and unlink one cached state object */ | ||
122 | |||
123 | next = *(ACPI_CAST_INDIRECT_PTR(char, | ||
124 | &(((char *)cache-> | ||
125 | list_head)[cache-> | ||
126 | link_offset]))); | ||
127 | ACPI_FREE(cache->list_head); | ||
128 | |||
129 | cache->list_head = next; | ||
130 | cache->current_depth--; | ||
131 | } | ||
132 | |||
133 | return (AE_OK); | ||
134 | } | ||
135 | |||
136 | /******************************************************************************* | ||
137 | * | ||
138 | * FUNCTION: acpi_os_delete_cache | ||
139 | * | ||
140 | * PARAMETERS: Cache - Handle to cache object | ||
141 | * | ||
142 | * RETURN: Status | ||
143 | * | ||
144 | * DESCRIPTION: Free all objects within the requested cache and delete the | ||
145 | * cache object. | ||
146 | * | ||
147 | ******************************************************************************/ | ||
148 | |||
149 | acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache) | ||
150 | { | ||
151 | acpi_status status; | ||
152 | |||
153 | ACPI_FUNCTION_ENTRY(); | ||
154 | |||
155 | /* Purge all objects in the cache */ | ||
156 | |||
157 | status = acpi_os_purge_cache(cache); | ||
158 | if (ACPI_FAILURE(status)) { | ||
159 | return (status); | ||
160 | } | ||
161 | |||
162 | /* Now we can delete the cache object */ | ||
163 | |||
164 | ACPI_FREE(cache); | ||
165 | return (AE_OK); | ||
166 | } | ||
167 | |||
168 | /******************************************************************************* | ||
169 | * | ||
170 | * FUNCTION: acpi_os_release_object | ||
171 | * | ||
172 | * PARAMETERS: Cache - Handle to cache object | ||
173 | * Object - The object to be released | ||
174 | * | ||
175 | * RETURN: None | ||
176 | * | ||
177 | * DESCRIPTION: Release an object to the specified cache. If cache is full, | ||
178 | * the object is deleted. | ||
179 | * | ||
180 | ******************************************************************************/ | ||
181 | |||
182 | acpi_status | ||
183 | acpi_os_release_object(struct acpi_memory_list * cache, void *object) | ||
184 | { | ||
185 | acpi_status status; | ||
186 | |||
187 | ACPI_FUNCTION_ENTRY(); | ||
188 | |||
189 | if (!cache || !object) { | ||
190 | return (AE_BAD_PARAMETER); | ||
191 | } | ||
192 | |||
193 | /* If cache is full, just free this object */ | ||
194 | |||
195 | if (cache->current_depth >= cache->max_depth) { | ||
196 | ACPI_FREE(object); | ||
197 | ACPI_MEM_TRACKING(cache->total_freed++); | ||
198 | } | ||
199 | |||
200 | /* Otherwise put this object back into the cache */ | ||
201 | |||
202 | else { | ||
203 | status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); | ||
204 | if (ACPI_FAILURE(status)) { | ||
205 | return (status); | ||
206 | } | ||
207 | |||
208 | /* Mark the object as cached */ | ||
209 | |||
210 | ACPI_MEMSET(object, 0xCA, cache->object_size); | ||
211 | ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED); | ||
212 | |||
213 | /* Put the object at the head of the cache list */ | ||
214 | |||
215 | *(ACPI_CAST_INDIRECT_PTR(char, | ||
216 | &(((char *)object)[cache-> | ||
217 | link_offset]))) = | ||
218 | cache->list_head; | ||
219 | cache->list_head = object; | ||
220 | cache->current_depth++; | ||
221 | |||
222 | (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
223 | } | ||
224 | |||
225 | return (AE_OK); | ||
226 | } | ||
227 | |||
228 | /******************************************************************************* | ||
229 | * | ||
230 | * FUNCTION: acpi_os_acquire_object | ||
231 | * | ||
232 | * PARAMETERS: Cache - Handle to cache object | ||
233 | * | ||
234 | * RETURN: the acquired object. NULL on error | ||
235 | * | ||
236 | * DESCRIPTION: Get an object from the specified cache. If cache is empty, | ||
237 | * the object is allocated. | ||
238 | * | ||
239 | ******************************************************************************/ | ||
240 | |||
241 | void *acpi_os_acquire_object(struct acpi_memory_list *cache) | ||
242 | { | ||
243 | acpi_status status; | ||
244 | void *object; | ||
245 | |||
246 | ACPI_FUNCTION_NAME(os_acquire_object); | ||
247 | |||
248 | if (!cache) { | ||
249 | return (NULL); | ||
250 | } | ||
251 | |||
252 | status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); | ||
253 | if (ACPI_FAILURE(status)) { | ||
254 | return (NULL); | ||
255 | } | ||
256 | |||
257 | ACPI_MEM_TRACKING(cache->requests++); | ||
258 | |||
259 | /* Check the cache first */ | ||
260 | |||
261 | if (cache->list_head) { | ||
262 | |||
263 | /* There is an object available, use it */ | ||
264 | |||
265 | object = cache->list_head; | ||
266 | cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char, | ||
267 | &(((char *) | ||
268 | object)[cache-> | ||
269 | link_offset]))); | ||
270 | |||
271 | cache->current_depth--; | ||
272 | |||
273 | ACPI_MEM_TRACKING(cache->hits++); | ||
274 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
275 | "Object %p from %s cache\n", object, | ||
276 | cache->list_name)); | ||
277 | |||
278 | status = acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
279 | if (ACPI_FAILURE(status)) { | ||
280 | return (NULL); | ||
281 | } | ||
282 | |||
283 | /* Clear (zero) the previously used Object */ | ||
284 | |||
285 | ACPI_MEMSET(object, 0, cache->object_size); | ||
286 | } else { | ||
287 | /* The cache is empty, create a new object */ | ||
288 | |||
289 | ACPI_MEM_TRACKING(cache->total_allocated++); | ||
290 | |||
291 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
292 | if ((cache->total_allocated - cache->total_freed) > | ||
293 | cache->max_occupied) { | ||
294 | cache->max_occupied = | ||
295 | cache->total_allocated - cache->total_freed; | ||
296 | } | ||
297 | #endif | ||
298 | |||
299 | /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */ | ||
300 | |||
301 | status = acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
302 | if (ACPI_FAILURE(status)) { | ||
303 | return (NULL); | ||
304 | } | ||
305 | |||
306 | object = ACPI_ALLOCATE_ZEROED(cache->object_size); | ||
307 | if (!object) { | ||
308 | return (NULL); | ||
309 | } | ||
310 | } | ||
311 | |||
312 | return (object); | ||
313 | } | ||
314 | #endif /* ACPI_USE_LOCAL_CACHE */ | ||
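With the ACPICA-local cache implementation deleted, the cache hooks fall through to the Linux OSL, which backs them with slab caches when ACPI_USE_LOCAL_CACHE is not defined. An illustrative sketch modeled on drivers/acpi/osl.c; treat the exact signatures here as assumptions rather than the patch's code:

    #include <linux/slab.h>

    acpi_status acpi_os_create_cache(char *name, u16 size, u16 depth,
                                     acpi_cache_t **cache)
    {
            *cache = kmem_cache_create(name, size, 0, 0, NULL);
            return *cache ? AE_OK : AE_ERROR;
    }

    void *acpi_os_acquire_object(acpi_cache_t *cache)
    {
            return kmem_cache_zalloc(cache, GFP_KERNEL);    /* zeroed, like the old cache path */
    }

    acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
    {
            kmem_cache_free(cache, object);
            return AE_OK;
    }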
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index baa441929720..f261737636da 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/backlight.h> | 36 | #include <linux/backlight.h> |
37 | #include <linux/thermal.h> | 37 | #include <linux/thermal.h> |
38 | #include <linux/video_output.h> | 38 | #include <linux/video_output.h> |
39 | #include <linux/sort.h> | ||
39 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
40 | 41 | ||
41 | #include <acpi/acpi_bus.h> | 42 | #include <acpi/acpi_bus.h> |
@@ -481,6 +482,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) | |||
481 | int status = AE_OK; | 482 | int status = AE_OK; |
482 | union acpi_object arg0 = { ACPI_TYPE_INTEGER }; | 483 | union acpi_object arg0 = { ACPI_TYPE_INTEGER }; |
483 | struct acpi_object_list args = { 1, &arg0 }; | 484 | struct acpi_object_list args = { 1, &arg0 }; |
485 | int state; | ||
484 | 486 | ||
485 | 487 | ||
486 | arg0.integer.value = level; | 488 | arg0.integer.value = level; |
@@ -489,6 +491,10 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) | |||
489 | status = acpi_evaluate_object(device->dev->handle, "_BCM", | 491 | status = acpi_evaluate_object(device->dev->handle, "_BCM", |
490 | &args, NULL); | 492 | &args, NULL); |
491 | device->brightness->curr = level; | 493 | device->brightness->curr = level; |
494 | for (state = 2; state < device->brightness->count; state++) | ||
495 | if (level == device->brightness->levels[state]) | ||
496 | device->backlight->props.brightness = state - 2; | ||
497 | |||
492 | return status; | 498 | return status; |
493 | } | 499 | } |
494 | 500 | ||
@@ -626,6 +632,16 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag) | |||
626 | } | 632 | } |
627 | 633 | ||
628 | /* | 634 | /* |
635 | * Simple comparison function used to sort backlight levels. | ||
636 | */ | ||
637 | |||
638 | static int | ||
639 | acpi_video_cmp_level(const void *a, const void *b) | ||
640 | { | ||
641 | return *(int *)a - *(int *)b; | ||
642 | } | ||
643 | |||
644 | /* | ||
629 | * Arg: | 645 | * Arg: |
630 | * device : video output device (LCD, CRT, ..) | 646 | * device : video output device (LCD, CRT, ..) |
631 | * | 647 | * |
@@ -676,6 +692,10 @@ acpi_video_init_brightness(struct acpi_video_device *device) | |||
676 | count++; | 692 | count++; |
677 | } | 693 | } |
678 | 694 | ||
695 | /* don't sort the first two brightness levels */ | ||
696 | sort(&br->levels[2], count - 2, sizeof(br->levels[2]), | ||
697 | acpi_video_cmp_level, NULL); | ||
698 | |||
679 | if (count < 2) | 699 | if (count < 2) |
680 | goto out_free_levels; | 700 | goto out_free_levels; |
681 | 701 | ||
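The new sort() call leans on lib/sort.c: a heapsort that takes a compare callback and an optional swap callback (NULL selects the built-in swap). A standalone sketch of sorting a level array while leaving the first two _BCL entries, which are special-cased above, in place:

    #include <linux/sort.h>

    static int cmp_level(const void *a, const void *b)
    {
            return *(const int *)a - *(const int *)b;
    }

    static void sort_levels(int *levels, int count)
    {
            if (count > 2)
                    sort(&levels[2], count - 2, sizeof(levels[0]), cmp_level, NULL);
    }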
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index f022eb6f5637..50e3d2dbf3af 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -234,7 +234,7 @@ EXPORT_SYMBOL(acpi_video_display_switch_support); | |||
234 | * To force that backlight or display output switching is processed by vendor | 234 | * To force that backlight or display output switching is processed by vendor |
235 | * specific acpi drivers or video.ko driver. | 235 | * specific acpi drivers or video.ko driver. |
236 | */ | 236 | */ |
237 | int __init acpi_backlight(char *str) | 237 | static int __init acpi_backlight(char *str) |
238 | { | 238 | { |
239 | if (str == NULL || *str == '\0') | 239 | if (str == NULL || *str == '\0') |
240 | return 1; | 240 | return 1; |
@@ -250,7 +250,7 @@ int __init acpi_backlight(char *str) | |||
250 | } | 250 | } |
251 | __setup("acpi_backlight=", acpi_backlight); | 251 | __setup("acpi_backlight=", acpi_backlight); |
252 | 252 | ||
253 | int __init acpi_display_output(char *str) | 253 | static int __init acpi_display_output(char *str) |
254 | { | 254 | { |
255 | if (str == NULL || *str == '\0') | 255 | if (str == NULL || *str == '\0') |
256 | return 1; | 256 | return 1; |
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/wakeup.c index dea4c23df764..2d34806d45dd 100644 --- a/drivers/acpi/sleep/wakeup.c +++ b/drivers/acpi/wakeup.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <acpi/acpi_drivers.h> | 8 | #include <acpi/acpi_drivers.h> |
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <acpi/acevents.h> | ||
12 | #include "sleep.h" | 11 | #include "sleep.h" |
13 | 12 | ||
14 | #define _COMPONENT ACPI_SYSTEM_COMPONENT | 13 | #define _COMPONENT ACPI_SYSTEM_COMPONENT |
@@ -28,8 +27,6 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) | |||
28 | { | 27 | { |
29 | struct list_head *node, *next; | 28 | struct list_head *node, *next; |
30 | 29 | ||
31 | ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device_prep"); | ||
32 | |||
33 | spin_lock(&acpi_device_lock); | 30 | spin_lock(&acpi_device_lock); |
34 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { | 31 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { |
35 | struct acpi_device *dev = container_of(node, | 32 | struct acpi_device *dev = container_of(node, |
@@ -61,7 +58,6 @@ void acpi_enable_wakeup_device(u8 sleep_state) | |||
61 | * Caution: this routine must be invoked when interrupt is disabled | 58 | * Caution: this routine must be invoked when interrupt is disabled |
62 | * Refer ACPI2.0: P212 | 59 | * Refer ACPI2.0: P212 |
63 | */ | 60 | */ |
64 | ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device"); | ||
65 | spin_lock(&acpi_device_lock); | 61 | spin_lock(&acpi_device_lock); |
66 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { | 62 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { |
67 | struct acpi_device *dev = | 63 | struct acpi_device *dev = |
@@ -103,8 +99,6 @@ void acpi_disable_wakeup_device(u8 sleep_state) | |||
103 | { | 99 | { |
104 | struct list_head *node, *next; | 100 | struct list_head *node, *next; |
105 | 101 | ||
106 | ACPI_FUNCTION_TRACE("acpi_disable_wakeup_device"); | ||
107 | |||
108 | spin_lock(&acpi_device_lock); | 102 | spin_lock(&acpi_device_lock); |
109 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { | 103 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { |
110 | struct acpi_device *dev = | 104 | struct acpi_device *dev = |
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index ef02e488d468..6273d98d00eb 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c | |||
@@ -19,12 +19,6 @@ | |||
19 | #include "libata.h" | 19 | #include "libata.h" |
20 | 20 | ||
21 | #include <acpi/acpi_bus.h> | 21 | #include <acpi/acpi_bus.h> |
22 | #include <acpi/acnames.h> | ||
23 | #include <acpi/acnamesp.h> | ||
24 | #include <acpi/acparser.h> | ||
25 | #include <acpi/acexcep.h> | ||
26 | #include <acpi/acmacros.h> | ||
27 | #include <acpi/actypes.h> | ||
28 | 22 | ||
29 | enum { | 23 | enum { |
30 | ATA_ACPI_FILTER_SETXFER = 1 << 0, | 24 | ATA_ACPI_FILTER_SETXFER = 1 << 0, |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 175df54eb664..c507a9ac78f4 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4556,7 +4556,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc) | |||
4556 | struct scatterlist *sg = qc->sg; | 4556 | struct scatterlist *sg = qc->sg; |
4557 | int dir = qc->dma_dir; | 4557 | int dir = qc->dma_dir; |
4558 | 4558 | ||
4559 | WARN_ON(sg == NULL); | 4559 | WARN_ON_ONCE(sg == NULL); |
4560 | 4560 | ||
4561 | VPRINTK("unmapping %u sg elements\n", qc->n_elem); | 4561 | VPRINTK("unmapping %u sg elements\n", qc->n_elem); |
4562 | 4562 | ||
@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) | |||
4776 | struct ata_port *ap = qc->ap; | 4776 | struct ata_port *ap = qc->ap; |
4777 | unsigned int tag; | 4777 | unsigned int tag; |
4778 | 4778 | ||
4779 | WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ | 4779 | WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ |
4780 | 4780 | ||
4781 | qc->flags = 0; | 4781 | qc->flags = 0; |
4782 | tag = qc->tag; | 4782 | tag = qc->tag; |
@@ -4791,8 +4791,8 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) | |||
4791 | struct ata_port *ap = qc->ap; | 4791 | struct ata_port *ap = qc->ap; |
4792 | struct ata_link *link = qc->dev->link; | 4792 | struct ata_link *link = qc->dev->link; |
4793 | 4793 | ||
4794 | WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ | 4794 | WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ |
4795 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); | 4795 | WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); |
4796 | 4796 | ||
4797 | if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) | 4797 | if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) |
4798 | ata_sg_clean(qc); | 4798 | ata_sg_clean(qc); |
@@ -4878,7 +4878,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
4878 | struct ata_device *dev = qc->dev; | 4878 | struct ata_device *dev = qc->dev; |
4879 | struct ata_eh_info *ehi = &dev->link->eh_info; | 4879 | struct ata_eh_info *ehi = &dev->link->eh_info; |
4880 | 4880 | ||
4881 | WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); | 4881 | WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); |
4882 | 4882 | ||
4883 | if (unlikely(qc->err_mask)) | 4883 | if (unlikely(qc->err_mask)) |
4884 | qc->flags |= ATA_QCFLAG_FAILED; | 4884 | qc->flags |= ATA_QCFLAG_FAILED; |
@@ -5000,16 +5000,16 @@ void ata_qc_issue(struct ata_queued_cmd *qc) | |||
5000 | * check is skipped for old EH because it reuses active qc to | 5000 | * check is skipped for old EH because it reuses active qc to |
5001 | * request ATAPI sense. | 5001 | * request ATAPI sense. |
5002 | */ | 5002 | */ |
5003 | WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); | 5003 | WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); |
5004 | 5004 | ||
5005 | if (ata_is_ncq(prot)) { | 5005 | if (ata_is_ncq(prot)) { |
5006 | WARN_ON(link->sactive & (1 << qc->tag)); | 5006 | WARN_ON_ONCE(link->sactive & (1 << qc->tag)); |
5007 | 5007 | ||
5008 | if (!link->sactive) | 5008 | if (!link->sactive) |
5009 | ap->nr_active_links++; | 5009 | ap->nr_active_links++; |
5010 | link->sactive |= 1 << qc->tag; | 5010 | link->sactive |= 1 << qc->tag; |
5011 | } else { | 5011 | } else { |
5012 | WARN_ON(link->sactive); | 5012 | WARN_ON_ONCE(link->sactive); |
5013 | 5013 | ||
5014 | ap->nr_active_links++; | 5014 | ap->nr_active_links++; |
5015 | link->active_tag = qc->tag; | 5015 | link->active_tag = qc->tag; |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index c59ad76c84b1..0eae9b453556 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -578,7 +578,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
578 | } | 578 | } |
579 | 579 | ||
580 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | 580 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { |
581 | WARN_ON(!ioaddr->ctl_addr); | 581 | WARN_ON_ONCE(!ioaddr->ctl_addr); |
582 | iowrite8(tf->hob_feature, ioaddr->feature_addr); | 582 | iowrite8(tf->hob_feature, ioaddr->feature_addr); |
583 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); | 583 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); |
584 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); | 584 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); |
@@ -651,7 +651,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
651 | iowrite8(tf->ctl, ioaddr->ctl_addr); | 651 | iowrite8(tf->ctl, ioaddr->ctl_addr); |
652 | ap->last_ctl = tf->ctl; | 652 | ap->last_ctl = tf->ctl; |
653 | } else | 653 | } else |
654 | WARN_ON(1); | 654 | WARN_ON_ONCE(1); |
655 | } | 655 | } |
656 | } | 656 | } |
657 | EXPORT_SYMBOL_GPL(ata_sff_tf_read); | 657 | EXPORT_SYMBOL_GPL(ata_sff_tf_read); |
@@ -891,7 +891,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc) | |||
891 | /* READ/WRITE MULTIPLE */ | 891 | /* READ/WRITE MULTIPLE */ |
892 | unsigned int nsect; | 892 | unsigned int nsect; |
893 | 893 | ||
894 | WARN_ON(qc->dev->multi_count == 0); | 894 | WARN_ON_ONCE(qc->dev->multi_count == 0); |
895 | 895 | ||
896 | nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, | 896 | nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, |
897 | qc->dev->multi_count); | 897 | qc->dev->multi_count); |
@@ -918,7 +918,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
918 | { | 918 | { |
919 | /* send SCSI cdb */ | 919 | /* send SCSI cdb */ |
920 | DPRINTK("send cdb\n"); | 920 | DPRINTK("send cdb\n"); |
921 | WARN_ON(qc->dev->cdb_len < 12); | 921 | WARN_ON_ONCE(qc->dev->cdb_len < 12); |
922 | 922 | ||
923 | ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); | 923 | ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); |
924 | ata_sff_sync(ap); | 924 | ata_sff_sync(ap); |
@@ -1014,7 +1014,7 @@ next_sg: | |||
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | /* consumed can be larger than count only for the last transfer */ | 1016 | /* consumed can be larger than count only for the last transfer */ |
1017 | WARN_ON(qc->cursg && count != consumed); | 1017 | WARN_ON_ONCE(qc->cursg && count != consumed); |
1018 | 1018 | ||
1019 | if (bytes) | 1019 | if (bytes) |
1020 | goto next_sg; | 1020 | goto next_sg; |
@@ -1172,13 +1172,13 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | |||
1172 | unsigned long flags = 0; | 1172 | unsigned long flags = 0; |
1173 | int poll_next; | 1173 | int poll_next; |
1174 | 1174 | ||
1175 | WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); | 1175 | WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); |
1176 | 1176 | ||
1177 | /* Make sure ata_sff_qc_issue() does not throw things | 1177 | /* Make sure ata_sff_qc_issue() does not throw things |
1178 | * like DMA polling into the workqueue. Notice that | 1178 | * like DMA polling into the workqueue. Notice that |
1179 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). | 1179 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). |
1180 | */ | 1180 | */ |
1181 | WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); | 1181 | WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); |
1182 | 1182 | ||
1183 | fsm_start: | 1183 | fsm_start: |
1184 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", | 1184 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", |
@@ -1387,7 +1387,7 @@ fsm_start: | |||
1387 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", | 1387 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", |
1388 | ap->print_id, qc->dev->devno, status); | 1388 | ap->print_id, qc->dev->devno, status); |
1389 | 1389 | ||
1390 | WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); | 1390 | WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); |
1391 | 1391 | ||
1392 | ap->hsm_task_state = HSM_ST_IDLE; | 1392 | ap->hsm_task_state = HSM_ST_IDLE; |
1393 | 1393 | ||
@@ -1423,7 +1423,7 @@ void ata_pio_task(struct work_struct *work) | |||
1423 | int poll_next; | 1423 | int poll_next; |
1424 | 1424 | ||
1425 | fsm_start: | 1425 | fsm_start: |
1426 | WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); | 1426 | WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); |
1427 | 1427 | ||
1428 | /* | 1428 | /* |
1429 | * This is purely heuristic. This is a fast path. | 1429 | * This is purely heuristic. This is a fast path. |
@@ -1512,7 +1512,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1512 | break; | 1512 | break; |
1513 | 1513 | ||
1514 | case ATA_PROT_DMA: | 1514 | case ATA_PROT_DMA: |
1515 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | 1515 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); |
1516 | 1516 | ||
1517 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | 1517 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ |
1518 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 1518 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
@@ -1564,7 +1564,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1564 | break; | 1564 | break; |
1565 | 1565 | ||
1566 | case ATAPI_PROT_DMA: | 1566 | case ATAPI_PROT_DMA: |
1567 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | 1567 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); |
1568 | 1568 | ||
1569 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | 1569 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ |
1570 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 1570 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
@@ -1576,7 +1576,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1576 | break; | 1576 | break; |
1577 | 1577 | ||
1578 | default: | 1578 | default: |
1579 | WARN_ON(1); | 1579 | WARN_ON_ONCE(1); |
1580 | return AC_ERR_SYSTEM; | 1580 | return AC_ERR_SYSTEM; |
1581 | } | 1581 | } |
1582 | 1582 | ||
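The libata hunks above are a mechanical conversion of assertions in the command issue/complete fast paths from WARN_ON() to WARN_ON_ONCE(), which prints the warning and backtrace only the first time a given call site fires instead of flooding the log on every request. A tiny kernel-style sketch of the distinction (the condition is invented, not taken from libata):

#include <linux/kernel.h>
#include <linux/bug.h>

static bool example_check(int cond)
{
	/* WARN_ON() logs a backtrace every time cond is true */
	WARN_ON(cond);

	/* WARN_ON_ONCE() logs only on the first hit at this call site;
	 * both return the evaluated condition so callers can branch on it
	 */
	return WARN_ON_ONCE(cond);
}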
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index e2e332d8ff95..8b77a9802df1 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c | |||
@@ -13,12 +13,6 @@ | |||
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <scsi/scsi_host.h> | 14 | #include <scsi/scsi_host.h> |
15 | #include <acpi/acpi_bus.h> | 15 | #include <acpi/acpi_bus.h> |
16 | #include <acpi/acnames.h> | ||
17 | #include <acpi/acnamesp.h> | ||
18 | #include <acpi/acparser.h> | ||
19 | #include <acpi/acexcep.h> | ||
20 | #include <acpi/acmacros.h> | ||
21 | #include <acpi/actypes.h> | ||
22 | 16 | ||
23 | #include <linux/libata.h> | 17 | #include <linux/libata.h> |
24 | #include <linux/ata.h> | 18 | #include <linux/ata.h> |
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c index 68f052b42ed7..ed306eb1057f 100644 --- a/drivers/char/tpm/tpm_bios.c +++ b/drivers/char/tpm/tpm_bios.c | |||
@@ -23,8 +23,6 @@ | |||
23 | #include <linux/security.h> | 23 | #include <linux/security.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <acpi/acpi.h> | 25 | #include <acpi/acpi.h> |
26 | #include <acpi/actypes.h> | ||
27 | #include <acpi/actbl.h> | ||
28 | #include "tpm.h" | 26 | #include "tpm.h" |
29 | 27 | ||
30 | #define TCG_EVENT_NAME_LEN_MAX 255 | 28 | #define TCG_EVENT_NAME_LEN_MAX 255 |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 8d7cf3f31450..f1df59f59a37 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -15,12 +15,14 @@ | |||
15 | #include <linux/tick.h> | 15 | #include <linux/tick.h> |
16 | 16 | ||
17 | #define BREAK_FUZZ 4 /* 4 us */ | 17 | #define BREAK_FUZZ 4 /* 4 us */ |
18 | #define PRED_HISTORY_PCT 50 | ||
18 | 19 | ||
19 | struct menu_device { | 20 | struct menu_device { |
20 | int last_state_idx; | 21 | int last_state_idx; |
21 | 22 | ||
22 | unsigned int expected_us; | 23 | unsigned int expected_us; |
23 | unsigned int predicted_us; | 24 | unsigned int predicted_us; |
25 | unsigned int current_predicted_us; | ||
24 | unsigned int last_measured_us; | 26 | unsigned int last_measured_us; |
25 | unsigned int elapsed_us; | 27 | unsigned int elapsed_us; |
26 | }; | 28 | }; |
@@ -47,6 +49,12 @@ static int menu_select(struct cpuidle_device *dev) | |||
47 | data->expected_us = | 49 | data->expected_us = |
48 | (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; | 50 | (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; |
49 | 51 | ||
52 | /* Recalculate predicted_us based on prediction_history_pct */ | ||
53 | data->predicted_us *= PRED_HISTORY_PCT; | ||
54 | data->predicted_us += (100 - PRED_HISTORY_PCT) * | ||
55 | data->current_predicted_us; | ||
56 | data->predicted_us /= 100; | ||
57 | |||
50 | /* find the deepest idle state that satisfies our constraints */ | 58 | /* find the deepest idle state that satisfies our constraints */ |
51 | for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) { | 59 | for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) { |
52 | struct cpuidle_state *s = &dev->states[i]; | 60 | struct cpuidle_state *s = &dev->states[i]; |
@@ -97,7 +105,7 @@ static void menu_reflect(struct cpuidle_device *dev) | |||
97 | measured_us = -1; | 105 | measured_us = -1; |
98 | 106 | ||
99 | /* Predict time until next break event */ | 107 | /* Predict time until next break event */ |
100 | data->predicted_us = max(measured_us, data->last_measured_us); | 108 | data->current_predicted_us = max(measured_us, data->last_measured_us); |
101 | 109 | ||
102 | if (last_idle_us + BREAK_FUZZ < | 110 | if (last_idle_us + BREAK_FUZZ < |
103 | data->expected_us - target->exit_latency) { | 111 | data->expected_us - target->exit_latency) { |
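With PRED_HISTORY_PCT at 50, the new predicted_us update above is an exponential moving average of the per-wakeup predictions, so a single mispredicted long sleep no longer yanks the estimate all the way to the outlier. A small standalone sketch of the arithmetic, with invented values:

#include <stdio.h>

#define PRED_HISTORY_PCT 50

static unsigned int update_prediction(unsigned int predicted_us,
				      unsigned int current_predicted_us)
{
	/* same weighting as the menu_select() hunk above */
	predicted_us *= PRED_HISTORY_PCT;
	predicted_us += (100 - PRED_HISTORY_PCT) * current_predicted_us;
	return predicted_us / 100;
}

int main(void)
{
	unsigned int p = 100;		/* steady state: short sleeps */

	p = update_prediction(p, 10000);	/* one long outlier */
	printf("after outlier: %u us\n", p);	/* 5050, not 10000 */

	p = update_prediction(p, 100);		/* back to short sleeps */
	printf("decaying back: %u us\n", p);	/* 2575 */
	return 0;
}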
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index d883e1b8bb8c..55433849bfa6 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c | |||
@@ -270,6 +270,6 @@ static void __exit dca_exit(void) | |||
270 | dca_sysfs_exit(); | 270 | dca_sysfs_exit(); |
271 | } | 271 | } |
272 | 272 | ||
273 | subsys_initcall(dca_init); | 273 | arch_initcall(dca_init); |
274 | module_exit(dca_exit); | 274 | module_exit(dca_exit); |
275 | 275 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 904e57558bb5..e34b06420816 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -33,7 +33,6 @@ config INTEL_IOATDMA | |||
33 | config INTEL_IOP_ADMA | 33 | config INTEL_IOP_ADMA |
34 | tristate "Intel IOP ADMA support" | 34 | tristate "Intel IOP ADMA support" |
35 | depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 35 | depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX |
36 | select ASYNC_CORE | ||
37 | select DMA_ENGINE | 36 | select DMA_ENGINE |
38 | help | 37 | help |
39 | Enable support for the Intel(R) IOP Series RAID engines. | 38 | Enable support for the Intel(R) IOP Series RAID engines. |
@@ -59,7 +58,6 @@ config FSL_DMA | |||
59 | config MV_XOR | 58 | config MV_XOR |
60 | bool "Marvell XOR engine support" | 59 | bool "Marvell XOR engine support" |
61 | depends on PLAT_ORION | 60 | depends on PLAT_ORION |
62 | select ASYNC_CORE | ||
63 | select DMA_ENGINE | 61 | select DMA_ENGINE |
64 | ---help--- | 62 | ---help--- |
65 | Enable support for the Marvell XOR engine. | 63 | Enable support for the Marvell XOR engine. |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 657996517374..403dbe781122 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -31,32 +31,18 @@ | |||
31 | * | 31 | * |
32 | * LOCKING: | 32 | * LOCKING: |
33 | * | 33 | * |
34 | * The subsystem keeps two global lists, dma_device_list and dma_client_list. | 34 | * The subsystem keeps a global list of dma_device structs; it is protected by a |
35 | * Both of these are protected by a mutex, dma_list_mutex. | 35 | * mutex, dma_list_mutex. |
36 | * | ||
37 | * A subsystem can get access to a channel by calling dmaengine_get() followed | ||
38 | * by dma_find_channel(), or if it has need for an exclusive channel it can call | ||
39 | * dma_request_channel(). Once a channel is allocated a reference is taken | ||
40 | * against its corresponding driver to disable removal. | ||
36 | * | 41 | * |
37 | * Each device has a channels list, which runs unlocked but is never modified | 42 | * Each device has a channels list, which runs unlocked but is never modified |
38 | * once the device is registered, it's just setup by the driver. | 43 | * once the device is registered, it's just setup by the driver. |
39 | * | 44 | * |
40 | * Each client is responsible for keeping track of the channels it uses. See | 45 | * See Documentation/dmaengine.txt for more details |
41 | * the definition of dma_event_callback in dmaengine.h. | ||
42 | * | ||
43 | * Each device has a kref, which is initialized to 1 when the device is | ||
44 | * registered. A kref_get is done for each device registered. When the | ||
45 | * device is released, the corresponding kref_put is done in the release | ||
46 | * method. Every time one of the device's channels is allocated to a client, | ||
47 | * a kref_get occurs. When the channel is freed, the corresponding kref_put | ||
48 | * happens. The device's release function does a completion, so | ||
49 | * unregister_device does a remove event, device_unregister, a kref_put | ||
50 | * for the first reference, then waits on the completion for all other | ||
51 | * references to finish. | ||
52 | * | ||
53 | * Each channel has an open-coded implementation of Rusty Russell's "bigref," | ||
54 | * with a kref and a per_cpu local_t. A dma_chan_get is called when a client | ||
55 | * signals that it wants to use a channel, and dma_chan_put is called when | ||
56 | * a channel is removed or a client using it is unregistered. A client can | ||
57 | * take extra references per outstanding transaction, as is the case with | ||
58 | * the NET DMA client. The release function does a kref_put on the device. | ||
59 | * -ChrisL, DanW | ||
60 | */ | 46 | */ |
61 | 47 | ||
62 | #include <linux/init.h> | 48 | #include <linux/init.h> |
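The rewritten LOCKING comment above describes two ways for a subsystem to obtain channels under the new model. Below is a hedged sketch of the "public" path (dmaengine_get() plus dma_find_channel()); only those calls and device_issue_pending() are taken from this patch, the client functions are invented, and descriptor prep/submit is elided:

#include <linux/dmaengine.h>

/* called once at client init: pins dma driver modules and lets the
 * core populate the per-cpu channel table
 */
static void example_client_up(void)
{
	dmaengine_get();
}

/* per operation: look up the per-cpu memcpy channel (may be NULL) */
static void example_client_copy(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (!chan)
		return;		/* fall back to a cpu copy */

	/* ...prep and submit a DMA_MEMCPY descriptor on chan here... */
	chan->device->device_issue_pending(chan);
}

/* called at client exit: drops the reference so drivers can unload */
static void example_client_down(void)
{
	dmaengine_put();
}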
@@ -70,54 +56,85 @@ | |||
70 | #include <linux/rcupdate.h> | 56 | #include <linux/rcupdate.h> |
71 | #include <linux/mutex.h> | 57 | #include <linux/mutex.h> |
72 | #include <linux/jiffies.h> | 58 | #include <linux/jiffies.h> |
59 | #include <linux/rculist.h> | ||
60 | #include <linux/idr.h> | ||
73 | 61 | ||
74 | static DEFINE_MUTEX(dma_list_mutex); | 62 | static DEFINE_MUTEX(dma_list_mutex); |
75 | static LIST_HEAD(dma_device_list); | 63 | static LIST_HEAD(dma_device_list); |
76 | static LIST_HEAD(dma_client_list); | 64 | static long dmaengine_ref_count; |
65 | static struct idr dma_idr; | ||
77 | 66 | ||
78 | /* --- sysfs implementation --- */ | 67 | /* --- sysfs implementation --- */ |
79 | 68 | ||
69 | /** | ||
70 | * dev_to_dma_chan - convert a device pointer to its sysfs container object | ||
71 | * @dev - device node | ||
72 | * | ||
73 | * Must be called under dma_list_mutex | ||
74 | */ | ||
75 | static struct dma_chan *dev_to_dma_chan(struct device *dev) | ||
76 | { | ||
77 | struct dma_chan_dev *chan_dev; | ||
78 | |||
79 | chan_dev = container_of(dev, typeof(*chan_dev), device); | ||
80 | return chan_dev->chan; | ||
81 | } | ||
82 | |||
80 | static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) | 83 | static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) |
81 | { | 84 | { |
82 | struct dma_chan *chan = to_dma_chan(dev); | 85 | struct dma_chan *chan; |
83 | unsigned long count = 0; | 86 | unsigned long count = 0; |
84 | int i; | 87 | int i; |
88 | int err; | ||
85 | 89 | ||
86 | for_each_possible_cpu(i) | 90 | mutex_lock(&dma_list_mutex); |
87 | count += per_cpu_ptr(chan->local, i)->memcpy_count; | 91 | chan = dev_to_dma_chan(dev); |
92 | if (chan) { | ||
93 | for_each_possible_cpu(i) | ||
94 | count += per_cpu_ptr(chan->local, i)->memcpy_count; | ||
95 | err = sprintf(buf, "%lu\n", count); | ||
96 | } else | ||
97 | err = -ENODEV; | ||
98 | mutex_unlock(&dma_list_mutex); | ||
88 | 99 | ||
89 | return sprintf(buf, "%lu\n", count); | 100 | return err; |
90 | } | 101 | } |
91 | 102 | ||
92 | static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, | 103 | static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, |
93 | char *buf) | 104 | char *buf) |
94 | { | 105 | { |
95 | struct dma_chan *chan = to_dma_chan(dev); | 106 | struct dma_chan *chan; |
96 | unsigned long count = 0; | 107 | unsigned long count = 0; |
97 | int i; | 108 | int i; |
109 | int err; | ||
98 | 110 | ||
99 | for_each_possible_cpu(i) | 111 | mutex_lock(&dma_list_mutex); |
100 | count += per_cpu_ptr(chan->local, i)->bytes_transferred; | 112 | chan = dev_to_dma_chan(dev); |
113 | if (chan) { | ||
114 | for_each_possible_cpu(i) | ||
115 | count += per_cpu_ptr(chan->local, i)->bytes_transferred; | ||
116 | err = sprintf(buf, "%lu\n", count); | ||
117 | } else | ||
118 | err = -ENODEV; | ||
119 | mutex_unlock(&dma_list_mutex); | ||
101 | 120 | ||
102 | return sprintf(buf, "%lu\n", count); | 121 | return err; |
103 | } | 122 | } |
104 | 123 | ||
105 | static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) | 124 | static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) |
106 | { | 125 | { |
107 | struct dma_chan *chan = to_dma_chan(dev); | 126 | struct dma_chan *chan; |
108 | int in_use = 0; | 127 | int err; |
109 | |||
110 | if (unlikely(chan->slow_ref) && | ||
111 | atomic_read(&chan->refcount.refcount) > 1) | ||
112 | in_use = 1; | ||
113 | else { | ||
114 | if (local_read(&(per_cpu_ptr(chan->local, | ||
115 | get_cpu())->refcount)) > 0) | ||
116 | in_use = 1; | ||
117 | put_cpu(); | ||
118 | } | ||
119 | 128 | ||
120 | return sprintf(buf, "%d\n", in_use); | 129 | mutex_lock(&dma_list_mutex); |
130 | chan = dev_to_dma_chan(dev); | ||
131 | if (chan) | ||
132 | err = sprintf(buf, "%d\n", chan->client_count); | ||
133 | else | ||
134 | err = -ENODEV; | ||
135 | mutex_unlock(&dma_list_mutex); | ||
136 | |||
137 | return err; | ||
121 | } | 138 | } |
122 | 139 | ||
123 | static struct device_attribute dma_attrs[] = { | 140 | static struct device_attribute dma_attrs[] = { |
@@ -127,76 +144,110 @@ static struct device_attribute dma_attrs[] = { | |||
127 | __ATTR_NULL | 144 | __ATTR_NULL |
128 | }; | 145 | }; |
129 | 146 | ||
130 | static void dma_async_device_cleanup(struct kref *kref); | 147 | static void chan_dev_release(struct device *dev) |
131 | |||
132 | static void dma_dev_release(struct device *dev) | ||
133 | { | 148 | { |
134 | struct dma_chan *chan = to_dma_chan(dev); | 149 | struct dma_chan_dev *chan_dev; |
135 | kref_put(&chan->device->refcount, dma_async_device_cleanup); | 150 | |
151 | chan_dev = container_of(dev, typeof(*chan_dev), device); | ||
152 | if (atomic_dec_and_test(chan_dev->idr_ref)) { | ||
153 | mutex_lock(&dma_list_mutex); | ||
154 | idr_remove(&dma_idr, chan_dev->dev_id); | ||
155 | mutex_unlock(&dma_list_mutex); | ||
156 | kfree(chan_dev->idr_ref); | ||
157 | } | ||
158 | kfree(chan_dev); | ||
136 | } | 159 | } |
137 | 160 | ||
138 | static struct class dma_devclass = { | 161 | static struct class dma_devclass = { |
139 | .name = "dma", | 162 | .name = "dma", |
140 | .dev_attrs = dma_attrs, | 163 | .dev_attrs = dma_attrs, |
141 | .dev_release = dma_dev_release, | 164 | .dev_release = chan_dev_release, |
142 | }; | 165 | }; |
143 | 166 | ||
144 | /* --- client and device registration --- */ | 167 | /* --- client and device registration --- */ |
145 | 168 | ||
146 | #define dma_chan_satisfies_mask(chan, mask) \ | 169 | #define dma_device_satisfies_mask(device, mask) \ |
147 | __dma_chan_satisfies_mask((chan), &(mask)) | 170 | __dma_device_satisfies_mask((device), &(mask)) |
148 | static int | 171 | static int |
149 | __dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want) | 172 | __dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want) |
150 | { | 173 | { |
151 | dma_cap_mask_t has; | 174 | dma_cap_mask_t has; |
152 | 175 | ||
153 | bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits, | 176 | bitmap_and(has.bits, want->bits, device->cap_mask.bits, |
154 | DMA_TX_TYPE_END); | 177 | DMA_TX_TYPE_END); |
155 | return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); | 178 | return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); |
156 | } | 179 | } |
157 | 180 | ||
181 | static struct module *dma_chan_to_owner(struct dma_chan *chan) | ||
182 | { | ||
183 | return chan->device->dev->driver->owner; | ||
184 | } | ||
185 | |||
158 | /** | 186 | /** |
159 | * dma_client_chan_alloc - try to allocate channels to a client | 187 | * balance_ref_count - catch up the channel reference count |
160 | * @client: &dma_client | 188 | * @chan - channel to balance ->client_count versus dmaengine_ref_count |
161 | * | 189 | * |
162 | * Called with dma_list_mutex held. | 190 | * balance_ref_count must be called under dma_list_mutex |
163 | */ | 191 | */ |
164 | static void dma_client_chan_alloc(struct dma_client *client) | 192 | static void balance_ref_count(struct dma_chan *chan) |
165 | { | 193 | { |
166 | struct dma_device *device; | 194 | struct module *owner = dma_chan_to_owner(chan); |
167 | struct dma_chan *chan; | ||
168 | int desc; /* allocated descriptor count */ | ||
169 | enum dma_state_client ack; | ||
170 | 195 | ||
171 | /* Find a channel */ | 196 | while (chan->client_count < dmaengine_ref_count) { |
172 | list_for_each_entry(device, &dma_device_list, global_node) { | 197 | __module_get(owner); |
173 | /* Does the client require a specific DMA controller? */ | 198 | chan->client_count++; |
174 | if (client->slave && client->slave->dma_dev | 199 | } |
175 | && client->slave->dma_dev != device->dev) | 200 | } |
176 | continue; | ||
177 | 201 | ||
178 | list_for_each_entry(chan, &device->channels, device_node) { | 202 | /** |
179 | if (!dma_chan_satisfies_mask(chan, client->cap_mask)) | 203 | * dma_chan_get - try to grab a dma channel's parent driver module |
180 | continue; | 204 | * @chan - channel to grab |
205 | * | ||
206 | * Must be called under dma_list_mutex | ||
207 | */ | ||
208 | static int dma_chan_get(struct dma_chan *chan) | ||
209 | { | ||
210 | int err = -ENODEV; | ||
211 | struct module *owner = dma_chan_to_owner(chan); | ||
212 | |||
213 | if (chan->client_count) { | ||
214 | __module_get(owner); | ||
215 | err = 0; | ||
216 | } else if (try_module_get(owner)) | ||
217 | err = 0; | ||
218 | |||
219 | if (err == 0) | ||
220 | chan->client_count++; | ||
221 | |||
222 | /* allocate upon first client reference */ | ||
223 | if (chan->client_count == 1 && err == 0) { | ||
224 | int desc_cnt = chan->device->device_alloc_chan_resources(chan); | ||
225 | |||
226 | if (desc_cnt < 0) { | ||
227 | err = desc_cnt; | ||
228 | chan->client_count = 0; | ||
229 | module_put(owner); | ||
230 | } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) | ||
231 | balance_ref_count(chan); | ||
232 | } | ||
181 | 233 | ||
182 | desc = chan->device->device_alloc_chan_resources( | 234 | return err; |
183 | chan, client); | 235 | } |
184 | if (desc >= 0) { | ||
185 | ack = client->event_callback(client, | ||
186 | chan, | ||
187 | DMA_RESOURCE_AVAILABLE); | ||
188 | 236 | ||
189 | /* we are done once this client rejects | 237 | /** |
190 | * an available resource | 238 | * dma_chan_put - drop a reference to a dma channel's parent driver module |
191 | */ | 239 | * @chan - channel to release |
192 | if (ack == DMA_ACK) { | 240 | * |
193 | dma_chan_get(chan); | 241 | * Must be called under dma_list_mutex |
194 | chan->client_count++; | 242 | */ |
195 | } else if (ack == DMA_NAK) | 243 | static void dma_chan_put(struct dma_chan *chan) |
196 | return; | 244 | { |
197 | } | 245 | if (!chan->client_count) |
198 | } | 246 | return; /* this channel failed alloc_chan_resources */ |
199 | } | 247 | chan->client_count--; |
248 | module_put(dma_chan_to_owner(chan)); | ||
249 | if (chan->client_count == 0) | ||
250 | chan->device->device_free_chan_resources(chan); | ||
200 | } | 251 | } |
201 | 252 | ||
202 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | 253 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) |
@@ -218,138 +269,342 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | |||
218 | EXPORT_SYMBOL(dma_sync_wait); | 269 | EXPORT_SYMBOL(dma_sync_wait); |
219 | 270 | ||
220 | /** | 271 | /** |
221 | * dma_chan_cleanup - release a DMA channel's resources | 272 | * dma_cap_mask_all - enable iteration over all operation types |
222 | * @kref: kernel reference structure that contains the DMA channel device | 273 | */ |
274 | static dma_cap_mask_t dma_cap_mask_all; | ||
275 | |||
276 | /** | ||
277 | * dma_chan_tbl_ent - tracks channel allocations per core/operation | ||
278 | * @chan - associated channel for this entry | ||
279 | */ | ||
280 | struct dma_chan_tbl_ent { | ||
281 | struct dma_chan *chan; | ||
282 | }; | ||
283 | |||
284 | /** | ||
285 | * channel_table - percpu lookup table for memory-to-memory offload providers | ||
223 | */ | 286 | */ |
224 | void dma_chan_cleanup(struct kref *kref) | 287 | static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END]; |
288 | |||
289 | static int __init dma_channel_table_init(void) | ||
225 | { | 290 | { |
226 | struct dma_chan *chan = container_of(kref, struct dma_chan, refcount); | 291 | enum dma_transaction_type cap; |
227 | chan->device->device_free_chan_resources(chan); | 292 | int err = 0; |
228 | kref_put(&chan->device->refcount, dma_async_device_cleanup); | 293 | |
294 | bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); | ||
295 | |||
296 | /* 'interrupt', 'private', and 'slave' are channel capabilities, | ||
297 | * but are not associated with an operation so they do not need | ||
298 | * an entry in the channel_table | ||
299 | */ | ||
300 | clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); | ||
301 | clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); | ||
302 | clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); | ||
303 | |||
304 | for_each_dma_cap_mask(cap, dma_cap_mask_all) { | ||
305 | channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); | ||
306 | if (!channel_table[cap]) { | ||
307 | err = -ENOMEM; | ||
308 | break; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | if (err) { | ||
313 | pr_err("dmaengine: initialization failure\n"); | ||
314 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
315 | if (channel_table[cap]) | ||
316 | free_percpu(channel_table[cap]); | ||
317 | } | ||
318 | |||
319 | return err; | ||
229 | } | 320 | } |
230 | EXPORT_SYMBOL(dma_chan_cleanup); | 321 | arch_initcall(dma_channel_table_init); |
231 | 322 | ||
232 | static void dma_chan_free_rcu(struct rcu_head *rcu) | 323 | /** |
324 | * dma_find_channel - find a channel to carry out the operation | ||
325 | * @tx_type: transaction type | ||
326 | */ | ||
327 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | ||
233 | { | 328 | { |
234 | struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu); | 329 | struct dma_chan *chan; |
235 | int bias = 0x7FFFFFFF; | 330 | int cpu; |
236 | int i; | 331 | |
237 | for_each_possible_cpu(i) | 332 | WARN_ONCE(dmaengine_ref_count == 0, |
238 | bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount); | 333 | "client called %s without a reference", __func__); |
239 | atomic_sub(bias, &chan->refcount.refcount); | 334 | |
240 | kref_put(&chan->refcount, dma_chan_cleanup); | 335 | cpu = get_cpu(); |
336 | chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan; | ||
337 | put_cpu(); | ||
338 | |||
339 | return chan; | ||
241 | } | 340 | } |
341 | EXPORT_SYMBOL(dma_find_channel); | ||
242 | 342 | ||
243 | static void dma_chan_release(struct dma_chan *chan) | 343 | /** |
344 | * dma_issue_pending_all - flush all pending operations across all channels | ||
345 | */ | ||
346 | void dma_issue_pending_all(void) | ||
244 | { | 347 | { |
245 | atomic_add(0x7FFFFFFF, &chan->refcount.refcount); | 348 | struct dma_device *device; |
246 | chan->slow_ref = 1; | 349 | struct dma_chan *chan; |
247 | call_rcu(&chan->rcu, dma_chan_free_rcu); | 350 | |
351 | WARN_ONCE(dmaengine_ref_count == 0, | ||
352 | "client called %s without a reference", __func__); | ||
353 | |||
354 | rcu_read_lock(); | ||
355 | list_for_each_entry_rcu(device, &dma_device_list, global_node) { | ||
356 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
357 | continue; | ||
358 | list_for_each_entry(chan, &device->channels, device_node) | ||
359 | if (chan->client_count) | ||
360 | device->device_issue_pending(chan); | ||
361 | } | ||
362 | rcu_read_unlock(); | ||
248 | } | 363 | } |
364 | EXPORT_SYMBOL(dma_issue_pending_all); | ||
249 | 365 | ||
250 | /** | 366 | /** |
251 | * dma_chans_notify_available - broadcast available channels to the clients | 367 | * nth_chan - returns the nth channel of the given capability |
368 | * @cap: capability to match | ||
369 | * @n: nth channel desired | ||
370 | * | ||
371 | * Defaults to returning the channel with the desired capability and the | ||
372 | * lowest reference count when 'n' cannot be satisfied. Must be called | ||
373 | * under dma_list_mutex. | ||
252 | */ | 374 | */ |
253 | static void dma_clients_notify_available(void) | 375 | static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n) |
254 | { | 376 | { |
255 | struct dma_client *client; | 377 | struct dma_device *device; |
378 | struct dma_chan *chan; | ||
379 | struct dma_chan *ret = NULL; | ||
380 | struct dma_chan *min = NULL; | ||
256 | 381 | ||
257 | mutex_lock(&dma_list_mutex); | 382 | list_for_each_entry(device, &dma_device_list, global_node) { |
383 | if (!dma_has_cap(cap, device->cap_mask) || | ||
384 | dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
385 | continue; | ||
386 | list_for_each_entry(chan, &device->channels, device_node) { | ||
387 | if (!chan->client_count) | ||
388 | continue; | ||
389 | if (!min) | ||
390 | min = chan; | ||
391 | else if (chan->table_count < min->table_count) | ||
392 | min = chan; | ||
393 | |||
394 | if (n-- == 0) { | ||
395 | ret = chan; | ||
396 | break; /* done */ | ||
397 | } | ||
398 | } | ||
399 | if (ret) | ||
400 | break; /* done */ | ||
401 | } | ||
258 | 402 | ||
259 | list_for_each_entry(client, &dma_client_list, global_node) | 403 | if (!ret) |
260 | dma_client_chan_alloc(client); | 404 | ret = min; |
261 | 405 | ||
262 | mutex_unlock(&dma_list_mutex); | 406 | if (ret) |
407 | ret->table_count++; | ||
408 | |||
409 | return ret; | ||
263 | } | 410 | } |
264 | 411 | ||
265 | /** | 412 | /** |
266 | * dma_chans_notify_available - tell the clients that a channel is going away | 413 | * dma_channel_rebalance - redistribute the available channels |
267 | * @chan: channel on its way out | 414 | * |
415 | * Optimize for cpu isolation (each cpu gets a dedicated channel for an | ||
416 | * operation type) in the SMP case, and operation isolation (avoid | ||
417 | * multi-tasking channels) in the non-SMP case. Must be called under | ||
418 | * dma_list_mutex. | ||
268 | */ | 419 | */ |
269 | static void dma_clients_notify_removed(struct dma_chan *chan) | 420 | static void dma_channel_rebalance(void) |
270 | { | 421 | { |
271 | struct dma_client *client; | 422 | struct dma_chan *chan; |
272 | enum dma_state_client ack; | 423 | struct dma_device *device; |
424 | int cpu; | ||
425 | int cap; | ||
426 | int n; | ||
273 | 427 | ||
274 | mutex_lock(&dma_list_mutex); | 428 | /* undo the last distribution */ |
429 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
430 | for_each_possible_cpu(cpu) | ||
431 | per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; | ||
432 | |||
433 | list_for_each_entry(device, &dma_device_list, global_node) { | ||
434 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
435 | continue; | ||
436 | list_for_each_entry(chan, &device->channels, device_node) | ||
437 | chan->table_count = 0; | ||
438 | } | ||
275 | 439 | ||
276 | list_for_each_entry(client, &dma_client_list, global_node) { | 440 | /* don't populate the channel_table if no clients are available */ |
277 | ack = client->event_callback(client, chan, | 441 | if (!dmaengine_ref_count) |
278 | DMA_RESOURCE_REMOVED); | 442 | return; |
279 | 443 | ||
280 | /* client was holding resources for this channel so | 444 | /* redistribute available channels */ |
281 | * free it | 445 | n = 0; |
282 | */ | 446 | for_each_dma_cap_mask(cap, dma_cap_mask_all) |
283 | if (ack == DMA_ACK) { | 447 | for_each_online_cpu(cpu) { |
284 | dma_chan_put(chan); | 448 | if (num_possible_cpus() > 1) |
285 | chan->client_count--; | 449 | chan = nth_chan(cap, n++); |
450 | else | ||
451 | chan = nth_chan(cap, -1); | ||
452 | |||
453 | per_cpu_ptr(channel_table[cap], cpu)->chan = chan; | ||
454 | } | ||
455 | } | ||
456 | |||
457 | static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev, | ||
458 | dma_filter_fn fn, void *fn_param) | ||
459 | { | ||
460 | struct dma_chan *chan; | ||
461 | |||
462 | if (!__dma_device_satisfies_mask(dev, mask)) { | ||
463 | pr_debug("%s: wrong capabilities\n", __func__); | ||
464 | return NULL; | ||
465 | } | ||
466 | /* devices with multiple channels need special handling as we need to | ||
467 | * ensure that all channels are either private or public. | ||
468 | */ | ||
469 | if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) | ||
470 | list_for_each_entry(chan, &dev->channels, device_node) { | ||
471 | /* some channels are already publicly allocated */ | ||
472 | if (chan->client_count) | ||
473 | return NULL; | ||
286 | } | 474 | } |
475 | |||
476 | list_for_each_entry(chan, &dev->channels, device_node) { | ||
477 | if (chan->client_count) { | ||
478 | pr_debug("%s: %s busy\n", | ||
479 | __func__, dma_chan_name(chan)); | ||
480 | continue; | ||
481 | } | ||
482 | if (fn && !fn(chan, fn_param)) { | ||
483 | pr_debug("%s: %s filter said false\n", | ||
484 | __func__, dma_chan_name(chan)); | ||
485 | continue; | ||
486 | } | ||
487 | return chan; | ||
287 | } | 488 | } |
288 | 489 | ||
289 | mutex_unlock(&dma_list_mutex); | 490 | return NULL; |
290 | } | 491 | } |
291 | 492 | ||
292 | /** | 493 | /** |
293 | * dma_async_client_register - register a &dma_client | 494 | * dma_request_channel - try to allocate an exclusive channel |
294 | * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask' | 495 | * @mask: capabilities that the channel must satisfy |
496 | * @fn: optional callback to disposition available channels | ||
497 | * @fn_param: opaque parameter to pass to dma_filter_fn | ||
295 | */ | 498 | */ |
296 | void dma_async_client_register(struct dma_client *client) | 499 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) |
297 | { | 500 | { |
298 | /* validate client data */ | 501 | struct dma_device *device, *_d; |
299 | BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) && | 502 | struct dma_chan *chan = NULL; |
300 | !client->slave); | 503 | int err; |
301 | 504 | ||
505 | /* Find a channel */ | ||
506 | mutex_lock(&dma_list_mutex); | ||
507 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { | ||
508 | chan = private_candidate(mask, device, fn, fn_param); | ||
509 | if (chan) { | ||
510 | /* Found a suitable channel, try to grab, prep, and | ||
511 | * return it. We first set DMA_PRIVATE to disable | ||
512 | * balance_ref_count as this channel will not be | ||
513 | * published in the general-purpose allocator | ||
514 | */ | ||
515 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
516 | err = dma_chan_get(chan); | ||
517 | |||
518 | if (err == -ENODEV) { | ||
519 | pr_debug("%s: %s module removed\n", __func__, | ||
520 | dma_chan_name(chan)); | ||
521 | list_del_rcu(&device->global_node); | ||
522 | } else if (err) | ||
523 | pr_err("dmaengine: failed to get %s: (%d)\n", | ||
524 | dma_chan_name(chan), err); | ||
525 | else | ||
526 | break; | ||
527 | chan = NULL; | ||
528 | } | ||
529 | } | ||
530 | mutex_unlock(&dma_list_mutex); | ||
531 | |||
532 | pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail", | ||
533 | chan ? dma_chan_name(chan) : NULL); | ||
534 | |||
535 | return chan; | ||
536 | } | ||
537 | EXPORT_SYMBOL_GPL(__dma_request_channel); | ||
538 | |||
539 | void dma_release_channel(struct dma_chan *chan) | ||
540 | { | ||
302 | mutex_lock(&dma_list_mutex); | 541 | mutex_lock(&dma_list_mutex); |
303 | list_add_tail(&client->global_node, &dma_client_list); | 542 | WARN_ONCE(chan->client_count != 1, |
543 | "chan reference count %d != 1\n", chan->client_count); | ||
544 | dma_chan_put(chan); | ||
304 | mutex_unlock(&dma_list_mutex); | 545 | mutex_unlock(&dma_list_mutex); |
305 | } | 546 | } |
306 | EXPORT_SYMBOL(dma_async_client_register); | 547 | EXPORT_SYMBOL_GPL(dma_release_channel); |
307 | 548 | ||
308 | /** | 549 | /** |
309 | * dma_async_client_unregister - unregister a client and free the &dma_client | 550 | * dmaengine_get - register interest in dma_channels |
310 | * @client: &dma_client to free | ||
311 | * | ||
312 | * Force frees any allocated DMA channels, frees the &dma_client memory | ||
313 | */ | 551 | */ |
314 | void dma_async_client_unregister(struct dma_client *client) | 552 | void dmaengine_get(void) |
315 | { | 553 | { |
316 | struct dma_device *device; | 554 | struct dma_device *device, *_d; |
317 | struct dma_chan *chan; | 555 | struct dma_chan *chan; |
318 | enum dma_state_client ack; | 556 | int err; |
319 | |||
320 | if (!client) | ||
321 | return; | ||
322 | 557 | ||
323 | mutex_lock(&dma_list_mutex); | 558 | mutex_lock(&dma_list_mutex); |
324 | /* free all channels the client is holding */ | 559 | dmaengine_ref_count++; |
325 | list_for_each_entry(device, &dma_device_list, global_node) | ||
326 | list_for_each_entry(chan, &device->channels, device_node) { | ||
327 | ack = client->event_callback(client, chan, | ||
328 | DMA_RESOURCE_REMOVED); | ||
329 | 560 | ||
330 | if (ack == DMA_ACK) { | 561 | /* try to grab channels */ |
331 | dma_chan_put(chan); | 562 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
332 | chan->client_count--; | 563 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
333 | } | 564 | continue; |
565 | list_for_each_entry(chan, &device->channels, device_node) { | ||
566 | err = dma_chan_get(chan); | ||
567 | if (err == -ENODEV) { | ||
568 | /* module removed before we could use it */ | ||
569 | list_del_rcu(&device->global_node); | ||
570 | break; | ||
571 | } else if (err) | ||
572 | pr_err("dmaengine: failed to get %s: (%d)\n", | ||
573 | dma_chan_name(chan), err); | ||
334 | } | 574 | } |
575 | } | ||
335 | 576 | ||
336 | list_del(&client->global_node); | 577 | /* if this is the first reference and there were channels |
578 | * waiting we need to rebalance to get those channels | ||
579 | * incorporated into the channel table | ||
580 | */ | ||
581 | if (dmaengine_ref_count == 1) | ||
582 | dma_channel_rebalance(); | ||
337 | mutex_unlock(&dma_list_mutex); | 583 | mutex_unlock(&dma_list_mutex); |
338 | } | 584 | } |
339 | EXPORT_SYMBOL(dma_async_client_unregister); | 585 | EXPORT_SYMBOL(dmaengine_get); |
340 | 586 | ||
341 | /** | 587 | /** |
342 | * dma_async_client_chan_request - send all available channels to the | 588 | * dmaengine_put - let dma drivers be removed when ref_count == 0 |
343 | * client that satisfy the capability mask | ||
344 | * @client - requester | ||
345 | */ | 589 | */ |
346 | void dma_async_client_chan_request(struct dma_client *client) | 590 | void dmaengine_put(void) |
347 | { | 591 | { |
592 | struct dma_device *device; | ||
593 | struct dma_chan *chan; | ||
594 | |||
348 | mutex_lock(&dma_list_mutex); | 595 | mutex_lock(&dma_list_mutex); |
349 | dma_client_chan_alloc(client); | 596 | dmaengine_ref_count--; |
597 | BUG_ON(dmaengine_ref_count < 0); | ||
598 | /* drop channel references */ | ||
599 | list_for_each_entry(device, &dma_device_list, global_node) { | ||
600 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
601 | continue; | ||
602 | list_for_each_entry(chan, &device->channels, device_node) | ||
603 | dma_chan_put(chan); | ||
604 | } | ||
350 | mutex_unlock(&dma_list_mutex); | 605 | mutex_unlock(&dma_list_mutex); |
351 | } | 606 | } |
352 | EXPORT_SYMBOL(dma_async_client_chan_request); | 607 | EXPORT_SYMBOL(dmaengine_put); |
353 | 608 | ||
354 | /** | 609 | /** |
355 | * dma_async_device_register - registers DMA devices found | 610 | * dma_async_device_register - registers DMA devices found |
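For callers that need a channel all to themselves, the hunk above adds __dma_request_channel() and dma_release_channel(). A hedged sketch of a caller follows; the filter callback and the "match a specific controller" criterion are made up for illustration, and only the dma_cap_* helpers and the two new functions are assumed from dmaengine.h:

#include <linux/dmaengine.h>

/* return true to accept a candidate channel; param is caller-supplied */
static bool example_filter(struct dma_chan *chan, void *param)
{
	return chan->device->dev == param;	/* only this controller */
}

static struct dma_chan *example_grab(struct device *wanted)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = __dma_request_channel(&mask, example_filter, wanted);
	if (!chan)
		return NULL;	/* no free channel matched the filter */

	/* the device is now DMA_PRIVATE, so this channel will not be
	 * handed out by dma_find_channel() or the rebalance logic
	 */
	return chan;
}

static void example_drop(struct dma_chan *chan)
{
	dma_release_channel(chan);
}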
@@ -357,9 +612,9 @@ EXPORT_SYMBOL(dma_async_client_chan_request); | |||
357 | */ | 612 | */ |
358 | int dma_async_device_register(struct dma_device *device) | 613 | int dma_async_device_register(struct dma_device *device) |
359 | { | 614 | { |
360 | static int id; | ||
361 | int chancnt = 0, rc; | 615 | int chancnt = 0, rc; |
362 | struct dma_chan* chan; | 616 | struct dma_chan* chan; |
617 | atomic_t *idr_ref; | ||
363 | 618 | ||
364 | if (!device) | 619 | if (!device) |
365 | return -ENODEV; | 620 | return -ENODEV; |
@@ -386,57 +641,83 @@ int dma_async_device_register(struct dma_device *device) | |||
386 | BUG_ON(!device->device_issue_pending); | 641 | BUG_ON(!device->device_issue_pending); |
387 | BUG_ON(!device->dev); | 642 | BUG_ON(!device->dev); |
388 | 643 | ||
389 | init_completion(&device->done); | 644 | idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); |
390 | kref_init(&device->refcount); | 645 | if (!idr_ref) |
391 | 646 | return -ENOMEM; | |
647 | atomic_set(idr_ref, 0); | ||
648 | idr_retry: | ||
649 | if (!idr_pre_get(&dma_idr, GFP_KERNEL)) | ||
650 | return -ENOMEM; | ||
392 | mutex_lock(&dma_list_mutex); | 651 | mutex_lock(&dma_list_mutex); |
393 | device->dev_id = id++; | 652 | rc = idr_get_new(&dma_idr, NULL, &device->dev_id); |
394 | mutex_unlock(&dma_list_mutex); | 653 | mutex_unlock(&dma_list_mutex); |
654 | if (rc == -EAGAIN) | ||
655 | goto idr_retry; | ||
656 | else if (rc != 0) | ||
657 | return rc; | ||
395 | 658 | ||
396 | /* represent channels in sysfs. Probably want devs too */ | 659 | /* represent channels in sysfs. Probably want devs too */ |
397 | list_for_each_entry(chan, &device->channels, device_node) { | 660 | list_for_each_entry(chan, &device->channels, device_node) { |
398 | chan->local = alloc_percpu(typeof(*chan->local)); | 661 | chan->local = alloc_percpu(typeof(*chan->local)); |
399 | if (chan->local == NULL) | 662 | if (chan->local == NULL) |
400 | continue; | 663 | continue; |
664 | chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); | ||
665 | if (chan->dev == NULL) { | ||
666 | free_percpu(chan->local); | ||
667 | continue; | ||
668 | } | ||
401 | 669 | ||
402 | chan->chan_id = chancnt++; | 670 | chan->chan_id = chancnt++; |
403 | chan->dev.class = &dma_devclass; | 671 | chan->dev->device.class = &dma_devclass; |
404 | chan->dev.parent = device->dev; | 672 | chan->dev->device.parent = device->dev; |
405 | dev_set_name(&chan->dev, "dma%dchan%d", | 673 | chan->dev->chan = chan; |
674 | chan->dev->idr_ref = idr_ref; | ||
675 | chan->dev->dev_id = device->dev_id; | ||
676 | atomic_inc(idr_ref); | ||
677 | dev_set_name(&chan->dev->device, "dma%dchan%d", | ||
406 | device->dev_id, chan->chan_id); | 678 | device->dev_id, chan->chan_id); |
407 | 679 | ||
408 | rc = device_register(&chan->dev); | 680 | rc = device_register(&chan->dev->device); |
409 | if (rc) { | 681 | if (rc) { |
410 | chancnt--; | ||
411 | free_percpu(chan->local); | 682 | free_percpu(chan->local); |
412 | chan->local = NULL; | 683 | chan->local = NULL; |
413 | goto err_out; | 684 | goto err_out; |
414 | } | 685 | } |
415 | |||
416 | /* One for the channel, one of the class device */ | ||
417 | kref_get(&device->refcount); | ||
418 | kref_get(&device->refcount); | ||
419 | kref_init(&chan->refcount); | ||
420 | chan->client_count = 0; | 686 | chan->client_count = 0; |
421 | chan->slow_ref = 0; | ||
422 | INIT_RCU_HEAD(&chan->rcu); | ||
423 | } | 687 | } |
688 | device->chancnt = chancnt; | ||
424 | 689 | ||
425 | mutex_lock(&dma_list_mutex); | 690 | mutex_lock(&dma_list_mutex); |
426 | list_add_tail(&device->global_node, &dma_device_list); | 691 | /* take references on public channels */ |
692 | if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
693 | list_for_each_entry(chan, &device->channels, device_node) { | ||
694 | /* if clients are already waiting for channels we need | ||
695 | * to take references on their behalf | ||
696 | */ | ||
697 | if (dma_chan_get(chan) == -ENODEV) { | ||
698 | /* note we can only get here for the first | ||
699 | * channel as the remaining channels are | ||
700 | * guaranteed to get a reference | ||
701 | */ | ||
702 | rc = -ENODEV; | ||
703 | mutex_unlock(&dma_list_mutex); | ||
704 | goto err_out; | ||
705 | } | ||
706 | } | ||
707 | list_add_tail_rcu(&device->global_node, &dma_device_list); | ||
708 | dma_channel_rebalance(); | ||
427 | mutex_unlock(&dma_list_mutex); | 709 | mutex_unlock(&dma_list_mutex); |
428 | 710 | ||
429 | dma_clients_notify_available(); | ||
430 | |||
431 | return 0; | 711 | return 0; |
432 | 712 | ||
433 | err_out: | 713 | err_out: |
434 | list_for_each_entry(chan, &device->channels, device_node) { | 714 | list_for_each_entry(chan, &device->channels, device_node) { |
435 | if (chan->local == NULL) | 715 | if (chan->local == NULL) |
436 | continue; | 716 | continue; |
437 | kref_put(&device->refcount, dma_async_device_cleanup); | 717 | mutex_lock(&dma_list_mutex); |
438 | device_unregister(&chan->dev); | 718 | chan->dev->chan = NULL; |
439 | chancnt--; | 719 | mutex_unlock(&dma_list_mutex); |
720 | device_unregister(&chan->dev->device); | ||
440 | free_percpu(chan->local); | 721 | free_percpu(chan->local); |
441 | } | 722 | } |
442 | return rc; | 723 | return rc; |
@@ -444,37 +725,30 @@ err_out: | |||
444 | EXPORT_SYMBOL(dma_async_device_register); | 725 | EXPORT_SYMBOL(dma_async_device_register); |
445 | 726 | ||
446 | /** | 727 | /** |
447 | * dma_async_device_cleanup - function called when all references are released | 728 | * dma_async_device_unregister - unregister a DMA device |
448 | * @kref: kernel reference object | ||
449 | */ | ||
450 | static void dma_async_device_cleanup(struct kref *kref) | ||
451 | { | ||
452 | struct dma_device *device; | ||
453 | |||
454 | device = container_of(kref, struct dma_device, refcount); | ||
455 | complete(&device->done); | ||
456 | } | ||
457 | |||
458 | /** | ||
459 | * dma_async_device_unregister - unregisters DMA devices | ||
460 | * @device: &dma_device | 729 | * @device: &dma_device |
730 | * | ||
731 | * This routine is called by dma driver exit routines; dmaengine holds module | ||
732 | * references to prevent it from being called while channels are in use. | ||
461 | */ | 733 | */ |
462 | void dma_async_device_unregister(struct dma_device *device) | 734 | void dma_async_device_unregister(struct dma_device *device) |
463 | { | 735 | { |
464 | struct dma_chan *chan; | 736 | struct dma_chan *chan; |
465 | 737 | ||
466 | mutex_lock(&dma_list_mutex); | 738 | mutex_lock(&dma_list_mutex); |
467 | list_del(&device->global_node); | 739 | list_del_rcu(&device->global_node); |
740 | dma_channel_rebalance(); | ||
468 | mutex_unlock(&dma_list_mutex); | 741 | mutex_unlock(&dma_list_mutex); |
469 | 742 | ||
470 | list_for_each_entry(chan, &device->channels, device_node) { | 743 | list_for_each_entry(chan, &device->channels, device_node) { |
471 | dma_clients_notify_removed(chan); | 744 | WARN_ONCE(chan->client_count, |
472 | device_unregister(&chan->dev); | 745 | "%s called while %d clients hold a reference\n", |
473 | dma_chan_release(chan); | 746 | __func__, chan->client_count); |
747 | mutex_lock(&dma_list_mutex); | ||
748 | chan->dev->chan = NULL; | ||
749 | mutex_unlock(&dma_list_mutex); | ||
750 | device_unregister(&chan->dev->device); | ||
474 | } | 751 | } |
475 | |||
476 | kref_put(&device->refcount, dma_async_device_cleanup); | ||
477 | wait_for_completion(&device->done); | ||
478 | } | 752 | } |
479 | EXPORT_SYMBOL(dma_async_device_unregister); | 753 | EXPORT_SYMBOL(dma_async_device_unregister); |
480 | 754 | ||
@@ -626,10 +900,96 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |||
626 | } | 900 | } |
627 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); | 901 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); |
628 | 902 | ||
903 | /* dma_wait_for_async_tx - spin wait for a transaction to complete | ||
904 | * @tx: in-flight transaction to wait on | ||
905 | * | ||
906 | * This routine assumes that tx was obtained from a call to async_memcpy, | ||
907 | * async_xor, async_memset, etc., which ensures that tx is "in-flight" (prepped | ||
908 | * and submitted). Walking the parent chain is only meant to cover for DMA | ||
909 | * drivers that do not implement the DMA_INTERRUPT capability and may race with | ||
910 | * the driver's descriptor cleanup routine. | ||
911 | */ | ||
912 | enum dma_status | ||
913 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
914 | { | ||
915 | enum dma_status status; | ||
916 | struct dma_async_tx_descriptor *iter; | ||
917 | struct dma_async_tx_descriptor *parent; | ||
918 | |||
919 | if (!tx) | ||
920 | return DMA_SUCCESS; | ||
921 | |||
922 | WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for" | ||
923 | " %s\n", __func__, dma_chan_name(tx->chan)); | ||
924 | |||
925 | /* poll through the dependency chain, return when tx is complete */ | ||
926 | do { | ||
927 | iter = tx; | ||
928 | |||
929 | /* find the root of the unsubmitted dependency chain */ | ||
930 | do { | ||
931 | parent = iter->parent; | ||
932 | if (!parent) | ||
933 | break; | ||
934 | else | ||
935 | iter = parent; | ||
936 | } while (parent); | ||
937 | |||
938 | /* there is a small window for ->parent == NULL and | ||
939 | * ->cookie == -EBUSY | ||
940 | */ | ||
941 | while (iter->cookie == -EBUSY) | ||
942 | cpu_relax(); | ||
943 | |||
944 | status = dma_sync_wait(iter->chan, iter->cookie); | ||
945 | } while (status == DMA_IN_PROGRESS || (iter != tx)); | ||
946 | |||
947 | return status; | ||
948 | } | ||
949 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); | ||
950 | |||
951 | /* dma_run_dependencies - helper routine for dma drivers to process | ||
952 | * (start) dependent operations on their target channel | ||
953 | * @tx: transaction with dependencies | ||
954 | */ | ||
955 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx) | ||
956 | { | ||
957 | struct dma_async_tx_descriptor *dep = tx->next; | ||
958 | struct dma_async_tx_descriptor *dep_next; | ||
959 | struct dma_chan *chan; | ||
960 | |||
961 | if (!dep) | ||
962 | return; | ||
963 | |||
964 | chan = dep->chan; | ||
965 | |||
966 | /* keep submitting up until a channel switch is detected | ||
967 | * in that case we will be called again as a result of | ||
968 | * processing the interrupt from async_tx_channel_switch | ||
969 | */ | ||
970 | for (; dep; dep = dep_next) { | ||
971 | spin_lock_bh(&dep->lock); | ||
972 | dep->parent = NULL; | ||
973 | dep_next = dep->next; | ||
974 | if (dep_next && dep_next->chan == chan) | ||
975 | dep->next = NULL; /* ->next will be submitted */ | ||
976 | else | ||
977 | dep_next = NULL; /* submit current dep and terminate */ | ||
978 | spin_unlock_bh(&dep->lock); | ||
979 | |||
980 | dep->tx_submit(dep); | ||
981 | } | ||
982 | |||
983 | chan->device->device_issue_pending(chan); | ||
984 | } | ||
985 | EXPORT_SYMBOL_GPL(dma_run_dependencies); | ||
986 | |||
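The intended caller of dma_run_dependencies() is a DMA driver's descriptor-cleanup path, as the iop-adma and mv_xor conversions later in this patch show (they replace async_tx_run_dependencies() with this call). A hedged sketch of such a call site, with illustrative names only:

	/* Sketch: completion handling step inside a driver's cleanup path. */
	static void example_complete_descriptor(struct dma_async_tx_descriptor *txd)
	{
		if (txd->callback)
			txd->callback(txd->callback_param);

		/* start any operations queued behind this one, possibly on
		 * another channel */
		dma_run_dependencies(txd);
	}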
629 | static int __init dma_bus_init(void) | 987 | static int __init dma_bus_init(void) |
630 | { | 988 | { |
989 | idr_init(&dma_idr); | ||
631 | mutex_init(&dma_list_mutex); | 990 | mutex_init(&dma_list_mutex); |
632 | return class_register(&dma_devclass); | 991 | return class_register(&dma_devclass); |
633 | } | 992 | } |
634 | subsys_initcall(dma_bus_init); | 993 | arch_initcall(dma_bus_init); |
994 | |||
635 | 995 | ||
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ed9636bfb54a..3603f1ea5b28 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(threads_per_chan, | |||
35 | 35 | ||
36 | static unsigned int max_channels; | 36 | static unsigned int max_channels; |
37 | module_param(max_channels, uint, S_IRUGO); | 37 | module_param(max_channels, uint, S_IRUGO); |
38 | MODULE_PARM_DESC(nr_channels, | 38 | MODULE_PARM_DESC(max_channels, |
39 | "Maximum number of channels to use (default: all)"); | 39 | "Maximum number of channels to use (default: all)"); |
40 | 40 | ||
41 | /* | 41 | /* |
@@ -71,7 +71,7 @@ struct dmatest_chan { | |||
71 | 71 | ||
72 | /* | 72 | /* |
73 | * These are protected by dma_list_mutex since they're only used by | 73 | * These are protected by dma_list_mutex since they're only used by |
74 | * the DMA client event callback | 74 | * the DMA filter function callback |
75 | */ | 75 | */ |
76 | static LIST_HEAD(dmatest_channels); | 76 | static LIST_HEAD(dmatest_channels); |
77 | static unsigned int nr_channels; | 77 | static unsigned int nr_channels; |
@@ -80,7 +80,7 @@ static bool dmatest_match_channel(struct dma_chan *chan) | |||
80 | { | 80 | { |
81 | if (test_channel[0] == '\0') | 81 | if (test_channel[0] == '\0') |
82 | return true; | 82 | return true; |
83 | return strcmp(dev_name(&chan->dev), test_channel) == 0; | 83 | return strcmp(dma_chan_name(chan), test_channel) == 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | static bool dmatest_match_device(struct dma_device *device) | 86 | static bool dmatest_match_device(struct dma_device *device) |
@@ -215,7 +215,6 @@ static int dmatest_func(void *data) | |||
215 | 215 | ||
216 | smp_rmb(); | 216 | smp_rmb(); |
217 | chan = thread->chan; | 217 | chan = thread->chan; |
218 | dma_chan_get(chan); | ||
219 | 218 | ||
220 | while (!kthread_should_stop()) { | 219 | while (!kthread_should_stop()) { |
221 | total_tests++; | 220 | total_tests++; |
@@ -293,7 +292,6 @@ static int dmatest_func(void *data) | |||
293 | } | 292 | } |
294 | 293 | ||
295 | ret = 0; | 294 | ret = 0; |
296 | dma_chan_put(chan); | ||
297 | kfree(thread->dstbuf); | 295 | kfree(thread->dstbuf); |
298 | err_dstbuf: | 296 | err_dstbuf: |
299 | kfree(thread->srcbuf); | 297 | kfree(thread->srcbuf); |
@@ -319,21 +317,16 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | |||
319 | kfree(dtc); | 317 | kfree(dtc); |
320 | } | 318 | } |
321 | 319 | ||
322 | static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) | 320 | static int dmatest_add_channel(struct dma_chan *chan) |
323 | { | 321 | { |
324 | struct dmatest_chan *dtc; | 322 | struct dmatest_chan *dtc; |
325 | struct dmatest_thread *thread; | 323 | struct dmatest_thread *thread; |
326 | unsigned int i; | 324 | unsigned int i; |
327 | 325 | ||
328 | /* Have we already been told about this channel? */ | ||
329 | list_for_each_entry(dtc, &dmatest_channels, node) | ||
330 | if (dtc->chan == chan) | ||
331 | return DMA_DUP; | ||
332 | |||
333 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); | 326 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); |
334 | if (!dtc) { | 327 | if (!dtc) { |
335 | pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev)); | 328 | pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); |
336 | return DMA_NAK; | 329 | return -ENOMEM; |
337 | } | 330 | } |
338 | 331 | ||
339 | dtc->chan = chan; | 332 | dtc->chan = chan; |
@@ -343,16 +336,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) | |||
343 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); | 336 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); |
344 | if (!thread) { | 337 | if (!thread) { |
345 | pr_warning("dmatest: No memory for %s-test%u\n", | 338 | pr_warning("dmatest: No memory for %s-test%u\n", |
346 | dev_name(&chan->dev), i); | 339 | dma_chan_name(chan), i); |
347 | break; | 340 | break; |
348 | } | 341 | } |
349 | thread->chan = dtc->chan; | 342 | thread->chan = dtc->chan; |
350 | smp_wmb(); | 343 | smp_wmb(); |
351 | thread->task = kthread_run(dmatest_func, thread, "%s-test%u", | 344 | thread->task = kthread_run(dmatest_func, thread, "%s-test%u", |
352 | dev_name(&chan->dev), i); | 345 | dma_chan_name(chan), i); |
353 | if (IS_ERR(thread->task)) { | 346 | if (IS_ERR(thread->task)) { |
354 | pr_warning("dmatest: Failed to run thread %s-test%u\n", | 347 | pr_warning("dmatest: Failed to run thread %s-test%u\n", |
355 | dev_name(&chan->dev), i); | 348 | dma_chan_name(chan), i); |
356 | kfree(thread); | 349 | kfree(thread); |
357 | break; | 350 | break; |
358 | } | 351 | } |
@@ -362,86 +355,62 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) | |||
362 | list_add_tail(&thread->node, &dtc->threads); | 355 | list_add_tail(&thread->node, &dtc->threads); |
363 | } | 356 | } |
364 | 357 | ||
365 | pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev)); | 358 | pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan)); |
366 | 359 | ||
367 | list_add_tail(&dtc->node, &dmatest_channels); | 360 | list_add_tail(&dtc->node, &dmatest_channels); |
368 | nr_channels++; | 361 | nr_channels++; |
369 | 362 | ||
370 | return DMA_ACK; | 363 | return 0; |
371 | } | ||
372 | |||
373 | static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan) | ||
374 | { | ||
375 | struct dmatest_chan *dtc, *_dtc; | ||
376 | |||
377 | list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { | ||
378 | if (dtc->chan == chan) { | ||
379 | list_del(&dtc->node); | ||
380 | dmatest_cleanup_channel(dtc); | ||
381 | pr_debug("dmatest: lost channel %s\n", | ||
382 | dev_name(&chan->dev)); | ||
383 | return DMA_ACK; | ||
384 | } | ||
385 | } | ||
386 | |||
387 | return DMA_DUP; | ||
388 | } | 364 | } |
389 | 365 | ||
390 | /* | 366 | static bool filter(struct dma_chan *chan, void *param) |
391 | * Start testing threads as new channels are assigned to us, and kill | ||
392 | * them when the channels go away. | ||
393 | * | ||
394 | * When we unregister the client, all channels are removed so this | ||
395 | * will also take care of cleaning things up when the module is | ||
396 | * unloaded. | ||
397 | */ | ||
398 | static enum dma_state_client | ||
399 | dmatest_event(struct dma_client *client, struct dma_chan *chan, | ||
400 | enum dma_state state) | ||
401 | { | 367 | { |
402 | enum dma_state_client ack = DMA_NAK; | 368 | if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) |
403 | 369 | return false; | |
404 | switch (state) { | 370 | else |
405 | case DMA_RESOURCE_AVAILABLE: | 371 | return true; |
406 | if (!dmatest_match_channel(chan) | ||
407 | || !dmatest_match_device(chan->device)) | ||
408 | ack = DMA_DUP; | ||
409 | else if (max_channels && nr_channels >= max_channels) | ||
410 | ack = DMA_NAK; | ||
411 | else | ||
412 | ack = dmatest_add_channel(chan); | ||
413 | break; | ||
414 | |||
415 | case DMA_RESOURCE_REMOVED: | ||
416 | ack = dmatest_remove_channel(chan); | ||
417 | break; | ||
418 | |||
419 | default: | ||
420 | pr_info("dmatest: Unhandled event %u (%s)\n", | ||
421 | state, dev_name(&chan->dev)); | ||
422 | break; | ||
423 | } | ||
424 | |||
425 | return ack; | ||
426 | } | 372 | } |
427 | 373 | ||
428 | static struct dma_client dmatest_client = { | ||
429 | .event_callback = dmatest_event, | ||
430 | }; | ||
431 | |||
432 | static int __init dmatest_init(void) | 374 | static int __init dmatest_init(void) |
433 | { | 375 | { |
434 | dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask); | 376 | dma_cap_mask_t mask; |
435 | dma_async_client_register(&dmatest_client); | 377 | struct dma_chan *chan; |
436 | dma_async_client_chan_request(&dmatest_client); | 378 | int err = 0; |
379 | |||
380 | dma_cap_zero(mask); | ||
381 | dma_cap_set(DMA_MEMCPY, mask); | ||
382 | for (;;) { | ||
383 | chan = dma_request_channel(mask, filter, NULL); | ||
384 | if (chan) { | ||
385 | err = dmatest_add_channel(chan); | ||
386 | if (err == 0) | ||
387 | continue; | ||
388 | else { | ||
389 | dma_release_channel(chan); | ||
390 | break; /* add_channel failed, punt */ | ||
391 | } | ||
392 | } else | ||
393 | break; /* no more channels available */ | ||
394 | if (max_channels && nr_channels >= max_channels) | ||
395 | break; /* we have all we need */ | ||
396 | } | ||
437 | 397 | ||
438 | return 0; | 398 | return err; |
439 | } | 399 | } |
440 | module_init(dmatest_init); | 400 | /* when compiled-in wait for drivers to load first */ |
401 | late_initcall(dmatest_init); | ||
441 | 402 | ||
442 | static void __exit dmatest_exit(void) | 403 | static void __exit dmatest_exit(void) |
443 | { | 404 | { |
444 | dma_async_client_unregister(&dmatest_client); | 405 | struct dmatest_chan *dtc, *_dtc; |
406 | |||
407 | list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { | ||
408 | list_del(&dtc->node); | ||
409 | dmatest_cleanup_channel(dtc); | ||
410 | pr_debug("dmatest: dropped channel %s\n", | ||
411 | dma_chan_name(dtc->chan)); | ||
412 | dma_release_channel(dtc->chan); | ||
413 | } | ||
445 | } | 414 | } |
446 | module_exit(dmatest_exit); | 415 | module_exit(dmatest_exit); |
447 | 416 | ||
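The dmatest conversion above is the template for the new consumer model: instead of registering a dma_client and reacting to resource events, a client builds a capability mask, asks for channels with an optional filter callback, and releases them explicitly. A minimal sketch of that pattern, assuming only the calls visible in the diff (my_filter() and the name-matching policy are illustrative):

	#include <linux/dmaengine.h>
	#include <linux/string.h>

	/* return true to claim a candidate channel */
	static bool my_filter(struct dma_chan *chan, void *param)
	{
		const char *wanted = param;

		return !wanted || strcmp(dma_chan_name(chan), wanted) == 0;
	}

	static struct dma_chan *example_get_memcpy_chan(const char *wanted)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		chan = dma_request_channel(mask, my_filter, (void *)wanted);
		/* ... use the channel, then dma_release_channel(chan) ... */
		return chan;	/* may be NULL if nothing matched */
	}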
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 0778d99aea7c..6b702cc46b3d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -70,6 +70,15 @@ | |||
70 | * the controller, though. | 70 | * the controller, though. |
71 | */ | 71 | */ |
72 | 72 | ||
73 | static struct device *chan2dev(struct dma_chan *chan) | ||
74 | { | ||
75 | return &chan->dev->device; | ||
76 | } | ||
77 | static struct device *chan2parent(struct dma_chan *chan) | ||
78 | { | ||
79 | return chan->dev->device.parent; | ||
80 | } | ||
81 | |||
73 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | 82 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
74 | { | 83 | { |
75 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | 84 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); |
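The chan2dev()/chan2parent() helpers reflect that chan->dev is no longer an embedded struct device but a pointer to a small wrapper object. The rough shape implied by the accesses in this patch (chan->dev->device, chan->dev->chan) is sketched below; the struct's actual name and full contents live in include/linux/dmaengine.h, so treat this as a guess for orientation only.

	/* Assumed layout, inferred from this diff; not the authoritative
	 * definition. */
	struct dma_chan_dev {
		struct device	device;	/* what chan2dev() returns */
		struct dma_chan	*chan;	/* cleared during unregister */
		/* ... */
	};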
@@ -93,12 +102,12 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | |||
93 | ret = desc; | 102 | ret = desc; |
94 | break; | 103 | break; |
95 | } | 104 | } |
96 | dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc); | 105 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
97 | i++; | 106 | i++; |
98 | } | 107 | } |
99 | spin_unlock_bh(&dwc->lock); | 108 | spin_unlock_bh(&dwc->lock); |
100 | 109 | ||
101 | dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i); | 110 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); |
102 | 111 | ||
103 | return ret; | 112 | return ret; |
104 | } | 113 | } |
@@ -108,10 +117,10 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
108 | struct dw_desc *child; | 117 | struct dw_desc *child; |
109 | 118 | ||
110 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 119 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) |
111 | dma_sync_single_for_cpu(dwc->chan.dev.parent, | 120 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
112 | child->txd.phys, sizeof(child->lli), | 121 | child->txd.phys, sizeof(child->lli), |
113 | DMA_TO_DEVICE); | 122 | DMA_TO_DEVICE); |
114 | dma_sync_single_for_cpu(dwc->chan.dev.parent, | 123 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
115 | desc->txd.phys, sizeof(desc->lli), | 124 | desc->txd.phys, sizeof(desc->lli), |
116 | DMA_TO_DEVICE); | 125 | DMA_TO_DEVICE); |
117 | } | 126 | } |
@@ -129,11 +138,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
129 | 138 | ||
130 | spin_lock_bh(&dwc->lock); | 139 | spin_lock_bh(&dwc->lock); |
131 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 140 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) |
132 | dev_vdbg(&dwc->chan.dev, | 141 | dev_vdbg(chan2dev(&dwc->chan), |
133 | "moving child desc %p to freelist\n", | 142 | "moving child desc %p to freelist\n", |
134 | child); | 143 | child); |
135 | list_splice_init(&desc->txd.tx_list, &dwc->free_list); | 144 | list_splice_init(&desc->txd.tx_list, &dwc->free_list); |
136 | dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc); | 145 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
137 | list_add(&desc->desc_node, &dwc->free_list); | 146 | list_add(&desc->desc_node, &dwc->free_list); |
138 | spin_unlock_bh(&dwc->lock); | 147 | spin_unlock_bh(&dwc->lock); |
139 | } | 148 | } |
@@ -163,9 +172,9 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
163 | 172 | ||
164 | /* ASSERT: channel is idle */ | 173 | /* ASSERT: channel is idle */ |
165 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 174 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
166 | dev_err(&dwc->chan.dev, | 175 | dev_err(chan2dev(&dwc->chan), |
167 | "BUG: Attempted to start non-idle channel\n"); | 176 | "BUG: Attempted to start non-idle channel\n"); |
168 | dev_err(&dwc->chan.dev, | 177 | dev_err(chan2dev(&dwc->chan), |
169 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 178 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
170 | channel_readl(dwc, SAR), | 179 | channel_readl(dwc, SAR), |
171 | channel_readl(dwc, DAR), | 180 | channel_readl(dwc, DAR), |
@@ -193,7 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
193 | void *param; | 202 | void *param; |
194 | struct dma_async_tx_descriptor *txd = &desc->txd; | 203 | struct dma_async_tx_descriptor *txd = &desc->txd; |
195 | 204 | ||
196 | dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie); | 205 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
197 | 206 | ||
198 | dwc->completed = txd->cookie; | 207 | dwc->completed = txd->cookie; |
199 | callback = txd->callback; | 208 | callback = txd->callback; |
@@ -208,11 +217,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
208 | * mapped before they were submitted... | 217 | * mapped before they were submitted... |
209 | */ | 218 | */ |
210 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) | 219 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) |
211 | dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len, | 220 | dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar, |
212 | DMA_FROM_DEVICE); | 221 | desc->len, DMA_FROM_DEVICE); |
213 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) | 222 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) |
214 | dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len, | 223 | dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar, |
215 | DMA_TO_DEVICE); | 224 | desc->len, DMA_TO_DEVICE); |
216 | 225 | ||
217 | /* | 226 | /* |
218 | * The API requires that no submissions are done from a | 227 | * The API requires that no submissions are done from a |
@@ -228,7 +237,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
228 | LIST_HEAD(list); | 237 | LIST_HEAD(list); |
229 | 238 | ||
230 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 239 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
231 | dev_err(&dwc->chan.dev, | 240 | dev_err(chan2dev(&dwc->chan), |
232 | "BUG: XFER bit set, but channel not idle!\n"); | 241 | "BUG: XFER bit set, but channel not idle!\n"); |
233 | 242 | ||
234 | /* Try to continue after resetting the channel... */ | 243 | /* Try to continue after resetting the channel... */ |
@@ -273,7 +282,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
273 | return; | 282 | return; |
274 | } | 283 | } |
275 | 284 | ||
276 | dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp); | 285 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); |
277 | 286 | ||
278 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 287 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
279 | if (desc->lli.llp == llp) | 288 | if (desc->lli.llp == llp) |
@@ -292,7 +301,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
292 | dwc_descriptor_complete(dwc, desc); | 301 | dwc_descriptor_complete(dwc, desc); |
293 | } | 302 | } |
294 | 303 | ||
295 | dev_err(&dwc->chan.dev, | 304 | dev_err(chan2dev(&dwc->chan), |
296 | "BUG: All descriptors done, but channel not idle!\n"); | 305 | "BUG: All descriptors done, but channel not idle!\n"); |
297 | 306 | ||
298 | /* Try to continue after resetting the channel... */ | 307 | /* Try to continue after resetting the channel... */ |
@@ -308,7 +317,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
308 | 317 | ||
309 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 318 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
310 | { | 319 | { |
311 | dev_printk(KERN_CRIT, &dwc->chan.dev, | 320 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
312 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | 321 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
313 | lli->sar, lli->dar, lli->llp, | 322 | lli->sar, lli->dar, lli->llp, |
314 | lli->ctlhi, lli->ctllo); | 323 | lli->ctlhi, lli->ctllo); |
@@ -342,9 +351,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
342 | * controller flagged an error instead of scribbling over | 351 | * controller flagged an error instead of scribbling over |
343 | * random memory locations. | 352 | * random memory locations. |
344 | */ | 353 | */ |
345 | dev_printk(KERN_CRIT, &dwc->chan.dev, | 354 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
346 | "Bad descriptor submitted for DMA!\n"); | 355 | "Bad descriptor submitted for DMA!\n"); |
347 | dev_printk(KERN_CRIT, &dwc->chan.dev, | 356 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
348 | " cookie: %d\n", bad_desc->txd.cookie); | 357 | " cookie: %d\n", bad_desc->txd.cookie); |
349 | dwc_dump_lli(dwc, &bad_desc->lli); | 358 | dwc_dump_lli(dwc, &bad_desc->lli); |
350 | list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) | 359 | list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) |
@@ -442,12 +451,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
442 | * for DMA. But this is hard to do in a race-free manner. | 451 | * for DMA. But this is hard to do in a race-free manner. |
443 | */ | 452 | */ |
444 | if (list_empty(&dwc->active_list)) { | 453 | if (list_empty(&dwc->active_list)) { |
445 | dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n", | 454 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
446 | desc->txd.cookie); | 455 | desc->txd.cookie); |
447 | dwc_dostart(dwc, desc); | 456 | dwc_dostart(dwc, desc); |
448 | list_add_tail(&desc->desc_node, &dwc->active_list); | 457 | list_add_tail(&desc->desc_node, &dwc->active_list); |
449 | } else { | 458 | } else { |
450 | dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n", | 459 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", |
451 | desc->txd.cookie); | 460 | desc->txd.cookie); |
452 | 461 | ||
453 | list_add_tail(&desc->desc_node, &dwc->queue); | 462 | list_add_tail(&desc->desc_node, &dwc->queue); |
@@ -472,11 +481,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
472 | unsigned int dst_width; | 481 | unsigned int dst_width; |
473 | u32 ctllo; | 482 | u32 ctllo; |
474 | 483 | ||
475 | dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", | 484 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", |
476 | dest, src, len, flags); | 485 | dest, src, len, flags); |
477 | 486 | ||
478 | if (unlikely(!len)) { | 487 | if (unlikely(!len)) { |
479 | dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n"); | 488 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); |
480 | return NULL; | 489 | return NULL; |
481 | } | 490 | } |
482 | 491 | ||
@@ -516,7 +525,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
516 | first = desc; | 525 | first = desc; |
517 | } else { | 526 | } else { |
518 | prev->lli.llp = desc->txd.phys; | 527 | prev->lli.llp = desc->txd.phys; |
519 | dma_sync_single_for_device(chan->dev.parent, | 528 | dma_sync_single_for_device(chan2parent(chan), |
520 | prev->txd.phys, sizeof(prev->lli), | 529 | prev->txd.phys, sizeof(prev->lli), |
521 | DMA_TO_DEVICE); | 530 | DMA_TO_DEVICE); |
522 | list_add_tail(&desc->desc_node, | 531 | list_add_tail(&desc->desc_node, |
@@ -531,7 +540,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
531 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 540 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
532 | 541 | ||
533 | prev->lli.llp = 0; | 542 | prev->lli.llp = 0; |
534 | dma_sync_single_for_device(chan->dev.parent, | 543 | dma_sync_single_for_device(chan2parent(chan), |
535 | prev->txd.phys, sizeof(prev->lli), | 544 | prev->txd.phys, sizeof(prev->lli), |
536 | DMA_TO_DEVICE); | 545 | DMA_TO_DEVICE); |
537 | 546 | ||
@@ -562,15 +571,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
562 | struct scatterlist *sg; | 571 | struct scatterlist *sg; |
563 | size_t total_len = 0; | 572 | size_t total_len = 0; |
564 | 573 | ||
565 | dev_vdbg(&chan->dev, "prep_dma_slave\n"); | 574 | dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); |
566 | 575 | ||
567 | if (unlikely(!dws || !sg_len)) | 576 | if (unlikely(!dws || !sg_len)) |
568 | return NULL; | 577 | return NULL; |
569 | 578 | ||
570 | reg_width = dws->slave.reg_width; | 579 | reg_width = dws->reg_width; |
571 | prev = first = NULL; | 580 | prev = first = NULL; |
572 | 581 | ||
573 | sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction); | 582 | sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction); |
574 | 583 | ||
575 | switch (direction) { | 584 | switch (direction) { |
576 | case DMA_TO_DEVICE: | 585 | case DMA_TO_DEVICE: |
@@ -579,7 +588,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
579 | | DWC_CTLL_DST_FIX | 588 | | DWC_CTLL_DST_FIX |
580 | | DWC_CTLL_SRC_INC | 589 | | DWC_CTLL_SRC_INC |
581 | | DWC_CTLL_FC_M2P); | 590 | | DWC_CTLL_FC_M2P); |
582 | reg = dws->slave.tx_reg; | 591 | reg = dws->tx_reg; |
583 | for_each_sg(sgl, sg, sg_len, i) { | 592 | for_each_sg(sgl, sg, sg_len, i) { |
584 | struct dw_desc *desc; | 593 | struct dw_desc *desc; |
585 | u32 len; | 594 | u32 len; |
@@ -587,7 +596,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
587 | 596 | ||
588 | desc = dwc_desc_get(dwc); | 597 | desc = dwc_desc_get(dwc); |
589 | if (!desc) { | 598 | if (!desc) { |
590 | dev_err(&chan->dev, | 599 | dev_err(chan2dev(chan), |
591 | "not enough descriptors available\n"); | 600 | "not enough descriptors available\n"); |
592 | goto err_desc_get; | 601 | goto err_desc_get; |
593 | } | 602 | } |
@@ -607,7 +616,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
607 | first = desc; | 616 | first = desc; |
608 | } else { | 617 | } else { |
609 | prev->lli.llp = desc->txd.phys; | 618 | prev->lli.llp = desc->txd.phys; |
610 | dma_sync_single_for_device(chan->dev.parent, | 619 | dma_sync_single_for_device(chan2parent(chan), |
611 | prev->txd.phys, | 620 | prev->txd.phys, |
612 | sizeof(prev->lli), | 621 | sizeof(prev->lli), |
613 | DMA_TO_DEVICE); | 622 | DMA_TO_DEVICE); |
@@ -625,7 +634,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
625 | | DWC_CTLL_SRC_FIX | 634 | | DWC_CTLL_SRC_FIX |
626 | | DWC_CTLL_FC_P2M); | 635 | | DWC_CTLL_FC_P2M); |
627 | 636 | ||
628 | reg = dws->slave.rx_reg; | 637 | reg = dws->rx_reg; |
629 | for_each_sg(sgl, sg, sg_len, i) { | 638 | for_each_sg(sgl, sg, sg_len, i) { |
630 | struct dw_desc *desc; | 639 | struct dw_desc *desc; |
631 | u32 len; | 640 | u32 len; |
@@ -633,7 +642,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
633 | 642 | ||
634 | desc = dwc_desc_get(dwc); | 643 | desc = dwc_desc_get(dwc); |
635 | if (!desc) { | 644 | if (!desc) { |
636 | dev_err(&chan->dev, | 645 | dev_err(chan2dev(chan), |
637 | "not enough descriptors available\n"); | 646 | "not enough descriptors available\n"); |
638 | goto err_desc_get; | 647 | goto err_desc_get; |
639 | } | 648 | } |
@@ -653,7 +662,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
653 | first = desc; | 662 | first = desc; |
654 | } else { | 663 | } else { |
655 | prev->lli.llp = desc->txd.phys; | 664 | prev->lli.llp = desc->txd.phys; |
656 | dma_sync_single_for_device(chan->dev.parent, | 665 | dma_sync_single_for_device(chan2parent(chan), |
657 | prev->txd.phys, | 666 | prev->txd.phys, |
658 | sizeof(prev->lli), | 667 | sizeof(prev->lli), |
659 | DMA_TO_DEVICE); | 668 | DMA_TO_DEVICE); |
@@ -673,7 +682,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
673 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 682 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
674 | 683 | ||
675 | prev->lli.llp = 0; | 684 | prev->lli.llp = 0; |
676 | dma_sync_single_for_device(chan->dev.parent, | 685 | dma_sync_single_for_device(chan2parent(chan), |
677 | prev->txd.phys, sizeof(prev->lli), | 686 | prev->txd.phys, sizeof(prev->lli), |
678 | DMA_TO_DEVICE); | 687 | DMA_TO_DEVICE); |
679 | 688 | ||
@@ -758,29 +767,21 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
758 | spin_unlock_bh(&dwc->lock); | 767 | spin_unlock_bh(&dwc->lock); |
759 | } | 768 | } |
760 | 769 | ||
761 | static int dwc_alloc_chan_resources(struct dma_chan *chan, | 770 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
762 | struct dma_client *client) | ||
763 | { | 771 | { |
764 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 772 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
765 | struct dw_dma *dw = to_dw_dma(chan->device); | 773 | struct dw_dma *dw = to_dw_dma(chan->device); |
766 | struct dw_desc *desc; | 774 | struct dw_desc *desc; |
767 | struct dma_slave *slave; | ||
768 | struct dw_dma_slave *dws; | 775 | struct dw_dma_slave *dws; |
769 | int i; | 776 | int i; |
770 | u32 cfghi; | 777 | u32 cfghi; |
771 | u32 cfglo; | 778 | u32 cfglo; |
772 | 779 | ||
773 | dev_vdbg(&chan->dev, "alloc_chan_resources\n"); | 780 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); |
774 | |||
775 | /* Channels doing slave DMA can only handle one client. */ | ||
776 | if (dwc->dws || client->slave) { | ||
777 | if (chan->client_count) | ||
778 | return -EBUSY; | ||
779 | } | ||
780 | 781 | ||
781 | /* ASSERT: channel is idle */ | 782 | /* ASSERT: channel is idle */ |
782 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 783 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
783 | dev_dbg(&chan->dev, "DMA channel not idle?\n"); | 784 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
784 | return -EIO; | 785 | return -EIO; |
785 | } | 786 | } |
786 | 787 | ||
@@ -789,23 +790,17 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan, | |||
789 | cfghi = DWC_CFGH_FIFO_MODE; | 790 | cfghi = DWC_CFGH_FIFO_MODE; |
790 | cfglo = 0; | 791 | cfglo = 0; |
791 | 792 | ||
792 | slave = client->slave; | 793 | dws = dwc->dws; |
793 | if (slave) { | 794 | if (dws) { |
794 | /* | 795 | /* |
795 | * We need controller-specific data to set up slave | 796 | * We need controller-specific data to set up slave |
796 | * transfers. | 797 | * transfers. |
797 | */ | 798 | */ |
798 | BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev); | 799 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); |
799 | |||
800 | dws = container_of(slave, struct dw_dma_slave, slave); | ||
801 | 800 | ||
802 | dwc->dws = dws; | ||
803 | cfghi = dws->cfg_hi; | 801 | cfghi = dws->cfg_hi; |
804 | cfglo = dws->cfg_lo; | 802 | cfglo = dws->cfg_lo; |
805 | } else { | ||
806 | dwc->dws = NULL; | ||
807 | } | 803 | } |
808 | |||
809 | channel_writel(dwc, CFG_LO, cfglo); | 804 | channel_writel(dwc, CFG_LO, cfglo); |
810 | channel_writel(dwc, CFG_HI, cfghi); | 805 | channel_writel(dwc, CFG_HI, cfghi); |
811 | 806 | ||
@@ -822,7 +817,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan, | |||
822 | 817 | ||
823 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | 818 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); |
824 | if (!desc) { | 819 | if (!desc) { |
825 | dev_info(&chan->dev, | 820 | dev_info(chan2dev(chan), |
826 | "only allocated %d descriptors\n", i); | 821 | "only allocated %d descriptors\n", i); |
827 | spin_lock_bh(&dwc->lock); | 822 | spin_lock_bh(&dwc->lock); |
828 | break; | 823 | break; |
@@ -832,7 +827,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan, | |||
832 | desc->txd.tx_submit = dwc_tx_submit; | 827 | desc->txd.tx_submit = dwc_tx_submit; |
833 | desc->txd.flags = DMA_CTRL_ACK; | 828 | desc->txd.flags = DMA_CTRL_ACK; |
834 | INIT_LIST_HEAD(&desc->txd.tx_list); | 829 | INIT_LIST_HEAD(&desc->txd.tx_list); |
835 | desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli, | 830 | desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, |
836 | sizeof(desc->lli), DMA_TO_DEVICE); | 831 | sizeof(desc->lli), DMA_TO_DEVICE); |
837 | dwc_desc_put(dwc, desc); | 832 | dwc_desc_put(dwc, desc); |
838 | 833 | ||
@@ -847,7 +842,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan, | |||
847 | 842 | ||
848 | spin_unlock_bh(&dwc->lock); | 843 | spin_unlock_bh(&dwc->lock); |
849 | 844 | ||
850 | dev_dbg(&chan->dev, | 845 | dev_dbg(chan2dev(chan), |
851 | "alloc_chan_resources allocated %d descriptors\n", i); | 846 | "alloc_chan_resources allocated %d descriptors\n", i); |
852 | 847 | ||
853 | return i; | 848 | return i; |
@@ -860,7 +855,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
860 | struct dw_desc *desc, *_desc; | 855 | struct dw_desc *desc, *_desc; |
861 | LIST_HEAD(list); | 856 | LIST_HEAD(list); |
862 | 857 | ||
863 | dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n", | 858 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", |
864 | dwc->descs_allocated); | 859 | dwc->descs_allocated); |
865 | 860 | ||
866 | /* ASSERT: channel is idle */ | 861 | /* ASSERT: channel is idle */ |
@@ -881,13 +876,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
881 | spin_unlock_bh(&dwc->lock); | 876 | spin_unlock_bh(&dwc->lock); |
882 | 877 | ||
883 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 878 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
884 | dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc); | 879 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
885 | dma_unmap_single(chan->dev.parent, desc->txd.phys, | 880 | dma_unmap_single(chan2parent(chan), desc->txd.phys, |
886 | sizeof(desc->lli), DMA_TO_DEVICE); | 881 | sizeof(desc->lli), DMA_TO_DEVICE); |
887 | kfree(desc); | 882 | kfree(desc); |
888 | } | 883 | } |
889 | 884 | ||
890 | dev_vdbg(&chan->dev, "free_chan_resources done\n"); | 885 | dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); |
891 | } | 886 | } |
892 | 887 | ||
893 | /*----------------------------------------------------------------------*/ | 888 | /*----------------------------------------------------------------------*/ |
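Across every driver touched here (dw_dmac, fsldma, ioat, iop-adma, mv_xor), the channel-resource hooks lose their struct dma_client argument. The provider-side shape these conversions imply is roughly the abbreviated excerpt below; field names match the callbacks used elsewhere in this patch, but the excerpt is not the full struct definition.

	/* Abbreviated sketch of the reworked provider hooks. */
	struct dma_device {
		/* ... */
		int  (*device_alloc_chan_resources)(struct dma_chan *chan);
		void (*device_free_chan_resources)(struct dma_chan *chan);
		void (*device_issue_pending)(struct dma_chan *chan);
		/* ... */
	};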
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b95dcce447e..ca70a21afc68 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | |||
366 | * | 366 | * |
367 | * Return - The number of descriptors allocated. | 367 | * Return - The number of descriptors allocated. |
368 | */ | 368 | */ |
369 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan, | 369 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) |
370 | struct dma_client *client) | ||
371 | { | 370 | { |
372 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | 371 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); |
373 | 372 | ||
@@ -823,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, | |||
823 | */ | 822 | */ |
824 | WARN_ON(fdev->feature != new_fsl_chan->feature); | 823 | WARN_ON(fdev->feature != new_fsl_chan->feature); |
825 | 824 | ||
826 | new_fsl_chan->dev = &new_fsl_chan->common.dev; | 825 | new_fsl_chan->dev = &new_fsl_chan->common.dev->device; |
827 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, | 826 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, |
828 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); | 827 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); |
829 | 828 | ||
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 9b16a3af9a0a..4105d6575b64 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -75,60 +75,10 @@ static int ioat_dca_enabled = 1; | |||
75 | module_param(ioat_dca_enabled, int, 0644); | 75 | module_param(ioat_dca_enabled, int, 0644); |
76 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | 76 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); |
77 | 77 | ||
78 | static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase) | ||
79 | { | ||
80 | struct ioat_device *device = pci_get_drvdata(pdev); | ||
81 | u8 version; | ||
82 | int err = 0; | ||
83 | |||
84 | version = readb(iobase + IOAT_VER_OFFSET); | ||
85 | switch (version) { | ||
86 | case IOAT_VER_1_2: | ||
87 | device->dma = ioat_dma_probe(pdev, iobase); | ||
88 | if (device->dma && ioat_dca_enabled) | ||
89 | device->dca = ioat_dca_init(pdev, iobase); | ||
90 | break; | ||
91 | case IOAT_VER_2_0: | ||
92 | device->dma = ioat_dma_probe(pdev, iobase); | ||
93 | if (device->dma && ioat_dca_enabled) | ||
94 | device->dca = ioat2_dca_init(pdev, iobase); | ||
95 | break; | ||
96 | case IOAT_VER_3_0: | ||
97 | device->dma = ioat_dma_probe(pdev, iobase); | ||
98 | if (device->dma && ioat_dca_enabled) | ||
99 | device->dca = ioat3_dca_init(pdev, iobase); | ||
100 | break; | ||
101 | default: | ||
102 | err = -ENODEV; | ||
103 | break; | ||
104 | } | ||
105 | if (!device->dma) | ||
106 | err = -ENODEV; | ||
107 | return err; | ||
108 | } | ||
109 | |||
110 | static void ioat_shutdown_functionality(struct pci_dev *pdev) | ||
111 | { | ||
112 | struct ioat_device *device = pci_get_drvdata(pdev); | ||
113 | |||
114 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | ||
115 | if (device->dca) { | ||
116 | unregister_dca_provider(device->dca); | ||
117 | free_dca_provider(device->dca); | ||
118 | device->dca = NULL; | ||
119 | } | ||
120 | |||
121 | if (device->dma) { | ||
122 | ioat_dma_remove(device->dma); | ||
123 | device->dma = NULL; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | static struct pci_driver ioat_pci_driver = { | 78 | static struct pci_driver ioat_pci_driver = { |
128 | .name = "ioatdma", | 79 | .name = "ioatdma", |
129 | .id_table = ioat_pci_tbl, | 80 | .id_table = ioat_pci_tbl, |
130 | .probe = ioat_probe, | 81 | .probe = ioat_probe, |
131 | .shutdown = ioat_shutdown_functionality, | ||
132 | .remove = __devexit_p(ioat_remove), | 82 | .remove = __devexit_p(ioat_remove), |
133 | }; | 83 | }; |
134 | 84 | ||
@@ -179,7 +129,29 @@ static int __devinit ioat_probe(struct pci_dev *pdev, | |||
179 | 129 | ||
180 | pci_set_master(pdev); | 130 | pci_set_master(pdev); |
181 | 131 | ||
182 | err = ioat_setup_functionality(pdev, iobase); | 132 | switch (readb(iobase + IOAT_VER_OFFSET)) { |
133 | case IOAT_VER_1_2: | ||
134 | device->dma = ioat_dma_probe(pdev, iobase); | ||
135 | if (device->dma && ioat_dca_enabled) | ||
136 | device->dca = ioat_dca_init(pdev, iobase); | ||
137 | break; | ||
138 | case IOAT_VER_2_0: | ||
139 | device->dma = ioat_dma_probe(pdev, iobase); | ||
140 | if (device->dma && ioat_dca_enabled) | ||
141 | device->dca = ioat2_dca_init(pdev, iobase); | ||
142 | break; | ||
143 | case IOAT_VER_3_0: | ||
144 | device->dma = ioat_dma_probe(pdev, iobase); | ||
145 | if (device->dma && ioat_dca_enabled) | ||
146 | device->dca = ioat3_dca_init(pdev, iobase); | ||
147 | break; | ||
148 | default: | ||
149 | err = -ENODEV; | ||
150 | break; | ||
151 | } | ||
152 | if (!device->dma) | ||
153 | err = -ENODEV; | ||
154 | |||
183 | if (err) | 155 | if (err) |
184 | goto err_version; | 156 | goto err_version; |
185 | 157 | ||
@@ -198,17 +170,21 @@ err_enable_device: | |||
198 | return err; | 170 | return err; |
199 | } | 171 | } |
200 | 172 | ||
201 | /* | ||
202 | * It is unsafe to remove this module: if removed while a requested | ||
203 | * dma is outstanding, esp. from tcp, it is possible to hang while | ||
204 | * waiting for something that will never finish. However, if you're | ||
205 | * feeling lucky, this usually works just fine. | ||
206 | */ | ||
207 | static void __devexit ioat_remove(struct pci_dev *pdev) | 173 | static void __devexit ioat_remove(struct pci_dev *pdev) |
208 | { | 174 | { |
209 | struct ioat_device *device = pci_get_drvdata(pdev); | 175 | struct ioat_device *device = pci_get_drvdata(pdev); |
210 | 176 | ||
211 | ioat_shutdown_functionality(pdev); | 177 | dev_err(&pdev->dev, "Removing dma and dca services\n"); |
178 | if (device->dca) { | ||
179 | unregister_dca_provider(device->dca); | ||
180 | free_dca_provider(device->dca); | ||
181 | device->dca = NULL; | ||
182 | } | ||
183 | |||
184 | if (device->dma) { | ||
185 | ioat_dma_remove(device->dma); | ||
186 | device->dma = NULL; | ||
187 | } | ||
212 | 188 | ||
213 | kfree(device); | 189 | kfree(device); |
214 | } | 190 | } |
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 6607fdd00b1c..b3759c4b6536 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) | |||
734 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors | 734 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors |
735 | * @chan: the channel to be filled out | 735 | * @chan: the channel to be filled out |
736 | */ | 736 | */ |
737 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan, | 737 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) |
738 | struct dma_client *client) | ||
739 | { | 738 | { |
740 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | 739 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); |
741 | struct ioat_desc_sw *desc; | 740 | struct ioat_desc_sw *desc; |
@@ -1341,12 +1340,11 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) | |||
1341 | */ | 1340 | */ |
1342 | #define IOAT_TEST_SIZE 2000 | 1341 | #define IOAT_TEST_SIZE 2000 |
1343 | 1342 | ||
1344 | DECLARE_COMPLETION(test_completion); | ||
1345 | static void ioat_dma_test_callback(void *dma_async_param) | 1343 | static void ioat_dma_test_callback(void *dma_async_param) |
1346 | { | 1344 | { |
1347 | printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n", | 1345 | struct completion *cmp = dma_async_param; |
1348 | dma_async_param); | 1346 | |
1349 | complete(&test_completion); | 1347 | complete(cmp); |
1350 | } | 1348 | } |
1351 | 1349 | ||
1352 | /** | 1350 | /** |
@@ -1363,6 +1361,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1363 | dma_addr_t dma_dest, dma_src; | 1361 | dma_addr_t dma_dest, dma_src; |
1364 | dma_cookie_t cookie; | 1362 | dma_cookie_t cookie; |
1365 | int err = 0; | 1363 | int err = 0; |
1364 | struct completion cmp; | ||
1366 | 1365 | ||
1367 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | 1366 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); |
1368 | if (!src) | 1367 | if (!src) |
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1381 | dma_chan = container_of(device->common.channels.next, | 1380 | dma_chan = container_of(device->common.channels.next, |
1382 | struct dma_chan, | 1381 | struct dma_chan, |
1383 | device_node); | 1382 | device_node); |
1384 | if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) { | 1383 | if (device->common.device_alloc_chan_resources(dma_chan) < 1) { |
1385 | dev_err(&device->pdev->dev, | 1384 | dev_err(&device->pdev->dev, |
1386 | "selftest cannot allocate chan resource\n"); | 1385 | "selftest cannot allocate chan resource\n"); |
1387 | err = -ENODEV; | 1386 | err = -ENODEV; |
@@ -1402,8 +1401,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1402 | } | 1401 | } |
1403 | 1402 | ||
1404 | async_tx_ack(tx); | 1403 | async_tx_ack(tx); |
1404 | init_completion(&cmp); | ||
1405 | tx->callback = ioat_dma_test_callback; | 1405 | tx->callback = ioat_dma_test_callback; |
1406 | tx->callback_param = (void *)0x8086; | 1406 | tx->callback_param = &cmp; |
1407 | cookie = tx->tx_submit(tx); | 1407 | cookie = tx->tx_submit(tx); |
1408 | if (cookie < 0) { | 1408 | if (cookie < 0) { |
1409 | dev_err(&device->pdev->dev, | 1409 | dev_err(&device->pdev->dev, |
@@ -1413,7 +1413,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1413 | } | 1413 | } |
1414 | device->common.device_issue_pending(dma_chan); | 1414 | device->common.device_issue_pending(dma_chan); |
1415 | 1415 | ||
1416 | wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000)); | 1416 | wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
1417 | 1417 | ||
1418 | if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) | 1418 | if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) |
1419 | != DMA_SUCCESS) { | 1419 | != DMA_SUCCESS) { |
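The self-test rework above drops the global DECLARE_COMPLETION in favor of an on-stack completion passed through callback_param, which keeps the wait local to the test and avoids reuse hazards. A distilled sketch of that pattern, assuming the channel and descriptor have already been set up:

	/* Sketch: submit a descriptor and block (with timeout) until its
	 * completion callback fires. */
	static void example_done(void *param)
	{
		complete(param);	/* param is the on-stack completion */
	}

	static int example_submit_and_wait(struct dma_async_tx_descriptor *tx,
					   struct dma_chan *chan)
	{
		struct completion cmp;
		dma_cookie_t cookie;

		init_completion(&cmp);
		tx->callback = example_done;
		tx->callback_param = &cmp;

		cookie = tx->tx_submit(tx);
		if (cookie < 0)
			return -ENODEV;

		chan->device->device_issue_pending(chan);

		if (!wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)))
			return -ETIMEDOUT;
		return 0;
	}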
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 6be317262200..ea5440dd10dc 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/async_tx.h> | ||
28 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
29 | #include <linux/dma-mapping.h> | 28 | #include <linux/dma-mapping.h> |
30 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
@@ -116,7 +115,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | |||
116 | } | 115 | } |
117 | 116 | ||
118 | /* run dependent operations */ | 117 | /* run dependent operations */ |
119 | async_tx_run_dependencies(&desc->async_tx); | 118 | dma_run_dependencies(&desc->async_tx); |
120 | 119 | ||
121 | return cookie; | 120 | return cookie; |
122 | } | 121 | } |
@@ -270,8 +269,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
270 | break; | 269 | break; |
271 | } | 270 | } |
272 | 271 | ||
273 | BUG_ON(!seen_current); | ||
274 | |||
275 | if (cookie > 0) { | 272 | if (cookie > 0) { |
276 | iop_chan->completed_cookie = cookie; | 273 | iop_chan->completed_cookie = cookie; |
277 | pr_debug("\tcompleted cookie %d\n", cookie); | 274 | pr_debug("\tcompleted cookie %d\n", cookie); |
@@ -471,8 +468,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); | |||
471 | * greater than 2x the number slots needed to satisfy a device->max_xor | 468 | * greater than 2x the number slots needed to satisfy a device->max_xor |
472 | * request. | 469 | * request. |
473 | * */ | 470 | * */ |
474 | static int iop_adma_alloc_chan_resources(struct dma_chan *chan, | 471 | static int iop_adma_alloc_chan_resources(struct dma_chan *chan) |
475 | struct dma_client *client) | ||
476 | { | 472 | { |
477 | char *hw_desc; | 473 | char *hw_desc; |
478 | int idx; | 474 | int idx; |
@@ -866,7 +862,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
866 | dma_chan = container_of(device->common.channels.next, | 862 | dma_chan = container_of(device->common.channels.next, |
867 | struct dma_chan, | 863 | struct dma_chan, |
868 | device_node); | 864 | device_node); |
869 | if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { | 865 | if (iop_adma_alloc_chan_resources(dma_chan) < 1) { |
870 | err = -ENODEV; | 866 | err = -ENODEV; |
871 | goto out; | 867 | goto out; |
872 | } | 868 | } |
@@ -964,7 +960,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
964 | dma_chan = container_of(device->common.channels.next, | 960 | dma_chan = container_of(device->common.channels.next, |
965 | struct dma_chan, | 961 | struct dma_chan, |
966 | device_node); | 962 | device_node); |
967 | if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { | 963 | if (iop_adma_alloc_chan_resources(dma_chan) < 1) { |
968 | err = -ENODEV; | 964 | err = -ENODEV; |
969 | goto out; | 965 | goto out; |
970 | } | 966 | } |
@@ -1115,26 +1111,13 @@ static int __devexit iop_adma_remove(struct platform_device *dev) | |||
1115 | struct iop_adma_device *device = platform_get_drvdata(dev); | 1111 | struct iop_adma_device *device = platform_get_drvdata(dev); |
1116 | struct dma_chan *chan, *_chan; | 1112 | struct dma_chan *chan, *_chan; |
1117 | struct iop_adma_chan *iop_chan; | 1113 | struct iop_adma_chan *iop_chan; |
1118 | int i; | ||
1119 | struct iop_adma_platform_data *plat_data = dev->dev.platform_data; | 1114 | struct iop_adma_platform_data *plat_data = dev->dev.platform_data; |
1120 | 1115 | ||
1121 | dma_async_device_unregister(&device->common); | 1116 | dma_async_device_unregister(&device->common); |
1122 | 1117 | ||
1123 | for (i = 0; i < 3; i++) { | ||
1124 | unsigned int irq; | ||
1125 | irq = platform_get_irq(dev, i); | ||
1126 | free_irq(irq, device); | ||
1127 | } | ||
1128 | |||
1129 | dma_free_coherent(&dev->dev, plat_data->pool_size, | 1118 | dma_free_coherent(&dev->dev, plat_data->pool_size, |
1130 | device->dma_desc_pool_virt, device->dma_desc_pool); | 1119 | device->dma_desc_pool_virt, device->dma_desc_pool); |
1131 | 1120 | ||
1132 | do { | ||
1133 | struct resource *res; | ||
1134 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
1135 | release_mem_region(res->start, res->end - res->start); | ||
1136 | } while (0); | ||
1137 | |||
1138 | list_for_each_entry_safe(chan, _chan, &device->common.channels, | 1121 | list_for_each_entry_safe(chan, _chan, &device->common.channels, |
1139 | device_node) { | 1122 | device_node) { |
1140 | iop_chan = to_iop_adma_chan(chan); | 1123 | iop_chan = to_iop_adma_chan(chan); |
@@ -1255,7 +1238,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1255 | spin_lock_init(&iop_chan->lock); | 1238 | spin_lock_init(&iop_chan->lock); |
1256 | INIT_LIST_HEAD(&iop_chan->chain); | 1239 | INIT_LIST_HEAD(&iop_chan->chain); |
1257 | INIT_LIST_HEAD(&iop_chan->all_slots); | 1240 | INIT_LIST_HEAD(&iop_chan->all_slots); |
1258 | INIT_RCU_HEAD(&iop_chan->common.rcu); | ||
1259 | iop_chan->common.device = dma_dev; | 1241 | iop_chan->common.device = dma_dev; |
1260 | list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); | 1242 | list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); |
1261 | 1243 | ||
@@ -1431,16 +1413,12 @@ static int __init iop_adma_init (void) | |||
1431 | return platform_driver_register(&iop_adma_driver); | 1413 | return platform_driver_register(&iop_adma_driver); |
1432 | } | 1414 | } |
1433 | 1415 | ||
1434 | /* it's currently unsafe to unload this module */ | ||
1435 | #if 0 | ||
1436 | static void __exit iop_adma_exit (void) | 1416 | static void __exit iop_adma_exit (void) |
1437 | { | 1417 | { |
1438 | platform_driver_unregister(&iop_adma_driver); | 1418 | platform_driver_unregister(&iop_adma_driver); |
1439 | return; | 1419 | return; |
1440 | } | 1420 | } |
1441 | module_exit(iop_adma_exit); | 1421 | module_exit(iop_adma_exit); |
1442 | #endif | ||
1443 | |||
1444 | module_init(iop_adma_init); | 1422 | module_init(iop_adma_init); |
1445 | 1423 | ||
1446 | MODULE_AUTHOR("Intel Corporation"); | 1424 | MODULE_AUTHOR("Intel Corporation"); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcda17426411..d35cbd1ff0b3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -18,7 +18,6 @@ | |||
18 | 18 | ||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/async_tx.h> | ||
22 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
23 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
24 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
@@ -340,7 +339,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |||
340 | } | 339 | } |
341 | 340 | ||
342 | /* run dependent operations */ | 341 | /* run dependent operations */ |
343 | async_tx_run_dependencies(&desc->async_tx); | 342 | dma_run_dependencies(&desc->async_tx); |
344 | 343 | ||
345 | return cookie; | 344 | return cookie; |
346 | } | 345 | } |
@@ -607,8 +606,7 @@ submit_done: | |||
607 | } | 606 | } |
608 | 607 | ||
609 | /* returns the number of allocated descriptors */ | 608 | /* returns the number of allocated descriptors */ |
610 | static int mv_xor_alloc_chan_resources(struct dma_chan *chan, | 609 | static int mv_xor_alloc_chan_resources(struct dma_chan *chan) |
611 | struct dma_client *client) | ||
612 | { | 610 | { |
613 | char *hw_desc; | 611 | char *hw_desc; |
614 | int idx; | 612 | int idx; |
@@ -958,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) | |||
958 | dma_chan = container_of(device->common.channels.next, | 956 | dma_chan = container_of(device->common.channels.next, |
959 | struct dma_chan, | 957 | struct dma_chan, |
960 | device_node); | 958 | device_node); |
961 | if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { | 959 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { |
962 | err = -ENODEV; | 960 | err = -ENODEV; |
963 | goto out; | 961 | goto out; |
964 | } | 962 | } |
@@ -1053,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device) | |||
1053 | dma_chan = container_of(device->common.channels.next, | 1051 | dma_chan = container_of(device->common.channels.next, |
1054 | struct dma_chan, | 1052 | struct dma_chan, |
1055 | device_node); | 1053 | device_node); |
1056 | if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { | 1054 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { |
1057 | err = -ENODEV; | 1055 | err = -ENODEV; |
1058 | goto out; | 1056 | goto out; |
1059 | } | 1057 | } |
@@ -1221,7 +1219,6 @@ static int __devinit mv_xor_probe(struct platform_device *pdev) | |||
1221 | INIT_LIST_HEAD(&mv_chan->chain); | 1219 | INIT_LIST_HEAD(&mv_chan->chain); |
1222 | INIT_LIST_HEAD(&mv_chan->completed_slots); | 1220 | INIT_LIST_HEAD(&mv_chan->completed_slots); |
1223 | INIT_LIST_HEAD(&mv_chan->all_slots); | 1221 | INIT_LIST_HEAD(&mv_chan->all_slots); |
1224 | INIT_RCU_HEAD(&mv_chan->common.rcu); | ||
1225 | mv_chan->common.device = dma_dev; | 1222 | mv_chan->common.device = dma_dev; |
1226 | 1223 | ||
1227 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); | 1224 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); |
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 2f9e941968d6..d8f295bdad76 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -18,12 +18,6 @@ | |||
18 | #include <linux/dmi.h> | 18 | #include <linux/dmi.h> |
19 | 19 | ||
20 | #include <acpi/acpi_bus.h> | 20 | #include <acpi/acpi_bus.h> |
21 | #include <acpi/acnames.h> | ||
22 | #include <acpi/acnamesp.h> | ||
23 | #include <acpi/acparser.h> | ||
24 | #include <acpi/acexcep.h> | ||
25 | #include <acpi/acmacros.h> | ||
26 | #include <acpi/actypes.h> | ||
27 | 21 | ||
28 | #define REGS_PER_GTF 7 | 22 | #define REGS_PER_GTF 7 |
29 | struct taskfile_array { | 23 | struct taskfile_array { |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3949a1c73451..419c378bd24b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -120,7 +120,7 @@ config TIFM_CORE | |||
120 | cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD | 120 | cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD |
121 | Interface support (MMC_TIFM_SD)'. | 121 | Interface support (MMC_TIFM_SD)'. |
122 | 122 | ||
123 | To compile this driver as a module, choose M here: the module will | 123 | To compile this driver as a module, choose M here: the module will |
124 | be called tifm_core. | 124 | be called tifm_core. |
125 | 125 | ||
126 | config TIFM_7XX1 | 126 | config TIFM_7XX1 |
@@ -133,100 +133,9 @@ config TIFM_7XX1 | |||
133 | To make actual use of the device, you will have to select some | 133 | To make actual use of the device, you will have to select some |
134 | flash card format drivers, as outlined in the TIFM_CORE Help. | 134 | flash card format drivers, as outlined in the TIFM_CORE Help. |
135 | 135 | ||
136 | To compile this driver as a module, choose M here: the module will | 136 | To compile this driver as a module, choose M here: the module will |
137 | be called tifm_7xx1. | 137 | be called tifm_7xx1. |
138 | 138 | ||
139 | config ACER_WMI | ||
140 | tristate "Acer WMI Laptop Extras (EXPERIMENTAL)" | ||
141 | depends on X86 | ||
142 | depends on EXPERIMENTAL | ||
143 | depends on ACPI | ||
144 | depends on LEDS_CLASS | ||
145 | depends on NEW_LEDS | ||
146 | depends on BACKLIGHT_CLASS_DEVICE | ||
147 | depends on SERIO_I8042 | ||
148 | depends on RFKILL | ||
149 | select ACPI_WMI | ||
150 | ---help--- | ||
151 | This is a driver for newer Acer (and Wistron) laptops. It adds | ||
152 | wireless radio and bluetooth control, and on some laptops, | ||
153 | exposes the mail LED and LCD backlight. | ||
154 | |||
155 | For more information about this driver see | ||
156 | <file:Documentation/laptops/acer-wmi.txt> | ||
157 | |||
158 | If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M | ||
159 | here. | ||
160 | |||
161 | config ASUS_LAPTOP | ||
162 | tristate "Asus Laptop Extras (EXPERIMENTAL)" | ||
163 | depends on X86 | ||
164 | depends on ACPI | ||
165 | depends on EXPERIMENTAL && !ACPI_ASUS | ||
166 | depends on LEDS_CLASS | ||
167 | depends on NEW_LEDS | ||
168 | depends on BACKLIGHT_CLASS_DEVICE | ||
169 | ---help--- | ||
170 | This is the new Linux driver for Asus laptops. It may also support some | ||
171 | MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate | ||
172 | standard ACPI events that go through /proc/acpi/events. It also adds | ||
173 | support for video output switching, LCD backlight control, Bluetooth and | ||
174 | Wlan control, and most importantly, allows you to blink those fancy LEDs. | ||
175 | |||
176 | For more information and a userspace daemon for handling the extra | ||
177 | buttons see <http://acpi4asus.sf.net/>. | ||
178 | |||
179 | If you have an ACPI-compatible ASUS laptop, say Y or M here. | ||
180 | |||
181 | config FUJITSU_LAPTOP | ||
182 | tristate "Fujitsu Laptop Extras" | ||
183 | depends on X86 | ||
184 | depends on ACPI | ||
185 | depends on INPUT | ||
186 | depends on BACKLIGHT_CLASS_DEVICE | ||
187 | ---help--- | ||
188 | This is a driver for laptops built by Fujitsu: | ||
189 | |||
190 | * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks | ||
191 | * Possibly other Fujitsu laptop models | ||
192 | * Tested with S6410 and S7020 | ||
193 | |||
194 | It adds support for LCD brightness control and some hotkeys. | ||
195 | |||
196 | If you have a Fujitsu laptop, say Y or M here. | ||
197 | |||
198 | config FUJITSU_LAPTOP_DEBUG | ||
199 | bool "Verbose debug mode for Fujitsu Laptop Extras" | ||
200 | depends on FUJITSU_LAPTOP | ||
201 | default n | ||
202 | ---help--- | ||
203 | Enables extra debug output from the fujitsu extras driver, at the | ||
204 | expense of a slight increase in driver size. | ||
205 | |||
206 | If you are not sure, say N here. | ||
207 | |||
208 | config TC1100_WMI | ||
209 | tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)" | ||
210 | depends on X86 && !X86_64 | ||
211 | depends on EXPERIMENTAL | ||
212 | depends on ACPI | ||
213 | select ACPI_WMI | ||
214 | ---help--- | ||
215 | This is a driver for the WMI extensions (wireless and bluetooth power | ||
216 | control) of the HP Compaq TC1100 tablet. | ||
217 | |||
218 | config HP_WMI | ||
219 | tristate "HP WMI extras" | ||
220 | depends on ACPI_WMI | ||
221 | depends on INPUT | ||
222 | depends on RFKILL | ||
223 | help | ||
224 | Say Y here if you want to support WMI-based hotkeys on HP laptops and | ||
225 | to read data from WMI such as docking or ambient light sensor state. | ||
226 | |||
227 | To compile this driver as a module, choose M here: the module will | ||
228 | be called hp-wmi. | ||
229 | |||
230 | config ICS932S401 | 139 | config ICS932S401 |
231 | tristate "Integrated Circuits ICS932S401" | 140 | tristate "Integrated Circuits ICS932S401" |
232 | depends on I2C && EXPERIMENTAL | 141 | depends on I2C && EXPERIMENTAL |
@@ -237,170 +146,6 @@ config ICS932S401 | |||
237 | This driver can also be built as a module. If so, the module | 146 | This driver can also be built as a module. If so, the module |
238 | will be called ics932s401. | 147 | will be called ics932s401. |
239 | 148 | ||
240 | config MSI_LAPTOP | ||
241 | tristate "MSI Laptop Extras" | ||
242 | depends on X86 | ||
243 | depends on ACPI | ||
244 | depends on BACKLIGHT_CLASS_DEVICE | ||
245 | ---help--- | ||
246 | This is a driver for laptops built by MSI (MICRO-STAR | ||
247 | INTERNATIONAL): | ||
248 | |||
249 | MSI MegaBook S270 (MS-1013) | ||
250 | Cytron/TCM/Medion/Tchibo MD96100/SAM2000 | ||
251 | |||
252 | It adds support for Bluetooth, WLAN and LCD brightness control. | ||
253 | |||
254 | More information about this driver is available at | ||
255 | <http://0pointer.de/lennart/tchibo.html>. | ||
256 | |||
257 | If you have an MSI S270 laptop, say Y or M here. | ||
258 | |||
259 | config PANASONIC_LAPTOP | ||
260 | tristate "Panasonic Laptop Extras" | ||
261 | depends on X86 && INPUT && ACPI | ||
262 | depends on BACKLIGHT_CLASS_DEVICE | ||
263 | ---help--- | ||
264 | This driver adds support for access to backlight control and hotkeys | ||
265 | on Panasonic Let's Note laptops. | ||
266 | |||
267 | If you have a Panasonic Let's Note laptop (such as the R1(N variant), | ||
268 | R2, R3, R5, T2, W2 and Y2 series), say Y. | ||
269 | |||
270 | config COMPAL_LAPTOP | ||
271 | tristate "Compal Laptop Extras" | ||
272 | depends on X86 | ||
273 | depends on ACPI | ||
274 | depends on BACKLIGHT_CLASS_DEVICE | ||
275 | ---help--- | ||
276 | This is a driver for laptops built by Compal: | ||
277 | |||
278 | Compal FL90/IFL90 | ||
279 | Compal FL91/IFL91 | ||
280 | Compal FL92/JFL92 | ||
281 | Compal FT00/IFT00 | ||
282 | |||
283 | It adds support for Bluetooth, WLAN and LCD brightness control. | ||
284 | |||
285 | If you have a Compal FL9x/IFL9x/FT00 laptop, say Y or M here. | ||
286 | |||
287 | config SONY_LAPTOP | ||
288 | tristate "Sony Laptop Extras" | ||
289 | depends on X86 && ACPI | ||
290 | select BACKLIGHT_CLASS_DEVICE | ||
291 | depends on INPUT | ||
292 | ---help--- | ||
293 | This mini-driver drives the SNC and SPIC devices present in the ACPI | ||
294 | BIOS of the Sony Vaio laptops. | ||
295 | |||
296 | It gives access to some extra laptop functionalities like Bluetooth, | ||
297 | screen brightness control, Fn keys and allows powering on/off some | ||
298 | devices. | ||
299 | |||
300 | Read <file:Documentation/laptops/sony-laptop.txt> for more information. | ||
301 | |||
302 | config SONYPI_COMPAT | ||
303 | bool "Sonypi compatibility" | ||
304 | depends on SONY_LAPTOP | ||
305 | ---help--- | ||
306 | Build the sonypi driver compatibility code into the sony-laptop driver. | ||
307 | |||
308 | config THINKPAD_ACPI | ||
309 | tristate "ThinkPad ACPI Laptop Extras" | ||
310 | depends on X86 && ACPI | ||
311 | select BACKLIGHT_LCD_SUPPORT | ||
312 | select BACKLIGHT_CLASS_DEVICE | ||
313 | select HWMON | ||
314 | select NVRAM | ||
315 | select INPUT | ||
316 | select NEW_LEDS | ||
317 | select LEDS_CLASS | ||
318 | select NET | ||
319 | select RFKILL | ||
320 | ---help--- | ||
321 | This is a driver for the IBM and Lenovo ThinkPad laptops. It adds | ||
322 | support for Fn-Fx key combinations, Bluetooth control, video | ||
323 | output switching, ThinkLight control, UltraBay eject and more. | ||
324 | For more information about this driver see | ||
325 | <file:Documentation/laptops/thinkpad-acpi.txt> and | ||
326 | <http://ibm-acpi.sf.net/> . | ||
327 | |||
328 | This driver was formerly known as ibm-acpi. | ||
329 | |||
330 | If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. | ||
331 | |||
332 | config THINKPAD_ACPI_DEBUG | ||
333 | bool "Verbose debug mode" | ||
334 | depends on THINKPAD_ACPI | ||
335 | default n | ||
336 | ---help--- | ||
337 | Enables extra debugging information, at the expense of a slight | ||
338 | increase in driver size. | ||
339 | |||
340 | If you are not sure, say N here. | ||
341 | |||
342 | config THINKPAD_ACPI_DOCK | ||
343 | bool "Legacy Docking Station Support" | ||
344 | depends on THINKPAD_ACPI | ||
345 | depends on ACPI_DOCK=n | ||
346 | default n | ||
347 | ---help--- | ||
348 | Allows the thinkpad_acpi driver to handle docking station events. | ||
349 | This support was made obsolete by the generic ACPI docking station | ||
350 | support (CONFIG_ACPI_DOCK). It will allow locking and removing the | ||
351 | laptop from the docking station, but will not properly connect PCI | ||
352 | devices. | ||
353 | |||
354 | If you are not sure, say N here. | ||
355 | |||
356 | config THINKPAD_ACPI_BAY | ||
357 | bool "Legacy Removable Bay Support" | ||
358 | depends on THINKPAD_ACPI | ||
359 | default y | ||
360 | ---help--- | ||
361 | Allows the thinkpad_acpi driver to handle removable bays. It will | ||
362 | electrically disable the device in the bay, and also generate | ||
363 | notifications when the bay lever is ejected or inserted. | ||
364 | |||
365 | If you are not sure, say Y here. | ||
366 | |||
367 | config THINKPAD_ACPI_VIDEO | ||
368 | bool "Video output control support" | ||
369 | depends on THINKPAD_ACPI | ||
370 | default y | ||
371 | ---help--- | ||
372 | Allows the thinkpad_acpi driver to provide an interface to control | ||
373 | the various video output ports. | ||
374 | |||
375 | This feature often won't work well, depending on ThinkPad model, | ||
376 | display state, video output devices in use, whether there is an X | ||
377 | server running, phase of the moon, and the current mood of | ||
378 | Schroedinger's cat. If you can use X.org's RandR to control | ||
379 | your ThinkPad's video output ports instead of this feature, | ||
380 | don't think twice: do it and say N here to save some memory. | ||
381 | |||
382 | If you are not sure, say Y here. | ||
383 | |||
384 | config THINKPAD_ACPI_HOTKEY_POLL | ||
385 | bool "Support NVRAM polling for hot keys" | ||
386 | depends on THINKPAD_ACPI | ||
387 | default y | ||
388 | ---help--- | ||
389 | Some ThinkPad models benefit from NVRAM polling to detect a few of | ||
390 | the hot key press events. If you know your ThinkPad model does not | ||
391 | need to do NVRAM polling to support any of the hot keys you use, | ||
392 | unselecting this option will save about 1kB of memory. | ||
393 | |||
394 | ThinkPads T40 and newer, R52 and newer, and X31 and newer are | ||
395 | unlikely to need NVRAM polling in their latest BIOS versions. | ||
396 | |||
397 | NVRAM polling can detect at most the following keys: ThinkPad/Access | ||
398 | IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute, | ||
399 | Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12). | ||
400 | |||
401 | If you are not sure, say Y here. The driver enables polling only if | ||
402 | it is strictly necessary to do so. | ||
403 | |||
404 | config ATMEL_SSC | 149 | config ATMEL_SSC |
405 | tristate "Device driver for Atmel SSC peripheral" | 150 | tristate "Device driver for Atmel SSC peripheral" |
406 | depends on AVR32 || ARCH_AT91 | 151 | depends on AVR32 || ARCH_AT91 |
@@ -413,31 +158,6 @@ config ATMEL_SSC | |||
413 | 158 | ||
414 | If unsure, say N. | 159 | If unsure, say N. |
415 | 160 | ||
416 | config INTEL_MENLOW | ||
417 | tristate "Thermal Management driver for Intel menlow platform" | ||
418 | depends on ACPI_THERMAL | ||
419 | select THERMAL | ||
420 | depends on X86 | ||
421 | ---help--- | ||
422 | ACPI thermal management enhancement driver for the | ||
423 | Intel Menlow platform. | ||
424 | |||
425 | If unsure, say N. | ||
426 | |||
427 | config EEEPC_LAPTOP | ||
428 | tristate "Eee PC Hotkey Driver (EXPERIMENTAL)" | ||
429 | depends on X86 | ||
430 | depends on ACPI | ||
431 | depends on BACKLIGHT_CLASS_DEVICE | ||
432 | depends on HWMON | ||
433 | depends on EXPERIMENTAL | ||
434 | depends on RFKILL | ||
435 | ---help--- | ||
436 | This driver supports the Fn-Fx keys on Eee PC laptops. | ||
437 | It also adds the ability to switch camera/wlan on/off. | ||
438 | |||
439 | If you have an Eee PC laptop, say Y or M here. | ||
440 | |||
441 | config ENCLOSURE_SERVICES | 161 | config ENCLOSURE_SERVICES |
442 | tristate "Enclosure Services" | 162 | tristate "Enclosure Services" |
443 | default n | 163 | default n |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 5de863a0e395..9cf8ae6e4b39 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -1,33 +1,20 @@ | |||
1 | # | 1 | # |
2 | # Makefile for misc devices that really don't fit anywhere else. | 2 | # Makefile for misc devices that really don't fit anywhere else. |
3 | # | 3 | # |
4 | obj- := misc.o # Dummy rule to force built-in.o to be made | ||
5 | 4 | ||
6 | obj-$(CONFIG_IBM_ASM) += ibmasm/ | 5 | obj-$(CONFIG_IBM_ASM) += ibmasm/ |
7 | obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ | 6 | obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ |
8 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o | ||
9 | obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o | ||
10 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o | ||
11 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | ||
12 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o | ||
13 | obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o | 7 | obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o |
14 | obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o | 8 | obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o |
15 | obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o | 9 | obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o |
16 | obj-$(CONFIG_HP_WMI) += hp-wmi.o | ||
17 | obj-$(CONFIG_ICS932S401) += ics932s401.o | 10 | obj-$(CONFIG_ICS932S401) += ics932s401.o |
18 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o | ||
19 | obj-$(CONFIG_LKDTM) += lkdtm.o | 11 | obj-$(CONFIG_LKDTM) += lkdtm.o |
20 | obj-$(CONFIG_TIFM_CORE) += tifm_core.o | 12 | obj-$(CONFIG_TIFM_CORE) += tifm_core.o |
21 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o | 13 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o |
22 | obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o | 14 | obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o |
23 | obj-$(CONFIG_PHANTOM) += phantom.o | 15 | obj-$(CONFIG_PHANTOM) += phantom.o |
24 | obj-$(CONFIG_SGI_IOC4) += ioc4.o | 16 | obj-$(CONFIG_SGI_IOC4) += ioc4.o |
25 | obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o | ||
26 | obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o | ||
27 | obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o | ||
28 | obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o | ||
29 | obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o | 17 | obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o |
30 | obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o | ||
31 | obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o | 18 | obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o |
32 | obj-$(CONFIG_KGDB_TESTS) += kgdbts.o | 19 | obj-$(CONFIG_KGDB_TESTS) += kgdbts.o |
33 | obj-$(CONFIG_SGI_XP) += sgi-xp/ | 20 | obj-$(CONFIG_SGI_XP) += sgi-xp/ |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 1e97916914ad..76bfe16c09b1 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -55,7 +55,6 @@ enum atmel_mci_state { | |||
55 | 55 | ||
56 | struct atmel_mci_dma { | 56 | struct atmel_mci_dma { |
57 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 57 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
58 | struct dma_client client; | ||
59 | struct dma_chan *chan; | 58 | struct dma_chan *chan; |
60 | struct dma_async_tx_descriptor *data_desc; | 59 | struct dma_async_tx_descriptor *data_desc; |
61 | #endif | 60 | #endif |
@@ -593,10 +592,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
593 | 592 | ||
594 | /* If we don't have a channel, we can't do DMA */ | 593 | /* If we don't have a channel, we can't do DMA */ |
595 | chan = host->dma.chan; | 594 | chan = host->dma.chan; |
596 | if (chan) { | 595 | if (chan) |
597 | dma_chan_get(chan); | ||
598 | host->data_chan = chan; | 596 | host->data_chan = chan; |
599 | } | ||
600 | 597 | ||
601 | if (!chan) | 598 | if (!chan) |
602 | return -ENODEV; | 599 | return -ENODEV; |
@@ -1443,60 +1440,6 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) | |||
1443 | return IRQ_HANDLED; | 1440 | return IRQ_HANDLED; |
1444 | } | 1441 | } |
1445 | 1442 | ||
1446 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
1447 | |||
1448 | static inline struct atmel_mci * | ||
1449 | dma_client_to_atmel_mci(struct dma_client *client) | ||
1450 | { | ||
1451 | return container_of(client, struct atmel_mci, dma.client); | ||
1452 | } | ||
1453 | |||
1454 | static enum dma_state_client atmci_dma_event(struct dma_client *client, | ||
1455 | struct dma_chan *chan, enum dma_state state) | ||
1456 | { | ||
1457 | struct atmel_mci *host; | ||
1458 | enum dma_state_client ret = DMA_NAK; | ||
1459 | |||
1460 | host = dma_client_to_atmel_mci(client); | ||
1461 | |||
1462 | switch (state) { | ||
1463 | case DMA_RESOURCE_AVAILABLE: | ||
1464 | spin_lock_bh(&host->lock); | ||
1465 | if (!host->dma.chan) { | ||
1466 | host->dma.chan = chan; | ||
1467 | ret = DMA_ACK; | ||
1468 | } | ||
1469 | spin_unlock_bh(&host->lock); | ||
1470 | |||
1471 | if (ret == DMA_ACK) | ||
1472 | dev_info(&host->pdev->dev, | ||
1473 | "Using %s for DMA transfers\n", | ||
1474 | chan->dev.bus_id); | ||
1475 | break; | ||
1476 | |||
1477 | case DMA_RESOURCE_REMOVED: | ||
1478 | spin_lock_bh(&host->lock); | ||
1479 | if (host->dma.chan == chan) { | ||
1480 | host->dma.chan = NULL; | ||
1481 | ret = DMA_ACK; | ||
1482 | } | ||
1483 | spin_unlock_bh(&host->lock); | ||
1484 | |||
1485 | if (ret == DMA_ACK) | ||
1486 | dev_info(&host->pdev->dev, | ||
1487 | "Lost %s, falling back to PIO\n", | ||
1488 | chan->dev.bus_id); | ||
1489 | break; | ||
1490 | |||
1491 | default: | ||
1492 | break; | ||
1493 | } | ||
1494 | |||
1495 | |||
1496 | return ret; | ||
1497 | } | ||
1498 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | ||
1499 | |||
1500 | static int __init atmci_init_slot(struct atmel_mci *host, | 1443 | static int __init atmci_init_slot(struct atmel_mci *host, |
1501 | struct mci_slot_pdata *slot_data, unsigned int id, | 1444 | struct mci_slot_pdata *slot_data, unsigned int id, |
1502 | u32 sdc_reg) | 1445 | u32 sdc_reg) |
@@ -1600,6 +1543,18 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, | |||
1600 | mmc_free_host(slot->mmc); | 1543 | mmc_free_host(slot->mmc); |
1601 | } | 1544 | } |
1602 | 1545 | ||
1546 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
1547 | static bool filter(struct dma_chan *chan, void *slave) | ||
1548 | { | ||
1549 | struct dw_dma_slave *dws = slave; | ||
1550 | |||
1551 | if (dws->dma_dev == chan->device->dev) | ||
1552 | return true; | ||
1553 | else | ||
1554 | return false; | ||
1555 | } | ||
1556 | #endif | ||
1557 | |||
1603 | static int __init atmci_probe(struct platform_device *pdev) | 1558 | static int __init atmci_probe(struct platform_device *pdev) |
1604 | { | 1559 | { |
1605 | struct mci_platform_data *pdata; | 1560 | struct mci_platform_data *pdata; |
@@ -1652,22 +1607,20 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
1652 | goto err_request_irq; | 1607 | goto err_request_irq; |
1653 | 1608 | ||
1654 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1609 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1655 | if (pdata->dma_slave) { | 1610 | if (pdata->dma_slave.dma_dev) { |
1656 | struct dma_slave *slave = pdata->dma_slave; | 1611 | struct dw_dma_slave *dws = &pdata->dma_slave; |
1612 | dma_cap_mask_t mask; | ||
1657 | 1613 | ||
1658 | slave->tx_reg = regs->start + MCI_TDR; | 1614 | dws->tx_reg = regs->start + MCI_TDR; |
1659 | slave->rx_reg = regs->start + MCI_RDR; | 1615 | dws->rx_reg = regs->start + MCI_RDR; |
1660 | 1616 | ||
1661 | /* Try to grab a DMA channel */ | 1617 | /* Try to grab a DMA channel */ |
1662 | host->dma.client.event_callback = atmci_dma_event; | 1618 | dma_cap_zero(mask); |
1663 | dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask); | 1619 | dma_cap_set(DMA_SLAVE, mask); |
1664 | host->dma.client.slave = slave; | 1620 | host->dma.chan = dma_request_channel(mask, filter, dws); |
1665 | |||
1666 | dma_async_client_register(&host->dma.client); | ||
1667 | dma_async_client_chan_request(&host->dma.client); | ||
1668 | } else { | ||
1669 | dev_notice(&pdev->dev, "DMA not available, using PIO\n"); | ||
1670 | } | 1621 | } |
1622 | if (!host->dma.chan) | ||
1623 | dev_notice(&pdev->dev, "DMA not available, using PIO\n"); | ||
1671 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | 1624 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ |
1672 | 1625 | ||
1673 | platform_set_drvdata(pdev, host); | 1626 | platform_set_drvdata(pdev, host); |
@@ -1699,8 +1652,8 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
1699 | 1652 | ||
1700 | err_init_slot: | 1653 | err_init_slot: |
1701 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1654 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1702 | if (pdata->dma_slave) | 1655 | if (host->dma.chan) |
1703 | dma_async_client_unregister(&host->dma.client); | 1656 | dma_release_channel(host->dma.chan); |
1704 | #endif | 1657 | #endif |
1705 | free_irq(irq, host); | 1658 | free_irq(irq, host); |
1706 | err_request_irq: | 1659 | err_request_irq: |
@@ -1731,8 +1684,8 @@ static int __exit atmci_remove(struct platform_device *pdev) | |||
1731 | clk_disable(host->mck); | 1684 | clk_disable(host->mck); |
1732 | 1685 | ||
1733 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1686 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1734 | if (host->dma.client.slave) | 1687 | if (host->dma.chan) |
1735 | dma_async_client_unregister(&host->dma.client); | 1688 | dma_release_channel(host->dma.chan); |
1736 | #endif | 1689 | #endif |
1737 | 1690 | ||
1738 | free_irq(platform_get_irq(pdev, 0), host); | 1691 | free_irq(platform_get_irq(pdev, 0), host); |
@@ -1761,7 +1714,7 @@ static void __exit atmci_exit(void) | |||
1761 | platform_driver_unregister(&atmci_driver); | 1714 | platform_driver_unregister(&atmci_driver); |
1762 | } | 1715 | } |
1763 | 1716 | ||
1764 | module_init(atmci_init); | 1717 | late_initcall(atmci_init); /* try to load after dma driver when built-in */ |
1765 | module_exit(atmci_exit); | 1718 | module_exit(atmci_exit); |
1766 | 1719 | ||
1767 | MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); | 1720 | MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); |
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index a90d50c2c3e5..7d04fb9ddcaa 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -45,6 +45,14 @@ config MTD_PARTITIONS | |||
45 | devices. Partitioning on NFTL 'devices' is a different matter - that's the | 45 | devices. Partitioning on NFTL 'devices' is a different matter - that's the |
46 | 'normal' form of partitioning used on a block device. | 46 | 'normal' form of partitioning used on a block device. |
47 | 47 | ||
48 | config MTD_TESTS | ||
49 | tristate "MTD tests support" | ||
50 | depends on m | ||
51 | help | ||
52 | This option includes various MTD tests in the build. The tests | ||
53 | should normally be compiled as kernel modules. The modules perform | ||
54 | various checks and verifications when loaded. | ||
55 | |||
48 | config MTD_REDBOOT_PARTS | 56 | config MTD_REDBOOT_PARTS |
49 | tristate "RedBoot partition table parsing" | 57 | tristate "RedBoot partition table parsing" |
50 | depends on MTD_PARTITIONS | 58 | depends on MTD_PARTITIONS |
@@ -316,6 +324,8 @@ source "drivers/mtd/nand/Kconfig" | |||
316 | 324 | ||
317 | source "drivers/mtd/onenand/Kconfig" | 325 | source "drivers/mtd/onenand/Kconfig" |
318 | 326 | ||
327 | source "drivers/mtd/lpddr/Kconfig" | ||
328 | |||
319 | source "drivers/mtd/ubi/Kconfig" | 329 | source "drivers/mtd/ubi/Kconfig" |
320 | 330 | ||
321 | endif # MTD | 331 | endif # MTD |
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 4b77335715f0..4521b1ecce45 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -29,6 +29,6 @@ obj-$(CONFIG_MTD_OOPS) += mtdoops.o | |||
29 | nftl-objs := nftlcore.o nftlmount.o | 29 | nftl-objs := nftlcore.o nftlmount.o |
30 | inftl-objs := inftlcore.o inftlmount.o | 30 | inftl-objs := inftlcore.o inftlmount.o |
31 | 31 | ||
32 | obj-y += chips/ maps/ devices/ nand/ onenand/ | 32 | obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/ |
33 | 33 | ||
34 | obj-$(CONFIG_MTD_UBI) += ubi/ | 34 | obj-$(CONFIG_MTD_UBI) += ubi/ |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index c93a8be5d5f1..f5ab6fa1057b 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c | |||
@@ -58,8 +58,8 @@ static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t | |||
58 | static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *); | 58 | static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *); |
59 | static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *); | 59 | static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *); |
60 | static void cfi_intelext_sync (struct mtd_info *); | 60 | static void cfi_intelext_sync (struct mtd_info *); |
61 | static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len); | 61 | static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
62 | static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); | 62 | static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
63 | #ifdef CONFIG_MTD_OTP | 63 | #ifdef CONFIG_MTD_OTP |
64 | static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); | 64 | static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); |
65 | static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); | 65 | static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); |
@@ -558,8 +558,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) | |||
558 | } | 558 | } |
559 | 559 | ||
560 | for (i=0; i<mtd->numeraseregions;i++){ | 560 | for (i=0; i<mtd->numeraseregions;i++){ |
561 | printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n", | 561 | printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n", |
562 | i,mtd->eraseregions[i].offset, | 562 | i,(unsigned long long)mtd->eraseregions[i].offset, |
563 | mtd->eraseregions[i].erasesize, | 563 | mtd->eraseregions[i].erasesize, |
564 | mtd->eraseregions[i].numblocks); | 564 | mtd->eraseregions[i].numblocks); |
565 | } | 565 | } |
@@ -2058,7 +2058,7 @@ out: put_chip(map, chip, adr); | |||
2058 | return ret; | 2058 | return ret; |
2059 | } | 2059 | } |
2060 | 2060 | ||
2061 | static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | 2061 | static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2062 | { | 2062 | { |
2063 | int ret; | 2063 | int ret; |
2064 | 2064 | ||
@@ -2082,7 +2082,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | |||
2082 | return ret; | 2082 | return ret; |
2083 | } | 2083 | } |
2084 | 2084 | ||
2085 | static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) | 2085 | static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2086 | { | 2086 | { |
2087 | int ret; | 2087 | int ret; |
2088 | 2088 | ||
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index d74ec46aa032..94bb61e19047 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
@@ -71,8 +71,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr | |||
71 | static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); | 71 | static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); |
72 | #include "fwh_lock.h" | 72 | #include "fwh_lock.h" |
73 | 73 | ||
74 | static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len); | 74 | static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
75 | static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); | 75 | static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
76 | 76 | ||
77 | static struct mtd_chip_driver cfi_amdstd_chipdrv = { | 77 | static struct mtd_chip_driver cfi_amdstd_chipdrv = { |
78 | .probe = NULL, /* Not usable directly */ | 78 | .probe = NULL, /* Not usable directly */ |
@@ -322,6 +322,14 @@ static struct cfi_fixup fixup_table[] = { | |||
322 | }; | 322 | }; |
323 | 323 | ||
324 | 324 | ||
325 | static void cfi_fixup_major_minor(struct cfi_private *cfi, | ||
326 | struct cfi_pri_amdstd *extp) | ||
327 | { | ||
328 | if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e && | ||
329 | extp->MajorVersion == '0') | ||
330 | extp->MajorVersion = '1'; | ||
331 | } | ||
332 | |||
325 | struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | 333 | struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) |
326 | { | 334 | { |
327 | struct cfi_private *cfi = map->fldrv_priv; | 335 | struct cfi_private *cfi = map->fldrv_priv; |
@@ -363,6 +371,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
363 | return NULL; | 371 | return NULL; |
364 | } | 372 | } |
365 | 373 | ||
374 | cfi_fixup_major_minor(cfi, extp); | ||
375 | |||
366 | if (extp->MajorVersion != '1' || | 376 | if (extp->MajorVersion != '1' || |
367 | (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { | 377 | (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { |
368 | printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " | 378 | printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " |
@@ -1774,12 +1784,12 @@ out_unlock: | |||
1774 | return ret; | 1784 | return ret; |
1775 | } | 1785 | } |
1776 | 1786 | ||
1777 | static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | 1787 | static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
1778 | { | 1788 | { |
1779 | return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); | 1789 | return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); |
1780 | } | 1790 | } |
1781 | 1791 | ||
1782 | static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) | 1792 | static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
1783 | { | 1793 | { |
1784 | return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); | 1794 | return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); |
1785 | } | 1795 | } |
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index d4714dd9f7ab..6c740f346f91 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c | |||
@@ -42,8 +42,8 @@ static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
42 | unsigned long count, loff_t to, size_t *retlen); | 42 | unsigned long count, loff_t to, size_t *retlen); |
43 | static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *); | 43 | static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *); |
44 | static void cfi_staa_sync (struct mtd_info *); | 44 | static void cfi_staa_sync (struct mtd_info *); |
45 | static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len); | 45 | static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
46 | static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); | 46 | static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
47 | static int cfi_staa_suspend (struct mtd_info *); | 47 | static int cfi_staa_suspend (struct mtd_info *); |
48 | static void cfi_staa_resume (struct mtd_info *); | 48 | static void cfi_staa_resume (struct mtd_info *); |
49 | 49 | ||
@@ -221,8 +221,8 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | for (i=0; i<mtd->numeraseregions;i++){ | 223 | for (i=0; i<mtd->numeraseregions;i++){ |
224 | printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n", | 224 | printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n", |
225 | i,mtd->eraseregions[i].offset, | 225 | i, (unsigned long long)mtd->eraseregions[i].offset, |
226 | mtd->eraseregions[i].erasesize, | 226 | mtd->eraseregions[i].erasesize, |
227 | mtd->eraseregions[i].numblocks); | 227 | mtd->eraseregions[i].numblocks); |
228 | } | 228 | } |
@@ -964,7 +964,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd, | |||
964 | adr += regions[i].erasesize; | 964 | adr += regions[i].erasesize; |
965 | len -= regions[i].erasesize; | 965 | len -= regions[i].erasesize; |
966 | 966 | ||
967 | if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift))) | 967 | if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift))) |
968 | i++; | 968 | i++; |
969 | 969 | ||
970 | if (adr >> cfi->chipshift) { | 970 | if (adr >> cfi->chipshift) { |
@@ -1135,7 +1135,7 @@ retry: | |||
1135 | spin_unlock_bh(chip->mutex); | 1135 | spin_unlock_bh(chip->mutex); |
1136 | return 0; | 1136 | return 0; |
1137 | } | 1137 | } |
1138 | static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | 1138 | static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
1139 | { | 1139 | { |
1140 | struct map_info *map = mtd->priv; | 1140 | struct map_info *map = mtd->priv; |
1141 | struct cfi_private *cfi = map->fldrv_priv; | 1141 | struct cfi_private *cfi = map->fldrv_priv; |
@@ -1284,7 +1284,7 @@ retry: | |||
1284 | spin_unlock_bh(chip->mutex); | 1284 | spin_unlock_bh(chip->mutex); |
1285 | return 0; | 1285 | return 0; |
1286 | } | 1286 | } |
1287 | static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) | 1287 | static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
1288 | { | 1288 | { |
1289 | struct map_info *map = mtd->priv; | 1289 | struct map_info *map = mtd->priv; |
1290 | struct cfi_private *cfi = map->fldrv_priv; | 1290 | struct cfi_private *cfi = map->fldrv_priv; |
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h index ab44f2b996f8..57e0e4e921f9 100644 --- a/drivers/mtd/chips/fwh_lock.h +++ b/drivers/mtd/chips/fwh_lock.h | |||
@@ -77,7 +77,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, | |||
77 | } | 77 | } |
78 | 78 | ||
79 | 79 | ||
80 | static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) | 80 | static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
81 | { | 81 | { |
82 | int ret; | 82 | int ret; |
83 | 83 | ||
@@ -88,7 +88,7 @@ static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | 90 | ||
91 | static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) | 91 | static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
92 | { | 92 | { |
93 | int ret; | 93 | int ret; |
94 | 94 | ||
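The cfi_cmdset_000x and fwh_lock.h hunks above all make the same interface change: the length argument of the chip-level lock/unlock callbacks grows from size_t to uint64_t so it matches the now 64-bit mtd->size and can describe regions larger than 4GiB even on 32-bit builds. A sketch of the resulting callback shape (my_lock_fn is a hypothetical name, not a kernel type):

#include <linux/types.h>

struct mtd_info;	/* declared in <linux/mtd/mtd.h> */

/* With a 64-bit length, (ofs, len) can cover a >4GiB range even when
 * size_t is only 32 bits wide on the target architecture. */
typedef int (*my_lock_fn)(struct mtd_info *mtd, loff_t ofs, uint64_t len);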
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c index f4bda4cee495..578de1c67bfe 100644 --- a/drivers/mtd/devices/lart.c +++ b/drivers/mtd/devices/lart.c | |||
@@ -619,7 +619,7 @@ static struct mtd_partition lart_partitions[] = { | |||
619 | }; | 619 | }; |
620 | #endif | 620 | #endif |
621 | 621 | ||
622 | int __init lart_flash_init (void) | 622 | static int __init lart_flash_init (void) |
623 | { | 623 | { |
624 | int result; | 624 | int result; |
625 | memset (&mtd,0,sizeof (mtd)); | 625 | memset (&mtd,0,sizeof (mtd)); |
@@ -690,7 +690,7 @@ int __init lart_flash_init (void) | |||
690 | return (result); | 690 | return (result); |
691 | } | 691 | } |
692 | 692 | ||
693 | void __exit lart_flash_exit (void) | 693 | static void __exit lart_flash_exit (void) |
694 | { | 694 | { |
695 | #ifndef HAVE_PARTITIONS | 695 | #ifndef HAVE_PARTITIONS |
696 | del_mtd_device (&mtd); | 696 | del_mtd_device (&mtd); |
@@ -705,5 +705,3 @@ module_exit (lart_flash_exit); | |||
705 | MODULE_LICENSE("GPL"); | 705 | MODULE_LICENSE("GPL"); |
706 | MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>"); | 706 | MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>"); |
707 | MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board"); | 707 | MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board"); |
708 | |||
709 | |||
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 5733f0643843..7c3fc766dcf1 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/math64.h> | ||
23 | 24 | ||
24 | #include <linux/mtd/mtd.h> | 25 | #include <linux/mtd/mtd.h> |
25 | #include <linux/mtd/partitions.h> | 26 | #include <linux/mtd/partitions.h> |
@@ -169,9 +170,9 @@ static int wait_till_ready(struct m25p *flash) | |||
169 | */ | 170 | */ |
170 | static int erase_chip(struct m25p *flash) | 171 | static int erase_chip(struct m25p *flash) |
171 | { | 172 | { |
172 | DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n", | 173 | DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n", |
173 | dev_name(&flash->spi->dev), __func__, | 174 | dev_name(&flash->spi->dev), __func__, |
174 | flash->mtd.size / 1024); | 175 | (long long)(flash->mtd.size >> 10)); |
175 | 176 | ||
176 | /* Wait until finished previous write command. */ | 177 | /* Wait until finished previous write command. */ |
177 | if (wait_till_ready(flash)) | 178 | if (wait_till_ready(flash)) |
@@ -232,18 +233,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
232 | { | 233 | { |
233 | struct m25p *flash = mtd_to_m25p(mtd); | 234 | struct m25p *flash = mtd_to_m25p(mtd); |
234 | u32 addr,len; | 235 | u32 addr,len; |
236 | uint32_t rem; | ||
235 | 237 | ||
236 | DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n", | 238 | DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n", |
237 | dev_name(&flash->spi->dev), __func__, "at", | 239 | dev_name(&flash->spi->dev), __func__, "at", |
238 | (u32)instr->addr, instr->len); | 240 | (long long)instr->addr, (long long)instr->len); |
239 | 241 | ||
240 | /* sanity checks */ | 242 | /* sanity checks */ |
241 | if (instr->addr + instr->len > flash->mtd.size) | 243 | if (instr->addr + instr->len > flash->mtd.size) |
242 | return -EINVAL; | 244 | return -EINVAL; |
243 | if ((instr->addr % mtd->erasesize) != 0 | 245 | div_u64_rem(instr->len, mtd->erasesize, &rem); |
244 | || (instr->len % mtd->erasesize) != 0) { | 246 | if (rem) |
245 | return -EINVAL; | 247 | return -EINVAL; |
246 | } | ||
247 | 248 | ||
248 | addr = instr->addr; | 249 | addr = instr->addr; |
249 | len = instr->len; | 250 | len = instr->len; |
@@ -677,24 +678,24 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
677 | flash->mtd.erasesize = info->sector_size; | 678 | flash->mtd.erasesize = info->sector_size; |
678 | } | 679 | } |
679 | 680 | ||
680 | dev_info(&spi->dev, "%s (%d Kbytes)\n", info->name, | 681 | dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name, |
681 | flash->mtd.size / 1024); | 682 | (long long)flash->mtd.size >> 10); |
682 | 683 | ||
683 | DEBUG(MTD_DEBUG_LEVEL2, | 684 | DEBUG(MTD_DEBUG_LEVEL2, |
684 | "mtd .name = %s, .size = 0x%.8x (%uMiB) " | 685 | "mtd .name = %s, .size = 0x%llx (%lldMiB) " |
685 | ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", | 686 | ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", |
686 | flash->mtd.name, | 687 | flash->mtd.name, |
687 | flash->mtd.size, flash->mtd.size / (1024*1024), | 688 | (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), |
688 | flash->mtd.erasesize, flash->mtd.erasesize / 1024, | 689 | flash->mtd.erasesize, flash->mtd.erasesize / 1024, |
689 | flash->mtd.numeraseregions); | 690 | flash->mtd.numeraseregions); |
690 | 691 | ||
691 | if (flash->mtd.numeraseregions) | 692 | if (flash->mtd.numeraseregions) |
692 | for (i = 0; i < flash->mtd.numeraseregions; i++) | 693 | for (i = 0; i < flash->mtd.numeraseregions; i++) |
693 | DEBUG(MTD_DEBUG_LEVEL2, | 694 | DEBUG(MTD_DEBUG_LEVEL2, |
694 | "mtd.eraseregions[%d] = { .offset = 0x%.8x, " | 695 | "mtd.eraseregions[%d] = { .offset = 0x%llx, " |
695 | ".erasesize = 0x%.8x (%uKiB), " | 696 | ".erasesize = 0x%.8x (%uKiB), " |
696 | ".numblocks = %d }\n", | 697 | ".numblocks = %d }\n", |
697 | i, flash->mtd.eraseregions[i].offset, | 698 | i, (long long)flash->mtd.eraseregions[i].offset, |
698 | flash->mtd.eraseregions[i].erasesize, | 699 | flash->mtd.eraseregions[i].erasesize, |
699 | flash->mtd.eraseregions[i].erasesize / 1024, | 700 | flash->mtd.eraseregions[i].erasesize / 1024, |
700 | flash->mtd.eraseregions[i].numblocks); | 701 | flash->mtd.eraseregions[i].numblocks); |
@@ -722,12 +723,12 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
722 | if (nr_parts > 0) { | 723 | if (nr_parts > 0) { |
723 | for (i = 0; i < nr_parts; i++) { | 724 | for (i = 0; i < nr_parts; i++) { |
724 | DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " | 725 | DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " |
725 | "{.name = %s, .offset = 0x%.8x, " | 726 | "{.name = %s, .offset = 0x%llx, " |
726 | ".size = 0x%.8x (%uKiB) }\n", | 727 | ".size = 0x%llx (%lldKiB) }\n", |
727 | i, parts[i].name, | 728 | i, parts[i].name, |
728 | parts[i].offset, | 729 | (long long)parts[i].offset, |
729 | parts[i].size, | 730 | (long long)parts[i].size, |
730 | parts[i].size / 1024); | 731 | (long long)(parts[i].size >> 10)); |
731 | } | 732 | } |
732 | flash->partitioned = 1; | 733 | flash->partitioned = 1; |
733 | return add_mtd_partitions(&flash->mtd, parts, nr_parts); | 734 | return add_mtd_partitions(&flash->mtd, parts, nr_parts); |
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 65126cd668ff..d44f741ae229 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/math64.h> | ||
19 | 20 | ||
20 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
21 | #include <linux/spi/flash.h> | 22 | #include <linux/spi/flash.h> |
@@ -152,15 +153,20 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
152 | struct spi_message msg; | 153 | struct spi_message msg; |
153 | unsigned blocksize = priv->page_size << 3; | 154 | unsigned blocksize = priv->page_size << 3; |
154 | uint8_t *command; | 155 | uint8_t *command; |
156 | uint32_t rem; | ||
155 | 157 | ||
156 | DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n", | 158 | DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n", |
157 | dev_name(&spi->dev), | 159 | dev_name(&spi->dev), (long long)instr->addr, |
158 | instr->addr, instr->len); | 160 | (long long)instr->len); |
159 | 161 | ||
160 | /* Sanity checks */ | 162 | /* Sanity checks */ |
161 | if ((instr->addr + instr->len) > mtd->size | 163 | if (instr->addr + instr->len > mtd->size) |
162 | || (instr->len % priv->page_size) != 0 | 164 | return -EINVAL; |
163 | || (instr->addr % priv->page_size) != 0) | 165 | div_u64_rem(instr->len, priv->page_size, &rem); |
166 | if (rem) | ||
167 | return -EINVAL; | ||
168 | div_u64_rem(instr->addr, priv->page_size, &rem); | ||
169 | if (rem) | ||
164 | return -EINVAL; | 170 | return -EINVAL; |
165 | 171 | ||
166 | spi_message_init(&msg); | 172 | spi_message_init(&msg); |
@@ -178,7 +184,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
178 | /* Calculate flash page address; use block erase (for speed) if | 184 | /* Calculate flash page address; use block erase (for speed) if |
179 | * we're at a block boundary and need to erase the whole block. | 185 | * we're at a block boundary and need to erase the whole block. |
180 | */ | 186 | */ |
181 | pageaddr = instr->addr / priv->page_size; | 187 | pageaddr = div_u64(instr->addr, priv->page_size); |
182 | do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize; | 188 | do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize; |
183 | pageaddr = pageaddr << priv->page_offset; | 189 | pageaddr = pageaddr << priv->page_offset; |
184 | 190 | ||
@@ -667,8 +673,8 @@ add_dataflash_otp(struct spi_device *spi, char *name, | |||
667 | if (revision >= 'c') | 673 | if (revision >= 'c') |
668 | otp_tag = otp_setup(device, revision); | 674 | otp_tag = otp_setup(device, revision); |
669 | 675 | ||
670 | dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes%s\n", | 676 | dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n", |
671 | name, DIV_ROUND_UP(device->size, 1024), | 677 | name, (long long)((device->size + 1023) >> 10), |
672 | pagesize, otp_tag); | 678 | pagesize, otp_tag); |
673 | dev_set_drvdata(&spi->dev, priv); | 679 | dev_set_drvdata(&spi->dev, priv); |
674 | 680 | ||
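Both the m25p80 and mtd_dataflash erase paths above stop using '%' on what is now a 64-bit length and switch to div_u64_rem(), since a direct 64-by-32 modulo would otherwise pull in libgcc helpers on 32-bit architectures. A minimal sketch of the same alignment check, under a hypothetical helper name:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/math64.h>

/* Hypothetical helper: reject erase requests whose offset or length is
 * not a multiple of the (32-bit) erase block size. */
static int my_check_erase_alignment(u64 addr, u64 len, u32 erasesize)
{
	u32 rem;

	div_u64_rem(addr, erasesize, &rem);
	if (rem)
		return -EINVAL;
	div_u64_rem(len, erasesize, &rem);
	if (rem)
		return -EINVAL;
	return 0;
}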
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index 9bf581c4f740..a790c062af1f 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c | |||
@@ -109,25 +109,25 @@ module_param(shuffle_freq, int, 0); | |||
109 | /* Each memory region corresponds to a minor device */ | 109 | /* Each memory region corresponds to a minor device */ |
110 | typedef struct partition_t { | 110 | typedef struct partition_t { |
111 | struct mtd_blktrans_dev mbd; | 111 | struct mtd_blktrans_dev mbd; |
112 | u_int32_t state; | 112 | uint32_t state; |
113 | u_int32_t *VirtualBlockMap; | 113 | uint32_t *VirtualBlockMap; |
114 | u_int32_t *VirtualPageMap; | 114 | uint32_t *VirtualPageMap; |
115 | u_int32_t FreeTotal; | 115 | uint32_t FreeTotal; |
116 | struct eun_info_t { | 116 | struct eun_info_t { |
117 | u_int32_t Offset; | 117 | uint32_t Offset; |
118 | u_int32_t EraseCount; | 118 | uint32_t EraseCount; |
119 | u_int32_t Free; | 119 | uint32_t Free; |
120 | u_int32_t Deleted; | 120 | uint32_t Deleted; |
121 | } *EUNInfo; | 121 | } *EUNInfo; |
122 | struct xfer_info_t { | 122 | struct xfer_info_t { |
123 | u_int32_t Offset; | 123 | uint32_t Offset; |
124 | u_int32_t EraseCount; | 124 | uint32_t EraseCount; |
125 | u_int16_t state; | 125 | uint16_t state; |
126 | } *XferInfo; | 126 | } *XferInfo; |
127 | u_int16_t bam_index; | 127 | uint16_t bam_index; |
128 | u_int32_t *bam_cache; | 128 | uint32_t *bam_cache; |
129 | u_int16_t DataUnits; | 129 | uint16_t DataUnits; |
130 | u_int32_t BlocksPerUnit; | 130 | uint32_t BlocksPerUnit; |
131 | erase_unit_header_t header; | 131 | erase_unit_header_t header; |
132 | } partition_t; | 132 | } partition_t; |
133 | 133 | ||
@@ -199,8 +199,8 @@ static int scan_header(partition_t *part) | |||
199 | static int build_maps(partition_t *part) | 199 | static int build_maps(partition_t *part) |
200 | { | 200 | { |
201 | erase_unit_header_t header; | 201 | erase_unit_header_t header; |
202 | u_int16_t xvalid, xtrans, i; | 202 | uint16_t xvalid, xtrans, i; |
203 | u_int blocks, j; | 203 | unsigned blocks, j; |
204 | int hdr_ok, ret = -1; | 204 | int hdr_ok, ret = -1; |
205 | ssize_t retval; | 205 | ssize_t retval; |
206 | loff_t offset; | 206 | loff_t offset; |
@@ -269,14 +269,14 @@ static int build_maps(partition_t *part) | |||
269 | 269 | ||
270 | /* Set up virtual page map */ | 270 | /* Set up virtual page map */ |
271 | blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize; | 271 | blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize; |
272 | part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t)); | 272 | part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t)); |
273 | if (!part->VirtualBlockMap) | 273 | if (!part->VirtualBlockMap) |
274 | goto out_XferInfo; | 274 | goto out_XferInfo; |
275 | 275 | ||
276 | memset(part->VirtualBlockMap, 0xff, blocks * sizeof(u_int32_t)); | 276 | memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t)); |
277 | part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize; | 277 | part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize; |
278 | 278 | ||
279 | part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(u_int32_t), | 279 | part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t), |
280 | GFP_KERNEL); | 280 | GFP_KERNEL); |
281 | if (!part->bam_cache) | 281 | if (!part->bam_cache) |
282 | goto out_VirtualBlockMap; | 282 | goto out_VirtualBlockMap; |
@@ -290,7 +290,7 @@ static int build_maps(partition_t *part) | |||
290 | offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset); | 290 | offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset); |
291 | 291 | ||
292 | ret = part->mbd.mtd->read(part->mbd.mtd, offset, | 292 | ret = part->mbd.mtd->read(part->mbd.mtd, offset, |
293 | part->BlocksPerUnit * sizeof(u_int32_t), &retval, | 293 | part->BlocksPerUnit * sizeof(uint32_t), &retval, |
294 | (unsigned char *)part->bam_cache); | 294 | (unsigned char *)part->bam_cache); |
295 | 295 | ||
296 | if (ret) | 296 | if (ret) |
@@ -332,7 +332,7 @@ out: | |||
332 | ======================================================================*/ | 332 | ======================================================================*/ |
333 | 333 | ||
334 | static int erase_xfer(partition_t *part, | 334 | static int erase_xfer(partition_t *part, |
335 | u_int16_t xfernum) | 335 | uint16_t xfernum) |
336 | { | 336 | { |
337 | int ret; | 337 | int ret; |
338 | struct xfer_info_t *xfer; | 338 | struct xfer_info_t *xfer; |
@@ -408,7 +408,7 @@ static int prepare_xfer(partition_t *part, int i) | |||
408 | erase_unit_header_t header; | 408 | erase_unit_header_t header; |
409 | struct xfer_info_t *xfer; | 409 | struct xfer_info_t *xfer; |
410 | int nbam, ret; | 410 | int nbam, ret; |
411 | u_int32_t ctl; | 411 | uint32_t ctl; |
412 | ssize_t retlen; | 412 | ssize_t retlen; |
413 | loff_t offset; | 413 | loff_t offset; |
414 | 414 | ||
@@ -430,15 +430,15 @@ static int prepare_xfer(partition_t *part, int i) | |||
430 | } | 430 | } |
431 | 431 | ||
432 | /* Write the BAM stub */ | 432 | /* Write the BAM stub */ |
433 | nbam = (part->BlocksPerUnit * sizeof(u_int32_t) + | 433 | nbam = (part->BlocksPerUnit * sizeof(uint32_t) + |
434 | le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE; | 434 | le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE; |
435 | 435 | ||
436 | offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset); | 436 | offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset); |
437 | ctl = cpu_to_le32(BLOCK_CONTROL); | 437 | ctl = cpu_to_le32(BLOCK_CONTROL); |
438 | 438 | ||
439 | for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) { | 439 | for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) { |
440 | 440 | ||
441 | ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t), | 441 | ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t), |
442 | &retlen, (u_char *)&ctl); | 442 | &retlen, (u_char *)&ctl); |
443 | 443 | ||
444 | if (ret) | 444 | if (ret) |
@@ -461,18 +461,18 @@ static int prepare_xfer(partition_t *part, int i) | |||
461 | 461 | ||
462 | ======================================================================*/ | 462 | ======================================================================*/ |
463 | 463 | ||
464 | static int copy_erase_unit(partition_t *part, u_int16_t srcunit, | 464 | static int copy_erase_unit(partition_t *part, uint16_t srcunit, |
465 | u_int16_t xferunit) | 465 | uint16_t xferunit) |
466 | { | 466 | { |
467 | u_char buf[SECTOR_SIZE]; | 467 | u_char buf[SECTOR_SIZE]; |
468 | struct eun_info_t *eun; | 468 | struct eun_info_t *eun; |
469 | struct xfer_info_t *xfer; | 469 | struct xfer_info_t *xfer; |
470 | u_int32_t src, dest, free, i; | 470 | uint32_t src, dest, free, i; |
471 | u_int16_t unit; | 471 | uint16_t unit; |
472 | int ret; | 472 | int ret; |
473 | ssize_t retlen; | 473 | ssize_t retlen; |
474 | loff_t offset; | 474 | loff_t offset; |
475 | u_int16_t srcunitswap = cpu_to_le16(srcunit); | 475 | uint16_t srcunitswap = cpu_to_le16(srcunit); |
476 | 476 | ||
477 | eun = &part->EUNInfo[srcunit]; | 477 | eun = &part->EUNInfo[srcunit]; |
478 | xfer = &part->XferInfo[xferunit]; | 478 | xfer = &part->XferInfo[xferunit]; |
@@ -486,7 +486,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit, | |||
486 | offset = eun->Offset + le32_to_cpu(part->header.BAMOffset); | 486 | offset = eun->Offset + le32_to_cpu(part->header.BAMOffset); |
487 | 487 | ||
488 | ret = part->mbd.mtd->read(part->mbd.mtd, offset, | 488 | ret = part->mbd.mtd->read(part->mbd.mtd, offset, |
489 | part->BlocksPerUnit * sizeof(u_int32_t), | 489 | part->BlocksPerUnit * sizeof(uint32_t), |
490 | &retlen, (u_char *) (part->bam_cache)); | 490 | &retlen, (u_char *) (part->bam_cache)); |
491 | 491 | ||
492 | /* mark the cache bad, in case we get an error later */ | 492 | /* mark the cache bad, in case we get an error later */ |
@@ -503,7 +503,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit, | |||
503 | offset = xfer->Offset + 20; /* Bad! */ | 503 | offset = xfer->Offset + 20; /* Bad! */ |
504 | unit = cpu_to_le16(0x7fff); | 504 | unit = cpu_to_le16(0x7fff); |
505 | 505 | ||
506 | ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t), | 506 | ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t), |
507 | &retlen, (u_char *) &unit); | 507 | &retlen, (u_char *) &unit); |
508 | 508 | ||
509 | if (ret) { | 509 | if (ret) { |
@@ -560,7 +560,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit, | |||
560 | 560 | ||
561 | 561 | ||
562 | /* All clear? Then update the LogicalEUN again */ | 562 | /* All clear? Then update the LogicalEUN again */ |
563 | ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t), | 563 | ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t), |
564 | &retlen, (u_char *)&srcunitswap); | 564 | &retlen, (u_char *)&srcunitswap); |
565 | 565 | ||
566 | if (ret) { | 566 | if (ret) { |
@@ -605,8 +605,8 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit, | |||
605 | 605 | ||
606 | static int reclaim_block(partition_t *part) | 606 | static int reclaim_block(partition_t *part) |
607 | { | 607 | { |
608 | u_int16_t i, eun, xfer; | 608 | uint16_t i, eun, xfer; |
609 | u_int32_t best; | 609 | uint32_t best; |
610 | int queued, ret; | 610 | int queued, ret; |
611 | 611 | ||
612 | DEBUG(0, "ftl_cs: reclaiming space...\n"); | 612 | DEBUG(0, "ftl_cs: reclaiming space...\n"); |
@@ -723,10 +723,10 @@ static void dump_lists(partition_t *part) | |||
723 | } | 723 | } |
724 | #endif | 724 | #endif |
725 | 725 | ||
726 | static u_int32_t find_free(partition_t *part) | 726 | static uint32_t find_free(partition_t *part) |
727 | { | 727 | { |
728 | u_int16_t stop, eun; | 728 | uint16_t stop, eun; |
729 | u_int32_t blk; | 729 | uint32_t blk; |
730 | size_t retlen; | 730 | size_t retlen; |
731 | int ret; | 731 | int ret; |
732 | 732 | ||
@@ -749,7 +749,7 @@ static u_int32_t find_free(partition_t *part) | |||
749 | 749 | ||
750 | ret = part->mbd.mtd->read(part->mbd.mtd, | 750 | ret = part->mbd.mtd->read(part->mbd.mtd, |
751 | part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset), | 751 | part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset), |
752 | part->BlocksPerUnit * sizeof(u_int32_t), | 752 | part->BlocksPerUnit * sizeof(uint32_t), |
753 | &retlen, (u_char *) (part->bam_cache)); | 753 | &retlen, (u_char *) (part->bam_cache)); |
754 | 754 | ||
755 | if (ret) { | 755 | if (ret) { |
@@ -786,7 +786,7 @@ static u_int32_t find_free(partition_t *part) | |||
786 | static int ftl_read(partition_t *part, caddr_t buffer, | 786 | static int ftl_read(partition_t *part, caddr_t buffer, |
787 | u_long sector, u_long nblocks) | 787 | u_long sector, u_long nblocks) |
788 | { | 788 | { |
789 | u_int32_t log_addr, bsize; | 789 | uint32_t log_addr, bsize; |
790 | u_long i; | 790 | u_long i; |
791 | int ret; | 791 | int ret; |
792 | size_t offset, retlen; | 792 | size_t offset, retlen; |
@@ -829,14 +829,14 @@ static int ftl_read(partition_t *part, caddr_t buffer, | |||
829 | 829 | ||
830 | ======================================================================*/ | 830 | ======================================================================*/ |
831 | 831 | ||
832 | static int set_bam_entry(partition_t *part, u_int32_t log_addr, | 832 | static int set_bam_entry(partition_t *part, uint32_t log_addr, |
833 | u_int32_t virt_addr) | 833 | uint32_t virt_addr) |
834 | { | 834 | { |
835 | u_int32_t bsize, blk, le_virt_addr; | 835 | uint32_t bsize, blk, le_virt_addr; |
836 | #ifdef PSYCHO_DEBUG | 836 | #ifdef PSYCHO_DEBUG |
837 | u_int32_t old_addr; | 837 | uint32_t old_addr; |
838 | #endif | 838 | #endif |
839 | u_int16_t eun; | 839 | uint16_t eun; |
840 | int ret; | 840 | int ret; |
841 | size_t retlen, offset; | 841 | size_t retlen, offset; |
842 | 842 | ||
@@ -845,11 +845,11 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr, | |||
845 | bsize = 1 << part->header.EraseUnitSize; | 845 | bsize = 1 << part->header.EraseUnitSize; |
846 | eun = log_addr / bsize; | 846 | eun = log_addr / bsize; |
847 | blk = (log_addr % bsize) / SECTOR_SIZE; | 847 | blk = (log_addr % bsize) / SECTOR_SIZE; |
848 | offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) + | 848 | offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) + |
849 | le32_to_cpu(part->header.BAMOffset)); | 849 | le32_to_cpu(part->header.BAMOffset)); |
850 | 850 | ||
851 | #ifdef PSYCHO_DEBUG | 851 | #ifdef PSYCHO_DEBUG |
852 | ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t), | 852 | ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t), |
853 | &retlen, (u_char *)&old_addr); | 853 | &retlen, (u_char *)&old_addr); |
854 | if (ret) { | 854 | if (ret) { |
855 | printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret); | 855 | printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret); |
@@ -886,7 +886,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr, | |||
886 | #endif | 886 | #endif |
887 | part->bam_cache[blk] = le_virt_addr; | 887 | part->bam_cache[blk] = le_virt_addr; |
888 | } | 888 | } |
889 | ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t), | 889 | ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t), |
890 | &retlen, (u_char *)&le_virt_addr); | 890 | &retlen, (u_char *)&le_virt_addr); |
891 | 891 | ||
892 | if (ret) { | 892 | if (ret) { |
@@ -900,7 +900,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr, | |||
900 | static int ftl_write(partition_t *part, caddr_t buffer, | 900 | static int ftl_write(partition_t *part, caddr_t buffer, |
901 | u_long sector, u_long nblocks) | 901 | u_long sector, u_long nblocks) |
902 | { | 902 | { |
903 | u_int32_t bsize, log_addr, virt_addr, old_addr, blk; | 903 | uint32_t bsize, log_addr, virt_addr, old_addr, blk; |
904 | u_long i; | 904 | u_long i; |
905 | int ret; | 905 | int ret; |
906 | size_t retlen, offset; | 906 | size_t retlen, offset; |
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 50ce13887f63..73f05227dc8c 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c | |||
@@ -50,7 +50,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
50 | struct INFTLrecord *inftl; | 50 | struct INFTLrecord *inftl; |
51 | unsigned long temp; | 51 | unsigned long temp; |
52 | 52 | ||
53 | if (mtd->type != MTD_NANDFLASH) | 53 | if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX) |
54 | return; | 54 | return; |
55 | /* OK, this is moderately ugly. But probably safe. Alternatives? */ | 55 | /* OK, this is moderately ugly. But probably safe. Alternatives? */ |
56 | if (memcmp(mtd->name, "DiskOnChip", 10)) | 56 | if (memcmp(mtd->name, "DiskOnChip", 10)) |
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c index 9113628ed1ef..f751dd97c549 100644 --- a/drivers/mtd/inftlmount.c +++ b/drivers/mtd/inftlmount.c | |||
@@ -63,7 +63,7 @@ static int find_boot_record(struct INFTLrecord *inftl) | |||
63 | * otherwise. | 63 | * otherwise. |
64 | */ | 64 | */ |
65 | inftl->EraseSize = inftl->mbd.mtd->erasesize; | 65 | inftl->EraseSize = inftl->mbd.mtd->erasesize; |
66 | inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize; | 66 | inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize; |
67 | 67 | ||
68 | inftl->MediaUnit = BLOCK_NIL; | 68 | inftl->MediaUnit = BLOCK_NIL; |
69 | 69 | ||
@@ -187,7 +187,7 @@ static int find_boot_record(struct INFTLrecord *inftl) | |||
187 | mh->BlockMultiplierBits); | 187 | mh->BlockMultiplierBits); |
188 | inftl->EraseSize = inftl->mbd.mtd->erasesize << | 188 | inftl->EraseSize = inftl->mbd.mtd->erasesize << |
189 | mh->BlockMultiplierBits; | 189 | mh->BlockMultiplierBits; |
190 | inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize; | 190 | inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize; |
191 | block >>= mh->BlockMultiplierBits; | 191 | block >>= mh->BlockMultiplierBits; |
192 | } | 192 | } |
193 | 193 | ||
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig new file mode 100644 index 000000000000..acd4ea9b2278 --- /dev/null +++ b/drivers/mtd/lpddr/Kconfig | |||
@@ -0,0 +1,22 @@ | |||
1 | # drivers/mtd/chips/Kconfig | ||
2 | |||
3 | menu "LPDDR flash memory drivers" | ||
4 | depends on MTD!=n | ||
5 | |||
6 | config MTD_LPDDR | ||
7 | tristate "Support for LPDDR flash chips" | ||
8 | select MTD_QINFO_PROBE | ||
9 | help | ||
10 | This option enables support for LPDDR (Low Power Double Data Rate) | ||
11 | flash chips, also known as Mobile-DDR. It is a newer standard for | ||
12 | DDR memories, intended for battery-operated systems. | ||
13 | |||
14 | config MTD_QINFO_PROBE | ||
15 | tristate "Detect flash chips by QINFO probe" | ||
16 | help | ||
17 | Device information for LPDDR chips is offered through the Overlay | ||
18 | Window QINFO interface, which permits the same software to be used | ||
19 | for entire families of devices. This serves a similar purpose to | ||
20 | CFI on legacy flash products. | ||
21 | endmenu | ||
22 | |||
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile new file mode 100644 index 000000000000..da48e46b5812 --- /dev/null +++ b/drivers/mtd/lpddr/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # | ||
2 | # linux/drivers/mtd/lpddr/Makefile | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o | ||
6 | obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o | ||
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c new file mode 100644 index 000000000000..e22ca49583e7 --- /dev/null +++ b/drivers/mtd/lpddr/lpddr_cmds.c | |||
@@ -0,0 +1,796 @@ | |||
1 | /* | ||
2 | * LPDDR flash memory device operations. This module provides read, write, | ||
3 | * erase, lock/unlock support for LPDDR flash memories | ||
4 | * (C) 2008 Korolev Alexey <akorolev@infradead.org> | ||
5 | * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com> | ||
6 | * Many thanks to Roman Borisov for initial enabling | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version 2 | ||
11 | * of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * TODO: | ||
23 | * Implement VPP management | ||
24 | * Implement XIP support | ||
25 | * Implement OTP support | ||
26 | */ | ||
27 | #include <linux/mtd/pfow.h> | ||
28 | #include <linux/mtd/qinfo.h> | ||
29 | |||
30 | static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, | ||
31 | size_t *retlen, u_char *buf); | ||
32 | static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, | ||
33 | size_t len, size_t *retlen, const u_char *buf); | ||
34 | static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs, | ||
35 | unsigned long count, loff_t to, size_t *retlen); | ||
36 | static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr); | ||
37 | static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); | ||
38 | static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); | ||
39 | static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | ||
40 | size_t *retlen, void **mtdbuf, resource_size_t *phys); | ||
41 | static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len); | ||
42 | static int get_chip(struct map_info *map, struct flchip *chip, int mode); | ||
43 | static int chip_ready(struct map_info *map, struct flchip *chip, int mode); | ||
44 | static void put_chip(struct map_info *map, struct flchip *chip); | ||
45 | |||
46 | struct mtd_info *lpddr_cmdset(struct map_info *map) | ||
47 | { | ||
48 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
49 | struct flchip_shared *shared; | ||
50 | struct flchip *chip; | ||
51 | struct mtd_info *mtd; | ||
52 | int numchips; | ||
53 | int i, j; | ||
54 | |||
55 | mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); | ||
56 | if (!mtd) { | ||
57 | printk(KERN_ERR "Failed to allocate memory for MTD device\n"); | ||
58 | return NULL; | ||
59 | } | ||
60 | mtd->priv = map; | ||
61 | mtd->type = MTD_NORFLASH; | ||
62 | |||
63 | /* Fill in the default mtd operations */ | ||
64 | mtd->read = lpddr_read; | ||
65 | mtd->type = MTD_NORFLASH; | ||
66 | mtd->flags = MTD_CAP_NORFLASH; | ||
67 | mtd->flags &= ~MTD_BIT_WRITEABLE; | ||
68 | mtd->erase = lpddr_erase; | ||
69 | mtd->write = lpddr_write_buffers; | ||
70 | mtd->writev = lpddr_writev; | ||
71 | mtd->read_oob = NULL; | ||
72 | mtd->write_oob = NULL; | ||
73 | mtd->sync = NULL; | ||
74 | mtd->lock = lpddr_lock; | ||
75 | mtd->unlock = lpddr_unlock; | ||
76 | mtd->suspend = NULL; | ||
77 | mtd->resume = NULL; | ||
78 | if (map_is_linear(map)) { | ||
79 | mtd->point = lpddr_point; | ||
80 | mtd->unpoint = lpddr_unpoint; | ||
81 | } | ||
82 | mtd->block_isbad = NULL; | ||
83 | mtd->block_markbad = NULL; | ||
84 | mtd->size = 1 << lpddr->qinfo->DevSizeShift; | ||
85 | mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; | ||
86 | mtd->writesize = 1 << lpddr->qinfo->BufSizeShift; | ||
87 | |||
88 | shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips, | ||
89 | GFP_KERNEL); | ||
90 | if (!shared) { | ||
91 | kfree(lpddr); | ||
92 | kfree(mtd); | ||
93 | return NULL; | ||
94 | } | ||
95 | |||
96 | chip = &lpddr->chips[0]; | ||
97 | numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum; | ||
98 | for (i = 0; i < numchips; i++) { | ||
99 | shared[i].writing = shared[i].erasing = NULL; | ||
100 | spin_lock_init(&shared[i].lock); | ||
101 | for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) { | ||
102 | *chip = lpddr->chips[i]; | ||
103 | chip->start += j << lpddr->chipshift; | ||
104 | chip->oldstate = chip->state = FL_READY; | ||
105 | chip->priv = &shared[i]; | ||
106 | /* those should be reset too since | ||
107 | they create memory references. */ | ||
108 | init_waitqueue_head(&chip->wq); | ||
109 | spin_lock_init(&chip->_spinlock); | ||
110 | chip->mutex = &chip->_spinlock; | ||
111 | chip++; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | return mtd; | ||
116 | } | ||
117 | EXPORT_SYMBOL(lpddr_cmdset); | ||
118 | |||
119 | static int wait_for_ready(struct map_info *map, struct flchip *chip, | ||
120 | unsigned int chip_op_time) | ||
121 | { | ||
122 | unsigned int timeo, reset_timeo, sleep_time; | ||
123 | unsigned int dsr; | ||
124 | flstate_t chip_state = chip->state; | ||
125 | int ret = 0; | ||
126 | |||
127 | /* set our timeout to 8 times the expected delay */ | ||
128 | timeo = chip_op_time * 8; | ||
129 | if (!timeo) | ||
130 | timeo = 500000; | ||
131 | reset_timeo = timeo; | ||
132 | sleep_time = chip_op_time / 2; | ||
133 | |||
134 | for (;;) { | ||
135 | dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR)); | ||
136 | if (dsr & DSR_READY_STATUS) | ||
137 | break; | ||
138 | if (!timeo) { | ||
139 | printk(KERN_ERR "%s: Flash timeout error, state %d\n", | ||
140 | map->name, chip_state); | ||
141 | ret = -ETIME; | ||
142 | break; | ||
143 | } | ||
144 | |||
145 | /* OK Still waiting. Drop the lock, wait a while and retry. */ | ||
146 | spin_unlock(chip->mutex); | ||
147 | if (sleep_time >= 1000000/HZ) { | ||
148 | /* | ||
149 | * Half of the normal delay still remaining | ||
150 | * can be performed with a sleeping delay instead | ||
151 | * of busy waiting. | ||
152 | */ | ||
153 | msleep(sleep_time/1000); | ||
154 | timeo -= sleep_time; | ||
155 | sleep_time = 1000000/HZ; | ||
156 | } else { | ||
157 | udelay(1); | ||
158 | cond_resched(); | ||
159 | timeo--; | ||
160 | } | ||
161 | spin_lock(chip->mutex); | ||
162 | |||
163 | while (chip->state != chip_state) { | ||
164 | /* Someone's suspended the operation: sleep */ | ||
165 | DECLARE_WAITQUEUE(wait, current); | ||
166 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
167 | add_wait_queue(&chip->wq, &wait); | ||
168 | spin_unlock(chip->mutex); | ||
169 | schedule(); | ||
170 | remove_wait_queue(&chip->wq, &wait); | ||
171 | spin_lock(chip->mutex); | ||
172 | } | ||
173 | if (chip->erase_suspended || chip->write_suspended) { | ||
174 | /* Suspend has occurred while sleeping: reset timeout */ | ||
175 | timeo = reset_timeo; | ||
176 | chip->erase_suspended = chip->write_suspended = 0; | ||
177 | } | ||
178 | } | ||
179 | /* check status for errors */ | ||
180 | if (dsr & DSR_ERR) { | ||
181 | /* Clear DSR*/ | ||
182 | map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR); | ||
183 | printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n", | ||
184 | map->name, dsr); | ||
185 | print_drs_error(dsr); | ||
186 | ret = -EIO; | ||
187 | } | ||
188 | chip->state = FL_READY; | ||
189 | return ret; | ||
190 | } | ||
191 | |||
192 | static int get_chip(struct map_info *map, struct flchip *chip, int mode) | ||
193 | { | ||
194 | int ret; | ||
195 | DECLARE_WAITQUEUE(wait, current); | ||
196 | |||
197 | retry: | ||
198 | if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING) | ||
199 | && chip->state != FL_SYNCING) { | ||
200 | /* | ||
201 | * OK. There is a possibility of contention on the write/erase | ||
202 | * operations which are global to the real chip and not per | ||
203 | * partition. So let's fight it over in the partition which | ||
204 | * currently has authority on the operation. | ||
205 | * | ||
206 | * The rules are as follows: | ||
207 | * | ||
208 | * - any write operation must own shared->writing. | ||
209 | * | ||
210 | * - any erase operation must own _both_ shared->writing and | ||
211 | * shared->erasing. | ||
212 | * | ||
213 | * - contention arbitration is handled in the owner's context. | ||
214 | * | ||
215 | * The 'shared' struct can be read and/or written only when | ||
216 | * its lock is taken. | ||
217 | */ | ||
218 | struct flchip_shared *shared = chip->priv; | ||
219 | struct flchip *contender; | ||
220 | spin_lock(&shared->lock); | ||
221 | contender = shared->writing; | ||
222 | if (contender && contender != chip) { | ||
223 | /* | ||
224 | * The engine to perform desired operation on this | ||
225 | * partition is already in use by someone else. | ||
226 | * Let's fight over it in the context of the chip | ||
227 | * currently using it. If it is possible to suspend, | ||
228 | * that other partition will do just that, otherwise | ||
229 | * it'll happily send us to sleep. In any case, when | ||
230 | * get_chip returns success we're clear to go ahead. | ||
231 | */ | ||
232 | ret = spin_trylock(contender->mutex); | ||
233 | spin_unlock(&shared->lock); | ||
234 | if (!ret) | ||
235 | goto retry; | ||
236 | spin_unlock(chip->mutex); | ||
237 | ret = chip_ready(map, contender, mode); | ||
238 | spin_lock(chip->mutex); | ||
239 | |||
240 | if (ret == -EAGAIN) { | ||
241 | spin_unlock(contender->mutex); | ||
242 | goto retry; | ||
243 | } | ||
244 | if (ret) { | ||
245 | spin_unlock(contender->mutex); | ||
246 | return ret; | ||
247 | } | ||
248 | spin_lock(&shared->lock); | ||
249 | |||
250 | /* We should not own the chip if it is already in FL_SYNCING | ||
251 | * state. Put the contender and retry. */ | ||
252 | if (chip->state == FL_SYNCING) { | ||
253 | put_chip(map, contender); | ||
254 | spin_unlock(contender->mutex); | ||
255 | goto retry; | ||
256 | } | ||
257 | spin_unlock(contender->mutex); | ||
258 | } | ||
259 | |||
260 | /* Check if we have a suspended erase on this chip. | ||
261 | Must sleep in such a case. */ | ||
262 | if (mode == FL_ERASING && shared->erasing | ||
263 | && shared->erasing->oldstate == FL_ERASING) { | ||
264 | spin_unlock(&shared->lock); | ||
265 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
266 | add_wait_queue(&chip->wq, &wait); | ||
267 | spin_unlock(chip->mutex); | ||
268 | schedule(); | ||
269 | remove_wait_queue(&chip->wq, &wait); | ||
270 | spin_lock(chip->mutex); | ||
271 | goto retry; | ||
272 | } | ||
273 | |||
274 | /* We now own it */ | ||
275 | shared->writing = chip; | ||
276 | if (mode == FL_ERASING) | ||
277 | shared->erasing = chip; | ||
278 | spin_unlock(&shared->lock); | ||
279 | } | ||
280 | |||
281 | ret = chip_ready(map, chip, mode); | ||
282 | if (ret == -EAGAIN) | ||
283 | goto retry; | ||
284 | |||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | static int chip_ready(struct map_info *map, struct flchip *chip, int mode) | ||
289 | { | ||
290 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
291 | int ret = 0; | ||
292 | DECLARE_WAITQUEUE(wait, current); | ||
293 | |||
294 | /* Prevent setting state FL_SYNCING for chip in suspended state. */ | ||
295 | if (FL_SYNCING == mode && FL_READY != chip->oldstate) | ||
296 | goto sleep; | ||
297 | |||
298 | switch (chip->state) { | ||
299 | case FL_READY: | ||
300 | case FL_JEDEC_QUERY: | ||
301 | return 0; | ||
302 | |||
303 | case FL_ERASING: | ||
304 | if (!lpddr->qinfo->SuspEraseSupp || | ||
305 | !(mode == FL_READY || mode == FL_POINT)) | ||
306 | goto sleep; | ||
307 | |||
308 | map_write(map, CMD(LPDDR_SUSPEND), | ||
309 | map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND); | ||
310 | chip->oldstate = FL_ERASING; | ||
311 | chip->state = FL_ERASE_SUSPENDING; | ||
312 | ret = wait_for_ready(map, chip, 0); | ||
313 | if (ret) { | ||
314 | /* Oops. Something went wrong. */ | ||
315 | /* Resume and pretend we weren't here. */ | ||
316 | map_write(map, CMD(LPDDR_RESUME), | ||
317 | map->pfow_base + PFOW_COMMAND_CODE); | ||
318 | map_write(map, CMD(LPDDR_START_EXECUTION), | ||
319 | map->pfow_base + PFOW_COMMAND_EXECUTE); | ||
320 | chip->state = FL_ERASING; | ||
321 | chip->oldstate = FL_READY; | ||
322 | printk(KERN_ERR "%s: suspend operation failed. " | ||
323 | "State may be wrong\n", map->name); | ||
324 | return -EIO; | ||
325 | } | ||
326 | chip->erase_suspended = 1; | ||
327 | chip->state = FL_READY; | ||
328 | return 0; | ||
329 | /* Erase suspend */ | ||
330 | case FL_POINT: | ||
331 | /* Only if there's no operation suspended... */ | ||
332 | if (mode == FL_READY && chip->oldstate == FL_READY) | ||
333 | return 0; | ||
334 | |||
335 | default: | ||
336 | sleep: | ||
337 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
338 | add_wait_queue(&chip->wq, &wait); | ||
339 | spin_unlock(chip->mutex); | ||
340 | schedule(); | ||
341 | remove_wait_queue(&chip->wq, &wait); | ||
342 | spin_lock(chip->mutex); | ||
343 | return -EAGAIN; | ||
344 | } | ||
345 | } | ||
346 | |||
347 | static void put_chip(struct map_info *map, struct flchip *chip) | ||
348 | { | ||
349 | if (chip->priv) { | ||
350 | struct flchip_shared *shared = chip->priv; | ||
351 | spin_lock(&shared->lock); | ||
352 | if (shared->writing == chip && chip->oldstate == FL_READY) { | ||
353 | /* We own the ability to write, but we're done */ | ||
354 | shared->writing = shared->erasing; | ||
355 | if (shared->writing && shared->writing != chip) { | ||
356 | /* give back the ownership */ | ||
357 | struct flchip *loaner = shared->writing; | ||
358 | spin_lock(loaner->mutex); | ||
359 | spin_unlock(&shared->lock); | ||
360 | spin_unlock(chip->mutex); | ||
361 | put_chip(map, loaner); | ||
362 | spin_lock(chip->mutex); | ||
363 | spin_unlock(loaner->mutex); | ||
364 | wake_up(&chip->wq); | ||
365 | return; | ||
366 | } | ||
367 | shared->erasing = NULL; | ||
368 | shared->writing = NULL; | ||
369 | } else if (shared->erasing == chip && shared->writing != chip) { | ||
370 | /* | ||
371 | * We own the ability to erase without the ability | ||
372 | * to write, which means the erase was suspended | ||
373 | * and some other partition is currently writing. | ||
374 | * Don't let the switch below mess things up since | ||
375 | * we don't have ownership to resume anything. | ||
376 | */ | ||
377 | spin_unlock(&shared->lock); | ||
378 | wake_up(&chip->wq); | ||
379 | return; | ||
380 | } | ||
381 | spin_unlock(&shared->lock); | ||
382 | } | ||
383 | |||
384 | switch (chip->oldstate) { | ||
385 | case FL_ERASING: | ||
386 | chip->state = chip->oldstate; | ||
387 | map_write(map, CMD(LPDDR_RESUME), | ||
388 | map->pfow_base + PFOW_COMMAND_CODE); | ||
389 | map_write(map, CMD(LPDDR_START_EXECUTION), | ||
390 | map->pfow_base + PFOW_COMMAND_EXECUTE); | ||
391 | chip->oldstate = FL_READY; | ||
392 | chip->state = FL_ERASING; | ||
393 | break; | ||
394 | case FL_READY: | ||
395 | break; | ||
396 | default: | ||
397 | printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n", | ||
398 | map->name, chip->oldstate); | ||
399 | } | ||
400 | wake_up(&chip->wq); | ||
401 | } | ||
402 | |||
403 | int do_write_buffer(struct map_info *map, struct flchip *chip, | ||
404 | unsigned long adr, const struct kvec **pvec, | ||
405 | unsigned long *pvec_seek, int len) | ||
406 | { | ||
407 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
408 | map_word datum; | ||
409 | int ret, wbufsize, word_gap, words; | ||
410 | const struct kvec *vec; | ||
411 | unsigned long vec_seek; | ||
412 | unsigned long prog_buf_ofs; | ||
413 | |||
414 | wbufsize = 1 << lpddr->qinfo->BufSizeShift; | ||
415 | |||
416 | spin_lock(chip->mutex); | ||
417 | ret = get_chip(map, chip, FL_WRITING); | ||
418 | if (ret) { | ||
419 | spin_unlock(chip->mutex); | ||
420 | return ret; | ||
421 | } | ||
422 | /* Figure out the number of words to write */ | ||
423 | word_gap = (-adr & (map_bankwidth(map)-1)); | ||
424 | words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map); | ||
425 | if (!word_gap) { | ||
426 | words--; | ||
427 | } else { | ||
428 | word_gap = map_bankwidth(map) - word_gap; | ||
429 | adr -= word_gap; | ||
430 | datum = map_word_ff(map); | ||
431 | } | ||
432 | /* Write data */ | ||
433 | /* Get the program buffer offset from PFOW register data first*/ | ||
434 | prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map, | ||
435 | map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET)); | ||
436 | vec = *pvec; | ||
437 | vec_seek = *pvec_seek; | ||
438 | do { | ||
439 | int n = map_bankwidth(map) - word_gap; | ||
440 | |||
441 | if (n > vec->iov_len - vec_seek) | ||
442 | n = vec->iov_len - vec_seek; | ||
443 | if (n > len) | ||
444 | n = len; | ||
445 | |||
446 | if (!word_gap && (len < map_bankwidth(map))) | ||
447 | datum = map_word_ff(map); | ||
448 | |||
449 | datum = map_word_load_partial(map, datum, | ||
450 | vec->iov_base + vec_seek, word_gap, n); | ||
451 | |||
452 | len -= n; | ||
453 | word_gap += n; | ||
454 | if (!len || word_gap == map_bankwidth(map)) { | ||
455 | map_write(map, datum, prog_buf_ofs); | ||
456 | prog_buf_ofs += map_bankwidth(map); | ||
457 | word_gap = 0; | ||
458 | } | ||
459 | |||
460 | vec_seek += n; | ||
461 | if (vec_seek == vec->iov_len) { | ||
462 | vec++; | ||
463 | vec_seek = 0; | ||
464 | } | ||
465 | } while (len); | ||
466 | *pvec = vec; | ||
467 | *pvec_seek = vec_seek; | ||
468 | |||
469 | /* GO GO GO */ | ||
470 | send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL); | ||
471 | chip->state = FL_WRITING; | ||
472 | ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime)); | ||
473 | if (ret) { | ||
474 | printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n", | ||
475 | map->name, ret, adr); | ||
476 | goto out; | ||
477 | } | ||
478 | |||
479 | out: put_chip(map, chip); | ||
480 | spin_unlock(chip->mutex); | ||
481 | return ret; | ||
482 | } | ||
483 | |||
484 | int do_erase_oneblock(struct mtd_info *mtd, loff_t adr) | ||
485 | { | ||
486 | struct map_info *map = mtd->priv; | ||
487 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
488 | int chipnum = adr >> lpddr->chipshift; | ||
489 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
490 | int ret; | ||
491 | |||
492 | spin_lock(chip->mutex); | ||
493 | ret = get_chip(map, chip, FL_ERASING); | ||
494 | if (ret) { | ||
495 | spin_unlock(chip->mutex); | ||
496 | return ret; | ||
497 | } | ||
498 | send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); | ||
499 | chip->state = FL_ERASING; | ||
500 | ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000); | ||
501 | if (ret) { | ||
502 | printk(KERN_WARNING"%s Erase block error %d at %llx\n", | ||
503 | map->name, ret, adr); | ||
504 | goto out; | ||
505 | } | ||
506 | out: put_chip(map, chip); | ||
507 | spin_unlock(chip->mutex); | ||
508 | return ret; | ||
509 | } | ||
510 | |||
511 | static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, | ||
512 | size_t *retlen, u_char *buf) | ||
513 | { | ||
514 | struct map_info *map = mtd->priv; | ||
515 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
516 | int chipnum = adr >> lpddr->chipshift; | ||
517 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
518 | int ret = 0; | ||
519 | |||
520 | spin_lock(chip->mutex); | ||
521 | ret = get_chip(map, chip, FL_READY); | ||
522 | if (ret) { | ||
523 | spin_unlock(chip->mutex); | ||
524 | return ret; | ||
525 | } | ||
526 | |||
527 | map_copy_from(map, buf, adr, len); | ||
528 | *retlen = len; | ||
529 | |||
530 | put_chip(map, chip); | ||
531 | spin_unlock(chip->mutex); | ||
532 | return ret; | ||
533 | } | ||
534 | |||
535 | static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | ||
536 | size_t *retlen, void **mtdbuf, resource_size_t *phys) | ||
537 | { | ||
538 | struct map_info *map = mtd->priv; | ||
539 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
540 | int chipnum = adr >> lpddr->chipshift; | ||
541 | unsigned long ofs, last_end = 0; | ||
542 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
543 | int ret = 0; | ||
544 | |||
545 | if (!map->virt || (adr + len > mtd->size)) | ||
546 | return -EINVAL; | ||
547 | |||
548 | /* ofs: offset within the first chip at which the first read should start */ | ||
549 | ofs = adr - (chipnum << lpddr->chipshift); | ||
550 | |||
551 | *mtdbuf = (void *)map->virt + chip->start + ofs; | ||
552 | *retlen = 0; | ||
553 | |||
554 | while (len) { | ||
555 | unsigned long thislen; | ||
556 | |||
557 | if (chipnum >= lpddr->numchips) | ||
558 | break; | ||
559 | |||
560 | /* We cannot point across chips that are virtually disjoint */ | ||
561 | if (!last_end) | ||
562 | last_end = chip->start; | ||
563 | else if (chip->start != last_end) | ||
564 | break; | ||
565 | |||
566 | if ((len + ofs - 1) >> lpddr->chipshift) | ||
567 | thislen = (1<<lpddr->chipshift) - ofs; | ||
568 | else | ||
569 | thislen = len; | ||
570 | /* get the chip */ | ||
571 | spin_lock(chip->mutex); | ||
572 | ret = get_chip(map, chip, FL_POINT); | ||
573 | spin_unlock(chip->mutex); | ||
574 | if (ret) | ||
575 | break; | ||
576 | |||
577 | chip->state = FL_POINT; | ||
578 | chip->ref_point_counter++; | ||
579 | *retlen += thislen; | ||
580 | len -= thislen; | ||
581 | |||
582 | ofs = 0; | ||
583 | last_end += 1 << lpddr->chipshift; | ||
584 | chipnum++; | ||
585 | chip = &lpddr->chips[chipnum]; | ||
586 | } | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | ||
591 | { | ||
592 | struct map_info *map = mtd->priv; | ||
593 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
594 | int chipnum = adr >> lpddr->chipshift; | ||
595 | unsigned long ofs; | ||
596 | |||
597 | /* ofs: offset within the first chip at which the first read should start */ | ||
598 | ofs = adr - (chipnum << lpddr->chipshift); | ||
599 | |||
600 | while (len) { | ||
601 | unsigned long thislen; | ||
602 | struct flchip *chip; | ||
603 | |||
604 | chip = &lpddr->chips[chipnum]; | ||
605 | if (chipnum >= lpddr->numchips) | ||
606 | break; | ||
607 | |||
608 | if ((len + ofs - 1) >> lpddr->chipshift) | ||
609 | thislen = (1<<lpddr->chipshift) - ofs; | ||
610 | else | ||
611 | thislen = len; | ||
612 | |||
613 | spin_lock(chip->mutex); | ||
614 | if (chip->state == FL_POINT) { | ||
615 | chip->ref_point_counter--; | ||
616 | if (chip->ref_point_counter == 0) | ||
617 | chip->state = FL_READY; | ||
618 | } else | ||
619 | printk(KERN_WARNING "%s: Warning: unpoint called on non-" | ||
620 | "pointed region\n", map->name); | ||
621 | |||
622 | put_chip(map, chip); | ||
623 | spin_unlock(chip->mutex); | ||
624 | |||
625 | len -= thislen; | ||
626 | ofs = 0; | ||
627 | chipnum++; | ||
628 | } | ||
629 | } | ||
630 | |||
631 | static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, | ||
632 | size_t *retlen, const u_char *buf) | ||
633 | { | ||
634 | struct kvec vec; | ||
635 | |||
636 | vec.iov_base = (void *) buf; | ||
637 | vec.iov_len = len; | ||
638 | |||
639 | return lpddr_writev(mtd, &vec, 1, to, retlen); | ||
640 | } | ||
641 | |||
642 | |||
643 | static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs, | ||
644 | unsigned long count, loff_t to, size_t *retlen) | ||
645 | { | ||
646 | struct map_info *map = mtd->priv; | ||
647 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
648 | int ret = 0; | ||
649 | int chipnum; | ||
650 | unsigned long ofs, vec_seek, i; | ||
651 | int wbufsize = 1 << lpddr->qinfo->BufSizeShift; | ||
652 | |||
653 | size_t len = 0; | ||
654 | |||
655 | for (i = 0; i < count; i++) | ||
656 | len += vecs[i].iov_len; | ||
657 | |||
658 | *retlen = 0; | ||
659 | if (!len) | ||
660 | return 0; | ||
661 | |||
662 | chipnum = to >> lpddr->chipshift; | ||
663 | |||
664 | ofs = to; | ||
665 | vec_seek = 0; | ||
666 | |||
667 | do { | ||
668 | /* We must not cross write block boundaries */ | ||
669 | int size = wbufsize - (ofs & (wbufsize-1)); | ||
670 | |||
671 | if (size > len) | ||
672 | size = len; | ||
673 | |||
674 | ret = do_write_buffer(map, &lpddr->chips[chipnum], | ||
675 | ofs, &vecs, &vec_seek, size); | ||
676 | if (ret) | ||
677 | return ret; | ||
678 | |||
679 | ofs += size; | ||
680 | (*retlen) += size; | ||
681 | len -= size; | ||
682 | |||
683 | /* Be nice and reschedule with the chip in a usable | ||
684 | * state for other processes */ | ||
685 | cond_resched(); | ||
686 | |||
687 | } while (len); | ||
688 | |||
689 | return 0; | ||
690 | } | ||
691 | |||
692 | static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr) | ||
693 | { | ||
694 | unsigned long ofs, len; | ||
695 | int ret; | ||
696 | struct map_info *map = mtd->priv; | ||
697 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
698 | int size = 1 << lpddr->qinfo->UniformBlockSizeShift; | ||
699 | |||
700 | ofs = instr->addr; | ||
701 | len = instr->len; | ||
702 | |||
703 | if (ofs > mtd->size || (len + ofs) > mtd->size) | ||
704 | return -EINVAL; | ||
705 | |||
706 | while (len > 0) { | ||
707 | ret = do_erase_oneblock(mtd, ofs); | ||
708 | if (ret) | ||
709 | return ret; | ||
710 | ofs += size; | ||
711 | len -= size; | ||
712 | } | ||
713 | instr->state = MTD_ERASE_DONE; | ||
714 | mtd_erase_callback(instr); | ||
715 | |||
716 | return 0; | ||
717 | } | ||
718 | |||
719 | #define DO_XXLOCK_LOCK 1 | ||
720 | #define DO_XXLOCK_UNLOCK 2 | ||
721 | int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) | ||
722 | { | ||
723 | int ret = 0; | ||
724 | struct map_info *map = mtd->priv; | ||
725 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
726 | int chipnum = adr >> lpddr->chipshift; | ||
727 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
728 | |||
729 | spin_lock(chip->mutex); | ||
730 | ret = get_chip(map, chip, FL_LOCKING); | ||
731 | if (ret) { | ||
732 | spin_unlock(chip->mutex); | ||
733 | return ret; | ||
734 | } | ||
735 | |||
736 | if (thunk == DO_XXLOCK_LOCK) { | ||
737 | send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL); | ||
738 | chip->state = FL_LOCKING; | ||
739 | } else if (thunk == DO_XXLOCK_UNLOCK) { | ||
740 | send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL); | ||
741 | chip->state = FL_UNLOCKING; | ||
742 | } else | ||
743 | BUG(); | ||
744 | |||
745 | ret = wait_for_ready(map, chip, 1); | ||
746 | if (ret) { | ||
747 | printk(KERN_ERR "%s: block lock/unlock error status %d\n", | ||
748 | map->name, ret); | ||
749 | goto out; | ||
750 | } | ||
751 | out: put_chip(map, chip); | ||
752 | spin_unlock(chip->mutex); | ||
753 | return ret; | ||
754 | } | ||
755 | |||
756 | static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
757 | { | ||
758 | return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK); | ||
759 | } | ||
760 | |||
761 | static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
762 | { | ||
763 | return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK); | ||
764 | } | ||
765 | |||
766 | int word_program(struct map_info *map, loff_t adr, uint32_t curval) | ||
767 | { | ||
768 | int ret; | ||
769 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
770 | int chipnum = adr >> lpddr->chipshift; | ||
771 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
772 | |||
773 | spin_lock(chip->mutex); | ||
774 | ret = get_chip(map, chip, FL_WRITING); | ||
775 | if (ret) { | ||
776 | spin_unlock(chip->mutex); | ||
777 | return ret; | ||
778 | } | ||
779 | |||
780 | send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval); | ||
781 | |||
782 | ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime)); | ||
783 | if (ret) { | ||
784 | printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n", | ||
785 | map->name, adr, curval); | ||
786 | goto out; | ||
787 | } | ||
788 | |||
789 | out: put_chip(map, chip); | ||
790 | spin_unlock(chip->mutex); | ||
791 | return ret; | ||
792 | } | ||
793 | |||
794 | MODULE_LICENSE("GPL"); | ||
795 | MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>"); | ||
796 | MODULE_DESCRIPTION("MTD driver for LPDDR flash chips"); | ||
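For orientation, here is a minimal sketch of how a map driver might consume this command set. The board name, physical address, size, bank width and PFOW offset are illustrative assumptions, not values taken from this patch; the sketch relies only on the long-standing do_map_probe()/simple_map_init()/add_mtd_device() interfaces, on the pfow_base field this series adds to struct map_info, and on the "qinfo_probe" chip driver introduced in the next file.

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/io.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/map.h>

	/* Illustrative board parameters (assumptions, not part of this patch). */
	#define BOARD_LPDDR_PHYS	0x00000000
	#define BOARD_LPDDR_SIZE	0x04000000
	#define BOARD_PFOW_BASE		0x00000000	/* offset of the PFOW overlay window */

	static struct map_info board_lpddr_map = {
		.name		= "board-lpddr",
		.phys		= BOARD_LPDDR_PHYS,
		.size		= BOARD_LPDDR_SIZE,
		.bankwidth	= 2,
		.pfow_base	= BOARD_PFOW_BASE,
	};

	static struct mtd_info *board_lpddr_mtd;

	static int __init board_lpddr_init(void)
	{
		board_lpddr_map.virt = ioremap(BOARD_LPDDR_PHYS, BOARD_LPDDR_SIZE);
		if (!board_lpddr_map.virt)
			return -EIO;
		simple_map_init(&board_lpddr_map);

		/* "qinfo_probe" is registered by qinfo_probe.c; on success it
		 * returns an mtd_info built by lpddr_cmdset() above. */
		board_lpddr_mtd = do_map_probe("qinfo_probe", &board_lpddr_map);
		if (!board_lpddr_mtd) {
			iounmap(board_lpddr_map.virt);
			return -ENXIO;
		}
		board_lpddr_mtd->owner = THIS_MODULE;
		return add_mtd_device(board_lpddr_mtd) ? -ENODEV : 0;
	}

	static void __exit board_lpddr_exit(void)
	{
		del_mtd_device(board_lpddr_mtd);
		map_destroy(board_lpddr_mtd);
		iounmap(board_lpddr_map.virt);
	}

	module_init(board_lpddr_init);
	module_exit(board_lpddr_exit);
	MODULE_LICENSE("GPL");

In other words, boards are not expected to call lpddr_cmdset() directly; it is reached through the qinfo_probe chip driver, much as the CFI command sets are reached through cfi_probe.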
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c new file mode 100644 index 000000000000..79bf40f48b75 --- /dev/null +++ b/drivers/mtd/lpddr/qinfo_probe.c | |||
@@ -0,0 +1,255 @@ | |||
1 | /* | ||
2 | * Probing flash chips with QINFO records. | ||
3 | * (C) 2008 Korolev Alexey <akorolev@infradead.org> | ||
4 | * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version 2 | ||
9 | * of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | */ | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | |||
29 | #include <linux/mtd/xip.h> | ||
30 | #include <linux/mtd/map.h> | ||
31 | #include <linux/mtd/pfow.h> | ||
32 | #include <linux/mtd/qinfo.h> | ||
33 | |||
34 | static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr); | ||
35 | struct mtd_info *lpddr_probe(struct map_info *map); | ||
36 | static struct lpddr_private *lpddr_probe_chip(struct map_info *map); | ||
37 | static int lpddr_pfow_present(struct map_info *map, | ||
38 | struct lpddr_private *lpddr); | ||
39 | |||
40 | static struct qinfo_query_info qinfo_array[] = { | ||
41 | /* General device info */ | ||
42 | {0, 0, "DevSizeShift", "Device size 2^n bytes"}, | ||
43 | {0, 3, "BufSizeShift", "Program buffer size 2^n bytes"}, | ||
44 | /* Erase block information */ | ||
45 | {1, 1, "TotalBlocksNum", "Total number of blocks"}, | ||
46 | {1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"}, | ||
47 | /* Partition information */ | ||
48 | {2, 1, "HWPartsNum", "Number of hardware partitions"}, | ||
49 | /* Optional features */ | ||
50 | {5, 1, "SuspEraseSupp", "Suspend erase supported"}, | ||
51 | /* Operation typical time */ | ||
52 | {10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"}, | ||
53 | {10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"}, | ||
54 | {10, 2, "BlockEraseTime", "Block erase 2^n m-sec"}, | ||
55 | {10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"}, | ||
56 | }; | ||
57 | |||
58 | static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str) | ||
59 | { | ||
60 | int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info); | ||
61 | int i; | ||
62 | int bankwidth = map_bankwidth(map) * 8; | ||
63 | int major, minor; | ||
64 | |||
65 | for (i = 0; i < qinfo_lines; i++) { | ||
66 | if (strcmp(id_str, qinfo_array[i].id_str) == 0) { | ||
67 | major = qinfo_array[i].major & ((1 << bankwidth) - 1); | ||
68 | minor = qinfo_array[i].minor & ((1 << bankwidth) - 1); | ||
69 | return minor | (major << bankwidth); | ||
70 | } | ||
71 | } | ||
72 | printk(KERN_ERR"%s qinfo id string is wrong! \n", map->name); | ||
73 | BUG(); | ||
74 | return -1; | ||
75 | } | ||
76 | |||
77 | static uint16_t lpddr_info_query(struct map_info *map, char *id_str) | ||
78 | { | ||
79 | unsigned int dsr, val; | ||
80 | int bits_per_chip = map_bankwidth(map) * 8; | ||
81 | unsigned long adr = lpddr_get_qinforec_pos(map, id_str); | ||
82 | int attempts = 20; | ||
83 | |||
84 | /* Write a request for the PFOW record */ | ||
85 | map_write(map, CMD(LPDDR_INFO_QUERY), | ||
86 | map->pfow_base + PFOW_COMMAND_CODE); | ||
87 | map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)), | ||
88 | map->pfow_base + PFOW_COMMAND_ADDRESS_L); | ||
89 | map_write(map, CMD(adr >> bits_per_chip), | ||
90 | map->pfow_base + PFOW_COMMAND_ADDRESS_H); | ||
91 | map_write(map, CMD(LPDDR_START_EXECUTION), | ||
92 | map->pfow_base + PFOW_COMMAND_EXECUTE); | ||
93 | |||
94 | while ((attempts--) > 0) { | ||
95 | dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR)); | ||
96 | if (dsr & DSR_READY_STATUS) | ||
97 | break; | ||
98 | udelay(10); | ||
99 | } | ||
100 | |||
101 | val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA)); | ||
102 | return val; | ||
103 | } | ||
104 | |||
105 | static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr) | ||
106 | { | ||
107 | map_word pfow_val[4]; | ||
108 | |||
109 | /* Check identification string */ | ||
110 | pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P); | ||
111 | pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F); | ||
112 | pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O); | ||
113 | pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W); | ||
114 | |||
115 | if (!map_word_equal(map, CMD('P'), pfow_val[0])) | ||
116 | goto out; | ||
117 | |||
118 | if (!map_word_equal(map, CMD('F'), pfow_val[1])) | ||
119 | goto out; | ||
120 | |||
121 | if (!map_word_equal(map, CMD('O'), pfow_val[2])) | ||
122 | goto out; | ||
123 | |||
124 | if (!map_word_equal(map, CMD('W'), pfow_val[3])) | ||
125 | goto out; | ||
126 | |||
127 | return 1; /* "PFOW" is found */ | ||
128 | out: | ||
129 | printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found \n", | ||
130 | map->name, map->pfow_base); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr) | ||
135 | { | ||
136 | |||
137 | lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL); | ||
138 | if (!lpddr->qinfo) { | ||
139 | printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n", | ||
140 | map->name); | ||
141 | return 0; | ||
142 | } | ||
143 | memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip)); | ||
144 | |||
145 | /* Get the ManuID */ | ||
146 | lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID)); | ||
147 | /* Get the DeviceID */ | ||
148 | lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID)); | ||
149 | /* read parameters from chip qinfo table */ | ||
150 | lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift"); | ||
151 | lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum"); | ||
152 | lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift"); | ||
153 | lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum"); | ||
154 | lpddr->qinfo->UniformBlockSizeShift = | ||
155 | lpddr_info_query(map, "UniformBlockSizeShift"); | ||
156 | lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp"); | ||
157 | lpddr->qinfo->SingleWordProgTime = | ||
158 | lpddr_info_query(map, "SingleWordProgTime"); | ||
159 | lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime"); | ||
160 | lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime"); | ||
161 | return 1; | ||
162 | } | ||
163 | static struct lpddr_private *lpddr_probe_chip(struct map_info *map) | ||
164 | { | ||
165 | struct lpddr_private lpddr; | ||
166 | struct lpddr_private *retlpddr; | ||
167 | int numvirtchips; | ||
168 | |||
169 | |||
170 | if ((map->pfow_base + 0x1000) >= map->size) { | ||
171 | printk(KERN_NOTICE"%s Probe at base (0x%08lx) past the end of " | ||
172 | "the map (0x%08lx)\n", map->name, | ||
173 | (unsigned long)map->pfow_base, map->size - 1); | ||
174 | return NULL; | ||
175 | } | ||
176 | memset(&lpddr, 0, sizeof(struct lpddr_private)); | ||
177 | if (!lpddr_pfow_present(map, &lpddr)) | ||
178 | return NULL; | ||
179 | |||
180 | if (!lpddr_chip_setup(map, &lpddr)) | ||
181 | return NULL; | ||
182 | |||
183 | /* Ok so we found a chip */ | ||
184 | lpddr.chipshift = lpddr.qinfo->DevSizeShift; | ||
185 | lpddr.numchips = 1; | ||
186 | |||
187 | numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum; | ||
188 | retlpddr = kmalloc(sizeof(struct lpddr_private) + | ||
189 | numvirtchips * sizeof(struct flchip), GFP_KERNEL); | ||
190 | if (!retlpddr) | ||
191 | return NULL; | ||
192 | |||
193 | memset(retlpddr, 0, sizeof(struct lpddr_private) + | ||
194 | numvirtchips * sizeof(struct flchip)); | ||
195 | memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private)); | ||
196 | |||
197 | retlpddr->numchips = numvirtchips; | ||
198 | retlpddr->chipshift = retlpddr->qinfo->DevSizeShift - | ||
199 | __ffs(retlpddr->qinfo->HWPartsNum); | ||
200 | |||
201 | return retlpddr; | ||
202 | } | ||
203 | |||
204 | struct mtd_info *lpddr_probe(struct map_info *map) | ||
205 | { | ||
206 | struct mtd_info *mtd = NULL; | ||
207 | struct lpddr_private *lpddr; | ||
208 | |||
209 | /* First probe the map to see if we can find the PFOW interface here */ | ||
210 | lpddr = lpddr_probe_chip(map); | ||
211 | if (!lpddr) | ||
212 | return NULL; | ||
213 | |||
214 | map->fldrv_priv = lpddr; | ||
215 | mtd = lpddr_cmdset(map); | ||
216 | if (mtd) { | ||
217 | if (mtd->size > map->size) { | ||
218 | printk(KERN_WARNING "Reducing visibility of %ldKiB chip " | ||
219 | "to %ldKiB\n", (unsigned long)mtd->size >> 10, | ||
220 | (unsigned long)map->size >> 10); | ||
221 | mtd->size = map->size; | ||
222 | } | ||
223 | return mtd; | ||
224 | } | ||
225 | |||
226 | kfree(lpddr->qinfo); | ||
227 | kfree(lpddr); | ||
228 | map->fldrv_priv = NULL; | ||
229 | return NULL; | ||
230 | } | ||
231 | |||
232 | static struct mtd_chip_driver lpddr_chipdrv = { | ||
233 | .probe = lpddr_probe, | ||
234 | .name = "qinfo_probe", | ||
235 | .module = THIS_MODULE | ||
236 | }; | ||
237 | |||
238 | static int __init lpddr_probe_init(void) | ||
239 | { | ||
240 | register_mtd_chip_driver(&lpddr_chipdrv); | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static void __exit lpddr_probe_exit(void) | ||
245 | { | ||
246 | unregister_mtd_chip_driver(&lpddr_chipdrv); | ||
247 | } | ||
248 | |||
249 | module_init(lpddr_probe_init); | ||
250 | module_exit(lpddr_probe_exit); | ||
251 | |||
252 | MODULE_LICENSE("GPL"); | ||
253 | MODULE_AUTHOR("Vasiliy Leonenko <vasiliy.leonenko@gmail.com>"); | ||
254 | MODULE_DESCRIPTION("Driver to probe qinfo flash chips"); | ||
255 | |||
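The Kconfig and physmap changes that follow hook this probe into the generic physmap driver. As a hedged sketch of what that enables, a board file might describe its LPDDR window roughly as below; the addresses and width are assumptions, while .pfow_base is the new physmap_flash_data field that the physmap diff further down copies into map_info.

	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/mtd/physmap.h>

	/* Illustrative window (assumptions, not taken from this patch). */
	static struct resource board_flash_resource = {
		.start	= 0x00000000,
		.end	= 0x04000000 - 1,
		.flags	= IORESOURCE_MEM,
	};

	static struct physmap_flash_data board_flash_data = {
		.width		= 2,		/* bank width in octets */
		.pfow_base	= 0x00000000,	/* PFOW window offset used by qinfo_probe */
	};

	static struct platform_device board_flash_device = {
		.name		= "physmap-flash",
		.id		= 0,
		.dev		= { .platform_data = &board_flash_data },
		.resource	= &board_flash_resource,
		.num_resources	= 1,
	};

	/* Registering this from the board init (platform_device_register())
	 * lets physmap_flash_probe() try "qinfo_probe" alongside cfi_probe,
	 * jedec_probe and map_rom, per the rom_probe_types change below. */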
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 5ea169362164..0225cbbf22de 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -10,8 +10,8 @@ config MTD_COMPLEX_MAPPINGS | |||
10 | paged mappings of flash chips. | 10 | paged mappings of flash chips. |
11 | 11 | ||
12 | config MTD_PHYSMAP | 12 | config MTD_PHYSMAP |
13 | tristate "CFI Flash device in physical memory map" | 13 | tristate "Flash device in physical memory map" |
14 | depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM | 14 | depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_LPDDR |
15 | help | 15 | help |
16 | This provides a 'mapping' driver which allows the NOR Flash and | 16 | This provides a 'mapping' driver which allows the NOR Flash and |
17 | ROM driver code to communicate with chips which are mapped | 17 | ROM driver code to communicate with chips which are mapped |
@@ -23,9 +23,20 @@ config MTD_PHYSMAP | |||
23 | To compile this driver as a module, choose M here: the | 23 | To compile this driver as a module, choose M here: the |
24 | module will be called physmap. | 24 | module will be called physmap. |
25 | 25 | ||
26 | config MTD_PHYSMAP_COMPAT | ||
27 | bool "Physmap compat support" | ||
28 | depends on MTD_PHYSMAP | ||
29 | default n | ||
30 | help | ||
31 | Set up a simple mapping via the Kconfig options. Normally the | ||
32 | physmap configuration options are done via your board's | ||
33 | resource file. | ||
34 | |||
35 | If unsure, say N here. | ||
36 | |||
26 | config MTD_PHYSMAP_START | 37 | config MTD_PHYSMAP_START |
27 | hex "Physical start address of flash mapping" | 38 | hex "Physical start address of flash mapping" |
28 | depends on MTD_PHYSMAP | 39 | depends on MTD_PHYSMAP_COMPAT |
29 | default "0x8000000" | 40 | default "0x8000000" |
30 | help | 41 | help |
31 | This is the physical memory location at which the flash chips | 42 | This is the physical memory location at which the flash chips |
@@ -37,7 +48,7 @@ config MTD_PHYSMAP_START | |||
37 | 48 | ||
38 | config MTD_PHYSMAP_LEN | 49 | config MTD_PHYSMAP_LEN |
39 | hex "Physical length of flash mapping" | 50 | hex "Physical length of flash mapping" |
40 | depends on MTD_PHYSMAP | 51 | depends on MTD_PHYSMAP_COMPAT |
41 | default "0" | 52 | default "0" |
42 | help | 53 | help |
43 | This is the total length of the mapping of the flash chips on | 54 | This is the total length of the mapping of the flash chips on |
@@ -51,7 +62,7 @@ config MTD_PHYSMAP_LEN | |||
51 | 62 | ||
52 | config MTD_PHYSMAP_BANKWIDTH | 63 | config MTD_PHYSMAP_BANKWIDTH |
53 | int "Bank width in octets" | 64 | int "Bank width in octets" |
54 | depends on MTD_PHYSMAP | 65 | depends on MTD_PHYSMAP_COMPAT |
55 | default "2" | 66 | default "2" |
56 | help | 67 | help |
57 | This is the total width of the data bus of the flash devices | 68 | This is the total width of the data bus of the flash devices |
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c index 82811bcb0436..845ad4f2a542 100644 --- a/drivers/mtd/maps/alchemy-flash.c +++ b/drivers/mtd/maps/alchemy-flash.c | |||
@@ -111,7 +111,7 @@ static struct mtd_partition alchemy_partitions[] = { | |||
111 | 111 | ||
112 | static struct mtd_info *mymtd; | 112 | static struct mtd_info *mymtd; |
113 | 113 | ||
114 | int __init alchemy_mtd_init(void) | 114 | static int __init alchemy_mtd_init(void) |
115 | { | 115 | { |
116 | struct mtd_partition *parts; | 116 | struct mtd_partition *parts; |
117 | int nb_parts = 0; | 117 | int nb_parts = 0; |
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index d1eec7d3243f..237733d094c4 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c | |||
@@ -232,8 +232,8 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev, | |||
232 | /* Trim the size if we are larger than the map */ | 232 | /* Trim the size if we are larger than the map */ |
233 | if (map->mtd->size > map->map.size) { | 233 | if (map->mtd->size > map->map.size) { |
234 | printk(KERN_WARNING MOD_NAME | 234 | printk(KERN_WARNING MOD_NAME |
235 | " rom(%u) larger than window(%lu). fixing...\n", | 235 | " rom(%llu) larger than window(%lu). fixing...\n", |
236 | map->mtd->size, map->map.size); | 236 | (unsigned long long)map->mtd->size, map->map.size); |
237 | map->mtd->size = map->map.size; | 237 | map->mtd->size = map->map.size; |
238 | } | 238 | } |
239 | if (window->rsrc.parent) { | 239 | if (window->rsrc.parent) { |
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c index 0ecc3f6d735b..b4ed81611918 100644 --- a/drivers/mtd/maps/cfi_flagadm.c +++ b/drivers/mtd/maps/cfi_flagadm.c | |||
@@ -88,7 +88,7 @@ struct mtd_partition flagadm_parts[] = { | |||
88 | 88 | ||
89 | static struct mtd_info *mymtd; | 89 | static struct mtd_info *mymtd; |
90 | 90 | ||
91 | int __init init_flagadm(void) | 91 | static int __init init_flagadm(void) |
92 | { | 92 | { |
93 | printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n", | 93 | printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n", |
94 | FLASH_SIZE, FLASH_PHYS_ADDR); | 94 | FLASH_SIZE, FLASH_PHYS_ADDR); |
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c index 1a6feb4474de..5f7a245ed132 100644 --- a/drivers/mtd/maps/ck804xrom.c +++ b/drivers/mtd/maps/ck804xrom.c | |||
@@ -263,8 +263,8 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev, | |||
263 | /* Trim the size if we are larger than the map */ | 263 | /* Trim the size if we are larger than the map */ |
264 | if (map->mtd->size > map->map.size) { | 264 | if (map->mtd->size > map->map.size) { |
265 | printk(KERN_WARNING MOD_NAME | 265 | printk(KERN_WARNING MOD_NAME |
266 | " rom(%u) larger than window(%lu). fixing...\n", | 266 | " rom(%llu) larger than window(%lu). fixing...\n", |
267 | map->mtd->size, map->map.size); | 267 | (unsigned long long)map->mtd->size, map->map.size); |
268 | map->mtd->size = map->map.size; | 268 | map->mtd->size = map->map.size; |
269 | } | 269 | } |
270 | if (window->rsrc.parent) { | 270 | if (window->rsrc.parent) { |
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c index e115667bf1d0..cfacfa6f45dd 100644 --- a/drivers/mtd/maps/dbox2-flash.c +++ b/drivers/mtd/maps/dbox2-flash.c | |||
@@ -69,7 +69,7 @@ struct map_info dbox2_flash_map = { | |||
69 | .phys = WINDOW_ADDR, | 69 | .phys = WINDOW_ADDR, |
70 | }; | 70 | }; |
71 | 71 | ||
72 | int __init init_dbox2_flash(void) | 72 | static int __init init_dbox2_flash(void) |
73 | { | 73 | { |
74 | printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR); | 74 | printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR); |
75 | dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); | 75 | dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); |
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c index 9433738c1664..be9e90b44587 100644 --- a/drivers/mtd/maps/edb7312.c +++ b/drivers/mtd/maps/edb7312.c | |||
@@ -71,7 +71,7 @@ static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; | |||
71 | static int mtd_parts_nb = 0; | 71 | static int mtd_parts_nb = 0; |
72 | static struct mtd_partition *mtd_parts = 0; | 72 | static struct mtd_partition *mtd_parts = 0; |
73 | 73 | ||
74 | int __init init_edb7312nor(void) | 74 | static int __init init_edb7312nor(void) |
75 | { | 75 | { |
76 | static const char *rom_probe_types[] = PROBETYPES; | 76 | static const char *rom_probe_types[] = PROBETYPES; |
77 | const char **type; | 77 | const char **type; |
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c index bbbcdd4c8d13..11a2f57df9cf 100644 --- a/drivers/mtd/maps/esb2rom.c +++ b/drivers/mtd/maps/esb2rom.c | |||
@@ -324,8 +324,8 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev, | |||
324 | /* Trim the size if we are larger than the map */ | 324 | /* Trim the size if we are larger than the map */ |
325 | if (map->mtd->size > map->map.size) { | 325 | if (map->mtd->size > map->map.size) { |
326 | printk(KERN_WARNING MOD_NAME | 326 | printk(KERN_WARNING MOD_NAME |
327 | " rom(%u) larger than window(%lu). fixing...\n", | 327 | " rom(%llu) larger than window(%lu). fixing...\n", |
328 | map->mtd->size, map->map.size); | 328 | (unsigned long long)map->mtd->size, map->map.size); |
329 | map->mtd->size = map->map.size; | 329 | map->mtd->size = map->map.size; |
330 | } | 330 | } |
331 | if (window->rsrc.parent) { | 331 | if (window->rsrc.parent) { |
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c index a8e3fde4cbd5..1e43124d498b 100644 --- a/drivers/mtd/maps/fortunet.c +++ b/drivers/mtd/maps/fortunet.c | |||
@@ -181,7 +181,7 @@ __setup("MTD_Partition=", MTD_New_Partition); | |||
181 | /* Backwards-spelling-compatibility */ | 181 | /* Backwards-spelling-compatibility */ |
182 | __setup("MTD_Partion=", MTD_New_Partition); | 182 | __setup("MTD_Partion=", MTD_New_Partition); |
183 | 183 | ||
184 | int __init init_fortunet(void) | 184 | static int __init init_fortunet(void) |
185 | { | 185 | { |
186 | int ix,iy; | 186 | int ix,iy; |
187 | for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++) | 187 | for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++) |
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c index 3b959fad1c4e..72c724fa8c27 100644 --- a/drivers/mtd/maps/h720x-flash.c +++ b/drivers/mtd/maps/h720x-flash.c | |||
@@ -65,7 +65,7 @@ static const char *probes[] = { "cmdlinepart", NULL }; | |||
65 | /* | 65 | /* |
66 | * Initialize FLASH support | 66 | * Initialize FLASH support |
67 | */ | 67 | */ |
68 | int __init h720x_mtd_init(void) | 68 | static int __init h720x_mtd_init(void) |
69 | { | 69 | { |
70 | 70 | ||
71 | char *part_type = NULL; | 71 | char *part_type = NULL; |
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c index aeb6c916e23f..c32bc28920b3 100644 --- a/drivers/mtd/maps/ichxrom.c +++ b/drivers/mtd/maps/ichxrom.c | |||
@@ -258,8 +258,8 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev, | |||
258 | /* Trim the size if we are larger than the map */ | 258 | /* Trim the size if we are larger than the map */ |
259 | if (map->mtd->size > map->map.size) { | 259 | if (map->mtd->size > map->map.size) { |
260 | printk(KERN_WARNING MOD_NAME | 260 | printk(KERN_WARNING MOD_NAME |
261 | " rom(%u) larger than window(%lu). fixing...\n", | 261 | " rom(%llu) larger than window(%lu). fixing...\n", |
262 | map->mtd->size, map->map.size); | 262 | (unsigned long long)map->mtd->size, map->map.size); |
263 | map->mtd->size = map->map.size; | 263 | map->mtd->size = map->map.size; |
264 | } | 264 | } |
265 | if (window->rsrc.parent) { | 265 | if (window->rsrc.parent) { |
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c index 2682ab51a367..998a27da97f3 100644 --- a/drivers/mtd/maps/impa7.c +++ b/drivers/mtd/maps/impa7.c | |||
@@ -70,7 +70,7 @@ static struct mtd_partition *mtd_parts[NUM_FLASHBANKS]; | |||
70 | 70 | ||
71 | static const char *probes[] = { "cmdlinepart", NULL }; | 71 | static const char *probes[] = { "cmdlinepart", NULL }; |
72 | 72 | ||
73 | int __init init_impa7(void) | 73 | static int __init init_impa7(void) |
74 | { | 74 | { |
75 | static const char *rom_probe_types[] = PROBETYPES; | 75 | static const char *rom_probe_types[] = PROBETYPES; |
76 | const char **type; | 76 | const char **type; |
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c index ed58f6a77bd9..748c85f635f1 100644 --- a/drivers/mtd/maps/ipaq-flash.c +++ b/drivers/mtd/maps/ipaq-flash.c | |||
@@ -202,7 +202,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; | |||
202 | 202 | ||
203 | static int __init h1900_special_case(void); | 203 | static int __init h1900_special_case(void); |
204 | 204 | ||
205 | int __init ipaq_mtd_init(void) | 205 | static int __init ipaq_mtd_init(void) |
206 | { | 206 | { |
207 | struct mtd_partition *parts = NULL; | 207 | struct mtd_partition *parts = NULL; |
208 | int nb_parts = 0; | 208 | int nb_parts = 0; |
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c index 706f67394b07..0eb5a7c85380 100644 --- a/drivers/mtd/maps/mbx860.c +++ b/drivers/mtd/maps/mbx860.c | |||
@@ -55,7 +55,7 @@ struct map_info mbx_map = { | |||
55 | .bankwidth = 4, | 55 | .bankwidth = 4, |
56 | }; | 56 | }; |
57 | 57 | ||
58 | int __init init_mbx(void) | 58 | static int __init init_mbx(void) |
59 | { | 59 | { |
60 | printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR); | 60 | printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR); |
61 | mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); | 61 | mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); |
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c index 965e6c6d6ab0..a97133eb9d70 100644 --- a/drivers/mtd/maps/nettel.c +++ b/drivers/mtd/maps/nettel.c | |||
@@ -226,7 +226,7 @@ static int __init nettel_init(void) | |||
226 | 226 | ||
227 | if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) { | 227 | if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) { |
228 | printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n", | 228 | printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n", |
229 | amd_mtd->size>>10); | 229 | (int)(amd_mtd->size>>10)); |
230 | 230 | ||
231 | amd_mtd->owner = THIS_MODULE; | 231 | amd_mtd->owner = THIS_MODULE; |
232 | 232 | ||
@@ -357,13 +357,12 @@ static int __init nettel_init(void) | |||
357 | *intel1par = 0; | 357 | *intel1par = 0; |
358 | } | 358 | } |
359 | 359 | ||
360 | printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %dK\n", | 360 | printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %lldKiB\n", |
361 | (intel_mtd->size >> 10)); | 361 | (unsigned long long)(intel_mtd->size >> 10)); |
362 | 362 | ||
363 | intel_mtd->owner = THIS_MODULE; | 363 | intel_mtd->owner = THIS_MODULE; |
364 | 364 | ||
365 | num_intel_partitions = sizeof(nettel_intel_partitions) / | 365 | num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions); |
366 | sizeof(nettel_intel_partitions[0]); | ||
367 | 366 | ||
368 | if (intelboot) { | 367 | if (intelboot) { |
369 | /* | 368 | /* |
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c index 43e04c1d22a9..2b2e45093218 100644 --- a/drivers/mtd/maps/octagon-5066.c +++ b/drivers/mtd/maps/octagon-5066.c | |||
@@ -184,7 +184,7 @@ void cleanup_oct5066(void) | |||
184 | release_region(PAGE_IO, 1); | 184 | release_region(PAGE_IO, 1); |
185 | } | 185 | } |
186 | 186 | ||
187 | int __init init_oct5066(void) | 187 | static int __init init_oct5066(void) |
188 | { | 188 | { |
189 | int i; | 189 | int i; |
190 | int ret = 0; | 190 | int ret = 0; |
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 1db16e549e38..87743661d48e 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c | |||
@@ -29,7 +29,6 @@ struct physmap_flash_info { | |||
29 | struct map_info map[MAX_RESOURCES]; | 29 | struct map_info map[MAX_RESOURCES]; |
30 | #ifdef CONFIG_MTD_PARTITIONS | 30 | #ifdef CONFIG_MTD_PARTITIONS |
31 | int nr_parts; | 31 | int nr_parts; |
32 | struct mtd_partition *parts; | ||
33 | #endif | 32 | #endif |
34 | }; | 33 | }; |
35 | 34 | ||
@@ -56,14 +55,10 @@ static int physmap_flash_remove(struct platform_device *dev) | |||
56 | for (i = 0; i < MAX_RESOURCES; i++) { | 55 | for (i = 0; i < MAX_RESOURCES; i++) { |
57 | if (info->mtd[i] != NULL) { | 56 | if (info->mtd[i] != NULL) { |
58 | #ifdef CONFIG_MTD_PARTITIONS | 57 | #ifdef CONFIG_MTD_PARTITIONS |
59 | if (info->nr_parts) { | 58 | if (info->nr_parts || physmap_data->nr_parts) |
60 | del_mtd_partitions(info->mtd[i]); | 59 | del_mtd_partitions(info->mtd[i]); |
61 | kfree(info->parts); | 60 | else |
62 | } else if (physmap_data->nr_parts) { | ||
63 | del_mtd_partitions(info->mtd[i]); | ||
64 | } else { | ||
65 | del_mtd_device(info->mtd[i]); | 61 | del_mtd_device(info->mtd[i]); |
66 | } | ||
67 | #else | 62 | #else |
68 | del_mtd_device(info->mtd[i]); | 63 | del_mtd_device(info->mtd[i]); |
69 | #endif | 64 | #endif |
@@ -73,7 +68,12 @@ static int physmap_flash_remove(struct platform_device *dev) | |||
73 | return 0; | 68 | return 0; |
74 | } | 69 | } |
75 | 70 | ||
76 | static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL }; | 71 | static const char *rom_probe_types[] = { |
72 | "cfi_probe", | ||
73 | "jedec_probe", | ||
74 | "qinfo_probe", | ||
75 | "map_rom", | ||
76 | NULL }; | ||
77 | #ifdef CONFIG_MTD_PARTITIONS | 77 | #ifdef CONFIG_MTD_PARTITIONS |
78 | static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; | 78 | static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; |
79 | #endif | 79 | #endif |
@@ -86,6 +86,9 @@ static int physmap_flash_probe(struct platform_device *dev) | |||
86 | int err = 0; | 86 | int err = 0; |
87 | int i; | 87 | int i; |
88 | int devices_found = 0; | 88 | int devices_found = 0; |
89 | #ifdef CONFIG_MTD_PARTITIONS | ||
90 | struct mtd_partition *parts; | ||
91 | #endif | ||
89 | 92 | ||
90 | physmap_data = dev->dev.platform_data; | 93 | physmap_data = dev->dev.platform_data; |
91 | if (physmap_data == NULL) | 94 | if (physmap_data == NULL) |
@@ -119,6 +122,7 @@ static int physmap_flash_probe(struct platform_device *dev) | |||
119 | info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1; | 122 | info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1; |
120 | info->map[i].bankwidth = physmap_data->width; | 123 | info->map[i].bankwidth = physmap_data->width; |
121 | info->map[i].set_vpp = physmap_data->set_vpp; | 124 | info->map[i].set_vpp = physmap_data->set_vpp; |
125 | info->map[i].pfow_base = physmap_data->pfow_base; | ||
122 | 126 | ||
123 | info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys, | 127 | info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys, |
124 | info->map[i].size); | 128 | info->map[i].size); |
@@ -163,9 +167,10 @@ static int physmap_flash_probe(struct platform_device *dev) | |||
163 | goto err_out; | 167 | goto err_out; |
164 | 168 | ||
165 | #ifdef CONFIG_MTD_PARTITIONS | 169 | #ifdef CONFIG_MTD_PARTITIONS |
166 | err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0); | 170 | err = parse_mtd_partitions(info->cmtd, part_probe_types, &parts, 0); |
167 | if (err > 0) { | 171 | if (err > 0) { |
168 | add_mtd_partitions(info->cmtd, info->parts, err); | 172 | add_mtd_partitions(info->cmtd, parts, err); |
173 | kfree(parts); | ||
169 | return 0; | 174 | return 0; |
170 | } | 175 | } |
171 | 176 | ||
@@ -251,14 +256,7 @@ static struct platform_driver physmap_flash_driver = { | |||
251 | }; | 256 | }; |
252 | 257 | ||
253 | 258 | ||
254 | #ifdef CONFIG_MTD_PHYSMAP_LEN | 259 | #ifdef CONFIG_MTD_PHYSMAP_COMPAT |
255 | #if CONFIG_MTD_PHYSMAP_LEN != 0 | ||
256 | #warning using PHYSMAP compat code | ||
257 | #define PHYSMAP_COMPAT | ||
258 | #endif | ||
259 | #endif | ||
260 | |||
261 | #ifdef PHYSMAP_COMPAT | ||
262 | static struct physmap_flash_data physmap_flash_data = { | 260 | static struct physmap_flash_data physmap_flash_data = { |
263 | .width = CONFIG_MTD_PHYSMAP_BANKWIDTH, | 261 | .width = CONFIG_MTD_PHYSMAP_BANKWIDTH, |
264 | }; | 262 | }; |
@@ -302,7 +300,7 @@ static int __init physmap_init(void) | |||
302 | int err; | 300 | int err; |
303 | 301 | ||
304 | err = platform_driver_register(&physmap_flash_driver); | 302 | err = platform_driver_register(&physmap_flash_driver); |
305 | #ifdef PHYSMAP_COMPAT | 303 | #ifdef CONFIG_MTD_PHYSMAP_COMPAT |
306 | if (err == 0) | 304 | if (err == 0) |
307 | platform_device_register(&physmap_flash); | 305 | platform_device_register(&physmap_flash); |
308 | #endif | 306 | #endif |
@@ -312,7 +310,7 @@ static int __init physmap_init(void) | |||
312 | 310 | ||
313 | static void __exit physmap_exit(void) | 311 | static void __exit physmap_exit(void) |
314 | { | 312 | { |
315 | #ifdef PHYSMAP_COMPAT | 313 | #ifdef CONFIG_MTD_PHYSMAP_COMPAT |
316 | platform_device_unregister(&physmap_flash); | 314 | platform_device_unregister(&physmap_flash); |
317 | #endif | 315 | #endif |
318 | platform_driver_unregister(&physmap_flash_driver); | 316 | platform_driver_unregister(&physmap_flash_driver); |
@@ -326,8 +324,7 @@ MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); | |||
326 | MODULE_DESCRIPTION("Generic configurable MTD map driver"); | 324 | MODULE_DESCRIPTION("Generic configurable MTD map driver"); |
327 | 325 | ||
328 | /* legacy platform drivers can't hotplug or coldplg */ | 326 | /* legacy platform drivers can't hotplug or coldplg */ |
329 | #ifndef PHYSMAP_COMPAT | 327 | #ifndef CONFIG_MTD_PHYSMAP_COMPAT |
330 | /* work with hotplug and coldplug */ | 328 | /* work with hotplug and coldplug */ |
331 | MODULE_ALIAS("platform:physmap-flash"); | 329 | MODULE_ALIAS("platform:physmap-flash"); |
332 | #endif | 330 | #endif |
333 | |||
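Several things happen in physmap.c: the ad-hoc PHYSMAP_COMPAT define derived from CONFIG_MTD_PHYSMAP_LEN gives way to a real CONFIG_MTD_PHYSMAP_COMPAT Kconfig symbol (its Kconfig definition is outside this hunk), the probe list gains "qinfo_probe" and map_info picks up pfow_base from the platform data, and the driver stops caching the parsed partition array in its private state, registering it and freeing it in the same scope instead. Condensed, the probe-side idiom now reads (locals as in the patch):

    #ifdef CONFIG_MTD_PARTITIONS
            struct mtd_partition *parts;
            int nr;

            /* The parsers allocate 'parts'. */
            nr = parse_mtd_partitions(info->cmtd, part_probe_types, &parts, 0);
            if (nr > 0) {
                    add_mtd_partitions(info->cmtd, parts, nr);
                    kfree(parts);   /* array is no longer kept in the driver */
                    return 0;
            }
    #endif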
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c index f43ba2815cbb..4768bd5459d6 100644 --- a/drivers/mtd/maps/pmcmsp-flash.c +++ b/drivers/mtd/maps/pmcmsp-flash.c | |||
@@ -48,7 +48,7 @@ static int fcnt; | |||
48 | 48 | ||
49 | #define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__) | 49 | #define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__) |
50 | 50 | ||
51 | int __init init_msp_flash(void) | 51 | static int __init init_msp_flash(void) |
52 | { | 52 | { |
53 | int i, j; | 53 | int i, j; |
54 | int offset, coff; | 54 | int offset, coff; |
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c index de002eb1a7fe..933c0b63b016 100644 --- a/drivers/mtd/maps/redwood.c +++ b/drivers/mtd/maps/redwood.c | |||
@@ -122,7 +122,7 @@ struct map_info redwood_flash_map = { | |||
122 | 122 | ||
123 | static struct mtd_info *redwood_mtd; | 123 | static struct mtd_info *redwood_mtd; |
124 | 124 | ||
125 | int __init init_redwood_flash(void) | 125 | static int __init init_redwood_flash(void) |
126 | { | 126 | { |
127 | int err; | 127 | int err; |
128 | 128 | ||
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c index 14d90edb4430..3e3ef53d4fd4 100644 --- a/drivers/mtd/maps/rpxlite.c +++ b/drivers/mtd/maps/rpxlite.c | |||
@@ -23,7 +23,7 @@ static struct map_info rpxlite_map = { | |||
23 | .phys = WINDOW_ADDR, | 23 | .phys = WINDOW_ADDR, |
24 | }; | 24 | }; |
25 | 25 | ||
26 | int __init init_rpxlite(void) | 26 | static int __init init_rpxlite(void) |
27 | { | 27 | { |
28 | printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR); | 28 | printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR); |
29 | rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); | 29 | rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); |
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c index 6e1e99cd2b59..d5374cdcb163 100644 --- a/drivers/mtd/maps/sbc8240.c +++ b/drivers/mtd/maps/sbc8240.c | |||
@@ -136,7 +136,7 @@ static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS]; | |||
136 | #endif /* CONFIG_MTD_PARTITIONS */ | 136 | #endif /* CONFIG_MTD_PARTITIONS */ |
137 | 137 | ||
138 | 138 | ||
139 | int __init init_sbc8240_mtd (void) | 139 | static int __init init_sbc8240_mtd (void) |
140 | { | 140 | { |
141 | static struct _cjs { | 141 | static struct _cjs { |
142 | u_long addr; | 142 | u_long addr; |
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c index 21169e6d646c..7e329f09a548 100644 --- a/drivers/mtd/maps/scb2_flash.c +++ b/drivers/mtd/maps/scb2_flash.c | |||
@@ -118,7 +118,8 @@ scb2_fixup_mtd(struct mtd_info *mtd) | |||
118 | struct mtd_erase_region_info *region = &mtd->eraseregions[i]; | 118 | struct mtd_erase_region_info *region = &mtd->eraseregions[i]; |
119 | 119 | ||
120 | if (region->numblocks * region->erasesize > mtd->size) { | 120 | if (region->numblocks * region->erasesize > mtd->size) { |
121 | region->numblocks = (mtd->size / region->erasesize); | 121 | region->numblocks = ((unsigned long)mtd->size / |
122 | region->erasesize); | ||
122 | done = 1; | 123 | done = 1; |
123 | } else { | 124 | } else { |
124 | region->numblocks = 0; | 125 | region->numblocks = 0; |
@@ -187,8 +188,9 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
187 | return -ENODEV; | 188 | return -ENODEV; |
188 | } | 189 | } |
189 | 190 | ||
190 | printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n", | 191 | printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n", |
191 | scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size); | 192 | (unsigned long long)scb2_mtd->size, |
193 | (unsigned long long)(SCB2_WINDOW - scb2_mtd->size)); | ||
192 | 194 | ||
193 | add_mtd_device(scb2_mtd); | 195 | add_mtd_device(scb2_mtd); |
194 | 196 | ||
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c index 026eab028189..b392f096c706 100644 --- a/drivers/mtd/maps/sharpsl-flash.c +++ b/drivers/mtd/maps/sharpsl-flash.c | |||
@@ -47,7 +47,7 @@ static struct mtd_partition sharpsl_partitions[1] = { | |||
47 | } | 47 | } |
48 | }; | 48 | }; |
49 | 49 | ||
50 | int __init init_sharpsl(void) | 50 | static int __init init_sharpsl(void) |
51 | { | 51 | { |
52 | struct mtd_partition *parts; | 52 | struct mtd_partition *parts; |
53 | int nb_parts = 0; | 53 | int nb_parts = 0; |
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c index a5d3d8531faa..60146984f4be 100644 --- a/drivers/mtd/maps/tqm8xxl.c +++ b/drivers/mtd/maps/tqm8xxl.c | |||
@@ -109,7 +109,7 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = { | |||
109 | }; | 109 | }; |
110 | #endif | 110 | #endif |
111 | 111 | ||
112 | int __init init_tqm_mtd(void) | 112 | static int __init init_tqm_mtd(void) |
113 | { | 113 | { |
114 | int idx = 0, ret = 0; | 114 | int idx = 0, ret = 0; |
115 | unsigned long flash_addr, flash_size, mtd_size = 0; | 115 | unsigned long flash_addr, flash_size, mtd_size = 0; |
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index 0dc645f8152f..81756e397711 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c | |||
@@ -51,7 +51,7 @@ int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
51 | 51 | ||
52 | /****************************************************************************/ | 52 | /****************************************************************************/ |
53 | 53 | ||
54 | int __init uclinux_mtd_init(void) | 54 | static int __init uclinux_mtd_init(void) |
55 | { | 55 | { |
56 | struct mtd_info *mtd; | 56 | struct mtd_info *mtd; |
57 | struct map_info *mapp; | 57 | struct map_info *mapp; |
@@ -94,7 +94,7 @@ int __init uclinux_mtd_init(void) | |||
94 | 94 | ||
95 | /****************************************************************************/ | 95 | /****************************************************************************/ |
96 | 96 | ||
97 | void __exit uclinux_mtd_cleanup(void) | 97 | static void __exit uclinux_mtd_cleanup(void) |
98 | { | 98 | { |
99 | if (uclinux_ram_mtdinfo) { | 99 | if (uclinux_ram_mtdinfo) { |
100 | del_mtd_partitions(uclinux_ram_mtdinfo); | 100 | del_mtd_partitions(uclinux_ram_mtdinfo); |
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c index 5a0c9a353b0f..6d452dcdfe34 100644 --- a/drivers/mtd/maps/vmax301.c +++ b/drivers/mtd/maps/vmax301.c | |||
@@ -146,7 +146,7 @@ static void __exit cleanup_vmax301(void) | |||
146 | iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START); | 146 | iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START); |
147 | } | 147 | } |
148 | 148 | ||
149 | int __init init_vmax301(void) | 149 | static int __init init_vmax301(void) |
150 | { | 150 | { |
151 | int i; | 151 | int i; |
152 | unsigned long iomapadr; | 152 | unsigned long iomapadr; |
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c index 413b0cf9bbd2..933a2b6598b4 100644 --- a/drivers/mtd/maps/wr_sbc82xx_flash.c +++ b/drivers/mtd/maps/wr_sbc82xx_flash.c | |||
@@ -74,7 +74,7 @@ do { \ | |||
74 | } \ | 74 | } \ |
75 | } while (0); | 75 | } while (0); |
76 | 76 | ||
77 | int __init init_sbc82xx_flash(void) | 77 | static int __init init_sbc82xx_flash(void) |
78 | { | 78 | { |
79 | volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl; | 79 | volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl; |
80 | int bigflash; | 80 | int bigflash; |
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index bcffeda2df3d..e9ec59e9a566 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
@@ -450,16 +450,20 @@ static int mtd_ioctl(struct inode *inode, struct file *file, | |||
450 | if (!erase) | 450 | if (!erase) |
451 | ret = -ENOMEM; | 451 | ret = -ENOMEM; |
452 | else { | 452 | else { |
453 | struct erase_info_user einfo; | ||
454 | |||
453 | wait_queue_head_t waitq; | 455 | wait_queue_head_t waitq; |
454 | DECLARE_WAITQUEUE(wait, current); | 456 | DECLARE_WAITQUEUE(wait, current); |
455 | 457 | ||
456 | init_waitqueue_head(&waitq); | 458 | init_waitqueue_head(&waitq); |
457 | 459 | ||
458 | if (copy_from_user(&erase->addr, argp, | 460 | if (copy_from_user(&einfo, argp, |
459 | sizeof(struct erase_info_user))) { | 461 | sizeof(struct erase_info_user))) { |
460 | kfree(erase); | 462 | kfree(erase); |
461 | return -EFAULT; | 463 | return -EFAULT; |
462 | } | 464 | } |
465 | erase->addr = einfo.start; | ||
466 | erase->len = einfo.length; | ||
463 | erase->mtd = mtd; | 467 | erase->mtd = mtd; |
464 | erase->callback = mtdchar_erase_callback; | 468 | erase->callback = mtdchar_erase_callback; |
465 | erase->priv = (unsigned long)&waitq; | 469 | erase->priv = (unsigned long)&waitq; |
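The MEMERASE ioctl used to copy the user's struct erase_info_user directly over &erase->addr, which only works while the kernel struct's addr/len happen to have the same layout as the 32-bit user ABI struct. With erase_info's addr and len growing to 64 bits that trick breaks, so the patch copies into a local erase_info_user and assigns the members explicitly. A sketch of the pattern, names as in the file:

    struct erase_info_user einfo;   /* 32-bit start/length, ABI-stable */

    if (copy_from_user(&einfo, argp, sizeof(einfo))) {
            kfree(erase);
            return -EFAULT;
    }
    /* Explicit member assignment; erase_info's fields are 64-bit now. */
    erase->addr = einfo.start;
    erase->len  = einfo.length;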
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 1a05cf37851e..3dbb1b38db66 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c | |||
@@ -197,7 +197,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
197 | continue; | 197 | continue; |
198 | } | 198 | } |
199 | 199 | ||
200 | size = min(total_len, (size_t)(subdev->size - to)); | 200 | size = min_t(uint64_t, total_len, subdev->size - to); |
201 | wsize = size; /* store for future use */ | 201 | wsize = size; /* store for future use */ |
202 | 202 | ||
203 | entry_high = entry_low; | 203 | entry_high = entry_low; |
@@ -385,7 +385,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
385 | struct mtd_concat *concat = CONCAT(mtd); | 385 | struct mtd_concat *concat = CONCAT(mtd); |
386 | struct mtd_info *subdev; | 386 | struct mtd_info *subdev; |
387 | int i, err; | 387 | int i, err; |
388 | u_int32_t length, offset = 0; | 388 | uint64_t length, offset = 0; |
389 | struct erase_info *erase; | 389 | struct erase_info *erase; |
390 | 390 | ||
391 | if (!(mtd->flags & MTD_WRITEABLE)) | 391 | if (!(mtd->flags & MTD_WRITEABLE)) |
@@ -518,7 +518,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
518 | return 0; | 518 | return 0; |
519 | } | 519 | } |
520 | 520 | ||
521 | static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | 521 | static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
522 | { | 522 | { |
523 | struct mtd_concat *concat = CONCAT(mtd); | 523 | struct mtd_concat *concat = CONCAT(mtd); |
524 | int i, err = -EINVAL; | 524 | int i, err = -EINVAL; |
@@ -528,7 +528,7 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | |||
528 | 528 | ||
529 | for (i = 0; i < concat->num_subdev; i++) { | 529 | for (i = 0; i < concat->num_subdev; i++) { |
530 | struct mtd_info *subdev = concat->subdev[i]; | 530 | struct mtd_info *subdev = concat->subdev[i]; |
531 | size_t size; | 531 | uint64_t size; |
532 | 532 | ||
533 | if (ofs >= subdev->size) { | 533 | if (ofs >= subdev->size) { |
534 | size = 0; | 534 | size = 0; |
@@ -556,7 +556,7 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | |||
556 | return err; | 556 | return err; |
557 | } | 557 | } |
558 | 558 | ||
559 | static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) | 559 | static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
560 | { | 560 | { |
561 | struct mtd_concat *concat = CONCAT(mtd); | 561 | struct mtd_concat *concat = CONCAT(mtd); |
562 | int i, err = 0; | 562 | int i, err = 0; |
@@ -566,7 +566,7 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) | |||
566 | 566 | ||
567 | for (i = 0; i < concat->num_subdev; i++) { | 567 | for (i = 0; i < concat->num_subdev; i++) { |
568 | struct mtd_info *subdev = concat->subdev[i]; | 568 | struct mtd_info *subdev = concat->subdev[i]; |
569 | size_t size; | 569 | uint64_t size; |
570 | 570 | ||
571 | if (ofs >= subdev->size) { | 571 | if (ofs >= subdev->size) { |
572 | size = 0; | 572 | size = 0; |
@@ -696,7 +696,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
696 | int i; | 696 | int i; |
697 | size_t size; | 697 | size_t size; |
698 | struct mtd_concat *concat; | 698 | struct mtd_concat *concat; |
699 | u_int32_t max_erasesize, curr_erasesize; | 699 | uint32_t max_erasesize, curr_erasesize; |
700 | int num_erase_region; | 700 | int num_erase_region; |
701 | 701 | ||
702 | printk(KERN_NOTICE "Concatenating MTD devices:\n"); | 702 | printk(KERN_NOTICE "Concatenating MTD devices:\n"); |
@@ -842,12 +842,14 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
842 | concat->mtd.erasesize = curr_erasesize; | 842 | concat->mtd.erasesize = curr_erasesize; |
843 | concat->mtd.numeraseregions = 0; | 843 | concat->mtd.numeraseregions = 0; |
844 | } else { | 844 | } else { |
845 | uint64_t tmp64; | ||
846 | |||
845 | /* | 847 | /* |
846 | * erase block size varies across the subdevices: allocate | 848 | * erase block size varies across the subdevices: allocate |
847 | * space to store the data describing the variable erase regions | 849 | * space to store the data describing the variable erase regions |
848 | */ | 850 | */ |
849 | struct mtd_erase_region_info *erase_region_p; | 851 | struct mtd_erase_region_info *erase_region_p; |
850 | u_int32_t begin, position; | 852 | uint64_t begin, position; |
851 | 853 | ||
852 | concat->mtd.erasesize = max_erasesize; | 854 | concat->mtd.erasesize = max_erasesize; |
853 | concat->mtd.numeraseregions = num_erase_region; | 855 | concat->mtd.numeraseregions = num_erase_region; |
@@ -879,8 +881,9 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
879 | erase_region_p->offset = begin; | 881 | erase_region_p->offset = begin; |
880 | erase_region_p->erasesize = | 882 | erase_region_p->erasesize = |
881 | curr_erasesize; | 883 | curr_erasesize; |
882 | erase_region_p->numblocks = | 884 | tmp64 = position - begin; |
883 | (position - begin) / curr_erasesize; | 885 | do_div(tmp64, curr_erasesize); |
886 | erase_region_p->numblocks = tmp64; | ||
884 | begin = position; | 887 | begin = position; |
885 | 888 | ||
886 | curr_erasesize = subdev[i]->erasesize; | 889 | curr_erasesize = subdev[i]->erasesize; |
@@ -897,9 +900,9 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
897 | erase_region_p->offset = begin; | 900 | erase_region_p->offset = begin; |
898 | erase_region_p->erasesize = | 901 | erase_region_p->erasesize = |
899 | curr_erasesize; | 902 | curr_erasesize; |
900 | erase_region_p->numblocks = | 903 | tmp64 = position - begin; |
901 | (position - | 904 | do_div(tmp64, curr_erasesize); |
902 | begin) / curr_erasesize; | 905 | erase_region_p->numblocks = tmp64; |
903 | begin = position; | 906 | begin = position; |
904 | 907 | ||
905 | curr_erasesize = | 908 | curr_erasesize = |
@@ -909,14 +912,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
909 | } | 912 | } |
910 | position += | 913 | position += |
911 | subdev[i]->eraseregions[j]. | 914 | subdev[i]->eraseregions[j]. |
912 | numblocks * curr_erasesize; | 915 | numblocks * (uint64_t)curr_erasesize; |
913 | } | 916 | } |
914 | } | 917 | } |
915 | } | 918 | } |
916 | /* Now write the final entry */ | 919 | /* Now write the final entry */ |
917 | erase_region_p->offset = begin; | 920 | erase_region_p->offset = begin; |
918 | erase_region_p->erasesize = curr_erasesize; | 921 | erase_region_p->erasesize = curr_erasesize; |
919 | erase_region_p->numblocks = (position - begin) / curr_erasesize; | 922 | tmp64 = position - begin; |
923 | do_div(tmp64, curr_erasesize); | ||
924 | erase_region_p->numblocks = tmp64; | ||
920 | } | 925 | } |
921 | 926 | ||
922 | return &concat->mtd; | 927 | return &concat->mtd; |
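mtdconcat.c is where the 64-bit conversion meets arithmetic: offsets and region sizes become uint64_t, and every division by a runtime erase size goes through do_div(), since a plain 64-by-32 division would need libgcc's 64-bit division helpers, which the kernel does not provide on 32-bit targets. do_div() divides its first argument in place and returns the remainder; a minimal illustrative helper:

    #include <linux/types.h>
    #include <asm/div64.h>              /* do_div() */

    /* Count whole erase blocks in a 64-bit span (illustration only). */
    static uint32_t span_to_blocks(uint64_t begin, uint64_t end,
                                   uint32_t erasesize)
    {
            uint64_t tmp = end - begin;

            do_div(tmp, erasesize);     /* tmp becomes the quotient */
            return (uint32_t)tmp;
    }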
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index a9d246949820..76fe0a1e7a5e 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -57,6 +57,19 @@ int add_mtd_device(struct mtd_info *mtd) | |||
57 | mtd->index = i; | 57 | mtd->index = i; |
58 | mtd->usecount = 0; | 58 | mtd->usecount = 0; |
59 | 59 | ||
60 | if (is_power_of_2(mtd->erasesize)) | ||
61 | mtd->erasesize_shift = ffs(mtd->erasesize) - 1; | ||
62 | else | ||
63 | mtd->erasesize_shift = 0; | ||
64 | |||
65 | if (is_power_of_2(mtd->writesize)) | ||
66 | mtd->writesize_shift = ffs(mtd->writesize) - 1; | ||
67 | else | ||
68 | mtd->writesize_shift = 0; | ||
69 | |||
70 | mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; | ||
71 | mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; | ||
72 | |||
60 | /* Some chips always power up locked. Unlock them now */ | 73 | /* Some chips always power up locked. Unlock them now */ |
61 | if ((mtd->flags & MTD_WRITEABLE) | 74 | if ((mtd->flags & MTD_WRITEABLE) |
62 | && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { | 75 | && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { |
@@ -344,7 +357,8 @@ static inline int mtd_proc_info (char *buf, int i) | |||
344 | if (!this) | 357 | if (!this) |
345 | return 0; | 358 | return 0; |
346 | 359 | ||
347 | return sprintf(buf, "mtd%d: %8.8x %8.8x \"%s\"\n", i, this->size, | 360 | return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i, |
361 | (unsigned long long)this->size, | ||
348 | this->erasesize, this->name); | 362 | this->erasesize, this->name); |
349 | } | 363 | } |
350 | 364 | ||
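add_mtd_device() now precomputes erasesize_shift/writesize_shift and the matching masks whenever those sizes are powers of two; if not, the shift stays 0. The point is to let hot-path helpers divide by, or take a remainder against, the erase and write sizes without a 64-bit division in the common case. The erase-block helpers introduced alongside this look roughly like the following (a sketch, not copied from the header):

    static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
    {
            if (mtd->erasesize_shift)
                    return sz >> mtd->erasesize_shift;  /* power-of-two fast path */
            do_div(sz, mtd->erasesize);
            return sz;
    }

    static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
    {
            if (mtd->erasesize_shift)
                    return sz & mtd->erasesize_mask;
            return do_div(sz, mtd->erasesize);          /* remainder */
    }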
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index aebb3b27edbd..1a6b3beabe8d 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c | |||
@@ -80,9 +80,9 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset) | |||
80 | if (ret) { | 80 | if (ret) { |
81 | set_current_state(TASK_RUNNING); | 81 | set_current_state(TASK_RUNNING); |
82 | remove_wait_queue(&wait_q, &wait); | 82 | remove_wait_queue(&wait_q, &wait); |
83 | printk (KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] " | 83 | printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] " |
84 | "on \"%s\" failed\n", | 84 | "on \"%s\" failed\n", |
85 | erase.addr, erase.len, mtd->name); | 85 | (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name); |
86 | return ret; | 86 | return ret; |
87 | } | 87 | } |
88 | 88 | ||
@@ -289,7 +289,10 @@ static void mtdoops_notify_add(struct mtd_info *mtd) | |||
289 | } | 289 | } |
290 | 290 | ||
291 | cxt->mtd = mtd; | 291 | cxt->mtd = mtd; |
292 | cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE; | 292 | if (mtd->size > INT_MAX) |
293 | cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE; | ||
294 | else | ||
295 | cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE; | ||
293 | 296 | ||
294 | find_next_position(cxt); | 297 | find_next_position(cxt); |
295 | 298 | ||
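mtdoops keeps its page count in an int, so rather than letting a >2GiB mtd->size truncate silently, the patch clamps the count before narrowing. The same clamp-then-narrow pattern as a standalone sketch (assuming the driver's 4KiB record size):

    #include <linux/kernel.h>           /* INT_MAX */
    #include <linux/types.h>

    #define OOPS_PAGE_SIZE 4096         /* mtdoops record size */

    static int size_to_oops_pages(uint64_t size)
    {
            /* Clamp first so the (int) cast cannot wrap. */
            if (size > INT_MAX)
                    return INT_MAX / OOPS_PAGE_SIZE;
            return (int)size / OOPS_PAGE_SIZE;
    }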
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 3728913fa5fa..144e6b613a77 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -26,7 +26,7 @@ static LIST_HEAD(mtd_partitions); | |||
26 | struct mtd_part { | 26 | struct mtd_part { |
27 | struct mtd_info mtd; | 27 | struct mtd_info mtd; |
28 | struct mtd_info *master; | 28 | struct mtd_info *master; |
29 | u_int32_t offset; | 29 | uint64_t offset; |
30 | int index; | 30 | int index; |
31 | struct list_head list; | 31 | struct list_head list; |
32 | int registered; | 32 | int registered; |
@@ -235,7 +235,7 @@ void mtd_erase_callback(struct erase_info *instr) | |||
235 | } | 235 | } |
236 | EXPORT_SYMBOL_GPL(mtd_erase_callback); | 236 | EXPORT_SYMBOL_GPL(mtd_erase_callback); |
237 | 237 | ||
238 | static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | 238 | static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
239 | { | 239 | { |
240 | struct mtd_part *part = PART(mtd); | 240 | struct mtd_part *part = PART(mtd); |
241 | if ((len + ofs) > mtd->size) | 241 | if ((len + ofs) > mtd->size) |
@@ -243,7 +243,7 @@ static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | |||
243 | return part->master->lock(part->master, ofs + part->offset, len); | 243 | return part->master->lock(part->master, ofs + part->offset, len); |
244 | } | 244 | } |
245 | 245 | ||
246 | static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) | 246 | static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
247 | { | 247 | { |
248 | struct mtd_part *part = PART(mtd); | 248 | struct mtd_part *part = PART(mtd); |
249 | if ((len + ofs) > mtd->size) | 249 | if ((len + ofs) > mtd->size) |
@@ -317,7 +317,7 @@ EXPORT_SYMBOL(del_mtd_partitions); | |||
317 | 317 | ||
318 | static struct mtd_part *add_one_partition(struct mtd_info *master, | 318 | static struct mtd_part *add_one_partition(struct mtd_info *master, |
319 | const struct mtd_partition *part, int partno, | 319 | const struct mtd_partition *part, int partno, |
320 | u_int32_t cur_offset) | 320 | uint64_t cur_offset) |
321 | { | 321 | { |
322 | struct mtd_part *slave; | 322 | struct mtd_part *slave; |
323 | 323 | ||
@@ -395,19 +395,19 @@ static struct mtd_part *add_one_partition(struct mtd_info *master, | |||
395 | slave->offset = cur_offset; | 395 | slave->offset = cur_offset; |
396 | if (slave->offset == MTDPART_OFS_NXTBLK) { | 396 | if (slave->offset == MTDPART_OFS_NXTBLK) { |
397 | slave->offset = cur_offset; | 397 | slave->offset = cur_offset; |
398 | if ((cur_offset % master->erasesize) != 0) { | 398 | if (mtd_mod_by_eb(cur_offset, master) != 0) { |
399 | /* Round up to next erasesize */ | 399 | /* Round up to next erasesize */ |
400 | slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize; | 400 | slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize; |
401 | printk(KERN_NOTICE "Moving partition %d: " | 401 | printk(KERN_NOTICE "Moving partition %d: " |
402 | "0x%08x -> 0x%08x\n", partno, | 402 | "0x%012llx -> 0x%012llx\n", partno, |
403 | cur_offset, slave->offset); | 403 | (unsigned long long)cur_offset, (unsigned long long)slave->offset); |
404 | } | 404 | } |
405 | } | 405 | } |
406 | if (slave->mtd.size == MTDPART_SIZ_FULL) | 406 | if (slave->mtd.size == MTDPART_SIZ_FULL) |
407 | slave->mtd.size = master->size - slave->offset; | 407 | slave->mtd.size = master->size - slave->offset; |
408 | 408 | ||
409 | printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset, | 409 | printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset, |
410 | slave->offset + slave->mtd.size, slave->mtd.name); | 410 | (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name); |
411 | 411 | ||
412 | /* let's do some sanity checks */ | 412 | /* let's do some sanity checks */ |
413 | if (slave->offset >= master->size) { | 413 | if (slave->offset >= master->size) { |
@@ -420,13 +420,13 @@ static struct mtd_part *add_one_partition(struct mtd_info *master, | |||
420 | } | 420 | } |
421 | if (slave->offset + slave->mtd.size > master->size) { | 421 | if (slave->offset + slave->mtd.size > master->size) { |
422 | slave->mtd.size = master->size - slave->offset; | 422 | slave->mtd.size = master->size - slave->offset; |
423 | printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n", | 423 | printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", |
424 | part->name, master->name, slave->mtd.size); | 424 | part->name, master->name, (unsigned long long)slave->mtd.size); |
425 | } | 425 | } |
426 | if (master->numeraseregions > 1) { | 426 | if (master->numeraseregions > 1) { |
427 | /* Deal with variable erase size stuff */ | 427 | /* Deal with variable erase size stuff */ |
428 | int i, max = master->numeraseregions; | 428 | int i, max = master->numeraseregions; |
429 | u32 end = slave->offset + slave->mtd.size; | 429 | u64 end = slave->offset + slave->mtd.size; |
430 | struct mtd_erase_region_info *regions = master->eraseregions; | 430 | struct mtd_erase_region_info *regions = master->eraseregions; |
431 | 431 | ||
432 | /* Find the first erase regions which is part of this | 432 | /* Find the first erase regions which is part of this |
@@ -449,7 +449,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master, | |||
449 | } | 449 | } |
450 | 450 | ||
451 | if ((slave->mtd.flags & MTD_WRITEABLE) && | 451 | if ((slave->mtd.flags & MTD_WRITEABLE) && |
452 | (slave->offset % slave->mtd.erasesize)) { | 452 | mtd_mod_by_eb(slave->offset, &slave->mtd)) { |
453 | /* Doesn't start on a boundary of major erase size */ | 453 | /* Doesn't start on a boundary of major erase size */ |
454 | /* FIXME: Let it be writable if it is on a boundary of | 454 | /* FIXME: Let it be writable if it is on a boundary of |
455 | * _minor_ erase size though */ | 455 | * _minor_ erase size though */ |
@@ -458,7 +458,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master, | |||
458 | part->name); | 458 | part->name); |
459 | } | 459 | } |
460 | if ((slave->mtd.flags & MTD_WRITEABLE) && | 460 | if ((slave->mtd.flags & MTD_WRITEABLE) && |
461 | (slave->mtd.size % slave->mtd.erasesize)) { | 461 | mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) { |
462 | slave->mtd.flags &= ~MTD_WRITEABLE; | 462 | slave->mtd.flags &= ~MTD_WRITEABLE; |
463 | printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", | 463 | printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", |
464 | part->name); | 464 | part->name); |
@@ -466,7 +466,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master, | |||
466 | 466 | ||
467 | slave->mtd.ecclayout = master->ecclayout; | 467 | slave->mtd.ecclayout = master->ecclayout; |
468 | if (master->block_isbad) { | 468 | if (master->block_isbad) { |
469 | uint32_t offs = 0; | 469 | uint64_t offs = 0; |
470 | 470 | ||
471 | while (offs < slave->mtd.size) { | 471 | while (offs < slave->mtd.size) { |
472 | if (master->block_isbad(master, | 472 | if (master->block_isbad(master, |
@@ -501,7 +501,7 @@ int add_mtd_partitions(struct mtd_info *master, | |||
501 | int nbparts) | 501 | int nbparts) |
502 | { | 502 | { |
503 | struct mtd_part *slave; | 503 | struct mtd_part *slave; |
504 | u_int32_t cur_offset = 0; | 504 | uint64_t cur_offset = 0; |
505 | int i; | 505 | int i; |
506 | 506 | ||
507 | printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); | 507 | printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); |
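In mtdpart.c every '%' and '/' against master->erasesize becomes mtd_mod_by_eb()/mtd_div_by_eb() (see the sketch after the mtdcore.c hunk), and partition offsets are printed as zero-padded 64-bit hex. A worked example of the MTDPART_OFS_NXTBLK rounding with a 128KiB (0x20000) erase block and cur_offset = 0x21000: mtd_mod_by_eb() returns 0x1000, so the partition does not start on a block boundary; mtd_div_by_eb() returns 1, and the offset is moved to (1 + 1) * 0x20000 = 0x40000, which is exactly what the "Moving partition" notice then reports.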
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index f8ae0400c49c..8b12e6e109d3 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -163,6 +163,13 @@ config MTD_NAND_S3C2410_HWECC | |||
163 | incorrect ECC generation, and if using these, the default of | 163 | incorrect ECC generation, and if using these, the default of |
164 | software ECC is preferable. | 164 | software ECC is preferable. |
165 | 165 | ||
166 | config MTD_NAND_NDFC | ||
167 | tristate "NDFC NanD Flash Controller" | ||
168 | depends on 4xx | ||
169 | select MTD_NAND_ECC_SMC | ||
170 | help | ||
171 | NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs | ||
172 | |||
166 | config MTD_NAND_S3C2410_CLKSTOP | 173 | config MTD_NAND_S3C2410_CLKSTOP |
167 | bool "S3C2410 NAND IDLE clock stop" | 174 | bool "S3C2410 NAND IDLE clock stop" |
168 | depends on MTD_NAND_S3C2410 | 175 | depends on MTD_NAND_S3C2410 |
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c index 962380394855..6d9649159a18 100644 --- a/drivers/mtd/nand/alauda.c +++ b/drivers/mtd/nand/alauda.c | |||
@@ -676,11 +676,11 @@ static int alauda_probe(struct usb_interface *interface, | |||
676 | goto error; | 676 | goto error; |
677 | 677 | ||
678 | al->write_out = usb_sndbulkpipe(al->dev, | 678 | al->write_out = usb_sndbulkpipe(al->dev, |
679 | ep_wr->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 679 | usb_endpoint_num(ep_wr)); |
680 | al->bulk_in = usb_rcvbulkpipe(al->dev, | 680 | al->bulk_in = usb_rcvbulkpipe(al->dev, |
681 | ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 681 | usb_endpoint_num(ep_in)); |
682 | al->bulk_out = usb_sndbulkpipe(al->dev, | 682 | al->bulk_out = usb_sndbulkpipe(al->dev, |
683 | ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 683 | usb_endpoint_num(ep_out)); |
684 | 684 | ||
685 | /* second device is identical up to now */ | 685 | /* second device is identical up to now */ |
686 | memcpy(al+1, al, sizeof(*al)); | 686 | memcpy(al+1, al, sizeof(*al)); |
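The alauda change is unrelated to the size work: it swaps the open-coded bEndpointAddress masking for the usb_endpoint_num() helper from <linux/usb.h>, which performs the same USB_ENDPOINT_NUMBER_MASK operation. The call shape stays the same:

    /* ep_in is a struct usb_endpoint_descriptor *; the helper masks off
     * the direction bit and returns just the endpoint number. */
    al->bulk_in = usb_rcvbulkpipe(al->dev, usb_endpoint_num(ep_in));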
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index b8064bf3aee4..22a6b2e50e91 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c | |||
@@ -90,7 +90,7 @@ static int timing[3]; | |||
90 | module_param_array(timing, int, &numtimings, 0644); | 90 | module_param_array(timing, int, &numtimings, 0644); |
91 | 91 | ||
92 | #ifdef CONFIG_MTD_PARTITIONS | 92 | #ifdef CONFIG_MTD_PARTITIONS |
93 | static const char *part_probes[] = { "RedBoot", NULL }; | 93 | static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | /* Hrm. Why isn't this already conditional on something in the struct device? */ | 96 | /* Hrm. Why isn't this already conditional on something in the struct device? */ |
@@ -805,10 +805,13 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, | |||
805 | add_mtd_device(mtd); | 805 | add_mtd_device(mtd); |
806 | 806 | ||
807 | #ifdef CONFIG_MTD_PARTITIONS | 807 | #ifdef CONFIG_MTD_PARTITIONS |
808 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
809 | mtd->name = "cafe_nand"; | ||
810 | #endif | ||
808 | nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0); | 811 | nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0); |
809 | if (nr_parts > 0) { | 812 | if (nr_parts > 0) { |
810 | cafe->parts = parts; | 813 | cafe->parts = parts; |
811 | dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts); | 814 | dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts); |
812 | add_mtd_partitions(mtd, parts, nr_parts); | 815 | add_mtd_partitions(mtd, parts, nr_parts); |
813 | } | 816 | } |
814 | #endif | 817 | #endif |
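cafe_nand now tries "cmdlinepart" before "RedBoot". The command-line parser matches partitions by comparing the mtd-id field of the mtdparts= boot parameter against mtd->name, which is why the patch pins the name to "cafe_nand" when CONFIG_MTD_CMDLINE_PARTS is enabled. An illustrative (not from the patch) boot parameter for this driver would then be:

    mtdparts=cafe_nand:1M(kernel),-(filesystem)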
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 4aa5bd6158da..65929db29446 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c | |||
@@ -777,7 +777,9 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) | |||
777 | /* Fill in fsl_elbc_mtd structure */ | 777 | /* Fill in fsl_elbc_mtd structure */ |
778 | priv->mtd.priv = chip; | 778 | priv->mtd.priv = chip; |
779 | priv->mtd.owner = THIS_MODULE; | 779 | priv->mtd.owner = THIS_MODULE; |
780 | priv->fmr = 0; /* rest filled in later */ | 780 | |
781 | /* Set the ECCM according to the settings in bootloader.*/ | ||
782 | priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM; | ||
781 | 783 | ||
782 | /* fill in nand_chip structure */ | 784 | /* fill in nand_chip structure */ |
783 | /* set up function call table */ | 785 | /* set up function call table */ |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 0a9c9cd33f96..0c3afccde8a2 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -2014,13 +2014,14 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
2014 | int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | 2014 | int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, |
2015 | int allowbbt) | 2015 | int allowbbt) |
2016 | { | 2016 | { |
2017 | int page, len, status, pages_per_block, ret, chipnr; | 2017 | int page, status, pages_per_block, ret, chipnr; |
2018 | struct nand_chip *chip = mtd->priv; | 2018 | struct nand_chip *chip = mtd->priv; |
2019 | int rewrite_bbt[NAND_MAX_CHIPS]={0}; | 2019 | loff_t rewrite_bbt[NAND_MAX_CHIPS]={0}; |
2020 | unsigned int bbt_masked_page = 0xffffffff; | 2020 | unsigned int bbt_masked_page = 0xffffffff; |
2021 | loff_t len; | ||
2021 | 2022 | ||
2022 | DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n", | 2023 | DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n", |
2023 | (unsigned int)instr->addr, (unsigned int)instr->len); | 2024 | (unsigned long long)instr->addr, (unsigned long long)instr->len); |
2024 | 2025 | ||
2025 | /* Start address must align on block boundary */ | 2026 | /* Start address must align on block boundary */ |
2026 | if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { | 2027 | if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { |
@@ -2116,7 +2117,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2116 | DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " | 2117 | DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " |
2117 | "Failed erase, page 0x%08x\n", page); | 2118 | "Failed erase, page 0x%08x\n", page); |
2118 | instr->state = MTD_ERASE_FAILED; | 2119 | instr->state = MTD_ERASE_FAILED; |
2119 | instr->fail_addr = (page << chip->page_shift); | 2120 | instr->fail_addr = |
2121 | ((loff_t)page << chip->page_shift); | ||
2120 | goto erase_exit; | 2122 | goto erase_exit; |
2121 | } | 2123 | } |
2122 | 2124 | ||
@@ -2126,7 +2128,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2126 | */ | 2128 | */ |
2127 | if (bbt_masked_page != 0xffffffff && | 2129 | if (bbt_masked_page != 0xffffffff && |
2128 | (page & BBT_PAGE_MASK) == bbt_masked_page) | 2130 | (page & BBT_PAGE_MASK) == bbt_masked_page) |
2129 | rewrite_bbt[chipnr] = (page << chip->page_shift); | 2131 | rewrite_bbt[chipnr] = |
2132 | ((loff_t)page << chip->page_shift); | ||
2130 | 2133 | ||
2131 | /* Increment page address and decrement length */ | 2134 | /* Increment page address and decrement length */ |
2132 | len -= (1 << chip->phys_erase_shift); | 2135 | len -= (1 << chip->phys_erase_shift); |
@@ -2173,7 +2176,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2173 | continue; | 2176 | continue; |
2174 | /* update the BBT for chip */ | 2177 | /* update the BBT for chip */ |
2175 | DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt " | 2178 | DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt " |
2176 | "(%d:0x%0x 0x%0x)\n", chipnr, rewrite_bbt[chipnr], | 2179 | "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr], |
2177 | chip->bbt_td->pages[chipnr]); | 2180 | chip->bbt_td->pages[chipnr]); |
2178 | nand_update_bbt(mtd, rewrite_bbt[chipnr]); | 2181 | nand_update_bbt(mtd, rewrite_bbt[chipnr]); |
2179 | } | 2182 | } |
@@ -2365,7 +2368,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2365 | if (!mtd->name) | 2368 | if (!mtd->name) |
2366 | mtd->name = type->name; | 2369 | mtd->name = type->name; |
2367 | 2370 | ||
2368 | chip->chipsize = type->chipsize << 20; | 2371 | chip->chipsize = (uint64_t)type->chipsize << 20; |
2369 | 2372 | ||
2370 | /* Newer devices have all the information in additional id bytes */ | 2373 | /* Newer devices have all the information in additional id bytes */ |
2371 | if (!type->pagesize) { | 2374 | if (!type->pagesize) { |
@@ -2423,7 +2426,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2423 | 2426 | ||
2424 | chip->bbt_erase_shift = chip->phys_erase_shift = | 2427 | chip->bbt_erase_shift = chip->phys_erase_shift = |
2425 | ffs(mtd->erasesize) - 1; | 2428 | ffs(mtd->erasesize) - 1; |
2426 | chip->chip_shift = ffs(chip->chipsize) - 1; | 2429 | if (chip->chipsize & 0xffffffff) |
2430 | chip->chip_shift = ffs((unsigned)chip->chipsize) - 1; | ||
2431 | else | ||
2432 | chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; | ||
2427 | 2433 | ||
2428 | /* Set the bad block position */ | 2434 | /* Set the bad block position */ |
2429 | chip->badblockpos = mtd->writesize > 512 ? | 2435 | chip->badblockpos = mtd->writesize > 512 ? |
@@ -2517,7 +2523,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips) | |||
2517 | /** | 2523 | /** |
2518 | * nand_scan_tail - [NAND Interface] Scan for the NAND device | 2524 | * nand_scan_tail - [NAND Interface] Scan for the NAND device |
2519 | * @mtd: MTD device structure | 2525 | * @mtd: MTD device structure |
2520 | * @maxchips: Number of chips to scan for | ||
2521 | * | 2526 | * |
2522 | * This is the second phase of the normal nand_scan() function. It | 2527 | * This is the second phase of the normal nand_scan() function. It |
2523 | * fills out all the uninitialized function pointers with the defaults | 2528 | * fills out all the uninitialized function pointers with the defaults |
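In nand_base.c the chip size becomes a 64-bit quantity ((uint64_t)type->chipsize << 20), which breaks the old chip_shift = ffs(chipsize) - 1 computation because ffs() takes an int. The replacement tests the low word first and adds 32 when the set bit lives in the high word; as a standalone helper the logic is (sketch, assuming a power-of-two chip size):

    static int chip_shift_of(uint64_t chipsize)
    {
            /* A power-of-two size has exactly one bit set. */
            if (chipsize & 0xffffffff)
                    return ffs((unsigned)chipsize) - 1;
            return ffs((unsigned)(chipsize >> 32)) + 32 - 1;
    }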
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 0b1c48595f12..55c23e5cd210 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c | |||
@@ -171,16 +171,16 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, | |||
171 | if (tmp == msk) | 171 | if (tmp == msk) |
172 | continue; | 172 | continue; |
173 | if (reserved_block_code && (tmp == reserved_block_code)) { | 173 | if (reserved_block_code && (tmp == reserved_block_code)) { |
174 | printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n", | 174 | printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n", |
175 | ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); | 175 | (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); |
176 | this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); | 176 | this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); |
177 | mtd->ecc_stats.bbtblocks++; | 177 | mtd->ecc_stats.bbtblocks++; |
178 | continue; | 178 | continue; |
179 | } | 179 | } |
180 | /* Leave it for now, if its matured we can move this | 180 | /* Leave it for now, if its matured we can move this |
181 | * message to MTD_DEBUG_LEVEL0 */ | 181 | * message to MTD_DEBUG_LEVEL0 */ |
182 | printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n", | 182 | printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n", |
183 | ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); | 183 | (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); |
184 | /* Factory marked bad or worn out ? */ | 184 | /* Factory marked bad or worn out ? */ |
185 | if (tmp == 0) | 185 | if (tmp == 0) |
186 | this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); | 186 | this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); |
@@ -284,7 +284,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, | |||
284 | 284 | ||
285 | /* Read the primary version, if available */ | 285 | /* Read the primary version, if available */ |
286 | if (td->options & NAND_BBT_VERSION) { | 286 | if (td->options & NAND_BBT_VERSION) { |
287 | scan_read_raw(mtd, buf, td->pages[0] << this->page_shift, | 287 | scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, |
288 | mtd->writesize); | 288 | mtd->writesize); |
289 | td->version[0] = buf[mtd->writesize + td->veroffs]; | 289 | td->version[0] = buf[mtd->writesize + td->veroffs]; |
290 | printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", | 290 | printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", |
@@ -293,7 +293,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, | |||
293 | 293 | ||
294 | /* Read the mirror version, if available */ | 294 | /* Read the mirror version, if available */ |
295 | if (md && (md->options & NAND_BBT_VERSION)) { | 295 | if (md && (md->options & NAND_BBT_VERSION)) { |
296 | scan_read_raw(mtd, buf, md->pages[0] << this->page_shift, | 296 | scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, |
297 | mtd->writesize); | 297 | mtd->writesize); |
298 | md->version[0] = buf[mtd->writesize + md->veroffs]; | 298 | md->version[0] = buf[mtd->writesize + md->veroffs]; |
299 | printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", | 299 | printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", |
@@ -411,7 +411,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
411 | numblocks = this->chipsize >> (this->bbt_erase_shift - 1); | 411 | numblocks = this->chipsize >> (this->bbt_erase_shift - 1); |
412 | startblock = chip * numblocks; | 412 | startblock = chip * numblocks; |
413 | numblocks += startblock; | 413 | numblocks += startblock; |
414 | from = startblock << (this->bbt_erase_shift - 1); | 414 | from = (loff_t)startblock << (this->bbt_erase_shift - 1); |
415 | } | 415 | } |
416 | 416 | ||
417 | for (i = startblock; i < numblocks;) { | 417 | for (i = startblock; i < numblocks;) { |
@@ -428,8 +428,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
428 | 428 | ||
429 | if (ret) { | 429 | if (ret) { |
430 | this->bbt[i >> 3] |= 0x03 << (i & 0x6); | 430 | this->bbt[i >> 3] |= 0x03 << (i & 0x6); |
431 | printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n", | 431 | printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n", |
432 | i >> 1, (unsigned int)from); | 432 | i >> 1, (unsigned long long)from); |
433 | mtd->ecc_stats.badblocks++; | 433 | mtd->ecc_stats.badblocks++; |
434 | } | 434 | } |
435 | 435 | ||
@@ -495,7 +495,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr | |||
495 | for (block = 0; block < td->maxblocks; block++) { | 495 | for (block = 0; block < td->maxblocks; block++) { |
496 | 496 | ||
497 | int actblock = startblock + dir * block; | 497 | int actblock = startblock + dir * block; |
498 | loff_t offs = actblock << this->bbt_erase_shift; | 498 | loff_t offs = (loff_t)actblock << this->bbt_erase_shift; |
499 | 499 | ||
500 | /* Read first page */ | 500 | /* Read first page */ |
501 | scan_read_raw(mtd, buf, offs, mtd->writesize); | 501 | scan_read_raw(mtd, buf, offs, mtd->writesize); |
@@ -719,7 +719,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
719 | 719 | ||
720 | memset(&einfo, 0, sizeof(einfo)); | 720 | memset(&einfo, 0, sizeof(einfo)); |
721 | einfo.mtd = mtd; | 721 | einfo.mtd = mtd; |
722 | einfo.addr = (unsigned long)to; | 722 | einfo.addr = to; |
723 | einfo.len = 1 << this->bbt_erase_shift; | 723 | einfo.len = 1 << this->bbt_erase_shift; |
724 | res = nand_erase_nand(mtd, &einfo, 1); | 724 | res = nand_erase_nand(mtd, &einfo, 1); |
725 | if (res < 0) | 725 | if (res < 0) |
@@ -729,8 +729,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
729 | if (res < 0) | 729 | if (res < 0) |
730 | goto outerr; | 730 | goto outerr; |
731 | 731 | ||
732 | printk(KERN_DEBUG "Bad block table written to 0x%08x, version " | 732 | printk(KERN_DEBUG "Bad block table written to 0x%012llx, version " |
733 | "0x%02X\n", (unsigned int)to, td->version[chip]); | 733 | "0x%02X\n", (unsigned long long)to, td->version[chip]); |
734 | 734 | ||
735 | /* Mark it as used */ | 735 | /* Mark it as used */ |
736 | td->pages[chip] = page; | 736 | td->pages[chip] = page; |
@@ -910,7 +910,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
910 | newval = oldval | (0x2 << (block & 0x06)); | 910 | newval = oldval | (0x2 << (block & 0x06)); |
911 | this->bbt[(block >> 3)] = newval; | 911 | this->bbt[(block >> 3)] = newval; |
912 | if ((oldval != newval) && td->reserved_block_code) | 912 | if ((oldval != newval) && td->reserved_block_code) |
913 | nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1)); | 913 | nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1)); |
914 | continue; | 914 | continue; |
915 | } | 915 | } |
916 | update = 0; | 916 | update = 0; |
@@ -931,7 +931,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
931 | new ones have been marked, then we need to update the stored | 931 | new ones have been marked, then we need to update the stored |
932 | bbts. This should only happen once. */ | 932 | bbts. This should only happen once. */ |
933 | if (update && td->reserved_block_code) | 933 | if (update && td->reserved_block_code) |
934 | nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1)); | 934 | nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); |
935 | } | 935 | } |
936 | } | 936 | } |
937 | 937 | ||
@@ -1027,7 +1027,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs) | |||
1027 | if (!this->bbt || !td) | 1027 | if (!this->bbt || !td) |
1028 | return -EINVAL; | 1028 | return -EINVAL; |
1029 | 1029 | ||
1030 | len = mtd->size >> (this->bbt_erase_shift + 2); | ||
1031 | /* Allocate a temporary buffer for one eraseblock incl. oob */ | 1030 | /* Allocate a temporary buffer for one eraseblock incl. oob */ |
1032 | len = (1 << this->bbt_erase_shift); | 1031 | len = (1 << this->bbt_erase_shift); |
1033 | len += (len >> this->page_shift) * mtd->oobsize; | 1032 | len += (len >> this->page_shift) * mtd->oobsize; |
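All of the nand_bbt.c hunks are the same fix: page and block numbers are plain ints, and shifting them by bbt_erase_shift or page_shift was done in 32-bit arithmetic, which overflows once the resulting offset exceeds the 32-bit range. Widening before the shift is all that is needed (fragment; names as in the file):

    /* Without the cast the shift happens in 'int' and the upper bits
     * of the device offset are lost on large chips. */
    loff_t offs     = (loff_t)actblock << this->bbt_erase_shift;
    loff_t page_off = (loff_t)td->pages[0] << this->page_shift;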
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index ae7c57781a68..cd0711b83ac4 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
@@ -38,6 +38,9 @@ | |||
38 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
39 | #include <linux/list.h> | 39 | #include <linux/list.h> |
40 | #include <linux/random.h> | 40 | #include <linux/random.h> |
41 | #include <linux/sched.h> | ||
42 | #include <linux/fs.h> | ||
43 | #include <linux/pagemap.h> | ||
41 | 44 | ||
42 | /* Default simulator parameters values */ | 45 | /* Default simulator parameters values */ |
43 | #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ | 46 | #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ |
@@ -100,6 +103,7 @@ static unsigned int bitflips = 0; | |||
100 | static char *gravepages = NULL; | 103 | static char *gravepages = NULL; |
101 | static unsigned int rptwear = 0; | 104 | static unsigned int rptwear = 0; |
102 | static unsigned int overridesize = 0; | 105 | static unsigned int overridesize = 0; |
106 | static char *cache_file = NULL; | ||
103 | 107 | ||
104 | module_param(first_id_byte, uint, 0400); | 108 | module_param(first_id_byte, uint, 0400); |
105 | module_param(second_id_byte, uint, 0400); | 109 | module_param(second_id_byte, uint, 0400); |
@@ -122,12 +126,13 @@ module_param(bitflips, uint, 0400); | |||
122 | module_param(gravepages, charp, 0400); | 126 | module_param(gravepages, charp, 0400); |
123 | module_param(rptwear, uint, 0400); | 127 | module_param(rptwear, uint, 0400); |
124 | module_param(overridesize, uint, 0400); | 128 | module_param(overridesize, uint, 0400); |
129 | module_param(cache_file, charp, 0400); | ||
125 | 130 | ||
126 | MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); | 131 | MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); |
127 | MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); | 132 | MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); |
128 | MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command"); | 133 | MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command"); |
129 | MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command"); | 134 | MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command"); |
130 | MODULE_PARM_DESC(access_delay, "Initial page access delay (microiseconds)"); | 135 | MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); |
131 | MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); | 136 | MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); |
132 | MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); | 137 | MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); |
133 | MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)"); | 138 | MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)"); |
@@ -153,6 +158,7 @@ MODULE_PARM_DESC(rptwear, "Number of erases inbetween reporting wear, if | |||
153 | MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. " | 158 | MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. " |
154 | "The size is specified in erase blocks and as the exponent of a power of two" | 159 | "The size is specified in erase blocks and as the exponent of a power of two" |
155 | " e.g. 5 means a size of 32 erase blocks"); | 160 | " e.g. 5 means a size of 32 erase blocks"); |
161 | MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); | ||
156 | 162 | ||
157 | /* The largest possible page size */ | 163 | /* The largest possible page size */ |
158 | #define NS_LARGEST_PAGE_SIZE 2048 | 164 | #define NS_LARGEST_PAGE_SIZE 2048 |
@@ -266,6 +272,9 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I | |||
266 | */ | 272 | */ |
267 | #define NS_MAX_PREVSTATES 1 | 273 | #define NS_MAX_PREVSTATES 1 |
268 | 274 | ||
275 | /* Maximum page cache pages needed to read or write a NAND page to the cache_file */ | ||
276 | #define NS_MAX_HELD_PAGES 16 | ||
277 | |||
269 | /* | 278 | /* |
270 | * A union to represent flash memory contents and flash buffer. | 279 | * A union to represent flash memory contents and flash buffer. |
271 | */ | 280 | */ |
@@ -295,6 +304,9 @@ struct nandsim { | |||
295 | /* The simulated NAND flash pages array */ | 304 | /* The simulated NAND flash pages array */ |
296 | union ns_mem *pages; | 305 | union ns_mem *pages; |
297 | 306 | ||
307 | /* Slab allocator for nand pages */ | ||
308 | struct kmem_cache *nand_pages_slab; | ||
309 | |||
298 | /* Internal buffer of page + OOB size bytes */ | 310 | /* Internal buffer of page + OOB size bytes */ |
299 | union ns_mem buf; | 311 | union ns_mem buf; |
300 | 312 | ||
@@ -335,6 +347,13 @@ struct nandsim { | |||
335 | int ale; /* address Latch Enable */ | 347 | int ale; /* address Latch Enable */ |
336 | int wp; /* write Protect */ | 348 | int wp; /* write Protect */ |
337 | } lines; | 349 | } lines; |
350 | |||
351 | /* Fields needed when using a cache file */ | ||
352 | struct file *cfile; /* Open file */ | ||
353 | unsigned char *pages_written; /* Which pages have been written */ | ||
354 | void *file_buf; | ||
355 | struct page *held_pages[NS_MAX_HELD_PAGES]; | ||
356 | int held_cnt; | ||
338 | }; | 357 | }; |
339 | 358 | ||
340 | /* | 359 | /* |
@@ -420,25 +439,69 @@ static struct mtd_info *nsmtd; | |||
420 | static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE]; | 439 | static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE]; |
421 | 440 | ||
422 | /* | 441 | /* |
423 | * Allocate array of page pointers and initialize the array to NULL | 442 | * Allocate array of page pointers, create slab allocation for an array |
424 | * pointers. | 443 | * and initialize the array by NULL pointers. |
425 | * | 444 | * |
426 | * RETURNS: 0 if success, -ENOMEM if memory alloc fails. | 445 | * RETURNS: 0 if success, -ENOMEM if memory alloc fails. |
427 | */ | 446 | */ |
428 | static int alloc_device(struct nandsim *ns) | 447 | static int alloc_device(struct nandsim *ns) |
429 | { | 448 | { |
430 | int i; | 449 | struct file *cfile; |
450 | int i, err; | ||
451 | |||
452 | if (cache_file) { | ||
453 | cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600); | ||
454 | if (IS_ERR(cfile)) | ||
455 | return PTR_ERR(cfile); | ||
456 | if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) { | ||
457 | NS_ERR("alloc_device: cache file not readable\n"); | ||
458 | err = -EINVAL; | ||
459 | goto err_close; | ||
460 | } | ||
461 | if (!cfile->f_op->write && !cfile->f_op->aio_write) { | ||
462 | NS_ERR("alloc_device: cache file not writeable\n"); | ||
463 | err = -EINVAL; | ||
464 | goto err_close; | ||
465 | } | ||
466 | ns->pages_written = vmalloc(ns->geom.pgnum); | ||
467 | if (!ns->pages_written) { | ||
468 | NS_ERR("alloc_device: unable to allocate pages written array\n"); | ||
469 | err = -ENOMEM; | ||
470 | goto err_close; | ||
471 | } | ||
472 | ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL); | ||
473 | if (!ns->file_buf) { | ||
474 | NS_ERR("alloc_device: unable to allocate file buf\n"); | ||
475 | err = -ENOMEM; | ||
476 | goto err_free; | ||
477 | } | ||
478 | ns->cfile = cfile; | ||
479 | memset(ns->pages_written, 0, ns->geom.pgnum); | ||
480 | return 0; | ||
481 | } | ||
431 | 482 | ||
432 | ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem)); | 483 | ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem)); |
433 | if (!ns->pages) { | 484 | if (!ns->pages) { |
434 | NS_ERR("alloc_map: unable to allocate page array\n"); | 485 | NS_ERR("alloc_device: unable to allocate page array\n"); |
435 | return -ENOMEM; | 486 | return -ENOMEM; |
436 | } | 487 | } |
437 | for (i = 0; i < ns->geom.pgnum; i++) { | 488 | for (i = 0; i < ns->geom.pgnum; i++) { |
438 | ns->pages[i].byte = NULL; | 489 | ns->pages[i].byte = NULL; |
439 | } | 490 | } |
491 | ns->nand_pages_slab = kmem_cache_create("nandsim", | ||
492 | ns->geom.pgszoob, 0, 0, NULL); | ||
493 | if (!ns->nand_pages_slab) { | ||
494 | NS_ERR("cache_create: unable to create kmem_cache\n"); | ||
495 | return -ENOMEM; | ||
496 | } | ||
440 | 497 | ||
441 | return 0; | 498 | return 0; |
499 | |||
500 | err_free: | ||
501 | vfree(ns->pages_written); | ||
502 | err_close: | ||
503 | filp_close(cfile, NULL); | ||
504 | return err; | ||
442 | } | 505 | } |
443 | 506 | ||
444 | /* | 507 | /* |
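The RAM-backed path above now takes its page buffers from a dedicated slab cache sized to one page plus OOB, rather than kmalloc'ing each page individually. A minimal kernel-style sketch of that allocation pattern follows; the function and variable names (and the pgszoob parameter) are illustrative, not the driver's own:

#include <linux/slab.h>
#include <linux/errno.h>

static struct kmem_cache *pages_slab;   /* one object = page data + OOB */

static int pages_slab_init(size_t pgszoob)
{
        pages_slab = kmem_cache_create("nandsim", pgszoob, 0, 0, NULL);
        return pages_slab ? 0 : -ENOMEM;
}

static void *page_buf_alloc(void)
{
        /* GFP_NOFS: this can run while a flash filesystem holds its locks */
        return kmem_cache_alloc(pages_slab, GFP_NOFS);
}

static void page_buf_free(void *buf)
{
        kmem_cache_free(pages_slab, buf);
}

static void pages_slab_exit(void)
{
        kmem_cache_destroy(pages_slab);
}

A named cache of fixed-size objects also tends to show the simulator's memory separately in /proc/slabinfo, which can be handy when simulating large devices.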
@@ -448,11 +511,20 @@ static void free_device(struct nandsim *ns) | |||
448 | { | 511 | { |
449 | int i; | 512 | int i; |
450 | 513 | ||
514 | if (ns->cfile) { | ||
515 | kfree(ns->file_buf); | ||
516 | vfree(ns->pages_written); | ||
517 | filp_close(ns->cfile, NULL); | ||
518 | return; | ||
519 | } | ||
520 | |||
451 | if (ns->pages) { | 521 | if (ns->pages) { |
452 | for (i = 0; i < ns->geom.pgnum; i++) { | 522 | for (i = 0; i < ns->geom.pgnum; i++) { |
453 | if (ns->pages[i].byte) | 523 | if (ns->pages[i].byte) |
454 | kfree(ns->pages[i].byte); | 524 | kmem_cache_free(ns->nand_pages_slab, |
525 | ns->pages[i].byte); | ||
455 | } | 526 | } |
527 | kmem_cache_destroy(ns->nand_pages_slab); | ||
456 | vfree(ns->pages); | 528 | vfree(ns->pages); |
457 | } | 529 | } |
458 | } | 530 | } |
@@ -464,7 +536,7 @@ static char *get_partition_name(int i) | |||
464 | return kstrdup(buf, GFP_KERNEL); | 536 | return kstrdup(buf, GFP_KERNEL); |
465 | } | 537 | } |
466 | 538 | ||
467 | static u_int64_t divide(u_int64_t n, u_int32_t d) | 539 | static uint64_t divide(uint64_t n, uint32_t d) |
468 | { | 540 | { |
469 | do_div(n, d); | 541 | do_div(n, d); |
470 | return n; | 542 | return n; |
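The divide() wrapper exists because a plain 64-by-32-bit division is not something every 32-bit target can do directly; do_div() divides its first argument in place and returns the remainder, so the wrapper hands back the quotient left in n. A short sketch of those semantics, with illustrative names:

#include <linux/types.h>
#include <asm/div64.h>

static uint64_t bytes_to_blocks(uint64_t bytes, uint32_t blocksize)
{
        uint32_t rem;

        rem = do_div(bytes, blocksize);  /* bytes now holds the quotient */
        (void)rem;                       /* remainder is not needed here */
        return bytes;
}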
@@ -480,8 +552,8 @@ static int init_nandsim(struct mtd_info *mtd) | |||
480 | struct nand_chip *chip = (struct nand_chip *)mtd->priv; | 552 | struct nand_chip *chip = (struct nand_chip *)mtd->priv; |
481 | struct nandsim *ns = (struct nandsim *)(chip->priv); | 553 | struct nandsim *ns = (struct nandsim *)(chip->priv); |
482 | int i, ret = 0; | 554 | int i, ret = 0; |
483 | u_int64_t remains; | 555 | uint64_t remains; |
484 | u_int64_t next_offset; | 556 | uint64_t next_offset; |
485 | 557 | ||
486 | if (NS_IS_INITIALIZED(ns)) { | 558 | if (NS_IS_INITIALIZED(ns)) { |
487 | NS_ERR("init_nandsim: nandsim is already initialized\n"); | 559 | NS_ERR("init_nandsim: nandsim is already initialized\n"); |
@@ -548,7 +620,7 @@ static int init_nandsim(struct mtd_info *mtd) | |||
548 | remains = ns->geom.totsz; | 620 | remains = ns->geom.totsz; |
549 | next_offset = 0; | 621 | next_offset = 0; |
550 | for (i = 0; i < parts_num; ++i) { | 622 | for (i = 0; i < parts_num; ++i) { |
551 | u_int64_t part_sz = (u_int64_t)parts[i] * ns->geom.secsz; | 623 | uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz; |
552 | 624 | ||
553 | if (!part_sz || part_sz > remains) { | 625 | if (!part_sz || part_sz > remains) { |
554 | NS_ERR("bad partition size.\n"); | 626 | NS_ERR("bad partition size.\n"); |
@@ -1211,6 +1283,97 @@ static int find_operation(struct nandsim *ns, uint32_t flag) | |||
1211 | return -1; | 1283 | return -1; |
1212 | } | 1284 | } |
1213 | 1285 | ||
1286 | static void put_pages(struct nandsim *ns) | ||
1287 | { | ||
1288 | int i; | ||
1289 | |||
1290 | for (i = 0; i < ns->held_cnt; i++) | ||
1291 | page_cache_release(ns->held_pages[i]); | ||
1292 | } | ||
1293 | |||
1294 | /* Get page cache pages in advance to provide NOFS memory allocation */ | ||
1295 | static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos) | ||
1296 | { | ||
1297 | pgoff_t index, start_index, end_index; | ||
1298 | struct page *page; | ||
1299 | struct address_space *mapping = file->f_mapping; | ||
1300 | |||
1301 | start_index = pos >> PAGE_CACHE_SHIFT; | ||
1302 | end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT; | ||
1303 | if (end_index - start_index + 1 > NS_MAX_HELD_PAGES) | ||
1304 | return -EINVAL; | ||
1305 | ns->held_cnt = 0; | ||
1306 | for (index = start_index; index <= end_index; index++) { | ||
1307 | page = find_get_page(mapping, index); | ||
1308 | if (page == NULL) { | ||
1309 | page = find_or_create_page(mapping, index, GFP_NOFS); | ||
1310 | if (page == NULL) { | ||
1311 | write_inode_now(mapping->host, 1); | ||
1312 | page = find_or_create_page(mapping, index, GFP_NOFS); | ||
1313 | } | ||
1314 | if (page == NULL) { | ||
1315 | put_pages(ns); | ||
1316 | return -ENOMEM; | ||
1317 | } | ||
1318 | unlock_page(page); | ||
1319 | } | ||
1320 | ns->held_pages[ns->held_cnt++] = page; | ||
1321 | } | ||
1322 | return 0; | ||
1323 | } | ||
1324 | |||
1325 | static int set_memalloc(void) | ||
1326 | { | ||
1327 | if (current->flags & PF_MEMALLOC) | ||
1328 | return 0; | ||
1329 | current->flags |= PF_MEMALLOC; | ||
1330 | return 1; | ||
1331 | } | ||
1332 | |||
1333 | static void clear_memalloc(int memalloc) | ||
1334 | { | ||
1335 | if (memalloc) | ||
1336 | current->flags &= ~PF_MEMALLOC; | ||
1337 | } | ||
1338 | |||
1339 | static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos) | ||
1340 | { | ||
1341 | mm_segment_t old_fs; | ||
1342 | ssize_t tx; | ||
1343 | int err, memalloc; | ||
1344 | |||
1345 | err = get_pages(ns, file, count, *pos); | ||
1346 | if (err) | ||
1347 | return err; | ||
1348 | old_fs = get_fs(); | ||
1349 | set_fs(get_ds()); | ||
1350 | memalloc = set_memalloc(); | ||
1351 | tx = vfs_read(file, (char __user *)buf, count, pos); | ||
1352 | clear_memalloc(memalloc); | ||
1353 | set_fs(old_fs); | ||
1354 | put_pages(ns); | ||
1355 | return tx; | ||
1356 | } | ||
1357 | |||
1358 | static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos) | ||
1359 | { | ||
1360 | mm_segment_t old_fs; | ||
1361 | ssize_t tx; | ||
1362 | int err, memalloc; | ||
1363 | |||
1364 | err = get_pages(ns, file, count, *pos); | ||
1365 | if (err) | ||
1366 | return err; | ||
1367 | old_fs = get_fs(); | ||
1368 | set_fs(get_ds()); | ||
1369 | memalloc = set_memalloc(); | ||
1370 | tx = vfs_write(file, (char __user *)buf, count, pos); | ||
1371 | clear_memalloc(memalloc); | ||
1372 | set_fs(old_fs); | ||
1373 | put_pages(ns); | ||
1374 | return tx; | ||
1375 | } | ||
1376 | |||
1214 | /* | 1377 | /* |
1215 | * Returns a pointer to the current page. | 1378 | * Returns a pointer to the current page. |
1216 | */ | 1379 | */ |
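get_pages() pins every page-cache page the following vfs_read()/vfs_write() will touch, allocating them with GFP_NOFS up front, while the set_fs()/PF_MEMALLOC brackets in read_file()/write_file() let a kernel buffer be passed to the VFS and keep the I/O path from recursing into a filesystem that may already hold locks. The NS_MAX_HELD_PAGES limit of 16 is generous, since a single access never covers more than one NAND page plus OOB. A back-of-the-envelope check, assuming 4 KiB page-cache pages and a 2048+64 byte NAND page (neither is fixed by the driver):

#include <stdio.h>

int main(void)
{
        unsigned cache_page = 4096;        /* page-cache page size (assumed) */
        unsigned pgszoob = 2048 + 64;      /* NAND page + OOB (assumed geometry) */
        /* worst case: the access starts just before a page boundary */
        unsigned held = (pgszoob + cache_page - 1) / cache_page + 1;

        printf("page-cache pages held per access: at most %u (limit is 16)\n", held);
        return 0;
}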
@@ -1227,6 +1390,38 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns) | |||
1227 | return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off; | 1390 | return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off; |
1228 | } | 1391 | } |
1229 | 1392 | ||
1393 | int do_read_error(struct nandsim *ns, int num) | ||
1394 | { | ||
1395 | unsigned int page_no = ns->regs.row; | ||
1396 | |||
1397 | if (read_error(page_no)) { | ||
1398 | int i; | ||
1399 | memset(ns->buf.byte, 0xFF, num); | ||
1400 | for (i = 0; i < num; ++i) | ||
1401 | ns->buf.byte[i] = random32(); | ||
1402 | NS_WARN("simulating read error in page %u\n", page_no); | ||
1403 | return 1; | ||
1404 | } | ||
1405 | return 0; | ||
1406 | } | ||
1407 | |||
1408 | void do_bit_flips(struct nandsim *ns, int num) | ||
1409 | { | ||
1410 | if (bitflips && random32() < (1 << 22)) { | ||
1411 | int flips = 1; | ||
1412 | if (bitflips > 1) | ||
1413 | flips = (random32() % (int) bitflips) + 1; | ||
1414 | while (flips--) { | ||
1415 | int pos = random32() % (num * 8); | ||
1416 | ns->buf.byte[pos / 8] ^= (1 << (pos % 8)); | ||
1417 | NS_WARN("read_page: flipping bit %d in page %d " | ||
1418 | "reading from %d ecc: corrected=%u failed=%u\n", | ||
1419 | pos, ns->regs.row, ns->regs.column + ns->regs.off, | ||
1420 | nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed); | ||
1421 | } | ||
1422 | } | ||
1423 | } | ||
1424 | |||
1230 | /* | 1425 | /* |
1231 | * Fill the NAND buffer with data read from the specified page. | 1426 | * Fill the NAND buffer with data read from the specified page. |
1232 | */ | 1427 | */ |
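The trigger in do_bit_flips() compares a 32-bit random value against (1 << 22), so a read gets corrupted with probability 2^22 / 2^32, roughly once in a thousand reads; when it fires, between one and bitflips randomly placed bits are inverted. The arithmetic, spelled out:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        double p = (double)(UINT32_C(1) << 22) / 4294967296.0;  /* 2^22 / 2^32 */

        printf("per-read flip probability: %.6f (about 1 in %.0f)\n", p, 1.0 / p);
        return 0;
}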
@@ -1234,36 +1429,40 @@ static void read_page(struct nandsim *ns, int num) | |||
1234 | { | 1429 | { |
1235 | union ns_mem *mypage; | 1430 | union ns_mem *mypage; |
1236 | 1431 | ||
1432 | if (ns->cfile) { | ||
1433 | if (!ns->pages_written[ns->regs.row]) { | ||
1434 | NS_DBG("read_page: page %d not written\n", ns->regs.row); | ||
1435 | memset(ns->buf.byte, 0xFF, num); | ||
1436 | } else { | ||
1437 | loff_t pos; | ||
1438 | ssize_t tx; | ||
1439 | |||
1440 | NS_DBG("read_page: page %d written, reading from %d\n", | ||
1441 | ns->regs.row, ns->regs.column + ns->regs.off); | ||
1442 | if (do_read_error(ns, num)) | ||
1443 | return; | ||
1444 | pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off; | ||
1445 | tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos); | ||
1446 | if (tx != num) { | ||
1447 | NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); | ||
1448 | return; | ||
1449 | } | ||
1450 | do_bit_flips(ns, num); | ||
1451 | } | ||
1452 | return; | ||
1453 | } | ||
1454 | |||
1237 | mypage = NS_GET_PAGE(ns); | 1455 | mypage = NS_GET_PAGE(ns); |
1238 | if (mypage->byte == NULL) { | 1456 | if (mypage->byte == NULL) { |
1239 | NS_DBG("read_page: page %d not allocated\n", ns->regs.row); | 1457 | NS_DBG("read_page: page %d not allocated\n", ns->regs.row); |
1240 | memset(ns->buf.byte, 0xFF, num); | 1458 | memset(ns->buf.byte, 0xFF, num); |
1241 | } else { | 1459 | } else { |
1242 | unsigned int page_no = ns->regs.row; | ||
1243 | NS_DBG("read_page: page %d allocated, reading from %d\n", | 1460 | NS_DBG("read_page: page %d allocated, reading from %d\n", |
1244 | ns->regs.row, ns->regs.column + ns->regs.off); | 1461 | ns->regs.row, ns->regs.column + ns->regs.off); |
1245 | if (read_error(page_no)) { | 1462 | if (do_read_error(ns, num)) |
1246 | int i; | ||
1247 | memset(ns->buf.byte, 0xFF, num); | ||
1248 | for (i = 0; i < num; ++i) | ||
1249 | ns->buf.byte[i] = random32(); | ||
1250 | NS_WARN("simulating read error in page %u\n", page_no); | ||
1251 | return; | 1463 | return; |
1252 | } | ||
1253 | memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); | 1464 | memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); |
1254 | if (bitflips && random32() < (1 << 22)) { | 1465 | do_bit_flips(ns, num); |
1255 | int flips = 1; | ||
1256 | if (bitflips > 1) | ||
1257 | flips = (random32() % (int) bitflips) + 1; | ||
1258 | while (flips--) { | ||
1259 | int pos = random32() % (num * 8); | ||
1260 | ns->buf.byte[pos / 8] ^= (1 << (pos % 8)); | ||
1261 | NS_WARN("read_page: flipping bit %d in page %d " | ||
1262 | "reading from %d ecc: corrected=%u failed=%u\n", | ||
1263 | pos, ns->regs.row, ns->regs.column + ns->regs.off, | ||
1264 | nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed); | ||
1265 | } | ||
1266 | } | ||
1267 | } | 1466 | } |
1268 | } | 1467 | } |
1269 | 1468 | ||
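In cache-file mode the simulated pages sit back to back in the file, OOB included, so the position of the current access is simply row * pgszoob + column + off. A small illustration; the 2048+64 geometry is an assumption for the example, while the driver derives pgszoob from the configured chip:

#include <stdio.h>

int main(void)
{
        unsigned pgszoob = 2048 + 64;               /* data + OOB per page (assumed) */
        unsigned row = 5, column = 2048, off = 4;   /* 4 bytes into page 5's OOB */
        long long pos = (long long)row * pgszoob + column + off;

        printf("cache file offset: %lld\n", pos);
        return 0;
}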
@@ -1275,11 +1474,20 @@ static void erase_sector(struct nandsim *ns) | |||
1275 | union ns_mem *mypage; | 1474 | union ns_mem *mypage; |
1276 | int i; | 1475 | int i; |
1277 | 1476 | ||
1477 | if (ns->cfile) { | ||
1478 | for (i = 0; i < ns->geom.pgsec; i++) | ||
1479 | if (ns->pages_written[ns->regs.row + i]) { | ||
1480 | NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i); | ||
1481 | ns->pages_written[ns->regs.row + i] = 0; | ||
1482 | } | ||
1483 | return; | ||
1484 | } | ||
1485 | |||
1278 | mypage = NS_GET_PAGE(ns); | 1486 | mypage = NS_GET_PAGE(ns); |
1279 | for (i = 0; i < ns->geom.pgsec; i++) { | 1487 | for (i = 0; i < ns->geom.pgsec; i++) { |
1280 | if (mypage->byte != NULL) { | 1488 | if (mypage->byte != NULL) { |
1281 | NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i); | 1489 | NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i); |
1282 | kfree(mypage->byte); | 1490 | kmem_cache_free(ns->nand_pages_slab, mypage->byte); |
1283 | mypage->byte = NULL; | 1491 | mypage->byte = NULL; |
1284 | } | 1492 | } |
1285 | mypage++; | 1493 | mypage++; |
@@ -1295,16 +1503,57 @@ static int prog_page(struct nandsim *ns, int num) | |||
1295 | union ns_mem *mypage; | 1503 | union ns_mem *mypage; |
1296 | u_char *pg_off; | 1504 | u_char *pg_off; |
1297 | 1505 | ||
1506 | if (ns->cfile) { | ||
1507 | loff_t off, pos; | ||
1508 | ssize_t tx; | ||
1509 | int all; | ||
1510 | |||
1511 | NS_DBG("prog_page: writing page %d\n", ns->regs.row); | ||
1512 | pg_off = ns->file_buf + ns->regs.column + ns->regs.off; | ||
1513 | off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off; | ||
1514 | if (!ns->pages_written[ns->regs.row]) { | ||
1515 | all = 1; | ||
1516 | memset(ns->file_buf, 0xff, ns->geom.pgszoob); | ||
1517 | } else { | ||
1518 | all = 0; | ||
1519 | pos = off; | ||
1520 | tx = read_file(ns, ns->cfile, pg_off, num, &pos); | ||
1521 | if (tx != num) { | ||
1522 | NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); | ||
1523 | return -1; | ||
1524 | } | ||
1525 | } | ||
1526 | for (i = 0; i < num; i++) | ||
1527 | pg_off[i] &= ns->buf.byte[i]; | ||
1528 | if (all) { | ||
1529 | pos = (loff_t)ns->regs.row * ns->geom.pgszoob; | ||
1530 | tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos); | ||
1531 | if (tx != ns->geom.pgszoob) { | ||
1532 | NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); | ||
1533 | return -1; | ||
1534 | } | ||
1535 | ns->pages_written[ns->regs.row] = 1; | ||
1536 | } else { | ||
1537 | pos = off; | ||
1538 | tx = write_file(ns, ns->cfile, pg_off, num, &pos); | ||
1539 | if (tx != num) { | ||
1540 | NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); | ||
1541 | return -1; | ||
1542 | } | ||
1543 | } | ||
1544 | return 0; | ||
1545 | } | ||
1546 | |||
1298 | mypage = NS_GET_PAGE(ns); | 1547 | mypage = NS_GET_PAGE(ns); |
1299 | if (mypage->byte == NULL) { | 1548 | if (mypage->byte == NULL) { |
1300 | NS_DBG("prog_page: allocating page %d\n", ns->regs.row); | 1549 | NS_DBG("prog_page: allocating page %d\n", ns->regs.row); |
1301 | /* | 1550 | /* |
1302 | * We allocate memory with GFP_NOFS because a flash FS may | 1551 | * We allocate memory with GFP_NOFS because a flash FS may |
1303 | * utilize this. If it is holding an FS lock, then gets here, | 1552 | * utilize this. If it is holding an FS lock, then gets here, |
1304 | * then kmalloc runs writeback which goes to the FS again | 1553 | * then kernel memory alloc runs writeback which goes to the FS |
1305 | * and deadlocks. This was seen in practice. | 1554 | * again and deadlocks. This was seen in practice. |
1306 | */ | 1555 | */ |
1307 | mypage->byte = kmalloc(ns->geom.pgszoob, GFP_NOFS); | 1556 | mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS); |
1308 | if (mypage->byte == NULL) { | 1557 | if (mypage->byte == NULL) { |
1309 | NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row); | 1558 | NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row); |
1310 | return -1; | 1559 | return -1; |
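In the cache-file branch of prog_page() the incoming data is ANDed into whatever the page already holds (pg_off[i] &= ns->buf.byte[i]) because programming NAND can only turn 1 bits into 0 bits; only an erase brings them back to 1. A tiny demonstration of that rule:

#include <stdio.h>

int main(void)
{
        unsigned char stored = 0xF0, incoming = 0xCC;

        stored &= incoming;                     /* program: 0xF0 & 0xCC = 0xC0 */
        printf("after program: 0x%02X\n", stored);
        stored = 0xFF;                          /* erase restores all ones */
        printf("after erase:   0x%02X\n", stored);
        return 0;
}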
@@ -1736,13 +1985,17 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte) | |||
1736 | 1985 | ||
1737 | /* Check if chip is expecting command */ | 1986 | /* Check if chip is expecting command */ |
1738 | if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) { | 1987 | if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) { |
1739 | /* | 1988 | /* Do not warn if only 2 id bytes are read */ |
1740 | * We are in situation when something else (not command) | 1989 | if (!(ns->regs.command == NAND_CMD_READID && |
1741 | * was expected but command was input. In this case ignore | 1990 | NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) { |
1742 | * previous command(s)/state(s) and accept the last one. | 1991 | /* |
1743 | */ | 1992 | * We are in situation when something else (not command) |
1744 | NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, " | 1993 | * was expected but command was input. In this case ignore |
1745 | "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate)); | 1994 | * previous command(s)/state(s) and accept the last one. |
1995 | */ | ||
1996 | NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, " | ||
1997 | "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate)); | ||
1998 | } | ||
1746 | switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); | 1999 | switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); |
1747 | } | 2000 | } |
1748 | 2001 | ||
@@ -2044,7 +2297,7 @@ static int __init ns_init_module(void) | |||
2044 | } | 2297 | } |
2045 | 2298 | ||
2046 | if (overridesize) { | 2299 | if (overridesize) { |
2047 | u_int64_t new_size = (u_int64_t)nsmtd->erasesize << overridesize; | 2300 | uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; |
2048 | if (new_size >> overridesize != nsmtd->erasesize) { | 2301 | if (new_size >> overridesize != nsmtd->erasesize) { |
2049 | NS_ERR("overridesize is too big\n"); | 2302 | NS_ERR("overridesize is too big\n"); |
2050 | goto err_exit; | 2303 | goto err_exit; |
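overridesize is an exponent: the overridden flash size is erasesize shifted left by it, and shifting the result back must reproduce erasesize, otherwise the shift overflowed 64 bits and the override is rejected. The check in isolation, with made-up sizes:

#include <stdio.h>
#include <stdint.h>

static int override_ok(uint64_t erasesize, unsigned overridesize)
{
        uint64_t new_size = erasesize << overridesize;

        return (new_size >> overridesize) == erasesize;
}

int main(void)
{
        printf("128 KiB << 10: %s\n", override_ok(128 * 1024, 10) ? "ok" : "too big");
        printf("128 KiB << 60: %s\n", override_ok(128 * 1024, 60) ? "ok" : "too big");
        return 0;
}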
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index 955959eb02d4..582cf80f555a 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c | |||
@@ -2,12 +2,20 @@ | |||
2 | * drivers/mtd/ndfc.c | 2 | * drivers/mtd/ndfc.c |
3 | * | 3 | * |
4 | * Overview: | 4 | * Overview: |
5 | * Platform independend driver for NDFC (NanD Flash Controller) | 5 | * Platform independent driver for NDFC (NanD Flash Controller) |
6 | * integrated into EP440 cores | 6 | * integrated into EP440 cores |
7 | * | 7 | * |
8 | * Ported to an OF platform driver by Sean MacLennan | ||
9 | * | ||
10 | * The NDFC supports multiple chips, but this driver only supports a | ||
11 | * single chip since I do not have access to any boards with | ||
12 | * multiple chips. | ||
13 | * | ||
8 | * Author: Thomas Gleixner | 14 | * Author: Thomas Gleixner |
9 | * | 15 | * |
10 | * Copyright 2006 IBM | 16 | * Copyright 2006 IBM |
17 | * Copyright 2008 PIKA Technologies | ||
18 | * Sean MacLennan <smaclennan@pikatech.com> | ||
11 | * | 19 | * |
12 | * This program is free software; you can redistribute it and/or modify it | 20 | * This program is free software; you can redistribute it and/or modify it |
13 | * under the terms of the GNU General Public License as published by the | 21 | * under the terms of the GNU General Public License as published by the |
@@ -21,27 +29,20 @@ | |||
21 | #include <linux/mtd/partitions.h> | 29 | #include <linux/mtd/partitions.h> |
22 | #include <linux/mtd/ndfc.h> | 30 | #include <linux/mtd/ndfc.h> |
23 | #include <linux/mtd/mtd.h> | 31 | #include <linux/mtd/mtd.h> |
24 | #include <linux/platform_device.h> | 32 | #include <linux/of_platform.h> |
25 | |||
26 | #include <asm/io.h> | 33 | #include <asm/io.h> |
27 | #ifdef CONFIG_40x | ||
28 | #include <asm/ibm405.h> | ||
29 | #else | ||
30 | #include <asm/ibm44x.h> | ||
31 | #endif | ||
32 | |||
33 | struct ndfc_nand_mtd { | ||
34 | struct mtd_info mtd; | ||
35 | struct nand_chip chip; | ||
36 | struct platform_nand_chip *pl_chip; | ||
37 | }; | ||
38 | 34 | ||
39 | static struct ndfc_nand_mtd ndfc_mtd[NDFC_MAX_BANKS]; | ||
40 | 35 | ||
41 | struct ndfc_controller { | 36 | struct ndfc_controller { |
42 | void __iomem *ndfcbase; | 37 | struct of_device *ofdev; |
43 | struct nand_hw_control ndfc_control; | 38 | void __iomem *ndfcbase; |
44 | atomic_t childs_active; | 39 | struct mtd_info mtd; |
40 | struct nand_chip chip; | ||
41 | int chip_select; | ||
42 | struct nand_hw_control ndfc_control; | ||
43 | #ifdef CONFIG_MTD_PARTITIONS | ||
44 | struct mtd_partition *parts; | ||
45 | #endif | ||
45 | }; | 46 | }; |
46 | 47 | ||
47 | static struct ndfc_controller ndfc_ctrl; | 48 | static struct ndfc_controller ndfc_ctrl; |
@@ -50,17 +51,14 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip) | |||
50 | { | 51 | { |
51 | uint32_t ccr; | 52 | uint32_t ccr; |
52 | struct ndfc_controller *ndfc = &ndfc_ctrl; | 53 | struct ndfc_controller *ndfc = &ndfc_ctrl; |
53 | struct nand_chip *nandchip = mtd->priv; | ||
54 | struct ndfc_nand_mtd *nandmtd = nandchip->priv; | ||
55 | struct platform_nand_chip *pchip = nandmtd->pl_chip; | ||
56 | 54 | ||
57 | ccr = __raw_readl(ndfc->ndfcbase + NDFC_CCR); | 55 | ccr = in_be32(ndfc->ndfcbase + NDFC_CCR); |
58 | if (chip >= 0) { | 56 | if (chip >= 0) { |
59 | ccr &= ~NDFC_CCR_BS_MASK; | 57 | ccr &= ~NDFC_CCR_BS_MASK; |
60 | ccr |= NDFC_CCR_BS(chip + pchip->chip_offset); | 58 | ccr |= NDFC_CCR_BS(chip + ndfc->chip_select); |
61 | } else | 59 | } else |
62 | ccr |= NDFC_CCR_RESET_CE; | 60 | ccr |= NDFC_CCR_RESET_CE; |
63 | __raw_writel(ccr, ndfc->ndfcbase + NDFC_CCR); | 61 | out_be32(ndfc->ndfcbase + NDFC_CCR, ccr); |
64 | } | 62 | } |
65 | 63 | ||
66 | static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) | 64 | static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) |
@@ -80,7 +78,7 @@ static int ndfc_ready(struct mtd_info *mtd) | |||
80 | { | 78 | { |
81 | struct ndfc_controller *ndfc = &ndfc_ctrl; | 79 | struct ndfc_controller *ndfc = &ndfc_ctrl; |
82 | 80 | ||
83 | return __raw_readl(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY; | 81 | return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY; |
84 | } | 82 | } |
85 | 83 | ||
86 | static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode) | 84 | static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode) |
@@ -88,9 +86,9 @@ static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode) | |||
88 | uint32_t ccr; | 86 | uint32_t ccr; |
89 | struct ndfc_controller *ndfc = &ndfc_ctrl; | 87 | struct ndfc_controller *ndfc = &ndfc_ctrl; |
90 | 88 | ||
91 | ccr = __raw_readl(ndfc->ndfcbase + NDFC_CCR); | 89 | ccr = in_be32(ndfc->ndfcbase + NDFC_CCR); |
92 | ccr |= NDFC_CCR_RESET_ECC; | 90 | ccr |= NDFC_CCR_RESET_ECC; |
93 | __raw_writel(ccr, ndfc->ndfcbase + NDFC_CCR); | 91 | out_be32(ndfc->ndfcbase + NDFC_CCR, ccr); |
94 | wmb(); | 92 | wmb(); |
95 | } | 93 | } |
96 | 94 | ||
@@ -102,9 +100,10 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd, | |||
102 | uint8_t *p = (uint8_t *)&ecc; | 100 | uint8_t *p = (uint8_t *)&ecc; |
103 | 101 | ||
104 | wmb(); | 102 | wmb(); |
105 | ecc = __raw_readl(ndfc->ndfcbase + NDFC_ECC); | 103 | ecc = in_be32(ndfc->ndfcbase + NDFC_ECC); |
106 | ecc_code[0] = p[1]; | 104 | /* The NDFC uses Smart Media (SMC) byte order */ |
107 | ecc_code[1] = p[2]; | 105 | ecc_code[0] = p[2]; |
106 | ecc_code[1] = p[1]; | ||
108 | ecc_code[2] = p[3]; | 107 | ecc_code[2] = p[3]; |
109 | 108 | ||
110 | return 0; | 109 | return 0; |
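ndfc_calculate_ecc() reads the 32-bit NDFC_ECC register through a byte pointer and stores the bytes in Smart Media order, which is why indices 1 and 2 are swapped relative to the old code. A userspace rendering of the reordering; the register value is invented, and the byte view follows host byte order (big-endian on the 44x parts this controller sits in):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t ecc = 0x00A1B2C3;      /* hypothetical NDFC_ECC contents */
        uint8_t *p = (uint8_t *)&ecc;   /* byte view, host byte order */
        uint8_t ecc_code[3] = { p[2], p[1], p[3] };

        printf("ecc_code: %02X %02X %02X\n", ecc_code[0], ecc_code[1], ecc_code[2]);
        return 0;
}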
@@ -123,7 +122,7 @@ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
123 | uint32_t *p = (uint32_t *) buf; | 122 | uint32_t *p = (uint32_t *) buf; |
124 | 123 | ||
125 | for(;len > 0; len -= 4) | 124 | for(;len > 0; len -= 4) |
126 | *p++ = __raw_readl(ndfc->ndfcbase + NDFC_DATA); | 125 | *p++ = in_be32(ndfc->ndfcbase + NDFC_DATA); |
127 | } | 126 | } |
128 | 127 | ||
129 | static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | 128 | static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
@@ -132,7 +131,7 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
132 | uint32_t *p = (uint32_t *) buf; | 131 | uint32_t *p = (uint32_t *) buf; |
133 | 132 | ||
134 | for(;len > 0; len -= 4) | 133 | for(;len > 0; len -= 4) |
135 | __raw_writel(*p++, ndfc->ndfcbase + NDFC_DATA); | 134 | out_be32(ndfc->ndfcbase + NDFC_DATA, *p++); |
136 | } | 135 | } |
137 | 136 | ||
138 | static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | 137 | static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
@@ -141,7 +140,7 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
141 | uint32_t *p = (uint32_t *) buf; | 140 | uint32_t *p = (uint32_t *) buf; |
142 | 141 | ||
143 | for(;len > 0; len -= 4) | 142 | for(;len > 0; len -= 4) |
144 | if (*p++ != __raw_readl(ndfc->ndfcbase + NDFC_DATA)) | 143 | if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA)) |
145 | return -EFAULT; | 144 | return -EFAULT; |
146 | return 0; | 145 | return 0; |
147 | } | 146 | } |
@@ -149,10 +148,19 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
149 | /* | 148 | /* |
150 | * Initialize chip structure | 149 | * Initialize chip structure |
151 | */ | 150 | */ |
152 | static void ndfc_chip_init(struct ndfc_nand_mtd *mtd) | 151 | static int ndfc_chip_init(struct ndfc_controller *ndfc, |
152 | struct device_node *node) | ||
153 | { | 153 | { |
154 | struct ndfc_controller *ndfc = &ndfc_ctrl; | 154 | #ifdef CONFIG_MTD_PARTITIONS |
155 | struct nand_chip *chip = &mtd->chip; | 155 | #ifdef CONFIG_MTD_CMDLINE_PARTS |
156 | static const char *part_types[] = { "cmdlinepart", NULL }; | ||
157 | #else | ||
158 | static const char *part_types[] = { NULL }; | ||
159 | #endif | ||
160 | #endif | ||
161 | struct device_node *flash_np; | ||
162 | struct nand_chip *chip = &ndfc->chip; | ||
163 | int ret; | ||
156 | 164 | ||
157 | chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; | 165 | chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; |
158 | chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA; | 166 | chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA; |
@@ -160,8 +168,6 @@ static void ndfc_chip_init(struct ndfc_nand_mtd *mtd) | |||
160 | chip->dev_ready = ndfc_ready; | 168 | chip->dev_ready = ndfc_ready; |
161 | chip->select_chip = ndfc_select_chip; | 169 | chip->select_chip = ndfc_select_chip; |
162 | chip->chip_delay = 50; | 170 | chip->chip_delay = 50; |
163 | chip->priv = mtd; | ||
164 | chip->options = mtd->pl_chip->options; | ||
165 | chip->controller = &ndfc->ndfc_control; | 171 | chip->controller = &ndfc->ndfc_control; |
166 | chip->read_buf = ndfc_read_buf; | 172 | chip->read_buf = ndfc_read_buf; |
167 | chip->write_buf = ndfc_write_buf; | 173 | chip->write_buf = ndfc_write_buf; |
@@ -172,143 +178,136 @@ static void ndfc_chip_init(struct ndfc_nand_mtd *mtd) | |||
172 | chip->ecc.mode = NAND_ECC_HW; | 178 | chip->ecc.mode = NAND_ECC_HW; |
173 | chip->ecc.size = 256; | 179 | chip->ecc.size = 256; |
174 | chip->ecc.bytes = 3; | 180 | chip->ecc.bytes = 3; |
175 | chip->ecclayout = chip->ecc.layout = mtd->pl_chip->ecclayout; | ||
176 | mtd->mtd.priv = chip; | ||
177 | mtd->mtd.owner = THIS_MODULE; | ||
178 | } | ||
179 | |||
180 | static int ndfc_chip_probe(struct platform_device *pdev) | ||
181 | { | ||
182 | struct platform_nand_chip *nc = pdev->dev.platform_data; | ||
183 | struct ndfc_chip_settings *settings = nc->priv; | ||
184 | struct ndfc_controller *ndfc = &ndfc_ctrl; | ||
185 | struct ndfc_nand_mtd *nandmtd; | ||
186 | |||
187 | if (nc->chip_offset >= NDFC_MAX_BANKS || nc->nr_chips > NDFC_MAX_BANKS) | ||
188 | return -EINVAL; | ||
189 | |||
190 | /* Set the bank settings */ | ||
191 | __raw_writel(settings->bank_settings, | ||
192 | ndfc->ndfcbase + NDFC_BCFG0 + (nc->chip_offset << 2)); | ||
193 | 181 | ||
194 | nandmtd = &ndfc_mtd[pdev->id]; | 182 | ndfc->mtd.priv = chip; |
195 | if (nandmtd->pl_chip) | 183 | ndfc->mtd.owner = THIS_MODULE; |
196 | return -EBUSY; | ||
197 | 184 | ||
198 | nandmtd->pl_chip = nc; | 185 | flash_np = of_get_next_child(node, NULL); |
199 | ndfc_chip_init(nandmtd); | 186 | if (!flash_np) |
200 | |||
201 | /* Scan for chips */ | ||
202 | if (nand_scan(&nandmtd->mtd, nc->nr_chips)) { | ||
203 | nandmtd->pl_chip = NULL; | ||
204 | return -ENODEV; | 187 | return -ENODEV; |
188 | |||
189 | ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s", | ||
190 | ndfc->ofdev->dev.bus_id, flash_np->name); | ||
191 | if (!ndfc->mtd.name) { | ||
192 | ret = -ENOMEM; | ||
193 | goto err; | ||
205 | } | 194 | } |
206 | 195 | ||
207 | #ifdef CONFIG_MTD_PARTITIONS | 196 | ret = nand_scan(&ndfc->mtd, 1); |
208 | printk("Number of partitions %d\n", nc->nr_partitions); | 197 | if (ret) |
209 | if (nc->nr_partitions) { | 198 | goto err; |
210 | /* Add the full device, so complete dumps can be made */ | ||
211 | add_mtd_device(&nandmtd->mtd); | ||
212 | add_mtd_partitions(&nandmtd->mtd, nc->partitions, | ||
213 | nc->nr_partitions); | ||
214 | 199 | ||
215 | } else | 200 | #ifdef CONFIG_MTD_PARTITIONS |
216 | #else | 201 | ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0); |
217 | add_mtd_device(&nandmtd->mtd); | 202 | if (ret < 0) |
203 | goto err; | ||
204 | |||
205 | #ifdef CONFIG_MTD_OF_PARTS | ||
206 | if (ret == 0) { | ||
207 | ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np, | ||
208 | &ndfc->parts); | ||
209 | if (ret < 0) | ||
210 | goto err; | ||
211 | } | ||
218 | #endif | 212 | #endif |
219 | 213 | ||
220 | atomic_inc(&ndfc->childs_active); | 214 | if (ret > 0) |
221 | return 0; | 215 | ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret); |
222 | } | 216 | else |
217 | #endif | ||
218 | ret = add_mtd_device(&ndfc->mtd); | ||
223 | 219 | ||
224 | static int ndfc_chip_remove(struct platform_device *pdev) | 220 | err: |
225 | { | 221 | of_node_put(flash_np); |
226 | return 0; | 222 | if (ret) |
223 | kfree(ndfc->mtd.name); | ||
224 | return ret; | ||
227 | } | 225 | } |
228 | 226 | ||
229 | static int ndfc_nand_probe(struct platform_device *pdev) | 227 | static int __devinit ndfc_probe(struct of_device *ofdev, |
228 | const struct of_device_id *match) | ||
230 | { | 229 | { |
231 | struct platform_nand_ctrl *nc = pdev->dev.platform_data; | ||
232 | struct ndfc_controller_settings *settings = nc->priv; | ||
233 | struct resource *res = pdev->resource; | ||
234 | struct ndfc_controller *ndfc = &ndfc_ctrl; | 230 | struct ndfc_controller *ndfc = &ndfc_ctrl; |
235 | unsigned long long phys = settings->ndfc_erpn | res->start; | 231 | const u32 *reg; |
232 | u32 ccr; | ||
233 | int err, len; | ||
236 | 234 | ||
237 | #ifndef CONFIG_PHYS_64BIT | 235 | spin_lock_init(&ndfc->ndfc_control.lock); |
238 | ndfc->ndfcbase = ioremap((phys_addr_t)phys, res->end - res->start + 1); | 236 | init_waitqueue_head(&ndfc->ndfc_control.wq); |
239 | #else | 237 | ndfc->ofdev = ofdev; |
240 | ndfc->ndfcbase = ioremap64(phys, res->end - res->start + 1); | 238 | dev_set_drvdata(&ofdev->dev, ndfc); |
241 | #endif | 239 | |
240 | /* Read the reg property to get the chip select */ | ||
241 | reg = of_get_property(ofdev->node, "reg", &len); | ||
242 | if (reg == NULL || len != 12) { | ||
243 | dev_err(&ofdev->dev, "unable to read reg property (%d)\n", len); | ||
244 | return -ENOENT; | ||
245 | } | ||
246 | ndfc->chip_select = reg[0]; | ||
247 | |||
248 | ndfc->ndfcbase = of_iomap(ofdev->node, 0); | ||
242 | if (!ndfc->ndfcbase) { | 249 | if (!ndfc->ndfcbase) { |
243 | printk(KERN_ERR "NDFC: ioremap failed\n"); | 250 | dev_err(&ofdev->dev, "failed to get memory\n"); |
244 | return -EIO; | 251 | return -EIO; |
245 | } | 252 | } |
246 | 253 | ||
247 | __raw_writel(settings->ccr_settings, ndfc->ndfcbase + NDFC_CCR); | 254 | ccr = NDFC_CCR_BS(ndfc->chip_select); |
248 | 255 | ||
249 | spin_lock_init(&ndfc->ndfc_control.lock); | 256 | /* It is ok if ccr does not exist - just default to 0 */ |
250 | init_waitqueue_head(&ndfc->ndfc_control.wq); | 257 | reg = of_get_property(ofdev->node, "ccr", NULL); |
258 | if (reg) | ||
259 | ccr |= *reg; | ||
251 | 260 | ||
252 | platform_set_drvdata(pdev, ndfc); | 261 | out_be32(ndfc->ndfcbase + NDFC_CCR, ccr); |
253 | 262 | ||
254 | printk("NDFC NAND Driver initialized. Chip-Rev: 0x%08x\n", | 263 | /* Set the bank settings if given */ |
255 | __raw_readl(ndfc->ndfcbase + NDFC_REVID)); | 264 | reg = of_get_property(ofdev->node, "bank-settings", NULL); |
265 | if (reg) { | ||
266 | int offset = NDFC_BCFG0 + (ndfc->chip_select << 2); | ||
267 | out_be32(ndfc->ndfcbase + offset, *reg); | ||
268 | } | ||
269 | |||
270 | err = ndfc_chip_init(ndfc, ofdev->node); | ||
271 | if (err) { | ||
272 | iounmap(ndfc->ndfcbase); | ||
273 | return err; | ||
274 | } | ||
256 | 275 | ||
257 | return 0; | 276 | return 0; |
258 | } | 277 | } |
259 | 278 | ||
260 | static int ndfc_nand_remove(struct platform_device *pdev) | 279 | static int __devexit ndfc_remove(struct of_device *ofdev) |
261 | { | 280 | { |
262 | struct ndfc_controller *ndfc = platform_get_drvdata(pdev); | 281 | struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); |
263 | 282 | ||
264 | if (atomic_read(&ndfc->childs_active)) | 283 | nand_release(&ndfc->mtd); |
265 | return -EBUSY; | ||
266 | 284 | ||
267 | if (ndfc) { | ||
268 | platform_set_drvdata(pdev, NULL); | ||
269 | iounmap(ndfc_ctrl.ndfcbase); | ||
270 | ndfc_ctrl.ndfcbase = NULL; | ||
271 | } | ||
272 | return 0; | 285 | return 0; |
273 | } | 286 | } |
274 | 287 | ||
275 | /* driver device registration */ | 288 | static const struct of_device_id ndfc_match[] = { |
276 | 289 | { .compatible = "ibm,ndfc", }, | |
277 | static struct platform_driver ndfc_chip_driver = { | 290 | {} |
278 | .probe = ndfc_chip_probe, | ||
279 | .remove = ndfc_chip_remove, | ||
280 | .driver = { | ||
281 | .name = "ndfc-chip", | ||
282 | .owner = THIS_MODULE, | ||
283 | }, | ||
284 | }; | 291 | }; |
292 | MODULE_DEVICE_TABLE(of, ndfc_match); | ||
285 | 293 | ||
286 | static struct platform_driver ndfc_nand_driver = { | 294 | static struct of_platform_driver ndfc_driver = { |
287 | .probe = ndfc_nand_probe, | 295 | .driver = { |
288 | .remove = ndfc_nand_remove, | 296 | .name = "ndfc", |
289 | .driver = { | ||
290 | .name = "ndfc-nand", | ||
291 | .owner = THIS_MODULE, | ||
292 | }, | 297 | }, |
298 | .match_table = ndfc_match, | ||
299 | .probe = ndfc_probe, | ||
300 | .remove = __devexit_p(ndfc_remove), | ||
293 | }; | 301 | }; |
294 | 302 | ||
295 | static int __init ndfc_nand_init(void) | 303 | static int __init ndfc_nand_init(void) |
296 | { | 304 | { |
297 | int ret; | 305 | return of_register_platform_driver(&ndfc_driver); |
298 | |||
299 | spin_lock_init(&ndfc_ctrl.ndfc_control.lock); | ||
300 | init_waitqueue_head(&ndfc_ctrl.ndfc_control.wq); | ||
301 | |||
302 | ret = platform_driver_register(&ndfc_nand_driver); | ||
303 | if (!ret) | ||
304 | ret = platform_driver_register(&ndfc_chip_driver); | ||
305 | return ret; | ||
306 | } | 306 | } |
307 | 307 | ||
308 | static void __exit ndfc_nand_exit(void) | 308 | static void __exit ndfc_nand_exit(void) |
309 | { | 309 | { |
310 | platform_driver_unregister(&ndfc_chip_driver); | 310 | of_unregister_platform_driver(&ndfc_driver); |
311 | platform_driver_unregister(&ndfc_nand_driver); | ||
312 | } | 311 | } |
313 | 312 | ||
314 | module_init(ndfc_nand_init); | 313 | module_init(ndfc_nand_init); |
@@ -316,6 +315,4 @@ module_exit(ndfc_nand_exit); | |||
316 | 315 | ||
317 | MODULE_LICENSE("GPL"); | 316 | MODULE_LICENSE("GPL"); |
318 | MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); | 317 | MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); |
319 | MODULE_DESCRIPTION("Platform driver for NDFC"); | 318 | MODULE_DESCRIPTION("OF Platform driver for NDFC"); |
320 | MODULE_ALIAS("platform:ndfc-chip"); | ||
321 | MODULE_ALIAS("platform:ndfc-nand"); | ||
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index fc4144495610..cc55cbc2b308 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -298,7 +298,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = { | |||
298 | #define NDTR1_tAR(c) (min((c), 15) << 0) | 298 | #define NDTR1_tAR(c) (min((c), 15) << 0) |
299 | 299 | ||
300 | /* convert nano-seconds to nand flash controller clock cycles */ | 300 | /* convert nano-seconds to nand flash controller clock cycles */ |
301 | #define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1) | 301 | #define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1) |
302 | 302 | ||
303 | static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, | 303 | static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, |
304 | const struct pxa3xx_nand_timing *t) | 304 | const struct pxa3xx_nand_timing *t) |
@@ -368,14 +368,14 @@ static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, | |||
368 | /* large block, 2 cycles for column address | 368 | /* large block, 2 cycles for column address |
369 | * row address starts from 3rd cycle | 369 | * row address starts from 3rd cycle |
370 | */ | 370 | */ |
371 | info->ndcb1 |= (page_addr << 16) | (column & 0xffff); | 371 | info->ndcb1 |= page_addr << 16; |
372 | if (info->row_addr_cycles == 3) | 372 | if (info->row_addr_cycles == 3) |
373 | info->ndcb2 = (page_addr >> 16) & 0xff; | 373 | info->ndcb2 = (page_addr >> 16) & 0xff; |
374 | } else | 374 | } else |
375 | /* small block, 1 cycles for column address | 375 | /* small block, 1 cycles for column address |
376 | * row address starts from 2nd cycle | 376 | * row address starts from 2nd cycle |
377 | */ | 377 | */ |
378 | info->ndcb1 = (page_addr << 8) | (column & 0xff); | 378 | info->ndcb1 = page_addr << 8; |
379 | 379 | ||
380 | if (cmd == cmdset->program) | 380 | if (cmd == cmdset->program) |
381 | info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS; | 381 | info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS; |
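ns2cycle() turns a nanosecond timing figure into controller clock cycles; the clock is divided down to MHz first so the 32-bit product cannot overflow, and this fix changes the final adjustment from +1 to -1. The arithmetic with a made-up 156 MHz clock:

#include <stdio.h>

#define ns2cycle(ns, clk) (int)(((ns) * ((clk) / 1000000) / 1000) - 1)

int main(void)
{
        unsigned long clk = 156000000UL;        /* hypothetical controller clock */

        printf("10 ns  -> %d cycles\n", ns2cycle(10, clk));
        printf("100 ns -> %d cycles\n", ns2cycle(100, clk));
        return 0;
}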
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c index 30a518e211bd..54ec7542a7b7 100644 --- a/drivers/mtd/nand/sharpsl.c +++ b/drivers/mtd/nand/sharpsl.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * drivers/mtd/nand/sharpsl.c | 2 | * drivers/mtd/nand/sharpsl.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Richard Purdie | 4 | * Copyright (C) 2004 Richard Purdie |
5 | * Copyright (C) 2008 Dmitry Baryshkov | ||
5 | * | 6 | * |
6 | * Based on Sharp's NAND driver sharp_sl.c | 7 | * Based on Sharp's NAND driver sharp_sl.c |
7 | * | 8 | * |
@@ -19,22 +20,31 @@ | |||
19 | #include <linux/mtd/nand.h> | 20 | #include <linux/mtd/nand.h> |
20 | #include <linux/mtd/nand_ecc.h> | 21 | #include <linux/mtd/nand_ecc.h> |
21 | #include <linux/mtd/partitions.h> | 22 | #include <linux/mtd/partitions.h> |
23 | #include <linux/mtd/sharpsl.h> | ||
22 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/platform_device.h> | ||
26 | |||
23 | #include <asm/io.h> | 27 | #include <asm/io.h> |
24 | #include <mach/hardware.h> | 28 | #include <mach/hardware.h> |
25 | #include <asm/mach-types.h> | 29 | #include <asm/mach-types.h> |
26 | 30 | ||
27 | static void __iomem *sharpsl_io_base; | 31 | struct sharpsl_nand { |
28 | static int sharpsl_phys_base = 0x0C000000; | 32 | struct mtd_info mtd; |
33 | struct nand_chip chip; | ||
34 | |||
35 | void __iomem *io; | ||
36 | }; | ||
37 | |||
38 | #define mtd_to_sharpsl(_mtd) container_of(_mtd, struct sharpsl_nand, mtd) | ||
29 | 39 | ||
30 | /* register offset */ | 40 | /* register offset */ |
31 | #define ECCLPLB sharpsl_io_base+0x00 /* line parity 7 - 0 bit */ | 41 | #define ECCLPLB 0x00 /* line parity 7 - 0 bit */ |
32 | #define ECCLPUB sharpsl_io_base+0x04 /* line parity 15 - 8 bit */ | 42 | #define ECCLPUB 0x04 /* line parity 15 - 8 bit */ |
33 | #define ECCCP sharpsl_io_base+0x08 /* column parity 5 - 0 bit */ | 43 | #define ECCCP 0x08 /* column parity 5 - 0 bit */ |
34 | #define ECCCNTR sharpsl_io_base+0x0C /* ECC byte counter */ | 44 | #define ECCCNTR 0x0C /* ECC byte counter */ |
35 | #define ECCCLRR sharpsl_io_base+0x10 /* clear ECC */ | 45 | #define ECCCLRR 0x10 /* clear ECC */ |
36 | #define FLASHIO sharpsl_io_base+0x14 /* Flash I/O */ | 46 | #define FLASHIO 0x14 /* Flash I/O */ |
37 | #define FLASHCTL sharpsl_io_base+0x18 /* Flash Control */ | 47 | #define FLASHCTL 0x18 /* Flash Control */ |
38 | 48 | ||
39 | /* Flash control bit */ | 49 | /* Flash control bit */ |
40 | #define FLRYBY (1 << 5) | 50 | #define FLRYBY (1 << 5) |
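The sharpsl driver trades its file-scope globals for a per-device struct sharpsl_nand, and mtd_to_sharpsl() uses container_of() to climb from the embedded mtd_info back to that structure inside every callback. A standalone illustration of the idiom; the two structs here are stripped-down stand-ins, not the real definitions:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mtd_info { int dummy; };

struct sharpsl_nand {
        int io;                 /* stands in for the ioremapped base */
        struct mtd_info mtd;    /* embedded member handed to the MTD core */
};

int main(void)
{
        struct sharpsl_nand s = { .io = 42 };
        struct mtd_info *mtd = &s.mtd;          /* what the callbacks receive */
        struct sharpsl_nand *back = container_of(mtd, struct sharpsl_nand, mtd);

        printf("recovered io = %d, same object: %s\n",
               back->io, back == &s ? "yes" : "no");
        return 0;
}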
@@ -45,35 +55,6 @@ static int sharpsl_phys_base = 0x0C000000; | |||
45 | #define FLCE0 (1 << 0) | 55 | #define FLCE0 (1 << 0) |
46 | 56 | ||
47 | /* | 57 | /* |
48 | * MTD structure for SharpSL | ||
49 | */ | ||
50 | static struct mtd_info *sharpsl_mtd = NULL; | ||
51 | |||
52 | /* | ||
53 | * Define partitions for flash device | ||
54 | */ | ||
55 | #define DEFAULT_NUM_PARTITIONS 3 | ||
56 | |||
57 | static int nr_partitions; | ||
58 | static struct mtd_partition sharpsl_nand_default_partition_info[] = { | ||
59 | { | ||
60 | .name = "System Area", | ||
61 | .offset = 0, | ||
62 | .size = 7 * 1024 * 1024, | ||
63 | }, | ||
64 | { | ||
65 | .name = "Root Filesystem", | ||
66 | .offset = 7 * 1024 * 1024, | ||
67 | .size = 30 * 1024 * 1024, | ||
68 | }, | ||
69 | { | ||
70 | .name = "Home Filesystem", | ||
71 | .offset = MTDPART_OFS_APPEND, | ||
72 | .size = MTDPART_SIZ_FULL, | ||
73 | }, | ||
74 | }; | ||
75 | |||
76 | /* | ||
77 | * hardware specific access to control-lines | 58 | * hardware specific access to control-lines |
78 | * ctrl: | 59 | * ctrl: |
79 | * NAND_CNE: bit 0 -> ! bit 0 & 4 | 60 | * NAND_CNE: bit 0 -> ! bit 0 & 4 |
@@ -84,6 +65,7 @@ static struct mtd_partition sharpsl_nand_default_partition_info[] = { | |||
84 | static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd, | 65 | static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd, |
85 | unsigned int ctrl) | 66 | unsigned int ctrl) |
86 | { | 67 | { |
68 | struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd); | ||
87 | struct nand_chip *chip = mtd->priv; | 69 | struct nand_chip *chip = mtd->priv; |
88 | 70 | ||
89 | if (ctrl & NAND_CTRL_CHANGE) { | 71 | if (ctrl & NAND_CTRL_CHANGE) { |
@@ -93,103 +75,97 @@ static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd, | |||
93 | 75 | ||
94 | bits ^= 0x11; | 76 | bits ^= 0x11; |
95 | 77 | ||
96 | writeb((readb(FLASHCTL) & ~0x17) | bits, FLASHCTL); | 78 | writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL); |
97 | } | 79 | } |
98 | 80 | ||
99 | if (cmd != NAND_CMD_NONE) | 81 | if (cmd != NAND_CMD_NONE) |
100 | writeb(cmd, chip->IO_ADDR_W); | 82 | writeb(cmd, chip->IO_ADDR_W); |
101 | } | 83 | } |
102 | 84 | ||
103 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; | ||
104 | |||
105 | static struct nand_bbt_descr sharpsl_bbt = { | ||
106 | .options = 0, | ||
107 | .offs = 4, | ||
108 | .len = 2, | ||
109 | .pattern = scan_ff_pattern | ||
110 | }; | ||
111 | |||
112 | static struct nand_bbt_descr sharpsl_akita_bbt = { | ||
113 | .options = 0, | ||
114 | .offs = 4, | ||
115 | .len = 1, | ||
116 | .pattern = scan_ff_pattern | ||
117 | }; | ||
118 | |||
119 | static struct nand_ecclayout akita_oobinfo = { | ||
120 | .eccbytes = 24, | ||
121 | .eccpos = { | ||
122 | 0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11, | ||
123 | 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23, | ||
124 | 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37}, | ||
125 | .oobfree = {{0x08, 0x09}} | ||
126 | }; | ||
127 | |||
128 | static int sharpsl_nand_dev_ready(struct mtd_info *mtd) | 85 | static int sharpsl_nand_dev_ready(struct mtd_info *mtd) |
129 | { | 86 | { |
130 | return !((readb(FLASHCTL) & FLRYBY) == 0); | 87 | struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd); |
88 | return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0); | ||
131 | } | 89 | } |
132 | 90 | ||
133 | static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode) | 91 | static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode) |
134 | { | 92 | { |
135 | writeb(0, ECCCLRR); | 93 | struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd); |
94 | writeb(0, sharpsl->io + ECCCLRR); | ||
136 | } | 95 | } |
137 | 96 | ||
138 | static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code) | 97 | static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code) |
139 | { | 98 | { |
140 | ecc_code[0] = ~readb(ECCLPUB); | 99 | struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd); |
141 | ecc_code[1] = ~readb(ECCLPLB); | 100 | ecc_code[0] = ~readb(sharpsl->io + ECCLPUB); |
142 | ecc_code[2] = (~readb(ECCCP) << 2) | 0x03; | 101 | ecc_code[1] = ~readb(sharpsl->io + ECCLPLB); |
143 | return readb(ECCCNTR) != 0; | 102 | ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03; |
103 | return readb(sharpsl->io + ECCCNTR) != 0; | ||
144 | } | 104 | } |
145 | 105 | ||
146 | #ifdef CONFIG_MTD_PARTITIONS | 106 | #ifdef CONFIG_MTD_PARTITIONS |
147 | const char *part_probes[] = { "cmdlinepart", NULL }; | 107 | static const char *part_probes[] = { "cmdlinepart", NULL }; |
148 | #endif | 108 | #endif |
149 | 109 | ||
150 | /* | 110 | /* |
151 | * Main initialization routine | 111 | * Main initialization routine |
152 | */ | 112 | */ |
153 | static int __init sharpsl_nand_init(void) | 113 | static int __devinit sharpsl_nand_probe(struct platform_device *pdev) |
154 | { | 114 | { |
155 | struct nand_chip *this; | 115 | struct nand_chip *this; |
116 | #ifdef CONFIG_MTD_PARTITIONS | ||
156 | struct mtd_partition *sharpsl_partition_info; | 117 | struct mtd_partition *sharpsl_partition_info; |
118 | int nr_partitions; | ||
119 | #endif | ||
120 | struct resource *r; | ||
157 | int err = 0; | 121 | int err = 0; |
122 | struct sharpsl_nand *sharpsl; | ||
123 | struct sharpsl_nand_platform_data *data = pdev->dev.platform_data; | ||
124 | |||
125 | if (!data) { | ||
126 | dev_err(&pdev->dev, "no platform data!\n"); | ||
127 | return -EINVAL; | ||
128 | } | ||
158 | 129 | ||
159 | /* Allocate memory for MTD device structure and private data */ | 130 | /* Allocate memory for MTD device structure and private data */ |
160 | sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); | 131 | sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL); |
161 | if (!sharpsl_mtd) { | 132 | if (!sharpsl) { |
162 | printk("Unable to allocate SharpSL NAND MTD device structure.\n"); | 133 | printk("Unable to allocate SharpSL NAND MTD device structure.\n"); |
163 | return -ENOMEM; | 134 | return -ENOMEM; |
164 | } | 135 | } |
165 | 136 | ||
137 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
138 | if (!r) { | ||
139 | dev_err(&pdev->dev, "no io memory resource defined!\n"); | ||
140 | err = -ENODEV; | ||
141 | goto err_get_res; | ||
142 | } | ||
143 | |||
166 | /* map physical address */ | 144 | /* map physical address */ |
167 | sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000); | 145 | sharpsl->io = ioremap(r->start, resource_size(r)); |
168 | if (!sharpsl_io_base) { | 146 | if (!sharpsl->io) { |
169 | printk("ioremap to access Sharp SL NAND chip failed\n"); | 147 | printk("ioremap to access Sharp SL NAND chip failed\n"); |
170 | kfree(sharpsl_mtd); | 148 | err = -EIO; |
171 | return -EIO; | 149 | goto err_ioremap; |
172 | } | 150 | } |
173 | 151 | ||
174 | /* Get pointer to private data */ | 152 | /* Get pointer to private data */ |
175 | this = (struct nand_chip *)(&sharpsl_mtd[1]); | 153 | this = (struct nand_chip *)(&sharpsl->chip); |
176 | |||
177 | /* Initialize structures */ | ||
178 | memset(sharpsl_mtd, 0, sizeof(struct mtd_info)); | ||
179 | memset(this, 0, sizeof(struct nand_chip)); | ||
180 | 154 | ||
181 | /* Link the private data with the MTD structure */ | 155 | /* Link the private data with the MTD structure */ |
182 | sharpsl_mtd->priv = this; | 156 | sharpsl->mtd.priv = this; |
183 | sharpsl_mtd->owner = THIS_MODULE; | 157 | sharpsl->mtd.owner = THIS_MODULE; |
158 | |||
159 | platform_set_drvdata(pdev, sharpsl); | ||
184 | 160 | ||
185 | /* | 161 | /* |
186 | * PXA initialize | 162 | * PXA initialize |
187 | */ | 163 | */ |
188 | writeb(readb(FLASHCTL) | FLWP, FLASHCTL); | 164 | writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL); |
189 | 165 | ||
190 | /* Set address of NAND IO lines */ | 166 | /* Set address of NAND IO lines */ |
191 | this->IO_ADDR_R = FLASHIO; | 167 | this->IO_ADDR_R = sharpsl->io + FLASHIO; |
192 | this->IO_ADDR_W = FLASHIO; | 168 | this->IO_ADDR_W = sharpsl->io + FLASHIO; |
193 | /* Set address of hardware control function */ | 169 | /* Set address of hardware control function */ |
194 | this->cmd_ctrl = sharpsl_nand_hwcontrol; | 170 | this->cmd_ctrl = sharpsl_nand_hwcontrol; |
195 | this->dev_ready = sharpsl_nand_dev_ready; | 171 | this->dev_ready = sharpsl_nand_dev_ready; |
@@ -199,68 +175,89 @@ static int __init sharpsl_nand_init(void) | |||
199 | this->ecc.mode = NAND_ECC_HW; | 175 | this->ecc.mode = NAND_ECC_HW; |
200 | this->ecc.size = 256; | 176 | this->ecc.size = 256; |
201 | this->ecc.bytes = 3; | 177 | this->ecc.bytes = 3; |
202 | this->badblock_pattern = &sharpsl_bbt; | 178 | this->badblock_pattern = data->badblock_pattern; |
203 | if (machine_is_akita() || machine_is_borzoi()) { | 179 | this->ecc.layout = data->ecc_layout; |
204 | this->badblock_pattern = &sharpsl_akita_bbt; | ||
205 | this->ecc.layout = &akita_oobinfo; | ||
206 | } | ||
207 | this->ecc.hwctl = sharpsl_nand_enable_hwecc; | 180 | this->ecc.hwctl = sharpsl_nand_enable_hwecc; |
208 | this->ecc.calculate = sharpsl_nand_calculate_ecc; | 181 | this->ecc.calculate = sharpsl_nand_calculate_ecc; |
209 | this->ecc.correct = nand_correct_data; | 182 | this->ecc.correct = nand_correct_data; |
210 | 183 | ||
211 | /* Scan to find existence of the device */ | 184 | /* Scan to find existence of the device */ |
212 | err = nand_scan(sharpsl_mtd, 1); | 185 | err = nand_scan(&sharpsl->mtd, 1); |
213 | if (err) { | 186 | if (err) |
214 | iounmap(sharpsl_io_base); | 187 | goto err_scan; |
215 | kfree(sharpsl_mtd); | ||
216 | return err; | ||
217 | } | ||
218 | 188 | ||
219 | /* Register the partitions */ | 189 | /* Register the partitions */ |
220 | sharpsl_mtd->name = "sharpsl-nand"; | 190 | sharpsl->mtd.name = "sharpsl-nand"; |
221 | nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes, &sharpsl_partition_info, 0); | 191 | #ifdef CONFIG_MTD_PARTITIONS |
222 | 192 | nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0); | |
223 | if (nr_partitions <= 0) { | 193 | if (nr_partitions <= 0) { |
224 | nr_partitions = DEFAULT_NUM_PARTITIONS; | 194 | nr_partitions = data->nr_partitions; |
225 | sharpsl_partition_info = sharpsl_nand_default_partition_info; | 195 | sharpsl_partition_info = data->partitions; |
226 | if (machine_is_poodle()) { | ||
227 | sharpsl_partition_info[1].size = 22 * 1024 * 1024; | ||
228 | } else if (machine_is_corgi() || machine_is_shepherd()) { | ||
229 | sharpsl_partition_info[1].size = 25 * 1024 * 1024; | ||
230 | } else if (machine_is_husky()) { | ||
231 | sharpsl_partition_info[1].size = 53 * 1024 * 1024; | ||
232 | } else if (machine_is_spitz()) { | ||
233 | sharpsl_partition_info[1].size = 5 * 1024 * 1024; | ||
234 | } else if (machine_is_akita()) { | ||
235 | sharpsl_partition_info[1].size = 58 * 1024 * 1024; | ||
236 | } else if (machine_is_borzoi()) { | ||
237 | sharpsl_partition_info[1].size = 32 * 1024 * 1024; | ||
238 | } | ||
239 | } | 196 | } |
240 | 197 | ||
241 | add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions); | 198 | if (nr_partitions > 0) |
199 | err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions); | ||
200 | else | ||
201 | #endif | ||
202 | err = add_mtd_device(&sharpsl->mtd); | ||
203 | if (err) | ||
204 | goto err_add; | ||
242 | 205 | ||
243 | /* Return happy */ | 206 | /* Return happy */ |
244 | return 0; | 207 | return 0; |
245 | } | ||
246 | 208 | ||
247 | module_init(sharpsl_nand_init); | 209 | err_add: |
210 | nand_release(&sharpsl->mtd); | ||
211 | |||
212 | err_scan: | ||
213 | platform_set_drvdata(pdev, NULL); | ||
214 | iounmap(sharpsl->io); | ||
215 | err_ioremap: | ||
216 | err_get_res: | ||
217 | kfree(sharpsl); | ||
218 | return err; | ||
219 | } | ||
248 | 220 | ||
249 | /* | 221 | /* |
250 | * Clean up routine | 222 | * Clean up routine |
251 | */ | 223 | */ |
252 | static void __exit sharpsl_nand_cleanup(void) | 224 | static int __devexit sharpsl_nand_remove(struct platform_device *pdev) |
253 | { | 225 | { |
226 | struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); | ||
227 | |||
254 | /* Release resources, unregister device */ | 228 | /* Release resources, unregister device */ |
255 | nand_release(sharpsl_mtd); | 229 | nand_release(&sharpsl->mtd); |
256 | 230 | ||
257 | iounmap(sharpsl_io_base); | 231 | platform_set_drvdata(pdev, NULL); |
232 | |||
233 | iounmap(sharpsl->io); | ||
258 | 234 | ||
259 | /* Free the MTD device structure */ | 235 | /* Free the MTD device structure */ |
260 | kfree(sharpsl_mtd); | 236 | kfree(sharpsl); |
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static struct platform_driver sharpsl_nand_driver = { | ||
242 | .driver = { | ||
243 | .name = "sharpsl-nand", | ||
244 | .owner = THIS_MODULE, | ||
245 | }, | ||
246 | .probe = sharpsl_nand_probe, | ||
247 | .remove = __devexit_p(sharpsl_nand_remove), | ||
248 | }; | ||
249 | |||
250 | static int __init sharpsl_nand_init(void) | ||
251 | { | ||
252 | return platform_driver_register(&sharpsl_nand_driver); | ||
261 | } | 253 | } |
254 | module_init(sharpsl_nand_init); | ||
262 | 255 | ||
263 | module_exit(sharpsl_nand_cleanup); | 256 | static void __exit sharpsl_nand_exit(void) |
257 | { | ||
258 | platform_driver_unregister(&sharpsl_nand_driver); | ||
259 | } | ||
260 | module_exit(sharpsl_nand_exit); | ||
264 | 261 | ||
265 | MODULE_LICENSE("GPL"); | 262 | MODULE_LICENSE("GPL"); |
266 | MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); | 263 | MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); |
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index 320b929abe79..d1c4546513f7 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c | |||
@@ -39,7 +39,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
39 | struct NFTLrecord *nftl; | 39 | struct NFTLrecord *nftl; |
40 | unsigned long temp; | 40 | unsigned long temp; |
41 | 41 | ||
42 | if (mtd->type != MTD_NANDFLASH) | 42 | if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX) |
43 | return; | 43 | return; |
44 | /* OK, this is moderately ugly. But probably safe. Alternatives? */ | 44 | /* OK, this is moderately ugly. But probably safe. Alternatives? */ |
45 | if (memcmp(mtd->name, "DiskOnChip", 10)) | 45 | if (memcmp(mtd->name, "DiskOnChip", 10)) |
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c index ccc4f209fbb5..8b22b1836e9f 100644 --- a/drivers/mtd/nftlmount.c +++ b/drivers/mtd/nftlmount.c | |||
@@ -51,7 +51,7 @@ static int find_boot_record(struct NFTLrecord *nftl) | |||
51 | the mtd device accordingly. We could even get rid of | 51 | the mtd device accordingly. We could even get rid of |
52 | nftl->EraseSize if there were any point in doing so. */ | 52 | nftl->EraseSize if there were any point in doing so. */ |
53 | nftl->EraseSize = nftl->mbd.mtd->erasesize; | 53 | nftl->EraseSize = nftl->mbd.mtd->erasesize; |
54 | nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize; | 54 | nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize; |
55 | 55 | ||
56 | nftl->MediaUnit = BLOCK_NIL; | 56 | nftl->MediaUnit = BLOCK_NIL; |
57 | nftl->SpareMediaUnit = BLOCK_NIL; | 57 | nftl->SpareMediaUnit = BLOCK_NIL; |
@@ -168,7 +168,7 @@ device is already correct. | |||
168 | printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n", | 168 | printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n", |
169 | mh->UnitSizeFactor); | 169 | mh->UnitSizeFactor); |
170 | nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor); | 170 | nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor); |
171 | nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize; | 171 | nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize; |
172 | } | 172 | } |
173 | #endif | 173 | #endif |
174 | nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN); | 174 | nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN); |
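Both NFTL hunks above address the same issue: mtd->size is now a 64-bit quantity, while the translation layer does its block arithmetic in 32 bits. The add_mtd hook therefore skips any device larger than UINT_MAX outright, after which the explicit (u32) cast before the division is safe. A minimal sketch of the idiom, with hypothetical names:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

/* Reject devices whose size no longer fits the 32-bit maths used below. */
static int example_check_mtd(struct mtd_info *mtd)
{
	u32 nb_blocks;

	if (mtd->size > UINT_MAX)
		return -EINVAL;

	/* Known to fit in 32 bits here, so a plain cast avoids 64-bit division. */
	nb_blocks = (u32)mtd->size / mtd->erasesize;

	return nb_blocks ? 0 : -EINVAL;
}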
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 90ed319f26e6..529af271db17 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -1772,7 +1772,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
1772 | int len; | 1772 | int len; |
1773 | int ret = 0; | 1773 | int ret = 0; |
1774 | 1774 | ||
1775 | DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len); | 1775 | DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len); |
1776 | 1776 | ||
1777 | block_size = (1 << this->erase_shift); | 1777 | block_size = (1 << this->erase_shift); |
1778 | 1778 | ||
@@ -1810,7 +1810,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
1810 | 1810 | ||
1811 | /* Check if we have a bad block, we do not erase bad blocks */ | 1811 | /* Check if we have a bad block, we do not erase bad blocks */ |
1812 | if (onenand_block_isbad_nolock(mtd, addr, 0)) { | 1812 | if (onenand_block_isbad_nolock(mtd, addr, 0)) { |
1813 | printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%08x\n", (unsigned int) addr); | 1813 | printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr); |
1814 | instr->state = MTD_ERASE_FAILED; | 1814 | instr->state = MTD_ERASE_FAILED; |
1815 | goto erase_exit; | 1815 | goto erase_exit; |
1816 | } | 1816 | } |
@@ -2029,7 +2029,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int | |||
2029 | * | 2029 | * |
2030 | * Lock one or more blocks | 2030 | * Lock one or more blocks |
2031 | */ | 2031 | */ |
2032 | static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | 2032 | static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2033 | { | 2033 | { |
2034 | int ret; | 2034 | int ret; |
2035 | 2035 | ||
@@ -2047,7 +2047,7 @@ static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | |||
2047 | * | 2047 | * |
2048 | * Unlock one or more blocks | 2048 | * Unlock one or more blocks |
2049 | */ | 2049 | */ |
2050 | static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) | 2050 | static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2051 | { | 2051 | { |
2052 | int ret; | 2052 | int ret; |
2053 | 2053 | ||
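The message changes in onenand_erase() show the portable way to print the widened offsets: cast the loff_t/uint64_t value to unsigned long long and use the %ll format, since the underlying type differs between architectures. Roughly (illustrative helper, not part of the patch):

#include <linux/kernel.h>
#include <linux/types.h>

static void example_report_region(loff_t addr, uint64_t len)
{
	/* unsigned long long matches %llx/%llu on every architecture,
	 * whereas loff_t/u64 may be long or long long depending on the arch. */
	printk(KERN_INFO "region 0x%012llx, length %llu\n",
	       (unsigned long long)addr, (unsigned long long)len);
}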
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c index e538c0a72abb..d2aa9c46530f 100644 --- a/drivers/mtd/rfd_ftl.c +++ b/drivers/mtd/rfd_ftl.c | |||
@@ -21,8 +21,6 @@ | |||
21 | 21 | ||
22 | #include <asm/types.h> | 22 | #include <asm/types.h> |
23 | 23 | ||
24 | #define const_cpu_to_le16 __constant_cpu_to_le16 | ||
25 | |||
26 | static int block_size = 0; | 24 | static int block_size = 0; |
27 | module_param(block_size, int, 0); | 25 | module_param(block_size, int, 0); |
28 | MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size"); | 26 | MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size"); |
@@ -156,7 +154,7 @@ static int scan_header(struct partition *part) | |||
156 | size_t retlen; | 154 | size_t retlen; |
157 | 155 | ||
158 | sectors_per_block = part->block_size / SECTOR_SIZE; | 156 | sectors_per_block = part->block_size / SECTOR_SIZE; |
159 | part->total_blocks = part->mbd.mtd->size / part->block_size; | 157 | part->total_blocks = (u32)part->mbd.mtd->size / part->block_size; |
160 | 158 | ||
161 | if (part->total_blocks < 2) | 159 | if (part->total_blocks < 2) |
162 | return -ENOENT; | 160 | return -ENOENT; |
@@ -276,16 +274,17 @@ static void erase_callback(struct erase_info *erase) | |||
276 | 274 | ||
277 | part = (struct partition*)erase->priv; | 275 | part = (struct partition*)erase->priv; |
278 | 276 | ||
279 | i = erase->addr / part->block_size; | 277 | i = (u32)erase->addr / part->block_size; |
280 | if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) { | 278 | if (i >= part->total_blocks || part->blocks[i].offset != erase->addr || |
281 | printk(KERN_ERR PREFIX "erase callback for unknown offset %x " | 279 | erase->addr > UINT_MAX) { |
282 | "on '%s'\n", erase->addr, part->mbd.mtd->name); | 280 | printk(KERN_ERR PREFIX "erase callback for unknown offset %llx " |
281 | "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name); | ||
283 | return; | 282 | return; |
284 | } | 283 | } |
285 | 284 | ||
286 | if (erase->state != MTD_ERASE_DONE) { | 285 | if (erase->state != MTD_ERASE_DONE) { |
287 | printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', " | 286 | printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', " |
288 | "state %d\n", erase->addr, | 287 | "state %d\n", (unsigned long long)erase->addr, |
289 | part->mbd.mtd->name, erase->state); | 288 | part->mbd.mtd->name, erase->state); |
290 | 289 | ||
291 | part->blocks[i].state = BLOCK_FAILED; | 290 | part->blocks[i].state = BLOCK_FAILED; |
@@ -297,7 +296,7 @@ static void erase_callback(struct erase_info *erase) | |||
297 | return; | 296 | return; |
298 | } | 297 | } |
299 | 298 | ||
300 | magic = const_cpu_to_le16(RFD_MAGIC); | 299 | magic = cpu_to_le16(RFD_MAGIC); |
301 | 300 | ||
302 | part->blocks[i].state = BLOCK_ERASED; | 301 | part->blocks[i].state = BLOCK_ERASED; |
303 | part->blocks[i].free_sectors = part->data_sectors_per_block; | 302 | part->blocks[i].free_sectors = part->data_sectors_per_block; |
@@ -345,9 +344,9 @@ static int erase_block(struct partition *part, int block) | |||
345 | rc = part->mbd.mtd->erase(part->mbd.mtd, erase); | 344 | rc = part->mbd.mtd->erase(part->mbd.mtd, erase); |
346 | 345 | ||
347 | if (rc) { | 346 | if (rc) { |
348 | printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' " | 347 | printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' " |
349 | "failed\n", erase->addr, erase->len, | 348 | "failed\n", (unsigned long long)erase->addr, |
350 | part->mbd.mtd->name); | 349 | (unsigned long long)erase->len, part->mbd.mtd->name); |
351 | kfree(erase); | 350 | kfree(erase); |
352 | } | 351 | } |
353 | 352 | ||
@@ -587,7 +586,7 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr) | |||
587 | int block, offset, rc; | 586 | int block, offset, rc; |
588 | u_long addr; | 587 | u_long addr; |
589 | size_t retlen; | 588 | size_t retlen; |
590 | u16 del = const_cpu_to_le16(SECTOR_DELETED); | 589 | u16 del = cpu_to_le16(SECTOR_DELETED); |
591 | 590 | ||
592 | block = old_addr / part->block_size; | 591 | block = old_addr / part->block_size; |
593 | offset = (old_addr % part->block_size) / SECTOR_SIZE - | 592 | offset = (old_addr % part->block_size) / SECTOR_SIZE - |
@@ -763,7 +762,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
763 | { | 762 | { |
764 | struct partition *part; | 763 | struct partition *part; |
765 | 764 | ||
766 | if (mtd->type != MTD_NORFLASH) | 765 | if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX) |
767 | return; | 766 | return; |
768 | 767 | ||
769 | part = kzalloc(sizeof(struct partition), GFP_KERNEL); | 768 | part = kzalloc(sizeof(struct partition), GFP_KERNEL); |
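The rfd_ftl changes also retire the driver-local const_cpu_to_le16 alias: cpu_to_le16() already evaluates to a constant when given a compile-time constant, so the __constant_ wrapper adds nothing. A tiny sketch, using a placeholder magic value rather than the real RFD one:

#include <linux/kernel.h>
#include <asm/byteorder.h>

#define EXAMPLE_MAGIC 0x1234	/* placeholder, not the actual RFD_MAGIC */

static void example_store_magic(__le16 *slot)
{
	/* cpu_to_le16() works directly on constants in driver code. */
	*slot = cpu_to_le16(EXAMPLE_MAGIC);
}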
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index 33a5d6ed6f18..3f67e00d98e0 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c | |||
@@ -294,7 +294,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
294 | int cis_sector; | 294 | int cis_sector; |
295 | 295 | ||
296 | /* Check for small page NAND flash */ | 296 | /* Check for small page NAND flash */ |
297 | if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE) | 297 | if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE || |
298 | mtd->size > UINT_MAX) | ||
298 | return; | 299 | return; |
299 | 300 | ||
300 | /* Check for SSDFC format by reading CIS/IDI sector */ | 301 | /* Check for SSDFC format by reading CIS/IDI sector */ |
@@ -316,7 +317,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
316 | 317 | ||
317 | ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT); | 318 | ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT); |
318 | ssfdc->erase_size = mtd->erasesize; | 319 | ssfdc->erase_size = mtd->erasesize; |
319 | ssfdc->map_len = mtd->size / mtd->erasesize; | 320 | ssfdc->map_len = (u32)mtd->size / mtd->erasesize; |
320 | 321 | ||
321 | DEBUG(MTD_DEBUG_LEVEL1, | 322 | DEBUG(MTD_DEBUG_LEVEL1, |
322 | "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", | 323 | "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", |
@@ -327,7 +328,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
327 | ssfdc->heads = 16; | 328 | ssfdc->heads = 16; |
328 | ssfdc->sectors = 32; | 329 | ssfdc->sectors = 32; |
329 | get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); | 330 | get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); |
330 | ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) / | 331 | ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) / |
331 | ((long)ssfdc->sectors * (long)ssfdc->heads)); | 332 | ((long)ssfdc->sectors * (long)ssfdc->heads)); |
332 | 333 | ||
333 | DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", | 334 | DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", |
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile new file mode 100644 index 000000000000..c1d501335006 --- /dev/null +++ b/drivers/mtd/tests/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o | ||
2 | obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o | ||
3 | obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o | ||
4 | obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o | ||
5 | obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o | ||
6 | obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o | ||
7 | obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o | ||
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c new file mode 100644 index 000000000000..afbc3f8126db --- /dev/null +++ b/drivers/mtd/tests/mtd_oobtest.c | |||
@@ -0,0 +1,742 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2008 Nokia Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published by | ||
6 | * the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; see the file COPYING. If not, write to the Free Software | ||
15 | * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | * | ||
17 | * Test OOB read and write on MTD device. | ||
18 | * | ||
19 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | ||
20 | */ | ||
21 | |||
22 | #include <asm/div64.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/moduleparam.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/mtd/mtd.h> | ||
28 | #include <linux/sched.h> | ||
29 | |||
30 | #define PRINT_PREF KERN_INFO "mtd_oobtest: " | ||
31 | |||
32 | static int dev; | ||
33 | module_param(dev, int, S_IRUGO); | ||
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | ||
35 | |||
36 | static struct mtd_info *mtd; | ||
37 | static unsigned char *readbuf; | ||
38 | static unsigned char *writebuf; | ||
39 | static unsigned char *bbt; | ||
40 | |||
41 | static int ebcnt; | ||
42 | static int pgcnt; | ||
43 | static int errcnt; | ||
44 | static int use_offset; | ||
45 | static int use_len; | ||
46 | static int use_len_max; | ||
47 | static int vary_offset; | ||
48 | static unsigned long next = 1; | ||
49 | |||
50 | static inline unsigned int simple_rand(void) | ||
51 | { | ||
52 | next = next * 1103515245 + 12345; | ||
53 | return (unsigned int)((next / 65536) % 32768); | ||
54 | } | ||
55 | |||
56 | static inline void simple_srand(unsigned long seed) | ||
57 | { | ||
58 | next = seed; | ||
59 | } | ||
60 | |||
61 | static void set_random_data(unsigned char *buf, size_t len) | ||
62 | { | ||
63 | size_t i; | ||
64 | |||
65 | for (i = 0; i < len; ++i) | ||
66 | buf[i] = simple_rand(); | ||
67 | } | ||
68 | |||
69 | static int erase_eraseblock(int ebnum) | ||
70 | { | ||
71 | int err; | ||
72 | struct erase_info ei; | ||
73 | loff_t addr = ebnum * mtd->erasesize; | ||
74 | |||
75 | memset(&ei, 0, sizeof(struct erase_info)); | ||
76 | ei.mtd = mtd; | ||
77 | ei.addr = addr; | ||
78 | ei.len = mtd->erasesize; | ||
79 | |||
80 | err = mtd->erase(mtd, &ei); | ||
81 | if (err) { | ||
82 | printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); | ||
83 | return err; | ||
84 | } | ||
85 | |||
86 | if (ei.state == MTD_ERASE_FAILED) { | ||
87 | printk(PRINT_PREF "some erase error occurred at EB %d\n", | ||
88 | ebnum); | ||
89 | return -EIO; | ||
90 | } | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static int erase_whole_device(void) | ||
96 | { | ||
97 | int err; | ||
98 | unsigned int i; | ||
99 | |||
100 | printk(PRINT_PREF "erasing whole device\n"); | ||
101 | for (i = 0; i < ebcnt; ++i) { | ||
102 | if (bbt[i]) | ||
103 | continue; | ||
104 | err = erase_eraseblock(i); | ||
105 | if (err) | ||
106 | return err; | ||
107 | cond_resched(); | ||
108 | } | ||
109 | printk(PRINT_PREF "erased %u eraseblocks\n", i); | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static void do_vary_offset(void) | ||
114 | { | ||
115 | use_len -= 1; | ||
116 | if (use_len < 1) { | ||
117 | use_offset += 1; | ||
118 | if (use_offset >= use_len_max) | ||
119 | use_offset = 0; | ||
120 | use_len = use_len_max - use_offset; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | static int write_eraseblock(int ebnum) | ||
125 | { | ||
126 | int i; | ||
127 | struct mtd_oob_ops ops; | ||
128 | int err = 0; | ||
129 | loff_t addr = ebnum * mtd->erasesize; | ||
130 | |||
131 | for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { | ||
132 | set_random_data(writebuf, use_len); | ||
133 | ops.mode = MTD_OOB_AUTO; | ||
134 | ops.len = 0; | ||
135 | ops.retlen = 0; | ||
136 | ops.ooblen = use_len; | ||
137 | ops.oobretlen = 0; | ||
138 | ops.ooboffs = use_offset; | ||
139 | ops.datbuf = 0; | ||
140 | ops.oobbuf = writebuf; | ||
141 | err = mtd->write_oob(mtd, addr, &ops); | ||
142 | if (err || ops.oobretlen != use_len) { | ||
143 | printk(PRINT_PREF "error: writeoob failed at %#llx\n", | ||
144 | (long long)addr); | ||
145 | printk(PRINT_PREF "error: use_len %d, use_offset %d\n", | ||
146 | use_len, use_offset); | ||
147 | errcnt += 1; | ||
148 | return err ? err : -1; | ||
149 | } | ||
150 | if (vary_offset) | ||
151 | do_vary_offset(); | ||
152 | } | ||
153 | |||
154 | return err; | ||
155 | } | ||
156 | |||
157 | static int write_whole_device(void) | ||
158 | { | ||
159 | int err; | ||
160 | unsigned int i; | ||
161 | |||
162 | printk(PRINT_PREF "writing OOBs of whole device\n"); | ||
163 | for (i = 0; i < ebcnt; ++i) { | ||
164 | if (bbt[i]) | ||
165 | continue; | ||
166 | err = write_eraseblock(i); | ||
167 | if (err) | ||
168 | return err; | ||
169 | if (i % 256 == 0) | ||
170 | printk(PRINT_PREF "written up to eraseblock %u\n", i); | ||
171 | cond_resched(); | ||
172 | } | ||
173 | printk(PRINT_PREF "written %u eraseblocks\n", i); | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static int verify_eraseblock(int ebnum) | ||
178 | { | ||
179 | int i; | ||
180 | struct mtd_oob_ops ops; | ||
181 | int err = 0; | ||
182 | loff_t addr = ebnum * mtd->erasesize; | ||
183 | |||
184 | for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { | ||
185 | set_random_data(writebuf, use_len); | ||
186 | ops.mode = MTD_OOB_AUTO; | ||
187 | ops.len = 0; | ||
188 | ops.retlen = 0; | ||
189 | ops.ooblen = use_len; | ||
190 | ops.oobretlen = 0; | ||
191 | ops.ooboffs = use_offset; | ||
192 | ops.datbuf = 0; | ||
193 | ops.oobbuf = readbuf; | ||
194 | err = mtd->read_oob(mtd, addr, &ops); | ||
195 | if (err || ops.oobretlen != use_len) { | ||
196 | printk(PRINT_PREF "error: readoob failed at %#llx\n", | ||
197 | (long long)addr); | ||
198 | errcnt += 1; | ||
199 | return err ? err : -1; | ||
200 | } | ||
201 | if (memcmp(readbuf, writebuf, use_len)) { | ||
202 | printk(PRINT_PREF "error: verify failed at %#llx\n", | ||
203 | (long long)addr); | ||
204 | errcnt += 1; | ||
205 | if (errcnt > 1000) { | ||
206 | printk(PRINT_PREF "error: too many errors\n"); | ||
207 | return -1; | ||
208 | } | ||
209 | } | ||
210 | if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { | ||
211 | int k; | ||
212 | |||
213 | ops.mode = MTD_OOB_AUTO; | ||
214 | ops.len = 0; | ||
215 | ops.retlen = 0; | ||
216 | ops.ooblen = mtd->ecclayout->oobavail; | ||
217 | ops.oobretlen = 0; | ||
218 | ops.ooboffs = 0; | ||
219 | ops.datbuf = 0; | ||
220 | ops.oobbuf = readbuf; | ||
221 | err = mtd->read_oob(mtd, addr, &ops); | ||
222 | if (err || ops.oobretlen != mtd->ecclayout->oobavail) { | ||
223 | printk(PRINT_PREF "error: readoob failed at " | ||
224 | "%#llx\n", (long long)addr); | ||
225 | errcnt += 1; | ||
226 | return err ? err : -1; | ||
227 | } | ||
228 | if (memcmp(readbuf + use_offset, writebuf, use_len)) { | ||
229 | printk(PRINT_PREF "error: verify failed at " | ||
230 | "%#llx\n", (long long)addr); | ||
231 | errcnt += 1; | ||
232 | if (errcnt > 1000) { | ||
233 | printk(PRINT_PREF "error: too many " | ||
234 | "errors\n"); | ||
235 | return -1; | ||
236 | } | ||
237 | } | ||
238 | for (k = 0; k < use_offset; ++k) | ||
239 | if (readbuf[k] != 0xff) { | ||
240 | printk(PRINT_PREF "error: verify 0xff " | ||
241 | "failed at %#llx\n", | ||
242 | (long long)addr); | ||
243 | errcnt += 1; | ||
244 | if (errcnt > 1000) { | ||
245 | printk(PRINT_PREF "error: too " | ||
246 | "many errors\n"); | ||
247 | return -1; | ||
248 | } | ||
249 | } | ||
250 | for (k = use_offset + use_len; | ||
251 | k < mtd->ecclayout->oobavail; ++k) | ||
252 | if (readbuf[k] != 0xff) { | ||
253 | printk(PRINT_PREF "error: verify 0xff " | ||
254 | "failed at %#llx\n", | ||
255 | (long long)addr); | ||
256 | errcnt += 1; | ||
257 | if (errcnt > 1000) { | ||
258 | printk(PRINT_PREF "error: too " | ||
259 | "many errors\n"); | ||
260 | return -1; | ||
261 | } | ||
262 | } | ||
263 | } | ||
264 | if (vary_offset) | ||
265 | do_vary_offset(); | ||
266 | } | ||
267 | return err; | ||
268 | } | ||
269 | |||
270 | static int verify_eraseblock_in_one_go(int ebnum) | ||
271 | { | ||
272 | struct mtd_oob_ops ops; | ||
273 | int err = 0; | ||
274 | loff_t addr = ebnum * mtd->erasesize; | ||
275 | size_t len = mtd->ecclayout->oobavail * pgcnt; | ||
276 | |||
277 | set_random_data(writebuf, len); | ||
278 | ops.mode = MTD_OOB_AUTO; | ||
279 | ops.len = 0; | ||
280 | ops.retlen = 0; | ||
281 | ops.ooblen = len; | ||
282 | ops.oobretlen = 0; | ||
283 | ops.ooboffs = 0; | ||
284 | ops.datbuf = 0; | ||
285 | ops.oobbuf = readbuf; | ||
286 | err = mtd->read_oob(mtd, addr, &ops); | ||
287 | if (err || ops.oobretlen != len) { | ||
288 | printk(PRINT_PREF "error: readoob failed at %#llx\n", | ||
289 | (long long)addr); | ||
290 | errcnt += 1; | ||
291 | return err ? err : -1; | ||
292 | } | ||
293 | if (memcmp(readbuf, writebuf, len)) { | ||
294 | printk(PRINT_PREF "error: verify failed at %#llx\n", | ||
295 | (long long)addr); | ||
296 | errcnt += 1; | ||
297 | if (errcnt > 1000) { | ||
298 | printk(PRINT_PREF "error: too many errors\n"); | ||
299 | return -1; | ||
300 | } | ||
301 | } | ||
302 | |||
303 | return err; | ||
304 | } | ||
305 | |||
306 | static int verify_all_eraseblocks(void) | ||
307 | { | ||
308 | int err; | ||
309 | unsigned int i; | ||
310 | |||
311 | printk(PRINT_PREF "verifying all eraseblocks\n"); | ||
312 | for (i = 0; i < ebcnt; ++i) { | ||
313 | if (bbt[i]) | ||
314 | continue; | ||
315 | err = verify_eraseblock(i); | ||
316 | if (err) | ||
317 | return err; | ||
318 | if (i % 256 == 0) | ||
319 | printk(PRINT_PREF "verified up to eraseblock %u\n", i); | ||
320 | cond_resched(); | ||
321 | } | ||
322 | printk(PRINT_PREF "verified %u eraseblocks\n", i); | ||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | static int is_block_bad(int ebnum) | ||
327 | { | ||
328 | int ret; | ||
329 | loff_t addr = ebnum * mtd->erasesize; | ||
330 | |||
331 | ret = mtd->block_isbad(mtd, addr); | ||
332 | if (ret) | ||
333 | printk(PRINT_PREF "block %d is bad\n", ebnum); | ||
334 | return ret; | ||
335 | } | ||
336 | |||
337 | static int scan_for_bad_eraseblocks(void) | ||
338 | { | ||
339 | int i, bad = 0; | ||
340 | |||
341 | bbt = kmalloc(ebcnt, GFP_KERNEL); | ||
342 | if (!bbt) { | ||
343 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
344 | return -ENOMEM; | ||
345 | } | ||
346 | memset(bbt, 0 , ebcnt); | ||
347 | |||
348 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | ||
349 | for (i = 0; i < ebcnt; ++i) { | ||
350 | bbt[i] = is_block_bad(i) ? 1 : 0; | ||
351 | if (bbt[i]) | ||
352 | bad += 1; | ||
353 | cond_resched(); | ||
354 | } | ||
355 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | static int __init mtd_oobtest_init(void) | ||
360 | { | ||
361 | int err = 0; | ||
362 | unsigned int i; | ||
363 | uint64_t tmp; | ||
364 | struct mtd_oob_ops ops; | ||
365 | loff_t addr = 0, addr0; | ||
366 | |||
367 | printk(KERN_INFO "\n"); | ||
368 | printk(KERN_INFO "=================================================\n"); | ||
369 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
370 | |||
371 | mtd = get_mtd_device(NULL, dev); | ||
372 | if (IS_ERR(mtd)) { | ||
373 | err = PTR_ERR(mtd); | ||
374 | printk(PRINT_PREF "error: cannot get MTD device\n"); | ||
375 | return err; | ||
376 | } | ||
377 | |||
378 | if (mtd->type != MTD_NANDFLASH) { | ||
379 | printk(PRINT_PREF "this test requires NAND flash\n"); | ||
380 | goto out; | ||
381 | } | ||
382 | |||
383 | tmp = mtd->size; | ||
384 | do_div(tmp, mtd->erasesize); | ||
385 | ebcnt = tmp; | ||
386 | pgcnt = mtd->erasesize / mtd->writesize; | ||
387 | |||
388 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | ||
389 | "page size %u, count of eraseblocks %u, pages per " | ||
390 | "eraseblock %u, OOB size %u\n", | ||
391 | (unsigned long long)mtd->size, mtd->erasesize, | ||
392 | mtd->writesize, ebcnt, pgcnt, mtd->oobsize); | ||
393 | |||
394 | err = -ENOMEM; | ||
395 | mtd->erasesize = mtd->erasesize; | ||
396 | readbuf = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
397 | if (!readbuf) { | ||
398 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
399 | goto out; | ||
400 | } | ||
401 | writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
402 | if (!writebuf) { | ||
403 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
404 | goto out; | ||
405 | } | ||
406 | |||
407 | err = scan_for_bad_eraseblocks(); | ||
408 | if (err) | ||
409 | goto out; | ||
410 | |||
411 | use_offset = 0; | ||
412 | use_len = mtd->ecclayout->oobavail; | ||
413 | use_len_max = mtd->ecclayout->oobavail; | ||
414 | vary_offset = 0; | ||
415 | |||
416 | /* First test: write all OOB, read it back and verify */ | ||
417 | printk(PRINT_PREF "test 1 of 5\n"); | ||
418 | |||
419 | err = erase_whole_device(); | ||
420 | if (err) | ||
421 | goto out; | ||
422 | |||
423 | simple_srand(1); | ||
424 | err = write_whole_device(); | ||
425 | if (err) | ||
426 | goto out; | ||
427 | |||
428 | simple_srand(1); | ||
429 | err = verify_all_eraseblocks(); | ||
430 | if (err) | ||
431 | goto out; | ||
432 | |||
433 | /* | ||
434 | * Second test: write all OOB, a block at a time, read it back and | ||
435 | * verify. | ||
436 | */ | ||
437 | printk(PRINT_PREF "test 2 of 5\n"); | ||
438 | |||
439 | err = erase_whole_device(); | ||
440 | if (err) | ||
441 | goto out; | ||
442 | |||
443 | simple_srand(3); | ||
444 | err = write_whole_device(); | ||
445 | if (err) | ||
446 | goto out; | ||
447 | |||
448 | /* Check all eraseblocks */ | ||
449 | simple_srand(3); | ||
450 | printk(PRINT_PREF "verifying all eraseblocks\n"); | ||
451 | for (i = 0; i < ebcnt; ++i) { | ||
452 | if (bbt[i]) | ||
453 | continue; | ||
454 | err = verify_eraseblock_in_one_go(i); | ||
455 | if (err) | ||
456 | goto out; | ||
457 | if (i % 256 == 0) | ||
458 | printk(PRINT_PREF "verified up to eraseblock %u\n", i); | ||
459 | cond_resched(); | ||
460 | } | ||
461 | printk(PRINT_PREF "verified %u eraseblocks\n", i); | ||
462 | |||
463 | /* | ||
464 | * Third test: write OOB at varying offsets and lengths, read it back | ||
465 | * and verify. | ||
466 | */ | ||
467 | printk(PRINT_PREF "test 3 of 5\n"); | ||
468 | |||
469 | err = erase_whole_device(); | ||
470 | if (err) | ||
471 | goto out; | ||
472 | |||
473 | /* Write all eraseblocks */ | ||
474 | use_offset = 0; | ||
475 | use_len = mtd->ecclayout->oobavail; | ||
476 | use_len_max = mtd->ecclayout->oobavail; | ||
477 | vary_offset = 1; | ||
478 | simple_srand(5); | ||
479 | printk(PRINT_PREF "writing OOBs of whole device\n"); | ||
480 | for (i = 0; i < ebcnt; ++i) { | ||
481 | if (bbt[i]) | ||
482 | continue; | ||
483 | err = write_eraseblock(i); | ||
484 | if (err) | ||
485 | goto out; | ||
486 | if (i % 256 == 0) | ||
487 | printk(PRINT_PREF "written up to eraseblock %u\n", i); | ||
488 | cond_resched(); | ||
489 | } | ||
490 | printk(PRINT_PREF "written %u eraseblocks\n", i); | ||
491 | |||
492 | /* Check all eraseblocks */ | ||
493 | use_offset = 0; | ||
494 | use_len = mtd->ecclayout->oobavail; | ||
495 | use_len_max = mtd->ecclayout->oobavail; | ||
496 | vary_offset = 1; | ||
497 | simple_srand(5); | ||
498 | err = verify_all_eraseblocks(); | ||
499 | if (err) | ||
500 | goto out; | ||
501 | |||
502 | use_offset = 0; | ||
503 | use_len = mtd->ecclayout->oobavail; | ||
504 | use_len_max = mtd->ecclayout->oobavail; | ||
505 | vary_offset = 0; | ||
506 | |||
507 | /* Fourth test: try to write off end of device */ | ||
508 | printk(PRINT_PREF "test 4 of 5\n"); | ||
509 | |||
510 | err = erase_whole_device(); | ||
511 | if (err) | ||
512 | goto out; | ||
513 | |||
514 | addr0 = 0; | ||
515 | for (i = 0; bbt[i] && i < ebcnt; ++i) | ||
516 | addr0 += mtd->erasesize; | ||
517 | |||
518 | /* Attempt to write off end of OOB */ | ||
519 | ops.mode = MTD_OOB_AUTO; | ||
520 | ops.len = 0; | ||
521 | ops.retlen = 0; | ||
522 | ops.ooblen = 1; | ||
523 | ops.oobretlen = 0; | ||
524 | ops.ooboffs = mtd->ecclayout->oobavail; | ||
525 | ops.datbuf = 0; | ||
526 | ops.oobbuf = writebuf; | ||
527 | printk(PRINT_PREF "attempting to start write past end of OOB\n"); | ||
528 | printk(PRINT_PREF "an error is expected...\n"); | ||
529 | err = mtd->write_oob(mtd, addr0, &ops); | ||
530 | if (err) { | ||
531 | printk(PRINT_PREF "error occurred as expected\n"); | ||
532 | err = 0; | ||
533 | } else { | ||
534 | printk(PRINT_PREF "error: can write past end of OOB\n"); | ||
535 | errcnt += 1; | ||
536 | } | ||
537 | |||
538 | /* Attempt to read off end of OOB */ | ||
539 | ops.mode = MTD_OOB_AUTO; | ||
540 | ops.len = 0; | ||
541 | ops.retlen = 0; | ||
542 | ops.ooblen = 1; | ||
543 | ops.oobretlen = 0; | ||
544 | ops.ooboffs = mtd->ecclayout->oobavail; | ||
545 | ops.datbuf = 0; | ||
546 | ops.oobbuf = readbuf; | ||
547 | printk(PRINT_PREF "attempting to start read past end of OOB\n"); | ||
548 | printk(PRINT_PREF "an error is expected...\n"); | ||
549 | err = mtd->read_oob(mtd, addr0, &ops); | ||
550 | if (err) { | ||
551 | printk(PRINT_PREF "error occurred as expected\n"); | ||
552 | err = 0; | ||
553 | } else { | ||
554 | printk(PRINT_PREF "error: can read past end of OOB\n"); | ||
555 | errcnt += 1; | ||
556 | } | ||
557 | |||
558 | if (bbt[ebcnt - 1]) | ||
559 | printk(PRINT_PREF "skipping end of device tests because last " | ||
560 | "block is bad\n"); | ||
561 | else { | ||
562 | /* Attempt to write off end of device */ | ||
563 | ops.mode = MTD_OOB_AUTO; | ||
564 | ops.len = 0; | ||
565 | ops.retlen = 0; | ||
566 | ops.ooblen = mtd->ecclayout->oobavail + 1; | ||
567 | ops.oobretlen = 0; | ||
568 | ops.ooboffs = 0; | ||
569 | ops.datbuf = 0; | ||
570 | ops.oobbuf = writebuf; | ||
571 | printk(PRINT_PREF "attempting to write past end of device\n"); | ||
572 | printk(PRINT_PREF "an error is expected...\n"); | ||
573 | err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops); | ||
574 | if (err) { | ||
575 | printk(PRINT_PREF "error occurred as expected\n"); | ||
576 | err = 0; | ||
577 | } else { | ||
578 | printk(PRINT_PREF "error: wrote past end of device\n"); | ||
579 | errcnt += 1; | ||
580 | } | ||
581 | |||
582 | /* Attempt to read off end of device */ | ||
583 | ops.mode = MTD_OOB_AUTO; | ||
584 | ops.len = 0; | ||
585 | ops.retlen = 0; | ||
586 | ops.ooblen = mtd->ecclayout->oobavail + 1; | ||
587 | ops.oobretlen = 0; | ||
588 | ops.ooboffs = 0; | ||
589 | ops.datbuf = 0; | ||
590 | ops.oobbuf = readbuf; | ||
591 | printk(PRINT_PREF "attempting to read past end of device\n"); | ||
592 | printk(PRINT_PREF "an error is expected...\n"); | ||
593 | err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops); | ||
594 | if (err) { | ||
595 | printk(PRINT_PREF "error occurred as expected\n"); | ||
596 | err = 0; | ||
597 | } else { | ||
598 | printk(PRINT_PREF "error: read past end of device\n"); | ||
599 | errcnt += 1; | ||
600 | } | ||
601 | |||
602 | err = erase_eraseblock(ebcnt - 1); | ||
603 | if (err) | ||
604 | goto out; | ||
605 | |||
606 | /* Attempt to write off end of device */ | ||
607 | ops.mode = MTD_OOB_AUTO; | ||
608 | ops.len = 0; | ||
609 | ops.retlen = 0; | ||
610 | ops.ooblen = mtd->ecclayout->oobavail; | ||
611 | ops.oobretlen = 0; | ||
612 | ops.ooboffs = 1; | ||
613 | ops.datbuf = 0; | ||
614 | ops.oobbuf = writebuf; | ||
615 | printk(PRINT_PREF "attempting to write past end of device\n"); | ||
616 | printk(PRINT_PREF "an error is expected...\n"); | ||
617 | err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops); | ||
618 | if (err) { | ||
619 | printk(PRINT_PREF "error occurred as expected\n"); | ||
620 | err = 0; | ||
621 | } else { | ||
622 | printk(PRINT_PREF "error: wrote past end of device\n"); | ||
623 | errcnt += 1; | ||
624 | } | ||
625 | |||
626 | /* Attempt to read off end of device */ | ||
627 | ops.mode = MTD_OOB_AUTO; | ||
628 | ops.len = 0; | ||
629 | ops.retlen = 0; | ||
630 | ops.ooblen = mtd->ecclayout->oobavail; | ||
631 | ops.oobretlen = 0; | ||
632 | ops.ooboffs = 1; | ||
633 | ops.datbuf = 0; | ||
634 | ops.oobbuf = readbuf; | ||
635 | printk(PRINT_PREF "attempting to read past end of device\n"); | ||
636 | printk(PRINT_PREF "an error is expected...\n"); | ||
637 | err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops); | ||
638 | if (err) { | ||
639 | printk(PRINT_PREF "error occurred as expected\n"); | ||
640 | err = 0; | ||
641 | } else { | ||
642 | printk(PRINT_PREF "error: read past end of device\n"); | ||
643 | errcnt += 1; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | /* Fifth test: write / read across block boundaries */ | ||
648 | printk(PRINT_PREF "test 5 of 5\n"); | ||
649 | |||
650 | /* Erase all eraseblocks */ | ||
651 | err = erase_whole_device(); | ||
652 | if (err) | ||
653 | goto out; | ||
654 | |||
655 | /* Write all eraseblocks */ | ||
656 | simple_srand(11); | ||
657 | printk(PRINT_PREF "writing OOBs of whole device\n"); | ||
658 | for (i = 0; i < ebcnt - 1; ++i) { | ||
659 | int cnt = 2; | ||
660 | int pg; | ||
661 | size_t sz = mtd->ecclayout->oobavail; | ||
662 | if (bbt[i] || bbt[i + 1]) | ||
663 | continue; | ||
664 | addr = (i + 1) * mtd->erasesize - mtd->writesize; | ||
665 | for (pg = 0; pg < cnt; ++pg) { | ||
666 | set_random_data(writebuf, sz); | ||
667 | ops.mode = MTD_OOB_AUTO; | ||
668 | ops.len = 0; | ||
669 | ops.retlen = 0; | ||
670 | ops.ooblen = sz; | ||
671 | ops.oobretlen = 0; | ||
672 | ops.ooboffs = 0; | ||
673 | ops.datbuf = 0; | ||
674 | ops.oobbuf = writebuf; | ||
675 | err = mtd->write_oob(mtd, addr, &ops); | ||
676 | if (err) | ||
677 | goto out; | ||
678 | if (i % 256 == 0) | ||
679 | printk(PRINT_PREF "written up to eraseblock " | ||
680 | "%u\n", i); | ||
681 | cond_resched(); | ||
682 | addr += mtd->writesize; | ||
683 | } | ||
684 | } | ||
685 | printk(PRINT_PREF "written %u eraseblocks\n", i); | ||
686 | |||
687 | /* Check all eraseblocks */ | ||
688 | simple_srand(11); | ||
689 | printk(PRINT_PREF "verifying all eraseblocks\n"); | ||
690 | for (i = 0; i < ebcnt - 1; ++i) { | ||
691 | if (bbt[i] || bbt[i + 1]) | ||
692 | continue; | ||
693 | set_random_data(writebuf, mtd->ecclayout->oobavail * 2); | ||
694 | addr = (i + 1) * mtd->erasesize - mtd->writesize; | ||
695 | ops.mode = MTD_OOB_AUTO; | ||
696 | ops.len = 0; | ||
697 | ops.retlen = 0; | ||
698 | ops.ooblen = mtd->ecclayout->oobavail * 2; | ||
699 | ops.oobretlen = 0; | ||
700 | ops.ooboffs = 0; | ||
701 | ops.datbuf = 0; | ||
702 | ops.oobbuf = readbuf; | ||
703 | err = mtd->read_oob(mtd, addr, &ops); | ||
704 | if (err) | ||
705 | goto out; | ||
706 | if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) { | ||
707 | printk(PRINT_PREF "error: verify failed at %#llx\n", | ||
708 | (long long)addr); | ||
709 | errcnt += 1; | ||
710 | if (errcnt > 1000) { | ||
711 | printk(PRINT_PREF "error: too many errors\n"); | ||
712 | goto out; | ||
713 | } | ||
714 | } | ||
715 | if (i % 256 == 0) | ||
716 | printk(PRINT_PREF "verified up to eraseblock %u\n", i); | ||
717 | cond_resched(); | ||
718 | } | ||
719 | printk(PRINT_PREF "verified %u eraseblocks\n", i); | ||
720 | |||
721 | printk(PRINT_PREF "finished with %d errors\n", errcnt); | ||
722 | out: | ||
723 | kfree(bbt); | ||
724 | kfree(writebuf); | ||
725 | kfree(readbuf); | ||
726 | put_mtd_device(mtd); | ||
727 | if (err) | ||
728 | printk(PRINT_PREF "error %d occurred\n", err); | ||
729 | printk(KERN_INFO "=================================================\n"); | ||
730 | return err; | ||
731 | } | ||
732 | module_init(mtd_oobtest_init); | ||
733 | |||
734 | static void __exit mtd_oobtest_exit(void) | ||
735 | { | ||
736 | return; | ||
737 | } | ||
738 | module_exit(mtd_oobtest_exit); | ||
739 | |||
740 | MODULE_DESCRIPTION("Out-of-band test module"); | ||
741 | MODULE_AUTHOR("Adrian Hunter"); | ||
742 | MODULE_LICENSE("GPL"); | ||
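mtd_oobtest leans on the determinism of the small LCG defined at the top of the file: seeding with the same value before writing and again before verifying regenerates the identical byte stream, so the test never needs to keep a reference copy of what it wrote. The following stand-alone user-space snippet (written for illustration only, reusing the same constants) demonstrates that reseed-and-compare idea:

#include <stdio.h>
#include <string.h>

static unsigned long next = 1;

/* Same LCG as the test module: fully determined by the seed. */
static unsigned int simple_rand(void)
{
	next = next * 1103515245 + 12345;
	return (unsigned int)((next / 65536) % 32768);
}

static void simple_srand(unsigned long seed)
{
	next = seed;
}

int main(void)
{
	unsigned char written[64], expected[64];
	size_t i;

	simple_srand(1);			/* seed before "writing" */
	for (i = 0; i < sizeof(written); i++)
		written[i] = (unsigned char)simple_rand();

	simple_srand(1);			/* reseed before verifying */
	for (i = 0; i < sizeof(expected); i++)
		expected[i] = (unsigned char)simple_rand();

	printf("streams %s\n",
	       memcmp(written, expected, sizeof(written)) ? "differ" : "match");
	return 0;
}

In the module itself the target flash is selected with the dev module parameter declared above (module_param(dev, ...)), e.g. loading the module with dev=0 exercises mtd0.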
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c new file mode 100644 index 000000000000..9648818b9e2c --- /dev/null +++ b/drivers/mtd/tests/mtd_pagetest.c | |||
@@ -0,0 +1,632 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2008 Nokia Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published by | ||
6 | * the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; see the file COPYING. If not, write to the Free Software | ||
15 | * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | * | ||
17 | * Test page read and write on MTD device. | ||
18 | * | ||
19 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | ||
20 | */ | ||
21 | |||
22 | #include <asm/div64.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/moduleparam.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/mtd/mtd.h> | ||
28 | #include <linux/sched.h> | ||
29 | |||
30 | #define PRINT_PREF KERN_INFO "mtd_pagetest: " | ||
31 | |||
32 | static int dev; | ||
33 | module_param(dev, int, S_IRUGO); | ||
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | ||
35 | |||
36 | static struct mtd_info *mtd; | ||
37 | static unsigned char *twopages; | ||
38 | static unsigned char *writebuf; | ||
39 | static unsigned char *boundary; | ||
40 | static unsigned char *bbt; | ||
41 | |||
42 | static int pgsize; | ||
43 | static int bufsize; | ||
44 | static int ebcnt; | ||
45 | static int pgcnt; | ||
46 | static int errcnt; | ||
47 | static unsigned long next = 1; | ||
48 | |||
49 | static inline unsigned int simple_rand(void) | ||
50 | { | ||
51 | next = next * 1103515245 + 12345; | ||
52 | return (unsigned int)((next / 65536) % 32768); | ||
53 | } | ||
54 | |||
55 | static inline void simple_srand(unsigned long seed) | ||
56 | { | ||
57 | next = seed; | ||
58 | } | ||
59 | |||
60 | static void set_random_data(unsigned char *buf, size_t len) | ||
61 | { | ||
62 | size_t i; | ||
63 | |||
64 | for (i = 0; i < len; ++i) | ||
65 | buf[i] = simple_rand(); | ||
66 | } | ||
67 | |||
68 | static int erase_eraseblock(int ebnum) | ||
69 | { | ||
70 | int err; | ||
71 | struct erase_info ei; | ||
72 | loff_t addr = ebnum * mtd->erasesize; | ||
73 | |||
74 | memset(&ei, 0, sizeof(struct erase_info)); | ||
75 | ei.mtd = mtd; | ||
76 | ei.addr = addr; | ||
77 | ei.len = mtd->erasesize; | ||
78 | |||
79 | err = mtd->erase(mtd, &ei); | ||
80 | if (err) { | ||
81 | printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); | ||
82 | return err; | ||
83 | } | ||
84 | |||
85 | if (ei.state == MTD_ERASE_FAILED) { | ||
86 | printk(PRINT_PREF "some erase error occurred at EB %d\n", | ||
87 | ebnum); | ||
88 | return -EIO; | ||
89 | } | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static int write_eraseblock(int ebnum) | ||
95 | { | ||
96 | int err = 0; | ||
97 | size_t written = 0; | ||
98 | loff_t addr = ebnum * mtd->erasesize; | ||
99 | |||
100 | set_random_data(writebuf, mtd->erasesize); | ||
101 | cond_resched(); | ||
102 | err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf); | ||
103 | if (err || written != mtd->erasesize) | ||
104 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
105 | (long long)addr); | ||
106 | |||
107 | return err; | ||
108 | } | ||
109 | |||
110 | static int verify_eraseblock(int ebnum) | ||
111 | { | ||
112 | uint32_t j; | ||
113 | size_t read = 0; | ||
114 | int err = 0, i; | ||
115 | loff_t addr0, addrn; | ||
116 | loff_t addr = ebnum * mtd->erasesize; | ||
117 | |||
118 | addr0 = 0; | ||
119 | for (i = 0; bbt[i] && i < ebcnt; ++i) | ||
120 | addr0 += mtd->erasesize; | ||
121 | |||
122 | addrn = mtd->size; | ||
123 | for (i = 0; bbt[ebcnt - i - 1] && i < ebcnt; ++i) | ||
124 | addrn -= mtd->erasesize; | ||
125 | |||
126 | set_random_data(writebuf, mtd->erasesize); | ||
127 | for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) { | ||
128 | /* Do a read to set the internal dataRAMs to different data */ | ||
129 | err = mtd->read(mtd, addr0, bufsize, &read, twopages); | ||
130 | if (err == -EUCLEAN) | ||
131 | err = 0; | ||
132 | if (err || read != bufsize) { | ||
133 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
134 | (long long)addr0); | ||
135 | return err; | ||
136 | } | ||
137 | err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); | ||
138 | if (err == -EUCLEAN) | ||
139 | err = 0; | ||
140 | if (err || read != bufsize) { | ||
141 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
142 | (long long)(addrn - bufsize)); | ||
143 | return err; | ||
144 | } | ||
145 | memset(twopages, 0, bufsize); | ||
146 | read = 0; | ||
147 | err = mtd->read(mtd, addr, bufsize, &read, twopages); | ||
148 | if (err == -EUCLEAN) | ||
149 | err = 0; | ||
150 | if (err || read != bufsize) { | ||
151 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
152 | (long long)addr); | ||
153 | break; | ||
154 | } | ||
155 | if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) { | ||
156 | printk(PRINT_PREF "error: verify failed at %#llx\n", | ||
157 | (long long)addr); | ||
158 | errcnt += 1; | ||
159 | } | ||
160 | } | ||
161 | /* Check boundary between eraseblocks */ | ||
162 | if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) { | ||
163 | unsigned long oldnext = next; | ||
164 | /* Do a read to set the internal dataRAMs to different data */ | ||
165 | err = mtd->read(mtd, addr0, bufsize, &read, twopages); | ||
166 | if (err == -EUCLEAN) | ||
167 | err = 0; | ||
168 | if (err || read != bufsize) { | ||
169 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
170 | (long long)addr0); | ||
171 | return err; | ||
172 | } | ||
173 | err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); | ||
174 | if (err == -EUCLEAN) | ||
175 | err = 0; | ||
176 | if (err || read != bufsize) { | ||
177 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
178 | (long long)(addrn - bufsize)); | ||
179 | return err; | ||
180 | } | ||
181 | memset(twopages, 0, bufsize); | ||
182 | read = 0; | ||
183 | err = mtd->read(mtd, addr, bufsize, &read, twopages); | ||
184 | if (err == -EUCLEAN) | ||
185 | err = 0; | ||
186 | if (err || read != bufsize) { | ||
187 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
188 | (long long)addr); | ||
189 | return err; | ||
190 | } | ||
191 | memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize); | ||
192 | set_random_data(boundary + pgsize, pgsize); | ||
193 | if (memcmp(twopages, boundary, bufsize)) { | ||
194 | printk(PRINT_PREF "error: verify failed at %#llx\n", | ||
195 | (long long)addr); | ||
196 | errcnt += 1; | ||
197 | } | ||
198 | next = oldnext; | ||
199 | } | ||
200 | return err; | ||
201 | } | ||
202 | |||
203 | static int crosstest(void) | ||
204 | { | ||
205 | size_t read = 0; | ||
206 | int err = 0, i; | ||
207 | loff_t addr, addr0, addrn; | ||
208 | unsigned char *pp1, *pp2, *pp3, *pp4; | ||
209 | |||
210 | printk(PRINT_PREF "crosstest\n"); | ||
211 | pp1 = kmalloc(pgsize * 4, GFP_KERNEL); | ||
212 | if (!pp1) { | ||
213 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
214 | return -ENOMEM; | ||
215 | } | ||
216 | pp2 = pp1 + pgsize; | ||
217 | pp3 = pp2 + pgsize; | ||
218 | pp4 = pp3 + pgsize; | ||
219 | memset(pp1, 0, pgsize * 4); | ||
220 | |||
221 | addr0 = 0; | ||
222 | for (i = 0; bbt[i] && i < ebcnt; ++i) | ||
223 | addr0 += mtd->erasesize; | ||
224 | |||
225 | addrn = mtd->size; | ||
226 | for (i = 0; bbt[ebcnt - i - 1] && i < ebcnt; ++i) | ||
227 | addrn -= mtd->erasesize; | ||
228 | |||
229 | /* Read 2nd-to-last page to pp1 */ | ||
230 | read = 0; | ||
231 | addr = addrn - pgsize - pgsize; | ||
232 | err = mtd->read(mtd, addr, pgsize, &read, pp1); | ||
233 | if (err == -EUCLEAN) | ||
234 | err = 0; | ||
235 | if (err || read != pgsize) { | ||
236 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
237 | (long long)addr); | ||
238 | kfree(pp1); | ||
239 | return err; | ||
240 | } | ||
241 | |||
242 | /* Read 3rd-to-last page to pp1 */ | ||
243 | read = 0; | ||
244 | addr = addrn - pgsize - pgsize - pgsize; | ||
245 | err = mtd->read(mtd, addr, pgsize, &read, pp1); | ||
246 | if (err == -EUCLEAN) | ||
247 | err = 0; | ||
248 | if (err || read != pgsize) { | ||
249 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
250 | (long long)addr); | ||
251 | kfree(pp1); | ||
252 | return err; | ||
253 | } | ||
254 | |||
255 | /* Read first page to pp2 */ | ||
256 | read = 0; | ||
257 | addr = addr0; | ||
258 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); | ||
259 | err = mtd->read(mtd, addr, pgsize, &read, pp2); | ||
260 | if (err == -EUCLEAN) | ||
261 | err = 0; | ||
262 | if (err || read != pgsize) { | ||
263 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
264 | (long long)addr); | ||
265 | kfree(pp1); | ||
266 | return err; | ||
267 | } | ||
268 | |||
269 | /* Read last page to pp3 */ | ||
270 | read = 0; | ||
271 | addr = addrn - pgsize; | ||
272 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); | ||
273 | err = mtd->read(mtd, addr, pgsize, &read, pp3); | ||
274 | if (err == -EUCLEAN) | ||
275 | err = 0; | ||
276 | if (err || read != pgsize) { | ||
277 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
278 | (long long)addr); | ||
279 | kfree(pp1); | ||
280 | return err; | ||
281 | } | ||
282 | |||
283 | /* Read first page again to pp4 */ | ||
284 | read = 0; | ||
285 | addr = addr0; | ||
286 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); | ||
287 | err = mtd->read(mtd, addr, pgsize, &read, pp4); | ||
288 | if (err == -EUCLEAN) | ||
289 | err = 0; | ||
290 | if (err || read != pgsize) { | ||
291 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
292 | (long long)addr); | ||
293 | kfree(pp1); | ||
294 | return err; | ||
295 | } | ||
296 | |||
297 | /* pp2 and pp4 should be the same */ | ||
298 | printk(PRINT_PREF "verifying pages read at %#llx match\n", | ||
299 | (long long)addr0); | ||
300 | if (memcmp(pp2, pp4, pgsize)) { | ||
301 | printk(PRINT_PREF "verify failed!\n"); | ||
302 | errcnt += 1; | ||
303 | } else if (!err) | ||
304 | printk(PRINT_PREF "crosstest ok\n"); | ||
305 | kfree(pp1); | ||
306 | return err; | ||
307 | } | ||
308 | |||
309 | static int erasecrosstest(void) | ||
310 | { | ||
311 | size_t read = 0, written = 0; | ||
312 | int err = 0, i, ebnum, ok = 1, ebnum2; | ||
313 | loff_t addr0; | ||
314 | char *readbuf = twopages; | ||
315 | |||
316 | printk(PRINT_PREF "erasecrosstest\n"); | ||
317 | |||
318 | ebnum = 0; | ||
319 | addr0 = 0; | ||
320 | for (i = 0; bbt[i] && i < ebcnt; ++i) { | ||
321 | addr0 += mtd->erasesize; | ||
322 | ebnum += 1; | ||
323 | } | ||
324 | |||
325 | ebnum2 = ebcnt - 1; | ||
326 | while (ebnum2 && bbt[ebnum2]) | ||
327 | ebnum2 -= 1; | ||
328 | |||
329 | printk(PRINT_PREF "erasing block %d\n", ebnum); | ||
330 | err = erase_eraseblock(ebnum); | ||
331 | if (err) | ||
332 | return err; | ||
333 | |||
334 | printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); | ||
335 | set_random_data(writebuf, pgsize); | ||
336 | strcpy(writebuf, "There is no data like this!"); | ||
337 | err = mtd->write(mtd, addr0, pgsize, &written, writebuf); | ||
338 | if (err || written != pgsize) { | ||
339 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
340 | (long long)addr0); | ||
341 | return err ? err : -1; | ||
342 | } | ||
343 | |||
344 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); | ||
345 | memset(readbuf, 0, pgsize); | ||
346 | err = mtd->read(mtd, addr0, pgsize, &read, readbuf); | ||
347 | if (err == -EUCLEAN) | ||
348 | err = 0; | ||
349 | if (err || read != pgsize) { | ||
350 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
351 | (long long)addr0); | ||
352 | return err ? err : -1; | ||
353 | } | ||
354 | |||
355 | printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum); | ||
356 | if (memcmp(writebuf, readbuf, pgsize)) { | ||
357 | printk(PRINT_PREF "verify failed!\n"); | ||
358 | errcnt += 1; | ||
359 | ok = 0; | ||
360 | return err; | ||
361 | } | ||
362 | |||
363 | printk(PRINT_PREF "erasing block %d\n", ebnum); | ||
364 | err = erase_eraseblock(ebnum); | ||
365 | if (err) | ||
366 | return err; | ||
367 | |||
368 | printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); | ||
369 | set_random_data(writebuf, pgsize); | ||
370 | strcpy(writebuf, "There is no data like this!"); | ||
371 | err = mtd->write(mtd, addr0, pgsize, &written, writebuf); | ||
372 | if (err || written != pgsize) { | ||
373 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
374 | (long long)addr0); | ||
375 | return err ? err : -1; | ||
376 | } | ||
377 | |||
378 | printk(PRINT_PREF "erasing block %d\n", ebnum2); | ||
379 | err = erase_eraseblock(ebnum2); | ||
380 | if (err) | ||
381 | return err; | ||
382 | |||
383 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); | ||
384 | memset(readbuf, 0, pgsize); | ||
385 | err = mtd->read(mtd, addr0, pgsize, &read, readbuf); | ||
386 | if (err == -EUCLEAN) | ||
387 | err = 0; | ||
388 | if (err || read != pgsize) { | ||
389 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
390 | (long long)addr0); | ||
391 | return err ? err : -1; | ||
392 | } | ||
393 | |||
394 | printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum); | ||
395 | if (memcmp(writebuf, readbuf, pgsize)) { | ||
396 | printk(PRINT_PREF "verify failed!\n"); | ||
397 | errcnt += 1; | ||
398 | ok = 0; | ||
399 | } | ||
400 | |||
401 | if (ok && !err) | ||
402 | printk(PRINT_PREF "erasecrosstest ok\n"); | ||
403 | return err; | ||
404 | } | ||
405 | |||
406 | static int erasetest(void) | ||
407 | { | ||
408 | size_t read = 0, written = 0; | ||
409 | int err = 0, i, ebnum, ok = 1; | ||
410 | loff_t addr0; | ||
411 | |||
412 | printk(PRINT_PREF "erasetest\n"); | ||
413 | |||
414 | ebnum = 0; | ||
415 | addr0 = 0; | ||
416 | for (i = 0; bbt[i] && i < ebcnt; ++i) { | ||
417 | addr0 += mtd->erasesize; | ||
418 | ebnum += 1; | ||
419 | } | ||
420 | |||
421 | printk(PRINT_PREF "erasing block %d\n", ebnum); | ||
422 | err = erase_eraseblock(ebnum); | ||
423 | if (err) | ||
424 | return err; | ||
425 | |||
426 | printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); | ||
427 | set_random_data(writebuf, pgsize); | ||
428 | err = mtd->write(mtd, addr0, pgsize, &written, writebuf); | ||
429 | if (err || written != pgsize) { | ||
430 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
431 | (long long)addr0); | ||
432 | return err ? err : -1; | ||
433 | } | ||
434 | |||
435 | printk(PRINT_PREF "erasing block %d\n", ebnum); | ||
436 | err = erase_eraseblock(ebnum); | ||
437 | if (err) | ||
438 | return err; | ||
439 | |||
440 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); | ||
441 | err = mtd->read(mtd, addr0, pgsize, &read, twopages); | ||
442 | if (err == -EUCLEAN) | ||
443 | err = 0; | ||
444 | if (err || read != pgsize) { | ||
445 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
446 | (long long)addr0); | ||
447 | return err ? err : -1; | ||
448 | } | ||
449 | |||
450 | printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n", | ||
451 | ebnum); | ||
452 | for (i = 0; i < pgsize; ++i) | ||
453 | if (twopages[i] != 0xff) { | ||
454 | printk(PRINT_PREF "verifying all 0xff failed at %d\n", | ||
455 | i); | ||
456 | errcnt += 1; | ||
457 | ok = 0; | ||
458 | break; | ||
459 | } | ||
460 | |||
461 | if (ok && !err) | ||
462 | printk(PRINT_PREF "erasetest ok\n"); | ||
463 | |||
464 | return err; | ||
465 | } | ||
466 | |||
467 | static int is_block_bad(int ebnum) | ||
468 | { | ||
469 | loff_t addr = ebnum * mtd->erasesize; | ||
470 | int ret; | ||
471 | |||
472 | ret = mtd->block_isbad(mtd, addr); | ||
473 | if (ret) | ||
474 | printk(PRINT_PREF "block %d is bad\n", ebnum); | ||
475 | return ret; | ||
476 | } | ||
477 | |||
478 | static int scan_for_bad_eraseblocks(void) | ||
479 | { | ||
480 | int i, bad = 0; | ||
481 | |||
482 | bbt = kmalloc(ebcnt, GFP_KERNEL); | ||
483 | if (!bbt) { | ||
484 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
485 | return -ENOMEM; | ||
486 | } | ||
487 | memset(bbt, 0 , ebcnt); | ||
488 | |||
489 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | ||
490 | for (i = 0; i < ebcnt; ++i) { | ||
491 | bbt[i] = is_block_bad(i) ? 1 : 0; | ||
492 | if (bbt[i]) | ||
493 | bad += 1; | ||
494 | cond_resched(); | ||
495 | } | ||
496 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); | ||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static int __init mtd_pagetest_init(void) | ||
501 | { | ||
502 | int err = 0; | ||
503 | uint64_t tmp; | ||
504 | uint32_t i; | ||
505 | |||
506 | printk(KERN_INFO "\n"); | ||
507 | printk(KERN_INFO "=================================================\n"); | ||
508 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
509 | |||
510 | mtd = get_mtd_device(NULL, dev); | ||
511 | if (IS_ERR(mtd)) { | ||
512 | err = PTR_ERR(mtd); | ||
513 | printk(PRINT_PREF "error: cannot get MTD device\n"); | ||
514 | return err; | ||
515 | } | ||
516 | |||
517 | if (mtd->type != MTD_NANDFLASH) { | ||
518 | printk(PRINT_PREF "this test requires NAND flash\n"); | ||
519 | goto out; | ||
520 | } | ||
521 | |||
522 | tmp = mtd->size; | ||
523 | do_div(tmp, mtd->erasesize); | ||
524 | ebcnt = tmp; | ||
525 | pgcnt = mtd->erasesize / mtd->writesize; | ||
526 | |||
527 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | ||
528 | "page size %u, count of eraseblocks %u, pages per " | ||
529 | "eraseblock %u, OOB size %u\n", | ||
530 | (unsigned long long)mtd->size, mtd->erasesize, | ||
531 | pgsize, ebcnt, pgcnt, mtd->oobsize); | ||
532 | |||
533 | err = -ENOMEM; | ||
534 | bufsize = pgsize * 2; | ||
535 | writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
536 | if (!writebuf) { | ||
537 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
538 | goto out; | ||
539 | } | ||
540 | twopages = kmalloc(bufsize, GFP_KERNEL); | ||
541 | if (!twopages) { | ||
542 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
543 | goto out; | ||
544 | } | ||
545 | boundary = kmalloc(bufsize, GFP_KERNEL); | ||
546 | if (!boundary) { | ||
547 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
548 | goto out; | ||
549 | } | ||
550 | |||
551 | err = scan_for_bad_eraseblocks(); | ||
552 | if (err) | ||
553 | goto out; | ||
554 | |||
555 | /* Erase all eraseblocks */ | ||
556 | printk(PRINT_PREF "erasing whole device\n"); | ||
557 | for (i = 0; i < ebcnt; ++i) { | ||
558 | if (bbt[i]) | ||
559 | continue; | ||
560 | err = erase_eraseblock(i); | ||
561 | if (err) | ||
562 | goto out; | ||
563 | cond_resched(); | ||
564 | } | ||
565 | printk(PRINT_PREF "erased %u eraseblocks\n", i); | ||
566 | |||
567 | /* Write all eraseblocks */ | ||
568 | simple_srand(1); | ||
569 | printk(PRINT_PREF "writing whole device\n"); | ||
570 | for (i = 0; i < ebcnt; ++i) { | ||
571 | if (bbt[i]) | ||
572 | continue; | ||
573 | err = write_eraseblock(i); | ||
574 | if (err) | ||
575 | goto out; | ||
576 | if (i % 256 == 0) | ||
577 | printk(PRINT_PREF "written up to eraseblock %u\n", i); | ||
578 | cond_resched(); | ||
579 | } | ||
580 | printk(PRINT_PREF "written %u eraseblocks\n", i); | ||
581 | |||
582 | /* Check all eraseblocks */ | ||
583 | simple_srand(1); | ||
584 | printk(PRINT_PREF "verifying all eraseblocks\n"); | ||
585 | for (i = 0; i < ebcnt; ++i) { | ||
586 | if (bbt[i]) | ||
587 | continue; | ||
588 | err = verify_eraseblock(i); | ||
589 | if (err) | ||
590 | goto out; | ||
591 | if (i % 256 == 0) | ||
592 | printk(PRINT_PREF "verified up to eraseblock %u\n", i); | ||
593 | cond_resched(); | ||
594 | } | ||
595 | printk(PRINT_PREF "verified %u eraseblocks\n", i); | ||
596 | |||
597 | err = crosstest(); | ||
598 | if (err) | ||
599 | goto out; | ||
600 | |||
601 | err = erasecrosstest(); | ||
602 | if (err) | ||
603 | goto out; | ||
604 | |||
605 | err = erasetest(); | ||
606 | if (err) | ||
607 | goto out; | ||
608 | |||
609 | printk(PRINT_PREF "finished with %d errors\n", errcnt); | ||
610 | out: | ||
611 | |||
612 | kfree(bbt); | ||
613 | kfree(boundary); | ||
614 | kfree(twopages); | ||
615 | kfree(writebuf); | ||
616 | put_mtd_device(mtd); | ||
617 | if (err) | ||
618 | printk(PRINT_PREF "error %d occurred\n", err); | ||
619 | printk(KERN_INFO "=================================================\n"); | ||
620 | return err; | ||
621 | } | ||
622 | module_init(mtd_pagetest_init); | ||
623 | |||
624 | static void __exit mtd_pagetest_exit(void) | ||
625 | { | ||
626 | return; | ||
627 | } | ||
628 | module_exit(mtd_pagetest_exit); | ||
629 | |||
630 | MODULE_DESCRIPTION("NAND page test"); | ||
631 | MODULE_AUTHOR("Adrian Hunter"); | ||
632 | MODULE_LICENSE("GPL"); | ||
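Like the other test modules, mtd_pagetest computes the eraseblock count with do_div() because mtd->size is 64-bit and a plain '/' on a u64 would pull in libgcc division helpers on 32-bit kernels. The idiom, reduced to a hypothetical helper:

#include <linux/kernel.h>
#include <asm/div64.h>

/* do_div() divides the 64-bit variable in place and returns the remainder,
 * so the quotient is read back out of the variable afterwards. */
static unsigned int count_eraseblocks(uint64_t size, uint32_t erasesize)
{
	uint64_t tmp = size;

	do_div(tmp, erasesize);		/* tmp now holds size / erasesize */
	return (unsigned int)tmp;
}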
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c new file mode 100644 index 000000000000..645e77fdc63d --- /dev/null +++ b/drivers/mtd/tests/mtd_readtest.c | |||
@@ -0,0 +1,253 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2008 Nokia Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published by | ||
6 | * the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; see the file COPYING. If not, write to the Free Software | ||
15 | * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | * | ||
17 | * Check MTD device read. | ||
18 | * | ||
19 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/mtd/mtd.h> | ||
27 | #include <linux/sched.h> | ||
28 | |||
29 | #define PRINT_PREF KERN_INFO "mtd_readtest: " | ||
30 | |||
31 | static int dev; | ||
32 | module_param(dev, int, S_IRUGO); | ||
33 | MODULE_PARM_DESC(dev, "MTD device number to use"); | ||
34 | |||
35 | static struct mtd_info *mtd; | ||
36 | static unsigned char *iobuf; | ||
37 | static unsigned char *iobuf1; | ||
38 | static unsigned char *bbt; | ||
39 | |||
40 | static int pgsize; | ||
41 | static int ebcnt; | ||
42 | static int pgcnt; | ||
43 | |||
44 | static int read_eraseblock_by_page(int ebnum) | ||
45 | { | ||
46 | size_t read = 0; | ||
47 | int i, ret, err = 0; | ||
48 | loff_t addr = ebnum * mtd->erasesize; | ||
49 | void *buf = iobuf; | ||
50 | void *oobbuf = iobuf1; | ||
51 | |||
52 | for (i = 0; i < pgcnt; i++) { | ||
53 | 		memset(buf, 0, pgsize); | ||
54 | ret = mtd->read(mtd, addr, pgsize, &read, buf); | ||
55 | if (ret == -EUCLEAN) | ||
56 | ret = 0; | ||
57 | if (ret || read != pgsize) { | ||
58 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
59 | (long long)addr); | ||
60 | if (!err) | ||
61 | err = ret; | ||
62 | if (!err) | ||
63 | err = -EINVAL; | ||
64 | } | ||
65 | if (mtd->oobsize) { | ||
66 | struct mtd_oob_ops ops; | ||
67 | |||
68 | ops.mode = MTD_OOB_PLACE; | ||
69 | ops.len = 0; | ||
70 | ops.retlen = 0; | ||
71 | ops.ooblen = mtd->oobsize; | ||
72 | ops.oobretlen = 0; | ||
73 | ops.ooboffs = 0; | ||
74 | 			ops.datbuf    = NULL; | ||
75 | ops.oobbuf = oobbuf; | ||
76 | ret = mtd->read_oob(mtd, addr, &ops); | ||
77 | if (ret || ops.oobretlen != mtd->oobsize) { | ||
78 | printk(PRINT_PREF "error: read oob failed at " | ||
79 | "%#llx\n", (long long)addr); | ||
80 | if (!err) | ||
81 | err = ret; | ||
82 | if (!err) | ||
83 | err = -EINVAL; | ||
84 | } | ||
85 | oobbuf += mtd->oobsize; | ||
86 | } | ||
87 | addr += pgsize; | ||
88 | buf += pgsize; | ||
89 | } | ||
90 | |||
91 | return err; | ||
92 | } | ||
93 | |||
94 | static void dump_eraseblock(int ebnum) | ||
95 | { | ||
96 | int i, j, n; | ||
97 | char line[128]; | ||
98 | int pg, oob; | ||
99 | |||
100 | printk(PRINT_PREF "dumping eraseblock %d\n", ebnum); | ||
101 | n = mtd->erasesize; | ||
102 | for (i = 0; i < n;) { | ||
103 | char *p = line; | ||
104 | |||
105 | p += sprintf(p, "%05x: ", i); | ||
106 | for (j = 0; j < 32 && i < n; j++, i++) | ||
107 | p += sprintf(p, "%02x", (unsigned int)iobuf[i]); | ||
108 | printk(KERN_CRIT "%s\n", line); | ||
109 | cond_resched(); | ||
110 | } | ||
111 | if (!mtd->oobsize) | ||
112 | return; | ||
113 | printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum); | ||
114 | n = mtd->oobsize; | ||
115 | for (pg = 0, i = 0; pg < pgcnt; pg++) | ||
116 | for (oob = 0; oob < n;) { | ||
117 | char *p = line; | ||
118 | |||
119 | p += sprintf(p, "%05x: ", i); | ||
120 | for (j = 0; j < 32 && oob < n; j++, oob++, i++) | ||
121 | p += sprintf(p, "%02x", | ||
122 | (unsigned int)iobuf1[i]); | ||
123 | printk(KERN_CRIT "%s\n", line); | ||
124 | cond_resched(); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static int is_block_bad(int ebnum) | ||
129 | { | ||
130 | loff_t addr = ebnum * mtd->erasesize; | ||
131 | int ret; | ||
132 | |||
133 | ret = mtd->block_isbad(mtd, addr); | ||
134 | if (ret) | ||
135 | printk(PRINT_PREF "block %d is bad\n", ebnum); | ||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static int scan_for_bad_eraseblocks(void) | ||
140 | { | ||
141 | int i, bad = 0; | ||
142 | |||
143 | bbt = kmalloc(ebcnt, GFP_KERNEL); | ||
144 | if (!bbt) { | ||
145 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
146 | return -ENOMEM; | ||
147 | } | ||
148 | 	memset(bbt, 0, ebcnt); | ||
149 | |||
150 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | ||
151 | for (i = 0; i < ebcnt; ++i) { | ||
152 | bbt[i] = is_block_bad(i) ? 1 : 0; | ||
153 | if (bbt[i]) | ||
154 | bad += 1; | ||
155 | cond_resched(); | ||
156 | } | ||
157 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static int __init mtd_readtest_init(void) | ||
162 | { | ||
163 | uint64_t tmp; | ||
164 | int err, i; | ||
165 | |||
166 | printk(KERN_INFO "\n"); | ||
167 | printk(KERN_INFO "=================================================\n"); | ||
168 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
169 | |||
170 | mtd = get_mtd_device(NULL, dev); | ||
171 | if (IS_ERR(mtd)) { | ||
172 | err = PTR_ERR(mtd); | ||
173 | 		printk(PRINT_PREF "error: cannot get MTD device\n"); | ||
174 | return err; | ||
175 | } | ||
176 | |||
177 | if (mtd->writesize == 1) { | ||
178 | 		printk(PRINT_PREF "not NAND flash, assuming page size is 512 " | ||
179 | "bytes.\n"); | ||
180 | pgsize = 512; | ||
181 | } else | ||
182 | pgsize = mtd->writesize; | ||
183 | |||
184 | tmp = mtd->size; | ||
185 | do_div(tmp, mtd->erasesize); | ||
186 | ebcnt = tmp; | ||
187 | pgcnt = mtd->erasesize / mtd->writesize; | ||
188 | |||
189 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | ||
190 | "page size %u, count of eraseblocks %u, pages per " | ||
191 | "eraseblock %u, OOB size %u\n", | ||
192 | (unsigned long long)mtd->size, mtd->erasesize, | ||
193 | pgsize, ebcnt, pgcnt, mtd->oobsize); | ||
194 | |||
195 | err = -ENOMEM; | ||
196 | iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
197 | if (!iobuf) { | ||
198 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
199 | goto out; | ||
200 | } | ||
201 | iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
202 | if (!iobuf1) { | ||
203 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
204 | goto out; | ||
205 | } | ||
206 | |||
207 | err = scan_for_bad_eraseblocks(); | ||
208 | if (err) | ||
209 | goto out; | ||
210 | |||
211 | /* Read all eraseblocks 1 page at a time */ | ||
212 | printk(PRINT_PREF "testing page read\n"); | ||
213 | for (i = 0; i < ebcnt; ++i) { | ||
214 | int ret; | ||
215 | |||
216 | if (bbt[i]) | ||
217 | continue; | ||
218 | ret = read_eraseblock_by_page(i); | ||
219 | if (ret) { | ||
220 | dump_eraseblock(i); | ||
221 | if (!err) | ||
222 | err = ret; | ||
223 | } | ||
224 | cond_resched(); | ||
225 | } | ||
226 | |||
227 | if (err) | ||
228 | printk(PRINT_PREF "finished with errors\n"); | ||
229 | else | ||
230 | printk(PRINT_PREF "finished\n"); | ||
231 | |||
232 | out: | ||
233 | |||
234 | kfree(iobuf); | ||
235 | kfree(iobuf1); | ||
236 | kfree(bbt); | ||
237 | put_mtd_device(mtd); | ||
238 | if (err) | ||
239 | printk(PRINT_PREF "error %d occurred\n", err); | ||
240 | printk(KERN_INFO "=================================================\n"); | ||
241 | return err; | ||
242 | } | ||
243 | module_init(mtd_readtest_init); | ||
244 | |||
245 | static void __exit mtd_readtest_exit(void) | ||
246 | { | ||
247 | return; | ||
248 | } | ||
249 | module_exit(mtd_readtest_exit); | ||
250 | |||
251 | MODULE_DESCRIPTION("Read test module"); | ||
252 | MODULE_AUTHOR("Adrian Hunter"); | ||
253 | MODULE_LICENSE("GPL"); | ||
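Each of these modules derives its geometry the same way: the 64-bit mtd->size is divided by the eraseblock size with do_div(), because a plain '/' on a 64-bit value is not guaranteed to be available on 32-bit kernels, and the page count per eraseblock comes from an ordinary 32-bit division. The userspace sketch below walks the same arithmetic with made-up device numbers (a 256 MiB device with 128 KiB eraseblocks and 2 KiB pages); it is an illustration, not part of the patch.

/* Userspace sketch of the geometry math; the device numbers are made up. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 256ULL * 1024 * 1024;	/* hypothetical 256 MiB device */
	unsigned int erasesize = 128 * 1024;	/* hypothetical 128 KiB eraseblock */
	unsigned int writesize = 2048;		/* hypothetical 2 KiB page */

	/*
	 * In the kernel this division is done with do_div() because dividing
	 * a 64-bit value with '/' is not portable to all 32-bit targets.
	 */
	unsigned int ebcnt = (unsigned int)(size / erasesize);
	unsigned int pgcnt = erasesize / writesize;

	printf("eraseblocks %u, pages per eraseblock %u\n", ebcnt, pgcnt);
	return 0;
}

For these numbers the tests would report 2048 eraseblocks and 64 pages per eraseblock, which is what the "MTD device size ..." banner then prints.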
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c new file mode 100644 index 000000000000..141363a7e805 --- /dev/null +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
@@ -0,0 +1,502 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Nokia Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published by | ||
6 | * the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; see the file COPYING. If not, write to the Free Software | ||
15 | * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | * | ||
17 | * Test read and write speed of a MTD device. | ||
18 | * | ||
19 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/mtd/mtd.h> | ||
27 | #include <linux/sched.h> | ||
28 | |||
29 | #define PRINT_PREF KERN_INFO "mtd_speedtest: " | ||
30 | |||
31 | static int dev; | ||
32 | module_param(dev, int, S_IRUGO); | ||
33 | MODULE_PARM_DESC(dev, "MTD device number to use"); | ||
34 | |||
35 | static struct mtd_info *mtd; | ||
36 | static unsigned char *iobuf; | ||
37 | static unsigned char *bbt; | ||
38 | |||
39 | static int pgsize; | ||
40 | static int ebcnt; | ||
41 | static int pgcnt; | ||
42 | static int goodebcnt; | ||
43 | static struct timeval start, finish; | ||
44 | static unsigned long next = 1; | ||
45 | |||
46 | static inline unsigned int simple_rand(void) | ||
47 | { | ||
48 | next = next * 1103515245 + 12345; | ||
49 | return (unsigned int)((next / 65536) % 32768); | ||
50 | } | ||
51 | |||
52 | static inline void simple_srand(unsigned long seed) | ||
53 | { | ||
54 | next = seed; | ||
55 | } | ||
56 | |||
57 | static void set_random_data(unsigned char *buf, size_t len) | ||
58 | { | ||
59 | size_t i; | ||
60 | |||
61 | for (i = 0; i < len; ++i) | ||
62 | buf[i] = simple_rand(); | ||
63 | } | ||
64 | |||
65 | static int erase_eraseblock(int ebnum) | ||
66 | { | ||
67 | int err; | ||
68 | struct erase_info ei; | ||
69 | loff_t addr = ebnum * mtd->erasesize; | ||
70 | |||
71 | memset(&ei, 0, sizeof(struct erase_info)); | ||
72 | ei.mtd = mtd; | ||
73 | ei.addr = addr; | ||
74 | ei.len = mtd->erasesize; | ||
75 | |||
76 | err = mtd->erase(mtd, &ei); | ||
77 | if (err) { | ||
78 | printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); | ||
79 | return err; | ||
80 | } | ||
81 | |||
82 | if (ei.state == MTD_ERASE_FAILED) { | ||
83 | printk(PRINT_PREF "some erase error occurred at EB %d\n", | ||
84 | ebnum); | ||
85 | return -EIO; | ||
86 | } | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static int erase_whole_device(void) | ||
92 | { | ||
93 | int err; | ||
94 | unsigned int i; | ||
95 | |||
96 | for (i = 0; i < ebcnt; ++i) { | ||
97 | if (bbt[i]) | ||
98 | continue; | ||
99 | err = erase_eraseblock(i); | ||
100 | if (err) | ||
101 | return err; | ||
102 | cond_resched(); | ||
103 | } | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int write_eraseblock(int ebnum) | ||
108 | { | ||
109 | size_t written = 0; | ||
110 | int err = 0; | ||
111 | loff_t addr = ebnum * mtd->erasesize; | ||
112 | |||
113 | err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf); | ||
114 | if (err || written != mtd->erasesize) { | ||
115 | printk(PRINT_PREF "error: write failed at %#llx\n", addr); | ||
116 | if (!err) | ||
117 | err = -EINVAL; | ||
118 | } | ||
119 | |||
120 | return err; | ||
121 | } | ||
122 | |||
123 | static int write_eraseblock_by_page(int ebnum) | ||
124 | { | ||
125 | size_t written = 0; | ||
126 | int i, err = 0; | ||
127 | loff_t addr = ebnum * mtd->erasesize; | ||
128 | void *buf = iobuf; | ||
129 | |||
130 | for (i = 0; i < pgcnt; i++) { | ||
131 | err = mtd->write(mtd, addr, pgsize, &written, buf); | ||
132 | if (err || written != pgsize) { | ||
133 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
134 | addr); | ||
135 | if (!err) | ||
136 | err = -EINVAL; | ||
137 | break; | ||
138 | } | ||
139 | addr += pgsize; | ||
140 | buf += pgsize; | ||
141 | } | ||
142 | |||
143 | return err; | ||
144 | } | ||
145 | |||
146 | static int write_eraseblock_by_2pages(int ebnum) | ||
147 | { | ||
148 | size_t written = 0, sz = pgsize * 2; | ||
149 | int i, n = pgcnt / 2, err = 0; | ||
150 | loff_t addr = ebnum * mtd->erasesize; | ||
151 | void *buf = iobuf; | ||
152 | |||
153 | for (i = 0; i < n; i++) { | ||
154 | err = mtd->write(mtd, addr, sz, &written, buf); | ||
155 | if (err || written != sz) { | ||
156 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
157 | addr); | ||
158 | if (!err) | ||
159 | err = -EINVAL; | ||
160 | return err; | ||
161 | } | ||
162 | addr += sz; | ||
163 | buf += sz; | ||
164 | } | ||
165 | if (pgcnt % 2) { | ||
166 | err = mtd->write(mtd, addr, pgsize, &written, buf); | ||
167 | if (err || written != pgsize) { | ||
168 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
169 | addr); | ||
170 | if (!err) | ||
171 | err = -EINVAL; | ||
172 | } | ||
173 | } | ||
174 | |||
175 | return err; | ||
176 | } | ||
177 | |||
178 | static int read_eraseblock(int ebnum) | ||
179 | { | ||
180 | size_t read = 0; | ||
181 | int err = 0; | ||
182 | loff_t addr = ebnum * mtd->erasesize; | ||
183 | |||
184 | err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf); | ||
185 | /* Ignore corrected ECC errors */ | ||
186 | if (err == -EUCLEAN) | ||
187 | err = 0; | ||
188 | if (err || read != mtd->erasesize) { | ||
189 | printk(PRINT_PREF "error: read failed at %#llx\n", addr); | ||
190 | if (!err) | ||
191 | err = -EINVAL; | ||
192 | } | ||
193 | |||
194 | return err; | ||
195 | } | ||
196 | |||
197 | static int read_eraseblock_by_page(int ebnum) | ||
198 | { | ||
199 | size_t read = 0; | ||
200 | int i, err = 0; | ||
201 | loff_t addr = ebnum * mtd->erasesize; | ||
202 | void *buf = iobuf; | ||
203 | |||
204 | for (i = 0; i < pgcnt; i++) { | ||
205 | err = mtd->read(mtd, addr, pgsize, &read, buf); | ||
206 | /* Ignore corrected ECC errors */ | ||
207 | if (err == -EUCLEAN) | ||
208 | err = 0; | ||
209 | if (err || read != pgsize) { | ||
210 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
211 | addr); | ||
212 | if (!err) | ||
213 | err = -EINVAL; | ||
214 | break; | ||
215 | } | ||
216 | addr += pgsize; | ||
217 | buf += pgsize; | ||
218 | } | ||
219 | |||
220 | return err; | ||
221 | } | ||
222 | |||
223 | static int read_eraseblock_by_2pages(int ebnum) | ||
224 | { | ||
225 | size_t read = 0, sz = pgsize * 2; | ||
226 | int i, n = pgcnt / 2, err = 0; | ||
227 | loff_t addr = ebnum * mtd->erasesize; | ||
228 | void *buf = iobuf; | ||
229 | |||
230 | for (i = 0; i < n; i++) { | ||
231 | err = mtd->read(mtd, addr, sz, &read, buf); | ||
232 | /* Ignore corrected ECC errors */ | ||
233 | if (err == -EUCLEAN) | ||
234 | err = 0; | ||
235 | if (err || read != sz) { | ||
236 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
237 | addr); | ||
238 | if (!err) | ||
239 | err = -EINVAL; | ||
240 | return err; | ||
241 | } | ||
242 | addr += sz; | ||
243 | buf += sz; | ||
244 | } | ||
245 | if (pgcnt % 2) { | ||
246 | err = mtd->read(mtd, addr, pgsize, &read, buf); | ||
247 | /* Ignore corrected ECC errors */ | ||
248 | if (err == -EUCLEAN) | ||
249 | err = 0; | ||
250 | if (err || read != pgsize) { | ||
251 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
252 | addr); | ||
253 | if (!err) | ||
254 | err = -EINVAL; | ||
255 | } | ||
256 | } | ||
257 | |||
258 | return err; | ||
259 | } | ||
260 | |||
261 | static int is_block_bad(int ebnum) | ||
262 | { | ||
263 | loff_t addr = ebnum * mtd->erasesize; | ||
264 | int ret; | ||
265 | |||
266 | ret = mtd->block_isbad(mtd, addr); | ||
267 | if (ret) | ||
268 | printk(PRINT_PREF "block %d is bad\n", ebnum); | ||
269 | return ret; | ||
270 | } | ||
271 | |||
272 | static inline void start_timing(void) | ||
273 | { | ||
274 | do_gettimeofday(&start); | ||
275 | } | ||
276 | |||
277 | static inline void stop_timing(void) | ||
278 | { | ||
279 | do_gettimeofday(&finish); | ||
280 | } | ||
281 | |||
282 | static long calc_speed(void) | ||
283 | { | ||
284 | long ms, k, speed; | ||
285 | |||
286 | ms = (finish.tv_sec - start.tv_sec) * 1000 + | ||
287 | (finish.tv_usec - start.tv_usec) / 1000; | ||
288 | k = goodebcnt * mtd->erasesize / 1024; | ||
289 | 	speed = ms ? (k * 1000) / ms : 0; /* avoid dividing by zero */ | ||
290 | return speed; | ||
291 | } | ||
292 | |||
293 | static int scan_for_bad_eraseblocks(void) | ||
294 | { | ||
295 | int i, bad = 0; | ||
296 | |||
297 | bbt = kmalloc(ebcnt, GFP_KERNEL); | ||
298 | if (!bbt) { | ||
299 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
300 | return -ENOMEM; | ||
301 | } | ||
302 | 	memset(bbt, 0, ebcnt); | ||
303 | |||
304 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | ||
305 | for (i = 0; i < ebcnt; ++i) { | ||
306 | bbt[i] = is_block_bad(i) ? 1 : 0; | ||
307 | if (bbt[i]) | ||
308 | bad += 1; | ||
309 | cond_resched(); | ||
310 | } | ||
311 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); | ||
312 | goodebcnt = ebcnt - bad; | ||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | static int __init mtd_speedtest_init(void) | ||
317 | { | ||
318 | int err, i; | ||
319 | long speed; | ||
320 | uint64_t tmp; | ||
321 | |||
322 | printk(KERN_INFO "\n"); | ||
323 | printk(KERN_INFO "=================================================\n"); | ||
324 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
325 | |||
326 | mtd = get_mtd_device(NULL, dev); | ||
327 | if (IS_ERR(mtd)) { | ||
328 | err = PTR_ERR(mtd); | ||
329 | printk(PRINT_PREF "error: cannot get MTD device\n"); | ||
330 | return err; | ||
331 | } | ||
332 | |||
333 | if (mtd->writesize == 1) { | ||
334 | 		printk(PRINT_PREF "not NAND flash, assuming page size is 512 " | ||
335 | "bytes.\n"); | ||
336 | pgsize = 512; | ||
337 | } else | ||
338 | pgsize = mtd->writesize; | ||
339 | |||
340 | tmp = mtd->size; | ||
341 | do_div(tmp, mtd->erasesize); | ||
342 | ebcnt = tmp; | ||
343 | pgcnt = mtd->erasesize / mtd->writesize; | ||
344 | |||
345 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | ||
346 | "page size %u, count of eraseblocks %u, pages per " | ||
347 | "eraseblock %u, OOB size %u\n", | ||
348 | (unsigned long long)mtd->size, mtd->erasesize, | ||
349 | pgsize, ebcnt, pgcnt, mtd->oobsize); | ||
350 | |||
351 | err = -ENOMEM; | ||
352 | iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
353 | if (!iobuf) { | ||
354 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
355 | goto out; | ||
356 | } | ||
357 | |||
358 | simple_srand(1); | ||
359 | set_random_data(iobuf, mtd->erasesize); | ||
360 | |||
361 | err = scan_for_bad_eraseblocks(); | ||
362 | if (err) | ||
363 | goto out; | ||
364 | |||
365 | err = erase_whole_device(); | ||
366 | if (err) | ||
367 | goto out; | ||
368 | |||
369 | /* Write all eraseblocks, 1 eraseblock at a time */ | ||
370 | printk(PRINT_PREF "testing eraseblock write speed\n"); | ||
371 | start_timing(); | ||
372 | for (i = 0; i < ebcnt; ++i) { | ||
373 | if (bbt[i]) | ||
374 | continue; | ||
375 | err = write_eraseblock(i); | ||
376 | if (err) | ||
377 | goto out; | ||
378 | cond_resched(); | ||
379 | } | ||
380 | stop_timing(); | ||
381 | speed = calc_speed(); | ||
382 | printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed); | ||
383 | |||
384 | /* Read all eraseblocks, 1 eraseblock at a time */ | ||
385 | printk(PRINT_PREF "testing eraseblock read speed\n"); | ||
386 | start_timing(); | ||
387 | for (i = 0; i < ebcnt; ++i) { | ||
388 | if (bbt[i]) | ||
389 | continue; | ||
390 | err = read_eraseblock(i); | ||
391 | if (err) | ||
392 | goto out; | ||
393 | cond_resched(); | ||
394 | } | ||
395 | stop_timing(); | ||
396 | speed = calc_speed(); | ||
397 | printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed); | ||
398 | |||
399 | err = erase_whole_device(); | ||
400 | if (err) | ||
401 | goto out; | ||
402 | |||
403 | /* Write all eraseblocks, 1 page at a time */ | ||
404 | printk(PRINT_PREF "testing page write speed\n"); | ||
405 | start_timing(); | ||
406 | for (i = 0; i < ebcnt; ++i) { | ||
407 | if (bbt[i]) | ||
408 | continue; | ||
409 | err = write_eraseblock_by_page(i); | ||
410 | if (err) | ||
411 | goto out; | ||
412 | cond_resched(); | ||
413 | } | ||
414 | stop_timing(); | ||
415 | speed = calc_speed(); | ||
416 | printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed); | ||
417 | |||
418 | /* Read all eraseblocks, 1 page at a time */ | ||
419 | printk(PRINT_PREF "testing page read speed\n"); | ||
420 | start_timing(); | ||
421 | for (i = 0; i < ebcnt; ++i) { | ||
422 | if (bbt[i]) | ||
423 | continue; | ||
424 | err = read_eraseblock_by_page(i); | ||
425 | if (err) | ||
426 | goto out; | ||
427 | cond_resched(); | ||
428 | } | ||
429 | stop_timing(); | ||
430 | speed = calc_speed(); | ||
431 | printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed); | ||
432 | |||
433 | err = erase_whole_device(); | ||
434 | if (err) | ||
435 | goto out; | ||
436 | |||
437 | /* Write all eraseblocks, 2 pages at a time */ | ||
438 | printk(PRINT_PREF "testing 2 page write speed\n"); | ||
439 | start_timing(); | ||
440 | for (i = 0; i < ebcnt; ++i) { | ||
441 | if (bbt[i]) | ||
442 | continue; | ||
443 | err = write_eraseblock_by_2pages(i); | ||
444 | if (err) | ||
445 | goto out; | ||
446 | cond_resched(); | ||
447 | } | ||
448 | stop_timing(); | ||
449 | speed = calc_speed(); | ||
450 | printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed); | ||
451 | |||
452 | /* Read all eraseblocks, 2 pages at a time */ | ||
453 | printk(PRINT_PREF "testing 2 page read speed\n"); | ||
454 | start_timing(); | ||
455 | for (i = 0; i < ebcnt; ++i) { | ||
456 | if (bbt[i]) | ||
457 | continue; | ||
458 | err = read_eraseblock_by_2pages(i); | ||
459 | if (err) | ||
460 | goto out; | ||
461 | cond_resched(); | ||
462 | } | ||
463 | stop_timing(); | ||
464 | speed = calc_speed(); | ||
465 | printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed); | ||
466 | |||
467 | /* Erase all eraseblocks */ | ||
468 | 	printk(PRINT_PREF "testing erase speed\n"); | ||
469 | start_timing(); | ||
470 | for (i = 0; i < ebcnt; ++i) { | ||
471 | if (bbt[i]) | ||
472 | continue; | ||
473 | err = erase_eraseblock(i); | ||
474 | if (err) | ||
475 | goto out; | ||
476 | cond_resched(); | ||
477 | } | ||
478 | stop_timing(); | ||
479 | speed = calc_speed(); | ||
480 | printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); | ||
481 | |||
482 | printk(PRINT_PREF "finished\n"); | ||
483 | out: | ||
484 | kfree(iobuf); | ||
485 | kfree(bbt); | ||
486 | put_mtd_device(mtd); | ||
487 | if (err) | ||
488 | printk(PRINT_PREF "error %d occurred\n", err); | ||
489 | printk(KERN_INFO "=================================================\n"); | ||
490 | return err; | ||
491 | } | ||
492 | module_init(mtd_speedtest_init); | ||
493 | |||
494 | static void __exit mtd_speedtest_exit(void) | ||
495 | { | ||
496 | return; | ||
497 | } | ||
498 | module_exit(mtd_speedtest_exit); | ||
499 | |||
500 | MODULE_DESCRIPTION("Speed test module"); | ||
501 | MODULE_AUTHOR("Adrian Hunter"); | ||
502 | MODULE_LICENSE("GPL"); | ||
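calc_speed() above converts each timed interval into KiB/s: elapsed time in milliseconds, the amount of data in KiB (good eraseblocks times eraseblock size divided by 1024), then KiB * 1000 / ms. The userspace sketch below walks the same arithmetic with made-up numbers; it is an illustration, not part of the patch.

/* Userspace sketch of calc_speed() with made-up numbers. */
#include <stdio.h>

int main(void)
{
	long sec = 3, usec = 250000;		/* hypothetical elapsed 3.25 s */
	long goodebcnt = 2000;			/* hypothetical good eraseblocks */
	long erasesize = 128 * 1024;		/* hypothetical 128 KiB eraseblock */
	long ms, k, speed;

	ms = sec * 1000 + usec / 1000;		/* 3250 ms */
	k = goodebcnt * erasesize / 1024;	/* data moved, in KiB: 256000 */
	speed = ms ? (k * 1000) / ms : 0;	/* KiB per second */
	printf("%ld KiB/s\n", speed);
	return 0;
}

With 2000 good 128 KiB eraseblocks moved in 3.25 s, that is 256000 KiB in 3250 ms, so the sketch prints 78769 KiB/s (roughly 77 MiB/s).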
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c new file mode 100644 index 000000000000..63920476b57a --- /dev/null +++ b/drivers/mtd/tests/mtd_stresstest.c | |||
@@ -0,0 +1,330 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2008 Nokia Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published by | ||
6 | * the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; see the file COPYING. If not, write to the Free Software | ||
15 | * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | * | ||
17 | * Test random reads, writes and erases on MTD device. | ||
18 | * | ||
19 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/mtd/mtd.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/vmalloc.h> | ||
29 | |||
30 | #define PRINT_PREF KERN_INFO "mtd_stresstest: " | ||
31 | |||
32 | static int dev; | ||
33 | module_param(dev, int, S_IRUGO); | ||
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | ||
35 | |||
36 | static int count = 10000; | ||
37 | module_param(count, int, S_IRUGO); | ||
38 | MODULE_PARM_DESC(count, "Number of operations to do (default is 10000)"); | ||
39 | |||
40 | static struct mtd_info *mtd; | ||
41 | static unsigned char *writebuf; | ||
42 | static unsigned char *readbuf; | ||
43 | static unsigned char *bbt; | ||
44 | static int *offsets; | ||
45 | |||
46 | static int pgsize; | ||
47 | static int bufsize; | ||
48 | static int ebcnt; | ||
49 | static int pgcnt; | ||
50 | static unsigned long next = 1; | ||
51 | |||
52 | static inline unsigned int simple_rand(void) | ||
53 | { | ||
54 | next = next * 1103515245 + 12345; | ||
55 | return (unsigned int)((next / 65536) % 32768); | ||
56 | } | ||
57 | |||
58 | static inline void simple_srand(unsigned long seed) | ||
59 | { | ||
60 | next = seed; | ||
61 | } | ||
62 | |||
63 | static int rand_eb(void) | ||
64 | { | ||
65 | int eb; | ||
66 | |||
67 | again: | ||
68 | if (ebcnt < 32768) | ||
69 | eb = simple_rand(); | ||
70 | else | ||
71 | eb = (simple_rand() << 15) | simple_rand(); | ||
72 | 	/* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */ | ||
73 | eb %= (ebcnt - 1); | ||
74 | if (bbt[eb]) | ||
75 | goto again; | ||
76 | return eb; | ||
77 | } | ||
78 | |||
79 | static int rand_offs(void) | ||
80 | { | ||
81 | int offs; | ||
82 | |||
83 | if (bufsize < 32768) | ||
84 | offs = simple_rand(); | ||
85 | else | ||
86 | offs = (simple_rand() << 15) | simple_rand(); | ||
87 | offs %= bufsize; | ||
88 | return offs; | ||
89 | } | ||
90 | |||
91 | static int rand_len(int offs) | ||
92 | { | ||
93 | int len; | ||
94 | |||
95 | if (bufsize < 32768) | ||
96 | len = simple_rand(); | ||
97 | else | ||
98 | len = (simple_rand() << 15) | simple_rand(); | ||
99 | len %= (bufsize - offs); | ||
100 | return len; | ||
101 | } | ||
102 | |||
103 | static int erase_eraseblock(int ebnum) | ||
104 | { | ||
105 | int err; | ||
106 | struct erase_info ei; | ||
107 | loff_t addr = ebnum * mtd->erasesize; | ||
108 | |||
109 | memset(&ei, 0, sizeof(struct erase_info)); | ||
110 | ei.mtd = mtd; | ||
111 | ei.addr = addr; | ||
112 | ei.len = mtd->erasesize; | ||
113 | |||
114 | err = mtd->erase(mtd, &ei); | ||
115 | if (unlikely(err)) { | ||
116 | printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); | ||
117 | return err; | ||
118 | } | ||
119 | |||
120 | if (unlikely(ei.state == MTD_ERASE_FAILED)) { | ||
121 | printk(PRINT_PREF "some erase error occurred at EB %d\n", | ||
122 | ebnum); | ||
123 | return -EIO; | ||
124 | } | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static int is_block_bad(int ebnum) | ||
130 | { | ||
131 | loff_t addr = ebnum * mtd->erasesize; | ||
132 | int ret; | ||
133 | |||
134 | ret = mtd->block_isbad(mtd, addr); | ||
135 | if (ret) | ||
136 | printk(PRINT_PREF "block %d is bad\n", ebnum); | ||
137 | return ret; | ||
138 | } | ||
139 | |||
140 | static int do_read(void) | ||
141 | { | ||
142 | size_t read = 0; | ||
143 | int eb = rand_eb(); | ||
144 | int offs = rand_offs(); | ||
145 | int len = rand_len(offs), err; | ||
146 | loff_t addr; | ||
147 | |||
148 | if (bbt[eb + 1]) { | ||
149 | if (offs >= mtd->erasesize) | ||
150 | offs -= mtd->erasesize; | ||
151 | if (offs + len > mtd->erasesize) | ||
152 | len = mtd->erasesize - offs; | ||
153 | } | ||
154 | addr = eb * mtd->erasesize + offs; | ||
155 | err = mtd->read(mtd, addr, len, &read, readbuf); | ||
156 | if (err == -EUCLEAN) | ||
157 | err = 0; | ||
158 | if (unlikely(err || read != len)) { | ||
159 | printk(PRINT_PREF "error: read failed at 0x%llx\n", | ||
160 | (long long)addr); | ||
161 | if (!err) | ||
162 | err = -EINVAL; | ||
163 | return err; | ||
164 | } | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static int do_write(void) | ||
169 | { | ||
170 | int eb = rand_eb(), offs, err, len; | ||
171 | size_t written = 0; | ||
172 | loff_t addr; | ||
173 | |||
174 | offs = offsets[eb]; | ||
175 | if (offs >= mtd->erasesize) { | ||
176 | err = erase_eraseblock(eb); | ||
177 | if (err) | ||
178 | return err; | ||
179 | offs = offsets[eb] = 0; | ||
180 | } | ||
181 | len = rand_len(offs); | ||
182 | len = ((len + pgsize - 1) / pgsize) * pgsize; | ||
183 | if (offs + len > mtd->erasesize) { | ||
184 | if (bbt[eb + 1]) | ||
185 | len = mtd->erasesize - offs; | ||
186 | else { | ||
187 | err = erase_eraseblock(eb + 1); | ||
188 | if (err) | ||
189 | return err; | ||
190 | offsets[eb + 1] = 0; | ||
191 | } | ||
192 | } | ||
193 | addr = eb * mtd->erasesize + offs; | ||
194 | err = mtd->write(mtd, addr, len, &written, writebuf); | ||
195 | if (unlikely(err || written != len)) { | ||
196 | printk(PRINT_PREF "error: write failed at 0x%llx\n", | ||
197 | (long long)addr); | ||
198 | if (!err) | ||
199 | err = -EINVAL; | ||
200 | return err; | ||
201 | } | ||
202 | offs += len; | ||
203 | while (offs > mtd->erasesize) { | ||
204 | offsets[eb++] = mtd->erasesize; | ||
205 | offs -= mtd->erasesize; | ||
206 | } | ||
207 | offsets[eb] = offs; | ||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static int do_operation(void) | ||
212 | { | ||
213 | if (simple_rand() & 1) | ||
214 | return do_read(); | ||
215 | else | ||
216 | return do_write(); | ||
217 | } | ||
218 | |||
219 | static int scan_for_bad_eraseblocks(void) | ||
220 | { | ||
221 | int i, bad = 0; | ||
222 | |||
223 | bbt = kmalloc(ebcnt, GFP_KERNEL); | ||
224 | if (!bbt) { | ||
225 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
226 | return -ENOMEM; | ||
227 | } | ||
228 | 	memset(bbt, 0, ebcnt); | ||
229 | |||
230 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | ||
231 | for (i = 0; i < ebcnt; ++i) { | ||
232 | bbt[i] = is_block_bad(i) ? 1 : 0; | ||
233 | if (bbt[i]) | ||
234 | bad += 1; | ||
235 | cond_resched(); | ||
236 | } | ||
237 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); | ||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static int __init mtd_stresstest_init(void) | ||
242 | { | ||
243 | int err; | ||
244 | int i, op; | ||
245 | uint64_t tmp; | ||
246 | |||
247 | printk(KERN_INFO "\n"); | ||
248 | printk(KERN_INFO "=================================================\n"); | ||
249 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
250 | |||
251 | mtd = get_mtd_device(NULL, dev); | ||
252 | if (IS_ERR(mtd)) { | ||
253 | err = PTR_ERR(mtd); | ||
254 | printk(PRINT_PREF "error: cannot get MTD device\n"); | ||
255 | return err; | ||
256 | } | ||
257 | |||
258 | if (mtd->writesize == 1) { | ||
259 | 		printk(PRINT_PREF "not NAND flash, assuming page size is 512 " | ||
260 | "bytes.\n"); | ||
261 | pgsize = 512; | ||
262 | } else | ||
263 | pgsize = mtd->writesize; | ||
264 | |||
265 | tmp = mtd->size; | ||
266 | do_div(tmp, mtd->erasesize); | ||
267 | ebcnt = tmp; | ||
268 | pgcnt = mtd->erasesize / mtd->writesize; | ||
269 | |||
270 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | ||
271 | "page size %u, count of eraseblocks %u, pages per " | ||
272 | "eraseblock %u, OOB size %u\n", | ||
273 | (unsigned long long)mtd->size, mtd->erasesize, | ||
274 | pgsize, ebcnt, pgcnt, mtd->oobsize); | ||
275 | |||
276 | 	/* Read or write up to 2 eraseblocks at a time */ | ||
277 | bufsize = mtd->erasesize * 2; | ||
278 | |||
279 | err = -ENOMEM; | ||
280 | readbuf = vmalloc(bufsize); | ||
281 | writebuf = vmalloc(bufsize); | ||
282 | offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL); | ||
283 | if (!readbuf || !writebuf || !offsets) { | ||
284 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
285 | goto out; | ||
286 | } | ||
287 | for (i = 0; i < ebcnt; i++) | ||
288 | offsets[i] = mtd->erasesize; | ||
289 | simple_srand(current->pid); | ||
290 | for (i = 0; i < bufsize; i++) | ||
291 | writebuf[i] = simple_rand(); | ||
292 | |||
293 | err = scan_for_bad_eraseblocks(); | ||
294 | if (err) | ||
295 | goto out; | ||
296 | |||
297 | /* Do operations */ | ||
298 | printk(PRINT_PREF "doing operations\n"); | ||
299 | for (op = 0; op < count; op++) { | ||
300 | if ((op & 1023) == 0) | ||
301 | printk(PRINT_PREF "%d operations done\n", op); | ||
302 | err = do_operation(); | ||
303 | if (err) | ||
304 | goto out; | ||
305 | cond_resched(); | ||
306 | } | ||
307 | printk(PRINT_PREF "finished, %d operations done\n", op); | ||
308 | |||
309 | out: | ||
310 | kfree(offsets); | ||
311 | kfree(bbt); | ||
312 | vfree(writebuf); | ||
313 | vfree(readbuf); | ||
314 | put_mtd_device(mtd); | ||
315 | if (err) | ||
316 | printk(PRINT_PREF "error %d occurred\n", err); | ||
317 | printk(KERN_INFO "=================================================\n"); | ||
318 | return err; | ||
319 | } | ||
320 | module_init(mtd_stresstest_init); | ||
321 | |||
322 | static void __exit mtd_stresstest_exit(void) | ||
323 | { | ||
324 | return; | ||
325 | } | ||
326 | module_exit(mtd_stresstest_exit); | ||
327 | |||
328 | MODULE_DESCRIPTION("Stress test module"); | ||
329 | MODULE_AUTHOR("Adrian Hunter"); | ||
330 | MODULE_LICENSE("GPL"); | ||
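The stress test needs random values larger than the 15 bits simple_rand() produces, since eraseblock numbers, offsets and lengths can exceed 32767, so rand_eb(), rand_offs() and rand_len() widen the generator by concatenating two draws: (simple_rand() << 15) | simple_rand() yields 30 bits before the modulo. The sketch below shows the same widening trick in userspace; rand_upto() is a hypothetical helper that collapses the three kernel functions into one and is not part of the patch.

/* Userspace sketch of how the stress test widens its 15-bit PRNG output. */
#include <stdio.h>

static unsigned long next = 1;

static unsigned int simple_rand(void)
{
	next = next * 1103515245 + 12345;
	return (unsigned int)((next / 65536) % 32768);	/* 0..32767 */
}

/* Hypothetical helper standing in for rand_eb()/rand_offs()/rand_len(). */
static int rand_upto(int limit)
{
	int r;

	if (limit < 32768)
		r = simple_rand();				/* 15 bits suffice */
	else
		r = (simple_rand() << 15) | simple_rand();	/* 30 bits */
	return r % limit;
}

int main(void)
{
	printf("%d %d %d\n",
	       rand_upto(100), rand_upto(65536), rand_upto(1 << 20));
	return 0;
}

Thirty bits comfortably cover bufsize, which is two eraseblocks, and typical eraseblock counts, which is all the stress test ever asks of the generator.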
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c new file mode 100644 index 000000000000..5b889724268e --- /dev/null +++ b/drivers/mtd/tests/mtd_subpagetest.c | |||
@@ -0,0 +1,525 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2007 Nokia Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published by | ||
6 | * the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; see the file COPYING. If not, write to the Free Software | ||
15 | * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | * | ||
17 | * Test sub-page read and write on MTD device. | ||
18 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/mtd/mtd.h> | ||
27 | #include <linux/sched.h> | ||
28 | |||
29 | #define PRINT_PREF KERN_INFO "mtd_subpagetest: " | ||
30 | |||
31 | static int dev; | ||
32 | module_param(dev, int, S_IRUGO); | ||
33 | MODULE_PARM_DESC(dev, "MTD device number to use"); | ||
34 | |||
35 | static struct mtd_info *mtd; | ||
36 | static unsigned char *writebuf; | ||
37 | static unsigned char *readbuf; | ||
38 | static unsigned char *bbt; | ||
39 | |||
40 | static int subpgsize; | ||
41 | static int bufsize; | ||
42 | static int ebcnt; | ||
43 | static int pgcnt; | ||
44 | static int errcnt; | ||
45 | static unsigned long next = 1; | ||
46 | |||
47 | static inline unsigned int simple_rand(void) | ||
48 | { | ||
49 | next = next * 1103515245 + 12345; | ||
50 | return (unsigned int)((next / 65536) % 32768); | ||
51 | } | ||
52 | |||
53 | static inline void simple_srand(unsigned long seed) | ||
54 | { | ||
55 | next = seed; | ||
56 | } | ||
57 | |||
58 | static void set_random_data(unsigned char *buf, size_t len) | ||
59 | { | ||
60 | size_t i; | ||
61 | |||
62 | for (i = 0; i < len; ++i) | ||
63 | buf[i] = simple_rand(); | ||
64 | } | ||
65 | |||
66 | static inline void clear_data(unsigned char *buf, size_t len) | ||
67 | { | ||
68 | memset(buf, 0, len); | ||
69 | } | ||
70 | |||
71 | static int erase_eraseblock(int ebnum) | ||
72 | { | ||
73 | int err; | ||
74 | struct erase_info ei; | ||
75 | loff_t addr = ebnum * mtd->erasesize; | ||
76 | |||
77 | memset(&ei, 0, sizeof(struct erase_info)); | ||
78 | ei.mtd = mtd; | ||
79 | ei.addr = addr; | ||
80 | ei.len = mtd->erasesize; | ||
81 | |||
82 | err = mtd->erase(mtd, &ei); | ||
83 | if (err) { | ||
84 | printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); | ||
85 | return err; | ||
86 | } | ||
87 | |||
88 | if (ei.state == MTD_ERASE_FAILED) { | ||
89 | printk(PRINT_PREF "some erase error occurred at EB %d\n", | ||
90 | ebnum); | ||
91 | return -EIO; | ||
92 | } | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int erase_whole_device(void) | ||
98 | { | ||
99 | int err; | ||
100 | unsigned int i; | ||
101 | |||
102 | printk(PRINT_PREF "erasing whole device\n"); | ||
103 | for (i = 0; i < ebcnt; ++i) { | ||
104 | if (bbt[i]) | ||
105 | continue; | ||
106 | err = erase_eraseblock(i); | ||
107 | if (err) | ||
108 | return err; | ||
109 | cond_resched(); | ||
110 | } | ||
111 | printk(PRINT_PREF "erased %u eraseblocks\n", i); | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static int write_eraseblock(int ebnum) | ||
116 | { | ||
117 | size_t written = 0; | ||
118 | int err = 0; | ||
119 | loff_t addr = ebnum * mtd->erasesize; | ||
120 | |||
121 | set_random_data(writebuf, subpgsize); | ||
122 | err = mtd->write(mtd, addr, subpgsize, &written, writebuf); | ||
123 | if (unlikely(err || written != subpgsize)) { | ||
124 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
125 | (long long)addr); | ||
126 | if (written != subpgsize) { | ||
127 | printk(PRINT_PREF " write size: %#x\n", subpgsize); | ||
128 | printk(PRINT_PREF " written: %#zx\n", written); | ||
129 | } | ||
130 | return err ? err : -1; | ||
131 | } | ||
132 | |||
133 | addr += subpgsize; | ||
134 | |||
135 | set_random_data(writebuf, subpgsize); | ||
136 | err = mtd->write(mtd, addr, subpgsize, &written, writebuf); | ||
137 | if (unlikely(err || written != subpgsize)) { | ||
138 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
139 | (long long)addr); | ||
140 | if (written != subpgsize) { | ||
141 | printk(PRINT_PREF " write size: %#x\n", subpgsize); | ||
142 | printk(PRINT_PREF " written: %#zx\n", written); | ||
143 | } | ||
144 | return err ? err : -1; | ||
145 | } | ||
146 | |||
147 | return err; | ||
148 | } | ||
149 | |||
150 | static int write_eraseblock2(int ebnum) | ||
151 | { | ||
152 | size_t written = 0; | ||
153 | int err = 0, k; | ||
154 | loff_t addr = ebnum * mtd->erasesize; | ||
155 | |||
156 | for (k = 1; k < 33; ++k) { | ||
157 | if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize) | ||
158 | break; | ||
159 | set_random_data(writebuf, subpgsize * k); | ||
160 | err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf); | ||
161 | if (unlikely(err || written != subpgsize * k)) { | ||
162 | printk(PRINT_PREF "error: write failed at %#llx\n", | ||
163 | (long long)addr); | ||
164 | 			if (written != subpgsize * k) { | ||
165 | printk(PRINT_PREF " write size: %#x\n", | ||
166 | subpgsize * k); | ||
167 | printk(PRINT_PREF " written: %#08zx\n", | ||
168 | written); | ||
169 | } | ||
170 | return err ? err : -1; | ||
171 | } | ||
172 | addr += subpgsize * k; | ||
173 | } | ||
174 | |||
175 | return err; | ||
176 | } | ||
177 | |||
178 | static void print_subpage(unsigned char *p) | ||
179 | { | ||
180 | int i, j; | ||
181 | |||
182 | for (i = 0; i < subpgsize; ) { | ||
183 | for (j = 0; i < subpgsize && j < 32; ++i, ++j) | ||
184 | printk("%02x", *p++); | ||
185 | printk("\n"); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | static int verify_eraseblock(int ebnum) | ||
190 | { | ||
191 | size_t read = 0; | ||
192 | int err = 0; | ||
193 | loff_t addr = ebnum * mtd->erasesize; | ||
194 | |||
195 | set_random_data(writebuf, subpgsize); | ||
196 | clear_data(readbuf, subpgsize); | ||
197 | read = 0; | ||
198 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); | ||
199 | if (unlikely(err || read != subpgsize)) { | ||
200 | if (err == -EUCLEAN && read == subpgsize) { | ||
201 | printk(PRINT_PREF "ECC correction at %#llx\n", | ||
202 | (long long)addr); | ||
203 | err = 0; | ||
204 | } else { | ||
205 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
206 | (long long)addr); | ||
207 | return err ? err : -1; | ||
208 | } | ||
209 | } | ||
210 | if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { | ||
211 | printk(PRINT_PREF "error: verify failed at %#llx\n", | ||
212 | (long long)addr); | ||
213 | printk(PRINT_PREF "------------- written----------------\n"); | ||
214 | print_subpage(writebuf); | ||
215 | printk(PRINT_PREF "------------- read ------------------\n"); | ||
216 | print_subpage(readbuf); | ||
217 | printk(PRINT_PREF "-------------------------------------\n"); | ||
218 | errcnt += 1; | ||
219 | } | ||
220 | |||
221 | addr += subpgsize; | ||
222 | |||
223 | set_random_data(writebuf, subpgsize); | ||
224 | clear_data(readbuf, subpgsize); | ||
225 | read = 0; | ||
226 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); | ||
227 | if (unlikely(err || read != subpgsize)) { | ||
228 | if (err == -EUCLEAN && read == subpgsize) { | ||
229 | printk(PRINT_PREF "ECC correction at %#llx\n", | ||
230 | (long long)addr); | ||
231 | err = 0; | ||
232 | } else { | ||
233 | printk(PRINT_PREF "error: read failed at %#llx\n", | ||
234 | (long long)addr); | ||
235 | return err ? err : -1; | ||
236 | } | ||
237 | } | ||
238 | if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { | ||
239 | printk(PRINT_PREF "error: verify failed at %#llx\n", | ||
240 | (long long)addr); | ||
241 | printk(PRINT_PREF "------------- written----------------\n"); | ||
242 | print_subpage(writebuf); | ||
243 | printk(PRINT_PREF "------------- read ------------------\n"); | ||
244 | print_subpage(readbuf); | ||
245 | printk(PRINT_PREF "-------------------------------------\n"); | ||
246 | errcnt += 1; | ||
247 | } | ||
248 | |||
249 | return err; | ||
250 | } | ||
251 | |||
252 | static int verify_eraseblock2(int ebnum) | ||
253 | { | ||
254 | size_t read = 0; | ||
255 | int err = 0, k; | ||
256 | loff_t addr = ebnum * mtd->erasesize; | ||
257 | |||
258 | for (k = 1; k < 33; ++k) { | ||
259 | if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize) | ||
260 | break; | ||
261 | set_random_data(writebuf, subpgsize * k); | ||
262 | clear_data(readbuf, subpgsize * k); | ||
263 | read = 0; | ||
264 | err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf); | ||
265 | if (unlikely(err || read != subpgsize * k)) { | ||
266 | if (err == -EUCLEAN && read == subpgsize * k) { | ||
267 | printk(PRINT_PREF "ECC correction at %#llx\n", | ||
268 | (long long)addr); | ||
269 | err = 0; | ||
270 | } else { | ||
271 | printk(PRINT_PREF "error: read failed at " | ||
272 | "%#llx\n", (long long)addr); | ||
273 | return err ? err : -1; | ||
274 | } | ||
275 | } | ||
276 | if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) { | ||
277 | printk(PRINT_PREF "error: verify failed at %#llx\n", | ||
278 | (long long)addr); | ||
279 | errcnt += 1; | ||
280 | } | ||
281 | addr += subpgsize * k; | ||
282 | } | ||
283 | |||
284 | return err; | ||
285 | } | ||
286 | |||
287 | static int verify_eraseblock_ff(int ebnum) | ||
288 | { | ||
289 | uint32_t j; | ||
290 | size_t read = 0; | ||
291 | int err = 0; | ||
292 | loff_t addr = ebnum * mtd->erasesize; | ||
293 | |||
294 | memset(writebuf, 0xff, subpgsize); | ||
295 | for (j = 0; j < mtd->erasesize / subpgsize; ++j) { | ||
296 | clear_data(readbuf, subpgsize); | ||
297 | read = 0; | ||
298 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); | ||
299 | if (unlikely(err || read != subpgsize)) { | ||
300 | if (err == -EUCLEAN && read == subpgsize) { | ||
301 | printk(PRINT_PREF "ECC correction at %#llx\n", | ||
302 | (long long)addr); | ||
303 | err = 0; | ||
304 | } else { | ||
305 | printk(PRINT_PREF "error: read failed at " | ||
306 | "%#llx\n", (long long)addr); | ||
307 | return err ? err : -1; | ||
308 | } | ||
309 | } | ||
310 | if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { | ||
311 | printk(PRINT_PREF "error: verify 0xff failed at " | ||
312 | "%#llx\n", (long long)addr); | ||
313 | errcnt += 1; | ||
314 | } | ||
315 | addr += subpgsize; | ||
316 | } | ||
317 | |||
318 | return err; | ||
319 | } | ||
320 | |||
321 | static int verify_all_eraseblocks_ff(void) | ||
322 | { | ||
323 | int err; | ||
324 | unsigned int i; | ||
325 | |||
326 | printk(PRINT_PREF "verifying all eraseblocks for 0xff\n"); | ||
327 | for (i = 0; i < ebcnt; ++i) { | ||
328 | if (bbt[i]) | ||
329 | continue; | ||
330 | err = verify_eraseblock_ff(i); | ||
331 | if (err) | ||
332 | return err; | ||
333 | if (i % 256 == 0) | ||
334 | printk(PRINT_PREF "verified up to eraseblock %u\n", i); | ||
335 | cond_resched(); | ||
336 | } | ||
337 | printk(PRINT_PREF "verified %u eraseblocks\n", i); | ||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | static int is_block_bad(int ebnum) | ||
342 | { | ||
343 | loff_t addr = ebnum * mtd->erasesize; | ||
344 | int ret; | ||
345 | |||
346 | ret = mtd->block_isbad(mtd, addr); | ||
347 | if (ret) | ||
348 | printk(PRINT_PREF "block %d is bad\n", ebnum); | ||
349 | return ret; | ||
350 | } | ||
351 | |||
352 | static int scan_for_bad_eraseblocks(void) | ||
353 | { | ||
354 | int i, bad = 0; | ||
355 | |||
356 | bbt = kmalloc(ebcnt, GFP_KERNEL); | ||
357 | if (!bbt) { | ||
358 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
359 | return -ENOMEM; | ||
360 | } | ||
361 | 	memset(bbt, 0, ebcnt); | ||
362 | |||
363 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | ||
364 | for (i = 0; i < ebcnt; ++i) { | ||
365 | bbt[i] = is_block_bad(i) ? 1 : 0; | ||
366 | if (bbt[i]) | ||
367 | bad += 1; | ||
368 | cond_resched(); | ||
369 | } | ||
370 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); | ||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static int __init mtd_subpagetest_init(void) | ||
375 | { | ||
376 | int err = 0; | ||
377 | uint32_t i; | ||
378 | uint64_t tmp; | ||
379 | |||
380 | printk(KERN_INFO "\n"); | ||
381 | printk(KERN_INFO "=================================================\n"); | ||
382 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
383 | |||
384 | mtd = get_mtd_device(NULL, dev); | ||
385 | if (IS_ERR(mtd)) { | ||
386 | err = PTR_ERR(mtd); | ||
387 | printk(PRINT_PREF "error: cannot get MTD device\n"); | ||
388 | return err; | ||
389 | } | ||
390 | |||
391 | if (mtd->type != MTD_NANDFLASH) { | ||
392 | printk(PRINT_PREF "this test requires NAND flash\n"); | ||
393 | goto out; | ||
394 | } | ||
395 | |||
396 | 	subpgsize = mtd->writesize >> mtd->subpage_sft; | ||
397 | 	tmp = mtd->size; | ||
398 | 	do_div(tmp, mtd->erasesize); | ||
399 | 	ebcnt = tmp; | ||
400 | 	pgcnt = mtd->erasesize / mtd->writesize; | ||
401 | |||
402 | 	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | ||
403 | 	       "page size %u, subpage size %u, count of eraseblocks %u, " | ||
404 | 	       "pages per eraseblock %u, OOB size %u\n", | ||
405 | 	       (unsigned long long)mtd->size, mtd->erasesize, | ||
406 | 	       mtd->writesize, subpgsize, ebcnt, pgcnt, mtd->oobsize); | ||
407 | |||
408 | 	err = -ENOMEM; | ||
409 | 	bufsize = subpgsize * 32; | ||
410 | 	writebuf = kmalloc(bufsize, GFP_KERNEL); | ||
411 | 	if (!writebuf) { | ||
412 | 		printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
413 | 		goto out; | ||
414 | 	} | ||
415 | 	readbuf = kmalloc(bufsize, GFP_KERNEL); | ||
416 | 	if (!readbuf) { | ||
417 | 		printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
418 | 		goto out; | ||
419 | 	} | ||
420 | |||
421 | err = scan_for_bad_eraseblocks(); | ||
422 | if (err) | ||
423 | goto out; | ||
424 | |||
425 | err = erase_whole_device(); | ||
426 | if (err) | ||
427 | goto out; | ||
428 | |||
429 | printk(PRINT_PREF "writing whole device\n"); | ||
430 | simple_srand(1); | ||
431 | for (i = 0; i < ebcnt; ++i) { | ||
432 | if (bbt[i]) | ||
433 | continue; | ||
434 | err = write_eraseblock(i); | ||
435 | if (unlikely(err)) | ||
436 | goto out; | ||
437 | if (i % 256 == 0) | ||
438 | printk(PRINT_PREF "written up to eraseblock %u\n", i); | ||
439 | cond_resched(); | ||
440 | } | ||
441 | printk(PRINT_PREF "written %u eraseblocks\n", i); | ||
442 | |||
443 | simple_srand(1); | ||
444 | printk(PRINT_PREF "verifying all eraseblocks\n"); | ||
445 | for (i = 0; i < ebcnt; ++i) { | ||
446 | if (bbt[i]) | ||
447 | continue; | ||
448 | err = verify_eraseblock(i); | ||
449 | if (unlikely(err)) | ||
450 | goto out; | ||
451 | if (i % 256 == 0) | ||
452 | printk(PRINT_PREF "verified up to eraseblock %u\n", i); | ||
453 | cond_resched(); | ||
454 | } | ||
455 | printk(PRINT_PREF "verified %u eraseblocks\n", i); | ||
456 | |||
457 | err = erase_whole_device(); | ||
458 | if (err) | ||
459 | goto out; | ||
460 | |||
461 | err = verify_all_eraseblocks_ff(); | ||
462 | if (err) | ||
463 | goto out; | ||
464 | |||
465 | /* Write all eraseblocks */ | ||
466 | simple_srand(3); | ||
467 | printk(PRINT_PREF "writing whole device\n"); | ||
468 | for (i = 0; i < ebcnt; ++i) { | ||
469 | if (bbt[i]) | ||
470 | continue; | ||
471 | err = write_eraseblock2(i); | ||
472 | if (unlikely(err)) | ||
473 | goto out; | ||
474 | if (i % 256 == 0) | ||
475 | printk(PRINT_PREF "written up to eraseblock %u\n", i); | ||
476 | cond_resched(); | ||
477 | } | ||
478 | printk(PRINT_PREF "written %u eraseblocks\n", i); | ||
479 | |||
480 | /* Check all eraseblocks */ | ||
481 | simple_srand(3); | ||
482 | printk(PRINT_PREF "verifying all eraseblocks\n"); | ||
483 | for (i = 0; i < ebcnt; ++i) { | ||
484 | if (bbt[i]) | ||
485 | continue; | ||
486 | err = verify_eraseblock2(i); | ||
487 | if (unlikely(err)) | ||
488 | goto out; | ||
489 | if (i % 256 == 0) | ||
490 | printk(PRINT_PREF "verified up to eraseblock %u\n", i); | ||
491 | cond_resched(); | ||
492 | } | ||
493 | printk(PRINT_PREF "verified %u eraseblocks\n", i); | ||
494 | |||
495 | err = erase_whole_device(); | ||
496 | if (err) | ||
497 | goto out; | ||
498 | |||
499 | err = verify_all_eraseblocks_ff(); | ||
500 | if (err) | ||
501 | goto out; | ||
502 | |||
503 | printk(PRINT_PREF "finished with %d errors\n", errcnt); | ||
504 | |||
505 | out: | ||
506 | kfree(bbt); | ||
507 | kfree(readbuf); | ||
508 | kfree(writebuf); | ||
509 | put_mtd_device(mtd); | ||
510 | if (err) | ||
511 | printk(PRINT_PREF "error %d occurred\n", err); | ||
512 | printk(KERN_INFO "=================================================\n"); | ||
513 | return err; | ||
514 | } | ||
515 | module_init(mtd_subpagetest_init); | ||
516 | |||
517 | static void __exit mtd_subpagetest_exit(void) | ||
518 | { | ||
519 | return; | ||
520 | } | ||
521 | module_exit(mtd_subpagetest_exit); | ||
522 | |||
523 | MODULE_DESCRIPTION("Subpage test module"); | ||
524 | MODULE_AUTHOR("Adrian Hunter"); | ||
525 | MODULE_LICENSE("GPL"); | ||
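The subpage test derives its unit of work by shifting the page size right by mtd->subpage_sft and sizes its buffers for the largest burst write_eraseblock2() issues (32 subpages). The sketch below shows that arithmetic in userspace with made-up values (a 2048-byte page split into four subpages); it is an illustration, not part of the patch.

/* Userspace sketch of the subpage arithmetic; the numbers are made up. */
#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048;		/* hypothetical NAND page size */
	unsigned int subpage_sft = 2;		/* hypothetical: 4 subpages per page */
	unsigned int subpgsize = writesize >> subpage_sft;	/* 512 bytes */
	unsigned int bufsize = subpgsize * 32;	/* largest chunk write_eraseblock2() writes */

	printf("subpage %u bytes, buffer %u bytes\n", subpgsize, bufsize);
	return 0;
}

For these values the test would operate on 512-byte subpages and allocate 16 KiB read and write buffers.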
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c new file mode 100644 index 000000000000..631a0ab3a33c --- /dev/null +++ b/drivers/mtd/tests/mtd_torturetest.c | |||
@@ -0,0 +1,530 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2008 Artem Bityutskiy | ||
3 | * Copyright (C) 2006-2008 Jarkko Lavinen | ||
4 | * Copyright (C) 2006-2008 Adrian Hunter | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; see the file COPYING. If not, write to the Free Software | ||
17 | * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | * | ||
19 |  * Authors: Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter | ||
20 | * | ||
21 | * WARNING: this test program may kill your flash and your device. Do not | ||
22 |  * use it unless you know what you are doing. The authors are not responsible for any | ||
23 | * damage caused by this program. | ||
24 | */ | ||
25 | |||
26 | #include <linux/init.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/moduleparam.h> | ||
29 | #include <linux/err.h> | ||
30 | #include <linux/mtd/mtd.h> | ||
31 | #include <linux/sched.h> | ||
32 | |||
33 | #define PRINT_PREF KERN_INFO "mtd_torturetest: " | ||
34 | #define RETRIES 3 | ||
35 | |||
36 | static int eb = 8; | ||
37 | module_param(eb, int, S_IRUGO); | ||
38 | MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device"); | ||
39 | |||
40 | static int ebcnt = 32; | ||
41 | module_param(ebcnt, int, S_IRUGO); | ||
42 | MODULE_PARM_DESC(ebcnt, "number of consecutive eraseblocks to torture"); | ||
43 | |||
44 | static int pgcnt; | ||
45 | module_param(pgcnt, int, S_IRUGO); | ||
46 | MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)"); | ||
47 | |||
48 | static int dev; | ||
49 | module_param(dev, int, S_IRUGO); | ||
50 | MODULE_PARM_DESC(dev, "MTD device number to use"); | ||
51 | |||
52 | static int gran = 512; | ||
53 | module_param(gran, int, S_IRUGO); | ||
54 | MODULE_PARM_DESC(gran, "how often the status information should be printed"); | ||
55 | |||
56 | static int check = 1; | ||
57 | module_param(check, int, S_IRUGO); | ||
58 | MODULE_PARM_DESC(check, "if the written data should be checked"); | ||
59 | |||
60 | static unsigned int cycles_count; | ||
61 | module_param(cycles_count, uint, S_IRUGO); | ||
62 | MODULE_PARM_DESC(cycles_count, "how many erase cycles to do " | ||
63 | "(infinite by default)"); | ||
64 | |||
65 | static struct mtd_info *mtd; | ||
66 | |||
67 | /* This buffer contains 0x555555...0xAAAAAA... pattern */ | ||
68 | static unsigned char *patt_5A5; | ||
69 | /* This buffer contains 0xAAAAAA...0x555555... pattern */ | ||
70 | static unsigned char *patt_A5A; | ||
71 | /* This buffer contains all 0xFF bytes */ | ||
72 | static unsigned char *patt_FF; | ||
73 | /* This is a temporary buffer used when checking data */ | ||
74 | static unsigned char *check_buf; | ||
75 | /* How many erase cycles were done */ | ||
76 | static unsigned int erase_cycles; | ||
77 | |||
78 | static int pgsize; | ||
79 | static struct timeval start, finish; | ||
80 | |||
81 | static void report_corrupt(unsigned char *read, unsigned char *written); | ||
82 | |||
83 | static inline void start_timing(void) | ||
84 | { | ||
85 | do_gettimeofday(&start); | ||
86 | } | ||
87 | |||
88 | static inline void stop_timing(void) | ||
89 | { | ||
90 | do_gettimeofday(&finish); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * Erase eraseblock number @ebnum. | ||
95 | */ | ||
96 | static inline int erase_eraseblock(int ebnum) | ||
97 | { | ||
98 | int err; | ||
99 | struct erase_info ei; | ||
100 | loff_t addr = ebnum * mtd->erasesize; | ||
101 | |||
102 | memset(&ei, 0, sizeof(struct erase_info)); | ||
103 | ei.mtd = mtd; | ||
104 | ei.addr = addr; | ||
105 | ei.len = mtd->erasesize; | ||
106 | |||
107 | err = mtd->erase(mtd, &ei); | ||
108 | if (err) { | ||
109 | printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); | ||
110 | return err; | ||
111 | } | ||
112 | |||
113 | if (ei.state == MTD_ERASE_FAILED) { | ||
114 | printk(PRINT_PREF "some erase error occurred at EB %d\n", | ||
115 | ebnum); | ||
116 | return -EIO; | ||
117 | } | ||
118 | |||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 |  * Check that the contents of eraseblock number @ebnum are equivalent to the | ||
124 | * @buf buffer. | ||
125 | */ | ||
126 | static inline int check_eraseblock(int ebnum, unsigned char *buf) | ||
127 | { | ||
128 | int err, retries = 0; | ||
129 | size_t read = 0; | ||
130 | loff_t addr = ebnum * mtd->erasesize; | ||
131 | size_t len = mtd->erasesize; | ||
132 | |||
133 | if (pgcnt) { | ||
134 | addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize; | ||
135 | len = pgcnt * pgsize; | ||
136 | } | ||
137 | |||
138 | retry: | ||
139 | err = mtd->read(mtd, addr, len, &read, check_buf); | ||
140 | if (err == -EUCLEAN) | ||
141 | printk(PRINT_PREF "single bit flip occurred at EB %d, " | ||
142 | "MTD reported that it was fixed.\n", ebnum); | ||
143 | else if (err) { | ||
144 | printk(PRINT_PREF "error %d while reading EB %d, " | ||
145 | "read %zd\n", err, ebnum, read); | ||
146 | return err; | ||
147 | } | ||
148 | |||
149 | if (read != len) { | ||
150 | printk(PRINT_PREF "failed to read %zd bytes from EB %d, " | ||
151 | "read only %zd, but no error reported\n", | ||
152 | len, ebnum, read); | ||
153 | return -EIO; | ||
154 | } | ||
155 | |||
156 | if (memcmp(buf, check_buf, len)) { | ||
157 | printk(PRINT_PREF "read wrong data from EB %d\n", ebnum); | ||
158 | report_corrupt(check_buf, buf); | ||
159 | |||
160 | if (retries++ < RETRIES) { | ||
161 | /* Try read again */ | ||
162 | yield(); | ||
163 | printk(PRINT_PREF "re-try reading data from EB %d\n", | ||
164 | ebnum); | ||
165 | goto retry; | ||
166 | } else { | ||
167 | printk(PRINT_PREF "retried %d times, still errors, " | ||
168 | "give-up\n", RETRIES); | ||
169 | return -EINVAL; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | if (retries != 0) | ||
174 | printk(PRINT_PREF "only attempt number %d was OK (!!!)\n", | ||
175 | retries); | ||
176 | |||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | static inline int write_pattern(int ebnum, void *buf) | ||
181 | { | ||
182 | int err; | ||
183 | size_t written = 0; | ||
184 | loff_t addr = ebnum * mtd->erasesize; | ||
185 | size_t len = mtd->erasesize; | ||
186 | |||
187 | if (pgcnt) { | ||
188 | addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize; | ||
189 | len = pgcnt * pgsize; | ||
190 | } | ||
191 | err = mtd->write(mtd, addr, len, &written, buf); | ||
192 | if (err) { | ||
193 | printk(PRINT_PREF "error %d while writing EB %d, written %zd" | ||
194 | " bytes\n", err, ebnum, written); | ||
195 | return err; | ||
196 | } | ||
197 | if (written != len) { | ||
198 | printk(PRINT_PREF "written only %zd bytes of %zd, but no error" | ||
199 | " reported\n", written, len); | ||
200 | return -EIO; | ||
201 | } | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int __init tort_init(void) | ||
207 | { | ||
208 | int err = 0, i, infinite = !cycles_count; | ||
209 | int bad_ebs[ebcnt]; | ||
210 | |||
211 | printk(KERN_INFO "\n"); | ||
212 | printk(KERN_INFO "=================================================\n"); | ||
213 | printk(PRINT_PREF "Warning: this program is trying to wear out your " | ||
214 | "flash, stop it if this is not wanted.\n"); | ||
215 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
216 | printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n", | ||
217 | ebcnt, eb, eb + ebcnt - 1, dev); | ||
218 | if (pgcnt) | ||
219 | printk(PRINT_PREF "torturing just %d pages per eraseblock\n", | ||
220 | pgcnt); | ||
221 | printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled"); | ||
222 | |||
223 | mtd = get_mtd_device(NULL, dev); | ||
224 | if (IS_ERR(mtd)) { | ||
225 | err = PTR_ERR(mtd); | ||
226 | printk(PRINT_PREF "error: cannot get MTD device\n"); | ||
227 | return err; | ||
228 | } | ||
229 | |||
230 | if (mtd->writesize == 1) { | ||
231 | printk(PRINT_PREF "not NAND flash, assume page size is 512 " | ||
232 | "bytes.\n"); | ||
233 | pgsize = 512; | ||
234 | } else | ||
235 | pgsize = mtd->writesize; | ||
236 | |||
237 | if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) { | ||
238 | printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt); | ||
239 | goto out_mtd; | ||
240 | } | ||
241 | |||
242 | err = -ENOMEM; | ||
243 | patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
244 | if (!patt_5A5) { | ||
245 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
246 | goto out_mtd; | ||
247 | } | ||
248 | |||
249 | patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
250 | if (!patt_A5A) { | ||
251 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
252 | goto out_patt_5A5; | ||
253 | } | ||
254 | |||
255 | patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
256 | if (!patt_FF) { | ||
257 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
258 | goto out_patt_A5A; | ||
259 | } | ||
260 | |||
261 | check_buf = kmalloc(mtd->erasesize, GFP_KERNEL); | ||
262 | if (!check_buf) { | ||
263 | printk(PRINT_PREF "error: cannot allocate memory\n"); | ||
264 | goto out_patt_FF; | ||
265 | } | ||
266 | |||
267 | err = 0; | ||
268 | |||
269 | /* Initialize patterns */ | ||
270 | memset(patt_FF, 0xFF, mtd->erasesize); | ||
271 | for (i = 0; i < mtd->erasesize / pgsize; i++) { | ||
272 | if (!(i & 1)) { | ||
273 | memset(patt_5A5 + i * pgsize, 0x55, pgsize); | ||
274 | memset(patt_A5A + i * pgsize, 0xAA, pgsize); | ||
275 | } else { | ||
276 | memset(patt_5A5 + i * pgsize, 0xAA, pgsize); | ||
277 | memset(patt_A5A + i * pgsize, 0x55, pgsize); | ||
278 | } | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * Check if there is a bad eraseblock among those we are going to test. | ||
283 | */ | ||
284 | memset(&bad_ebs[0], 0, sizeof(int) * ebcnt); | ||
285 | if (mtd->block_isbad) { | ||
286 | for (i = eb; i < eb + ebcnt; i++) { | ||
287 | err = mtd->block_isbad(mtd, | ||
288 | (loff_t)i * mtd->erasesize); | ||
289 | |||
290 | if (err < 0) { | ||
291 | printk(PRINT_PREF "block_isbad() returned %d " | ||
292 | "for EB %d\n", err, i); | ||
293 | goto out; | ||
294 | } | ||
295 | |||
296 | if (err) { | ||
297 | printk("EB %d is bad. Skip it.\n", i); | ||
298 | bad_ebs[i - eb] = 1; | ||
299 | } | ||
300 | } | ||
301 | } | ||
302 | |||
303 | start_timing(); | ||
304 | while (1) { | ||
305 | int i; | ||
306 | void *patt; | ||
307 | |||
308 | /* Erase all eraseblocks */ | ||
309 | for (i = eb; i < eb + ebcnt; i++) { | ||
310 | if (bad_ebs[i - eb]) | ||
311 | continue; | ||
312 | err = erase_eraseblock(i); | ||
313 | if (err) | ||
314 | goto out; | ||
315 | cond_resched(); | ||
316 | } | ||
317 | |||
318 | /* Check if the eraseblocks contain only 0xFF bytes */ | ||
319 | if (check) { | ||
320 | for (i = eb; i < eb + ebcnt; i++) { | ||
321 | if (bad_ebs[i - eb]) | ||
322 | continue; | ||
323 | err = check_eraseblock(i, patt_FF); | ||
324 | if (err) { | ||
325 | printk(PRINT_PREF "verify failed" | ||
326 | " for 0xFF... pattern\n"); | ||
327 | goto out; | ||
328 | } | ||
329 | cond_resched(); | ||
330 | } | ||
331 | } | ||
332 | |||
333 | /* Write the pattern */ | ||
334 | for (i = eb; i < eb + ebcnt; i++) { | ||
335 | if (bad_ebs[i - eb]) | ||
336 | continue; | ||
337 | if ((eb + erase_cycles) & 1) | ||
338 | patt = patt_5A5; | ||
339 | else | ||
340 | patt = patt_A5A; | ||
341 | err = write_pattern(i, patt); | ||
342 | if (err) | ||
343 | goto out; | ||
344 | cond_resched(); | ||
345 | } | ||
346 | |||
347 | /* Verify what we wrote */ | ||
348 | if (check) { | ||
349 | for (i = eb; i < eb + ebcnt; i++) { | ||
350 | if (bad_ebs[i - eb]) | ||
351 | continue; | ||
352 | if ((eb + erase_cycles) & 1) | ||
353 | patt = patt_5A5; | ||
354 | else | ||
355 | patt = patt_A5A; | ||
356 | err = check_eraseblock(i, patt); | ||
357 | if (err) { | ||
358 | printk(PRINT_PREF "verify failed for %s" | ||
359 | " pattern\n", | ||
360 | ((eb + erase_cycles) & 1) ? | ||
361 | "0x55AA55..." : "0xAA55AA..."); | ||
362 | goto out; | ||
363 | } | ||
364 | cond_resched(); | ||
365 | } | ||
366 | } | ||
367 | |||
368 | erase_cycles += 1; | ||
369 | |||
370 | if (erase_cycles % gran == 0) { | ||
371 | long ms; | ||
372 | |||
373 | stop_timing(); | ||
374 | ms = (finish.tv_sec - start.tv_sec) * 1000 + | ||
375 | (finish.tv_usec - start.tv_usec) / 1000; | ||
376 | printk(PRINT_PREF "%08u erase cycles done, took %lu " | ||
377 | "milliseconds (%lu seconds)\n", | ||
378 | erase_cycles, ms, ms / 1000); | ||
379 | start_timing(); | ||
380 | } | ||
381 | |||
382 | if (!infinite && --cycles_count == 0) | ||
383 | break; | ||
384 | } | ||
385 | out: | ||
386 | |||
387 | printk(PRINT_PREF "finished after %u erase cycles\n", | ||
388 | erase_cycles); | ||
389 | kfree(check_buf); | ||
390 | out_patt_FF: | ||
391 | kfree(patt_FF); | ||
392 | out_patt_A5A: | ||
393 | kfree(patt_A5A); | ||
394 | out_patt_5A5: | ||
395 | kfree(patt_5A5); | ||
396 | out_mtd: | ||
397 | put_mtd_device(mtd); | ||
398 | if (err) | ||
399 | printk(PRINT_PREF "error %d occurred during torturing\n", err); | ||
400 | printk(KERN_INFO "=================================================\n"); | ||
401 | return err; | ||
402 | } | ||
403 | module_init(tort_init); | ||
404 | |||
405 | static void __exit tort_exit(void) | ||
406 | { | ||
407 | return; | ||
408 | } | ||
409 | module_exit(tort_exit); | ||
410 | |||
411 | static int countdiffs(unsigned char *buf, unsigned char *check_buf, | ||
412 | unsigned offset, unsigned len, unsigned *bytesp, | ||
413 | unsigned *bitsp); | ||
414 | static void print_bufs(unsigned char *read, unsigned char *written, int start, | ||
415 | int len); | ||
416 | |||
417 | /* | ||
418 | * Report the detailed information about how the read EB differs from what was | ||
419 | * written. | ||
420 | */ | ||
421 | static void report_corrupt(unsigned char *read, unsigned char *written) | ||
422 | { | ||
423 | int i; | ||
424 | int bytes, bits, pages, first; | ||
425 | int offset, len; | ||
426 | size_t check_len = mtd->erasesize; | ||
427 | |||
428 | if (pgcnt) | ||
429 | check_len = pgcnt * pgsize; | ||
430 | |||
431 | bytes = bits = pages = 0; | ||
432 | for (i = 0; i < check_len; i += pgsize) | ||
433 | if (countdiffs(written, read, i, pgsize, &bytes, | ||
434 | &bits) >= 0) | ||
435 | pages++; | ||
436 | |||
437 | printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n", | ||
438 | pages, bytes, bits); | ||
439 | printk(PRINT_PREF "The following is a list of all differences between" | ||
440 | " what was read from flash and what was expected\n"); | ||
441 | |||
442 | for (i = 0; i < check_len; i += pgsize) { | ||
443 | cond_resched(); | ||
444 | bytes = bits = 0; | ||
445 | first = countdiffs(written, read, i, pgsize, &bytes, | ||
446 | &bits); | ||
447 | if (first < 0) | ||
448 | continue; | ||
449 | |||
450 | printk("-------------------------------------------------------" | ||
451 | "----------------------------------\n"); | ||
452 | |||
453 | printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify," | ||
454 | " starting at offset 0x%x\n", | ||
455 | (mtd->erasesize - check_len + i) / pgsize, | ||
456 | bytes, bits, first); | ||
457 | |||
458 | offset = first & ~0x7; | ||
459 | len = ((first + bytes) | 0x7) + 1 - offset; | ||
460 | |||
461 | print_bufs(read, written, offset, len); | ||
462 | } | ||
463 | } | ||
464 | |||
465 | static void print_bufs(unsigned char *read, unsigned char *written, int start, | ||
466 | int len) | ||
467 | { | ||
468 | int i = 0, j1, j2; | ||
469 | char *diff; | ||
470 | |||
471 | printk("Offset Read Written\n"); | ||
472 | while (i < len) { | ||
473 | printk("0x%08x: ", start + i); | ||
474 | diff = " "; | ||
475 | for (j1 = 0; j1 < 8 && i + j1 < len; j1++) { | ||
476 | printk(" %02x", read[start + i + j1]); | ||
477 | if (read[start + i + j1] != written[start + i + j1]) | ||
478 | diff = "***"; | ||
479 | } | ||
480 | |||
481 | while (j1 < 8) { | ||
482 | printk(" "); | ||
483 | j1 += 1; | ||
484 | } | ||
485 | |||
486 | printk(" %s ", diff); | ||
487 | |||
488 | for (j2 = 0; j2 < 8 && i + j2 < len; j2++) | ||
489 | printk(" %02x", written[start + i + j2]); | ||
490 | printk("\n"); | ||
491 | i += 8; | ||
492 | } | ||
493 | } | ||
494 | |||
495 | /* | ||
496 | * Count the number of differing bytes and bits and return the first differing | ||
497 | * offset. | ||
498 | */ | ||
499 | static int countdiffs(unsigned char *buf, unsigned char *check_buf, | ||
500 | unsigned offset, unsigned len, unsigned *bytesp, | ||
501 | unsigned *bitsp) | ||
502 | { | ||
503 | unsigned i, bit; | ||
504 | int first = -1; | ||
505 | |||
506 | for (i = offset; i < offset + len; i++) | ||
507 | if (buf[i] != check_buf[i]) { | ||
508 | first = i; | ||
509 | break; | ||
510 | } | ||
511 | |||
512 | while (i < offset + len) { | ||
513 | if (buf[i] != check_buf[i]) { | ||
514 | (*bytesp)++; | ||
515 | bit = 1; | ||
516 | while (bit < 256) { | ||
517 | if ((buf[i] & bit) != (check_buf[i] & bit)) | ||
518 | (*bitsp)++; | ||
519 | bit <<= 1; | ||
520 | } | ||
521 | } | ||
522 | i++; | ||
523 | } | ||
524 | |||
525 | return first; | ||
526 | } | ||
527 | |||
528 | MODULE_DESCRIPTION("Eraseblock torturing module"); | ||
529 | MODULE_AUTHOR("Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter"); | ||
530 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 7caf22cd5ad0..9082768cc6c3 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
@@ -561,7 +561,7 @@ static int io_init(struct ubi_device *ubi) | |||
561 | */ | 561 | */ |
562 | 562 | ||
563 | ubi->peb_size = ubi->mtd->erasesize; | 563 | ubi->peb_size = ubi->mtd->erasesize; |
564 | ubi->peb_count = ubi->mtd->size / ubi->mtd->erasesize; | 564 | ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); |
565 | ubi->flash_size = ubi->mtd->size; | 565 | ubi->flash_size = ubi->mtd->size; |
566 | 566 | ||
567 | if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) | 567 | if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) |
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c index 605812bb0b1a..6dd4f5e77f82 100644 --- a/drivers/mtd/ubi/gluebi.c +++ b/drivers/mtd/ubi/gluebi.c | |||
@@ -215,7 +215,8 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
215 | struct ubi_volume *vol; | 215 | struct ubi_volume *vol; |
216 | struct ubi_device *ubi; | 216 | struct ubi_device *ubi; |
217 | 217 | ||
218 | dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr); | 218 | dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len, |
219 | (unsigned long long)instr->addr); | ||
219 | 220 | ||
220 | if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) | 221 | if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) |
221 | return -EINVAL; | 222 | return -EINVAL; |
@@ -223,11 +224,11 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
223 | if (instr->len < 0 || instr->addr + instr->len > mtd->size) | 224 | if (instr->len < 0 || instr->addr + instr->len > mtd->size) |
224 | return -EINVAL; | 225 | return -EINVAL; |
225 | 226 | ||
226 | if (instr->addr % mtd->writesize || instr->len % mtd->writesize) | 227 | if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) |
227 | return -EINVAL; | 228 | return -EINVAL; |
228 | 229 | ||
229 | lnum = instr->addr / mtd->erasesize; | 230 | lnum = mtd_div_by_eb(instr->addr, mtd); |
230 | count = instr->len / mtd->erasesize; | 231 | count = mtd_div_by_eb(instr->len, mtd); |
231 | 232 | ||
232 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | 233 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); |
233 | ubi = vol->ubi; | 234 | ubi = vol->ubi; |
@@ -255,7 +256,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
255 | 256 | ||
256 | out_err: | 257 | out_err: |
257 | instr->state = MTD_ERASE_FAILED; | 258 | instr->state = MTD_ERASE_FAILED; |
258 | instr->fail_addr = lnum * mtd->erasesize; | 259 | instr->fail_addr = (long long)lnum * mtd->erasesize; |
259 | return err; | 260 | return err; |
260 | } | 261 | } |
261 | 262 | ||
@@ -294,7 +295,7 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) | |||
294 | * bytes. | 295 | * bytes. |
295 | */ | 296 | */ |
296 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | 297 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) |
297 | mtd->size = vol->usable_leb_size * vol->reserved_pebs; | 298 | mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs; |
298 | else | 299 | else |
299 | mtd->size = vol->used_bytes; | 300 | mtd->size = vol->used_bytes; |
300 | 301 | ||
@@ -304,8 +305,8 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) | |||
304 | return -ENFILE; | 305 | return -ENFILE; |
305 | } | 306 | } |
306 | 307 | ||
307 | dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u", | 308 | dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u", |
308 | mtd->index, mtd->name, mtd->size, mtd->erasesize); | 309 | mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize); |
309 | return 0; | 310 | return 0; |
310 | } | 311 | } |
311 | 312 | ||
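The UBI changes above stop dividing mtd->size by the eraseblock size directly and use mtd_div_by_eb()/mtd_mod_by_ws() instead, since mtd->size is being widened to 64 bits and a plain '/' would need libgcc's 64-bit division helpers on 32-bit kernels. The helpers themselves live in include/linux/mtd/mtd.h and are not part of these hunks; the sketch below only illustrates the idea with the kernel's do_div() and is not the real implementation.

#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <asm/div64.h>

/* Illustrative only: divide a 64-bit size by the 32-bit eraseblock size. */
static inline u32 example_div_by_eb(u64 sz, struct mtd_info *mtd)
{
	do_div(sz, mtd->erasesize);	/* do_div() leaves the quotient in sz */
	return sz;
}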
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 65e8294a9e29..9da5a4b81133 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c | |||
@@ -1,11 +1,12 @@ | |||
1 | /** | 1 | /** |
2 | * @file buffer_sync.c | 2 | * @file buffer_sync.c |
3 | * | 3 | * |
4 | * @remark Copyright 2002 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Barry Kasindorf | 8 | * @author Barry Kasindorf |
9 | * @author Robert Richter <robert.richter@amd.com> | ||
9 | * | 10 | * |
10 | * This is the core of the buffer management. Each | 11 | * This is the core of the buffer management. Each |
11 | * CPU buffer is processed and entered into the | 12 | * CPU buffer is processed and entered into the |
@@ -315,88 +316,73 @@ static void add_trace_begin(void) | |||
315 | add_event_entry(TRACE_BEGIN_CODE); | 316 | add_event_entry(TRACE_BEGIN_CODE); |
316 | } | 317 | } |
317 | 318 | ||
318 | #ifdef CONFIG_OPROFILE_IBS | 319 | static void add_data(struct op_entry *entry, struct mm_struct *mm) |
319 | |||
320 | #define IBS_FETCH_CODE_SIZE 2 | ||
321 | #define IBS_OP_CODE_SIZE 5 | ||
322 | |||
323 | /* | ||
324 | * Add IBS fetch and op entries to event buffer | ||
325 | */ | ||
326 | static void add_ibs_begin(int cpu, int code, struct mm_struct *mm) | ||
327 | { | 320 | { |
328 | unsigned long rip; | 321 | unsigned long code, pc, val; |
329 | int i, count; | 322 | unsigned long cookie; |
330 | unsigned long ibs_cookie = 0; | ||
331 | off_t offset; | 323 | off_t offset; |
332 | struct op_sample *sample; | ||
333 | |||
334 | sample = cpu_buffer_read_entry(cpu); | ||
335 | if (!sample) | ||
336 | goto Error; | ||
337 | rip = sample->eip; | ||
338 | 324 | ||
339 | #ifdef __LP64__ | 325 | if (!op_cpu_buffer_get_data(entry, &code)) |
340 | rip += sample->event << 32; | 326 | return; |
341 | #endif | 327 | if (!op_cpu_buffer_get_data(entry, &pc)) |
328 | return; | ||
329 | if (!op_cpu_buffer_get_size(entry)) | ||
330 | return; | ||
342 | 331 | ||
343 | if (mm) { | 332 | if (mm) { |
344 | ibs_cookie = lookup_dcookie(mm, rip, &offset); | 333 | cookie = lookup_dcookie(mm, pc, &offset); |
345 | 334 | ||
346 | if (ibs_cookie == NO_COOKIE) | 335 | if (cookie == NO_COOKIE) |
347 | offset = rip; | 336 | offset = pc; |
348 | if (ibs_cookie == INVALID_COOKIE) { | 337 | if (cookie == INVALID_COOKIE) { |
349 | atomic_inc(&oprofile_stats.sample_lost_no_mapping); | 338 | atomic_inc(&oprofile_stats.sample_lost_no_mapping); |
350 | offset = rip; | 339 | offset = pc; |
351 | } | 340 | } |
352 | if (ibs_cookie != last_cookie) { | 341 | if (cookie != last_cookie) { |
353 | add_cookie_switch(ibs_cookie); | 342 | add_cookie_switch(cookie); |
354 | last_cookie = ibs_cookie; | 343 | last_cookie = cookie; |
355 | } | 344 | } |
356 | } else | 345 | } else |
357 | offset = rip; | 346 | offset = pc; |
358 | 347 | ||
359 | add_event_entry(ESCAPE_CODE); | 348 | add_event_entry(ESCAPE_CODE); |
360 | add_event_entry(code); | 349 | add_event_entry(code); |
361 | add_event_entry(offset); /* Offset from Dcookie */ | 350 | add_event_entry(offset); /* Offset from Dcookie */ |
362 | 351 | ||
363 | /* we send the Dcookie offset, but send the raw Linear Add also*/ | 352 | while (op_cpu_buffer_get_data(entry, &val)) |
364 | add_event_entry(sample->eip); | 353 | add_event_entry(val); |
365 | add_event_entry(sample->event); | ||
366 | |||
367 | if (code == IBS_FETCH_CODE) | ||
368 | count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/ | ||
369 | else | ||
370 | count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/ | ||
371 | |||
372 | for (i = 0; i < count; i++) { | ||
373 | sample = cpu_buffer_read_entry(cpu); | ||
374 | if (!sample) | ||
375 | goto Error; | ||
376 | add_event_entry(sample->eip); | ||
377 | add_event_entry(sample->event); | ||
378 | } | ||
379 | |||
380 | return; | ||
381 | |||
382 | Error: | ||
383 | return; | ||
384 | } | 354 | } |
385 | 355 | ||
386 | #endif | 356 | static inline void add_sample_entry(unsigned long offset, unsigned long event) |
387 | |||
388 | static void add_sample_entry(unsigned long offset, unsigned long event) | ||
389 | { | 357 | { |
390 | add_event_entry(offset); | 358 | add_event_entry(offset); |
391 | add_event_entry(event); | 359 | add_event_entry(event); |
392 | } | 360 | } |
393 | 361 | ||
394 | 362 | ||
395 | static int add_us_sample(struct mm_struct *mm, struct op_sample *s) | 363 | /* |
364 | * Add a sample to the global event buffer. If possible the | ||
365 | * sample is converted into a persistent dentry/offset pair | ||
366 | * for later lookup from userspace. Return 0 on failure. | ||
367 | */ | ||
368 | static int | ||
369 | add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) | ||
396 | { | 370 | { |
397 | unsigned long cookie; | 371 | unsigned long cookie; |
398 | off_t offset; | 372 | off_t offset; |
399 | 373 | ||
374 | if (in_kernel) { | ||
375 | add_sample_entry(s->eip, s->event); | ||
376 | return 1; | ||
377 | } | ||
378 | |||
379 | /* add userspace sample */ | ||
380 | |||
381 | if (!mm) { | ||
382 | atomic_inc(&oprofile_stats.sample_lost_no_mm); | ||
383 | return 0; | ||
384 | } | ||
385 | |||
400 | cookie = lookup_dcookie(mm, s->eip, &offset); | 386 | cookie = lookup_dcookie(mm, s->eip, &offset); |
401 | 387 | ||
402 | if (cookie == INVALID_COOKIE) { | 388 | if (cookie == INVALID_COOKIE) { |
@@ -415,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s) | |||
415 | } | 401 | } |
416 | 402 | ||
417 | 403 | ||
418 | /* Add a sample to the global event buffer. If possible the | ||
419 | * sample is converted into a persistent dentry/offset pair | ||
420 | * for later lookup from userspace. | ||
421 | */ | ||
422 | static int | ||
423 | add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) | ||
424 | { | ||
425 | if (in_kernel) { | ||
426 | add_sample_entry(s->eip, s->event); | ||
427 | return 1; | ||
428 | } else if (mm) { | ||
429 | return add_us_sample(mm, s); | ||
430 | } else { | ||
431 | atomic_inc(&oprofile_stats.sample_lost_no_mm); | ||
432 | } | ||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | |||
437 | static void release_mm(struct mm_struct *mm) | 404 | static void release_mm(struct mm_struct *mm) |
438 | { | 405 | { |
439 | if (!mm) | 406 | if (!mm) |
@@ -526,66 +493,69 @@ void sync_buffer(int cpu) | |||
526 | { | 493 | { |
527 | struct mm_struct *mm = NULL; | 494 | struct mm_struct *mm = NULL; |
528 | struct mm_struct *oldmm; | 495 | struct mm_struct *oldmm; |
496 | unsigned long val; | ||
529 | struct task_struct *new; | 497 | struct task_struct *new; |
530 | unsigned long cookie = 0; | 498 | unsigned long cookie = 0; |
531 | int in_kernel = 1; | 499 | int in_kernel = 1; |
532 | sync_buffer_state state = sb_buffer_start; | 500 | sync_buffer_state state = sb_buffer_start; |
533 | unsigned int i; | 501 | unsigned int i; |
534 | unsigned long available; | 502 | unsigned long available; |
503 | unsigned long flags; | ||
504 | struct op_entry entry; | ||
505 | struct op_sample *sample; | ||
535 | 506 | ||
536 | mutex_lock(&buffer_mutex); | 507 | mutex_lock(&buffer_mutex); |
537 | 508 | ||
538 | add_cpu_switch(cpu); | 509 | add_cpu_switch(cpu); |
539 | 510 | ||
540 | cpu_buffer_reset(cpu); | 511 | op_cpu_buffer_reset(cpu); |
541 | available = cpu_buffer_entries(cpu); | 512 | available = op_cpu_buffer_entries(cpu); |
542 | 513 | ||
543 | for (i = 0; i < available; ++i) { | 514 | for (i = 0; i < available; ++i) { |
544 | struct op_sample *s = cpu_buffer_read_entry(cpu); | 515 | sample = op_cpu_buffer_read_entry(&entry, cpu); |
545 | if (!s) | 516 | if (!sample) |
546 | break; | 517 | break; |
547 | 518 | ||
548 | if (is_code(s->eip)) { | 519 | if (is_code(sample->eip)) { |
549 | switch (s->event) { | 520 | flags = sample->event; |
550 | case 0: | 521 | if (flags & TRACE_BEGIN) { |
551 | case CPU_IS_KERNEL: | 522 | state = sb_bt_start; |
523 | add_trace_begin(); | ||
524 | } | ||
525 | if (flags & KERNEL_CTX_SWITCH) { | ||
552 | /* kernel/userspace switch */ | 526 | /* kernel/userspace switch */ |
553 | in_kernel = s->event; | 527 | in_kernel = flags & IS_KERNEL; |
554 | if (state == sb_buffer_start) | 528 | if (state == sb_buffer_start) |
555 | state = sb_sample_start; | 529 | state = sb_sample_start; |
556 | add_kernel_ctx_switch(s->event); | 530 | add_kernel_ctx_switch(flags & IS_KERNEL); |
557 | break; | 531 | } |
558 | case CPU_TRACE_BEGIN: | 532 | if (flags & USER_CTX_SWITCH |
559 | state = sb_bt_start; | 533 | && op_cpu_buffer_get_data(&entry, &val)) { |
560 | add_trace_begin(); | ||
561 | break; | ||
562 | #ifdef CONFIG_OPROFILE_IBS | ||
563 | case IBS_FETCH_BEGIN: | ||
564 | state = sb_bt_start; | ||
565 | add_ibs_begin(cpu, IBS_FETCH_CODE, mm); | ||
566 | break; | ||
567 | case IBS_OP_BEGIN: | ||
568 | state = sb_bt_start; | ||
569 | add_ibs_begin(cpu, IBS_OP_CODE, mm); | ||
570 | break; | ||
571 | #endif | ||
572 | default: | ||
573 | /* userspace context switch */ | 534 | /* userspace context switch */ |
535 | new = (struct task_struct *)val; | ||
574 | oldmm = mm; | 536 | oldmm = mm; |
575 | new = (struct task_struct *)s->event; | ||
576 | release_mm(oldmm); | 537 | release_mm(oldmm); |
577 | mm = take_tasks_mm(new); | 538 | mm = take_tasks_mm(new); |
578 | if (mm != oldmm) | 539 | if (mm != oldmm) |
579 | cookie = get_exec_dcookie(mm); | 540 | cookie = get_exec_dcookie(mm); |
580 | add_user_ctx_switch(new, cookie); | 541 | add_user_ctx_switch(new, cookie); |
581 | break; | ||
582 | } | ||
583 | } else if (state >= sb_bt_start && | ||
584 | !add_sample(mm, s, in_kernel)) { | ||
585 | if (state == sb_bt_start) { | ||
586 | state = sb_bt_ignore; | ||
587 | atomic_inc(&oprofile_stats.bt_lost_no_mapping); | ||
588 | } | 542 | } |
543 | if (op_cpu_buffer_get_size(&entry)) | ||
544 | add_data(&entry, mm); | ||
545 | continue; | ||
546 | } | ||
547 | |||
548 | if (state < sb_bt_start) | ||
549 | /* ignore sample */ | ||
550 | continue; | ||
551 | |||
552 | if (add_sample(mm, sample, in_kernel)) | ||
553 | continue; | ||
554 | |||
555 | /* ignore backtraces if failed to add a sample */ | ||
556 | if (state == sb_bt_start) { | ||
557 | state = sb_bt_ignore; | ||
558 | atomic_inc(&oprofile_stats.bt_lost_no_mapping); | ||
589 | } | 559 | } |
590 | } | 560 | } |
591 | release_mm(mm); | 561 | release_mm(mm); |
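The reworked sync_buffer() above no longer special-cases IBS codes; an escape sample now carries generic flag bits in ->event plus an optional payload that is drained with op_cpu_buffer_get_data() (the helper is added in cpu_buffer.h further down). A rough consumer-side sketch of that convention, assuming the oprofile cpu_buffer header; it is not code from the patch:

#include <linux/kernel.h>
#include "cpu_buffer.h"

/* Sketch: decode the flag bits and payload of one escape sample. */
static void example_decode_escape(struct op_entry *entry)
{
	unsigned long flags = entry->sample->event;
	unsigned long val;

	if (flags & TRACE_BEGIN)
		pr_debug("backtrace entries follow\n");
	if (flags & KERNEL_CTX_SWITCH)
		pr_debug("now in %s mode\n",
			 (flags & IS_KERNEL) ? "kernel" : "user");
	if ((flags & USER_CTX_SWITCH) && op_cpu_buffer_get_data(entry, &val))
		pr_debug("switched to task %p\n", (void *)val);
}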
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 61090969158f..2e03b6d796d3 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -1,11 +1,12 @@ | |||
1 | /** | 1 | /** |
2 | * @file cpu_buffer.c | 2 | * @file cpu_buffer.c |
3 | * | 3 | * |
4 | * @remark Copyright 2002 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Barry Kasindorf <barry.kasindorf@amd.com> | 8 | * @author Barry Kasindorf <barry.kasindorf@amd.com> |
9 | * @author Robert Richter <robert.richter@amd.com> | ||
9 | * | 10 | * |
10 | * Each CPU has a local buffer that stores PC value/event | 11 | * Each CPU has a local buffer that stores PC value/event |
11 | * pairs. We also log context switches when we notice them. | 12 | * pairs. We also log context switches when we notice them. |
@@ -45,8 +46,8 @@ | |||
45 | * can be changed to a single buffer solution when the ring buffer | 46 | * can be changed to a single buffer solution when the ring buffer |
46 | * access is implemented as non-locking atomic code. | 47 | * access is implemented as non-locking atomic code. |
47 | */ | 48 | */ |
48 | struct ring_buffer *op_ring_buffer_read; | 49 | static struct ring_buffer *op_ring_buffer_read; |
49 | struct ring_buffer *op_ring_buffer_write; | 50 | static struct ring_buffer *op_ring_buffer_write; |
50 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | 51 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); |
51 | 52 | ||
52 | static void wq_sync_buffer(struct work_struct *work); | 53 | static void wq_sync_buffer(struct work_struct *work); |
@@ -54,19 +55,9 @@ static void wq_sync_buffer(struct work_struct *work); | |||
54 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) | 55 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) |
55 | static int work_enabled; | 56 | static int work_enabled; |
56 | 57 | ||
57 | void free_cpu_buffers(void) | ||
58 | { | ||
59 | if (op_ring_buffer_read) | ||
60 | ring_buffer_free(op_ring_buffer_read); | ||
61 | op_ring_buffer_read = NULL; | ||
62 | if (op_ring_buffer_write) | ||
63 | ring_buffer_free(op_ring_buffer_write); | ||
64 | op_ring_buffer_write = NULL; | ||
65 | } | ||
66 | |||
67 | unsigned long oprofile_get_cpu_buffer_size(void) | 58 | unsigned long oprofile_get_cpu_buffer_size(void) |
68 | { | 59 | { |
69 | return fs_cpu_buffer_size; | 60 | return oprofile_cpu_buffer_size; |
70 | } | 61 | } |
71 | 62 | ||
72 | void oprofile_cpu_buffer_inc_smpl_lost(void) | 63 | void oprofile_cpu_buffer_inc_smpl_lost(void) |
@@ -77,11 +68,21 @@ void oprofile_cpu_buffer_inc_smpl_lost(void) | |||
77 | cpu_buf->sample_lost_overflow++; | 68 | cpu_buf->sample_lost_overflow++; |
78 | } | 69 | } |
79 | 70 | ||
71 | void free_cpu_buffers(void) | ||
72 | { | ||
73 | if (op_ring_buffer_read) | ||
74 | ring_buffer_free(op_ring_buffer_read); | ||
75 | op_ring_buffer_read = NULL; | ||
76 | if (op_ring_buffer_write) | ||
77 | ring_buffer_free(op_ring_buffer_write); | ||
78 | op_ring_buffer_write = NULL; | ||
79 | } | ||
80 | |||
80 | int alloc_cpu_buffers(void) | 81 | int alloc_cpu_buffers(void) |
81 | { | 82 | { |
82 | int i; | 83 | int i; |
83 | 84 | ||
84 | unsigned long buffer_size = fs_cpu_buffer_size; | 85 | unsigned long buffer_size = oprofile_cpu_buffer_size; |
85 | 86 | ||
86 | op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); | 87 | op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); |
87 | if (!op_ring_buffer_read) | 88 | if (!op_ring_buffer_read) |
@@ -97,8 +98,6 @@ int alloc_cpu_buffers(void) | |||
97 | b->last_is_kernel = -1; | 98 | b->last_is_kernel = -1; |
98 | b->tracing = 0; | 99 | b->tracing = 0; |
99 | b->buffer_size = buffer_size; | 100 | b->buffer_size = buffer_size; |
100 | b->tail_pos = 0; | ||
101 | b->head_pos = 0; | ||
102 | b->sample_received = 0; | 101 | b->sample_received = 0; |
103 | b->sample_lost_overflow = 0; | 102 | b->sample_lost_overflow = 0; |
104 | b->backtrace_aborted = 0; | 103 | b->backtrace_aborted = 0; |
@@ -145,47 +144,156 @@ void end_cpu_work(void) | |||
145 | flush_scheduled_work(); | 144 | flush_scheduled_work(); |
146 | } | 145 | } |
147 | 146 | ||
148 | static inline int | 147 | /* |
149 | add_sample(struct oprofile_cpu_buffer *cpu_buf, | 148 | * This function prepares the cpu buffer to write a sample. |
150 | unsigned long pc, unsigned long event) | 149 | * |
150 | * Struct op_entry is used during operations on the ring buffer while | ||
151 | * struct op_sample contains the data that is stored in the ring | ||
152 | * buffer. Struct entry can be uninitialized. The function reserves a | ||
153 | * data array that is specified by size. Use | ||
154 | * op_cpu_buffer_write_commit() after preparing the sample. In case of | ||
155 | * errors a null pointer is returned, otherwise the pointer to the | ||
156 | * sample. | ||
157 | * | ||
158 | */ | ||
159 | struct op_sample | ||
160 | *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) | ||
161 | { | ||
162 | entry->event = ring_buffer_lock_reserve | ||
163 | (op_ring_buffer_write, sizeof(struct op_sample) + | ||
164 | size * sizeof(entry->sample->data[0]), &entry->irq_flags); | ||
165 | if (entry->event) | ||
166 | entry->sample = ring_buffer_event_data(entry->event); | ||
167 | else | ||
168 | entry->sample = NULL; | ||
169 | |||
170 | if (!entry->sample) | ||
171 | return NULL; | ||
172 | |||
173 | entry->size = size; | ||
174 | entry->data = entry->sample->data; | ||
175 | |||
176 | return entry->sample; | ||
177 | } | ||
178 | |||
179 | int op_cpu_buffer_write_commit(struct op_entry *entry) | ||
180 | { | ||
181 | return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event, | ||
182 | entry->irq_flags); | ||
183 | } | ||
184 | |||
185 | struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) | ||
186 | { | ||
187 | struct ring_buffer_event *e; | ||
188 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
189 | if (e) | ||
190 | goto event; | ||
191 | if (ring_buffer_swap_cpu(op_ring_buffer_read, | ||
192 | op_ring_buffer_write, | ||
193 | cpu)) | ||
194 | return NULL; | ||
195 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
196 | if (e) | ||
197 | goto event; | ||
198 | return NULL; | ||
199 | |||
200 | event: | ||
201 | entry->event = e; | ||
202 | entry->sample = ring_buffer_event_data(e); | ||
203 | entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) | ||
204 | / sizeof(entry->sample->data[0]); | ||
205 | entry->data = entry->sample->data; | ||
206 | return entry->sample; | ||
207 | } | ||
208 | |||
209 | unsigned long op_cpu_buffer_entries(int cpu) | ||
210 | { | ||
211 | return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) | ||
212 | + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); | ||
213 | } | ||
214 | |||
215 | static int | ||
216 | op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace, | ||
217 | int is_kernel, struct task_struct *task) | ||
151 | { | 218 | { |
152 | struct op_entry entry; | 219 | struct op_entry entry; |
153 | int ret; | 220 | struct op_sample *sample; |
221 | unsigned long flags; | ||
222 | int size; | ||
223 | |||
224 | flags = 0; | ||
225 | |||
226 | if (backtrace) | ||
227 | flags |= TRACE_BEGIN; | ||
228 | |||
229 | /* notice a switch from user->kernel or vice versa */ | ||
230 | is_kernel = !!is_kernel; | ||
231 | if (cpu_buf->last_is_kernel != is_kernel) { | ||
232 | cpu_buf->last_is_kernel = is_kernel; | ||
233 | flags |= KERNEL_CTX_SWITCH; | ||
234 | if (is_kernel) | ||
235 | flags |= IS_KERNEL; | ||
236 | } | ||
237 | |||
238 | /* notice a task switch */ | ||
239 | if (cpu_buf->last_task != task) { | ||
240 | cpu_buf->last_task = task; | ||
241 | flags |= USER_CTX_SWITCH; | ||
242 | } | ||
243 | |||
244 | if (!flags) | ||
245 | /* nothing to do */ | ||
246 | return 0; | ||
247 | |||
248 | if (flags & USER_CTX_SWITCH) | ||
249 | size = 1; | ||
250 | else | ||
251 | size = 0; | ||
252 | |||
253 | sample = op_cpu_buffer_write_reserve(&entry, size); | ||
254 | if (!sample) | ||
255 | return -ENOMEM; | ||
154 | 256 | ||
155 | ret = cpu_buffer_write_entry(&entry); | 257 | sample->eip = ESCAPE_CODE; |
156 | if (ret) | 258 | sample->event = flags; |
157 | return ret; | ||
158 | 259 | ||
159 | entry.sample->eip = pc; | 260 | if (size) |
160 | entry.sample->event = event; | 261 | op_cpu_buffer_add_data(&entry, (unsigned long)task); |
161 | 262 | ||
162 | ret = cpu_buffer_write_commit(&entry); | 263 | op_cpu_buffer_write_commit(&entry); |
163 | if (ret) | ||
164 | return ret; | ||
165 | 264 | ||
166 | return 0; | 265 | return 0; |
167 | } | 266 | } |
168 | 267 | ||
169 | static inline int | 268 | static inline int |
170 | add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) | 269 | op_add_sample(struct oprofile_cpu_buffer *cpu_buf, |
270 | unsigned long pc, unsigned long event) | ||
171 | { | 271 | { |
172 | return add_sample(buffer, ESCAPE_CODE, value); | 272 | struct op_entry entry; |
273 | struct op_sample *sample; | ||
274 | |||
275 | sample = op_cpu_buffer_write_reserve(&entry, 0); | ||
276 | if (!sample) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | sample->eip = pc; | ||
280 | sample->event = event; | ||
281 | |||
282 | return op_cpu_buffer_write_commit(&entry); | ||
173 | } | 283 | } |
174 | 284 | ||
175 | /* This must be safe from any context. It's safe writing here | 285 | /* |
176 | * because of the head/tail separation of the writer and reader | 286 | * This must be safe from any context. |
177 | * of the CPU buffer. | ||
178 | * | 287 | * |
179 | * is_kernel is needed because on some architectures you cannot | 288 | * is_kernel is needed because on some architectures you cannot |
180 | * tell if you are in kernel or user space simply by looking at | 289 | * tell if you are in kernel or user space simply by looking at |
181 | * pc. We tag this in the buffer by generating kernel enter/exit | 290 | * pc. We tag this in the buffer by generating kernel enter/exit |
182 | * events whenever is_kernel changes | 291 | * events whenever is_kernel changes |
183 | */ | 292 | */ |
184 | static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, | 293 | static int |
185 | int is_kernel, unsigned long event) | 294 | log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, |
295 | unsigned long backtrace, int is_kernel, unsigned long event) | ||
186 | { | 296 | { |
187 | struct task_struct *task; | ||
188 | |||
189 | cpu_buf->sample_received++; | 297 | cpu_buf->sample_received++; |
190 | 298 | ||
191 | if (pc == ESCAPE_CODE) { | 299 | if (pc == ESCAPE_CODE) { |
@@ -193,25 +301,10 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, | |||
193 | return 0; | 301 | return 0; |
194 | } | 302 | } |
195 | 303 | ||
196 | is_kernel = !!is_kernel; | 304 | if (op_add_code(cpu_buf, backtrace, is_kernel, current)) |
197 | 305 | goto fail; | |
198 | task = current; | ||
199 | |||
200 | /* notice a switch from user->kernel or vice versa */ | ||
201 | if (cpu_buf->last_is_kernel != is_kernel) { | ||
202 | cpu_buf->last_is_kernel = is_kernel; | ||
203 | if (add_code(cpu_buf, is_kernel)) | ||
204 | goto fail; | ||
205 | } | ||
206 | |||
207 | /* notice a task switch */ | ||
208 | if (cpu_buf->last_task != task) { | ||
209 | cpu_buf->last_task = task; | ||
210 | if (add_code(cpu_buf, (unsigned long)task)) | ||
211 | goto fail; | ||
212 | } | ||
213 | 306 | ||
214 | if (add_sample(cpu_buf, pc, event)) | 307 | if (op_add_sample(cpu_buf, pc, event)) |
215 | goto fail; | 308 | goto fail; |
216 | 309 | ||
217 | return 1; | 310 | return 1; |
@@ -221,109 +314,102 @@ fail: | |||
221 | return 0; | 314 | return 0; |
222 | } | 315 | } |
223 | 316 | ||
224 | static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) | 317 | static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) |
225 | { | 318 | { |
226 | add_code(cpu_buf, CPU_TRACE_BEGIN); | ||
227 | cpu_buf->tracing = 1; | 319 | cpu_buf->tracing = 1; |
228 | return 1; | ||
229 | } | 320 | } |
230 | 321 | ||
231 | static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf) | 322 | static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf) |
232 | { | 323 | { |
233 | cpu_buf->tracing = 0; | 324 | cpu_buf->tracing = 0; |
234 | } | 325 | } |
235 | 326 | ||
236 | void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, | 327 | static inline void |
237 | unsigned long event, int is_kernel) | 328 | __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, |
329 | unsigned long event, int is_kernel) | ||
238 | { | 330 | { |
239 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 331 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); |
240 | 332 | unsigned long backtrace = oprofile_backtrace_depth; | |
241 | if (!backtrace_depth) { | ||
242 | log_sample(cpu_buf, pc, is_kernel, event); | ||
243 | return; | ||
244 | } | ||
245 | |||
246 | if (!oprofile_begin_trace(cpu_buf)) | ||
247 | return; | ||
248 | 333 | ||
249 | /* | 334 | /* |
250 | * if log_sample() fail we can't backtrace since we lost the | 335 | * if log_sample() fail we can't backtrace since we lost the |
251 | * source of this event | 336 | * source of this event |
252 | */ | 337 | */ |
253 | if (log_sample(cpu_buf, pc, is_kernel, event)) | 338 | if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event)) |
254 | oprofile_ops.backtrace(regs, backtrace_depth); | 339 | /* failed */ |
340 | return; | ||
341 | |||
342 | if (!backtrace) | ||
343 | return; | ||
344 | |||
345 | oprofile_begin_trace(cpu_buf); | ||
346 | oprofile_ops.backtrace(regs, backtrace); | ||
255 | oprofile_end_trace(cpu_buf); | 347 | oprofile_end_trace(cpu_buf); |
256 | } | 348 | } |
257 | 349 | ||
350 | void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, | ||
351 | unsigned long event, int is_kernel) | ||
352 | { | ||
353 | __oprofile_add_ext_sample(pc, regs, event, is_kernel); | ||
354 | } | ||
355 | |||
258 | void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) | 356 | void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) |
259 | { | 357 | { |
260 | int is_kernel = !user_mode(regs); | 358 | int is_kernel = !user_mode(regs); |
261 | unsigned long pc = profile_pc(regs); | 359 | unsigned long pc = profile_pc(regs); |
262 | 360 | ||
263 | oprofile_add_ext_sample(pc, regs, event, is_kernel); | 361 | __oprofile_add_ext_sample(pc, regs, event, is_kernel); |
264 | } | 362 | } |
265 | 363 | ||
266 | #ifdef CONFIG_OPROFILE_IBS | 364 | /* |
267 | 365 | * Add samples with data to the ring buffer. | |
268 | #define MAX_IBS_SAMPLE_SIZE 14 | 366 | * |
269 | 367 | * Use oprofile_add_data(&entry, val) to add data and | |
270 | void oprofile_add_ibs_sample(struct pt_regs * const regs, | 368 | * oprofile_write_commit(&entry) to commit the sample. |
271 | unsigned int * const ibs_sample, int ibs_code) | 369 | */ |
370 | void | ||
371 | oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, | ||
372 | unsigned long pc, int code, int size) | ||
272 | { | 373 | { |
374 | struct op_sample *sample; | ||
273 | int is_kernel = !user_mode(regs); | 375 | int is_kernel = !user_mode(regs); |
274 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 376 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); |
275 | struct task_struct *task; | ||
276 | int fail = 0; | ||
277 | 377 | ||
278 | cpu_buf->sample_received++; | 378 | cpu_buf->sample_received++; |
279 | 379 | ||
280 | /* notice a switch from user->kernel or vice versa */ | 380 | /* no backtraces for samples with data */ |
281 | if (cpu_buf->last_is_kernel != is_kernel) { | 381 | if (op_add_code(cpu_buf, 0, is_kernel, current)) |
282 | if (add_code(cpu_buf, is_kernel)) | 382 | goto fail; |
283 | goto fail; | ||
284 | cpu_buf->last_is_kernel = is_kernel; | ||
285 | } | ||
286 | |||
287 | /* notice a task switch */ | ||
288 | if (!is_kernel) { | ||
289 | task = current; | ||
290 | if (cpu_buf->last_task != task) { | ||
291 | if (add_code(cpu_buf, (unsigned long)task)) | ||
292 | goto fail; | ||
293 | cpu_buf->last_task = task; | ||
294 | } | ||
295 | } | ||
296 | |||
297 | fail = fail || add_code(cpu_buf, ibs_code); | ||
298 | fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); | ||
299 | fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); | ||
300 | fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); | ||
301 | |||
302 | if (ibs_code == IBS_OP_BEGIN) { | ||
303 | fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]); | ||
304 | fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]); | ||
305 | fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); | ||
306 | } | ||
307 | 383 | ||
308 | if (fail) | 384 | sample = op_cpu_buffer_write_reserve(entry, size + 2); |
385 | if (!sample) | ||
309 | goto fail; | 386 | goto fail; |
387 | sample->eip = ESCAPE_CODE; | ||
388 | sample->event = 0; /* no flags */ | ||
310 | 389 | ||
311 | if (backtrace_depth) | 390 | op_cpu_buffer_add_data(entry, code); |
312 | oprofile_ops.backtrace(regs, backtrace_depth); | 391 | op_cpu_buffer_add_data(entry, pc); |
313 | 392 | ||
314 | return; | 393 | return; |
315 | 394 | ||
316 | fail: | 395 | fail: |
317 | cpu_buf->sample_lost_overflow++; | 396 | cpu_buf->sample_lost_overflow++; |
318 | return; | ||
319 | } | 397 | } |
320 | 398 | ||
321 | #endif | 399 | int oprofile_add_data(struct op_entry *entry, unsigned long val) |
400 | { | ||
401 | return op_cpu_buffer_add_data(entry, val); | ||
402 | } | ||
403 | |||
404 | int oprofile_write_commit(struct op_entry *entry) | ||
405 | { | ||
406 | return op_cpu_buffer_write_commit(entry); | ||
407 | } | ||
322 | 408 | ||
323 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) | 409 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) |
324 | { | 410 | { |
325 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 411 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); |
326 | log_sample(cpu_buf, pc, is_kernel, event); | 412 | log_sample(cpu_buf, pc, 0, is_kernel, event); |
327 | } | 413 | } |
328 | 414 | ||
329 | void oprofile_add_trace(unsigned long pc) | 415 | void oprofile_add_trace(unsigned long pc) |
@@ -340,7 +426,7 @@ void oprofile_add_trace(unsigned long pc) | |||
340 | if (pc == ESCAPE_CODE) | 426 | if (pc == ESCAPE_CODE) |
341 | goto fail; | 427 | goto fail; |
342 | 428 | ||
343 | if (add_sample(cpu_buf, pc, 0)) | 429 | if (op_add_sample(cpu_buf, pc, 0)) |
344 | goto fail; | 430 | goto fail; |
345 | 431 | ||
346 | return; | 432 | return; |
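The writer side introduced above follows a reserve/fill/commit pattern: op_cpu_buffer_write_reserve() hands back an op_sample with room for a given number of data words, op_cpu_buffer_add_data() appends the words, and op_cpu_buffer_write_commit() publishes the entry, exactly as oprofile_write_reserve() does. A minimal caller sketch under those assumptions (ESCAPE_CODE comes from event_buffer.h); it is not part of the patch:

#include <linux/errno.h>
#include "cpu_buffer.h"
#include "event_buffer.h"

/* Sketch: log one escape sample that carries two extra data words. */
static int example_log_with_data(unsigned long code, unsigned long pc)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 2);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;	/* escape sample: payload follows */
	sample->event = 0;		/* no flags */
	op_cpu_buffer_add_data(&entry, code);
	op_cpu_buffer_add_data(&entry, pc);

	return op_cpu_buffer_write_commit(&entry);
}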
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index aacb0f0bc566..63f81c44846a 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h | |||
@@ -1,10 +1,11 @@ | |||
1 | /** | 1 | /** |
2 | * @file cpu_buffer.h | 2 | * @file cpu_buffer.h |
3 | * | 3 | * |
4 | * @remark Copyright 2002 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Robert Richter <robert.richter@amd.com> | ||
8 | */ | 9 | */ |
9 | 10 | ||
10 | #ifndef OPROFILE_CPU_BUFFER_H | 11 | #ifndef OPROFILE_CPU_BUFFER_H |
@@ -31,17 +32,12 @@ void end_cpu_work(void); | |||
31 | struct op_sample { | 32 | struct op_sample { |
32 | unsigned long eip; | 33 | unsigned long eip; |
33 | unsigned long event; | 34 | unsigned long event; |
35 | unsigned long data[0]; | ||
34 | }; | 36 | }; |
35 | 37 | ||
36 | struct op_entry { | 38 | struct op_entry; |
37 | struct ring_buffer_event *event; | ||
38 | struct op_sample *sample; | ||
39 | unsigned long irq_flags; | ||
40 | }; | ||
41 | 39 | ||
42 | struct oprofile_cpu_buffer { | 40 | struct oprofile_cpu_buffer { |
43 | volatile unsigned long head_pos; | ||
44 | volatile unsigned long tail_pos; | ||
45 | unsigned long buffer_size; | 41 | unsigned long buffer_size; |
46 | struct task_struct *last_task; | 42 | struct task_struct *last_task; |
47 | int last_is_kernel; | 43 | int last_is_kernel; |
@@ -54,8 +50,6 @@ struct oprofile_cpu_buffer { | |||
54 | struct delayed_work work; | 50 | struct delayed_work work; |
55 | }; | 51 | }; |
56 | 52 | ||
57 | extern struct ring_buffer *op_ring_buffer_read; | ||
58 | extern struct ring_buffer *op_ring_buffer_write; | ||
59 | DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | 53 | DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); |
60 | 54 | ||
61 | /* | 55 | /* |
@@ -64,7 +58,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | |||
64 | * reset these to invalid values; the next sample collected will | 58 | * reset these to invalid values; the next sample collected will |
65 | * populate the buffer with proper values to initialize the buffer | 59 | * populate the buffer with proper values to initialize the buffer |
66 | */ | 60 | */ |
67 | static inline void cpu_buffer_reset(int cpu) | 61 | static inline void op_cpu_buffer_reset(int cpu) |
68 | { | 62 | { |
69 | struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); | 63 | struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); |
70 | 64 | ||
@@ -72,55 +66,48 @@ static inline void cpu_buffer_reset(int cpu) | |||
72 | cpu_buf->last_task = NULL; | 66 | cpu_buf->last_task = NULL; |
73 | } | 67 | } |
74 | 68 | ||
75 | static inline int cpu_buffer_write_entry(struct op_entry *entry) | 69 | struct op_sample |
76 | { | 70 | *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size); |
77 | entry->event = ring_buffer_lock_reserve(op_ring_buffer_write, | 71 | int op_cpu_buffer_write_commit(struct op_entry *entry); |
78 | sizeof(struct op_sample), | 72 | struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu); |
79 | &entry->irq_flags); | 73 | unsigned long op_cpu_buffer_entries(int cpu); |
80 | if (entry->event) | ||
81 | entry->sample = ring_buffer_event_data(entry->event); | ||
82 | else | ||
83 | entry->sample = NULL; | ||
84 | |||
85 | if (!entry->sample) | ||
86 | return -ENOMEM; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | 74 | ||
91 | static inline int cpu_buffer_write_commit(struct op_entry *entry) | 75 | /* returns the remaining free size of data in the entry */ |
76 | static inline | ||
77 | int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val) | ||
92 | { | 78 | { |
93 | return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event, | 79 | if (!entry->size) |
94 | entry->irq_flags); | 80 | return 0; |
81 | *entry->data = val; | ||
82 | entry->size--; | ||
83 | entry->data++; | ||
84 | return entry->size; | ||
95 | } | 85 | } |
96 | 86 | ||
97 | static inline struct op_sample *cpu_buffer_read_entry(int cpu) | 87 | /* returns the size of data in the entry */ |
88 | static inline | ||
89 | int op_cpu_buffer_get_size(struct op_entry *entry) | ||
98 | { | 90 | { |
99 | struct ring_buffer_event *e; | 91 | return entry->size; |
100 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
101 | if (e) | ||
102 | return ring_buffer_event_data(e); | ||
103 | if (ring_buffer_swap_cpu(op_ring_buffer_read, | ||
104 | op_ring_buffer_write, | ||
105 | cpu)) | ||
106 | return NULL; | ||
107 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
108 | if (e) | ||
109 | return ring_buffer_event_data(e); | ||
110 | return NULL; | ||
111 | } | 92 | } |
112 | 93 | ||
113 | /* "acquire" as many cpu buffer slots as we can */ | 94 | /* returns 0 if empty or the size of data including the current value */ |
114 | static inline unsigned long cpu_buffer_entries(int cpu) | 95 | static inline |
96 | int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val) | ||
115 | { | 97 | { |
116 | return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) | 98 | int size = entry->size; |
117 | + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); | 99 | if (!size) |
100 | return 0; | ||
101 | *val = *entry->data; | ||
102 | entry->size--; | ||
103 | entry->data++; | ||
104 | return size; | ||
118 | } | 105 | } |
119 | 106 | ||
120 | /* transient events for the CPU buffer -> event buffer */ | 107 | /* extra data flags */ |
121 | #define CPU_IS_KERNEL 1 | 108 | #define KERNEL_CTX_SWITCH (1UL << 0) |
122 | #define CPU_TRACE_BEGIN 2 | 109 | #define IS_KERNEL (1UL << 1) |
123 | #define IBS_FETCH_BEGIN 3 | 110 | #define TRACE_BEGIN (1UL << 2) |
124 | #define IBS_OP_BEGIN 4 | 111 | #define USER_CTX_SWITCH (1UL << 3) |
125 | 112 | ||
126 | #endif /* OPROFILE_CPU_BUFFER_H */ | 113 | #endif /* OPROFILE_CPU_BUFFER_H */ |
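The inline helpers above encode progress in their return values: op_cpu_buffer_get_size() reports how many data words are still available in the entry, and op_cpu_buffer_add_data() returns the space left after storing a value. A small sketch that respects that contract when filling a reserved entry (illustrative only, not from the patch):

#include "cpu_buffer.h"

/* Sketch: append as many values as the reserved entry has room for. */
static void example_fill_entry(struct op_entry *entry,
			       const unsigned long *vals, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!op_cpu_buffer_get_size(entry))
			break;		/* entry is full, drop the rest */
		op_cpu_buffer_add_data(entry, vals[i]);
	}
}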
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index 191a3202cecc..2b7ae366ceb1 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c | |||
@@ -73,8 +73,8 @@ int alloc_event_buffer(void) | |||
73 | unsigned long flags; | 73 | unsigned long flags; |
74 | 74 | ||
75 | spin_lock_irqsave(&oprofilefs_lock, flags); | 75 | spin_lock_irqsave(&oprofilefs_lock, flags); |
76 | buffer_size = fs_buffer_size; | 76 | buffer_size = oprofile_buffer_size; |
77 | buffer_watershed = fs_buffer_watershed; | 77 | buffer_watershed = oprofile_buffer_watershed; |
78 | spin_unlock_irqrestore(&oprofilefs_lock, flags); | 78 | spin_unlock_irqrestore(&oprofilefs_lock, flags); |
79 | 79 | ||
80 | if (buffer_watershed >= buffer_size) | 80 | if (buffer_watershed >= buffer_size) |
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index cd375907f26f..3cffce90f82a 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c | |||
@@ -23,7 +23,7 @@ | |||
23 | struct oprofile_operations oprofile_ops; | 23 | struct oprofile_operations oprofile_ops; |
24 | 24 | ||
25 | unsigned long oprofile_started; | 25 | unsigned long oprofile_started; |
26 | unsigned long backtrace_depth; | 26 | unsigned long oprofile_backtrace_depth; |
27 | static unsigned long is_setup; | 27 | static unsigned long is_setup; |
28 | static DEFINE_MUTEX(start_mutex); | 28 | static DEFINE_MUTEX(start_mutex); |
29 | 29 | ||
@@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val) | |||
172 | goto out; | 172 | goto out; |
173 | } | 173 | } |
174 | 174 | ||
175 | backtrace_depth = val; | 175 | oprofile_backtrace_depth = val; |
176 | 176 | ||
177 | out: | 177 | out: |
178 | mutex_unlock(&start_mutex); | 178 | mutex_unlock(&start_mutex); |
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h index 5df0c21a608f..c288d3c24b50 100644 --- a/drivers/oprofile/oprof.h +++ b/drivers/oprofile/oprof.h | |||
@@ -21,12 +21,12 @@ void oprofile_stop(void); | |||
21 | 21 | ||
22 | struct oprofile_operations; | 22 | struct oprofile_operations; |
23 | 23 | ||
24 | extern unsigned long fs_buffer_size; | 24 | extern unsigned long oprofile_buffer_size; |
25 | extern unsigned long fs_cpu_buffer_size; | 25 | extern unsigned long oprofile_cpu_buffer_size; |
26 | extern unsigned long fs_buffer_watershed; | 26 | extern unsigned long oprofile_buffer_watershed; |
27 | extern struct oprofile_operations oprofile_ops; | 27 | extern struct oprofile_operations oprofile_ops; |
28 | extern unsigned long oprofile_started; | 28 | extern unsigned long oprofile_started; |
29 | extern unsigned long backtrace_depth; | 29 | extern unsigned long oprofile_backtrace_depth; |
30 | 30 | ||
31 | struct super_block; | 31 | struct super_block; |
32 | struct dentry; | 32 | struct dentry; |
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c index d8201998b0b7..5d36ffc30dd5 100644 --- a/drivers/oprofile/oprofile_files.c +++ b/drivers/oprofile/oprofile_files.c | |||
@@ -14,17 +14,18 @@ | |||
14 | #include "oprofile_stats.h" | 14 | #include "oprofile_stats.h" |
15 | #include "oprof.h" | 15 | #include "oprof.h" |
16 | 16 | ||
17 | #define FS_BUFFER_SIZE_DEFAULT 131072 | 17 | #define BUFFER_SIZE_DEFAULT 131072 |
18 | #define FS_CPU_BUFFER_SIZE_DEFAULT 8192 | 18 | #define CPU_BUFFER_SIZE_DEFAULT 8192 |
19 | #define FS_BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ | 19 | #define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ |
20 | 20 | ||
21 | unsigned long fs_buffer_size; | 21 | unsigned long oprofile_buffer_size; |
22 | unsigned long fs_cpu_buffer_size; | 22 | unsigned long oprofile_cpu_buffer_size; |
23 | unsigned long fs_buffer_watershed; | 23 | unsigned long oprofile_buffer_watershed; |
24 | 24 | ||
25 | static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) | 25 | static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) |
26 | { | 26 | { |
27 | return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset); | 27 | return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count, |
28 | offset); | ||
28 | } | 29 | } |
29 | 30 | ||
30 | 31 | ||
@@ -125,16 +126,16 @@ static const struct file_operations dump_fops = { | |||
125 | void oprofile_create_files(struct super_block *sb, struct dentry *root) | 126 | void oprofile_create_files(struct super_block *sb, struct dentry *root) |
126 | { | 127 | { |
127 | /* reinitialize default values */ | 128 | /* reinitialize default values */ |
128 | fs_buffer_size = FS_BUFFER_SIZE_DEFAULT; | 129 | oprofile_buffer_size = BUFFER_SIZE_DEFAULT; |
129 | fs_cpu_buffer_size = FS_CPU_BUFFER_SIZE_DEFAULT; | 130 | oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT; |
130 | fs_buffer_watershed = FS_BUFFER_WATERSHED_DEFAULT; | 131 | oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT; |
131 | 132 | ||
132 | oprofilefs_create_file(sb, root, "enable", &enable_fops); | 133 | oprofilefs_create_file(sb, root, "enable", &enable_fops); |
133 | oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); | 134 | oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); |
134 | oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); | 135 | oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); |
135 | oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size); | 136 | oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size); |
136 | oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed); | 137 | oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed); |
137 | oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size); | 138 | oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size); |
138 | oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); | 139 | oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); |
139 | oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); | 140 | oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); |
140 | oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); | 141 | oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); |
diff --git a/drivers/parisc/asp.c b/drivers/parisc/asp.c index 821369135369..7931133526c4 100644 --- a/drivers/parisc/asp.c +++ b/drivers/parisc/asp.c | |||
@@ -71,8 +71,7 @@ static void asp_choose_irq(struct parisc_device *dev, void *ctrl) | |||
71 | */ | 71 | */ |
72 | #define ASP_INTERRUPT_ADDR 0xf0800000 | 72 | #define ASP_INTERRUPT_ADDR 0xf0800000 |
73 | 73 | ||
74 | int __init | 74 | static int __init asp_init_chip(struct parisc_device *dev) |
75 | asp_init_chip(struct parisc_device *dev) | ||
76 | { | 75 | { |
77 | struct gsc_irq gsc_irq; | 76 | struct gsc_irq gsc_irq; |
78 | int ret; | 77 | int ret; |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index dcc1e9958d2f..cd4dd7ed2c06 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -555,7 +555,7 @@ static u32 hint_lookup[] = { | |||
555 | * (Load Coherence Index) instruction. The 8 bits used for the virtual | 555 | * (Load Coherence Index) instruction. The 8 bits used for the virtual |
556 | * index are bits 12:19 of the value returned by LCI. | 556 | * index are bits 12:19 of the value returned by LCI. |
557 | */ | 557 | */ |
558 | void CCIO_INLINE | 558 | static void CCIO_INLINE |
559 | ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, | 559 | ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, |
560 | unsigned long hints) | 560 | unsigned long hints) |
561 | { | 561 | { |
@@ -1578,8 +1578,6 @@ static int __init ccio_probe(struct parisc_device *dev) | |||
1578 | 1578 | ||
1579 | ioc_count++; | 1579 | ioc_count++; |
1580 | 1580 | ||
1581 | parisc_vmerge_boundary = IOVP_SIZE; | ||
1582 | parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE; | ||
1583 | parisc_has_iommu(); | 1581 | parisc_has_iommu(); |
1584 | return 0; | 1582 | return 0; |
1585 | } | 1583 | } |
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 77cc8bfef8c9..d539d9df88e7 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c | |||
@@ -287,7 +287,7 @@ DINO_PORT_OUT(b, 8, 3) | |||
287 | DINO_PORT_OUT(w, 16, 2) | 287 | DINO_PORT_OUT(w, 16, 2) |
288 | DINO_PORT_OUT(l, 32, 0) | 288 | DINO_PORT_OUT(l, 32, 0) |
289 | 289 | ||
290 | struct pci_port_ops dino_port_ops = { | 290 | static struct pci_port_ops dino_port_ops = { |
291 | .inb = dino_in8, | 291 | .inb = dino_in8, |
292 | .inw = dino_in16, | 292 | .inw = dino_in16, |
293 | .inl = dino_in32, | 293 | .inl = dino_in32, |
@@ -690,7 +690,7 @@ dino_fixup_bus(struct pci_bus *bus) | |||
690 | } | 690 | } |
691 | 691 | ||
692 | 692 | ||
693 | struct pci_bios_ops dino_bios_ops = { | 693 | static struct pci_bios_ops dino_bios_ops = { |
694 | .init = dino_bios_init, | 694 | .init = dino_bios_init, |
695 | .fixup_bus = dino_fixup_bus | 695 | .fixup_bus = dino_fixup_bus |
696 | }; | 696 | }; |
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c index 65eee67aa2ae..13856415b432 100644 --- a/drivers/parisc/hppb.c +++ b/drivers/parisc/hppb.c | |||
@@ -29,7 +29,7 @@ struct hppb_card { | |||
29 | struct hppb_card *next; | 29 | struct hppb_card *next; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | struct hppb_card hppb_card_head = { | 32 | static struct hppb_card hppb_card_head = { |
33 | .hpa = 0, | 33 | .hpa = 0, |
34 | .next = NULL, | 34 | .next = NULL, |
35 | }; | 35 | }; |
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c index bee510098ce8..e65727ca9fc0 100644 --- a/drivers/parisc/lasi.c +++ b/drivers/parisc/lasi.c | |||
@@ -107,7 +107,7 @@ lasi_init_irq(struct gsc_asic *this_lasi) | |||
107 | 107 | ||
108 | #else | 108 | #else |
109 | 109 | ||
110 | void __init lasi_led_init(unsigned long lasi_hpa) | 110 | static void __init lasi_led_init(unsigned long lasi_hpa) |
111 | { | 111 | { |
112 | unsigned long datareg; | 112 | unsigned long datareg; |
113 | 113 | ||
@@ -163,8 +163,7 @@ static void lasi_power_off(void) | |||
163 | gsc_writel(0x02, datareg); | 163 | gsc_writel(0x02, datareg); |
164 | } | 164 | } |
165 | 165 | ||
166 | int __init | 166 | static int __init lasi_init_chip(struct parisc_device *dev) |
167 | lasi_init_chip(struct parisc_device *dev) | ||
168 | { | 167 | { |
169 | extern void (*chassis_power_off)(void); | 168 | extern void (*chassis_power_off)(void); |
170 | struct gsc_asic *lasi; | 169 | struct gsc_asic *lasi; |
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index a28c8946deaa..d8233de8c75d 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c | |||
@@ -824,7 +824,7 @@ lba_fixup_bus(struct pci_bus *bus) | |||
824 | } | 824 | } |
825 | 825 | ||
826 | 826 | ||
827 | struct pci_bios_ops lba_bios_ops = { | 827 | static struct pci_bios_ops lba_bios_ops = { |
828 | .init = lba_bios_init, | 828 | .init = lba_bios_init, |
829 | .fixup_bus = lba_fixup_bus, | 829 | .fixup_bus = lba_fixup_bus, |
830 | }; | 830 | }; |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index bc73b96346ff..3fac8f81d59d 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -561,7 +561,7 @@ typedef unsigned long space_t; | |||
561 | * IOMMU uses little endian for the pdir. | 561 | * IOMMU uses little endian for the pdir. |
562 | */ | 562 | */ |
563 | 563 | ||
564 | void SBA_INLINE | 564 | static void SBA_INLINE |
565 | sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, | 565 | sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, |
566 | unsigned long hint) | 566 | unsigned long hint) |
567 | { | 567 | { |
@@ -1874,7 +1874,7 @@ static struct parisc_device_id sba_tbl[] = { | |||
1874 | { 0, } | 1874 | { 0, } |
1875 | }; | 1875 | }; |
1876 | 1876 | ||
1877 | int sba_driver_callback(struct parisc_device *); | 1877 | static int sba_driver_callback(struct parisc_device *); |
1878 | 1878 | ||
1879 | static struct parisc_driver sba_driver = { | 1879 | static struct parisc_driver sba_driver = { |
1880 | .name = MODULE_NAME, | 1880 | .name = MODULE_NAME, |
@@ -1887,8 +1887,7 @@ static struct parisc_driver sba_driver = { | |||
1887 | ** If so, initialize the chip and tell other partners in crime they | 1887 | ** If so, initialize the chip and tell other partners in crime they |
1888 | ** have work to do. | 1888 | ** have work to do. |
1889 | */ | 1889 | */ |
1890 | int | 1890 | static int sba_driver_callback(struct parisc_device *dev) |
1891 | sba_driver_callback(struct parisc_device *dev) | ||
1892 | { | 1891 | { |
1893 | struct sba_device *sba_dev; | 1892 | struct sba_device *sba_dev; |
1894 | u32 func_class; | 1893 | u32 func_class; |
@@ -1979,8 +1978,6 @@ sba_driver_callback(struct parisc_device *dev) | |||
1979 | proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops); | 1978 | proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops); |
1980 | #endif | 1979 | #endif |
1981 | 1980 | ||
1982 | parisc_vmerge_boundary = IOVP_SIZE; | ||
1983 | parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG; | ||
1984 | parisc_has_iommu(); | 1981 | parisc_has_iommu(); |
1985 | return 0; | 1982 | return 0; |
1986 | } | 1983 | } |
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c index 892a83bbe73d..da9d5ad1353c 100644 --- a/drivers/parisc/wax.c +++ b/drivers/parisc/wax.c | |||
@@ -68,8 +68,7 @@ wax_init_irq(struct gsc_asic *wax) | |||
68 | // gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */ | 68 | // gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */ |
69 | } | 69 | } |
70 | 70 | ||
71 | int __init | 71 | static int __init wax_init_chip(struct parisc_device *dev) |
72 | wax_init_chip(struct parisc_device *dev) | ||
73 | { | 72 | { |
74 | struct gsc_asic *wax; | 73 | struct gsc_asic *wax; |
75 | struct parisc_device *parent; | 74 | struct parisc_device *parent; |
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index c62ab8d240aa..1c1141801060 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/pci-acpi.h> | 33 | #include <linux/pci-acpi.h> |
34 | #include <acpi/acpi.h> | 34 | #include <acpi/acpi.h> |
35 | #include <acpi/acpi_bus.h> | 35 | #include <acpi/acpi_bus.h> |
36 | #include <acpi/actypes.h> | ||
37 | 36 | ||
38 | #define MY_NAME "acpi_pcihp" | 37 | #define MY_NAME "acpi_pcihp" |
39 | 38 | ||
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 27fd18f019f8..db85284ffb62 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -217,7 +217,6 @@ struct hpc_ops { | |||
217 | #ifdef CONFIG_ACPI | 217 | #ifdef CONFIG_ACPI |
218 | #include <acpi/acpi.h> | 218 | #include <acpi/acpi.h> |
219 | #include <acpi/acpi_bus.h> | 219 | #include <acpi/acpi_bus.h> |
220 | #include <acpi/actypes.h> | ||
221 | #include <linux/pci-acpi.h> | 220 | #include <linux/pci-acpi.h> |
222 | 221 | ||
223 | extern void __init pciehp_acpi_slot_detection_init(void); | 222 | extern void __init pciehp_acpi_slot_detection_init(void); |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 3582512e7226..deea8a187eb8 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -13,8 +13,6 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/pci-aspm.h> | 14 | #include <linux/pci-aspm.h> |
15 | #include <acpi/acpi.h> | 15 | #include <acpi/acpi.h> |
16 | #include <acpi/acnamesp.h> | ||
17 | #include <acpi/acresrc.h> | ||
18 | #include <acpi/acpi_bus.h> | 16 | #include <acpi/acpi_bus.h> |
19 | 17 | ||
20 | #include <linux/pci-acpi.h> | 18 | #include <linux/pci-acpi.h> |
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig new file mode 100644 index 000000000000..9652c3fe7f5e --- /dev/null +++ b/drivers/platform/Kconfig | |||
@@ -0,0 +1,5 @@ | |||
1 | # drivers/platform/Kconfig | ||
2 | |||
3 | if X86 | ||
4 | source "drivers/platform/x86/Kconfig" | ||
5 | endif | ||
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile new file mode 100644 index 000000000000..782953ae4c03 --- /dev/null +++ b/drivers/platform/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | # | ||
2 | # Makefile for linux/drivers/platform | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_X86) += x86/ | ||
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig new file mode 100644 index 000000000000..e65448e99b48 --- /dev/null +++ b/drivers/platform/x86/Kconfig | |||
@@ -0,0 +1,375 @@ | |||
1 | # | ||
2 | # X86 Platform Specific Drivers | ||
3 | # | ||
4 | |||
5 | menuconfig X86_PLATFORM_DEVICES | ||
6 | bool "X86 Platform Specific Device Drivers" | ||
7 | default y | ||
8 | ---help--- | ||
9 | Say Y here to get to see options for device drivers for various | ||
10 | x86 platforms, including vendor-specific laptop extension drivers. | ||
11 | This option alone does not add any kernel code. | ||
12 | |||
13 | If you say N, all options in this submenu will be skipped and disabled. | ||
14 | |||
15 | if X86_PLATFORM_DEVICES | ||
16 | |||
17 | config ACER_WMI | ||
18 | tristate "Acer WMI Laptop Extras (EXPERIMENTAL)" | ||
19 | depends on EXPERIMENTAL | ||
20 | depends on ACPI | ||
21 | depends on LEDS_CLASS | ||
22 | depends on NEW_LEDS | ||
23 | depends on BACKLIGHT_CLASS_DEVICE | ||
24 | depends on SERIO_I8042 | ||
25 | depends on RFKILL | ||
26 | select ACPI_WMI | ||
27 | ---help--- | ||
28 | This is a driver for newer Acer (and Wistron) laptops. It adds | ||
29 | wireless radio and bluetooth control, and on some laptops, | ||
30 | exposes the mail LED and LCD backlight. | ||
31 | |||
32 | For more information about this driver see | ||
33 | <file:Documentation/laptops/acer-wmi.txt> | ||
34 | |||
35 | If you have an ACPI-WMI compatible Acer/Wistron laptop, say Y or M | ||
36 | here. | ||
37 | |||
38 | config ASUS_LAPTOP | ||
39 | tristate "Asus Laptop Extras (EXPERIMENTAL)" | ||
40 | depends on ACPI | ||
41 | depends on EXPERIMENTAL && !ACPI_ASUS | ||
42 | depends on LEDS_CLASS | ||
43 | depends on NEW_LEDS | ||
44 | depends on BACKLIGHT_CLASS_DEVICE | ||
45 | ---help--- | ||
46 | This is the new Linux driver for Asus laptops. It may also support some | ||
47 | MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate | ||
48 | standard ACPI events that go through /proc/acpi/events. It also adds | ||
49 | support for video output switching, LCD backlight control, Bluetooth and | ||
50 | Wlan control, and most importantly, allows you to blink those fancy LEDs. | ||
51 | |||
52 | For more information and a userspace daemon for handling the extra | ||
53 | buttons see <http://acpi4asus.sf.net/>. | ||
54 | |||
55 | If you have an ACPI-compatible ASUS laptop, say Y or M here. | ||
56 | |||
57 | config FUJITSU_LAPTOP | ||
58 | tristate "Fujitsu Laptop Extras" | ||
59 | depends on ACPI | ||
60 | depends on INPUT | ||
61 | depends on BACKLIGHT_CLASS_DEVICE | ||
62 | ---help--- | ||
63 | This is a driver for laptops built by Fujitsu: | ||
64 | |||
65 | * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks | ||
66 | * Possibly other Fujitsu laptop models | ||
67 | * Tested with S6410 and S7020 | ||
68 | |||
69 | It adds support for LCD brightness control and some hotkeys. | ||
70 | |||
71 | If you have a Fujitsu laptop, say Y or M here. | ||
72 | |||
73 | config FUJITSU_LAPTOP_DEBUG | ||
74 | bool "Verbose debug mode for Fujitsu Laptop Extras" | ||
75 | depends on FUJITSU_LAPTOP | ||
76 | default n | ||
77 | ---help--- | ||
78 | Enables extra debug output from the fujitsu extras driver, at the | ||
79 | expense of a slight increase in driver size. | ||
80 | |||
81 | If you are not sure, say N here. | ||
82 | |||
83 | config TC1100_WMI | ||
84 | tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)" | ||
85 | depends on !X86_64 | ||
86 | depends on EXPERIMENTAL | ||
87 | depends on ACPI | ||
88 | select ACPI_WMI | ||
89 | ---help--- | ||
90 | This is a driver for the WMI extensions (wireless and bluetooth power | ||
91 | control) of the HP Compaq TC1100 tablet. | ||
92 | |||
93 | config HP_WMI | ||
94 | tristate "HP WMI extras" | ||
95 | depends on ACPI_WMI | ||
96 | depends on INPUT | ||
97 | depends on RFKILL | ||
98 | help | ||
99 | Say Y here if you want to support WMI-based hotkeys on HP laptops and | ||
100 | to read data from WMI such as docking or ambient light sensor state. | ||
101 | |||
102 | To compile this driver as a module, choose M here: the module will | ||
103 | be called hp-wmi. | ||
104 | |||
105 | config MSI_LAPTOP | ||
106 | tristate "MSI Laptop Extras" | ||
107 | depends on ACPI | ||
108 | depends on BACKLIGHT_CLASS_DEVICE | ||
109 | ---help--- | ||
110 | This is a driver for laptops built by MSI (MICRO-STAR | ||
111 | INTERNATIONAL): | ||
112 | |||
113 | MSI MegaBook S270 (MS-1013) | ||
114 | Cytron/TCM/Medion/Tchibo MD96100/SAM2000 | ||
115 | |||
116 | It adds support for Bluetooth, WLAN and LCD brightness control. | ||
117 | |||
118 | More information about this driver is available at | ||
119 | <http://0pointer.de/lennart/tchibo.html>. | ||
120 | |||
121 | If you have an MSI S270 laptop, say Y or M here. | ||
122 | |||
123 | config PANASONIC_LAPTOP | ||
124 | tristate "Panasonic Laptop Extras" | ||
125 | depends on INPUT && ACPI | ||
126 | depends on BACKLIGHT_CLASS_DEVICE | ||
127 | ---help--- | ||
128 | This driver adds support for access to backlight control and hotkeys | ||
129 | on Panasonic Let's Note laptops. | ||
130 | |||
131 | If you have a Panasonic Let's Note laptop (such as the R1(N variant), | ||
132 | R2, R3, R5, T2, W2 and Y2 series), say Y. | ||
133 | |||
134 | config COMPAL_LAPTOP | ||
135 | tristate "Compal Laptop Extras" | ||
136 | depends on ACPI | ||
137 | depends on BACKLIGHT_CLASS_DEVICE | ||
138 | ---help--- | ||
139 | This is a driver for laptops built by Compal: | ||
140 | |||
141 | Compal FL90/IFL90 | ||
142 | Compal FL91/IFL91 | ||
143 | Compal FL92/JFL92 | ||
144 | Compal FT00/IFT00 | ||
145 | |||
146 | It adds support for Bluetooth, WLAN and LCD brightness control. | ||
147 | |||
148 | If you have a Compal FL9x/IFL9x/FT00 laptop, say Y or M here. | ||
149 | |||
150 | config SONY_LAPTOP | ||
151 | tristate "Sony Laptop Extras" | ||
152 | depends on ACPI | ||
153 | select BACKLIGHT_CLASS_DEVICE | ||
154 | depends on INPUT | ||
155 | ---help--- | ||
156 | This mini-driver drives the SNC and SPIC devices present in the ACPI | ||
157 | BIOS of the Sony Vaio laptops. | ||
158 | |||
159 | It gives access to some extra laptop functionalities like Bluetooth, | ||
160 | screen brightness control, Fn keys and allows powering on/off some | ||
161 | devices. | ||
162 | |||
163 | Read <file:Documentation/laptops/sony-laptop.txt> for more information. | ||
164 | |||
165 | config SONYPI_COMPAT | ||
166 | bool "Sonypi compatibility" | ||
167 | depends on SONY_LAPTOP | ||
168 | ---help--- | ||
169 | Build the sonypi driver compatibility code into the sony-laptop driver. | ||
170 | |||
171 | config THINKPAD_ACPI | ||
172 | tristate "ThinkPad ACPI Laptop Extras" | ||
173 | depends on ACPI | ||
174 | select BACKLIGHT_LCD_SUPPORT | ||
175 | select BACKLIGHT_CLASS_DEVICE | ||
176 | select HWMON | ||
177 | select NVRAM | ||
178 | select INPUT | ||
179 | select NEW_LEDS | ||
180 | select LEDS_CLASS | ||
181 | select NET | ||
182 | select RFKILL | ||
183 | ---help--- | ||
184 | This is a driver for the IBM and Lenovo ThinkPad laptops. It adds | ||
185 | support for Fn-Fx key combinations, Bluetooth control, video | ||
186 | output switching, ThinkLight control, UltraBay eject and more. | ||
187 | For more information about this driver see | ||
188 | <file:Documentation/laptops/thinkpad-acpi.txt> and | ||
189 | <http://ibm-acpi.sf.net/> . | ||
190 | |||
191 | This driver was formerly known as ibm-acpi. | ||
192 | |||
193 | If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. | ||
194 | |||
195 | config THINKPAD_ACPI_DEBUG | ||
196 | bool "Verbose debug mode" | ||
197 | depends on THINKPAD_ACPI | ||
198 | default n | ||
199 | ---help--- | ||
200 | Enables extra debugging information, at the expense of a slight | ||
201 | increase in driver size. | ||
202 | |||
203 | If you are not sure, say N here. | ||
204 | |||
205 | config THINKPAD_ACPI_DOCK | ||
206 | bool "Legacy Docking Station Support" | ||
207 | depends on THINKPAD_ACPI | ||
208 | depends on ACPI_DOCK=n | ||
209 | default n | ||
210 | ---help--- | ||
211 | Allows the thinkpad_acpi driver to handle docking station events. | ||
212 | This support was made obsolete by the generic ACPI docking station | ||
213 | support (CONFIG_ACPI_DOCK). It will allow locking and removing the | ||
214 | laptop from the docking station, but will not properly connect PCI | ||
215 | devices. | ||
216 | |||
217 | If you are not sure, say N here. | ||
218 | |||
219 | config THINKPAD_ACPI_BAY | ||
220 | bool "Legacy Removable Bay Support" | ||
221 | depends on THINKPAD_ACPI | ||
222 | default y | ||
223 | ---help--- | ||
224 | Allows the thinkpad_acpi driver to handle removable bays. It will | ||
225 | electrically disable the device in the bay, and also generate | ||
226 | notifications when the bay lever is ejected or inserted. | ||
227 | |||
228 | If you are not sure, say Y here. | ||
229 | |||
230 | config THINKPAD_ACPI_VIDEO | ||
231 | bool "Video output control support" | ||
232 | depends on THINKPAD_ACPI | ||
233 | default y | ||
234 | ---help--- | ||
235 | Allows the thinkpad_acpi driver to provide an interface to control | ||
236 | the various video output ports. | ||
237 | |||
238 | This feature often won't work well, depending on ThinkPad model, | ||
239 | display state, video output devices in use, whether there is an X | ||
240 | server running, phase of the moon, and the current mood of | ||
241 | Schroedinger's cat. If you can use X.org's RandR to control | ||
242 | your ThinkPad's video output ports instead of this feature, | ||
243 | don't think twice: do it and say N here to save some memory. | ||
244 | |||
245 | If you are not sure, say Y here. | ||
246 | |||
247 | config THINKPAD_ACPI_HOTKEY_POLL | ||
248 | bool "Support NVRAM polling for hot keys" | ||
249 | depends on THINKPAD_ACPI | ||
250 | default y | ||
251 | ---help--- | ||
252 | Some ThinkPad models benefit from NVRAM polling to detect a few of | ||
253 | the hot key press events. If you know your ThinkPad model does not | ||
254 | need to do NVRAM polling to support any of the hot keys you use, | ||
255 | unselecting this option will save about 1kB of memory. | ||
256 | |||
257 | ThinkPads T40 and newer, R52 and newer, and X31 and newer are | ||
258 | unlikely to need NVRAM polling in their latest BIOS versions. | ||
259 | |||
260 | NVRAM polling can detect at most the following keys: ThinkPad/Access | ||
261 | IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute, | ||
262 | Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12). | ||
263 | |||
264 | If you are not sure, say Y here. The driver enables polling only if | ||
265 | it is strictly necessary to do so. | ||
266 | |||
267 | config INTEL_MENLOW | ||
268 | tristate "Thermal Management driver for Intel menlow platform" | ||
269 | depends on ACPI_THERMAL | ||
270 | select THERMAL | ||
271 | ---help--- | ||
272 | ACPI thermal management enhancement driver for the | ||
273 | Intel Menlow platform. | ||
274 | |||
275 | If unsure, say N. | ||
276 | |||
277 | config EEEPC_LAPTOP | ||
278 | tristate "Eee PC Hotkey Driver (EXPERIMENTAL)" | ||
279 | depends on ACPI | ||
280 | depends on EXPERIMENTAL | ||
281 | select BACKLIGHT_CLASS_DEVICE | ||
282 | select HWMON | ||
283 | select RFKILL | ||
284 | ---help--- | ||
285 | This driver supports the Fn-Fx keys on Eee PC laptops. | ||
286 | It also adds the ability to switch camera/wlan on/off. | ||
287 | |||
288 | If you have an Eee PC laptop, say Y or M here. | ||
289 | |||
290 | |||
291 | config ACPI_WMI | ||
292 | tristate "WMI (EXPERIMENTAL)" | ||
293 | depends on ACPI | ||
294 | depends on EXPERIMENTAL | ||
295 | help | ||
296 | This driver adds support for the ACPI-WMI (Windows Management | ||
297 | Instrumentation) mapper device (PNP0C14) found on some systems. | ||
298 | |||
299 | ACPI-WMI is a proprietary extension to ACPI to expose parts of the | ||
300 | ACPI firmware to userspace - this is done through various vendor | ||
301 | defined methods and data blocks in a PNP0C14 device, which are then | ||
302 | made available for userspace to call. | ||
303 | |||
304 | The implementation of this in Linux currently only exposes this to | ||
305 | other kernel space drivers. | ||
306 | |||
307 | This driver is a required dependency to build the firmware specific | ||
308 | drivers needed on many machines, including Acer and HP laptops. | ||
309 | |||
310 | It is safe to enable this driver even if your DSDT doesn't define | ||
311 | any ACPI-WMI devices. | ||
312 | |||
313 | config ACPI_ASUS | ||
314 | tristate "ASUS/Medion Laptop Extras" | ||
315 | depends on ACPI | ||
316 | select BACKLIGHT_CLASS_DEVICE | ||
317 | ---help--- | ||
318 | This driver provides support for extra features of ACPI-compatible | ||
319 | ASUS laptops. As some Medion laptops are made by ASUS, it may also | ||
320 | support some Medion models (such as the 9675). It makes all | ||
321 | the extra buttons generate standard ACPI events that go through | ||
322 | /proc/acpi/events, and (on some models) adds support for changing the | ||
323 | display brightness and output, switching the LCD backlight on and off, | ||
324 | and most importantly, allows you to blink those fancy LEDs intended | ||
325 | for reporting mail and wireless status. | ||
326 | |||
327 | Note: display switching code is currently considered EXPERIMENTAL; | ||
328 | toying with these values may even lock your machine. | ||
329 | |||
330 | All settings are changed via /proc/acpi/asus directory entries. Owner | ||
331 | and group for these entries can be set with asus_uid and asus_gid | ||
332 | parameters. | ||
333 | |||
334 | More information and a userspace daemon for handling the extra buttons | ||
335 | are available at <http://sourceforge.net/projects/acpi4asus/>. | ||
336 | |||
337 | If you have an ACPI-compatible ASUS laptop, say Y or M here. This | ||
338 | driver is still under development, so if your laptop is unsupported or | ||
339 | something does not work quite as expected, please use the mailing list | ||
340 | available on the above page (acpi4asus-user@lists.sourceforge.net). | ||
341 | |||
342 | NOTE: This driver is deprecated and will probably be removed soon, | ||
343 | use asus-laptop instead. | ||
344 | |||
345 | config ACPI_TOSHIBA | ||
346 | tristate "Toshiba Laptop Extras" | ||
347 | depends on ACPI | ||
348 | depends on INPUT | ||
349 | select INPUT_POLLDEV | ||
350 | select NET | ||
351 | select RFKILL | ||
352 | select BACKLIGHT_CLASS_DEVICE | ||
353 | ---help--- | ||
354 | This driver adds support for access to certain system settings | ||
355 | on "legacy free" Toshiba laptops. These laptops can be recognized by | ||
356 | their lack of a BIOS setup menu and APM support. | ||
357 | |||
358 | On these machines, all system configuration is handled through | ||
359 | ACPI. This driver is required for access to controls not covered | ||
360 | by the general ACPI drivers, such as LCD brightness, video output, | ||
361 | etc. | ||
362 | |||
363 | This driver differs from the non-ACPI Toshiba laptop driver (located | ||
364 | under "Processor type and features") in several aspects. | ||
365 | Configuration is accessed by reading and writing text files in the | ||
366 | /proc tree instead of by program interface to /dev. Furthermore, no | ||
367 | power management functions are exposed, as those are handled by the | ||
368 | general ACPI drivers. | ||
369 | |||
370 | More information about this driver is available at | ||
371 | <http://memebeam.org/toys/ToshibaAcpiDriver>. | ||
372 | |||
373 | If you have a legacy free Toshiba laptop (such as the Libretto L1 | ||
374 | series), say Y. | ||
375 | endif # X86_PLATFORM_DEVICES | ||
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile new file mode 100644 index 000000000000..1e9de2ae0de5 --- /dev/null +++ b/drivers/platform/x86/Makefile | |||
@@ -0,0 +1,19 @@ | |||
1 | # | ||
2 | # Makefile for linux/drivers/platform/x86 | ||
3 | # x86 Platform-Specific Drivers | ||
4 | # | ||
5 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o | ||
6 | obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o | ||
7 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o | ||
8 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | ||
9 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o | ||
10 | obj-$(CONFIG_HP_WMI) += hp-wmi.o | ||
11 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o | ||
12 | obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o | ||
13 | obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o | ||
14 | obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o | ||
15 | obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o | ||
16 | obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o | ||
17 | obj-$(CONFIG_ACPI_WMI) += wmi.o | ||
18 | obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o | ||
19 | obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o | ||
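With the move into drivers/platform/x86, the same drivers are now selected under the new X86_PLATFORM_DEVICES menu. An illustrative .config fragment using only option names from the Kconfig file above (the actual selection is per-machine):

	CONFIG_X86_PLATFORM_DEVICES=y
	CONFIG_ACPI_WMI=m
	CONFIG_HP_WMI=m
	CONFIG_FUJITSU_LAPTOP=m
	# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
	CONFIG_THINKPAD_ACPI=m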
diff --git a/drivers/misc/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 94c9f911824e..94c9f911824e 100644 --- a/drivers/misc/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
diff --git a/drivers/misc/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 8fb8b3591048..8fb8b3591048 100644 --- a/drivers/misc/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
diff --git a/drivers/acpi/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 1e74988c7b2d..1e74988c7b2d 100644 --- a/drivers/acpi/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c | |||
diff --git a/drivers/misc/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index 11003bba10d3..11003bba10d3 100644 --- a/drivers/misc/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c | |||
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 02fe2b8b8939..02fe2b8b8939 100644 --- a/drivers/misc/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index a7dd3e9fb79d..65dc41540c62 100644 --- a/drivers/misc/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
@@ -3,6 +3,7 @@ | |||
3 | /* | 3 | /* |
4 | Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> | 4 | Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> |
5 | Copyright (C) 2008 Peter Gruber <nokos@gmx.net> | 5 | Copyright (C) 2008 Peter Gruber <nokos@gmx.net> |
6 | Copyright (C) 2008 Tony Vroon <tony@linx.net> | ||
6 | Based on earlier work: | 7 | Based on earlier work: |
7 | Copyright (C) 2003 Shane Spencer <shane@bogomip.com> | 8 | Copyright (C) 2003 Shane Spencer <shane@bogomip.com> |
8 | Adrian Yee <brewt-fujitsu@brewt.org> | 9 | Adrian Yee <brewt-fujitsu@brewt.org> |
@@ -65,8 +66,11 @@ | |||
65 | #include <linux/kfifo.h> | 66 | #include <linux/kfifo.h> |
66 | #include <linux/video_output.h> | 67 | #include <linux/video_output.h> |
67 | #include <linux/platform_device.h> | 68 | #include <linux/platform_device.h> |
69 | #ifdef CONFIG_LEDS_CLASS | ||
70 | #include <linux/leds.h> | ||
71 | #endif | ||
68 | 72 | ||
69 | #define FUJITSU_DRIVER_VERSION "0.4.3" | 73 | #define FUJITSU_DRIVER_VERSION "0.5.0" |
70 | 74 | ||
71 | #define FUJITSU_LCD_N_LEVELS 8 | 75 | #define FUJITSU_LCD_N_LEVELS 8 |
72 | 76 | ||
@@ -83,6 +87,24 @@ | |||
83 | #define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 | 87 | #define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 |
84 | #define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 | 88 | #define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 |
85 | 89 | ||
90 | /* FUNC interface - command values */ | ||
91 | #define FUNC_RFKILL 0x1000 | ||
92 | #define FUNC_LEDS 0x1001 | ||
93 | #define FUNC_BUTTONS 0x1002 | ||
94 | #define FUNC_BACKLIGHT 0x1004 | ||
95 | |||
96 | /* FUNC interface - responses */ | ||
97 | #define UNSUPPORTED_CMD 0x80000000 | ||
98 | |||
99 | #ifdef CONFIG_LEDS_CLASS | ||
100 | /* FUNC interface - LED control */ | ||
101 | #define FUNC_LED_OFF 0x1 | ||
102 | #define FUNC_LED_ON 0x30001 | ||
103 | #define KEYBOARD_LAMPS 0x100 | ||
104 | #define LOGOLAMP_POWERON 0x2000 | ||
105 | #define LOGOLAMP_ALWAYS 0x4000 | ||
106 | #endif | ||
107 | |||
86 | /* Hotkey details */ | 108 | /* Hotkey details */ |
87 | #define KEY1_CODE 0x410 /* codes for the keys in the GIRB register */ | 109 | #define KEY1_CODE 0x410 /* codes for the keys in the GIRB register */ |
88 | #define KEY2_CODE 0x411 | 110 | #define KEY2_CODE 0x411 |
@@ -133,7 +155,6 @@ struct fujitsu_t { | |||
133 | 155 | ||
134 | static struct fujitsu_t *fujitsu; | 156 | static struct fujitsu_t *fujitsu; |
135 | static int use_alt_lcd_levels = -1; | 157 | static int use_alt_lcd_levels = -1; |
136 | static int disable_brightness_keys = -1; | ||
137 | static int disable_brightness_adjust = -1; | 158 | static int disable_brightness_adjust = -1; |
138 | 159 | ||
139 | /* Device used to access other hotkeys on the laptop */ | 160 | /* Device used to access other hotkeys on the laptop */ |
@@ -145,8 +166,9 @@ struct fujitsu_hotkey_t { | |||
145 | struct platform_device *pf_device; | 166 | struct platform_device *pf_device; |
146 | struct kfifo *fifo; | 167 | struct kfifo *fifo; |
147 | spinlock_t fifo_lock; | 168 | spinlock_t fifo_lock; |
148 | 169 | int rfkill_state; | |
149 | unsigned int irb; /* info about the pressed buttons */ | 170 | int logolamp_registered; |
171 | int kblamps_registered; | ||
150 | }; | 172 | }; |
151 | 173 | ||
152 | static struct fujitsu_hotkey_t *fujitsu_hotkey; | 174 | static struct fujitsu_hotkey_t *fujitsu_hotkey; |
@@ -154,12 +176,139 @@ static struct fujitsu_hotkey_t *fujitsu_hotkey; | |||
154 | static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, | 176 | static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, |
155 | void *data); | 177 | void *data); |
156 | 178 | ||
179 | #ifdef CONFIG_LEDS_CLASS | ||
180 | static enum led_brightness logolamp_get(struct led_classdev *cdev); | ||
181 | static void logolamp_set(struct led_classdev *cdev, | ||
182 | enum led_brightness brightness); | ||
183 | |||
184 | struct led_classdev logolamp_led = { | ||
185 | .name = "fujitsu::logolamp", | ||
186 | .brightness_get = logolamp_get, | ||
187 | .brightness_set = logolamp_set | ||
188 | }; | ||
189 | |||
190 | static enum led_brightness kblamps_get(struct led_classdev *cdev); | ||
191 | static void kblamps_set(struct led_classdev *cdev, | ||
192 | enum led_brightness brightness); | ||
193 | |||
194 | struct led_classdev kblamps_led = { | ||
195 | .name = "fujitsu::kblamps", | ||
196 | .brightness_get = kblamps_get, | ||
197 | .brightness_set = kblamps_set | ||
198 | }; | ||
199 | #endif | ||
200 | |||
157 | #ifdef CONFIG_FUJITSU_LAPTOP_DEBUG | 201 | #ifdef CONFIG_FUJITSU_LAPTOP_DEBUG |
158 | static u32 dbg_level = 0x03; | 202 | static u32 dbg_level = 0x03; |
159 | #endif | 203 | #endif |
160 | 204 | ||
161 | static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); | 205 | static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); |
162 | 206 | ||
207 | /* Fujitsu ACPI interface function */ | ||
208 | |||
209 | static int call_fext_func(int cmd, int arg0, int arg1, int arg2) | ||
210 | { | ||
211 | acpi_status status = AE_OK; | ||
212 | union acpi_object params[4] = { | ||
213 | { .type = ACPI_TYPE_INTEGER }, | ||
214 | { .type = ACPI_TYPE_INTEGER }, | ||
215 | { .type = ACPI_TYPE_INTEGER }, | ||
216 | { .type = ACPI_TYPE_INTEGER } | ||
217 | }; | ||
218 | struct acpi_object_list arg_list = { 4, &params[0] }; | ||
219 | struct acpi_buffer output; | ||
220 | union acpi_object out_obj; | ||
221 | acpi_handle handle = NULL; | ||
222 | |||
223 | status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle); | ||
224 | if (ACPI_FAILURE(status)) { | ||
225 | vdbg_printk(FUJLAPTOP_DBG_ERROR, | ||
226 | "FUNC interface is not present\n"); | ||
227 | return -ENODEV; | ||
228 | } | ||
229 | |||
230 | params[0].integer.value = cmd; | ||
231 | params[1].integer.value = arg0; | ||
232 | params[2].integer.value = arg1; | ||
233 | params[3].integer.value = arg2; | ||
234 | |||
235 | output.length = sizeof(out_obj); | ||
236 | output.pointer = &out_obj; | ||
237 | |||
238 | status = acpi_evaluate_object(handle, NULL, &arg_list, &output); | ||
239 | if (ACPI_FAILURE(status)) { | ||
240 | vdbg_printk(FUJLAPTOP_DBG_WARN, | ||
241 | "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n", | ||
242 | cmd, arg0, arg1, arg2); | ||
243 | return -ENODEV; | ||
244 | } | ||
245 | |||
246 | if (out_obj.type != ACPI_TYPE_INTEGER) { | ||
247 | vdbg_printk(FUJLAPTOP_DBG_WARN, | ||
248 | "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not " | ||
249 | "return an integer\n", | ||
250 | cmd, arg0, arg1, arg2); | ||
251 | return -ENODEV; | ||
252 | } | ||
253 | |||
254 | vdbg_printk(FUJLAPTOP_DBG_TRACE, | ||
255 | "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n", | ||
256 | cmd, arg0, arg1, arg2, (int)out_obj.integer.value); | ||
257 | return out_obj.integer.value; | ||
258 | } | ||
259 | |||
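For orientation, the later hunks call this helper with the FUNC_* command values defined above. Below is a sketch, not part of the patch, of reading the status word that the new lid/dock/radios sysfs files decode; the bit meanings are taken from show_lid_state()/show_dock_state()/show_radios_state() further down.

	/* sketch only: read the FUNC_RFKILL status word and report lid/dock/radio state */
	static void demo_report_rfkill_state(void)
	{
		int state = call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);

		if (state == UNSUPPORTED_CMD) {	/* firmware lacks this command */
			pr_info("fujitsu-laptop: FUNC_RFKILL not supported\n");
			return;
		}
		pr_info("fujitsu-laptop: lid %s, dock %s, radios %s\n",
			(state & 0x100) ? "open" : "closed",
			(state & 0x200) ? "docked" : "undocked",
			(state & 0x20) ? "on" : "killed");
	}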
260 | #ifdef CONFIG_LEDS_CLASS | ||
261 | /* LED class callbacks */ | ||
262 | |||
263 | static void logolamp_set(struct led_classdev *cdev, | ||
264 | enum led_brightness brightness) | ||
265 | { | ||
266 | if (brightness >= LED_FULL) { | ||
267 | call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON); | ||
268 | call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON); | ||
269 | } else if (brightness >= LED_HALF) { | ||
270 | call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON); | ||
271 | call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF); | ||
272 | } else { | ||
273 | call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF); | ||
274 | } | ||
275 | } | ||
276 | |||
277 | static void kblamps_set(struct led_classdev *cdev, | ||
278 | enum led_brightness brightness) | ||
279 | { | ||
280 | if (brightness >= LED_FULL) | ||
281 | call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON); | ||
282 | else | ||
283 | call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF); | ||
284 | } | ||
285 | |||
286 | static enum led_brightness logolamp_get(struct led_classdev *cdev) | ||
287 | { | ||
288 | enum led_brightness brightness = LED_OFF; | ||
289 | int poweron, always; | ||
290 | |||
291 | poweron = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0); | ||
292 | if (poweron == FUNC_LED_ON) { | ||
293 | brightness = LED_HALF; | ||
294 | always = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0); | ||
295 | if (always == FUNC_LED_ON) | ||
296 | brightness = LED_FULL; | ||
297 | } | ||
298 | return brightness; | ||
299 | } | ||
300 | |||
301 | static enum led_brightness kblamps_get(struct led_classdev *cdev) | ||
302 | { | ||
303 | enum led_brightness brightness = LED_OFF; | ||
304 | |||
305 | if (call_fext_func(FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON) | ||
306 | brightness = LED_FULL; | ||
307 | |||
308 | return brightness; | ||
309 | } | ||
310 | #endif | ||
311 | |||
163 | /* Hardware access for LCD brightness control */ | 312 | /* Hardware access for LCD brightness control */ |
164 | 313 | ||
165 | static int set_lcd_level(int level) | 314 | static int set_lcd_level(int level) |
@@ -263,44 +412,34 @@ static int get_max_brightness(void) | |||
263 | return fujitsu->max_brightness; | 412 | return fujitsu->max_brightness; |
264 | } | 413 | } |
265 | 414 | ||
266 | static int get_lcd_level_alt(void) | ||
267 | { | ||
268 | unsigned long long state = 0; | ||
269 | acpi_status status = AE_OK; | ||
270 | |||
271 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n"); | ||
272 | |||
273 | status = | ||
274 | acpi_evaluate_integer(fujitsu->acpi_handle, "GBLS", NULL, &state); | ||
275 | if (status < 0) | ||
276 | return status; | ||
277 | |||
278 | fujitsu->brightness_level = state & 0x0fffffff; | ||
279 | |||
280 | if (state & 0x80000000) | ||
281 | fujitsu->brightness_changed = 1; | ||
282 | else | ||
283 | fujitsu->brightness_changed = 0; | ||
284 | |||
285 | return fujitsu->brightness_level; | ||
286 | } | ||
287 | |||
288 | /* Backlight device stuff */ | 415 | /* Backlight device stuff */ |
289 | 416 | ||
290 | static int bl_get_brightness(struct backlight_device *b) | 417 | static int bl_get_brightness(struct backlight_device *b) |
291 | { | 418 | { |
292 | if (use_alt_lcd_levels) | 419 | return get_lcd_level(); |
293 | return get_lcd_level_alt(); | ||
294 | else | ||
295 | return get_lcd_level(); | ||
296 | } | 420 | } |
297 | 421 | ||
298 | static int bl_update_status(struct backlight_device *b) | 422 | static int bl_update_status(struct backlight_device *b) |
299 | { | 423 | { |
424 | int ret; | ||
425 | if (b->props.power == 4) | ||
426 | ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3); | ||
427 | else | ||
428 | ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0); | ||
429 | if (ret != 0) | ||
430 | vdbg_printk(FUJLAPTOP_DBG_ERROR, | ||
431 | "Unable to adjust backlight power, error code %i\n", | ||
432 | ret); | ||
433 | |||
300 | if (use_alt_lcd_levels) | 434 | if (use_alt_lcd_levels) |
301 | return set_lcd_level_alt(b->props.brightness); | 435 | ret = set_lcd_level_alt(b->props.brightness); |
302 | else | 436 | else |
303 | return set_lcd_level(b->props.brightness); | 437 | ret = set_lcd_level(b->props.brightness); |
438 | if (ret != 0) | ||
439 | vdbg_printk(FUJLAPTOP_DBG_ERROR, | ||
440 | "Unable to adjust LCD brightness, error code %i\n", | ||
441 | ret); | ||
442 | return ret; | ||
304 | } | 443 | } |
305 | 444 | ||
306 | static struct backlight_ops fujitsubl_ops = { | 445 | static struct backlight_ops fujitsubl_ops = { |
@@ -344,10 +483,7 @@ static ssize_t show_lcd_level(struct device *dev, | |||
344 | 483 | ||
345 | int ret; | 484 | int ret; |
346 | 485 | ||
347 | if (use_alt_lcd_levels) | 486 | ret = get_lcd_level(); |
348 | ret = get_lcd_level_alt(); | ||
349 | else | ||
350 | ret = get_lcd_level(); | ||
351 | if (ret < 0) | 487 | if (ret < 0) |
352 | return ret; | 488 | return ret; |
353 | 489 | ||
@@ -372,52 +508,71 @@ static ssize_t store_lcd_level(struct device *dev, | |||
372 | if (ret < 0) | 508 | if (ret < 0) |
373 | return ret; | 509 | return ret; |
374 | 510 | ||
375 | if (use_alt_lcd_levels) | 511 | ret = get_lcd_level(); |
376 | ret = get_lcd_level_alt(); | ||
377 | else | ||
378 | ret = get_lcd_level(); | ||
379 | if (ret < 0) | 512 | if (ret < 0) |
380 | return ret; | 513 | return ret; |
381 | 514 | ||
382 | return count; | 515 | return count; |
383 | } | 516 | } |
384 | 517 | ||
385 | /* Hardware access for hotkey device */ | 518 | static ssize_t |
386 | 519 | ignore_store(struct device *dev, | |
387 | static int get_irb(void) | 520 | struct device_attribute *attr, const char *buf, size_t count) |
388 | { | 521 | { |
389 | unsigned long long state = 0; | 522 | return count; |
390 | acpi_status status = AE_OK; | 523 | } |
391 | |||
392 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n"); | ||
393 | |||
394 | status = | ||
395 | acpi_evaluate_integer(fujitsu_hotkey->acpi_handle, "GIRB", NULL, | ||
396 | &state); | ||
397 | if (status < 0) | ||
398 | return status; | ||
399 | 524 | ||
400 | fujitsu_hotkey->irb = state; | 525 | static ssize_t |
526 | show_lid_state(struct device *dev, | ||
527 | struct device_attribute *attr, char *buf) | ||
528 | { | ||
529 | if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD) | ||
530 | return sprintf(buf, "unknown\n"); | ||
531 | if (fujitsu_hotkey->rfkill_state & 0x100) | ||
532 | return sprintf(buf, "open\n"); | ||
533 | else | ||
534 | return sprintf(buf, "closed\n"); | ||
535 | } | ||
401 | 536 | ||
402 | return fujitsu_hotkey->irb; | 537 | static ssize_t |
538 | show_dock_state(struct device *dev, | ||
539 | struct device_attribute *attr, char *buf) | ||
540 | { | ||
541 | if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD) | ||
542 | return sprintf(buf, "unknown\n"); | ||
543 | if (fujitsu_hotkey->rfkill_state & 0x200) | ||
544 | return sprintf(buf, "docked\n"); | ||
545 | else | ||
546 | return sprintf(buf, "undocked\n"); | ||
403 | } | 547 | } |
404 | 548 | ||
405 | static ssize_t | 549 | static ssize_t |
406 | ignore_store(struct device *dev, | 550 | show_radios_state(struct device *dev, |
407 | struct device_attribute *attr, const char *buf, size_t count) | 551 | struct device_attribute *attr, char *buf) |
408 | { | 552 | { |
409 | return count; | 553 | if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD) |
554 | return sprintf(buf, "unknown\n"); | ||
555 | if (fujitsu_hotkey->rfkill_state & 0x20) | ||
556 | return sprintf(buf, "on\n"); | ||
557 | else | ||
558 | return sprintf(buf, "killed\n"); | ||
410 | } | 559 | } |
411 | 560 | ||
412 | static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store); | 561 | static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store); |
413 | static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed, | 562 | static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed, |
414 | ignore_store); | 563 | ignore_store); |
415 | static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level); | 564 | static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level); |
565 | static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store); | ||
566 | static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store); | ||
567 | static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store); | ||
416 | 568 | ||
417 | static struct attribute *fujitsupf_attributes[] = { | 569 | static struct attribute *fujitsupf_attributes[] = { |
418 | &dev_attr_brightness_changed.attr, | 570 | &dev_attr_brightness_changed.attr, |
419 | &dev_attr_max_brightness.attr, | 571 | &dev_attr_max_brightness.attr, |
420 | &dev_attr_lcd_level.attr, | 572 | &dev_attr_lcd_level.attr, |
573 | &dev_attr_lid.attr, | ||
574 | &dev_attr_dock.attr, | ||
575 | &dev_attr_radios.attr, | ||
421 | NULL | 576 | NULL |
422 | }; | 577 | }; |
423 | 578 | ||
@@ -435,24 +590,16 @@ static struct platform_driver fujitsupf_driver = { | |||
435 | static void dmi_check_cb_common(const struct dmi_system_id *id) | 590 | static void dmi_check_cb_common(const struct dmi_system_id *id) |
436 | { | 591 | { |
437 | acpi_handle handle; | 592 | acpi_handle handle; |
438 | int have_blnf; | ||
439 | printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n", | 593 | printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n", |
440 | id->ident); | 594 | id->ident); |
441 | have_blnf = ACPI_SUCCESS | ||
442 | (acpi_get_handle(NULL, "\\_SB.PCI0.GFX0.LCD.BLNF", &handle)); | ||
443 | if (use_alt_lcd_levels == -1) { | 595 | if (use_alt_lcd_levels == -1) { |
444 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detecting usealt\n"); | 596 | if (ACPI_SUCCESS(acpi_get_handle(NULL, |
445 | use_alt_lcd_levels = 1; | 597 | "\\_SB.PCI0.LPCB.FJEX.SBL2", &handle))) |
446 | } | 598 | use_alt_lcd_levels = 1; |
447 | if (disable_brightness_keys == -1) { | 599 | else |
448 | vdbg_printk(FUJLAPTOP_DBG_TRACE, | 600 | use_alt_lcd_levels = 0; |
449 | "auto-detecting disable_keys\n"); | 601 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as " |
450 | disable_brightness_keys = have_blnf ? 1 : 0; | 602 | "%i\n", use_alt_lcd_levels); |
451 | } | ||
452 | if (disable_brightness_adjust == -1) { | ||
453 | vdbg_printk(FUJLAPTOP_DBG_TRACE, | ||
454 | "auto-detecting disable_adjust\n"); | ||
455 | disable_brightness_adjust = have_blnf ? 0 : 1; | ||
456 | } | 603 | } |
457 | } | 604 | } |
458 | 605 | ||
@@ -581,19 +728,14 @@ static int acpi_fujitsu_add(struct acpi_device *device) | |||
581 | 728 | ||
582 | /* do config (detect defaults) */ | 729 | /* do config (detect defaults) */ |
583 | use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; | 730 | use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; |
584 | disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0; | ||
585 | disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0; | 731 | disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0; |
586 | vdbg_printk(FUJLAPTOP_DBG_INFO, | 732 | vdbg_printk(FUJLAPTOP_DBG_INFO, |
587 | "config: [alt interface: %d], [key disable: %d], [adjust disable: %d]\n", | 733 | "config: [alt interface: %d], [adjust disable: %d]\n", |
588 | use_alt_lcd_levels, disable_brightness_keys, | 734 | use_alt_lcd_levels, disable_brightness_adjust); |
589 | disable_brightness_adjust); | ||
590 | 735 | ||
591 | if (get_max_brightness() <= 0) | 736 | if (get_max_brightness() <= 0) |
592 | fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; | 737 | fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; |
593 | if (use_alt_lcd_levels) | 738 | get_lcd_level(); |
594 | get_lcd_level_alt(); | ||
595 | else | ||
596 | get_lcd_level(); | ||
597 | 739 | ||
598 | return result; | 740 | return result; |
599 | 741 | ||
@@ -644,43 +786,23 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) | |||
644 | case ACPI_FUJITSU_NOTIFY_CODE1: | 786 | case ACPI_FUJITSU_NOTIFY_CODE1: |
645 | keycode = 0; | 787 | keycode = 0; |
646 | oldb = fujitsu->brightness_level; | 788 | oldb = fujitsu->brightness_level; |
647 | get_lcd_level(); /* the alt version always yields changed */ | 789 | get_lcd_level(); |
648 | newb = fujitsu->brightness_level; | 790 | newb = fujitsu->brightness_level; |
649 | 791 | ||
650 | vdbg_printk(FUJLAPTOP_DBG_TRACE, | 792 | vdbg_printk(FUJLAPTOP_DBG_TRACE, |
651 | "brightness button event [%i -> %i (%i)]\n", | 793 | "brightness button event [%i -> %i (%i)]\n", |
652 | oldb, newb, fujitsu->brightness_changed); | 794 | oldb, newb, fujitsu->brightness_changed); |
653 | 795 | ||
654 | if (oldb == newb && fujitsu->brightness_changed) { | 796 | if (oldb < newb) { |
655 | keycode = 0; | ||
656 | if (disable_brightness_keys != 1) { | ||
657 | if (oldb == 0) { | ||
658 | acpi_bus_generate_proc_event | ||
659 | (fujitsu->dev, | ||
660 | ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, | ||
661 | 0); | ||
662 | keycode = KEY_BRIGHTNESSDOWN; | ||
663 | } else if (oldb == | ||
664 | (fujitsu->max_brightness) - 1) { | ||
665 | acpi_bus_generate_proc_event | ||
666 | (fujitsu->dev, | ||
667 | ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, | ||
668 | 0); | ||
669 | keycode = KEY_BRIGHTNESSUP; | ||
670 | } | ||
671 | } | ||
672 | } else if (oldb < newb) { | ||
673 | if (disable_brightness_adjust != 1) { | 797 | if (disable_brightness_adjust != 1) { |
674 | if (use_alt_lcd_levels) | 798 | if (use_alt_lcd_levels) |
675 | set_lcd_level_alt(newb); | 799 | set_lcd_level_alt(newb); |
676 | else | 800 | else |
677 | set_lcd_level(newb); | 801 | set_lcd_level(newb); |
678 | } | 802 | } |
679 | if (disable_brightness_keys != 1) { | 803 | acpi_bus_generate_proc_event(fujitsu->dev, |
680 | acpi_bus_generate_proc_event(fujitsu->dev, | 804 | ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0); |
681 | ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0); | 805 | keycode = KEY_BRIGHTNESSUP; |
682 | keycode = KEY_BRIGHTNESSUP; | ||
683 | } | ||
684 | } else if (oldb > newb) { | 806 | } else if (oldb > newb) { |
685 | if (disable_brightness_adjust != 1) { | 807 | if (disable_brightness_adjust != 1) { |
686 | if (use_alt_lcd_levels) | 808 | if (use_alt_lcd_levels) |
@@ -688,13 +810,9 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) | |||
688 | else | 810 | else |
689 | set_lcd_level(newb); | 811 | set_lcd_level(newb); |
690 | } | 812 | } |
691 | if (disable_brightness_keys != 1) { | 813 | acpi_bus_generate_proc_event(fujitsu->dev, |
692 | acpi_bus_generate_proc_event(fujitsu->dev, | 814 | ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0); |
693 | ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0); | 815 | keycode = KEY_BRIGHTNESSDOWN; |
694 | keycode = KEY_BRIGHTNESSDOWN; | ||
695 | } | ||
696 | } else { | ||
697 | keycode = KEY_UNKNOWN; | ||
698 | } | 816 | } |
699 | break; | 817 | break; |
700 | default: | 818 | default: |
@@ -771,7 +889,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
771 | input->id.bustype = BUS_HOST; | 889 | input->id.bustype = BUS_HOST; |
772 | input->id.product = 0x06; | 890 | input->id.product = 0x06; |
773 | input->dev.parent = &device->dev; | 891 | input->dev.parent = &device->dev; |
774 | input->evbit[0] = BIT(EV_KEY); | 892 | |
893 | set_bit(EV_KEY, input->evbit); | ||
775 | set_bit(fujitsu->keycode1, input->keybit); | 894 | set_bit(fujitsu->keycode1, input->keybit); |
776 | set_bit(fujitsu->keycode2, input->keybit); | 895 | set_bit(fujitsu->keycode2, input->keybit); |
777 | set_bit(fujitsu->keycode3, input->keybit); | 896 | set_bit(fujitsu->keycode3, input->keybit); |
@@ -803,10 +922,44 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
803 | printk(KERN_ERR "_INI Method failed\n"); | 922 | printk(KERN_ERR "_INI Method failed\n"); |
804 | } | 923 | } |
805 | 924 | ||
806 | i = 0; /* Discard hotkey ringbuffer */ | 925 | i = 0; |
807 | while (get_irb() != 0 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) ; | 926 | while (call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0) != 0 |
927 | && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) | ||
928 | ; /* No action, result is discarded */ | ||
808 | vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); | 929 | vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); |
809 | 930 | ||
931 | fujitsu_hotkey->rfkill_state = | ||
932 | call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); | ||
933 | |||
934 | /* Suspect this is a keymap of the application panel, print it */ | ||
935 | printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n", | ||
936 | call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0)); | ||
937 | |||
938 | #ifdef CONFIG_LEDS_CLASS | ||
939 | if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { | ||
940 | result = led_classdev_register(&fujitsu->pf_device->dev, | ||
941 | &logolamp_led); | ||
942 | if (result == 0) { | ||
943 | fujitsu_hotkey->logolamp_registered = 1; | ||
944 | } else { | ||
945 | printk(KERN_ERR "fujitsu-laptop: Could not register " | ||
946 | "LED handler for logo lamp, error %i\n", result); | ||
947 | } | ||
948 | } | ||
949 | |||
950 | if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) && | ||
951 | (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) { | ||
952 | result = led_classdev_register(&fujitsu->pf_device->dev, | ||
953 | &kblamps_led); | ||
954 | if (result == 0) { | ||
955 | fujitsu_hotkey->kblamps_registered = 1; | ||
956 | } else { | ||
957 | printk(KERN_ERR "fujitsu-laptop: Could not register " | ||
958 | "LED handler for keyboard lamps, error %i\n", result); | ||
959 | } | ||
960 | } | ||
961 | #endif | ||
962 | |||
810 | return result; | 963 | return result; |
811 | 964 | ||
812 | end: | 965 | end: |
@@ -852,16 +1005,15 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, | |||
852 | 1005 | ||
853 | input = fujitsu_hotkey->input; | 1006 | input = fujitsu_hotkey->input; |
854 | 1007 | ||
855 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "Hotkey event\n"); | 1008 | fujitsu_hotkey->rfkill_state = |
1009 | call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); | ||
856 | 1010 | ||
857 | switch (event) { | 1011 | switch (event) { |
858 | case ACPI_FUJITSU_NOTIFY_CODE1: | 1012 | case ACPI_FUJITSU_NOTIFY_CODE1: |
859 | i = 0; | 1013 | i = 0; |
860 | while ((irb = get_irb()) != 0 | 1014 | while ((irb = |
861 | && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { | 1015 | call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0 |
862 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "GIRB result [%x]\n", | 1016 | && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { |
863 | irb); | ||
864 | |||
865 | switch (irb & 0x4ff) { | 1017 | switch (irb & 0x4ff) { |
866 | case KEY1_CODE: | 1018 | case KEY1_CODE: |
867 | keycode = fujitsu->keycode1; | 1019 | keycode = fujitsu->keycode1; |
@@ -1035,6 +1187,15 @@ static int __init fujitsu_init(void) | |||
1035 | goto fail_hotkey1; | 1187 | goto fail_hotkey1; |
1036 | } | 1188 | } |
1037 | 1189 | ||
1190 | /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */ | ||
1191 | |||
1192 | if (!acpi_video_backlight_support()) { | ||
1193 | if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3) | ||
1194 | fujitsu->bl_device->props.power = 4; | ||
1195 | else | ||
1196 | fujitsu->bl_device->props.power = 0; | ||
1197 | } | ||
1198 | |||
1038 | printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION | 1199 | printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION |
1039 | " successfully loaded.\n"); | 1200 | " successfully loaded.\n"); |
1040 | 1201 | ||
@@ -1074,6 +1235,14 @@ fail_acpi: | |||
1074 | 1235 | ||
1075 | static void __exit fujitsu_cleanup(void) | 1236 | static void __exit fujitsu_cleanup(void) |
1076 | { | 1237 | { |
1238 | #ifdef CONFIG_LEDS_CLASS | ||
1239 | if (fujitsu_hotkey->logolamp_registered != 0) | ||
1240 | led_classdev_unregister(&logolamp_led); | ||
1241 | |||
1242 | if (fujitsu_hotkey->kblamps_registered != 0) | ||
1243 | led_classdev_unregister(&kblamps_led); | ||
1244 | #endif | ||
1245 | |||
1077 | sysfs_remove_group(&fujitsu->pf_device->dev.kobj, | 1246 | sysfs_remove_group(&fujitsu->pf_device->dev.kobj, |
1078 | &fujitsupf_attribute_group); | 1247 | &fujitsupf_attribute_group); |
1079 | platform_device_unregister(fujitsu->pf_device); | 1248 | platform_device_unregister(fujitsu->pf_device); |
@@ -1098,9 +1267,6 @@ module_exit(fujitsu_cleanup); | |||
1098 | module_param(use_alt_lcd_levels, uint, 0644); | 1267 | module_param(use_alt_lcd_levels, uint, 0644); |
1099 | MODULE_PARM_DESC(use_alt_lcd_levels, | 1268 | MODULE_PARM_DESC(use_alt_lcd_levels, |
1100 | "Use alternative interface for lcd_levels (needed for Lifebook s6410)."); | 1269 | "Use alternative interface for lcd_levels (needed for Lifebook s6410)."); |
1101 | module_param(disable_brightness_keys, uint, 0644); | ||
1102 | MODULE_PARM_DESC(disable_brightness_keys, | ||
1103 | "Disable brightness keys (eg. if they are already handled by the generic ACPI_VIDEO device)."); | ||
1104 | module_param(disable_brightness_adjust, uint, 0644); | 1270 | module_param(disable_brightness_adjust, uint, 0644); |
1105 | MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment ."); | 1271 | MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment ."); |
1106 | #ifdef CONFIG_FUJITSU_LAPTOP_DEBUG | 1272 | #ifdef CONFIG_FUJITSU_LAPTOP_DEBUG |
@@ -1108,12 +1274,13 @@ module_param_named(debug, dbg_level, uint, 0644); | |||
1108 | MODULE_PARM_DESC(debug, "Sets debug level bit-mask"); | 1274 | MODULE_PARM_DESC(debug, "Sets debug level bit-mask"); |
1109 | #endif | 1275 | #endif |
1110 | 1276 | ||
1111 | MODULE_AUTHOR("Jonathan Woithe, Peter Gruber"); | 1277 | MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon"); |
1112 | MODULE_DESCRIPTION("Fujitsu laptop extras support"); | 1278 | MODULE_DESCRIPTION("Fujitsu laptop extras support"); |
1113 | MODULE_VERSION(FUJITSU_DRIVER_VERSION); | 1279 | MODULE_VERSION(FUJITSU_DRIVER_VERSION); |
1114 | MODULE_LICENSE("GPL"); | 1280 | MODULE_LICENSE("GPL"); |
1115 | 1281 | ||
1116 | MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); | 1282 | MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); |
1283 | MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*"); | ||
1117 | MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); | 1284 | MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); |
1118 | 1285 | ||
1119 | static struct pnp_device_id pnp_ids[] = { | 1286 | static struct pnp_device_id pnp_ids[] = { |
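The LED hunks above follow the standard LED class pattern: register a struct led_classdev against the platform device, record which entries were successfully registered, and unregister only those on module cleanup. A minimal sketch of that pattern (the names and the brightness callback below are hypothetical, not taken from fujitsu-laptop):

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

static int demo_lamp_registered;        /* mirrors the *_registered flags */

/* Hypothetical brightness hook; a real driver would call into firmware. */
static void demo_lamp_set(struct led_classdev *cdev, enum led_brightness value)
{
        pr_debug("demo lamp set to %d\n", value);
}

static struct led_classdev demo_lamp_led = {
        .name           = "demo::lamp",
        .brightness_set = demo_lamp_set,
};

static int demo_register_leds(struct platform_device *pdev)
{
        int result = led_classdev_register(&pdev->dev, &demo_lamp_led);

        if (result == 0)
                demo_lamp_registered = 1;
        else
                printk(KERN_ERR "demo: could not register LED, error %i\n",
                       result);
        return result;
}

static void demo_unregister_leds(void)
{
        /* Tear down only what was successfully registered. */
        if (demo_lamp_registered)
                led_classdev_unregister(&demo_lamp_led);
}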
diff --git a/drivers/misc/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 4b7c24c519c3..4b7c24c519c3 100644 --- a/drivers/misc/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
diff --git a/drivers/misc/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index 27b7662955bb..27b7662955bb 100644 --- a/drivers/misc/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c | |||
diff --git a/drivers/misc/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 759763d18e4c..759763d18e4c 100644 --- a/drivers/misc/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c | |||
diff --git a/drivers/misc/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 4a1bc64485d5..f30db367c82e 100644 --- a/drivers/misc/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c | |||
@@ -241,8 +241,6 @@ static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val) | |||
241 | }; | 241 | }; |
242 | acpi_status status = AE_OK; | 242 | acpi_status status = AE_OK; |
243 | 243 | ||
244 | ACPI_FUNCTION_TRACE("acpi_pcc_write_sset"); | ||
245 | |||
246 | status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET, | 244 | status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET, |
247 | &params, NULL); | 245 | &params, NULL); |
248 | 246 | ||
@@ -254,8 +252,6 @@ static inline int acpi_pcc_get_sqty(struct acpi_device *device) | |||
254 | unsigned long long s; | 252 | unsigned long long s; |
255 | acpi_status status; | 253 | acpi_status status; |
256 | 254 | ||
257 | ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty"); | ||
258 | |||
259 | status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY, | 255 | status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY, |
260 | NULL, &s); | 256 | NULL, &s); |
261 | if (ACPI_SUCCESS(status)) | 257 | if (ACPI_SUCCESS(status)) |
@@ -274,8 +270,6 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf) | |||
274 | union acpi_object *hkey = NULL; | 270 | union acpi_object *hkey = NULL; |
275 | int i; | 271 | int i; |
276 | 272 | ||
277 | ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata"); | ||
278 | |||
279 | status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0, | 273 | status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0, |
280 | &buffer); | 274 | &buffer); |
281 | if (ACPI_FAILURE(status)) { | 275 | if (ACPI_FAILURE(status)) { |
@@ -501,8 +495,6 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) | |||
501 | int key_code, hkey_num; | 495 | int key_code, hkey_num; |
502 | unsigned long long result; | 496 | unsigned long long result; |
503 | 497 | ||
504 | ACPI_FUNCTION_TRACE("acpi_pcc_generate_keyinput"); | ||
505 | |||
506 | rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, | 498 | rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, |
507 | NULL, &result); | 499 | NULL, &result); |
508 | if (!ACPI_SUCCESS(rc)) { | 500 | if (!ACPI_SUCCESS(rc)) { |
@@ -538,8 +530,6 @@ static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data) | |||
538 | { | 530 | { |
539 | struct pcc_acpi *pcc = (struct pcc_acpi *) data; | 531 | struct pcc_acpi *pcc = (struct pcc_acpi *) data; |
540 | 532 | ||
541 | ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify"); | ||
542 | |||
543 | switch (event) { | 533 | switch (event) { |
544 | case HKEY_NOTIFY: | 534 | case HKEY_NOTIFY: |
545 | acpi_pcc_generate_keyinput(pcc); | 535 | acpi_pcc_generate_keyinput(pcc); |
@@ -554,8 +544,6 @@ static int acpi_pcc_init_input(struct pcc_acpi *pcc) | |||
554 | { | 544 | { |
555 | int i, rc; | 545 | int i, rc; |
556 | 546 | ||
557 | ACPI_FUNCTION_TRACE("acpi_pcc_init_input"); | ||
558 | |||
559 | pcc->input_dev = input_allocate_device(); | 547 | pcc->input_dev = input_allocate_device(); |
560 | if (!pcc->input_dev) { | 548 | if (!pcc->input_dev) { |
561 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, | 549 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, |
@@ -597,8 +585,6 @@ static int acpi_pcc_hotkey_resume(struct acpi_device *device) | |||
597 | struct pcc_acpi *pcc = acpi_driver_data(device); | 585 | struct pcc_acpi *pcc = acpi_driver_data(device); |
598 | acpi_status status = AE_OK; | 586 | acpi_status status = AE_OK; |
599 | 587 | ||
600 | ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume"); | ||
601 | |||
602 | if (device == NULL || pcc == NULL) | 588 | if (device == NULL || pcc == NULL) |
603 | return -EINVAL; | 589 | return -EINVAL; |
604 | 590 | ||
@@ -616,8 +602,6 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) | |||
616 | struct pcc_acpi *pcc; | 602 | struct pcc_acpi *pcc; |
617 | int num_sifr, result; | 603 | int num_sifr, result; |
618 | 604 | ||
619 | ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add"); | ||
620 | |||
621 | if (!device) | 605 | if (!device) |
622 | return -EINVAL; | 606 | return -EINVAL; |
623 | 607 | ||
@@ -714,8 +698,6 @@ static int __init acpi_pcc_init(void) | |||
714 | { | 698 | { |
715 | int result = 0; | 699 | int result = 0; |
716 | 700 | ||
717 | ACPI_FUNCTION_TRACE("acpi_pcc_init"); | ||
718 | |||
719 | if (acpi_disabled) | 701 | if (acpi_disabled) |
720 | return -ENODEV; | 702 | return -ENODEV; |
721 | 703 | ||
@@ -733,8 +715,6 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type) | |||
733 | { | 715 | { |
734 | struct pcc_acpi *pcc = acpi_driver_data(device); | 716 | struct pcc_acpi *pcc = acpi_driver_data(device); |
735 | 717 | ||
736 | ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove"); | ||
737 | |||
738 | if (!device || !pcc) | 718 | if (!device || !pcc) |
739 | return -EINVAL; | 719 | return -EINVAL; |
740 | 720 | ||
@@ -757,8 +737,6 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type) | |||
757 | 737 | ||
758 | static void __exit acpi_pcc_exit(void) | 738 | static void __exit acpi_pcc_exit(void) |
759 | { | 739 | { |
760 | ACPI_FUNCTION_TRACE("acpi_pcc_exit"); | ||
761 | |||
762 | acpi_bus_unregister_driver(&acpi_pcc_driver); | 740 | acpi_bus_unregister_driver(&acpi_pcc_driver); |
763 | } | 741 | } |
764 | 742 | ||
diff --git a/drivers/misc/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 571b211608d1..537959d07148 100644 --- a/drivers/misc/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
@@ -935,14 +935,17 @@ static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) | |||
935 | static acpi_status sony_walk_callback(acpi_handle handle, u32 level, | 935 | static acpi_status sony_walk_callback(acpi_handle handle, u32 level, |
936 | void *context, void **return_value) | 936 | void *context, void **return_value) |
937 | { | 937 | { |
938 | struct acpi_namespace_node *node; | 938 | struct acpi_device_info *info; |
939 | union acpi_operand_object *operand; | 939 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
940 | 940 | ||
941 | node = (struct acpi_namespace_node *)handle; | 941 | if (ACPI_SUCCESS(acpi_get_object_info(handle, &buffer))) { |
942 | operand = (union acpi_operand_object *)node->object; | 942 | info = buffer.pointer; |
943 | 943 | ||
944 | printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", node->name.ascii, | 944 | printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", |
945 | (u32) operand->method.param_count); | 945 | (char *)&info->name, info->param_count); |
946 | |||
947 | kfree(buffer.pointer); | ||
948 | } | ||
946 | 949 | ||
947 | return AE_OK; | 950 | return AE_OK; |
948 | } | 951 | } |
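This hunk replaces direct pokes into the ACPICA-internal struct acpi_namespace_node with the public acpi_get_object_info() call, which fills an ACPI_ALLOCATE_BUFFER that the caller must free. A hedged sketch of that usage, following the signature as it appears in this patch (later ACPICA releases changed the interface):

#include <acpi/acpi.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static acpi_status demo_dump_method(acpi_handle handle)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_device_info *info;
        acpi_status status;

        status = acpi_get_object_info(handle, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        info = buffer.pointer;
        printk(KERN_DEBUG "method: name: %4.4s, args %X\n",
               (char *)&info->name, info->param_count);

        kfree(buffer.pointer); /* caller owns the ACPI_ALLOCATE_BUFFER result */
        return AE_OK;
}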
diff --git a/drivers/misc/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c index f25e4c974dcf..b4a4aa9ee482 100644 --- a/drivers/misc/tc1100-wmi.c +++ b/drivers/platform/x86/tc1100-wmi.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <acpi/acpi.h> | 32 | #include <acpi/acpi.h> |
33 | #include <acpi/actypes.h> | ||
34 | #include <acpi/acpi_bus.h> | 33 | #include <acpi/acpi_bus.h> |
35 | #include <acpi/acpi_drivers.h> | 34 | #include <acpi/acpi_drivers.h> |
36 | #include <linux/platform_device.h> | 35 | #include <linux/platform_device.h> |
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 899766e16fa8..3478453eba7a 100644 --- a/drivers/misc/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -76,7 +76,6 @@ | |||
76 | #include <linux/workqueue.h> | 76 | #include <linux/workqueue.h> |
77 | 77 | ||
78 | #include <acpi/acpi_drivers.h> | 78 | #include <acpi/acpi_drivers.h> |
79 | #include <acpi/acnamesp.h> | ||
80 | 79 | ||
81 | #include <linux/pci_ids.h> | 80 | #include <linux/pci_ids.h> |
82 | 81 | ||
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 40e60fc2e596..40e60fc2e596 100644 --- a/drivers/acpi/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c | |||
diff --git a/drivers/acpi/wmi.c b/drivers/platform/x86/wmi.c index 8a8b377712c9..8a8b377712c9 100644 --- a/drivers/acpi/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 383e47c392a4..2834846a185d 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/pnp.h> | 23 | #include <linux/pnp.h> |
24 | #include <linux/mod_devicetable.h> | 24 | #include <linux/mod_devicetable.h> |
25 | #include <acpi/acpi_bus.h> | 25 | #include <acpi/acpi_bus.h> |
26 | #include <acpi/actypes.h> | ||
27 | 26 | ||
28 | #include "../base.h" | 27 | #include "../base.h" |
29 | #include "pnpacpi.h" | 28 | #include "pnpacpi.h" |
diff --git a/drivers/rtc/rtc-parisc.c b/drivers/rtc/rtc-parisc.c index 346d633655e7..c6bfa6fe1a2a 100644 --- a/drivers/rtc/rtc-parisc.c +++ b/drivers/rtc/rtc-parisc.c | |||
@@ -34,7 +34,8 @@ static int parisc_get_time(struct device *dev, struct rtc_time *tm) | |||
34 | static int parisc_set_time(struct device *dev, struct rtc_time *tm) | 34 | static int parisc_set_time(struct device *dev, struct rtc_time *tm) |
35 | { | 35 | { |
36 | struct parisc_rtc *p = dev_get_drvdata(dev); | 36 | struct parisc_rtc *p = dev_get_drvdata(dev); |
37 | unsigned long flags, ret; | 37 | unsigned long flags; |
38 | int ret; | ||
38 | 39 | ||
39 | spin_lock_irqsave(&p->lock, flags); | 40 | spin_lock_irqsave(&p->lock, flags); |
40 | ret = set_rtc_time(tm); | 41 | ret = set_rtc_time(tm); |
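The type split matters because spin_lock_irqsave() stores its saved interrupt state in an unsigned long, while set_rtc_time() returns an int error code; folding both into one unsigned long declaration hid that distinction. A small sketch of the corrected pattern (the wrapper and callback are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static int demo_locked_call(int (*op)(void))
{
        unsigned long flags;    /* required type for spin_lock_irqsave() */
        int ret;                /* zero / negative errno, as returned by op() */

        spin_lock_irqsave(&demo_lock, flags);
        ret = op();
        spin_unlock_irqrestore(&demo_lock, flags);

        return ret;
}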
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c index c73fa89b5f8a..170d289ac785 100644 --- a/fs/jffs2/compr_rubin.c +++ b/fs/jffs2/compr_rubin.c | |||
@@ -22,9 +22,7 @@ | |||
22 | 22 | ||
23 | 23 | ||
24 | #define BIT_DIVIDER_MIPS 1043 | 24 | #define BIT_DIVIDER_MIPS 1043 |
25 | static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */ | 25 | static int bits_mips[8] = { 277, 249, 290, 267, 229, 341, 212, 241}; |
26 | |||
27 | #include <linux/errno.h> | ||
28 | 26 | ||
29 | struct pushpull { | 27 | struct pushpull { |
30 | unsigned char *buf; | 28 | unsigned char *buf; |
@@ -43,7 +41,9 @@ struct rubin_state { | |||
43 | int bits[8]; | 41 | int bits[8]; |
44 | }; | 42 | }; |
45 | 43 | ||
46 | static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve) | 44 | static inline void init_pushpull(struct pushpull *pp, char *buf, |
45 | unsigned buflen, unsigned ofs, | ||
46 | unsigned reserve) | ||
47 | { | 47 | { |
48 | pp->buf = buf; | 48 | pp->buf = buf; |
49 | pp->buflen = buflen; | 49 | pp->buflen = buflen; |
@@ -53,16 +53,14 @@ static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen | |||
53 | 53 | ||
54 | static inline int pushbit(struct pushpull *pp, int bit, int use_reserved) | 54 | static inline int pushbit(struct pushpull *pp, int bit, int use_reserved) |
55 | { | 55 | { |
56 | if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) { | 56 | if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) |
57 | return -ENOSPC; | 57 | return -ENOSPC; |
58 | } | ||
59 | 58 | ||
60 | if (bit) { | 59 | if (bit) |
61 | pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs &7))); | 60 | pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs & 7))); |
62 | } | 61 | else |
63 | else { | 62 | pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs & 7))); |
64 | pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs &7))); | 63 | |
65 | } | ||
66 | pp->ofs++; | 64 | pp->ofs++; |
67 | 65 | ||
68 | return 0; | 66 | return 0; |
@@ -97,6 +95,7 @@ static void init_rubin(struct rubin_state *rs, int div, int *bits) | |||
97 | rs->p = (long) (2 * UPPER_BIT_RUBIN); | 95 | rs->p = (long) (2 * UPPER_BIT_RUBIN); |
98 | rs->bit_number = (long) 0; | 96 | rs->bit_number = (long) 0; |
99 | rs->bit_divider = div; | 97 | rs->bit_divider = div; |
98 | |||
100 | for (c=0; c<8; c++) | 99 | for (c=0; c<8; c++) |
101 | rs->bits[c] = bits[c]; | 100 | rs->bits[c] = bits[c]; |
102 | } | 101 | } |
@@ -108,7 +107,8 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) | |||
108 | long i0, i1; | 107 | long i0, i1; |
109 | int ret; | 108 | int ret; |
110 | 109 | ||
111 | while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { | 110 | while ((rs->q >= UPPER_BIT_RUBIN) || |
111 | ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { | ||
112 | rs->bit_number++; | 112 | rs->bit_number++; |
113 | 113 | ||
114 | ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); | 114 | ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); |
@@ -119,12 +119,12 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) | |||
119 | rs->p <<= 1; | 119 | rs->p <<= 1; |
120 | } | 120 | } |
121 | i0 = A * rs->p / (A + B); | 121 | i0 = A * rs->p / (A + B); |
122 | if (i0 <= 0) { | 122 | if (i0 <= 0) |
123 | i0 = 1; | 123 | i0 = 1; |
124 | } | 124 | |
125 | if (i0 >= rs->p) { | 125 | if (i0 >= rs->p) |
126 | i0 = rs->p - 1; | 126 | i0 = rs->p - 1; |
127 | } | 127 | |
128 | i1 = rs->p - i0; | 128 | i1 = rs->p - i0; |
129 | 129 | ||
130 | if (symbol == 0) | 130 | if (symbol == 0) |
@@ -157,11 +157,13 @@ static void init_decode(struct rubin_state *rs, int div, int *bits) | |||
157 | /* behalve lower */ | 157 | /* behalve lower */ |
158 | rs->rec_q = 0; | 158 | rs->rec_q = 0; |
159 | 159 | ||
160 | for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp))) | 160 | for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; |
161 | rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp))) | ||
161 | ; | 162 | ; |
162 | } | 163 | } |
163 | 164 | ||
164 | static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q) | 165 | static void __do_decode(struct rubin_state *rs, unsigned long p, |
166 | unsigned long q) | ||
165 | { | 167 | { |
166 | register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN; | 168 | register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN; |
167 | unsigned long rec_q; | 169 | unsigned long rec_q; |
@@ -207,12 +209,11 @@ static int decode(struct rubin_state *rs, long A, long B) | |||
207 | __do_decode(rs, p, q); | 209 | __do_decode(rs, p, q); |
208 | 210 | ||
209 | i0 = A * rs->p / (A + B); | 211 | i0 = A * rs->p / (A + B); |
210 | if (i0 <= 0) { | 212 | if (i0 <= 0) |
211 | i0 = 1; | 213 | i0 = 1; |
212 | } | 214 | |
213 | if (i0 >= rs->p) { | 215 | if (i0 >= rs->p) |
214 | i0 = rs->p - 1; | 216 | i0 = rs->p - 1; |
215 | } | ||
216 | 217 | ||
217 | threshold = rs->q + i0; | 218 | threshold = rs->q + i0; |
218 | symbol = rs->rec_q >= threshold; | 219 | symbol = rs->rec_q >= threshold; |
@@ -234,14 +235,15 @@ static int out_byte(struct rubin_state *rs, unsigned char byte) | |||
234 | struct rubin_state rs_copy; | 235 | struct rubin_state rs_copy; |
235 | rs_copy = *rs; | 236 | rs_copy = *rs; |
236 | 237 | ||
237 | for (i=0;i<8;i++) { | 238 | for (i=0; i<8; i++) { |
238 | ret = encode(rs, rs->bit_divider-rs->bits[i],rs->bits[i],byte&1); | 239 | ret = encode(rs, rs->bit_divider-rs->bits[i], |
240 | rs->bits[i], byte & 1); | ||
239 | if (ret) { | 241 | if (ret) { |
240 | /* Failed. Restore old state */ | 242 | /* Failed. Restore old state */ |
241 | *rs = rs_copy; | 243 | *rs = rs_copy; |
242 | return ret; | 244 | return ret; |
243 | } | 245 | } |
244 | byte=byte>>1; | 246 | byte >>= 1 ; |
245 | } | 247 | } |
246 | return 0; | 248 | return 0; |
247 | } | 249 | } |
@@ -251,7 +253,8 @@ static int in_byte(struct rubin_state *rs) | |||
251 | int i, result = 0, bit_divider = rs->bit_divider; | 253 | int i, result = 0, bit_divider = rs->bit_divider; |
252 | 254 | ||
253 | for (i = 0; i < 8; i++) | 255 | for (i = 0; i < 8; i++) |
254 | result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i; | 256 | result |= decode(rs, bit_divider - rs->bits[i], |
257 | rs->bits[i]) << i; | ||
255 | 258 | ||
256 | return result; | 259 | return result; |
257 | } | 260 | } |
@@ -259,7 +262,8 @@ static int in_byte(struct rubin_state *rs) | |||
259 | 262 | ||
260 | 263 | ||
261 | static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, | 264 | static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, |
262 | unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) | 265 | unsigned char *cpage_out, uint32_t *sourcelen, |
266 | uint32_t *dstlen) | ||
263 | { | 267 | { |
264 | int outpos = 0; | 268 | int outpos = 0; |
265 | int pos=0; | 269 | int pos=0; |
@@ -295,7 +299,8 @@ static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, | |||
295 | int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, | 299 | int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, |
296 | uint32_t *sourcelen, uint32_t *dstlen, void *model) | 300 | uint32_t *sourcelen, uint32_t *dstlen, void *model) |
297 | { | 301 | { |
298 | return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); | 302 | return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, |
303 | cpage_out, sourcelen, dstlen); | ||
299 | } | 304 | } |
300 | #endif | 305 | #endif |
301 | static int jffs2_dynrubin_compress(unsigned char *data_in, | 306 | static int jffs2_dynrubin_compress(unsigned char *data_in, |
@@ -316,9 +321,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, | |||
316 | return -1; | 321 | return -1; |
317 | 322 | ||
318 | memset(histo, 0, 256); | 323 | memset(histo, 0, 256); |
319 | for (i=0; i<mysrclen; i++) { | 324 | for (i=0; i<mysrclen; i++) |
320 | histo[data_in[i]]++; | 325 | histo[data_in[i]]++; |
321 | } | ||
322 | memset(bits, 0, sizeof(int)*8); | 326 | memset(bits, 0, sizeof(int)*8); |
323 | for (i=0; i<256; i++) { | 327 | for (i=0; i<256; i++) { |
324 | if (i&128) | 328 | if (i&128) |
@@ -346,7 +350,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, | |||
346 | cpage_out[i] = bits[i]; | 350 | cpage_out[i] = bits[i]; |
347 | } | 351 | } |
348 | 352 | ||
349 | ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen); | 353 | ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, |
354 | &mydstlen); | ||
350 | if (ret) | 355 | if (ret) |
351 | return ret; | 356 | return ret; |
352 | 357 | ||
@@ -363,8 +368,10 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, | |||
363 | return 0; | 368 | return 0; |
364 | } | 369 | } |
365 | 370 | ||
366 | static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, | 371 | static void rubin_do_decompress(int bit_divider, int *bits, |
367 | unsigned char *page_out, uint32_t srclen, uint32_t destlen) | 372 | unsigned char *cdata_in, |
373 | unsigned char *page_out, uint32_t srclen, | ||
374 | uint32_t destlen) | ||
368 | { | 375 | { |
369 | int outpos = 0; | 376 | int outpos = 0; |
370 | struct rubin_state rs; | 377 | struct rubin_state rs; |
@@ -372,9 +379,8 @@ static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata | |||
372 | init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); | 379 | init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); |
373 | init_decode(&rs, bit_divider, bits); | 380 | init_decode(&rs, bit_divider, bits); |
374 | 381 | ||
375 | while (outpos < destlen) { | 382 | while (outpos < destlen) |
376 | page_out[outpos++] = in_byte(&rs); | 383 | page_out[outpos++] = in_byte(&rs); |
377 | } | ||
378 | } | 384 | } |
379 | 385 | ||
380 | 386 | ||
@@ -383,7 +389,8 @@ static int jffs2_rubinmips_decompress(unsigned char *data_in, | |||
383 | uint32_t sourcelen, uint32_t dstlen, | 389 | uint32_t sourcelen, uint32_t dstlen, |
384 | void *model) | 390 | void *model) |
385 | { | 391 | { |
386 | rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); | 392 | rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, |
393 | cpage_out, sourcelen, dstlen); | ||
387 | return 0; | 394 | return 0; |
388 | } | 395 | } |
389 | 396 | ||
@@ -398,52 +405,53 @@ static int jffs2_dynrubin_decompress(unsigned char *data_in, | |||
398 | for (c=0; c<8; c++) | 405 | for (c=0; c<8; c++) |
399 | bits[c] = data_in[c]; | 406 | bits[c] = data_in[c]; |
400 | 407 | ||
401 | rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen); | 408 | rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, |
409 | dstlen); | ||
402 | return 0; | 410 | return 0; |
403 | } | 411 | } |
404 | 412 | ||
405 | static struct jffs2_compressor jffs2_rubinmips_comp = { | 413 | static struct jffs2_compressor jffs2_rubinmips_comp = { |
406 | .priority = JFFS2_RUBINMIPS_PRIORITY, | 414 | .priority = JFFS2_RUBINMIPS_PRIORITY, |
407 | .name = "rubinmips", | 415 | .name = "rubinmips", |
408 | .compr = JFFS2_COMPR_DYNRUBIN, | 416 | .compr = JFFS2_COMPR_DYNRUBIN, |
409 | .compress = NULL, /*&jffs2_rubinmips_compress,*/ | 417 | .compress = NULL, /*&jffs2_rubinmips_compress,*/ |
410 | .decompress = &jffs2_rubinmips_decompress, | 418 | .decompress = &jffs2_rubinmips_decompress, |
411 | #ifdef JFFS2_RUBINMIPS_DISABLED | 419 | #ifdef JFFS2_RUBINMIPS_DISABLED |
412 | .disabled = 1, | 420 | .disabled = 1, |
413 | #else | 421 | #else |
414 | .disabled = 0, | 422 | .disabled = 0, |
415 | #endif | 423 | #endif |
416 | }; | 424 | }; |
417 | 425 | ||
418 | int jffs2_rubinmips_init(void) | 426 | int jffs2_rubinmips_init(void) |
419 | { | 427 | { |
420 | return jffs2_register_compressor(&jffs2_rubinmips_comp); | 428 | return jffs2_register_compressor(&jffs2_rubinmips_comp); |
421 | } | 429 | } |
422 | 430 | ||
423 | void jffs2_rubinmips_exit(void) | 431 | void jffs2_rubinmips_exit(void) |
424 | { | 432 | { |
425 | jffs2_unregister_compressor(&jffs2_rubinmips_comp); | 433 | jffs2_unregister_compressor(&jffs2_rubinmips_comp); |
426 | } | 434 | } |
427 | 435 | ||
428 | static struct jffs2_compressor jffs2_dynrubin_comp = { | 436 | static struct jffs2_compressor jffs2_dynrubin_comp = { |
429 | .priority = JFFS2_DYNRUBIN_PRIORITY, | 437 | .priority = JFFS2_DYNRUBIN_PRIORITY, |
430 | .name = "dynrubin", | 438 | .name = "dynrubin", |
431 | .compr = JFFS2_COMPR_RUBINMIPS, | 439 | .compr = JFFS2_COMPR_RUBINMIPS, |
432 | .compress = jffs2_dynrubin_compress, | 440 | .compress = jffs2_dynrubin_compress, |
433 | .decompress = &jffs2_dynrubin_decompress, | 441 | .decompress = &jffs2_dynrubin_decompress, |
434 | #ifdef JFFS2_DYNRUBIN_DISABLED | 442 | #ifdef JFFS2_DYNRUBIN_DISABLED |
435 | .disabled = 1, | 443 | .disabled = 1, |
436 | #else | 444 | #else |
437 | .disabled = 0, | 445 | .disabled = 0, |
438 | #endif | 446 | #endif |
439 | }; | 447 | }; |
440 | 448 | ||
441 | int jffs2_dynrubin_init(void) | 449 | int jffs2_dynrubin_init(void) |
442 | { | 450 | { |
443 | return jffs2_register_compressor(&jffs2_dynrubin_comp); | 451 | return jffs2_register_compressor(&jffs2_dynrubin_comp); |
444 | } | 452 | } |
445 | 453 | ||
446 | void jffs2_dynrubin_exit(void) | 454 | void jffs2_dynrubin_exit(void) |
447 | { | 455 | { |
448 | jffs2_unregister_compressor(&jffs2_dynrubin_comp); | 456 | jffs2_unregister_compressor(&jffs2_dynrubin_comp); |
449 | } | 457 | } |
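For reference, pushbit() and its pull counterpart pack bits MSB-first: ofs >> 3 selects the byte and 7 - (ofs & 7) the bit within it. A standalone illustration of that indexing (plain user-space C, not the kernel code itself):

#include <stdio.h>

/* MSB-first bit packing as used by pushbit(): byte index is ofs >> 3,
 * bit index within the byte is 7 - (ofs & 7). */
static void set_bit_msb_first(unsigned char *buf, unsigned ofs, int bit)
{
        if (bit)
                buf[ofs >> 3] |= 1 << (7 - (ofs & 7));
        else
                buf[ofs >> 3] &= ~(1 << (7 - (ofs & 7)));
}

int main(void)
{
        unsigned char buf[2] = { 0, 0 };

        set_bit_msb_first(buf, 0, 1);   /* sets 0x80 in buf[0] */
        set_bit_msb_first(buf, 9, 1);   /* sets 0x40 in buf[1] */
        printf("%02x %02x\n", buf[0], buf[1]);  /* prints "80 40" */
        return 0;
}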
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index 259461b910af..c32b4a1ad6cf 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c | |||
@@ -175,7 +175,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
175 | { | 175 | { |
176 | /* For NAND, if the failure did not occur at the device level for a | 176 | /* For NAND, if the failure did not occur at the device level for a |
177 | specific physical page, don't bother updating the bad block table. */ | 177 | specific physical page, don't bother updating the bad block table. */ |
178 | if (jffs2_cleanmarker_oob(c) && (bad_offset != MTD_FAIL_ADDR_UNKNOWN)) { | 178 | if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) { |
179 | /* We had a device-level failure to erase. Let's see if we've | 179 | /* We had a device-level failure to erase. Let's see if we've |
180 | failed too many times. */ | 180 | failed too many times. */ |
181 | if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { | 181 | if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { |
@@ -209,7 +209,8 @@ static void jffs2_erase_callback(struct erase_info *instr) | |||
209 | struct erase_priv_struct *priv = (void *)instr->priv; | 209 | struct erase_priv_struct *priv = (void *)instr->priv; |
210 | 210 | ||
211 | if(instr->state != MTD_ERASE_DONE) { | 211 | if(instr->state != MTD_ERASE_DONE) { |
212 | printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state); | 212 | printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", |
213 | (unsigned long long)instr->addr, instr->state); | ||
213 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); | 214 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); |
214 | } else { | 215 | } else { |
215 | jffs2_erase_succeeded(priv->c, priv->jeb); | 216 | jffs2_erase_succeeded(priv->c, priv->jeb); |
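The printk change is the usual recipe for 64-bit MTD offsets: print with %llx and cast explicitly, since the underlying 64-bit typedef may be unsigned long rather than unsigned long long on some architectures. A tiny sketch:

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_report_erase(uint64_t addr, int state)
{
        /* The cast keeps the format string portable across 32- and 64-bit hosts. */
        printk(KERN_WARNING "Erase at 0x%08llx finished, state 0x%x\n",
               (unsigned long long)addr, state);
}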
diff --git a/include/acpi/acdisasm.h b/include/acpi/acdisasm.h deleted file mode 100644 index 0c1ed387073c..000000000000 --- a/include/acpi/acdisasm.h +++ /dev/null | |||
@@ -1,445 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Name: acdisasm.h - AML disassembler | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #ifndef __ACDISASM_H__ | ||
45 | #define __ACDISASM_H__ | ||
46 | |||
47 | #include "amlresrc.h" | ||
48 | |||
49 | #define BLOCK_NONE 0 | ||
50 | #define BLOCK_PAREN 1 | ||
51 | #define BLOCK_BRACE 2 | ||
52 | #define BLOCK_COMMA_LIST 4 | ||
53 | #define ACPI_DEFAULT_RESNAME *(u32 *) "__RD" | ||
54 | |||
55 | struct acpi_external_list { | ||
56 | char *path; | ||
57 | char *internal_path; | ||
58 | struct acpi_external_list *next; | ||
59 | u32 value; | ||
60 | u16 length; | ||
61 | u8 type; | ||
62 | }; | ||
63 | |||
64 | extern struct acpi_external_list *acpi_gbl_external_list; | ||
65 | |||
66 | typedef const struct acpi_dmtable_info { | ||
67 | u8 opcode; | ||
68 | u8 offset; | ||
69 | char *name; | ||
70 | |||
71 | } acpi_dmtable_info; | ||
72 | |||
73 | /* | ||
74 | * Values for Opcode above. | ||
75 | * Note: 0-7 must not change, used as a flag shift value | ||
76 | */ | ||
77 | #define ACPI_DMT_FLAG0 0 | ||
78 | #define ACPI_DMT_FLAG1 1 | ||
79 | #define ACPI_DMT_FLAG2 2 | ||
80 | #define ACPI_DMT_FLAG3 3 | ||
81 | #define ACPI_DMT_FLAG4 4 | ||
82 | #define ACPI_DMT_FLAG5 5 | ||
83 | #define ACPI_DMT_FLAG6 6 | ||
84 | #define ACPI_DMT_FLAG7 7 | ||
85 | #define ACPI_DMT_FLAGS0 8 | ||
86 | #define ACPI_DMT_FLAGS2 9 | ||
87 | #define ACPI_DMT_UINT8 10 | ||
88 | #define ACPI_DMT_UINT16 11 | ||
89 | #define ACPI_DMT_UINT24 12 | ||
90 | #define ACPI_DMT_UINT32 13 | ||
91 | #define ACPI_DMT_UINT56 14 | ||
92 | #define ACPI_DMT_UINT64 15 | ||
93 | #define ACPI_DMT_STRING 16 | ||
94 | #define ACPI_DMT_NAME4 17 | ||
95 | #define ACPI_DMT_NAME6 18 | ||
96 | #define ACPI_DMT_NAME8 19 | ||
97 | #define ACPI_DMT_CHKSUM 20 | ||
98 | #define ACPI_DMT_SPACEID 21 | ||
99 | #define ACPI_DMT_GAS 22 | ||
100 | #define ACPI_DMT_ASF 23 | ||
101 | #define ACPI_DMT_DMAR 24 | ||
102 | #define ACPI_DMT_HEST 25 | ||
103 | #define ACPI_DMT_HESTNTFY 26 | ||
104 | #define ACPI_DMT_HESTNTYP 27 | ||
105 | #define ACPI_DMT_MADT 28 | ||
106 | #define ACPI_DMT_SRAT 29 | ||
107 | #define ACPI_DMT_EXIT 30 | ||
108 | #define ACPI_DMT_SIG 31 | ||
109 | |||
110 | typedef | ||
111 | void (*acpi_dmtable_handler) (struct acpi_table_header * table); | ||
112 | |||
113 | struct acpi_dmtable_data { | ||
114 | char *signature; | ||
115 | struct acpi_dmtable_info *table_info; | ||
116 | acpi_dmtable_handler table_handler; | ||
117 | char *name; | ||
118 | }; | ||
119 | |||
120 | struct acpi_op_walk_info { | ||
121 | u32 level; | ||
122 | u32 last_level; | ||
123 | u32 count; | ||
124 | u32 bit_offset; | ||
125 | u32 flags; | ||
126 | struct acpi_walk_state *walk_state; | ||
127 | }; | ||
128 | |||
129 | typedef | ||
130 | acpi_status(*asl_walk_callback) (union acpi_parse_object * op, | ||
131 | u32 level, void *context); | ||
132 | |||
133 | struct acpi_resource_tag { | ||
134 | u32 bit_index; | ||
135 | char *tag; | ||
136 | }; | ||
137 | |||
138 | /* Strings used for decoding flags to ASL keywords */ | ||
139 | |||
140 | extern const char *acpi_gbl_word_decode[]; | ||
141 | extern const char *acpi_gbl_irq_decode[]; | ||
142 | extern const char *acpi_gbl_lock_rule[]; | ||
143 | extern const char *acpi_gbl_access_types[]; | ||
144 | extern const char *acpi_gbl_update_rules[]; | ||
145 | extern const char *acpi_gbl_match_ops[]; | ||
146 | |||
147 | extern struct acpi_dmtable_info acpi_dm_table_info_asf0[]; | ||
148 | extern struct acpi_dmtable_info acpi_dm_table_info_asf1[]; | ||
149 | extern struct acpi_dmtable_info acpi_dm_table_info_asf1a[]; | ||
150 | extern struct acpi_dmtable_info acpi_dm_table_info_asf2[]; | ||
151 | extern struct acpi_dmtable_info acpi_dm_table_info_asf2a[]; | ||
152 | extern struct acpi_dmtable_info acpi_dm_table_info_asf3[]; | ||
153 | extern struct acpi_dmtable_info acpi_dm_table_info_asf4[]; | ||
154 | extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[]; | ||
155 | extern struct acpi_dmtable_info acpi_dm_table_info_boot[]; | ||
156 | extern struct acpi_dmtable_info acpi_dm_table_info_bert[]; | ||
157 | extern struct acpi_dmtable_info acpi_dm_table_info_cpep[]; | ||
158 | extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[]; | ||
159 | extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[]; | ||
160 | extern struct acpi_dmtable_info acpi_dm_table_info_dmar[]; | ||
161 | extern struct acpi_dmtable_info acpi_dm_table_info_dmar_hdr[]; | ||
162 | extern struct acpi_dmtable_info acpi_dm_table_info_dmar_scope[]; | ||
163 | extern struct acpi_dmtable_info acpi_dm_table_info_dmar0[]; | ||
164 | extern struct acpi_dmtable_info acpi_dm_table_info_dmar1[]; | ||
165 | extern struct acpi_dmtable_info acpi_dm_table_info_dmar2[]; | ||
166 | extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[]; | ||
167 | extern struct acpi_dmtable_info acpi_dm_table_info_einj[]; | ||
168 | extern struct acpi_dmtable_info acpi_dm_table_info_einj0[]; | ||
169 | extern struct acpi_dmtable_info acpi_dm_table_info_erst[]; | ||
170 | extern struct acpi_dmtable_info acpi_dm_table_info_facs[]; | ||
171 | extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[]; | ||
172 | extern struct acpi_dmtable_info acpi_dm_table_info_fadt2[]; | ||
173 | extern struct acpi_dmtable_info acpi_dm_table_info_gas[]; | ||
174 | extern struct acpi_dmtable_info acpi_dm_table_info_header[]; | ||
175 | extern struct acpi_dmtable_info acpi_dm_table_info_hest[]; | ||
176 | extern struct acpi_dmtable_info acpi_dm_table_info_hest9[]; | ||
177 | extern struct acpi_dmtable_info acpi_dm_table_info_hest_notify[]; | ||
178 | extern struct acpi_dmtable_info acpi_dm_table_info_hpet[]; | ||
179 | extern struct acpi_dmtable_info acpi_dm_table_info_madt[]; | ||
180 | extern struct acpi_dmtable_info acpi_dm_table_info_madt0[]; | ||
181 | extern struct acpi_dmtable_info acpi_dm_table_info_madt1[]; | ||
182 | extern struct acpi_dmtable_info acpi_dm_table_info_madt2[]; | ||
183 | extern struct acpi_dmtable_info acpi_dm_table_info_madt3[]; | ||
184 | extern struct acpi_dmtable_info acpi_dm_table_info_madt4[]; | ||
185 | extern struct acpi_dmtable_info acpi_dm_table_info_madt5[]; | ||
186 | extern struct acpi_dmtable_info acpi_dm_table_info_madt6[]; | ||
187 | extern struct acpi_dmtable_info acpi_dm_table_info_madt7[]; | ||
188 | extern struct acpi_dmtable_info acpi_dm_table_info_madt8[]; | ||
189 | extern struct acpi_dmtable_info acpi_dm_table_info_madt9[]; | ||
190 | extern struct acpi_dmtable_info acpi_dm_table_info_madt10[]; | ||
191 | extern struct acpi_dmtable_info acpi_dm_table_info_madt_hdr[]; | ||
192 | extern struct acpi_dmtable_info acpi_dm_table_info_mcfg[]; | ||
193 | extern struct acpi_dmtable_info acpi_dm_table_info_mcfg0[]; | ||
194 | extern struct acpi_dmtable_info acpi_dm_table_info_rsdp1[]; | ||
195 | extern struct acpi_dmtable_info acpi_dm_table_info_rsdp2[]; | ||
196 | extern struct acpi_dmtable_info acpi_dm_table_info_sbst[]; | ||
197 | extern struct acpi_dmtable_info acpi_dm_table_info_slic[]; | ||
198 | extern struct acpi_dmtable_info acpi_dm_table_info_slit[]; | ||
199 | extern struct acpi_dmtable_info acpi_dm_table_info_spcr[]; | ||
200 | extern struct acpi_dmtable_info acpi_dm_table_info_spmi[]; | ||
201 | extern struct acpi_dmtable_info acpi_dm_table_info_srat[]; | ||
202 | extern struct acpi_dmtable_info acpi_dm_table_info_srat_hdr[]; | ||
203 | extern struct acpi_dmtable_info acpi_dm_table_info_srat0[]; | ||
204 | extern struct acpi_dmtable_info acpi_dm_table_info_srat1[]; | ||
205 | extern struct acpi_dmtable_info acpi_dm_table_info_srat2[]; | ||
206 | extern struct acpi_dmtable_info acpi_dm_table_info_tcpa[]; | ||
207 | extern struct acpi_dmtable_info acpi_dm_table_info_wdrt[]; | ||
208 | |||
209 | /* | ||
210 | * dmtable | ||
211 | */ | ||
212 | void acpi_dm_dump_data_table(struct acpi_table_header *table); | ||
213 | |||
214 | acpi_status | ||
215 | acpi_dm_dump_table(u32 table_length, | ||
216 | u32 table_offset, | ||
217 | void *table, | ||
218 | u32 sub_table_length, struct acpi_dmtable_info *info); | ||
219 | |||
220 | void acpi_dm_line_header(u32 offset, u32 byte_length, char *name); | ||
221 | |||
222 | void acpi_dm_line_header2(u32 offset, u32 byte_length, char *name, u32 value); | ||
223 | |||
224 | /* | ||
225 | * dmtbdump | ||
226 | */ | ||
227 | void acpi_dm_dump_asf(struct acpi_table_header *table); | ||
228 | |||
229 | void acpi_dm_dump_cpep(struct acpi_table_header *table); | ||
230 | |||
231 | void acpi_dm_dump_dmar(struct acpi_table_header *table); | ||
232 | |||
233 | void acpi_dm_dump_einj(struct acpi_table_header *table); | ||
234 | |||
235 | void acpi_dm_dump_erst(struct acpi_table_header *table); | ||
236 | |||
237 | void acpi_dm_dump_fadt(struct acpi_table_header *table); | ||
238 | |||
239 | void acpi_dm_dump_hest(struct acpi_table_header *table); | ||
240 | |||
241 | void acpi_dm_dump_mcfg(struct acpi_table_header *table); | ||
242 | |||
243 | void acpi_dm_dump_madt(struct acpi_table_header *table); | ||
244 | |||
245 | u32 acpi_dm_dump_rsdp(struct acpi_table_header *table); | ||
246 | |||
247 | void acpi_dm_dump_rsdt(struct acpi_table_header *table); | ||
248 | |||
249 | void acpi_dm_dump_slit(struct acpi_table_header *table); | ||
250 | |||
251 | void acpi_dm_dump_srat(struct acpi_table_header *table); | ||
252 | |||
253 | void acpi_dm_dump_xsdt(struct acpi_table_header *table); | ||
254 | |||
255 | /* | ||
256 | * dmwalk | ||
257 | */ | ||
258 | void | ||
259 | acpi_dm_disassemble(struct acpi_walk_state *walk_state, | ||
260 | union acpi_parse_object *origin, u32 num_opcodes); | ||
261 | |||
262 | void | ||
263 | acpi_dm_walk_parse_tree(union acpi_parse_object *op, | ||
264 | asl_walk_callback descending_callback, | ||
265 | asl_walk_callback ascending_callback, void *context); | ||
266 | |||
267 | /* | ||
268 | * dmopcode | ||
269 | */ | ||
270 | void | ||
271 | acpi_dm_disassemble_one_op(struct acpi_walk_state *walk_state, | ||
272 | struct acpi_op_walk_info *info, | ||
273 | union acpi_parse_object *op); | ||
274 | |||
275 | void acpi_dm_decode_internal_object(union acpi_operand_object *obj_desc); | ||
276 | |||
277 | u32 acpi_dm_list_type(union acpi_parse_object *op); | ||
278 | |||
279 | void acpi_dm_method_flags(union acpi_parse_object *op); | ||
280 | |||
281 | void acpi_dm_field_flags(union acpi_parse_object *op); | ||
282 | |||
283 | void acpi_dm_address_space(u8 space_id); | ||
284 | |||
285 | void acpi_dm_region_flags(union acpi_parse_object *op); | ||
286 | |||
287 | void acpi_dm_match_op(union acpi_parse_object *op); | ||
288 | |||
289 | u8 acpi_dm_comma_if_list_member(union acpi_parse_object *op); | ||
290 | |||
291 | void acpi_dm_comma_if_field_member(union acpi_parse_object *op); | ||
292 | |||
293 | /* | ||
294 | * dmnames | ||
295 | */ | ||
296 | u32 acpi_dm_dump_name(char *name); | ||
297 | |||
298 | acpi_status | ||
299 | acpi_ps_display_object_pathname(struct acpi_walk_state *walk_state, | ||
300 | union acpi_parse_object *op); | ||
301 | |||
302 | void acpi_dm_namestring(char *name); | ||
303 | |||
304 | /* | ||
305 | * dmobject | ||
306 | */ | ||
307 | void | ||
308 | acpi_dm_display_internal_object(union acpi_operand_object *obj_desc, | ||
309 | struct acpi_walk_state *walk_state); | ||
310 | |||
311 | void acpi_dm_display_arguments(struct acpi_walk_state *walk_state); | ||
312 | |||
313 | void acpi_dm_display_locals(struct acpi_walk_state *walk_state); | ||
314 | |||
315 | void | ||
316 | acpi_dm_dump_method_info(acpi_status status, | ||
317 | struct acpi_walk_state *walk_state, | ||
318 | union acpi_parse_object *op); | ||
319 | |||
320 | /* | ||
321 | * dmbuffer | ||
322 | */ | ||
323 | void acpi_dm_disasm_byte_list(u32 level, u8 * byte_data, u32 byte_count); | ||
324 | |||
325 | void | ||
326 | acpi_dm_byte_list(struct acpi_op_walk_info *info, union acpi_parse_object *op); | ||
327 | |||
328 | void acpi_dm_is_eisa_id(union acpi_parse_object *op); | ||
329 | |||
330 | void acpi_dm_eisa_id(u32 encoded_id); | ||
331 | |||
332 | u8 acpi_dm_is_unicode_buffer(union acpi_parse_object *op); | ||
333 | |||
334 | u8 acpi_dm_is_string_buffer(union acpi_parse_object *op); | ||
335 | |||
336 | /* | ||
337 | * dmresrc | ||
338 | */ | ||
339 | void acpi_dm_dump_integer8(u8 value, char *name); | ||
340 | |||
341 | void acpi_dm_dump_integer16(u16 value, char *name); | ||
342 | |||
343 | void acpi_dm_dump_integer32(u32 value, char *name); | ||
344 | |||
345 | void acpi_dm_dump_integer64(u64 value, char *name); | ||
346 | |||
347 | void | ||
348 | acpi_dm_resource_template(struct acpi_op_walk_info *info, | ||
349 | union acpi_parse_object *op, | ||
350 | u8 * byte_data, u32 byte_count); | ||
351 | |||
352 | acpi_status acpi_dm_is_resource_template(union acpi_parse_object *op); | ||
353 | |||
354 | void acpi_dm_indent(u32 level); | ||
355 | |||
356 | void acpi_dm_bit_list(u16 mask); | ||
357 | |||
358 | void acpi_dm_decode_attribute(u8 attribute); | ||
359 | |||
360 | void acpi_dm_descriptor_name(void); | ||
361 | |||
362 | /* | ||
363 | * dmresrcl | ||
364 | */ | ||
365 | void | ||
366 | acpi_dm_word_descriptor(union aml_resource *resource, u32 length, u32 level); | ||
367 | |||
368 | void | ||
369 | acpi_dm_dword_descriptor(union aml_resource *resource, u32 length, u32 level); | ||
370 | |||
371 | void | ||
372 | acpi_dm_extended_descriptor(union aml_resource *resource, | ||
373 | u32 length, u32 level); | ||
374 | |||
375 | void | ||
376 | acpi_dm_qword_descriptor(union aml_resource *resource, u32 length, u32 level); | ||
377 | |||
378 | void | ||
379 | acpi_dm_memory24_descriptor(union aml_resource *resource, | ||
380 | u32 length, u32 level); | ||
381 | |||
382 | void | ||
383 | acpi_dm_memory32_descriptor(union aml_resource *resource, | ||
384 | u32 length, u32 level); | ||
385 | |||
386 | void | ||
387 | acpi_dm_fixed_memory32_descriptor(union aml_resource *resource, | ||
388 | u32 length, u32 level); | ||
389 | |||
390 | void | ||
391 | acpi_dm_generic_register_descriptor(union aml_resource *resource, | ||
392 | u32 length, u32 level); | ||
393 | |||
394 | void | ||
395 | acpi_dm_interrupt_descriptor(union aml_resource *resource, | ||
396 | u32 length, u32 level); | ||
397 | |||
398 | void | ||
399 | acpi_dm_vendor_large_descriptor(union aml_resource *resource, | ||
400 | u32 length, u32 level); | ||
401 | |||
402 | void acpi_dm_vendor_common(char *name, u8 * byte_data, u32 length, u32 level); | ||
403 | |||
404 | /* | ||
405 | * dmresrcs | ||
406 | */ | ||
407 | void | ||
408 | acpi_dm_irq_descriptor(union aml_resource *resource, u32 length, u32 level); | ||
409 | |||
410 | void | ||
411 | acpi_dm_dma_descriptor(union aml_resource *resource, u32 length, u32 level); | ||
412 | |||
413 | void acpi_dm_io_descriptor(union aml_resource *resource, u32 length, u32 level); | ||
414 | |||
415 | void | ||
416 | acpi_dm_fixed_io_descriptor(union aml_resource *resource, | ||
417 | u32 length, u32 level); | ||
418 | |||
419 | void | ||
420 | acpi_dm_start_dependent_descriptor(union aml_resource *resource, | ||
421 | u32 length, u32 level); | ||
422 | |||
423 | void | ||
424 | acpi_dm_end_dependent_descriptor(union aml_resource *resource, | ||
425 | u32 length, u32 level); | ||
426 | |||
427 | void | ||
428 | acpi_dm_vendor_small_descriptor(union aml_resource *resource, | ||
429 | u32 length, u32 level); | ||
430 | |||
431 | /* | ||
432 | * dmutils | ||
433 | */ | ||
434 | void acpi_dm_add_to_external_list(char *path, u8 type, u32 value); | ||
435 | |||
436 | /* | ||
437 | * dmrestag | ||
438 | */ | ||
439 | void acpi_dm_find_resources(union acpi_parse_object *root); | ||
440 | |||
441 | void | ||
442 | acpi_dm_check_resource_reference(union acpi_parse_object *op, | ||
443 | struct acpi_walk_state *walk_state); | ||
444 | |||
445 | #endif /* __ACDISASM_H__ */ | ||
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 84f5cb242863..eda04546cdf6 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h | |||
@@ -153,8 +153,9 @@ | |||
153 | #define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x001E | AE_CODE_AML) | 153 | #define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x001E | AE_CODE_AML) |
154 | #define AE_AML_BAD_RESOURCE_LENGTH (acpi_status) (0x001F | AE_CODE_AML) | 154 | #define AE_AML_BAD_RESOURCE_LENGTH (acpi_status) (0x001F | AE_CODE_AML) |
155 | #define AE_AML_ILLEGAL_ADDRESS (acpi_status) (0x0020 | AE_CODE_AML) | 155 | #define AE_AML_ILLEGAL_ADDRESS (acpi_status) (0x0020 | AE_CODE_AML) |
156 | #define AE_AML_INFINITE_LOOP (acpi_status) (0x0021 | AE_CODE_AML) | ||
156 | 157 | ||
157 | #define AE_CODE_AML_MAX 0x0020 | 158 | #define AE_CODE_AML_MAX 0x0021 |
158 | 159 | ||
159 | /* | 160 | /* |
160 | * Internal exceptions used for control | 161 | * Internal exceptions used for control |
@@ -175,6 +176,8 @@ | |||
175 | 176 | ||
176 | #define AE_CODE_CTRL_MAX 0x000D | 177 | #define AE_CODE_CTRL_MAX 0x000D |
177 | 178 | ||
179 | /* Exception strings for acpi_format_exception */ | ||
180 | |||
178 | #ifdef DEFINE_ACPI_GLOBALS | 181 | #ifdef DEFINE_ACPI_GLOBALS |
179 | 182 | ||
180 | /* | 183 | /* |
@@ -267,6 +270,7 @@ char const *acpi_gbl_exception_names_aml[] = { | |||
267 | "AE_AML_CIRCULAR_REFERENCE", | 270 | "AE_AML_CIRCULAR_REFERENCE", |
268 | "AE_AML_BAD_RESOURCE_LENGTH", | 271 | "AE_AML_BAD_RESOURCE_LENGTH", |
269 | "AE_AML_ILLEGAL_ADDRESS", | 272 | "AE_AML_ILLEGAL_ADDRESS", |
273 | "AE_AML_INFINITE_LOOP" | ||
270 | }; | 274 | }; |
271 | 275 | ||
272 | char const *acpi_gbl_exception_names_ctrl[] = { | 276 | char const *acpi_gbl_exception_names_ctrl[] = { |
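The new AE_AML_INFINITE_LOOP code and its string table entry are what acpi_format_exception() draws on to turn a status into a readable name. A short sketch of decoding a status that way (the wrapper function is hypothetical):

#include <acpi/acpi.h>
#include <linux/kernel.h>

static void demo_report_status(acpi_status status)
{
        if (ACPI_FAILURE(status))
                printk(KERN_ERR "demo: AML error: %s\n",
                       acpi_format_exception(status));
}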
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index db8852d8bcf7..5c823d5ab783 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h | |||
@@ -45,9 +45,9 @@ | |||
45 | #define __ACOUTPUT_H__ | 45 | #define __ACOUTPUT_H__ |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Debug levels and component IDs. These are used to control the | 48 | * Debug levels and component IDs. These are used to control the |
49 | * granularity of the output of the DEBUG_PRINT macro -- on a per- | 49 | * granularity of the output of the ACPI_DEBUG_PRINT macro -- on a |
50 | * component basis and a per-exception-type basis. | 50 | * per-component basis and a per-exception-type basis. |
51 | */ | 51 | */ |
52 | 52 | ||
53 | /* Component IDs are used in the global "DebugLayer" */ | 53 | /* Component IDs are used in the global "DebugLayer" */ |
@@ -69,8 +69,10 @@ | |||
69 | 69 | ||
70 | #define ACPI_COMPILER 0x00001000 | 70 | #define ACPI_COMPILER 0x00001000 |
71 | #define ACPI_TOOLS 0x00002000 | 71 | #define ACPI_TOOLS 0x00002000 |
72 | #define ACPI_EXAMPLE 0x00004000 | ||
73 | #define ACPI_DRIVER 0x00008000 | ||
72 | 74 | ||
73 | #define ACPI_ALL_COMPONENTS 0x00003FFF | 75 | #define ACPI_ALL_COMPONENTS 0x0000FFFF |
74 | #define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS) | 76 | #define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS) |
75 | 77 | ||
76 | /* Component IDs reserved for ACPI drivers */ | 78 | /* Component IDs reserved for ACPI drivers */ |
@@ -78,7 +80,7 @@ | |||
78 | #define ACPI_ALL_DRIVERS 0xFFFF0000 | 80 | #define ACPI_ALL_DRIVERS 0xFFFF0000 |
79 | 81 | ||
80 | /* | 82 | /* |
81 | * Raw debug output levels, do not use these in the DEBUG_PRINT macros | 83 | * Raw debug output levels, do not use these in the ACPI_DEBUG_PRINT macros |
82 | */ | 84 | */ |
83 | #define ACPI_LV_INIT 0x00000001 | 85 | #define ACPI_LV_INIT 0x00000001 |
84 | #define ACPI_LV_DEBUG_OBJECT 0x00000002 | 86 | #define ACPI_LV_DEBUG_OBJECT 0x00000002 |
@@ -176,4 +178,95 @@ | |||
176 | #define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT) | 178 | #define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT) |
177 | #define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) | 179 | #define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) |
178 | 180 | ||
181 | #if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES) | ||
182 | /* | ||
183 | * Module name is included in both debug and non-debug versions primarily for | ||
184 | * error messages. The __FILE__ macro is not very useful for this, because it | ||
185 | * often includes the entire pathname to the module | ||
186 | */ | ||
187 | #define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name; | ||
188 | #else | ||
189 | #define ACPI_MODULE_NAME(name) | ||
190 | #endif | ||
191 | |||
192 | /* | ||
193 | * Ascii error messages can be configured out | ||
194 | */ | ||
195 | #ifndef ACPI_NO_ERROR_MESSAGES | ||
196 | #define AE_INFO _acpi_module_name, __LINE__ | ||
197 | |||
198 | /* | ||
199 | * Error reporting. Callers module and line number are inserted by AE_INFO, | ||
200 | * the plist contains a set of parens to allow variable-length lists. | ||
201 | * These macros are used for both the debug and non-debug versions of the code. | ||
202 | */ | ||
203 | #define ACPI_INFO(plist) acpi_info plist | ||
204 | #define ACPI_WARNING(plist) acpi_warning plist | ||
205 | #define ACPI_EXCEPTION(plist) acpi_exception plist | ||
206 | #define ACPI_ERROR(plist) acpi_error plist | ||
207 | |||
208 | #else | ||
209 | |||
210 | /* No error messages */ | ||
211 | |||
212 | #define ACPI_INFO(plist) | ||
213 | #define ACPI_WARNING(plist) | ||
214 | #define ACPI_EXCEPTION(plist) | ||
215 | #define ACPI_ERROR(plist) | ||
216 | |||
217 | #endif /* ACPI_NO_ERROR_MESSAGES */ | ||
218 | |||
219 | /* | ||
220 | * Debug macros that are conditionally compiled | ||
221 | */ | ||
222 | #ifdef ACPI_DEBUG_OUTPUT | ||
223 | |||
224 | /* | ||
225 | * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header, | ||
226 | * define it now. This is the case where there the compiler does not support | ||
227 | * a __FUNCTION__ macro or equivalent. | ||
228 | */ | ||
229 | #ifndef ACPI_GET_FUNCTION_NAME | ||
230 | #define ACPI_GET_FUNCTION_NAME _acpi_function_name | ||
231 | |||
232 | /* | ||
233 | * The Name parameter should be the procedure name as a quoted string. | ||
234 | * The function name is also used by the function exit macros below. | ||
235 | * Note: (const char) is used to be compatible with the debug interfaces | ||
236 | * and macros such as __FUNCTION__. | ||
237 | */ | ||
238 | #define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name; | ||
239 | |||
240 | #else | ||
241 | /* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */ | ||
242 | |||
243 | #define ACPI_FUNCTION_NAME(name) | ||
244 | #endif /* ACPI_GET_FUNCTION_NAME */ | ||
245 | |||
246 | /* | ||
247 | * Common parameters used for debug output functions: | ||
248 | * line number, function name, module(file) name, component ID | ||
249 | */ | ||
250 | #define ACPI_DEBUG_PARAMETERS __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT | ||
251 | |||
252 | /* | ||
253 | * Master debug print macros | ||
254 | * Print message if and only if: | ||
255 | * 1) Debug print for the current component is enabled | ||
256 | * 2) Debug error level or trace level for the print statement is enabled | ||
257 | */ | ||
258 | #define ACPI_DEBUG_PRINT(plist) acpi_debug_print plist | ||
259 | #define ACPI_DEBUG_PRINT_RAW(plist) acpi_debug_print_raw plist | ||
260 | |||
261 | #else | ||
262 | /* | ||
263 | * This is the non-debug case -- make everything go away, | ||
264 | * leaving no executable debug code! | ||
265 | */ | ||
266 | #define ACPI_FUNCTION_NAME(a) | ||
267 | #define ACPI_DEBUG_PRINT(pl) | ||
268 | #define ACPI_DEBUG_PRINT_RAW(pl) | ||
269 | |||
270 | #endif /* ACPI_DEBUG_OUTPUT */ | ||
271 | |||
179 | #endif /* __ACOUTPUT_H__ */ | 272 | #endif /* __ACOUTPUT_H__ */ |
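With ACPI_MODULE_NAME, ACPI_ERROR and the ACPI_DEBUG_PRINT machinery now living in the public acoutput.h, a translation unit defines its component ID and module name before using them; the call shape matches the ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ...)) usage visible in the panasonic-laptop hunk above. A minimal sketch (ACPI_EXAMPLE is the component bit added in this hunk; the function and method name are hypothetical):

#include <acpi/acpi.h>

#define _COMPONENT              ACPI_EXAMPLE
ACPI_MODULE_NAME("demo_component");

static acpi_status demo_check(acpi_handle handle)
{
        acpi_status status = acpi_evaluate_object(handle, "_STA", NULL, NULL);

        if (ACPI_FAILURE(status)) {
                ACPI_ERROR((AE_INFO, "Evaluation failed"));
                return status;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluation succeeded\n"));
        return AE_OK;
}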
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h index c515ef6cc89e..472b7bf0c5d4 100644 --- a/include/acpi/acpi.h +++ b/include/acpi/acpi.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | * | 2 | * |
3 | * Name: acpi.h - Master include file, Publics and external data. | 3 | * Name: acpi.h - Master public include file used to interface to ACPICA |
4 | * | 4 | * |
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
@@ -45,25 +45,22 @@ | |||
45 | #define __ACPI_H__ | 45 | #define __ACPI_H__ |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Common includes for all ACPI driver files | 48 | * Public include files for use by code that will interface to ACPICA. |
49 | * We put them here because we don't want to duplicate them | 49 | * |
50 | * in the rest of the source code again and again. | 50 | * Information includes the ACPICA data types, names, exceptions, and |
51 | * external interface prototypes. Also included are the definitions for | ||
52 | * all ACPI tables (FADT, MADT, etc.) | ||
53 | * | ||
54 | * Note: The order of these include files is important. | ||
51 | */ | 55 | */ |
52 | #include "acnames.h" /* Global ACPI names and strings */ | 56 | #include "platform/acenv.h" /* Environment-specific items */ |
53 | #include "acconfig.h" /* Configuration constants */ | 57 | #include "acnames.h" /* Common ACPI names and strings */ |
54 | #include "platform/acenv.h" /* Target environment specific items */ | 58 | #include "actypes.h" /* ACPICA data types and structures */ |
55 | #include "actypes.h" /* Fundamental common data types */ | 59 | #include "acexcep.h" /* ACPICA exceptions */ |
56 | #include "acexcep.h" /* ACPI exception codes */ | ||
57 | #include "acmacros.h" /* C macros */ | ||
58 | #include "actbl.h" /* ACPI table definitions */ | 60 | #include "actbl.h" /* ACPI table definitions */ |
59 | #include "aclocal.h" /* Internal data types */ | ||
60 | #include "acoutput.h" /* Error output and Debug macros */ | 61 | #include "acoutput.h" /* Error output and Debug macros */ |
61 | #include "acpiosxf.h" /* Interfaces to the ACPI-to-OS layer */ | 62 | #include "acrestyp.h" /* Resource Descriptor structs */ |
63 | #include "acpiosxf.h" /* OSL interfaces (ACPICA-to-OS) */ | ||
62 | #include "acpixf.h" /* ACPI core subsystem external interfaces */ | 64 | #include "acpixf.h" /* ACPI core subsystem external interfaces */ |
63 | #include "acobject.h" /* ACPI internal object */ | ||
64 | #include "acstruct.h" /* Common structures */ | ||
65 | #include "acglobal.h" /* All global variables */ | ||
66 | #include "achware.h" /* Hardware defines and interfaces */ | ||
67 | #include "acutils.h" /* Utility interfaces */ | ||
68 | 65 | ||
69 | #endif /* __ACPI_H__ */ | 66 | #endif /* __ACPI_H__ */ |
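A hedged sketch of a consumer translation unit that relies only on the reordered public includes above; the pathname and function name are illustrative:

#include <acpi/acpi.h>

/* Look up an object by full pathname; only the public interfaces pulled in
 * by the single master include above are needed. */
static acpi_status example_find_pci_root(acpi_handle *out_handle)
{
	return acpi_get_handle(NULL, "\\_SB.PCI0", out_handle);
}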
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index b91440ac0d16..a62720a7edc0 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -121,8 +121,11 @@ acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout); | |||
121 | acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units); | 121 | acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units); |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * Mutex primitives | 124 | * Mutex primitives. May be configured to use semaphores instead via |
125 | * ACPI_MUTEX_TYPE (see platform/acenv.h) | ||
125 | */ | 126 | */ |
127 | #if (ACPI_MUTEX_TYPE != ACPI_BINARY_SEMAPHORE) | ||
128 | |||
126 | acpi_status acpi_os_create_mutex(acpi_mutex * out_handle); | 129 | acpi_status acpi_os_create_mutex(acpi_mutex * out_handle); |
127 | 130 | ||
128 | void acpi_os_delete_mutex(acpi_mutex handle); | 131 | void acpi_os_delete_mutex(acpi_mutex handle); |
@@ -130,13 +133,7 @@ void acpi_os_delete_mutex(acpi_mutex handle); | |||
130 | acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout); | 133 | acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout); |
131 | 134 | ||
132 | void acpi_os_release_mutex(acpi_mutex handle); | 135 | void acpi_os_release_mutex(acpi_mutex handle); |
133 | 136 | #endif | |
134 | /* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */ | ||
135 | |||
136 | #define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) | ||
137 | #define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) | ||
138 | #define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) | ||
139 | #define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) | ||
140 | 137 | ||
141 | /* | 138 | /* |
142 | * Memory allocation and mapping | 139 | * Memory allocation and mapping |
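A hypothetical OSL sketch for a host that selects ACPI_OSL_MUTEX; Linux keeps ACPI_BINARY_SEMAPHORE (see the aclinux.h hunk below), in which case these four functions are never referenced and the semaphore mapping in actypes.h is used instead. Error and timeout handling are abbreviated:

#include <pthread.h>
#include <stdlib.h>
#include "acpi.h"

acpi_status acpi_os_create_mutex(acpi_mutex *out_handle)
{
	pthread_mutex_t *mutex = malloc(sizeof(*mutex));

	if (!mutex)
		return AE_NO_MEMORY;
	pthread_mutex_init(mutex, NULL);
	*out_handle = mutex;	/* acpi_mutex defaults to void * */
	return AE_OK;
}

void acpi_os_delete_mutex(acpi_mutex handle)
{
	pthread_mutex_destroy(handle);
	free(handle);
}

acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout)
{
	/* Timeout handling omitted; ACPI_WAIT_FOREVER is the common case. */
	pthread_mutex_lock(handle);
	return AE_OK;
}

void acpi_os_release_mutex(acpi_mutex handle)
{
	pthread_mutex_unlock(handle);
}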
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 33bc0e3b1954..c8e8cf45830f 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -45,9 +45,32 @@ | |||
45 | #ifndef __ACXFACE_H__ | 45 | #ifndef __ACXFACE_H__ |
46 | #define __ACXFACE_H__ | 46 | #define __ACXFACE_H__ |
47 | 47 | ||
48 | /* Current ACPICA subsystem version in YYYYMMDD format */ | ||
49 | |||
50 | #define ACPI_CA_VERSION 0x20081204 | ||
51 | |||
48 | #include "actypes.h" | 52 | #include "actypes.h" |
49 | #include "actbl.h" | 53 | #include "actbl.h" |
50 | 54 | ||
55 | extern u8 acpi_gbl_permanent_mmap; | ||
56 | |||
57 | /* | ||
58 | * Globals that are publically available, allowing for | ||
59 | * run time configuration | ||
60 | */ | ||
61 | extern u32 acpi_dbg_level; | ||
62 | extern u32 acpi_dbg_layer; | ||
63 | extern u8 acpi_gbl_enable_interpreter_slack; | ||
64 | extern u8 acpi_gbl_all_methods_serialized; | ||
65 | extern u8 acpi_gbl_create_osi_method; | ||
66 | extern u8 acpi_gbl_leave_wake_gpes_disabled; | ||
67 | extern acpi_name acpi_gbl_trace_method_name; | ||
68 | extern u32 acpi_gbl_trace_flags; | ||
69 | |||
70 | extern u32 acpi_current_gpe_count; | ||
71 | extern struct acpi_table_fadt acpi_gbl_FADT; | ||
72 | |||
73 | extern u32 acpi_rsdt_forced; | ||
51 | /* | 74 | /* |
52 | * Global interfaces | 75 | * Global interfaces |
53 | */ | 76 | */ |
@@ -79,11 +102,6 @@ const char *acpi_format_exception(acpi_status exception); | |||
79 | 102 | ||
80 | acpi_status acpi_purge_cached_objects(void); | 103 | acpi_status acpi_purge_cached_objects(void); |
81 | 104 | ||
82 | #ifdef ACPI_FUTURE_USAGE | ||
83 | acpi_status | ||
84 | acpi_install_initialization_handler(acpi_init_handler handler, u32 function); | ||
85 | #endif | ||
86 | |||
87 | /* | 105 | /* |
88 | * ACPI Memory management | 106 | * ACPI Memory management |
89 | */ | 107 | */ |
@@ -193,9 +211,12 @@ acpi_status acpi_get_id(acpi_handle object, acpi_owner_id * out_type); | |||
193 | acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle); | 211 | acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle); |
194 | 212 | ||
195 | /* | 213 | /* |
196 | * Event handler interfaces | 214 | * Handler interfaces |
197 | */ | 215 | */ |
198 | acpi_status | 216 | acpi_status |
217 | acpi_install_initialization_handler(acpi_init_handler handler, u32 function); | ||
218 | |||
219 | acpi_status | ||
199 | acpi_install_fixed_event_handler(u32 acpi_event, | 220 | acpi_install_fixed_event_handler(u32 acpi_event, |
200 | acpi_event_handler handler, void *context); | 221 | acpi_event_handler handler, void *context); |
201 | 222 | ||
@@ -227,6 +248,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
227 | u32 gpe_number, | 248 | u32 gpe_number, |
228 | u32 type, acpi_event_handler address, void *context); | 249 | u32 type, acpi_event_handler address, void *context); |
229 | 250 | ||
251 | acpi_status | ||
252 | acpi_remove_gpe_handler(acpi_handle gpe_device, | ||
253 | u32 gpe_number, acpi_event_handler address); | ||
254 | |||
230 | #ifdef ACPI_FUTURE_USAGE | 255 | #ifdef ACPI_FUTURE_USAGE |
231 | acpi_status acpi_install_exception_handler(acpi_exception_handler handler); | 256 | acpi_status acpi_install_exception_handler(acpi_exception_handler handler); |
232 | #endif | 257 | #endif |
@@ -238,10 +263,6 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle); | |||
238 | 263 | ||
239 | acpi_status acpi_release_global_lock(u32 handle); | 264 | acpi_status acpi_release_global_lock(u32 handle); |
240 | 265 | ||
241 | acpi_status | ||
242 | acpi_remove_gpe_handler(acpi_handle gpe_device, | ||
243 | u32 gpe_number, acpi_event_handler address); | ||
244 | |||
245 | acpi_status acpi_enable_event(u32 event, u32 flags); | 266 | acpi_status acpi_enable_event(u32 event, u32 flags); |
246 | 267 | ||
247 | acpi_status acpi_disable_event(u32 event, u32 flags); | 268 | acpi_status acpi_disable_event(u32 event, u32 flags); |
@@ -250,6 +271,9 @@ acpi_status acpi_clear_event(u32 event); | |||
250 | 271 | ||
251 | acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status); | 272 | acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status); |
252 | 273 | ||
274 | /* | ||
275 | * GPE Interfaces | ||
276 | */ | ||
253 | acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type); | 277 | acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type); |
254 | 278 | ||
255 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number); | 279 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number); |
@@ -263,6 +287,12 @@ acpi_get_gpe_status(acpi_handle gpe_device, | |||
263 | u32 gpe_number, | 287 | u32 gpe_number, |
264 | u32 flags, acpi_event_status * event_status); | 288 | u32 flags, acpi_event_status * event_status); |
265 | 289 | ||
290 | acpi_status acpi_disable_all_gpes(void); | ||
291 | |||
292 | acpi_status acpi_enable_all_runtime_gpes(void); | ||
293 | |||
294 | acpi_status acpi_get_gpe_device(u32 gpe_index, acpi_handle *gpe_device); | ||
295 | |||
266 | acpi_status | 296 | acpi_status |
267 | acpi_install_gpe_block(acpi_handle gpe_device, | 297 | acpi_install_gpe_block(acpi_handle gpe_device, |
268 | struct acpi_generic_address *gpe_block_address, | 298 | struct acpi_generic_address *gpe_block_address, |
@@ -313,6 +343,8 @@ acpi_resource_to_address64(struct acpi_resource *resource, | |||
313 | /* | 343 | /* |
314 | * Hardware (ACPI device) interfaces | 344 | * Hardware (ACPI device) interfaces |
315 | */ | 345 | */ |
346 | acpi_status acpi_reset(void); | ||
347 | |||
316 | acpi_status acpi_get_register(u32 register_id, u32 * return_value); | 348 | acpi_status acpi_get_register(u32 register_id, u32 * return_value); |
317 | 349 | ||
318 | acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value); | 350 | acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value); |
@@ -320,12 +352,14 @@ acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value); | |||
320 | acpi_status acpi_set_register(u32 register_id, u32 value); | 352 | acpi_status acpi_set_register(u32 register_id, u32 value); |
321 | 353 | ||
322 | acpi_status | 354 | acpi_status |
323 | acpi_set_firmware_waking_vector(acpi_physical_address physical_address); | 355 | acpi_set_firmware_waking_vector(u32 physical_address); |
324 | 356 | ||
325 | #ifdef ACPI_FUTURE_USAGE | ||
326 | acpi_status | 357 | acpi_status |
327 | acpi_get_firmware_waking_vector(acpi_physical_address * physical_address); | 358 | acpi_set_firmware_waking_vector64(u64 physical_address); |
328 | #endif | 359 | |
360 | acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg); | ||
361 | |||
362 | acpi_status acpi_write(u32 value, struct acpi_generic_address *reg); | ||
329 | 363 | ||
330 | acpi_status | 364 | acpi_status |
331 | acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b); | 365 | acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b); |
@@ -340,4 +374,42 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state); | |||
340 | 374 | ||
341 | acpi_status acpi_leave_sleep_state(u8 sleep_state); | 375 | acpi_status acpi_leave_sleep_state(u8 sleep_state); |
342 | 376 | ||
377 | /* | ||
378 | * Debug output | ||
379 | */ | ||
380 | void ACPI_INTERNAL_VAR_XFACE | ||
381 | acpi_error(const char *module_name, | ||
382 | u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3); | ||
383 | |||
384 | void ACPI_INTERNAL_VAR_XFACE | ||
385 | acpi_exception(const char *module_name, | ||
386 | u32 line_number, | ||
387 | acpi_status status, const char *format, ...) ACPI_PRINTF_LIKE(4); | ||
388 | |||
389 | void ACPI_INTERNAL_VAR_XFACE | ||
390 | acpi_warning(const char *module_name, | ||
391 | u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3); | ||
392 | |||
393 | void ACPI_INTERNAL_VAR_XFACE | ||
394 | acpi_info(const char *module_name, | ||
395 | u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3); | ||
396 | |||
397 | #ifdef ACPI_DEBUG_OUTPUT | ||
398 | |||
399 | void ACPI_INTERNAL_VAR_XFACE | ||
400 | acpi_debug_print(u32 requested_debug_level, | ||
401 | u32 line_number, | ||
402 | const char *function_name, | ||
403 | const char *module_name, | ||
404 | u32 component_id, const char *format, ...) ACPI_PRINTF_LIKE(6); | ||
405 | |||
406 | void ACPI_INTERNAL_VAR_XFACE | ||
407 | acpi_debug_print_raw(u32 requested_debug_level, | ||
408 | u32 line_number, | ||
409 | const char *function_name, | ||
410 | const char *module_name, | ||
411 | u32 component_id, | ||
412 | const char *format, ...) ACPI_PRINTF_LIKE(6); | ||
413 | #endif | ||
414 | |||
343 | #endif /* __ACXFACE_H__ */ | 415 | #endif /* __ACXFACE_H__ */ |
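A short sketch of how a host might use the newly exported runtime-configuration globals before initializing ACPICA. The chosen values are illustrative, the public headers are assumed to be included, and the debug level/layer only take effect in a build with ACPI_DEBUG_OUTPUT:

#include <acpi/acpi.h>

void example_configure_acpica(void)
{
	acpi_dbg_level = ACPI_LV_INIT | ACPI_LV_INFO;	/* which messages to emit */
	acpi_dbg_layer = ACPI_ALL_COMPONENTS;		/* which components to trace */
	acpi_gbl_enable_interpreter_slack = TRUE;	/* tolerate loosely written AML */
}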
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
new file mode 100644
index 000000000000..9ffe00feada6
--- /dev/null
+++ b/include/acpi/acrestyp.h
@@ -0,0 +1,405 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Name: acrestyp.h - Defines, types, and structures for resource descriptors | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #ifndef __ACRESTYP_H__ | ||
45 | #define __ACRESTYP_H__ | ||
46 | |||
47 | /* | ||
48 | * Definitions for Resource Attributes | ||
49 | */ | ||
50 | typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */ | ||
51 | typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */ | ||
52 | |||
53 | /* | ||
54 | * Memory Attributes | ||
55 | */ | ||
56 | #define ACPI_READ_ONLY_MEMORY (u8) 0x00 | ||
57 | #define ACPI_READ_WRITE_MEMORY (u8) 0x01 | ||
58 | |||
59 | #define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00 | ||
60 | #define ACPI_CACHABLE_MEMORY (u8) 0x01 | ||
61 | #define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02 | ||
62 | #define ACPI_PREFETCHABLE_MEMORY (u8) 0x03 | ||
63 | |||
64 | /* | ||
65 | * IO Attributes | ||
66 | * The ISA IO ranges are: n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh. | ||
67 | * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh. | ||
68 | */ | ||
69 | #define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01 | ||
70 | #define ACPI_ISA_ONLY_RANGES (u8) 0x02 | ||
71 | #define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES) | ||
72 | |||
73 | /* Type of translation - 1=Sparse, 0=Dense */ | ||
74 | |||
75 | #define ACPI_SPARSE_TRANSLATION (u8) 0x01 | ||
76 | |||
77 | /* | ||
78 | * IO Port Descriptor Decode | ||
79 | */ | ||
80 | #define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */ | ||
81 | #define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ | ||
82 | |||
83 | /* | ||
84 | * IRQ Attributes | ||
85 | */ | ||
86 | #define ACPI_LEVEL_SENSITIVE (u8) 0x00 | ||
87 | #define ACPI_EDGE_SENSITIVE (u8) 0x01 | ||
88 | |||
89 | #define ACPI_ACTIVE_HIGH (u8) 0x00 | ||
90 | #define ACPI_ACTIVE_LOW (u8) 0x01 | ||
91 | |||
92 | #define ACPI_EXCLUSIVE (u8) 0x00 | ||
93 | #define ACPI_SHARED (u8) 0x01 | ||
94 | |||
95 | /* | ||
96 | * DMA Attributes | ||
97 | */ | ||
98 | #define ACPI_COMPATIBILITY (u8) 0x00 | ||
99 | #define ACPI_TYPE_A (u8) 0x01 | ||
100 | #define ACPI_TYPE_B (u8) 0x02 | ||
101 | #define ACPI_TYPE_F (u8) 0x03 | ||
102 | |||
103 | #define ACPI_NOT_BUS_MASTER (u8) 0x00 | ||
104 | #define ACPI_BUS_MASTER (u8) 0x01 | ||
105 | |||
106 | #define ACPI_TRANSFER_8 (u8) 0x00 | ||
107 | #define ACPI_TRANSFER_8_16 (u8) 0x01 | ||
108 | #define ACPI_TRANSFER_16 (u8) 0x02 | ||
109 | |||
110 | /* | ||
111 | * Start Dependent Functions Priority definitions | ||
112 | */ | ||
113 | #define ACPI_GOOD_CONFIGURATION (u8) 0x00 | ||
114 | #define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01 | ||
115 | #define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02 | ||
116 | |||
117 | /* | ||
118 | * 16, 32 and 64-bit Address Descriptor resource types | ||
119 | */ | ||
120 | #define ACPI_MEMORY_RANGE (u8) 0x00 | ||
121 | #define ACPI_IO_RANGE (u8) 0x01 | ||
122 | #define ACPI_BUS_NUMBER_RANGE (u8) 0x02 | ||
123 | |||
124 | #define ACPI_ADDRESS_NOT_FIXED (u8) 0x00 | ||
125 | #define ACPI_ADDRESS_FIXED (u8) 0x01 | ||
126 | |||
127 | #define ACPI_POS_DECODE (u8) 0x00 | ||
128 | #define ACPI_SUB_DECODE (u8) 0x01 | ||
129 | |||
130 | #define ACPI_PRODUCER (u8) 0x00 | ||
131 | #define ACPI_CONSUMER (u8) 0x01 | ||
132 | |||
133 | /* | ||
134 | * If possible, pack the following structures to byte alignment | ||
135 | */ | ||
136 | #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED | ||
137 | #pragma pack(1) | ||
138 | #endif | ||
139 | |||
140 | /* UUID data structures for use in vendor-defined resource descriptors */ | ||
141 | |||
142 | struct acpi_uuid { | ||
143 | u8 data[ACPI_UUID_LENGTH]; | ||
144 | }; | ||
145 | |||
146 | struct acpi_vendor_uuid { | ||
147 | u8 subtype; | ||
148 | u8 data[ACPI_UUID_LENGTH]; | ||
149 | }; | ||
150 | |||
151 | /* | ||
152 | * Structures used to describe device resources | ||
153 | */ | ||
154 | struct acpi_resource_irq { | ||
155 | u8 descriptor_length; | ||
156 | u8 triggering; | ||
157 | u8 polarity; | ||
158 | u8 sharable; | ||
159 | u8 interrupt_count; | ||
160 | u8 interrupts[1]; | ||
161 | }; | ||
162 | |||
163 | struct acpi_resource_dma { | ||
164 | u8 type; | ||
165 | u8 bus_master; | ||
166 | u8 transfer; | ||
167 | u8 channel_count; | ||
168 | u8 channels[1]; | ||
169 | }; | ||
170 | |||
171 | struct acpi_resource_start_dependent { | ||
172 | u8 descriptor_length; | ||
173 | u8 compatibility_priority; | ||
174 | u8 performance_robustness; | ||
175 | }; | ||
176 | |||
177 | /* | ||
178 | * The END_DEPENDENT_FUNCTIONS_RESOURCE struct is not | ||
179 | * needed because it has no fields | ||
180 | */ | ||
181 | |||
182 | struct acpi_resource_io { | ||
183 | u8 io_decode; | ||
184 | u8 alignment; | ||
185 | u8 address_length; | ||
186 | u16 minimum; | ||
187 | u16 maximum; | ||
188 | }; | ||
189 | |||
190 | struct acpi_resource_fixed_io { | ||
191 | u16 address; | ||
192 | u8 address_length; | ||
193 | }; | ||
194 | |||
195 | struct acpi_resource_vendor { | ||
196 | u16 byte_length; | ||
197 | u8 byte_data[1]; | ||
198 | }; | ||
199 | |||
200 | /* Vendor resource with UUID info (introduced in ACPI 3.0) */ | ||
201 | |||
202 | struct acpi_resource_vendor_typed { | ||
203 | u16 byte_length; | ||
204 | u8 uuid_subtype; | ||
205 | u8 uuid[ACPI_UUID_LENGTH]; | ||
206 | u8 byte_data[1]; | ||
207 | }; | ||
208 | |||
209 | struct acpi_resource_end_tag { | ||
210 | u8 checksum; | ||
211 | }; | ||
212 | |||
213 | struct acpi_resource_memory24 { | ||
214 | u8 write_protect; | ||
215 | u16 minimum; | ||
216 | u16 maximum; | ||
217 | u16 alignment; | ||
218 | u16 address_length; | ||
219 | }; | ||
220 | |||
221 | struct acpi_resource_memory32 { | ||
222 | u8 write_protect; | ||
223 | u32 minimum; | ||
224 | u32 maximum; | ||
225 | u32 alignment; | ||
226 | u32 address_length; | ||
227 | }; | ||
228 | |||
229 | struct acpi_resource_fixed_memory32 { | ||
230 | u8 write_protect; | ||
231 | u32 address; | ||
232 | u32 address_length; | ||
233 | }; | ||
234 | |||
235 | struct acpi_memory_attribute { | ||
236 | u8 write_protect; | ||
237 | u8 caching; | ||
238 | u8 range_type; | ||
239 | u8 translation; | ||
240 | }; | ||
241 | |||
242 | struct acpi_io_attribute { | ||
243 | u8 range_type; | ||
244 | u8 translation; | ||
245 | u8 translation_type; | ||
246 | u8 reserved1; | ||
247 | }; | ||
248 | |||
249 | union acpi_resource_attribute { | ||
250 | struct acpi_memory_attribute mem; | ||
251 | struct acpi_io_attribute io; | ||
252 | |||
253 | /* Used for the *word_space macros */ | ||
254 | |||
255 | u8 type_specific; | ||
256 | }; | ||
257 | |||
258 | struct acpi_resource_source { | ||
259 | u8 index; | ||
260 | u16 string_length; | ||
261 | char *string_ptr; | ||
262 | }; | ||
263 | |||
264 | /* Fields common to all address descriptors, 16/32/64 bit */ | ||
265 | |||
266 | #define ACPI_RESOURCE_ADDRESS_COMMON \ | ||
267 | u8 resource_type; \ | ||
268 | u8 producer_consumer; \ | ||
269 | u8 decode; \ | ||
270 | u8 min_address_fixed; \ | ||
271 | u8 max_address_fixed; \ | ||
272 | union acpi_resource_attribute info; | ||
273 | |||
274 | struct acpi_resource_address { | ||
275 | ACPI_RESOURCE_ADDRESS_COMMON}; | ||
276 | |||
277 | struct acpi_resource_address16 { | ||
278 | ACPI_RESOURCE_ADDRESS_COMMON u16 granularity; | ||
279 | u16 minimum; | ||
280 | u16 maximum; | ||
281 | u16 translation_offset; | ||
282 | u16 address_length; | ||
283 | struct acpi_resource_source resource_source; | ||
284 | }; | ||
285 | |||
286 | struct acpi_resource_address32 { | ||
287 | ACPI_RESOURCE_ADDRESS_COMMON u32 granularity; | ||
288 | u32 minimum; | ||
289 | u32 maximum; | ||
290 | u32 translation_offset; | ||
291 | u32 address_length; | ||
292 | struct acpi_resource_source resource_source; | ||
293 | }; | ||
294 | |||
295 | struct acpi_resource_address64 { | ||
296 | ACPI_RESOURCE_ADDRESS_COMMON u64 granularity; | ||
297 | u64 minimum; | ||
298 | u64 maximum; | ||
299 | u64 translation_offset; | ||
300 | u64 address_length; | ||
301 | struct acpi_resource_source resource_source; | ||
302 | }; | ||
303 | |||
304 | struct acpi_resource_extended_address64 { | ||
305 | ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD; | ||
306 | u64 granularity; | ||
307 | u64 minimum; | ||
308 | u64 maximum; | ||
309 | u64 translation_offset; | ||
310 | u64 address_length; | ||
311 | u64 type_specific; | ||
312 | }; | ||
313 | |||
314 | struct acpi_resource_extended_irq { | ||
315 | u8 producer_consumer; | ||
316 | u8 triggering; | ||
317 | u8 polarity; | ||
318 | u8 sharable; | ||
319 | u8 interrupt_count; | ||
320 | struct acpi_resource_source resource_source; | ||
321 | u32 interrupts[1]; | ||
322 | }; | ||
323 | |||
324 | struct acpi_resource_generic_register { | ||
325 | u8 space_id; | ||
326 | u8 bit_width; | ||
327 | u8 bit_offset; | ||
328 | u8 access_size; | ||
329 | u64 address; | ||
330 | }; | ||
331 | |||
332 | /* ACPI_RESOURCE_TYPEs */ | ||
333 | |||
334 | #define ACPI_RESOURCE_TYPE_IRQ 0 | ||
335 | #define ACPI_RESOURCE_TYPE_DMA 1 | ||
336 | #define ACPI_RESOURCE_TYPE_START_DEPENDENT 2 | ||
337 | #define ACPI_RESOURCE_TYPE_END_DEPENDENT 3 | ||
338 | #define ACPI_RESOURCE_TYPE_IO 4 | ||
339 | #define ACPI_RESOURCE_TYPE_FIXED_IO 5 | ||
340 | #define ACPI_RESOURCE_TYPE_VENDOR 6 | ||
341 | #define ACPI_RESOURCE_TYPE_END_TAG 7 | ||
342 | #define ACPI_RESOURCE_TYPE_MEMORY24 8 | ||
343 | #define ACPI_RESOURCE_TYPE_MEMORY32 9 | ||
344 | #define ACPI_RESOURCE_TYPE_FIXED_MEMORY32 10 | ||
345 | #define ACPI_RESOURCE_TYPE_ADDRESS16 11 | ||
346 | #define ACPI_RESOURCE_TYPE_ADDRESS32 12 | ||
347 | #define ACPI_RESOURCE_TYPE_ADDRESS64 13 | ||
348 | #define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */ | ||
349 | #define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15 | ||
350 | #define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16 | ||
351 | #define ACPI_RESOURCE_TYPE_MAX 16 | ||
352 | |||
353 | /* Master union for resource descriptors */ | ||
354 | |||
355 | union acpi_resource_data { | ||
356 | struct acpi_resource_irq irq; | ||
357 | struct acpi_resource_dma dma; | ||
358 | struct acpi_resource_start_dependent start_dpf; | ||
359 | struct acpi_resource_io io; | ||
360 | struct acpi_resource_fixed_io fixed_io; | ||
361 | struct acpi_resource_vendor vendor; | ||
362 | struct acpi_resource_vendor_typed vendor_typed; | ||
363 | struct acpi_resource_end_tag end_tag; | ||
364 | struct acpi_resource_memory24 memory24; | ||
365 | struct acpi_resource_memory32 memory32; | ||
366 | struct acpi_resource_fixed_memory32 fixed_memory32; | ||
367 | struct acpi_resource_address16 address16; | ||
368 | struct acpi_resource_address32 address32; | ||
369 | struct acpi_resource_address64 address64; | ||
370 | struct acpi_resource_extended_address64 ext_address64; | ||
371 | struct acpi_resource_extended_irq extended_irq; | ||
372 | struct acpi_resource_generic_register generic_reg; | ||
373 | |||
374 | /* Common fields */ | ||
375 | |||
376 | struct acpi_resource_address address; /* Common 16/32/64 address fields */ | ||
377 | }; | ||
378 | |||
379 | /* Common resource header */ | ||
380 | |||
381 | struct acpi_resource { | ||
382 | u32 type; | ||
383 | u32 length; | ||
384 | union acpi_resource_data data; | ||
385 | }; | ||
386 | |||
387 | /* restore default alignment */ | ||
388 | |||
389 | #pragma pack() | ||
390 | |||
391 | #define ACPI_RS_SIZE_NO_DATA 8 /* Id + Length fields */ | ||
392 | #define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12) | ||
393 | #define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type)) | ||
394 | |||
395 | #define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length) | ||
396 | |||
397 | struct acpi_pci_routing_table { | ||
398 | u32 length; | ||
399 | u32 pin; | ||
400 | acpi_integer address; /* here for 64-bit alignment */ | ||
401 | u32 source_index; | ||
402 | char source[4]; /* pad to 64 bits so sizeof() works in all cases */ | ||
403 | }; | ||
404 | |||
405 | #endif /* __ACRESTYP_H__ */ | ||
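A minimal sketch of walking a resource buffer with the types and the ACPI_NEXT_RESOURCE macro defined in this new header. It assumes the buffer came from acpi_get_current_resources() and is terminated by an END_TAG descriptor; the function name is illustrative:

#include <acpi/acpi.h>

static u32 example_count_irqs(struct acpi_resource *res)
{
	u32 count = 0;

	while (res->type != ACPI_RESOURCE_TYPE_END_TAG) {
		if (res->type == ACPI_RESOURCE_TYPE_IRQ)
			count += res->data.irq.interrupt_count;
		res = ACPI_NEXT_RESOURCE(res);	/* advance by res->length bytes */
	}
	return count;
}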
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 13a3d9ad92db..813e4b6c2c0d 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -288,6 +288,31 @@ enum acpi_prefered_pm_profiles { | |||
288 | 288 | ||
289 | #define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f) | 289 | #define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f) |
290 | 290 | ||
291 | union acpi_name_union { | ||
292 | u32 integer; | ||
293 | char ascii[4]; | ||
294 | }; | ||
295 | |||
296 | /* | ||
297 | * Internal ACPI Table Descriptor. One per ACPI table | ||
298 | */ | ||
299 | struct acpi_table_desc { | ||
300 | acpi_physical_address address; | ||
301 | struct acpi_table_header *pointer; | ||
302 | u32 length; /* Length fixed at 32 bits */ | ||
303 | union acpi_name_union signature; | ||
304 | acpi_owner_id owner_id; | ||
305 | u8 flags; | ||
306 | }; | ||
307 | |||
308 | /* Flags for above */ | ||
309 | |||
310 | #define ACPI_TABLE_ORIGIN_UNKNOWN (0) | ||
311 | #define ACPI_TABLE_ORIGIN_MAPPED (1) | ||
312 | #define ACPI_TABLE_ORIGIN_ALLOCATED (2) | ||
313 | #define ACPI_TABLE_ORIGIN_MASK (3) | ||
314 | #define ACPI_TABLE_IS_LOADED (4) | ||
315 | |||
291 | /* | 316 | /* |
292 | * Get the remaining ACPI tables | 317 | * Get the remaining ACPI tables |
293 | */ | 318 | */ |
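A small sketch of how the flag values above are meant to be tested: the low two bits carry the origin and a separate bit carries the load state. struct acpi_table_desc is internal to the table manager, and the helper name is hypothetical:

static u8 example_table_is_mapped_and_loaded(struct acpi_table_desc *desc)
{
	u8 origin = desc->flags & ACPI_TABLE_ORIGIN_MASK;

	return (origin == ACPI_TABLE_ORIGIN_MAPPED) &&
	       (desc->flags & ACPI_TABLE_IS_LOADED);
}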
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 63f5b4cf4de1..18963b968114 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -627,7 +627,7 @@ struct acpi_hest_aer_common { | |||
627 | u32 uncorrectable_error_mask; | 627 | u32 uncorrectable_error_mask; |
628 | u32 uncorrectable_error_severity; | 628 | u32 uncorrectable_error_severity; |
629 | u32 correctable_error_mask; | 629 | u32 correctable_error_mask; |
630 | u32 advanced_error_cababilities; | 630 | u32 advanced_error_capabilities; |
631 | }; | 631 | }; |
632 | 632 | ||
633 | /* Hardware Error Notification */ | 633 | /* Hardware Error Notification */ |
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 8222e8de0d1c..a20aab510173 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -204,11 +204,10 @@ typedef u32 acpi_physical_address; | |||
204 | 204 | ||
205 | /******************************************************************************* | 205 | /******************************************************************************* |
206 | * | 206 | * |
207 | * OS-dependent and compiler-dependent types | 207 | * OS-dependent types |
208 | * | 208 | * |
209 | * If the defaults below are not appropriate for the host system, they can | 209 | * If the defaults below are not appropriate for the host system, they can |
210 | * be defined in the compiler-specific or OS-specific header, and this will | 210 | * be defined in the OS-specific header, and this will take precedence. |
211 | * take precedence. | ||
212 | * | 211 | * |
213 | ******************************************************************************/ | 212 | ******************************************************************************/ |
214 | 213 | ||
@@ -218,12 +217,6 @@ typedef u32 acpi_physical_address; | |||
218 | #define acpi_thread_id acpi_size | 217 | #define acpi_thread_id acpi_size |
219 | #endif | 218 | #endif |
220 | 219 | ||
221 | /* Object returned from acpi_os_create_lock */ | ||
222 | |||
223 | #ifndef acpi_spinlock | ||
224 | #define acpi_spinlock void * | ||
225 | #endif | ||
226 | |||
227 | /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ | 220 | /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ |
228 | 221 | ||
229 | #ifndef acpi_cpu_flags | 222 | #ifndef acpi_cpu_flags |
@@ -233,9 +226,51 @@ typedef u32 acpi_physical_address; | |||
233 | /* Object returned from acpi_os_create_cache */ | 226 | /* Object returned from acpi_os_create_cache */ |
234 | 227 | ||
235 | #ifndef acpi_cache_t | 228 | #ifndef acpi_cache_t |
229 | #ifdef ACPI_USE_LOCAL_CACHE | ||
236 | #define acpi_cache_t struct acpi_memory_list | 230 | #define acpi_cache_t struct acpi_memory_list |
231 | #else | ||
232 | #define acpi_cache_t void * | ||
233 | #endif | ||
234 | #endif | ||
235 | |||
236 | /* | ||
237 | * Synchronization objects - Mutexes, Semaphores, and spin_locks | ||
238 | */ | ||
239 | #if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE) | ||
240 | /* | ||
241 | * These macros are used if the host OS does not support a mutex object. | ||
242 | * Map the OSL Mutex interfaces to binary semaphores. | ||
243 | */ | ||
244 | #define acpi_mutex acpi_semaphore | ||
245 | #define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) | ||
246 | #define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) | ||
247 | #define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) | ||
248 | #define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) | ||
249 | #endif | ||
250 | |||
251 | /* Configurable types for synchronization objects */ | ||
252 | |||
253 | #ifndef acpi_spinlock | ||
254 | #define acpi_spinlock void * | ||
255 | #endif | ||
256 | |||
257 | #ifndef acpi_semaphore | ||
258 | #define acpi_semaphore void * | ||
259 | #endif | ||
260 | |||
261 | #ifndef acpi_mutex | ||
262 | #define acpi_mutex void * | ||
237 | #endif | 263 | #endif |
238 | 264 | ||
265 | /******************************************************************************* | ||
266 | * | ||
267 | * Compiler-dependent types | ||
268 | * | ||
269 | * If the defaults below are not appropriate for the host compiler, they can | ||
270 | * be defined in the compiler-specific header, and this will take precedence. | ||
271 | * | ||
272 | ******************************************************************************/ | ||
273 | |||
239 | /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ | 274 | /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ |
240 | 275 | ||
241 | #ifndef acpi_uintptr_t | 276 | #ifndef acpi_uintptr_t |
@@ -268,6 +303,43 @@ typedef u32 acpi_physical_address; | |||
268 | #define ACPI_EXPORT_SYMBOL(symbol) | 303 | #define ACPI_EXPORT_SYMBOL(symbol) |
269 | #endif | 304 | #endif |
270 | 305 | ||
306 | /****************************************************************************** | ||
307 | * | ||
308 | * ACPI Specification constants (Do not change unless the specification changes) | ||
309 | * | ||
310 | *****************************************************************************/ | ||
311 | |||
312 | /* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */ | ||
313 | |||
314 | #define ACPI_MAX_GPE_BLOCKS 2 | ||
315 | |||
316 | /* Default ACPI register widths */ | ||
317 | |||
318 | #define ACPI_GPE_REGISTER_WIDTH 8 | ||
319 | #define ACPI_PM1_REGISTER_WIDTH 16 | ||
320 | #define ACPI_PM2_REGISTER_WIDTH 8 | ||
321 | #define ACPI_PM_TIMER_WIDTH 32 | ||
322 | |||
323 | /* Names within the namespace are 4 bytes long */ | ||
324 | |||
325 | #define ACPI_NAME_SIZE 4 | ||
326 | #define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ | ||
327 | #define ACPI_PATH_SEPARATOR '.' | ||
328 | |||
329 | /* Sizes for ACPI table headers */ | ||
330 | |||
331 | #define ACPI_OEM_ID_SIZE 6 | ||
332 | #define ACPI_OEM_TABLE_ID_SIZE 8 | ||
333 | |||
334 | /* ACPI/PNP hardware IDs */ | ||
335 | |||
336 | #define PCI_ROOT_HID_STRING "PNP0A03" | ||
337 | #define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" | ||
338 | |||
339 | /* PM Timer ticks per second (HZ) */ | ||
340 | |||
341 | #define PM_TIMER_FREQUENCY 3579545 | ||
342 | |||
271 | /******************************************************************************* | 343 | /******************************************************************************* |
272 | * | 344 | * |
273 | * Independent types | 345 | * Independent types |
@@ -291,13 +363,18 @@ typedef u32 acpi_physical_address; | |||
291 | #endif | 363 | #endif |
292 | 364 | ||
293 | /* | 365 | /* |
294 | * Mescellaneous types | 366 | * Miscellaneous types |
295 | */ | 367 | */ |
296 | typedef u32 acpi_status; /* All ACPI Exceptions */ | 368 | typedef u32 acpi_status; /* All ACPI Exceptions */ |
297 | typedef u32 acpi_name; /* 4-byte ACPI name */ | 369 | typedef u32 acpi_name; /* 4-byte ACPI name */ |
298 | typedef char *acpi_string; /* Null terminated ASCII string */ | 370 | typedef char *acpi_string; /* Null terminated ASCII string */ |
299 | typedef void *acpi_handle; /* Actually a ptr to a NS Node */ | 371 | typedef void *acpi_handle; /* Actually a ptr to a NS Node */ |
300 | 372 | ||
373 | /* Owner IDs are used to track namespace nodes for selective deletion */ | ||
374 | |||
375 | typedef u8 acpi_owner_id; | ||
376 | #define ACPI_OWNER_ID_MAX 0xFF | ||
377 | |||
301 | struct uint64_struct { | 378 | struct uint64_struct { |
302 | u32 lo; | 379 | u32 lo; |
303 | u32 hi; | 380 | u32 hi; |
@@ -313,13 +390,8 @@ struct uint32_struct { | |||
313 | u32 hi; | 390 | u32 hi; |
314 | }; | 391 | }; |
315 | 392 | ||
316 | /* Synchronization objects */ | ||
317 | |||
318 | #define acpi_mutex void * | ||
319 | #define acpi_semaphore void * | ||
320 | |||
321 | /* | 393 | /* |
322 | * Acpi integer width. In ACPI version 1, integers are 32 bits. In ACPI | 394 | * Acpi integer width. In ACPI version 1, integers are 32 bits. In ACPI |
323 | * version 2, integers are 64 bits. Note that this pertains to the ACPI integer | 395 | * version 2, integers are 64 bits. Note that this pertains to the ACPI integer |
324 | * type only, not other integers used in the implementation of the ACPI CA | 396 | * type only, not other integers used in the implementation of the ACPI CA |
325 | * subsystem. | 397 | * subsystem. |
@@ -338,10 +410,75 @@ typedef unsigned long long acpi_integer; | |||
338 | #define ACPI_MAX16_DECIMAL_DIGITS 5 | 410 | #define ACPI_MAX16_DECIMAL_DIGITS 5 |
339 | #define ACPI_MAX8_DECIMAL_DIGITS 3 | 411 | #define ACPI_MAX8_DECIMAL_DIGITS 3 |
340 | 412 | ||
413 | /* PM Timer ticks per second (HZ) */ | ||
414 | |||
415 | #define PM_TIMER_FREQUENCY 3579545 | ||
416 | |||
341 | /* | 417 | /* |
342 | * Constants with special meanings | 418 | * Constants with special meanings |
343 | */ | 419 | */ |
344 | #define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) | 420 | #define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) |
421 | #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ | ||
422 | #define ACPI_DO_NOT_WAIT 0 | ||
423 | |||
424 | /******************************************************************************* | ||
425 | * | ||
426 | * Commonly used macros | ||
427 | * | ||
428 | ******************************************************************************/ | ||
429 | |||
430 | /* Data manipulation */ | ||
431 | |||
432 | #define ACPI_LOWORD(l) ((u16)(u32)(l)) | ||
433 | #define ACPI_HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF)) | ||
434 | #define ACPI_LOBYTE(l) ((u8)(u16)(l)) | ||
435 | #define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF)) | ||
436 | |||
437 | /* Full 64-bit integer must be available on both 32-bit and 64-bit platforms */ | ||
438 | |||
439 | struct acpi_integer_overlay { | ||
440 | u32 lo_dword; | ||
441 | u32 hi_dword; | ||
442 | }; | ||
443 | |||
444 | #define ACPI_LODWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword) | ||
445 | #define ACPI_HIDWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword) | ||
446 | |||
447 | #define ACPI_SET_BIT(target,bit) ((target) |= (bit)) | ||
448 | #define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) | ||
449 | #define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) | ||
450 | #define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) | ||
451 | |||
452 | /* Size calculation */ | ||
453 | |||
454 | #define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) | ||
455 | |||
456 | /* Pointer manipulation */ | ||
457 | |||
458 | #define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) | ||
459 | #define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) | ||
460 | #define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) | ||
461 | #define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))) | ||
462 | |||
463 | /* Pointer/Integer type conversions */ | ||
464 | |||
465 | #define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i) | ||
466 | #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL) | ||
467 | #define ACPI_OFFSET(d, f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) NULL) | ||
468 | #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) | ||
469 | #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) | ||
470 | |||
471 | #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED | ||
472 | #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) | ||
473 | #else | ||
474 | #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) | ||
475 | #endif | ||
476 | |||
477 | /******************************************************************************* | ||
478 | * | ||
479 | * Miscellaneous constants | ||
480 | * | ||
481 | ******************************************************************************/ | ||
345 | 482 | ||
346 | /* | 483 | /* |
347 | * Initialization sequence | 484 | * Initialization sequence |
@@ -414,7 +551,7 @@ typedef unsigned long long acpi_integer; | |||
414 | #define ACPI_NOTIFY_MAX 0x0B | 551 | #define ACPI_NOTIFY_MAX 0x0B |
415 | 552 | ||
416 | /* | 553 | /* |
417 | * Types associated with ACPI names and objects. The first group of | 554 | * Types associated with ACPI names and objects. The first group of |
418 | * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition | 555 | * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition |
419 | * of the ACPI object_type() operator (See the ACPI Spec). Therefore, | 556 | * of the ACPI object_type() operator (See the ACPI Spec). Therefore, |
420 | * only add to the first group if the spec changes. | 557 | * only add to the first group if the spec changes. |
@@ -732,6 +869,15 @@ struct acpi_buffer { | |||
732 | #define ACPI_NAME_TYPE_MAX 1 | 869 | #define ACPI_NAME_TYPE_MAX 1 |
733 | 870 | ||
734 | /* | 871 | /* |
872 | * Predefined Namespace items | ||
873 | */ | ||
874 | struct acpi_predefined_names { | ||
875 | char *name; | ||
876 | u8 type; | ||
877 | char *val; | ||
878 | }; | ||
879 | |||
880 | /* | ||
735 | * Structure and flags for acpi_get_system_info | 881 | * Structure and flags for acpi_get_system_info |
736 | */ | 882 | */ |
737 | #define ACPI_SYS_MODE_UNKNOWN 0x0000 | 883 | #define ACPI_SYS_MODE_UNKNOWN 0x0000 |
@@ -787,7 +933,7 @@ acpi_status(*acpi_exception_handler) (acpi_status aml_status, | |||
787 | u16 opcode, | 933 | u16 opcode, |
788 | u32 aml_offset, void *context); | 934 | u32 aml_offset, void *context); |
789 | 935 | ||
790 | /* Table Event handler (Load, load_table etc) and types */ | 936 | /* Table Event handler (Load, load_table, etc.) and types */ |
791 | 937 | ||
792 | typedef | 938 | typedef |
793 | acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); | 939 | acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); |
@@ -823,6 +969,12 @@ acpi_status(*acpi_walk_callback) (acpi_handle obj_handle, | |||
823 | #define ACPI_INTERRUPT_NOT_HANDLED 0x00 | 969 | #define ACPI_INTERRUPT_NOT_HANDLED 0x00 |
824 | #define ACPI_INTERRUPT_HANDLED 0x01 | 970 | #define ACPI_INTERRUPT_HANDLED 0x01 |
825 | 971 | ||
972 | /* Length of _HID, _UID, _CID, and UUID values */ | ||
973 | |||
974 | #define ACPI_DEVICE_ID_LENGTH 0x09 | ||
975 | #define ACPI_MAX_CID_LENGTH 48 | ||
976 | #define ACPI_UUID_LENGTH 16 | ||
977 | |||
826 | /* Common string version of device HIDs and UIDs */ | 978 | /* Common string version of device HIDs and UIDs */ |
827 | 979 | ||
828 | struct acpica_device_id { | 980 | struct acpica_device_id { |
@@ -900,357 +1052,28 @@ struct acpi_mem_space_context { | |||
900 | }; | 1052 | }; |
901 | 1053 | ||
902 | /* | 1054 | /* |
903 | * Definitions for Resource Attributes | 1055 | * struct acpi_memory_list is used only if the ACPICA local cache is enabled |
904 | */ | ||
905 | typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */ | ||
906 | typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */ | ||
907 | |||
908 | /* | ||
909 | * Memory Attributes | ||
910 | */ | ||
911 | #define ACPI_READ_ONLY_MEMORY (u8) 0x00 | ||
912 | #define ACPI_READ_WRITE_MEMORY (u8) 0x01 | ||
913 | |||
914 | #define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00 | ||
915 | #define ACPI_CACHABLE_MEMORY (u8) 0x01 | ||
916 | #define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02 | ||
917 | #define ACPI_PREFETCHABLE_MEMORY (u8) 0x03 | ||
918 | |||
919 | /* | ||
920 | * IO Attributes | ||
921 | * The ISA IO ranges are: n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh. | ||
922 | * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh. | ||
923 | */ | 1056 | */ |
924 | #define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01 | 1057 | struct acpi_memory_list { |
925 | #define ACPI_ISA_ONLY_RANGES (u8) 0x02 | 1058 | char *list_name; |
926 | #define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES) | 1059 | void *list_head; |
927 | 1060 | u16 object_size; | |
928 | /* Type of translation - 1=Sparse, 0=Dense */ | 1061 | u16 max_depth; |
929 | 1062 | u16 current_depth; | |
930 | #define ACPI_SPARSE_TRANSLATION (u8) 0x01 | 1063 | u16 link_offset; |
931 | 1064 | ||
932 | /* | 1065 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS |
933 | * IO Port Descriptor Decode | 1066 | |
934 | */ | 1067 | /* Statistics for debug memory tracking only */ |
935 | #define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */ | 1068 | |
936 | #define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ | 1069 | u32 total_allocated; |
937 | 1070 | u32 total_freed; | |
938 | /* | 1071 | u32 max_occupied; |
939 | * IRQ Attributes | 1072 | u32 total_size; |
940 | */ | 1073 | u32 current_total_size; |
941 | #define ACPI_LEVEL_SENSITIVE (u8) 0x00 | 1074 | u32 requests; |
942 | #define ACPI_EDGE_SENSITIVE (u8) 0x01 | 1075 | u32 hits; |
943 | |||
944 | #define ACPI_ACTIVE_HIGH (u8) 0x00 | ||
945 | #define ACPI_ACTIVE_LOW (u8) 0x01 | ||
946 | |||
947 | #define ACPI_EXCLUSIVE (u8) 0x00 | ||
948 | #define ACPI_SHARED (u8) 0x01 | ||
949 | |||
950 | /* | ||
951 | * DMA Attributes | ||
952 | */ | ||
953 | #define ACPI_COMPATIBILITY (u8) 0x00 | ||
954 | #define ACPI_TYPE_A (u8) 0x01 | ||
955 | #define ACPI_TYPE_B (u8) 0x02 | ||
956 | #define ACPI_TYPE_F (u8) 0x03 | ||
957 | |||
958 | #define ACPI_NOT_BUS_MASTER (u8) 0x00 | ||
959 | #define ACPI_BUS_MASTER (u8) 0x01 | ||
960 | |||
961 | #define ACPI_TRANSFER_8 (u8) 0x00 | ||
962 | #define ACPI_TRANSFER_8_16 (u8) 0x01 | ||
963 | #define ACPI_TRANSFER_16 (u8) 0x02 | ||
964 | |||
965 | /* | ||
966 | * Start Dependent Functions Priority definitions | ||
967 | */ | ||
968 | #define ACPI_GOOD_CONFIGURATION (u8) 0x00 | ||
969 | #define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01 | ||
970 | #define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02 | ||
971 | |||
972 | /* | ||
973 | * 16, 32 and 64-bit Address Descriptor resource types | ||
974 | */ | ||
975 | #define ACPI_MEMORY_RANGE (u8) 0x00 | ||
976 | #define ACPI_IO_RANGE (u8) 0x01 | ||
977 | #define ACPI_BUS_NUMBER_RANGE (u8) 0x02 | ||
978 | |||
979 | #define ACPI_ADDRESS_NOT_FIXED (u8) 0x00 | ||
980 | #define ACPI_ADDRESS_FIXED (u8) 0x01 | ||
981 | |||
982 | #define ACPI_POS_DECODE (u8) 0x00 | ||
983 | #define ACPI_SUB_DECODE (u8) 0x01 | ||
984 | |||
985 | #define ACPI_PRODUCER (u8) 0x00 | ||
986 | #define ACPI_CONSUMER (u8) 0x01 | ||
987 | |||
988 | /* | ||
989 | * If possible, pack the following structures to byte alignment | ||
990 | */ | ||
991 | #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED | ||
992 | #pragma pack(1) | ||
993 | #endif | 1076 | #endif |
994 | |||
995 | /* UUID data structures for use in vendor-defined resource descriptors */ | ||
996 | |||
997 | struct acpi_uuid { | ||
998 | u8 data[ACPI_UUID_LENGTH]; | ||
999 | }; | ||
1000 | |||
1001 | struct acpi_vendor_uuid { | ||
1002 | u8 subtype; | ||
1003 | u8 data[ACPI_UUID_LENGTH]; | ||
1004 | }; | ||
1005 | |||
1006 | /* | ||
1007 | * Structures used to describe device resources | ||
1008 | */ | ||
1009 | struct acpi_resource_irq { | ||
1010 | u8 descriptor_length; | ||
1011 | u8 triggering; | ||
1012 | u8 polarity; | ||
1013 | u8 sharable; | ||
1014 | u8 interrupt_count; | ||
1015 | u8 interrupts[1]; | ||
1016 | }; | ||
1017 | |||
1018 | struct acpi_resource_dma { | ||
1019 | u8 type; | ||
1020 | u8 bus_master; | ||
1021 | u8 transfer; | ||
1022 | u8 channel_count; | ||
1023 | u8 channels[1]; | ||
1024 | }; | ||
1025 | |||
1026 | struct acpi_resource_start_dependent { | ||
1027 | u8 descriptor_length; | ||
1028 | u8 compatibility_priority; | ||
1029 | u8 performance_robustness; | ||
1030 | }; | ||
1031 | |||
1032 | /* | ||
1033 | * END_DEPENDENT_FUNCTIONS_RESOURCE struct is not | ||
1034 | * needed because it has no fields | ||
1035 | */ | ||
1036 | |||
1037 | struct acpi_resource_io { | ||
1038 | u8 io_decode; | ||
1039 | u8 alignment; | ||
1040 | u8 address_length; | ||
1041 | u16 minimum; | ||
1042 | u16 maximum; | ||
1043 | }; | ||
1044 | |||
1045 | struct acpi_resource_fixed_io { | ||
1046 | u16 address; | ||
1047 | u8 address_length; | ||
1048 | }; | ||
1049 | |||
1050 | struct acpi_resource_vendor { | ||
1051 | u16 byte_length; | ||
1052 | u8 byte_data[1]; | ||
1053 | }; | ||
1054 | |||
1055 | /* Vendor resource with UUID info (introduced in ACPI 3.0) */ | ||
1056 | |||
1057 | struct acpi_resource_vendor_typed { | ||
1058 | u16 byte_length; | ||
1059 | u8 uuid_subtype; | ||
1060 | u8 uuid[ACPI_UUID_LENGTH]; | ||
1061 | u8 byte_data[1]; | ||
1062 | }; | ||
1063 | |||
1064 | struct acpi_resource_end_tag { | ||
1065 | u8 checksum; | ||
1066 | }; | ||
1067 | |||
1068 | struct acpi_resource_memory24 { | ||
1069 | u8 write_protect; | ||
1070 | u16 minimum; | ||
1071 | u16 maximum; | ||
1072 | u16 alignment; | ||
1073 | u16 address_length; | ||
1074 | }; | ||
1075 | |||
1076 | struct acpi_resource_memory32 { | ||
1077 | u8 write_protect; | ||
1078 | u32 minimum; | ||
1079 | u32 maximum; | ||
1080 | u32 alignment; | ||
1081 | u32 address_length; | ||
1082 | }; | ||
1083 | |||
1084 | struct acpi_resource_fixed_memory32 { | ||
1085 | u8 write_protect; | ||
1086 | u32 address; | ||
1087 | u32 address_length; | ||
1088 | }; | ||
1089 | |||
1090 | struct acpi_memory_attribute { | ||
1091 | u8 write_protect; | ||
1092 | u8 caching; | ||
1093 | u8 range_type; | ||
1094 | u8 translation; | ||
1095 | }; | ||
1096 | |||
1097 | struct acpi_io_attribute { | ||
1098 | u8 range_type; | ||
1099 | u8 translation; | ||
1100 | u8 translation_type; | ||
1101 | u8 reserved1; | ||
1102 | }; | ||
1103 | |||
1104 | union acpi_resource_attribute { | ||
1105 | struct acpi_memory_attribute mem; | ||
1106 | struct acpi_io_attribute io; | ||
1107 | |||
1108 | /* Used for the *word_space macros */ | ||
1109 | |||
1110 | u8 type_specific; | ||
1111 | }; | ||
1112 | |||
1113 | struct acpi_resource_source { | ||
1114 | u8 index; | ||
1115 | u16 string_length; | ||
1116 | char *string_ptr; | ||
1117 | }; | ||
1118 | |||
1119 | /* Fields common to all address descriptors, 16/32/64 bit */ | ||
1120 | |||
1121 | #define ACPI_RESOURCE_ADDRESS_COMMON \ | ||
1122 | u8 resource_type; \ | ||
1123 | u8 producer_consumer; \ | ||
1124 | u8 decode; \ | ||
1125 | u8 min_address_fixed; \ | ||
1126 | u8 max_address_fixed; \ | ||
1127 | union acpi_resource_attribute info; | ||
1128 | |||
1129 | struct acpi_resource_address { | ||
1130 | ACPI_RESOURCE_ADDRESS_COMMON}; | ||
1131 | |||
1132 | struct acpi_resource_address16 { | ||
1133 | ACPI_RESOURCE_ADDRESS_COMMON u16 granularity; | ||
1134 | u16 minimum; | ||
1135 | u16 maximum; | ||
1136 | u16 translation_offset; | ||
1137 | u16 address_length; | ||
1138 | struct acpi_resource_source resource_source; | ||
1139 | }; | ||
1140 | |||
1141 | struct acpi_resource_address32 { | ||
1142 | ACPI_RESOURCE_ADDRESS_COMMON u32 granularity; | ||
1143 | u32 minimum; | ||
1144 | u32 maximum; | ||
1145 | u32 translation_offset; | ||
1146 | u32 address_length; | ||
1147 | struct acpi_resource_source resource_source; | ||
1148 | }; | ||
1149 | |||
1150 | struct acpi_resource_address64 { | ||
1151 | ACPI_RESOURCE_ADDRESS_COMMON u64 granularity; | ||
1152 | u64 minimum; | ||
1153 | u64 maximum; | ||
1154 | u64 translation_offset; | ||
1155 | u64 address_length; | ||
1156 | struct acpi_resource_source resource_source; | ||
1157 | }; | ||
1158 | |||
1159 | struct acpi_resource_extended_address64 { | ||
1160 | ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD; | ||
1161 | u64 granularity; | ||
1162 | u64 minimum; | ||
1163 | u64 maximum; | ||
1164 | u64 translation_offset; | ||
1165 | u64 address_length; | ||
1166 | u64 type_specific; | ||
1167 | }; | ||
1168 | |||
1169 | struct acpi_resource_extended_irq { | ||
1170 | u8 producer_consumer; | ||
1171 | u8 triggering; | ||
1172 | u8 polarity; | ||
1173 | u8 sharable; | ||
1174 | u8 interrupt_count; | ||
1175 | struct acpi_resource_source resource_source; | ||
1176 | u32 interrupts[1]; | ||
1177 | }; | ||
1178 | |||
1179 | struct acpi_resource_generic_register { | ||
1180 | u8 space_id; | ||
1181 | u8 bit_width; | ||
1182 | u8 bit_offset; | ||
1183 | u8 access_size; | ||
1184 | u64 address; | ||
1185 | }; | ||
1186 | |||
1187 | /* ACPI_RESOURCE_TYPEs */ | ||
1188 | |||
1189 | #define ACPI_RESOURCE_TYPE_IRQ 0 | ||
1190 | #define ACPI_RESOURCE_TYPE_DMA 1 | ||
1191 | #define ACPI_RESOURCE_TYPE_START_DEPENDENT 2 | ||
1192 | #define ACPI_RESOURCE_TYPE_END_DEPENDENT 3 | ||
1193 | #define ACPI_RESOURCE_TYPE_IO 4 | ||
1194 | #define ACPI_RESOURCE_TYPE_FIXED_IO 5 | ||
1195 | #define ACPI_RESOURCE_TYPE_VENDOR 6 | ||
1196 | #define ACPI_RESOURCE_TYPE_END_TAG 7 | ||
1197 | #define ACPI_RESOURCE_TYPE_MEMORY24 8 | ||
1198 | #define ACPI_RESOURCE_TYPE_MEMORY32 9 | ||
1199 | #define ACPI_RESOURCE_TYPE_FIXED_MEMORY32 10 | ||
1200 | #define ACPI_RESOURCE_TYPE_ADDRESS16 11 | ||
1201 | #define ACPI_RESOURCE_TYPE_ADDRESS32 12 | ||
1202 | #define ACPI_RESOURCE_TYPE_ADDRESS64 13 | ||
1203 | #define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */ | ||
1204 | #define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15 | ||
1205 | #define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16 | ||
1206 | #define ACPI_RESOURCE_TYPE_MAX 16 | ||
1207 | |||
1208 | union acpi_resource_data { | ||
1209 | struct acpi_resource_irq irq; | ||
1210 | struct acpi_resource_dma dma; | ||
1211 | struct acpi_resource_start_dependent start_dpf; | ||
1212 | struct acpi_resource_io io; | ||
1213 | struct acpi_resource_fixed_io fixed_io; | ||
1214 | struct acpi_resource_vendor vendor; | ||
1215 | struct acpi_resource_vendor_typed vendor_typed; | ||
1216 | struct acpi_resource_end_tag end_tag; | ||
1217 | struct acpi_resource_memory24 memory24; | ||
1218 | struct acpi_resource_memory32 memory32; | ||
1219 | struct acpi_resource_fixed_memory32 fixed_memory32; | ||
1220 | struct acpi_resource_address16 address16; | ||
1221 | struct acpi_resource_address32 address32; | ||
1222 | struct acpi_resource_address64 address64; | ||
1223 | struct acpi_resource_extended_address64 ext_address64; | ||
1224 | struct acpi_resource_extended_irq extended_irq; | ||
1225 | struct acpi_resource_generic_register generic_reg; | ||
1226 | |||
1227 | /* Common fields */ | ||
1228 | |||
1229 | struct acpi_resource_address address; /* Common 16/32/64 address fields */ | ||
1230 | }; | ||
1231 | |||
1232 | struct acpi_resource { | ||
1233 | u32 type; | ||
1234 | u32 length; | ||
1235 | union acpi_resource_data data; | ||
1236 | }; | ||
1237 | |||
1238 | /* restore default alignment */ | ||
1239 | |||
1240 | #pragma pack() | ||
1241 | |||
1242 | #define ACPI_RS_SIZE_NO_DATA 8 /* Id + Length fields */ | ||
1243 | #define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12) | ||
1244 | #define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type)) | ||
1245 | |||
1246 | #define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length) | ||
1247 | |||
1248 | struct acpi_pci_routing_table { | ||
1249 | u32 length; | ||
1250 | u32 pin; | ||
1251 | acpi_integer address; /* here for 64-bit alignment */ | ||
1252 | u32 source_index; | ||
1253 | char source[4]; /* pad to 64 bits so sizeof() works in all cases */ | ||
1254 | }; | 1077 | }; |
1255 | 1078 | ||
1256 | #endif /* __ACTYPES_H__ */ | 1079 | #endif /* __ACTYPES_H__ */ |
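A brief sketch exercising a few of the helper macros consolidated into actypes.h above; the signature comparison mirrors the usual ACPICA 4-byte name compare, and the function itself is illustrative:

#include <acpi/acpi.h>

static void example_macros(struct acpi_table_header *table, acpi_integer value)
{
	u32 low = ACPI_LODWORD(value);		/* low 32 bits of the 64-bit integer */
	u16 word = ACPI_LOWORD(low);		/* low 16 bits */
	u8 high_byte = ACPI_HIBYTE(word);	/* bits 8..15 */

	if (ACPI_COMPARE_NAME(table->signature, "FACP")) {
		/* "table" is the FADT; 4-byte compare, no NUL terminator needed */
	}

	(void)high_byte;	/* silence unused-variable warnings in this sketch */
}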
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index fcd2572e428c..e62f10d9a7d8 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -44,14 +44,26 @@ | |||
44 | #ifndef __ACENV_H__ | 44 | #ifndef __ACENV_H__ |
45 | #define __ACENV_H__ | 45 | #define __ACENV_H__ |
46 | 46 | ||
47 | /* | 47 | /* Types for ACPI_MUTEX_TYPE */ |
48 | |||
49 | #define ACPI_BINARY_SEMAPHORE 0 | ||
50 | #define ACPI_OSL_MUTEX 1 | ||
51 | |||
52 | /* Types for DEBUGGER_THREADING */ | ||
53 | |||
54 | #define DEBUGGER_SINGLE_THREADED 0 | ||
55 | #define DEBUGGER_MULTI_THREADED 1 | ||
56 | |||
57 | /****************************************************************************** | ||
58 | * | ||
48 | * Configuration for ACPI tools and utilities | 59 | * Configuration for ACPI tools and utilities |
49 | */ | 60 | * |
61 | *****************************************************************************/ | ||
50 | 62 | ||
51 | #ifdef ACPI_LIBRARY | 63 | #ifdef ACPI_LIBRARY |
52 | /* | 64 | /* |
53 | * Note: The non-debug version of the acpi_library does not contain any | 65 | * Note: The non-debug version of the acpi_library does not contain any |
54 | * debug support, for minimimal size. The debug version uses ACPI_FULL_DEBUG | 66 | * debug support, for minimal size. The debug version uses ACPI_FULL_DEBUG |
55 | */ | 67 | */ |
56 | #define ACPI_USE_LOCAL_CACHE | 68 | #define ACPI_USE_LOCAL_CACHE |
57 | #endif | 69 | #endif |
@@ -75,17 +87,6 @@ | |||
75 | #define ACPI_DBG_TRACK_ALLOCATIONS | 87 | #define ACPI_DBG_TRACK_ALLOCATIONS |
76 | #endif | 88 | #endif |
77 | 89 | ||
78 | #ifdef ACPI_DASM_APP | ||
79 | #ifndef MSDOS | ||
80 | #define ACPI_DEBUG_OUTPUT | ||
81 | #endif | ||
82 | #define ACPI_APPLICATION | ||
83 | #define ACPI_DISASSEMBLER | ||
84 | #define ACPI_NO_METHOD_EXECUTION | ||
85 | #define ACPI_LARGE_NAMESPACE_NODE | ||
86 | #define ACPI_DATA_TABLE_DISASSEMBLY | ||
87 | #endif | ||
88 | |||
89 | #ifdef ACPI_APPLICATION | 90 | #ifdef ACPI_APPLICATION |
90 | #define ACPI_USE_SYSTEM_CLIBRARY | 91 | #define ACPI_USE_SYSTEM_CLIBRARY |
91 | #define ACPI_USE_LOCAL_CACHE | 92 | #define ACPI_USE_LOCAL_CACHE |
@@ -179,6 +180,19 @@ | |||
179 | 180 | ||
180 | /*! [End] no source code translation !*/ | 181 | /*! [End] no source code translation !*/ |
181 | 182 | ||
183 | /****************************************************************************** | ||
184 | * | ||
185 | * Miscellaneous configuration | ||
186 | * | ||
187 | *****************************************************************************/ | ||
188 | |||
189 | /* | ||
190 | * Are mutexes supported by the host? default is no, use binary semaphores. | ||
191 | */ | ||
192 | #ifndef ACPI_MUTEX_TYPE | ||
193 | #define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE | ||
194 | #endif | ||
195 | |||
182 | /* | 196 | /* |
183 | * Debugger threading model | 197 | * Debugger threading model |
184 | * Use single threaded if the entire subsystem is contained in an application | 198 | * Use single threaded if the entire subsystem is contained in an application |
@@ -187,9 +201,6 @@ | |||
187 | * By default the model is single threaded if ACPI_APPLICATION is set, | 201 | * By default the model is single threaded if ACPI_APPLICATION is set, |
188 | * multi-threaded if ACPI_APPLICATION is not set. | 202 | * multi-threaded if ACPI_APPLICATION is not set. |
189 | */ | 203 | */ |
190 | #define DEBUGGER_SINGLE_THREADED 0 | ||
191 | #define DEBUGGER_MULTI_THREADED 1 | ||
192 | |||
193 | #ifndef DEBUGGER_THREADING | 204 | #ifndef DEBUGGER_THREADING |
194 | #ifdef ACPI_APPLICATION | 205 | #ifdef ACPI_APPLICATION |
195 | #define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED | 206 | #define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED |
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 0515e754449d..6d49b2a498c4 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h | |||
@@ -46,6 +46,7 @@ | |||
46 | 46 | ||
47 | #define ACPI_USE_SYSTEM_CLIBRARY | 47 | #define ACPI_USE_SYSTEM_CLIBRARY |
48 | #define ACPI_USE_DO_WHILE_0 | 48 | #define ACPI_USE_DO_WHILE_0 |
49 | #define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE | ||
49 | 50 | ||
50 | #ifdef __KERNEL__ | 51 | #ifdef __KERNEL__ |
51 | 52 | ||
@@ -70,9 +71,6 @@ | |||
70 | #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); | 71 | #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); |
71 | #define strtoul simple_strtoul | 72 | #define strtoul simple_strtoul |
72 | 73 | ||
73 | /* Full namespace pathname length limit - arbitrary */ | ||
74 | #define ACPI_PATHNAME_MAX 256 | ||
75 | |||
76 | #else /* !__KERNEL__ */ | 74 | #else /* !__KERNEL__ */ |
77 | 75 | ||
78 | #include <stdarg.h> | 76 | #include <stdarg.h> |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index fba8051fb297..6fce2fc2d124 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -131,22 +131,6 @@ extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity); | |||
131 | */ | 131 | */ |
132 | void acpi_unregister_gsi (u32 gsi); | 132 | void acpi_unregister_gsi (u32 gsi); |
133 | 133 | ||
134 | struct acpi_prt_entry { | ||
135 | struct list_head node; | ||
136 | struct acpi_pci_id id; | ||
137 | u8 pin; | ||
138 | struct { | ||
139 | acpi_handle handle; | ||
140 | u32 index; | ||
141 | } link; | ||
142 | u32 irq; | ||
143 | }; | ||
144 | |||
145 | struct acpi_prt_list { | ||
146 | int count; | ||
147 | struct list_head entries; | ||
148 | }; | ||
149 | |||
150 | struct pci_dev; | 134 | struct pci_dev; |
151 | 135 | ||
152 | int acpi_pci_irq_enable (struct pci_dev *dev); | 136 | int acpi_pci_irq_enable (struct pci_dev *dev); |
@@ -270,6 +254,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n, | |||
270 | #ifdef CONFIG_PM_SLEEP | 254 | #ifdef CONFIG_PM_SLEEP |
271 | void __init acpi_no_s4_hw_signature(void); | 255 | void __init acpi_no_s4_hw_signature(void); |
272 | void __init acpi_old_suspend_ordering(void); | 256 | void __init acpi_old_suspend_ordering(void); |
257 | void __init acpi_s4_no_nvs(void); | ||
273 | #endif /* CONFIG_PM_SLEEP */ | 258 | #endif /* CONFIG_PM_SLEEP */ |
274 | #else /* CONFIG_ACPI */ | 259 | #else /* CONFIG_ACPI */ |
275 | 260 | ||
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 0f50d4cc4360..45f6297821bd 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h | |||
@@ -59,9 +59,7 @@ enum async_tx_flags { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | #ifdef CONFIG_DMA_ENGINE | 61 | #ifdef CONFIG_DMA_ENGINE |
62 | void async_tx_issue_pending_all(void); | 62 | #define async_tx_issue_pending_all dma_issue_pending_all |
63 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | ||
64 | void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx); | ||
65 | #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 63 | #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
66 | #include <asm/async_tx.h> | 64 | #include <asm/async_tx.h> |
67 | #else | 65 | #else |
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void) | |||
77 | do { } while (0); | 75 | do { } while (0); |
78 | } | 76 | } |
79 | 77 | ||
80 | static inline enum dma_status | ||
81 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
82 | { | ||
83 | return DMA_SUCCESS; | ||
84 | } | ||
85 | |||
86 | static inline void | ||
87 | async_tx_run_dependencies(struct dma_async_tx_descriptor *tx, | ||
88 | struct dma_chan *host_chan) | ||
89 | { | ||
90 | do { } while (0); | ||
91 | } | ||
92 | |||
93 | static inline struct dma_chan * | 78 | static inline struct dma_chan * |
94 | async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | 79 | async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, |
95 | enum dma_transaction_type tx_type, struct page **dst, int dst_count, | 80 | enum dma_transaction_type tx_type, struct page **dst, int dst_count, |
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 2a2213eefd85..2f1f95737acb 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #define ATMEL_MCI_MAX_NR_SLOTS 2 | 4 | #define ATMEL_MCI_MAX_NR_SLOTS 2 |
5 | 5 | ||
6 | struct dma_slave; | 6 | #include <linux/dw_dmac.h> |
7 | 7 | ||
8 | /** | 8 | /** |
9 | * struct mci_slot_pdata - board-specific per-slot configuration | 9 | * struct mci_slot_pdata - board-specific per-slot configuration |
@@ -28,11 +28,11 @@ struct mci_slot_pdata { | |||
28 | 28 | ||
29 | /** | 29 | /** |
30 | * struct mci_platform_data - board-specific MMC/SDcard configuration | 30 | * struct mci_platform_data - board-specific MMC/SDcard configuration |
31 | * @dma_slave: DMA slave interface to use in data transfers, or NULL. | 31 | * @dma_slave: DMA slave interface to use in data transfers. |
32 | * @slot: Per-slot configuration data. | 32 | * @slot: Per-slot configuration data. |
33 | */ | 33 | */ |
34 | struct mci_platform_data { | 34 | struct mci_platform_data { |
35 | struct dma_slave *dma_slave; | 35 | struct dw_dma_slave dma_slave; |
36 | struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; | 36 | struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; |
37 | }; | 37 | }; |
38 | 38 | ||
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index adb0b084eb5a..64dea2ab326c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -29,32 +29,6 @@ | |||
29 | #include <linux/dma-mapping.h> | 29 | #include <linux/dma-mapping.h> |
30 | 30 | ||
31 | /** | 31 | /** |
32 | * enum dma_state - resource PNP/power management state | ||
33 | * @DMA_RESOURCE_SUSPEND: DMA device going into low power state | ||
34 | * @DMA_RESOURCE_RESUME: DMA device returning to full power | ||
35 | * @DMA_RESOURCE_AVAILABLE: DMA device available to the system | ||
36 | * @DMA_RESOURCE_REMOVED: DMA device removed from the system | ||
37 | */ | ||
38 | enum dma_state { | ||
39 | DMA_RESOURCE_SUSPEND, | ||
40 | DMA_RESOURCE_RESUME, | ||
41 | DMA_RESOURCE_AVAILABLE, | ||
42 | DMA_RESOURCE_REMOVED, | ||
43 | }; | ||
44 | |||
45 | /** | ||
46 | * enum dma_state_client - state of the channel in the client | ||
47 | * @DMA_ACK: client would like to use, or was using this channel | ||
48 | * @DMA_DUP: client has already seen this channel, or is not using this channel | ||
49 | * @DMA_NAK: client does not want to see any more channels | ||
50 | */ | ||
51 | enum dma_state_client { | ||
52 | DMA_ACK, | ||
53 | DMA_DUP, | ||
54 | DMA_NAK, | ||
55 | }; | ||
56 | |||
57 | /** | ||
58 | * typedef dma_cookie_t - an opaque DMA cookie | 32 | * typedef dma_cookie_t - an opaque DMA cookie |
59 | * | 33 | * |
60 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code | 34 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code |
@@ -89,23 +63,13 @@ enum dma_transaction_type { | |||
89 | DMA_MEMSET, | 63 | DMA_MEMSET, |
90 | DMA_MEMCPY_CRC32C, | 64 | DMA_MEMCPY_CRC32C, |
91 | DMA_INTERRUPT, | 65 | DMA_INTERRUPT, |
66 | DMA_PRIVATE, | ||
92 | DMA_SLAVE, | 67 | DMA_SLAVE, |
93 | }; | 68 | }; |
94 | 69 | ||
95 | /* last transaction type for creation of the capabilities mask */ | 70 | /* last transaction type for creation of the capabilities mask */ |
96 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) | 71 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) |
97 | 72 | ||
98 | /** | ||
99 | * enum dma_slave_width - DMA slave register access width. | ||
100 | * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses | ||
101 | * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses | ||
102 | * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses | ||
103 | */ | ||
104 | enum dma_slave_width { | ||
105 | DMA_SLAVE_WIDTH_8BIT, | ||
106 | DMA_SLAVE_WIDTH_16BIT, | ||
107 | DMA_SLAVE_WIDTH_32BIT, | ||
108 | }; | ||
109 | 73 | ||
110 | /** | 74 | /** |
111 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, | 75 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, |
@@ -132,32 +96,6 @@ enum dma_ctrl_flags { | |||
132 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; | 96 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; |
133 | 97 | ||
134 | /** | 98 | /** |
135 | * struct dma_slave - Information about a DMA slave | ||
136 | * @dev: device acting as DMA slave | ||
137 | * @dma_dev: required DMA master device. If non-NULL, the client can not be | ||
138 | * bound to other masters than this. | ||
139 | * @tx_reg: physical address of data register used for | ||
140 | * memory-to-peripheral transfers | ||
141 | * @rx_reg: physical address of data register used for | ||
142 | * peripheral-to-memory transfers | ||
143 | * @reg_width: peripheral register width | ||
144 | * | ||
145 | * If dma_dev is non-NULL, the client can not be bound to other DMA | ||
146 | * masters than the one corresponding to this device. The DMA master | ||
147 | * driver may use this to determine if there is controller-specific | ||
148 | * data wrapped around this struct. Drivers of platform code that sets | ||
149 | * the dma_dev field must therefore make sure to use an appropriate | ||
150 | * controller-specific dma slave structure wrapping this struct. | ||
151 | */ | ||
152 | struct dma_slave { | ||
153 | struct device *dev; | ||
154 | struct device *dma_dev; | ||
155 | dma_addr_t tx_reg; | ||
156 | dma_addr_t rx_reg; | ||
157 | enum dma_slave_width reg_width; | ||
158 | }; | ||
159 | |||
160 | /** | ||
161 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan | 99 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan |
162 | * @refcount: local_t used for open-coded "bigref" counting | 100 | * @refcount: local_t used for open-coded "bigref" counting |
163 | * @memcpy_count: transaction counter | 101 | * @memcpy_count: transaction counter |
@@ -165,7 +103,6 @@ struct dma_slave { | |||
165 | */ | 103 | */ |
166 | 104 | ||
167 | struct dma_chan_percpu { | 105 | struct dma_chan_percpu { |
168 | local_t refcount; | ||
169 | /* stats */ | 106 | /* stats */ |
170 | unsigned long memcpy_count; | 107 | unsigned long memcpy_count; |
171 | unsigned long bytes_transferred; | 108 | unsigned long bytes_transferred; |
@@ -176,13 +113,14 @@ struct dma_chan_percpu { | |||
176 | * @device: ptr to the dma device who supplies this channel, always !%NULL | 113 | * @device: ptr to the dma device who supplies this channel, always !%NULL |
177 | * @cookie: last cookie value returned to client | 114 | * @cookie: last cookie value returned to client |
178 | * @chan_id: channel ID for sysfs | 115 | * @chan_id: channel ID for sysfs |
179 | * @class_dev: class device for sysfs | 116 | * @dev: class device for sysfs |
180 | * @refcount: kref, used in "bigref" slow-mode | 117 | * @refcount: kref, used in "bigref" slow-mode |
181 | * @slow_ref: indicates that the DMA channel is free | 118 | * @slow_ref: indicates that the DMA channel is free |
182 | * @rcu: the DMA channel's RCU head | 119 | * @rcu: the DMA channel's RCU head |
183 | * @device_node: used to add this to the device chan list | 120 | * @device_node: used to add this to the device chan list |
184 | * @local: per-cpu pointer to a struct dma_chan_percpu | 121 | * @local: per-cpu pointer to a struct dma_chan_percpu |
185 | * @client-count: how many clients are using this channel | 122 | * @client-count: how many clients are using this channel |
123 | * @table_count: number of appearances in the mem-to-mem allocation table | ||
186 | */ | 124 | */ |
187 | struct dma_chan { | 125 | struct dma_chan { |
188 | struct dma_device *device; | 126 | struct dma_device *device; |
@@ -190,73 +128,47 @@ struct dma_chan { | |||
190 | 128 | ||
191 | /* sysfs */ | 129 | /* sysfs */ |
192 | int chan_id; | 130 | int chan_id; |
193 | struct device dev; | 131 | struct dma_chan_dev *dev; |
194 | |||
195 | struct kref refcount; | ||
196 | int slow_ref; | ||
197 | struct rcu_head rcu; | ||
198 | 132 | ||
199 | struct list_head device_node; | 133 | struct list_head device_node; |
200 | struct dma_chan_percpu *local; | 134 | struct dma_chan_percpu *local; |
201 | int client_count; | 135 | int client_count; |
136 | int table_count; | ||
202 | }; | 137 | }; |
203 | 138 | ||
204 | #define to_dma_chan(p) container_of(p, struct dma_chan, dev) | 139 | /** |
205 | 140 | * struct dma_chan_dev - relate sysfs device node to backing channel device | |
206 | void dma_chan_cleanup(struct kref *kref); | 141 | * @chan - driver channel device |
207 | 142 | * @device - sysfs device | |
208 | static inline void dma_chan_get(struct dma_chan *chan) | 143 | * @dev_id - parent dma_device dev_id |
209 | { | 144 | * @idr_ref - reference count to gate release of dma_device dev_id |
210 | if (unlikely(chan->slow_ref)) | 145 | */ |
211 | kref_get(&chan->refcount); | 146 | struct dma_chan_dev { |
212 | else { | 147 | struct dma_chan *chan; |
213 | local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); | 148 | struct device device; |
214 | put_cpu(); | 149 | int dev_id; |
215 | } | 150 | atomic_t *idr_ref; |
216 | } | 151 | }; |
217 | 152 | ||
218 | static inline void dma_chan_put(struct dma_chan *chan) | 153 | static inline const char *dma_chan_name(struct dma_chan *chan) |
219 | { | 154 | { |
220 | if (unlikely(chan->slow_ref)) | 155 | return dev_name(&chan->dev->device); |
221 | kref_put(&chan->refcount, dma_chan_cleanup); | ||
222 | else { | ||
223 | local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); | ||
224 | put_cpu(); | ||
225 | } | ||
226 | } | 156 | } |
227 | 157 | ||
228 | /* | 158 | void dma_chan_cleanup(struct kref *kref); |
229 | * typedef dma_event_callback - function pointer to a DMA event callback | ||
230 | * For each channel added to the system this routine is called for each client. | ||
231 | * If the client would like to use the channel it returns '1' to signal (ack) | ||
232 | * the dmaengine core to take out a reference on the channel and its | ||
233 | * corresponding device. A client must not 'ack' an available channel more | ||
234 | * than once. When a channel is removed all clients are notified. If a client | ||
235 | * is using the channel it must 'ack' the removal. A client must not 'ack' a | ||
236 | * removed channel more than once. | ||
237 | * @client - 'this' pointer for the client context | ||
238 | * @chan - channel to be acted upon | ||
239 | * @state - available or removed | ||
240 | */ | ||
241 | struct dma_client; | ||
242 | typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, | ||
243 | struct dma_chan *chan, enum dma_state state); | ||
244 | 159 | ||
245 | /** | 160 | /** |
246 | * struct dma_client - info on the entity making use of DMA services | 161 | * typedef dma_filter_fn - callback filter for dma_request_channel |
247 | * @event_callback: func ptr to call when something happens | 162 | * @chan: channel to be reviewed |
248 | * @cap_mask: only return channels that satisfy the requested capabilities | 163 | * @filter_param: opaque parameter passed through dma_request_channel |
249 | * a value of zero corresponds to any capability | 164 | * |
250 | * @slave: data for preparing slave transfer. Must be non-NULL iff the | 165 | * When this optional parameter is specified in a call to dma_request_channel a |
251 | * DMA_SLAVE capability is requested. | 166 | * suitable channel is passed to this routine for further dispositioning before |
252 | * @global_node: list_head for global dma_client_list | 167 | * being returned. Where 'suitable' indicates a non-busy channel that |
168 | * satisfies the given capability mask. It returns 'true' to indicate that the | ||
169 | * channel is suitable. | ||
253 | */ | 170 | */ |
254 | struct dma_client { | 171 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); |
255 | dma_event_callback event_callback; | ||
256 | dma_cap_mask_t cap_mask; | ||
257 | struct dma_slave *slave; | ||
258 | struct list_head global_node; | ||
259 | }; | ||
260 | 172 | ||
261 | typedef void (*dma_async_tx_callback)(void *dma_async_param); | 173 | typedef void (*dma_async_tx_callback)(void *dma_async_param); |
262 | /** | 174 | /** |
@@ -323,14 +235,10 @@ struct dma_device { | |||
323 | dma_cap_mask_t cap_mask; | 235 | dma_cap_mask_t cap_mask; |
324 | int max_xor; | 236 | int max_xor; |
325 | 237 | ||
326 | struct kref refcount; | ||
327 | struct completion done; | ||
328 | |||
329 | int dev_id; | 238 | int dev_id; |
330 | struct device *dev; | 239 | struct device *dev; |
331 | 240 | ||
332 | int (*device_alloc_chan_resources)(struct dma_chan *chan, | 241 | int (*device_alloc_chan_resources)(struct dma_chan *chan); |
333 | struct dma_client *client); | ||
334 | void (*device_free_chan_resources)(struct dma_chan *chan); | 242 | void (*device_free_chan_resources)(struct dma_chan *chan); |
335 | 243 | ||
336 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( | 244 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( |
@@ -362,9 +270,8 @@ struct dma_device { | |||
362 | 270 | ||
363 | /* --- public DMA engine API --- */ | 271 | /* --- public DMA engine API --- */ |
364 | 272 | ||
365 | void dma_async_client_register(struct dma_client *client); | 273 | void dmaengine_get(void); |
366 | void dma_async_client_unregister(struct dma_client *client); | 274 | void dmaengine_put(void); |
367 | void dma_async_client_chan_request(struct dma_client *client); | ||
368 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, | 275 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, |
369 | void *dest, void *src, size_t len); | 276 | void *dest, void *src, size_t len); |
370 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, | 277 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, |
@@ -406,6 +313,12 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | |||
406 | set_bit(tx_type, dstp->bits); | 313 | set_bit(tx_type, dstp->bits); |
407 | } | 314 | } |
408 | 315 | ||
316 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) | ||
317 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) | ||
318 | { | ||
319 | bitmap_zero(dstp->bits, DMA_TX_TYPE_END); | ||
320 | } | ||
321 | |||
409 | #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) | 322 | #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) |
410 | static inline int | 323 | static inline int |
411 | __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) | 324 | __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) |
@@ -475,11 +388,25 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, | |||
475 | } | 388 | } |
476 | 389 | ||
477 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | 390 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); |
391 | #ifdef CONFIG_DMA_ENGINE | ||
392 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | ||
393 | #else | ||
394 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
395 | { | ||
396 | return DMA_SUCCESS; | ||
397 | } | ||
398 | #endif | ||
478 | 399 | ||
479 | /* --- DMA device --- */ | 400 | /* --- DMA device --- */ |
480 | 401 | ||
481 | int dma_async_device_register(struct dma_device *device); | 402 | int dma_async_device_register(struct dma_device *device); |
482 | void dma_async_device_unregister(struct dma_device *device); | 403 | void dma_async_device_unregister(struct dma_device *device); |
404 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | ||
405 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); | ||
406 | void dma_issue_pending_all(void); | ||
407 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) | ||
408 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); | ||
409 | void dma_release_channel(struct dma_chan *chan); | ||
483 | 410 | ||
484 | /* --- Helper iov-locking functions --- */ | 411 | /* --- Helper iov-locking functions --- */ |
485 | 412 | ||
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 04d217b442bf..d797dde247f7 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
@@ -22,14 +22,34 @@ struct dw_dma_platform_data { | |||
22 | }; | 22 | }; |
23 | 23 | ||
24 | /** | 24 | /** |
25 | * enum dw_dma_slave_width - DMA slave register access width. | ||
26 | * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses | ||
27 | * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses | ||
28 | * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses | ||
29 | */ | ||
30 | enum dw_dma_slave_width { | ||
31 | DW_DMA_SLAVE_WIDTH_8BIT, | ||
32 | DW_DMA_SLAVE_WIDTH_16BIT, | ||
33 | DW_DMA_SLAVE_WIDTH_32BIT, | ||
34 | }; | ||
35 | |||
36 | /** | ||
25 | * struct dw_dma_slave - Controller-specific information about a slave | 37 | * struct dw_dma_slave - Controller-specific information about a slave |
26 | * @slave: Generic information about the slave | 38 | * |
27 | * @ctl_lo: Platform-specific initializer for the CTL_LO register | 39 | * @dma_dev: required DMA master device |
40 | * @tx_reg: physical address of data register used for | ||
41 | * memory-to-peripheral transfers | ||
42 | * @rx_reg: physical address of data register used for | ||
43 | * peripheral-to-memory transfers | ||
44 | * @reg_width: peripheral register width | ||
28 | * @cfg_hi: Platform-specific initializer for the CFG_HI register | 45 | * @cfg_hi: Platform-specific initializer for the CFG_HI register |
29 | * @cfg_lo: Platform-specific initializer for the CFG_LO register | 46 | * @cfg_lo: Platform-specific initializer for the CFG_LO register |
30 | */ | 47 | */ |
31 | struct dw_dma_slave { | 48 | struct dw_dma_slave { |
32 | struct dma_slave slave; | 49 | struct device *dma_dev; |
50 | dma_addr_t tx_reg; | ||
51 | dma_addr_t rx_reg; | ||
52 | enum dw_dma_slave_width reg_width; | ||
33 | u32 cfg_hi; | 53 | u32 cfg_hi; |
34 | u32 cfg_lo; | 54 | u32 cfg_lo; |
35 | }; | 55 | }; |
@@ -54,9 +74,4 @@ struct dw_dma_slave { | |||
54 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ | 74 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ |
55 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ | 75 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ |
56 | 76 | ||
57 | static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave) | ||
58 | { | ||
59 | return container_of(slave, struct dw_dma_slave, slave); | ||
60 | } | ||
61 | |||
62 | #endif /* DW_DMAC_H */ | 77 | #endif /* DW_DMAC_H */ |
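Editor's sketch (not from the patch): since the generic struct dma_slave is gone, board code now fills the controller-specific struct dw_dma_slave that atmel-mci.h embeds directly in its platform data (see the atmel-mci.h hunk earlier). The addresses below are placeholders and cfg_hi/cfg_lo are left at zero; a real board file would use the values from its datasheet.

	#include <linux/atmel-mci.h>
	#include <linux/dw_dmac.h>

	/* Hypothetical board file fragment */
	static struct mci_platform_data mci_pdata = {
		.dma_slave = {
			.dma_dev   = NULL,	/* set to the dw_dmac controller's
						 * struct device during board init */
			.tx_reg    = 0xfff02034,	/* placeholder TX data register */
			.rx_reg    = 0xfff02030,	/* placeholder RX data register */
			.reg_width = DW_DMA_SLAVE_WIDTH_32BIT,
			.cfg_hi    = 0,		/* platform CFG_HI/CFG_LO initializers */
			.cfg_lo    = 0,
		},
	};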
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 00e2b575021f..88d3d8fbf9f2 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h | |||
@@ -520,6 +520,7 @@ struct cfi_fixup { | |||
520 | 520 | ||
521 | #define CFI_MFR_AMD 0x0001 | 521 | #define CFI_MFR_AMD 0x0001 |
522 | #define CFI_MFR_ATMEL 0x001F | 522 | #define CFI_MFR_ATMEL 0x001F |
523 | #define CFI_MFR_SAMSUNG 0x00EC | ||
523 | #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ | 524 | #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ |
524 | 525 | ||
525 | void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); | 526 | void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); |
diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h index 0be442f881dd..0555f7a0b9ed 100644 --- a/include/linux/mtd/ftl.h +++ b/include/linux/mtd/ftl.h | |||
@@ -32,25 +32,25 @@ | |||
32 | #define _LINUX_FTL_H | 32 | #define _LINUX_FTL_H |
33 | 33 | ||
34 | typedef struct erase_unit_header_t { | 34 | typedef struct erase_unit_header_t { |
35 | u_int8_t LinkTargetTuple[5]; | 35 | uint8_t LinkTargetTuple[5]; |
36 | u_int8_t DataOrgTuple[10]; | 36 | uint8_t DataOrgTuple[10]; |
37 | u_int8_t NumTransferUnits; | 37 | uint8_t NumTransferUnits; |
38 | u_int32_t EraseCount; | 38 | uint32_t EraseCount; |
39 | u_int16_t LogicalEUN; | 39 | uint16_t LogicalEUN; |
40 | u_int8_t BlockSize; | 40 | uint8_t BlockSize; |
41 | u_int8_t EraseUnitSize; | 41 | uint8_t EraseUnitSize; |
42 | u_int16_t FirstPhysicalEUN; | 42 | uint16_t FirstPhysicalEUN; |
43 | u_int16_t NumEraseUnits; | 43 | uint16_t NumEraseUnits; |
44 | u_int32_t FormattedSize; | 44 | uint32_t FormattedSize; |
45 | u_int32_t FirstVMAddress; | 45 | uint32_t FirstVMAddress; |
46 | u_int16_t NumVMPages; | 46 | uint16_t NumVMPages; |
47 | u_int8_t Flags; | 47 | uint8_t Flags; |
48 | u_int8_t Code; | 48 | uint8_t Code; |
49 | u_int32_t SerialNumber; | 49 | uint32_t SerialNumber; |
50 | u_int32_t AltEUHOffset; | 50 | uint32_t AltEUHOffset; |
51 | u_int32_t BAMOffset; | 51 | uint32_t BAMOffset; |
52 | u_int8_t Reserved[12]; | 52 | uint8_t Reserved[12]; |
53 | u_int8_t EndTuple[2]; | 53 | uint8_t EndTuple[2]; |
54 | } erase_unit_header_t; | 54 | } erase_unit_header_t; |
55 | 55 | ||
56 | /* Flags in erase_unit_header_t */ | 56 | /* Flags in erase_unit_header_t */ |
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index aa30244492c6..b981b8772217 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
@@ -223,6 +223,7 @@ struct map_info { | |||
223 | must leave it enabled. */ | 223 | must leave it enabled. */ |
224 | void (*set_vpp)(struct map_info *, int); | 224 | void (*set_vpp)(struct map_info *, int); |
225 | 225 | ||
226 | unsigned long pfow_base; | ||
226 | unsigned long map_priv_1; | 227 | unsigned long map_priv_1; |
227 | unsigned long map_priv_2; | 228 | unsigned long map_priv_2; |
228 | void *fldrv_priv; | 229 | void *fldrv_priv; |
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 64433eb411d7..3aa5d77c2cdb 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/mtd/compatmac.h> | 15 | #include <linux/mtd/compatmac.h> |
16 | #include <mtd/mtd-abi.h> | 16 | #include <mtd/mtd-abi.h> |
17 | 17 | ||
18 | #include <asm/div64.h> | ||
19 | |||
18 | #define MTD_CHAR_MAJOR 90 | 20 | #define MTD_CHAR_MAJOR 90 |
19 | #define MTD_BLOCK_MAJOR 31 | 21 | #define MTD_BLOCK_MAJOR 31 |
20 | #define MAX_MTD_DEVICES 32 | 22 | #define MAX_MTD_DEVICES 32 |
@@ -25,20 +27,20 @@ | |||
25 | #define MTD_ERASE_DONE 0x08 | 27 | #define MTD_ERASE_DONE 0x08 |
26 | #define MTD_ERASE_FAILED 0x10 | 28 | #define MTD_ERASE_FAILED 0x10 |
27 | 29 | ||
28 | #define MTD_FAIL_ADDR_UNKNOWN 0xffffffff | 30 | #define MTD_FAIL_ADDR_UNKNOWN -1LL |
29 | 31 | ||
30 | /* If the erase fails, fail_addr might indicate exactly which block failed. If | 32 | /* If the erase fails, fail_addr might indicate exactly which block failed. If |
31 | fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not | 33 | fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not |
32 | specific to any particular block. */ | 34 | specific to any particular block. */ |
33 | struct erase_info { | 35 | struct erase_info { |
34 | struct mtd_info *mtd; | 36 | struct mtd_info *mtd; |
35 | u_int32_t addr; | 37 | uint64_t addr; |
36 | u_int32_t len; | 38 | uint64_t len; |
37 | u_int32_t fail_addr; | 39 | uint64_t fail_addr; |
38 | u_long time; | 40 | u_long time; |
39 | u_long retries; | 41 | u_long retries; |
40 | u_int dev; | 42 | unsigned dev; |
41 | u_int cell; | 43 | unsigned cell; |
42 | void (*callback) (struct erase_info *self); | 44 | void (*callback) (struct erase_info *self); |
43 | u_long priv; | 45 | u_long priv; |
44 | u_char state; | 46 | u_char state; |
@@ -46,9 +48,9 @@ struct erase_info { | |||
46 | }; | 48 | }; |
47 | 49 | ||
48 | struct mtd_erase_region_info { | 50 | struct mtd_erase_region_info { |
49 | u_int32_t offset; /* At which this region starts, from the beginning of the MTD */ | 51 | uint64_t offset; /* At which this region starts, from the beginning of the MTD */ |
50 | u_int32_t erasesize; /* For this region */ | 52 | uint32_t erasesize; /* For this region */ |
51 | u_int32_t numblocks; /* Number of blocks of erasesize in this region */ | 53 | uint32_t numblocks; /* Number of blocks of erasesize in this region */ |
52 | unsigned long *lockmap; /* If keeping bitmap of locks */ | 54 | unsigned long *lockmap; /* If keeping bitmap of locks */ |
53 | }; | 55 | }; |
54 | 56 | ||
@@ -100,14 +102,14 @@ struct mtd_oob_ops { | |||
100 | 102 | ||
101 | struct mtd_info { | 103 | struct mtd_info { |
102 | u_char type; | 104 | u_char type; |
103 | u_int32_t flags; | 105 | uint32_t flags; |
104 | u_int32_t size; // Total size of the MTD | 106 | uint64_t size; // Total size of the MTD |
105 | 107 | ||
106 | /* "Major" erase size for the device. Naïve users may take this | 108 | /* "Major" erase size for the device. Naïve users may take this |
107 | * to be the only erase size available, or may use the more detailed | 109 | * to be the only erase size available, or may use the more detailed |
108 | * information below if they desire | 110 | * information below if they desire |
109 | */ | 111 | */ |
110 | u_int32_t erasesize; | 112 | uint32_t erasesize; |
111 | /* Minimal writable flash unit size. In case of NOR flash it is 1 (even | 113 | /* Minimal writable flash unit size. In case of NOR flash it is 1 (even |
112 | * though individual bits can be cleared), in case of NAND flash it is | 114 | * though individual bits can be cleared), in case of NAND flash it is |
113 | * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR | 115 | * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR |
@@ -115,10 +117,20 @@ struct mtd_info { | |||
115 | * Any driver registering a struct mtd_info must ensure a writesize of | 117 | * Any driver registering a struct mtd_info must ensure a writesize of |
116 | * 1 or larger. | 118 | * 1 or larger. |
117 | */ | 119 | */ |
118 | u_int32_t writesize; | 120 | uint32_t writesize; |
121 | |||
122 | uint32_t oobsize; // Amount of OOB data per block (e.g. 16) | ||
123 | uint32_t oobavail; // Available OOB bytes per block | ||
119 | 124 | ||
120 | u_int32_t oobsize; // Amount of OOB data per block (e.g. 16) | 125 | /* |
121 | u_int32_t oobavail; // Available OOB bytes per block | 126 | * If erasesize is a power of 2 then the shift is stored in |
127 | * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize. | ||
128 | */ | ||
129 | unsigned int erasesize_shift; | ||
130 | unsigned int writesize_shift; | ||
131 | /* Masks based on erasesize_shift and writesize_shift */ | ||
132 | unsigned int erasesize_mask; | ||
133 | unsigned int writesize_mask; | ||
122 | 134 | ||
123 | // Kernel-only stuff starts here. | 135 | // Kernel-only stuff starts here. |
124 | const char *name; | 136 | const char *name; |
@@ -190,8 +202,8 @@ struct mtd_info { | |||
190 | void (*sync) (struct mtd_info *mtd); | 202 | void (*sync) (struct mtd_info *mtd); |
191 | 203 | ||
192 | /* Chip-supported device locking */ | 204 | /* Chip-supported device locking */ |
193 | int (*lock) (struct mtd_info *mtd, loff_t ofs, size_t len); | 205 | int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
194 | int (*unlock) (struct mtd_info *mtd, loff_t ofs, size_t len); | 206 | int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
195 | 207 | ||
196 | /* Power Management functions */ | 208 | /* Power Management functions */ |
197 | int (*suspend) (struct mtd_info *mtd); | 209 | int (*suspend) (struct mtd_info *mtd); |
@@ -221,6 +233,35 @@ struct mtd_info { | |||
221 | void (*put_device) (struct mtd_info *mtd); | 233 | void (*put_device) (struct mtd_info *mtd); |
222 | }; | 234 | }; |
223 | 235 | ||
236 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) | ||
237 | { | ||
238 | if (mtd->erasesize_shift) | ||
239 | return sz >> mtd->erasesize_shift; | ||
240 | do_div(sz, mtd->erasesize); | ||
241 | return sz; | ||
242 | } | ||
243 | |||
244 | static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) | ||
245 | { | ||
246 | if (mtd->erasesize_shift) | ||
247 | return sz & mtd->erasesize_mask; | ||
248 | return do_div(sz, mtd->erasesize); | ||
249 | } | ||
250 | |||
251 | static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) | ||
252 | { | ||
253 | if (mtd->writesize_shift) | ||
254 | return sz >> mtd->writesize_shift; | ||
255 | do_div(sz, mtd->writesize); | ||
256 | return sz; | ||
257 | } | ||
258 | |||
259 | static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) | ||
260 | { | ||
261 | if (mtd->writesize_shift) | ||
262 | return sz & mtd->writesize_mask; | ||
263 | return do_div(sz, mtd->writesize); | ||
264 | } | ||
224 | 265 | ||
225 | /* Kernel-side ioctl definitions */ | 266 | /* Kernel-side ioctl definitions */ |
226 | 267 | ||
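Editor's sketch (stand-alone, user space): mtd->size is now 64-bit, so generic code must divide through do_div(); the new erasesize_shift/erasesize_mask (and writesize_*) fields let mtd_div_by_eb()/mtd_mod_by_eb() skip the division entirely when the block size is a power of two. The program below mirrors that arithmetic with made-up names and values.

	#include <stdint.h>
	#include <stdio.h>

	/* Same fast path as mtd_div_by_eb()/mtd_mod_by_eb(): for power-of-two
	 * erase blocks, division and modulo collapse to a shift and a mask. */
	static uint32_t div_by_eb(uint64_t ofs, uint32_t erasesize, unsigned int shift)
	{
		if (shift)
			return ofs >> shift;
		return ofs / erasesize;
	}

	static uint32_t mod_by_eb(uint64_t ofs, uint32_t erasesize, unsigned int shift,
				  uint32_t mask)
	{
		if (shift)
			return ofs & mask;
		return ofs % erasesize;
	}

	int main(void)
	{
		uint32_t erasesize = 128 * 1024;		/* 128 KiB blocks */
		unsigned int shift = 17;			/* log2(128 KiB)  */
		uint32_t mask = erasesize - 1;
		uint64_t ofs = 3ULL * 1024 * 1024 * 1024 + 4096; /* > 32 bits */

		printf("block %u, offset in block %u\n",
		       div_by_eb(ofs, erasesize, shift),
		       mod_by_eb(ofs, erasesize, shift, mask));
		return 0;
	}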
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 733d3f3b4eb8..db5b63da2a7e 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -335,17 +335,12 @@ struct nand_buffers { | |||
335 | * @erase_cmd: [INTERN] erase command write function, selectable due to AND support | 335 | * @erase_cmd: [INTERN] erase command write function, selectable due to AND support |
336 | * @scan_bbt: [REPLACEABLE] function to scan bad block table | 336 | * @scan_bbt: [REPLACEABLE] function to scan bad block table |
337 | * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) | 337 | * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) |
338 | * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress | ||
339 | * @state: [INTERN] the current state of the NAND device | 338 | * @state: [INTERN] the current state of the NAND device |
340 | * @oob_poi: poison value buffer | 339 | * @oob_poi: poison value buffer |
341 | * @page_shift: [INTERN] number of address bits in a page (column address bits) | 340 | * @page_shift: [INTERN] number of address bits in a page (column address bits) |
342 | * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock | 341 | * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock |
343 | * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry | 342 | * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry |
344 | * @chip_shift: [INTERN] number of address bits in one chip | 343 | * @chip_shift: [INTERN] number of address bits in one chip |
345 | * @datbuf: [INTERN] internal buffer for one page + oob | ||
346 | * @oobbuf: [INTERN] oob buffer for one eraseblock | ||
347 | * @oobdirty: [INTERN] indicates that oob_buf must be reinitialized | ||
348 | * @data_poi: [INTERN] pointer to a data buffer | ||
349 | * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about | 344 | * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about |
350 | * special functionality. See the defines for further explanation | 345 | * special functionality. See the defines for further explanation |
351 | * @badblockpos: [INTERN] position of the bad block marker in the oob area | 346 | * @badblockpos: [INTERN] position of the bad block marker in the oob area |
@@ -399,7 +394,7 @@ struct nand_chip { | |||
399 | int bbt_erase_shift; | 394 | int bbt_erase_shift; |
400 | int chip_shift; | 395 | int chip_shift; |
401 | int numchips; | 396 | int numchips; |
402 | unsigned long chipsize; | 397 | uint64_t chipsize; |
403 | int pagemask; | 398 | int pagemask; |
404 | int pagebuf; | 399 | int pagebuf; |
405 | int subpagesize; | 400 | int subpagesize; |
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index c92b4d439609..a45dd831b3f8 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h | |||
@@ -36,9 +36,9 @@ | |||
36 | 36 | ||
37 | struct mtd_partition { | 37 | struct mtd_partition { |
38 | char *name; /* identifier string */ | 38 | char *name; /* identifier string */ |
39 | u_int32_t size; /* partition size */ | 39 | uint64_t size; /* partition size */ |
40 | u_int32_t offset; /* offset within the master MTD space */ | 40 | uint64_t offset; /* offset within the master MTD space */ |
41 | u_int32_t mask_flags; /* master MTD flags to mask out for this partition */ | 41 | uint32_t mask_flags; /* master MTD flags to mask out for this partition */ |
42 | struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/ | 42 | struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/ |
43 | struct mtd_info **mtdp; /* pointer to store the MTD object */ | 43 | struct mtd_info **mtdp; /* pointer to store the MTD object */ |
44 | }; | 44 | }; |
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h new file mode 100644 index 000000000000..b730d4f84655 --- /dev/null +++ b/include/linux/mtd/pfow.h | |||
@@ -0,0 +1,159 @@ | |||
1 | /* Primary function overlay window definitions | ||
2 | * and service functions used by LPDDR chips | ||
3 | */ | ||
4 | #ifndef __LINUX_MTD_PFOW_H | ||
5 | #define __LINUX_MTD_PFOW_H | ||
6 | |||
7 | #include <linux/mtd/qinfo.h> | ||
8 | |||
9 | /* PFOW registers addressing */ | ||
10 | /* Address of symbol "P" */ | ||
11 | #define PFOW_QUERY_STRING_P 0x0000 | ||
12 | /* Address of symbol "F" */ | ||
13 | #define PFOW_QUERY_STRING_F 0x0002 | ||
14 | /* Address of symbol "O" */ | ||
15 | #define PFOW_QUERY_STRING_O 0x0004 | ||
16 | /* Address of symbol "W" */ | ||
17 | #define PFOW_QUERY_STRING_W 0x0006 | ||
18 | /* Identification info for LPDDR chip */ | ||
19 | #define PFOW_MANUFACTURER_ID 0x0020 | ||
20 | #define PFOW_DEVICE_ID 0x0022 | ||
21 | /* Address in PFOW where prog buffer can be found */ | ||

22 | #define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 | ||
23 | /* Size of program buffer in words */ | ||
24 | #define PFOW_PROGRAM_BUFFER_SIZE 0x0042 | ||
25 | /* Address command code register */ | ||
26 | #define PFOW_COMMAND_CODE 0x0080 | ||
27 | /* command data register */ | ||
28 | #define PFOW_COMMAND_DATA 0x0084 | ||
29 | /* command address register lower address bits */ | ||
30 | #define PFOW_COMMAND_ADDRESS_L 0x0088 | ||
31 | /* command address register upper address bits */ | ||
32 | #define PFOW_COMMAND_ADDRESS_H 0x008a | ||
34 | /* number of bytes to be programmed lower address bits */ | ||
34 | #define PFOW_DATA_COUNT_L 0x0090 | ||
36 | /* number of bytes to be programmed higher address bits */ | ||
36 | #define PFOW_DATA_COUNT_H 0x0092 | ||
37 | /* command execution register, the only possible value is 0x01 */ | ||
38 | #define PFOW_COMMAND_EXECUTE 0x00c0 | ||
39 | /* 0x01 should be written at this address to clear buffer */ | ||
40 | #define PFOW_CLEAR_PROGRAM_BUFFER 0x00c4 | ||
41 | /* device program/erase suspend register */ | ||
42 | #define PFOW_PROGRAM_ERASE_SUSPEND 0x00c8 | ||
43 | /* device status register */ | ||
44 | #define PFOW_DSR 0x00cc | ||
45 | |||
46 | /* LPDDR memory device command codes */ | ||
47 | /* They are possible values of PFOW command code register */ | ||
48 | #define LPDDR_WORD_PROGRAM 0x0041 | ||
49 | #define LPDDR_BUFF_PROGRAM 0x00E9 | ||
50 | #define LPDDR_BLOCK_ERASE 0x0020 | ||
51 | #define LPDDR_LOCK_BLOCK 0x0061 | ||
52 | #define LPDDR_UNLOCK_BLOCK 0x0062 | ||
53 | #define LPDDR_READ_BLOCK_LOCK_STATUS 0x0065 | ||
54 | #define LPDDR_INFO_QUERY 0x0098 | ||
55 | #define LPDDR_READ_OTP 0x0097 | ||
56 | #define LPDDR_PROG_OTP 0x00C0 | ||
57 | #define LPDDR_RESUME 0x00D0 | ||
58 | |||
59 | /* Defines possible value of PFOW command execution register */ | ||
60 | #define LPDDR_START_EXECUTION 0x0001 | ||
61 | |||
62 | /* Defines possible value of PFOW program/erase suspend register */ | ||
63 | #define LPDDR_SUSPEND 0x0001 | ||
64 | |||
65 | /* Possible values of PFOW device status register */ | ||
66 | /* access R - read; RC read & clearable */ | ||
67 | #define DSR_DPS (1<<1) /* RC; device protect status | ||
68 | * 0 - not protected 1 - locked */ | ||
69 | #define DSR_PSS (1<<2) /* R; program suspend status; | ||
70 | * 0-prog in progress/completed, | ||
71 | * 1- prog suspended */ | ||
72 | #define DSR_VPPS (1<<3) /* RC; 0-Vpp OK, * 1-Vpp low */ | ||
73 | #define DSR_PROGRAM_STATUS (1<<4) /* RC; 0-successful, 1-error */ | ||
74 | #define DSR_ERASE_STATUS (1<<5) /* RC; erase or blank check status; | ||
75 | * 0-success erase/blank check, | ||
76 | * 1 blank check error */ | ||
77 | #define DSR_ESS (1<<6) /* R; erase suspend status; | ||
78 | * 0-erase in progress/complete, | ||
79 | * 1 erase suspended */ | ||
80 | #define DSR_READY_STATUS (1<<7) /* R; Device status | ||
81 | * 0-busy, | ||
82 | * 1-ready */ | ||
83 | #define DSR_RPS (0x3<<8) /* RC; region program status | ||
84 | * 00 - Success, | ||
85 | * 01-re-program attempt in region with | ||
86 | * object mode data, | ||
87 | * 10-object mode program w attempt in | ||
88 | * region with control mode data | ||
89 | * 11-attempt to program invalid half | ||
90 | * with 0x41 command */ | ||
91 | #define DSR_AOS (1<<12) /* RC; 1- AO related failure */ | ||
92 | #define DSR_AVAILABLE (1<<15) /* R; Device availability | ||
93 | * 1 - Device available | ||
94 | * 0 - not available */ | ||
95 | |||
96 | /* The superset of all possible error bits in DSR */ | ||
97 | #define DSR_ERR 0x133A | ||
98 | |||
99 | static inline void send_pfow_command(struct map_info *map, | ||
100 | unsigned long cmd_code, unsigned long adr, | ||
101 | unsigned long len, map_word *datum) | ||
102 | { | ||
103 | int bits_per_chip = map_bankwidth(map) * 8; | ||
104 | int chipnum; | ||
105 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
106 | chipnum = adr >> lpddr->chipshift; | ||
107 | |||
108 | map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); | ||
109 | map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), | ||
110 | map->pfow_base + PFOW_COMMAND_ADDRESS_L); | ||
111 | map_write(map, CMD(adr>>bits_per_chip), | ||
112 | map->pfow_base + PFOW_COMMAND_ADDRESS_H); | ||
113 | if (len) { | ||
114 | map_write(map, CMD(len & ((1<<bits_per_chip) - 1)), | ||
115 | map->pfow_base + PFOW_DATA_COUNT_L); | ||
116 | map_write(map, CMD(len>>bits_per_chip), | ||
117 | map->pfow_base + PFOW_DATA_COUNT_H); | ||
118 | } | ||
119 | if (datum) | ||
120 | map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA); | ||
121 | |||
122 | /* Command execution start */ | ||
123 | map_write(map, CMD(LPDDR_START_EXECUTION), | ||
124 | map->pfow_base + PFOW_COMMAND_EXECUTE); | ||
125 | } | ||
126 | |||
127 | static inline void print_drs_error(unsigned dsr) | ||
128 | { | ||
129 | int prog_status = (dsr & DSR_RPS) >> 8; | ||
130 | |||
131 | if (!(dsr & DSR_AVAILABLE)) | ||
132 | printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); | ||
133 | if (prog_status & 0x03) | ||
134 | printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " | ||
135 | "half with 41h command\n"); | ||
136 | else if (prog_status & 0x02) | ||
137 | printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt " | ||
138 | "in region with Control Mode data\n"); | ||
139 | else if (prog_status & 0x01) | ||
140 | printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region " | ||
141 | "with Object Mode data\n"); | ||
142 | if (!(dsr & DSR_READY_STATUS)) | ||
143 | printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n"); | ||
144 | if (dsr & DSR_ESS) | ||
145 | printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n"); | ||
146 | if (dsr & DSR_ERASE_STATUS) | ||
147 | printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n"); | ||
148 | if (dsr & DSR_PROGRAM_STATUS) | ||
149 | printk(KERN_NOTICE"DSR.4: (1) Program Error\n"); | ||
150 | if (dsr & DSR_VPPS) | ||
151 | printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation " | ||
152 | "aborted\n"); | ||
153 | if (dsr & DSR_PSS) | ||
154 | printk(KERN_NOTICE"DSR.2: (1) Program suspended\n"); | ||
155 | if (dsr & DSR_DPS) | ||
156 | printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt " | ||
157 | "on locked block\n"); | ||
158 | } | ||
159 | #endif /* __LINUX_MTD_PFOW_H */ | ||
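Editor's sketch (not from the patch): a possible caller of the helpers defined above, starting an LPDDR block erase and decoding a failed status word. The single DSR read stands in for a real wait loop with a timeout, the error code is arbitrary, and the map is assumed to have been set up by the qinfo/LPDDR probe code.

	#include <linux/mtd/pfow.h>
	#include <linux/errno.h>

	/* Illustrative only: 'map' must already carry pfow_base and an
	 * lpddr_private in map->fldrv_priv. */
	static int erase_one_block(struct map_info *map, unsigned long adr)
	{
		map_word dsr;

		send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);

		/* A real driver polls/sleeps until DSR_READY_STATUS is set;
		 * a single read keeps this sketch short. */
		dsr = map_read(map, map->pfow_base + PFOW_DSR);

		if (CMDVAL(dsr) & DSR_ERR) {
			print_drs_error(CMDVAL(dsr));
			return -EIO;
		}
		return 0;
	}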
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h index c8e63a5ee72e..76f7cabf07d3 100644 --- a/include/linux/mtd/physmap.h +++ b/include/linux/mtd/physmap.h | |||
@@ -24,6 +24,7 @@ struct physmap_flash_data { | |||
24 | unsigned int width; | 24 | unsigned int width; |
25 | void (*set_vpp)(struct map_info *, int); | 25 | void (*set_vpp)(struct map_info *, int); |
26 | unsigned int nr_parts; | 26 | unsigned int nr_parts; |
27 | unsigned int pfow_base; | ||
27 | struct mtd_partition *parts; | 28 | struct mtd_partition *parts; |
28 | }; | 29 | }; |
29 | 30 | ||
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h new file mode 100644 index 000000000000..7b3d487d8b3f --- /dev/null +++ b/include/linux/mtd/qinfo.h | |||
@@ -0,0 +1,91 @@ | |||
1 | #ifndef __LINUX_MTD_QINFO_H | ||
2 | #define __LINUX_MTD_QINFO_H | ||
3 | |||
4 | #include <linux/mtd/map.h> | ||
5 | #include <linux/wait.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/delay.h> | ||
8 | #include <linux/mtd/mtd.h> | ||
9 | #include <linux/mtd/flashchip.h> | ||
10 | #include <linux/mtd/partitions.h> | ||
11 | |||
12 | /* lpddr_private describes lpddr flash chip in memory map | ||
13 | * @ManufactId - Chip Manufacture ID | ||
14 | * @DevId - Chip Device ID | ||
15 | * @qinfo - pointer to qinfo records describing the chip | ||
16 | * @numchips - number of chips including virtual RWW partitions | ||
17 | * @chipshift - Chip/partition size 2^chipshift | ||
18 | * @chips - per-chip data structure | ||
19 | */ | ||
20 | struct lpddr_private { | ||
21 | uint16_t ManufactId; | ||
22 | uint16_t DevId; | ||
23 | struct qinfo_chip *qinfo; | ||
24 | int numchips; | ||
25 | unsigned long chipshift; | ||
26 | struct flchip chips[0]; | ||
27 | }; | ||
28 | |||
29 | /* qinfo_query_info structure contains request information for | ||
30 | * each qinfo record | ||
31 | * @major - major number of qinfo record | ||
32 | * @minor - minor number of qinfo record | ||
33 | * @id_str - descriptive string to access the record | ||
34 | * @desc - detailed description for the qinfo record | ||
35 | */ | ||
36 | struct qinfo_query_info { | ||
37 | uint8_t major; | ||
38 | uint8_t minor; | ||
39 | char *id_str; | ||
40 | char *desc; | ||
41 | }; | ||
42 | |||
43 | /* | ||
44 | * qinfo_chip structure contains necessary qinfo records data | ||
45 | * @DevSizeShift - Device size 2^n bytes | ||
46 | * @BufSizeShift - Program buffer size 2^n bytes | ||
47 | * @TotalBlocksNum - Total number of blocks | ||
48 | * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes | ||
49 | * @HWPartsNum - Number of hardware partitions | ||
50 | * @SuspEraseSupp - Suspend erase supported | ||
51 | * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec | ||
52 | * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec | ||
53 | * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec | ||
54 | */ | ||
55 | struct qinfo_chip { | ||
56 | /* General device info */ | ||
57 | uint16_t DevSizeShift; | ||
58 | uint16_t BufSizeShift; | ||
59 | /* Erase block information */ | ||
60 | uint16_t TotalBlocksNum; | ||
61 | uint16_t UniformBlockSizeShift; | ||
62 | /* Partition information */ | ||
63 | uint16_t HWPartsNum; | ||
64 | /* Optional features */ | ||
65 | uint16_t SuspEraseSupp; | ||
66 | /* Operation typical time */ | ||
67 | uint16_t SingleWordProgTime; | ||
68 | uint16_t ProgBufferTime; | ||
69 | uint16_t BlockEraseTime; | ||
70 | }; | ||
71 | |||
72 | /* defines for fixup usage */ | ||
73 | #define LPDDR_MFR_ANY 0xffff | ||
74 | #define LPDDR_ID_ANY 0xffff | ||
75 | #define NUMONYX_MFGR_ID 0x0089 | ||
76 | #define R18_DEVICE_ID_1G 0x893c | ||
77 | |||
78 | static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map) | ||
79 | { | ||
80 | map_word val = { {0} }; | ||
81 | val.x[0] = cmd; | ||
82 | return val; | ||
83 | } | ||
84 | |||
85 | #define CMD(x) lpddr_build_cmd(x, map) | ||
86 | #define CMDVAL(cmd) cmd.x[0] | ||
87 | |||
88 | struct mtd_info *lpddr_cmdset(struct map_info *); | ||
89 | |||
90 | #endif | ||
91 | |||
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h new file mode 100644 index 000000000000..25f4d2a845c1 --- /dev/null +++ b/include/linux/mtd/sharpsl.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * SharpSL NAND support | ||
3 | * | ||
4 | * Copyright (C) 2008 Dmitry Baryshkov | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/mtd/nand.h> | ||
12 | #include <linux/mtd/nand_ecc.h> | ||
13 | #include <linux/mtd/partitions.h> | ||
14 | |||
15 | struct sharpsl_nand_platform_data { | ||
16 | struct nand_bbt_descr *badblock_pattern; | ||
17 | struct nand_ecclayout *ecc_layout; | ||
18 | struct mtd_partition *partitions; | ||
19 | unsigned int nr_partitions; | ||
20 | }; | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 114091be8872..f24556813375 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1125,9 +1125,6 @@ struct softnet_data | |||
1125 | struct sk_buff *completion_queue; | 1125 | struct sk_buff *completion_queue; |
1126 | 1126 | ||
1127 | struct napi_struct backlog; | 1127 | struct napi_struct backlog; |
1128 | #ifdef CONFIG_NET_DMA | ||
1129 | struct dma_chan *net_dma; | ||
1130 | #endif | ||
1131 | }; | 1128 | }; |
1132 | 1129 | ||
1133 | DECLARE_PER_CPU(struct softnet_data,softnet_data); | 1130 | DECLARE_PER_CPU(struct softnet_data,softnet_data); |
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 1ce9fe572e51..1d9518bc4c58 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start, | |||
164 | unsigned long oprofile_get_cpu_buffer_size(void); | 164 | unsigned long oprofile_get_cpu_buffer_size(void); |
165 | void oprofile_cpu_buffer_inc_smpl_lost(void); | 165 | void oprofile_cpu_buffer_inc_smpl_lost(void); |
166 | 166 | ||
167 | /* cpu buffer functions */ | ||
168 | |||
169 | struct op_sample; | ||
170 | |||
171 | struct op_entry { | ||
172 | struct ring_buffer_event *event; | ||
173 | struct op_sample *sample; | ||
174 | unsigned long irq_flags; | ||
175 | unsigned long size; | ||
176 | unsigned long *data; | ||
177 | }; | ||
178 | |||
179 | void oprofile_write_reserve(struct op_entry *entry, | ||
180 | struct pt_regs * const regs, | ||
181 | unsigned long pc, int code, int size); | ||
182 | int oprofile_add_data(struct op_entry *entry, unsigned long val); | ||
183 | int oprofile_write_commit(struct op_entry *entry); | ||
184 | |||
167 | #endif /* OPROFILE_H */ | 185 | #endif /* OPROFILE_H */ |
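Editor's sketch (not from the patch): the three new entry points form a reserve/fill/commit sequence for writing a sample plus extra payload into the cpu buffer. The code value, the payload, and the decision to ignore the return values are all illustrative, not taken from any real model driver.

	#include <linux/oprofile.h>

	/* Hypothetical model code: write one sample followed by two data words. */
	static void demo_write_sample(struct pt_regs * const regs, unsigned long pc,
				      unsigned long val0, unsigned long val1)
	{
		struct op_entry entry;

		/* code=1 and size=2 are placeholders for a model-specific format */
		oprofile_write_reserve(&entry, regs, pc, 1, 2);
		oprofile_add_data(&entry, val0);
		oprofile_add_data(&entry, val1);
		oprofile_write_commit(&entry);
	}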
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index f7cc204fab07..20998746518e 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h | |||
@@ -223,7 +223,6 @@ struct hotplug_params { | |||
223 | #ifdef CONFIG_ACPI | 223 | #ifdef CONFIG_ACPI |
224 | #include <acpi/acpi.h> | 224 | #include <acpi/acpi.h> |
225 | #include <acpi/acpi_bus.h> | 225 | #include <acpi/acpi_bus.h> |
226 | #include <acpi/actypes.h> | ||
227 | extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | 226 | extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, |
228 | struct hotplug_params *hpp); | 227 | struct hotplug_params *hpp); |
229 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); | 228 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 2ce8207686e2..2b409c44db83 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -232,6 +232,11 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); | |||
232 | 232 | ||
233 | extern void hibernation_set_ops(struct platform_hibernation_ops *ops); | 233 | extern void hibernation_set_ops(struct platform_hibernation_ops *ops); |
234 | extern int hibernate(void); | 234 | extern int hibernate(void); |
235 | extern int hibernate_nvs_register(unsigned long start, unsigned long size); | ||
236 | extern int hibernate_nvs_alloc(void); | ||
237 | extern void hibernate_nvs_free(void); | ||
238 | extern void hibernate_nvs_save(void); | ||
239 | extern void hibernate_nvs_restore(void); | ||
235 | #else /* CONFIG_HIBERNATION */ | 240 | #else /* CONFIG_HIBERNATION */ |
236 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } | 241 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } |
237 | static inline void swsusp_set_page_free(struct page *p) {} | 242 | static inline void swsusp_set_page_free(struct page *p) {} |
@@ -239,6 +244,14 @@ static inline void swsusp_unset_page_free(struct page *p) {} | |||
239 | 244 | ||
240 | static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} | 245 | static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} |
241 | static inline int hibernate(void) { return -ENOSYS; } | 246 | static inline int hibernate(void) { return -ENOSYS; } |
247 | static inline int hibernate_nvs_register(unsigned long a, unsigned long b) | ||
248 | { | ||
249 | return 0; | ||
250 | } | ||
251 | static inline int hibernate_nvs_alloc(void) { return 0; } | ||
252 | static inline void hibernate_nvs_free(void) {} | ||
253 | static inline void hibernate_nvs_save(void) {} | ||
254 | static inline void hibernate_nvs_restore(void) {} | ||
242 | #endif /* CONFIG_HIBERNATION */ | 255 | #endif /* CONFIG_HIBERNATION */ |
243 | 256 | ||
244 | #ifdef CONFIG_PM_SLEEP | 257 | #ifdef CONFIG_PM_SLEEP |
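Editor's sketch (not from the patch): the intended lifecycle of the new NVS helpers, as suggested by their names and the stubs above; the region below is a placeholder, and the call sites in core hibernation code are an assumption, not shown in this diff.

	#include <linux/suspend.h>

	/* Hypothetical: a platform driver declares a firmware NVS range that
	 * must survive hibernation (start/size are placeholders). */
	static int register_fw_nvs_region(void)
	{
		return hibernate_nvs_register(0x000e0000, 0x20000);
	}

	/* Expected ordering around a hibernation cycle (sketch):
	 *
	 *   hibernate_nvs_alloc();     allocate backup storage
	 *   hibernate_nvs_save();      copy the NVS ranges before power-off
	 *      ... write image, power off, resume ...
	 *   hibernate_nvs_restore();   copy the saved contents back
	 *   hibernate_nvs_free();      release the backup storage
	 */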
diff --git a/include/net/netdma.h b/include/net/netdma.h index f28c6e064e8f..8ba8ce284eeb 100644 --- a/include/net/netdma.h +++ b/include/net/netdma.h | |||
@@ -24,17 +24,6 @@ | |||
24 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
25 | #include <linux/skbuff.h> | 25 | #include <linux/skbuff.h> |
26 | 26 | ||
27 | static inline struct dma_chan *get_softnet_dma(void) | ||
28 | { | ||
29 | struct dma_chan *chan; | ||
30 | rcu_read_lock(); | ||
31 | chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma); | ||
32 | if (chan) | ||
33 | dma_chan_get(chan); | ||
34 | rcu_read_unlock(); | ||
35 | return chan; | ||
36 | } | ||
37 | |||
38 | int dma_skb_copy_datagram_iovec(struct dma_chan* chan, | 27 | int dma_skb_copy_datagram_iovec(struct dma_chan* chan, |
39 | struct sk_buff *skb, int offset, struct iovec *to, | 28 | struct sk_buff *skb, int offset, struct iovec *to, |
40 | size_t len, struct dma_pinned_list *pinned_list); | 29 | size_t len, struct dma_pinned_list *pinned_list); |
diff --git a/kernel/cred.c b/kernel/cred.c index ff7bc071991c..043f78c133c4 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -506,6 +506,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) | |||
506 | else | 506 | else |
507 | old = get_cred(&init_cred); | 507 | old = get_cred(&init_cred); |
508 | 508 | ||
509 | *new = *old; | ||
509 | get_uid(new->user); | 510 | get_uid(new->user); |
510 | get_group_info(new->group_info); | 511 | get_group_info(new->group_info); |
511 | 512 | ||
@@ -529,6 +530,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) | |||
529 | 530 | ||
530 | error: | 531 | error: |
531 | put_cred(new); | 532 | put_cred(new); |
533 | put_cred(old); | ||
532 | return NULL; | 534 | return NULL; |
533 | } | 535 | } |
534 | EXPORT_SYMBOL(prepare_kernel_cred); | 536 | EXPORT_SYMBOL(prepare_kernel_cred); |
diff --git a/kernel/power/disk.c b/kernel/power/disk.c index f77d3819ef57..45e8541ab7e3 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c | |||
@@ -258,12 +258,12 @@ int hibernation_snapshot(int platform_mode) | |||
258 | { | 258 | { |
259 | int error; | 259 | int error; |
260 | 260 | ||
261 | /* Free memory before shutting down devices. */ | 261 | error = platform_begin(platform_mode); |
262 | error = swsusp_shrink_memory(); | ||
263 | if (error) | 262 | if (error) |
264 | return error; | 263 | return error; |
265 | 264 | ||
266 | error = platform_begin(platform_mode); | 265 | /* Free memory before shutting down devices. */ |
266 | error = swsusp_shrink_memory(); | ||
267 | if (error) | 267 | if (error) |
268 | goto Close; | 268 | goto Close; |
269 | 269 | ||
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 5d2ab836e998..f5fc2d7680f2 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/syscalls.h> | 25 | #include <linux/syscalls.h> |
26 | #include <linux/console.h> | 26 | #include <linux/console.h> |
27 | #include <linux/highmem.h> | 27 | #include <linux/highmem.h> |
28 | #include <linux/list.h> | ||
28 | 29 | ||
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #include <asm/mmu_context.h> | 31 | #include <asm/mmu_context.h> |
@@ -192,12 +193,6 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size) | |||
192 | return ret; | 193 | return ret; |
193 | } | 194 | } |
194 | 195 | ||
195 | static void chain_free(struct chain_allocator *ca, int clear_page_nosave) | ||
196 | { | ||
197 | free_list_of_pages(ca->chain, clear_page_nosave); | ||
198 | memset(ca, 0, sizeof(struct chain_allocator)); | ||
199 | } | ||
200 | |||
201 | /** | 196 | /** |
202 | * Data types related to memory bitmaps. | 197 | * Data types related to memory bitmaps. |
203 | * | 198 | * |
@@ -233,7 +228,7 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave) | |||
233 | #define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) | 228 | #define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) |
234 | 229 | ||
235 | struct bm_block { | 230 | struct bm_block { |
236 | struct bm_block *next; /* next element of the list */ | 231 | struct list_head hook; /* hook into a list of bitmap blocks */ |
237 | unsigned long start_pfn; /* pfn represented by the first bit */ | 232 | unsigned long start_pfn; /* pfn represented by the first bit */ |
238 | unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ | 233 | unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ |
239 | unsigned long *data; /* bitmap representing pages */ | 234 | unsigned long *data; /* bitmap representing pages */ |
@@ -244,24 +239,15 @@ static inline unsigned long bm_block_bits(struct bm_block *bb) | |||
244 | return bb->end_pfn - bb->start_pfn; | 239 | return bb->end_pfn - bb->start_pfn; |
245 | } | 240 | } |
246 | 241 | ||
247 | struct zone_bitmap { | ||
248 | struct zone_bitmap *next; /* next element of the list */ | ||
249 | unsigned long start_pfn; /* minimal pfn in this zone */ | ||
250 | unsigned long end_pfn; /* maximal pfn in this zone plus 1 */ | ||
251 | struct bm_block *bm_blocks; /* list of bitmap blocks */ | ||
252 | struct bm_block *cur_block; /* recently used bitmap block */ | ||
253 | }; | ||
254 | |||
255 | /* struct bm_position is used for browsing memory bitmaps */ | 242 | /* struct bm_position is used for browsing memory bitmaps */ |
256 | 243 | ||
257 | struct bm_position { | 244 | struct bm_position { |
258 | struct zone_bitmap *zone_bm; | ||
259 | struct bm_block *block; | 245 | struct bm_block *block; |
260 | int bit; | 246 | int bit; |
261 | }; | 247 | }; |
262 | 248 | ||
263 | struct memory_bitmap { | 249 | struct memory_bitmap { |
264 | struct zone_bitmap *zone_bm_list; /* list of zone bitmaps */ | 250 | struct list_head blocks; /* list of bitmap blocks */ |
265 | struct linked_page *p_list; /* list of pages used to store zone | 251 | struct linked_page *p_list; /* list of pages used to store zone |
266 | * bitmap objects and bitmap block | 252 | * bitmap objects and bitmap block |
267 | * objects | 253 | * objects |
@@ -273,11 +259,7 @@ struct memory_bitmap { | |||
273 | 259 | ||
274 | static void memory_bm_position_reset(struct memory_bitmap *bm) | 260 | static void memory_bm_position_reset(struct memory_bitmap *bm) |
275 | { | 261 | { |
276 | struct zone_bitmap *zone_bm; | 262 | bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook); |
277 | |||
278 | zone_bm = bm->zone_bm_list; | ||
279 | bm->cur.zone_bm = zone_bm; | ||
280 | bm->cur.block = zone_bm->bm_blocks; | ||
281 | bm->cur.bit = 0; | 263 | bm->cur.bit = 0; |
282 | } | 264 | } |
283 | 265 | ||
@@ -285,151 +267,184 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); | |||
285 | 267 | ||
286 | /** | 268 | /** |
287 | * create_bm_block_list - create a list of block bitmap objects | 269 | * create_bm_block_list - create a list of block bitmap objects |
270 | * @nr_blocks - number of blocks to allocate | ||
271 | * @list - list to put the allocated blocks into | ||
272 | * @ca - chain allocator to be used for allocating memory | ||
288 | */ | 273 | */ |
289 | 274 | static int create_bm_block_list(unsigned long pages, | |
290 | static inline struct bm_block * | 275 | struct list_head *list, |
291 | create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca) | 276 | struct chain_allocator *ca) |
292 | { | 277 | { |
293 | struct bm_block *bblist = NULL; | 278 | unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); |
294 | 279 | ||
295 | while (nr_blocks-- > 0) { | 280 | while (nr_blocks-- > 0) { |
296 | struct bm_block *bb; | 281 | struct bm_block *bb; |
297 | 282 | ||
298 | bb = chain_alloc(ca, sizeof(struct bm_block)); | 283 | bb = chain_alloc(ca, sizeof(struct bm_block)); |
299 | if (!bb) | 284 | if (!bb) |
300 | return NULL; | 285 | return -ENOMEM; |
301 | 286 | list_add(&bb->hook, list); | |
302 | bb->next = bblist; | ||
303 | bblist = bb; | ||
304 | } | 287 | } |
305 | return bblist; | 288 | |
289 | return 0; | ||
306 | } | 290 | } |
307 | 291 | ||
292 | struct mem_extent { | ||
293 | struct list_head hook; | ||
294 | unsigned long start; | ||
295 | unsigned long end; | ||
296 | }; | ||
297 | |||
308 | /** | 298 | /** |
309 | * create_zone_bm_list - create a list of zone bitmap objects | 299 | * free_mem_extents - free a list of memory extents |
300 | * @list - list of extents to empty | ||
310 | */ | 301 | */ |
302 | static void free_mem_extents(struct list_head *list) | ||
303 | { | ||
304 | struct mem_extent *ext, *aux; | ||
311 | 305 | ||
312 | static inline struct zone_bitmap * | 306 | list_for_each_entry_safe(ext, aux, list, hook) { |
313 | create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca) | 307 | list_del(&ext->hook); |
308 | kfree(ext); | ||
309 | } | ||
310 | } | ||
311 | |||
312 | /** | ||
313 | * create_mem_extents - create a list of memory extents representing | ||
314 | * contiguous ranges of PFNs | ||
315 | * @list - list to put the extents into | ||
316 | * @gfp_mask - mask to use for memory allocations | ||
317 | */ | ||
318 | static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) | ||
314 | { | 319 | { |
315 | struct zone_bitmap *zbmlist = NULL; | 320 | struct zone *zone; |
316 | 321 | ||
317 | while (nr_zones-- > 0) { | 322 | INIT_LIST_HEAD(list); |
318 | struct zone_bitmap *zbm; | ||
319 | 323 | ||
320 | zbm = chain_alloc(ca, sizeof(struct zone_bitmap)); | 324 | for_each_zone(zone) { |
321 | if (!zbm) | 325 | unsigned long zone_start, zone_end; |
322 | return NULL; | 326 | struct mem_extent *ext, *cur, *aux; |
327 | |||
328 | if (!populated_zone(zone)) | ||
329 | continue; | ||
323 | 330 | ||
324 | zbm->next = zbmlist; | 331 | zone_start = zone->zone_start_pfn; |
325 | zbmlist = zbm; | 332 | zone_end = zone->zone_start_pfn + zone->spanned_pages; |
333 | |||
334 | list_for_each_entry(ext, list, hook) | ||
335 | if (zone_start <= ext->end) | ||
336 | break; | ||
337 | |||
338 | if (&ext->hook == list || zone_end < ext->start) { | ||
339 | /* New extent is necessary */ | ||
340 | struct mem_extent *new_ext; | ||
341 | |||
342 | new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask); | ||
343 | if (!new_ext) { | ||
344 | free_mem_extents(list); | ||
345 | return -ENOMEM; | ||
346 | } | ||
347 | new_ext->start = zone_start; | ||
348 | new_ext->end = zone_end; | ||
349 | list_add_tail(&new_ext->hook, &ext->hook); | ||
350 | continue; | ||
351 | } | ||
352 | |||
353 | /* Merge this zone's range of PFNs with the existing one */ | ||
354 | if (zone_start < ext->start) | ||
355 | ext->start = zone_start; | ||
356 | if (zone_end > ext->end) | ||
357 | ext->end = zone_end; | ||
358 | |||
359 | /* More merging may be possible */ | ||
360 | cur = ext; | ||
361 | list_for_each_entry_safe_continue(cur, aux, list, hook) { | ||
362 | if (zone_end < cur->start) | ||
363 | break; | ||
364 | if (zone_end < cur->end) | ||
365 | ext->end = cur->end; | ||
366 | list_del(&cur->hook); | ||
367 | kfree(cur); | ||
368 | } | ||
326 | } | 369 | } |
327 | return zbmlist; | 370 | |
371 | return 0; | ||
328 | } | 372 | } |
329 | 373 | ||
330 | /** | 374 | /** |
331 | * memory_bm_create - allocate memory for a memory bitmap | 375 | * memory_bm_create - allocate memory for a memory bitmap |
332 | */ | 376 | */ |
333 | |||
334 | static int | 377 | static int |
335 | memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) | 378 | memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) |
336 | { | 379 | { |
337 | struct chain_allocator ca; | 380 | struct chain_allocator ca; |
338 | struct zone *zone; | 381 | struct list_head mem_extents; |
339 | struct zone_bitmap *zone_bm; | 382 | struct mem_extent *ext; |
340 | struct bm_block *bb; | 383 | int error; |
341 | unsigned int nr; | ||
342 | 384 | ||
343 | chain_init(&ca, gfp_mask, safe_needed); | 385 | chain_init(&ca, gfp_mask, safe_needed); |
386 | INIT_LIST_HEAD(&bm->blocks); | ||
344 | 387 | ||
345 | /* Compute the number of zones */ | 388 | error = create_mem_extents(&mem_extents, gfp_mask); |
346 | nr = 0; | 389 | if (error) |
347 | for_each_zone(zone) | 390 | return error; |
348 | if (populated_zone(zone)) | ||
349 | nr++; | ||
350 | |||
351 | /* Allocate the list of zones bitmap objects */ | ||
352 | zone_bm = create_zone_bm_list(nr, &ca); | ||
353 | bm->zone_bm_list = zone_bm; | ||
354 | if (!zone_bm) { | ||
355 | chain_free(&ca, PG_UNSAFE_CLEAR); | ||
356 | return -ENOMEM; | ||
357 | } | ||
358 | |||
359 | /* Initialize the zone bitmap objects */ | ||
360 | for_each_zone(zone) { | ||
361 | unsigned long pfn; | ||
362 | 391 | ||
363 | if (!populated_zone(zone)) | 392 | list_for_each_entry(ext, &mem_extents, hook) { |
364 | continue; | 393 | struct bm_block *bb; |
394 | unsigned long pfn = ext->start; | ||
395 | unsigned long pages = ext->end - ext->start; | ||
365 | 396 | ||
366 | zone_bm->start_pfn = zone->zone_start_pfn; | 397 | bb = list_entry(bm->blocks.prev, struct bm_block, hook); |
367 | zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages; | ||
368 | /* Allocate the list of bitmap block objects */ | ||
369 | nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); | ||
370 | bb = create_bm_block_list(nr, &ca); | ||
371 | zone_bm->bm_blocks = bb; | ||
372 | zone_bm->cur_block = bb; | ||
373 | if (!bb) | ||
374 | goto Free; | ||
375 | 398 | ||
376 | nr = zone->spanned_pages; | 399 | error = create_bm_block_list(pages, bm->blocks.prev, &ca); |
377 | pfn = zone->zone_start_pfn; | 400 | if (error) |
378 | /* Initialize the bitmap block objects */ | 401 | goto Error; |
379 | while (bb) { | ||
380 | unsigned long *ptr; | ||
381 | 402 | ||
382 | ptr = get_image_page(gfp_mask, safe_needed); | 403 | list_for_each_entry_continue(bb, &bm->blocks, hook) { |
383 | bb->data = ptr; | 404 | bb->data = get_image_page(gfp_mask, safe_needed); |
384 | if (!ptr) | 405 | if (!bb->data) { |
385 | goto Free; | 406 | error = -ENOMEM; |
407 | goto Error; | ||
408 | } | ||
386 | 409 | ||
387 | bb->start_pfn = pfn; | 410 | bb->start_pfn = pfn; |
388 | if (nr >= BM_BITS_PER_BLOCK) { | 411 | if (pages >= BM_BITS_PER_BLOCK) { |
389 | pfn += BM_BITS_PER_BLOCK; | 412 | pfn += BM_BITS_PER_BLOCK; |
390 | nr -= BM_BITS_PER_BLOCK; | 413 | pages -= BM_BITS_PER_BLOCK; |
391 | } else { | 414 | } else { |
392 | /* This is executed only once in the loop */ | 415 | /* This is executed only once in the loop */ |
393 | pfn += nr; | 416 | pfn += pages; |
394 | } | 417 | } |
395 | bb->end_pfn = pfn; | 418 | bb->end_pfn = pfn; |
396 | bb = bb->next; | ||
397 | } | 419 | } |
398 | zone_bm = zone_bm->next; | ||
399 | } | 420 | } |
421 | |||
400 | bm->p_list = ca.chain; | 422 | bm->p_list = ca.chain; |
401 | memory_bm_position_reset(bm); | 423 | memory_bm_position_reset(bm); |
402 | return 0; | 424 | Exit: |
425 | free_mem_extents(&mem_extents); | ||
426 | return error; | ||
403 | 427 | ||
404 | Free: | 428 | Error: |
405 | bm->p_list = ca.chain; | 429 | bm->p_list = ca.chain; |
406 | memory_bm_free(bm, PG_UNSAFE_CLEAR); | 430 | memory_bm_free(bm, PG_UNSAFE_CLEAR); |
407 | return -ENOMEM; | 431 | goto Exit; |
408 | } | 432 | } |
409 | 433 | ||
410 | /** | 434 | /** |
411 | * memory_bm_free - free memory occupied by the memory bitmap @bm | 435 | * memory_bm_free - free memory occupied by the memory bitmap @bm |
412 | */ | 436 | */ |
413 | |||
414 | static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) | 437 | static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) |
415 | { | 438 | { |
416 | struct zone_bitmap *zone_bm; | 439 | struct bm_block *bb; |
417 | 440 | ||
418 | /* Free the list of bit blocks for each zone_bitmap object */ | 441 | list_for_each_entry(bb, &bm->blocks, hook) |
419 | zone_bm = bm->zone_bm_list; | 442 | if (bb->data) |
420 | while (zone_bm) { | 443 | free_image_page(bb->data, clear_nosave_free); |
421 | struct bm_block *bb; | ||
422 | 444 | ||
423 | bb = zone_bm->bm_blocks; | ||
424 | while (bb) { | ||
425 | if (bb->data) | ||
426 | free_image_page(bb->data, clear_nosave_free); | ||
427 | bb = bb->next; | ||
428 | } | ||
429 | zone_bm = zone_bm->next; | ||
430 | } | ||
431 | free_list_of_pages(bm->p_list, clear_nosave_free); | 445 | free_list_of_pages(bm->p_list, clear_nosave_free); |
432 | bm->zone_bm_list = NULL; | 446 | |
447 | INIT_LIST_HEAD(&bm->blocks); | ||
433 | } | 448 | } |
434 | 449 | ||
435 | /** | 450 | /** |
@@ -437,38 +452,33 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) | |||
437 | * to given pfn. The cur_zone_bm member of @bm and the cur_block member | 452 | * to given pfn. The cur_zone_bm member of @bm and the cur_block member |
438 | * of @bm->cur_zone_bm are updated. | 453 | * of @bm->cur_zone_bm are updated. |
439 | */ | 454 | */ |
440 | |||
441 | static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, | 455 | static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, |
442 | void **addr, unsigned int *bit_nr) | 456 | void **addr, unsigned int *bit_nr) |
443 | { | 457 | { |
444 | struct zone_bitmap *zone_bm; | ||
445 | struct bm_block *bb; | 458 | struct bm_block *bb; |
446 | 459 | ||
447 | /* Check if the pfn is from the current zone */ | 460 | /* |
448 | zone_bm = bm->cur.zone_bm; | 461 | * Check if the pfn corresponds to the current bitmap block and find |
449 | if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { | 462 | * the block where it fits if this is not the case. |
450 | zone_bm = bm->zone_bm_list; | 463 | */ |
451 | /* We don't assume that the zones are sorted by pfns */ | 464 | bb = bm->cur.block; |
452 | while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { | ||
453 | zone_bm = zone_bm->next; | ||
454 | |||
455 | if (!zone_bm) | ||
456 | return -EFAULT; | ||
457 | } | ||
458 | bm->cur.zone_bm = zone_bm; | ||
459 | } | ||
460 | /* Check if the pfn corresponds to the current bitmap block */ | ||
461 | bb = zone_bm->cur_block; | ||
462 | if (pfn < bb->start_pfn) | 465 | if (pfn < bb->start_pfn) |
463 | bb = zone_bm->bm_blocks; | 466 | list_for_each_entry_continue_reverse(bb, &bm->blocks, hook) |
467 | if (pfn >= bb->start_pfn) | ||
468 | break; | ||
464 | 469 | ||
465 | while (pfn >= bb->end_pfn) { | 470 | if (pfn >= bb->end_pfn) |
466 | bb = bb->next; | 471 | list_for_each_entry_continue(bb, &bm->blocks, hook) |
472 | if (pfn >= bb->start_pfn && pfn < bb->end_pfn) | ||
473 | break; | ||
467 | 474 | ||
468 | BUG_ON(!bb); | 475 | if (&bb->hook == &bm->blocks) |
469 | } | 476 | return -EFAULT; |
470 | zone_bm->cur_block = bb; | 477 | |
478 | /* The block has been found */ | ||
479 | bm->cur.block = bb; | ||
471 | pfn -= bb->start_pfn; | 480 | pfn -= bb->start_pfn; |
481 | bm->cur.bit = pfn + 1; | ||
472 | *bit_nr = pfn; | 482 | *bit_nr = pfn; |
473 | *addr = bb->data; | 483 | *addr = bb->data; |
474 | return 0; | 484 | return 0; |
@@ -519,6 +529,14 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) | |||
519 | return test_bit(bit, addr); | 529 | return test_bit(bit, addr); |
520 | } | 530 | } |
521 | 531 | ||
532 | static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn) | ||
533 | { | ||
534 | void *addr; | ||
535 | unsigned int bit; | ||
536 | |||
537 | return !memory_bm_find_bit(bm, pfn, &addr, &bit); | ||
538 | } | ||
539 | |||
522 | /** | 540 | /** |
523 | * memory_bm_next_pfn - find the pfn that corresponds to the next set bit | 541 | * memory_bm_next_pfn - find the pfn that corresponds to the next set bit |
524 | * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is | 542 | * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is |
@@ -530,29 +548,21 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) | |||
530 | 548 | ||
531 | static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) | 549 | static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) |
532 | { | 550 | { |
533 | struct zone_bitmap *zone_bm; | ||
534 | struct bm_block *bb; | 551 | struct bm_block *bb; |
535 | int bit; | 552 | int bit; |
536 | 553 | ||
554 | bb = bm->cur.block; | ||
537 | do { | 555 | do { |
538 | bb = bm->cur.block; | 556 | bit = bm->cur.bit; |
539 | do { | 557 | bit = find_next_bit(bb->data, bm_block_bits(bb), bit); |
540 | bit = bm->cur.bit; | 558 | if (bit < bm_block_bits(bb)) |
541 | bit = find_next_bit(bb->data, bm_block_bits(bb), bit); | 559 | goto Return_pfn; |
542 | if (bit < bm_block_bits(bb)) | 560 | |
543 | goto Return_pfn; | 561 | bb = list_entry(bb->hook.next, struct bm_block, hook); |
544 | 562 | bm->cur.block = bb; | |
545 | bb = bb->next; | 563 | bm->cur.bit = 0; |
546 | bm->cur.block = bb; | 564 | } while (&bb->hook != &bm->blocks); |
547 | bm->cur.bit = 0; | 565 | |
548 | } while (bb); | ||
549 | zone_bm = bm->cur.zone_bm->next; | ||
550 | if (zone_bm) { | ||
551 | bm->cur.zone_bm = zone_bm; | ||
552 | bm->cur.block = zone_bm->bm_blocks; | ||
553 | bm->cur.bit = 0; | ||
554 | } | ||
555 | } while (zone_bm); | ||
556 | memory_bm_position_reset(bm); | 566 | memory_bm_position_reset(bm); |
557 | return BM_END_OF_MAP; | 567 | return BM_END_OF_MAP; |
558 | 568 | ||
@@ -808,8 +818,7 @@ static unsigned int count_free_highmem_pages(void) | |||
808 | * We should save the page if it isn't Nosave or NosaveFree, or Reserved, | 818 | * We should save the page if it isn't Nosave or NosaveFree, or Reserved, |
809 | * and it isn't a part of a free chunk of pages. | 819 | * and it isn't a part of a free chunk of pages. |
810 | */ | 820 | */ |
811 | 821 | static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) | |
812 | static struct page *saveable_highmem_page(unsigned long pfn) | ||
813 | { | 822 | { |
814 | struct page *page; | 823 | struct page *page; |
815 | 824 | ||
@@ -817,6 +826,8 @@ static struct page *saveable_highmem_page(unsigned long pfn) | |||
817 | return NULL; | 826 | return NULL; |
818 | 827 | ||
819 | page = pfn_to_page(pfn); | 828 | page = pfn_to_page(pfn); |
829 | if (page_zone(page) != zone) | ||
830 | return NULL; | ||
820 | 831 | ||
821 | BUG_ON(!PageHighMem(page)); | 832 | BUG_ON(!PageHighMem(page)); |
822 | 833 | ||
@@ -846,13 +857,16 @@ unsigned int count_highmem_pages(void) | |||
846 | mark_free_pages(zone); | 857 | mark_free_pages(zone); |
847 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 858 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; |
848 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 859 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
849 | if (saveable_highmem_page(pfn)) | 860 | if (saveable_highmem_page(zone, pfn)) |
850 | n++; | 861 | n++; |
851 | } | 862 | } |
852 | return n; | 863 | return n; |
853 | } | 864 | } |
854 | #else | 865 | #else |
855 | static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; } | 866 | static inline void *saveable_highmem_page(struct zone *z, unsigned long p) |
867 | { | ||
868 | return NULL; | ||
869 | } | ||
856 | #endif /* CONFIG_HIGHMEM */ | 870 | #endif /* CONFIG_HIGHMEM */ |
857 | 871 | ||
858 | /** | 872 | /** |
@@ -863,8 +877,7 @@ static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; } | |||
863 | * of pages statically defined as 'unsaveable', and it isn't a part of | 877 | * of pages statically defined as 'unsaveable', and it isn't a part of |
864 | * a free chunk of pages. | 878 | * a free chunk of pages. |
865 | */ | 879 | */ |
866 | 880 | static struct page *saveable_page(struct zone *zone, unsigned long pfn) | |
867 | static struct page *saveable_page(unsigned long pfn) | ||
868 | { | 881 | { |
869 | struct page *page; | 882 | struct page *page; |
870 | 883 | ||
@@ -872,6 +885,8 @@ static struct page *saveable_page(unsigned long pfn) | |||
872 | return NULL; | 885 | return NULL; |
873 | 886 | ||
874 | page = pfn_to_page(pfn); | 887 | page = pfn_to_page(pfn); |
888 | if (page_zone(page) != zone) | ||
889 | return NULL; | ||
875 | 890 | ||
876 | BUG_ON(PageHighMem(page)); | 891 | BUG_ON(PageHighMem(page)); |
877 | 892 | ||
@@ -903,7 +918,7 @@ unsigned int count_data_pages(void) | |||
903 | mark_free_pages(zone); | 918 | mark_free_pages(zone); |
904 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 919 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; |
905 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 920 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
906 | if(saveable_page(pfn)) | 921 | if (saveable_page(zone, pfn)) |
907 | n++; | 922 | n++; |
908 | } | 923 | } |
909 | return n; | 924 | return n; |
@@ -944,7 +959,7 @@ static inline struct page * | |||
944 | page_is_saveable(struct zone *zone, unsigned long pfn) | 959 | page_is_saveable(struct zone *zone, unsigned long pfn) |
945 | { | 960 | { |
946 | return is_highmem(zone) ? | 961 | return is_highmem(zone) ? |
947 | saveable_highmem_page(pfn) : saveable_page(pfn); | 962 | saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn); |
948 | } | 963 | } |
949 | 964 | ||
950 | static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | 965 | static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) |
@@ -966,7 +981,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | |||
966 | * data modified by kmap_atomic() | 981 | * data modified by kmap_atomic() |
967 | */ | 982 | */ |
968 | safe_copy_page(buffer, s_page); | 983 | safe_copy_page(buffer, s_page); |
969 | dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0); | 984 | dst = kmap_atomic(d_page, KM_USER0); |
970 | memcpy(dst, buffer, PAGE_SIZE); | 985 | memcpy(dst, buffer, PAGE_SIZE); |
971 | kunmap_atomic(dst, KM_USER0); | 986 | kunmap_atomic(dst, KM_USER0); |
972 | } else { | 987 | } else { |
@@ -975,7 +990,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | |||
975 | } | 990 | } |
976 | } | 991 | } |
977 | #else | 992 | #else |
978 | #define page_is_saveable(zone, pfn) saveable_page(pfn) | 993 | #define page_is_saveable(zone, pfn) saveable_page(zone, pfn) |
979 | 994 | ||
980 | static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | 995 | static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) |
981 | { | 996 | { |
@@ -1459,9 +1474,7 @@ load_header(struct swsusp_info *info) | |||
1459 | * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set | 1474 | * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set |
1460 | * the corresponding bit in the memory bitmap @bm | 1475 | * the corresponding bit in the memory bitmap @bm |
1461 | */ | 1476 | */ |
1462 | 1477 | static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | |
1463 | static inline void | ||
1464 | unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | ||
1465 | { | 1478 | { |
1466 | int j; | 1479 | int j; |
1467 | 1480 | ||
@@ -1469,8 +1482,13 @@ unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | |||
1469 | if (unlikely(buf[j] == BM_END_OF_MAP)) | 1482 | if (unlikely(buf[j] == BM_END_OF_MAP)) |
1470 | break; | 1483 | break; |
1471 | 1484 | ||
1472 | memory_bm_set_bit(bm, buf[j]); | 1485 | if (memory_bm_pfn_present(bm, buf[j])) |
1486 | memory_bm_set_bit(bm, buf[j]); | ||
1487 | else | ||
1488 | return -EFAULT; | ||
1473 | } | 1489 | } |
1490 | |||
1491 | return 0; | ||
1474 | } | 1492 | } |
1475 | 1493 | ||
1476 | /* List of "safe" pages that may be used to store data loaded from the suspend | 1494 | /* List of "safe" pages that may be used to store data loaded from the suspend |
@@ -1608,7 +1626,7 @@ get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) | |||
1608 | pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); | 1626 | pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); |
1609 | if (!pbe) { | 1627 | if (!pbe) { |
1610 | swsusp_free(); | 1628 | swsusp_free(); |
1611 | return NULL; | 1629 | return ERR_PTR(-ENOMEM); |
1612 | } | 1630 | } |
1613 | pbe->orig_page = page; | 1631 | pbe->orig_page = page; |
1614 | if (safe_highmem_pages > 0) { | 1632 | if (safe_highmem_pages > 0) { |
@@ -1677,7 +1695,7 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p) | |||
1677 | static inline void * | 1695 | static inline void * |
1678 | get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) | 1696 | get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) |
1679 | { | 1697 | { |
1680 | return NULL; | 1698 | return ERR_PTR(-EINVAL); |
1681 | } | 1699 | } |
1682 | 1700 | ||
1683 | static inline void copy_last_highmem_page(void) {} | 1701 | static inline void copy_last_highmem_page(void) {} |
@@ -1788,8 +1806,13 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) | |||
1788 | static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) | 1806 | static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) |
1789 | { | 1807 | { |
1790 | struct pbe *pbe; | 1808 | struct pbe *pbe; |
1791 | struct page *page = pfn_to_page(memory_bm_next_pfn(bm)); | 1809 | struct page *page; |
1810 | unsigned long pfn = memory_bm_next_pfn(bm); | ||
1792 | 1811 | ||
1812 | if (pfn == BM_END_OF_MAP) | ||
1813 | return ERR_PTR(-EFAULT); | ||
1814 | |||
1815 | page = pfn_to_page(pfn); | ||
1793 | if (PageHighMem(page)) | 1816 | if (PageHighMem(page)) |
1794 | return get_highmem_page_buffer(page, ca); | 1817 | return get_highmem_page_buffer(page, ca); |
1795 | 1818 | ||
@@ -1805,7 +1828,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) | |||
1805 | pbe = chain_alloc(ca, sizeof(struct pbe)); | 1828 | pbe = chain_alloc(ca, sizeof(struct pbe)); |
1806 | if (!pbe) { | 1829 | if (!pbe) { |
1807 | swsusp_free(); | 1830 | swsusp_free(); |
1808 | return NULL; | 1831 | return ERR_PTR(-ENOMEM); |
1809 | } | 1832 | } |
1810 | pbe->orig_address = page_address(page); | 1833 | pbe->orig_address = page_address(page); |
1811 | pbe->address = safe_pages_list; | 1834 | pbe->address = safe_pages_list; |
@@ -1868,7 +1891,10 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) | |||
1868 | return error; | 1891 | return error; |
1869 | 1892 | ||
1870 | } else if (handle->prev <= nr_meta_pages) { | 1893 | } else if (handle->prev <= nr_meta_pages) { |
1871 | unpack_orig_pfns(buffer, ©_bm); | 1894 | error = unpack_orig_pfns(buffer, ©_bm); |
1895 | if (error) | ||
1896 | return error; | ||
1897 | |||
1872 | if (handle->prev == nr_meta_pages) { | 1898 | if (handle->prev == nr_meta_pages) { |
1873 | error = prepare_image(&orig_bm, ©_bm); | 1899 | error = prepare_image(&orig_bm, ©_bm); |
1874 | if (error) | 1900 | if (error) |
@@ -1879,12 +1905,14 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) | |||
1879 | restore_pblist = NULL; | 1905 | restore_pblist = NULL; |
1880 | handle->buffer = get_buffer(&orig_bm, &ca); | 1906 | handle->buffer = get_buffer(&orig_bm, &ca); |
1881 | handle->sync_read = 0; | 1907 | handle->sync_read = 0; |
1882 | if (!handle->buffer) | 1908 | if (IS_ERR(handle->buffer)) |
1883 | return -ENOMEM; | 1909 | return PTR_ERR(handle->buffer); |
1884 | } | 1910 | } |
1885 | } else { | 1911 | } else { |
1886 | copy_last_highmem_page(); | 1912 | copy_last_highmem_page(); |
1887 | handle->buffer = get_buffer(&orig_bm, &ca); | 1913 | handle->buffer = get_buffer(&orig_bm, &ca); |
1914 | if (IS_ERR(handle->buffer)) | ||
1915 | return PTR_ERR(handle->buffer); | ||
1888 | if (handle->buffer != buffer) | 1916 | if (handle->buffer != buffer) |
1889 | handle->sync_read = 0; | 1917 | handle->sync_read = 0; |
1890 | } | 1918 | } |
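
The snapshot.c changes above replace the per-zone bitmap chains with a single list of bm_block objects built from "memory extents": create_mem_extents() walks every populated zone and folds its PFN range into a sorted list of non-overlapping [start, end) extents, merging adjacent and overlapping zones as it goes. A minimal userspace sketch of that merging step, using a plain singly linked list and made-up zone ranges in place of the kernel's list_head machinery and for_each_zone() (all names and numbers here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct extent {				/* simplified stand-in for struct mem_extent */
	unsigned long start, end;	/* [start, end) range of PFNs */
	struct extent *next;
};

/* Insert [start, end) into a list kept sorted by start, merging overlaps. */
static void add_range(struct extent **head, unsigned long start, unsigned long end)
{
	struct extent **pp = head, *ext;

	while (*pp && (*pp)->end < start)	/* skip extents entirely below the new range */
		pp = &(*pp)->next;

	if (!*pp || end < (*pp)->start) {	/* no overlap: a new extent is necessary */
		ext = malloc(sizeof(*ext));
		if (!ext)
			exit(EXIT_FAILURE);
		ext->start = start;
		ext->end = end;
		ext->next = *pp;
		*pp = ext;
		return;
	}

	ext = *pp;				/* overlap or adjacency: grow this extent */
	if (start < ext->start)
		ext->start = start;
	if (end > ext->end)
		ext->end = end;

	while (ext->next && ext->next->start <= ext->end) {	/* more merging may be possible */
		struct extent *victim = ext->next;

		if (victim->end > ext->end)
			ext->end = victim->end;
		ext->next = victim->next;
		free(victim);
	}
}

int main(void)
{
	/* Made-up zone ranges: a low zone, a normal zone and a zone touching it. */
	struct extent *list = NULL, *ext;

	add_range(&list, 0, 4096);
	add_range(&list, 4096, 225280);
	add_range(&list, 225280, 262144);

	for (ext = list; ext; ext = ext->next)
		printf("extent: %lu-%lu\n", ext->start, ext->end);
	return 0;
}

The kernel version performs the same walk with list_for_each_entry()/list_for_each_entry_safe_continue() and returns -ENOMEM instead of aborting when an allocation fails.
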
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 023ff2a31d89..a92c91451559 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c | |||
@@ -262,3 +262,125 @@ int swsusp_shrink_memory(void) | |||
262 | 262 | ||
263 | return 0; | 263 | return 0; |
264 | } | 264 | } |
265 | |||
266 | /* | ||
267 | * Platforms, like ACPI, may want us to save some memory used by them during | ||
268 | * hibernation and to restore the contents of this memory during the subsequent | ||
269 | * resume. The code below implements a mechanism allowing us to do that. | ||
270 | */ | ||
271 | |||
272 | struct nvs_page { | ||
273 | unsigned long phys_start; | ||
274 | unsigned int size; | ||
275 | void *kaddr; | ||
276 | void *data; | ||
277 | struct list_head node; | ||
278 | }; | ||
279 | |||
280 | static LIST_HEAD(nvs_list); | ||
281 | |||
282 | /** | ||
283 | * hibernate_nvs_register - register platform NVS memory region to save | ||
284 | * @start - physical address of the region | ||
285 | * @size - size of the region | ||
286 | * | ||
287 | * The NVS region need not be page-aligned (both ends) and we arrange | ||
288 | * things so that the data from page-aligned addresses in this region will | ||
289 | * be copied into separate RAM pages. | ||
290 | */ | ||
291 | int hibernate_nvs_register(unsigned long start, unsigned long size) | ||
292 | { | ||
293 | struct nvs_page *entry, *next; | ||
294 | |||
295 | while (size > 0) { | ||
296 | unsigned int nr_bytes; | ||
297 | |||
298 | entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); | ||
299 | if (!entry) | ||
300 | goto Error; | ||
301 | |||
302 | list_add_tail(&entry->node, &nvs_list); | ||
303 | entry->phys_start = start; | ||
304 | nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); | ||
305 | entry->size = (size < nr_bytes) ? size : nr_bytes; | ||
306 | |||
307 | start += entry->size; | ||
308 | size -= entry->size; | ||
309 | } | ||
310 | return 0; | ||
311 | |||
312 | Error: | ||
313 | list_for_each_entry_safe(entry, next, &nvs_list, node) { | ||
314 | list_del(&entry->node); | ||
315 | kfree(entry); | ||
316 | } | ||
317 | return -ENOMEM; | ||
318 | } | ||
319 | |||
320 | /** | ||
321 | * hibernate_nvs_free - free data pages allocated for saving NVS regions | ||
322 | */ | ||
323 | void hibernate_nvs_free(void) | ||
324 | { | ||
325 | struct nvs_page *entry; | ||
326 | |||
327 | list_for_each_entry(entry, &nvs_list, node) | ||
328 | if (entry->data) { | ||
329 | free_page((unsigned long)entry->data); | ||
330 | entry->data = NULL; | ||
331 | if (entry->kaddr) { | ||
332 | iounmap(entry->kaddr); | ||
333 | entry->kaddr = NULL; | ||
334 | } | ||
335 | } | ||
336 | } | ||
337 | |||
338 | /** | ||
339 | * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions | ||
340 | */ | ||
341 | int hibernate_nvs_alloc(void) | ||
342 | { | ||
343 | struct nvs_page *entry; | ||
344 | |||
345 | list_for_each_entry(entry, &nvs_list, node) { | ||
346 | entry->data = (void *)__get_free_page(GFP_KERNEL); | ||
347 | if (!entry->data) { | ||
348 | hibernate_nvs_free(); | ||
349 | return -ENOMEM; | ||
350 | } | ||
351 | } | ||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | /** | ||
356 | * hibernate_nvs_save - save NVS memory regions | ||
357 | */ | ||
358 | void hibernate_nvs_save(void) | ||
359 | { | ||
360 | struct nvs_page *entry; | ||
361 | |||
362 | printk(KERN_INFO "PM: Saving platform NVS memory\n"); | ||
363 | |||
364 | list_for_each_entry(entry, &nvs_list, node) | ||
365 | if (entry->data) { | ||
366 | entry->kaddr = ioremap(entry->phys_start, entry->size); | ||
367 | memcpy(entry->data, entry->kaddr, entry->size); | ||
368 | } | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * hibernate_nvs_restore - restore NVS memory regions | ||
373 | * | ||
374 | * This function is going to be called with interrupts disabled, so it | ||
375 | * cannot iounmap the virtual addresses used to access the NVS region. | ||
376 | */ | ||
377 | void hibernate_nvs_restore(void) | ||
378 | { | ||
379 | struct nvs_page *entry; | ||
380 | |||
381 | printk(KERN_INFO "PM: Restoring platform NVS memory\n"); | ||
382 | |||
383 | list_for_each_entry(entry, &nvs_list, node) | ||
384 | if (entry->data) | ||
385 | memcpy(entry->kaddr, entry->data, entry->size); | ||
386 | } | ||
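
The NVS helpers added above are meant to be driven in a fixed order around the hibernation image: regions are registered once at boot, backing pages are allocated and the region contents saved before the image is created, the contents are written back early during resume (with interrupts still disabled), and the backing pages are released afterwards. A hedged, hypothetical sketch of that calling sequence from platform code follows; the hibernate_nvs_*() names are the ones declared in this patch, but the caller, physical address and size are invented for illustration, and the real user in this series is the ACPI core, which is not shown here:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/suspend.h>

/*
 * Hypothetical platform code; the physical range below is invented for
 * illustration.  Only the hibernate_nvs_*() calls come from this patch.
 */
static int __init example_nvs_init(void)
{
	/* Tell the hibernation core which physical range must survive S4. */
	return hibernate_nvs_register(0x000e0000, 0x20000);
}

static int example_pre_snapshot(void)
{
	int error;

	error = hibernate_nvs_alloc();	/* one RAM page per registered chunk */
	if (error)
		return error;

	hibernate_nvs_save();		/* ioremap()s the region and copies it out */
	return 0;
}

static void example_early_resume(void)
{
	/* Runs with interrupts disabled; reuses the mapping set up by _save(). */
	hibernate_nvs_restore();
}

static void example_post_resume(void)
{
	hibernate_nvs_free();		/* drop the data pages and iounmap() the region */
}

Note that hibernate_nvs_register() splits the range at page boundaries internally, so callers do not need to align the start address or the size.
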
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a9d9760dc7b6..8b0daf0662ef 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -168,7 +168,13 @@ rb_event_length(struct ring_buffer_event *event) | |||
168 | */ | 168 | */ |
169 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) | 169 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) |
170 | { | 170 | { |
171 | return rb_event_length(event); | 171 | unsigned length = rb_event_length(event); |
172 | if (event->type != RINGBUF_TYPE_DATA) | ||
173 | return length; | ||
174 | length -= RB_EVNT_HDR_SIZE; | ||
175 | if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) | ||
176 | length -= sizeof(event->array[0]); | ||
177 | return length; | ||
172 | } | 178 | } |
173 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | 179 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); |
174 | 180 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index bab8bcedd62e..5f736f1ceeae 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock); | |||
170 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; | 170 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
171 | static struct list_head ptype_all __read_mostly; /* Taps */ | 171 | static struct list_head ptype_all __read_mostly; /* Taps */ |
172 | 172 | ||
173 | #ifdef CONFIG_NET_DMA | ||
174 | struct net_dma { | ||
175 | struct dma_client client; | ||
176 | spinlock_t lock; | ||
177 | cpumask_t channel_mask; | ||
178 | struct dma_chan **channels; | ||
179 | }; | ||
180 | |||
181 | static enum dma_state_client | ||
182 | netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | ||
183 | enum dma_state state); | ||
184 | |||
185 | static struct net_dma net_dma = { | ||
186 | .client = { | ||
187 | .event_callback = netdev_dma_event, | ||
188 | }, | ||
189 | }; | ||
190 | #endif | ||
191 | |||
192 | /* | 173 | /* |
193 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl | 174 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl |
194 | * semaphore. | 175 | * semaphore. |
@@ -2754,14 +2735,7 @@ out: | |||
2754 | * There may not be any more sk_buffs coming right now, so push | 2735 | * There may not be any more sk_buffs coming right now, so push |
2755 | * any pending DMA copies to hardware | 2736 | * any pending DMA copies to hardware |
2756 | */ | 2737 | */ |
2757 | if (!cpus_empty(net_dma.channel_mask)) { | 2738 | dma_issue_pending_all(); |
2758 | int chan_idx; | ||
2759 | for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) { | ||
2760 | struct dma_chan *chan = net_dma.channels[chan_idx]; | ||
2761 | if (chan) | ||
2762 | dma_async_memcpy_issue_pending(chan); | ||
2763 | } | ||
2764 | } | ||
2765 | #endif | 2739 | #endif |
2766 | 2740 | ||
2767 | return; | 2741 | return; |
@@ -4952,122 +4926,6 @@ static int dev_cpu_callback(struct notifier_block *nfb, | |||
4952 | return NOTIFY_OK; | 4926 | return NOTIFY_OK; |
4953 | } | 4927 | } |
4954 | 4928 | ||
4955 | #ifdef CONFIG_NET_DMA | ||
4956 | /** | ||
4957 | * net_dma_rebalance - try to maintain one DMA channel per CPU | ||
4958 | * @net_dma: DMA client and associated data (lock, channels, channel_mask) | ||
4959 | * | ||
4960 | * This is called when the number of channels allocated to the net_dma client | ||
4961 | * changes. The net_dma client tries to have one DMA channel per CPU. | ||
4962 | */ | ||
4963 | |||
4964 | static void net_dma_rebalance(struct net_dma *net_dma) | ||
4965 | { | ||
4966 | unsigned int cpu, i, n, chan_idx; | ||
4967 | struct dma_chan *chan; | ||
4968 | |||
4969 | if (cpus_empty(net_dma->channel_mask)) { | ||
4970 | for_each_online_cpu(cpu) | ||
4971 | rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL); | ||
4972 | return; | ||
4973 | } | ||
4974 | |||
4975 | i = 0; | ||
4976 | cpu = first_cpu(cpu_online_map); | ||
4977 | |||
4978 | for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) { | ||
4979 | chan = net_dma->channels[chan_idx]; | ||
4980 | |||
4981 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) | ||
4982 | + (i < (num_online_cpus() % | ||
4983 | cpus_weight(net_dma->channel_mask)) ? 1 : 0)); | ||
4984 | |||
4985 | while(n) { | ||
4986 | per_cpu(softnet_data, cpu).net_dma = chan; | ||
4987 | cpu = next_cpu(cpu, cpu_online_map); | ||
4988 | n--; | ||
4989 | } | ||
4990 | i++; | ||
4991 | } | ||
4992 | } | ||
4993 | |||
4994 | /** | ||
4995 | * netdev_dma_event - event callback for the net_dma_client | ||
4996 | * @client: should always be net_dma_client | ||
4997 | * @chan: DMA channel for the event | ||
4998 | * @state: DMA state to be handled | ||
4999 | */ | ||
5000 | static enum dma_state_client | ||
5001 | netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | ||
5002 | enum dma_state state) | ||
5003 | { | ||
5004 | int i, found = 0, pos = -1; | ||
5005 | struct net_dma *net_dma = | ||
5006 | container_of(client, struct net_dma, client); | ||
5007 | enum dma_state_client ack = DMA_DUP; /* default: take no action */ | ||
5008 | |||
5009 | spin_lock(&net_dma->lock); | ||
5010 | switch (state) { | ||
5011 | case DMA_RESOURCE_AVAILABLE: | ||
5012 | for (i = 0; i < nr_cpu_ids; i++) | ||
5013 | if (net_dma->channels[i] == chan) { | ||
5014 | found = 1; | ||
5015 | break; | ||
5016 | } else if (net_dma->channels[i] == NULL && pos < 0) | ||
5017 | pos = i; | ||
5018 | |||
5019 | if (!found && pos >= 0) { | ||
5020 | ack = DMA_ACK; | ||
5021 | net_dma->channels[pos] = chan; | ||
5022 | cpu_set(pos, net_dma->channel_mask); | ||
5023 | net_dma_rebalance(net_dma); | ||
5024 | } | ||
5025 | break; | ||
5026 | case DMA_RESOURCE_REMOVED: | ||
5027 | for (i = 0; i < nr_cpu_ids; i++) | ||
5028 | if (net_dma->channels[i] == chan) { | ||
5029 | found = 1; | ||
5030 | pos = i; | ||
5031 | break; | ||
5032 | } | ||
5033 | |||
5034 | if (found) { | ||
5035 | ack = DMA_ACK; | ||
5036 | cpu_clear(pos, net_dma->channel_mask); | ||
5037 | net_dma->channels[i] = NULL; | ||
5038 | net_dma_rebalance(net_dma); | ||
5039 | } | ||
5040 | break; | ||
5041 | default: | ||
5042 | break; | ||
5043 | } | ||
5044 | spin_unlock(&net_dma->lock); | ||
5045 | |||
5046 | return ack; | ||
5047 | } | ||
5048 | |||
5049 | /** | ||
5050 | * netdev_dma_register - register the networking subsystem as a DMA client | ||
5051 | */ | ||
5052 | static int __init netdev_dma_register(void) | ||
5053 | { | ||
5054 | net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma), | ||
5055 | GFP_KERNEL); | ||
5056 | if (unlikely(!net_dma.channels)) { | ||
5057 | printk(KERN_NOTICE | ||
5058 | "netdev_dma: no memory for net_dma.channels\n"); | ||
5059 | return -ENOMEM; | ||
5060 | } | ||
5061 | spin_lock_init(&net_dma.lock); | ||
5062 | dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask); | ||
5063 | dma_async_client_register(&net_dma.client); | ||
5064 | dma_async_client_chan_request(&net_dma.client); | ||
5065 | return 0; | ||
5066 | } | ||
5067 | |||
5068 | #else | ||
5069 | static int __init netdev_dma_register(void) { return -ENODEV; } | ||
5070 | #endif /* CONFIG_NET_DMA */ | ||
5071 | 4929 | ||
5072 | /** | 4930 | /** |
5073 | * netdev_increment_features - increment feature set by one | 4931 | * netdev_increment_features - increment feature set by one |
@@ -5287,14 +5145,15 @@ static int __init net_dev_init(void) | |||
5287 | if (register_pernet_device(&default_device_ops)) | 5145 | if (register_pernet_device(&default_device_ops)) |
5288 | goto out; | 5146 | goto out; |
5289 | 5147 | ||
5290 | netdev_dma_register(); | ||
5291 | |||
5292 | open_softirq(NET_TX_SOFTIRQ, net_tx_action); | 5148 | open_softirq(NET_TX_SOFTIRQ, net_tx_action); |
5293 | open_softirq(NET_RX_SOFTIRQ, net_rx_action); | 5149 | open_softirq(NET_RX_SOFTIRQ, net_rx_action); |
5294 | 5150 | ||
5295 | hotcpu_notifier(dev_cpu_callback, 0); | 5151 | hotcpu_notifier(dev_cpu_callback, 0); |
5296 | dst_init(); | 5152 | dst_init(); |
5297 | dev_mcast_init(); | 5153 | dev_mcast_init(); |
5154 | #ifdef CONFIG_NET_DMA | ||
5155 | dmaengine_get(); | ||
5156 | #endif | ||
5298 | rc = 0; | 5157 | rc = 0; |
5299 | out: | 5158 | out: |
5300 | return rc; | 5159 | return rc; |
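
The net/core/dev.c hunk above swaps the old per-client net_dma bookkeeping for the new reference-counted dmaengine interface: the networking core now takes a reference with dmaengine_get() at init time, asks for a channel only at the point of use with dma_find_channel(DMA_MEMCPY), and kicks all channels at once with dma_issue_pending_all(). A minimal sketch of that consumer pattern as a hypothetical module; it assumes the 2.6.29-era helpers dma_async_memcpy_buf_to_buf(), dma_sync_wait() and dmaengine_put(), which belong to the same dmaengine rework but do not appear in this hunk:

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/string.h>

#define BUF_SIZE 4096	/* illustrative buffer size */

/* Hypothetical module demonstrating the new dmaengine consumer pattern. */
static int __init memcpy_offload_init(void)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;
	void *src, *dst;

	src = kzalloc(BUF_SIZE, GFP_KERNEL);
	dst = kzalloc(BUF_SIZE, GFP_KERNEL);
	if (!src || !dst) {
		kfree(src);
		kfree(dst);
		return -ENOMEM;
	}

	dmaengine_get();	/* hold the dmaengine core, as net_dev_init() now does */

	chan = dma_find_channel(DMA_MEMCPY);	/* may be NULL if no engine is present */
	if (chan) {
		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, BUF_SIZE);
		if (cookie < 0) {
			memcpy(dst, src, BUF_SIZE);	/* submission failed: CPU copy */
		} else {
			dma_issue_pending_all();	/* kick the hardware, like net_rx_action() */
			dma_sync_wait(chan, cookie);	/* poll until the copy is done */
		}
	} else {
		memcpy(dst, src, BUF_SIZE);		/* no offload engine available */
	}

	kfree(src);
	kfree(dst);
	return 0;	/* keep the dmaengine reference until the module is removed */
}

static void __exit memcpy_offload_exit(void)
{
	dmaengine_put();	/* balance the dmaengine_get() from init */
}

module_init(memcpy_offload_init);
module_exit(memcpy_offload_exit);
MODULE_LICENSE("GPL");

The TCP hunks that follow use the same pattern at the point of use: get_softnet_dma() and its per-CPU channel table disappear, and each receive path simply asks dma_find_channel(DMA_MEMCPY) for a channel when it needs one.
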
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bd6ff907d9e4..ce572f9dff02 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1313,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1313 | if ((available < target) && | 1313 | if ((available < target) && |
1314 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && | 1314 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && |
1315 | !sysctl_tcp_low_latency && | 1315 | !sysctl_tcp_low_latency && |
1316 | __get_cpu_var(softnet_data).net_dma) { | 1316 | dma_find_channel(DMA_MEMCPY)) { |
1317 | preempt_enable_no_resched(); | 1317 | preempt_enable_no_resched(); |
1318 | tp->ucopy.pinned_list = | 1318 | tp->ucopy.pinned_list = |
1319 | dma_pin_iovec_pages(msg->msg_iov, len); | 1319 | dma_pin_iovec_pages(msg->msg_iov, len); |
@@ -1523,7 +1523,7 @@ do_prequeue: | |||
1523 | if (!(flags & MSG_TRUNC)) { | 1523 | if (!(flags & MSG_TRUNC)) { |
1524 | #ifdef CONFIG_NET_DMA | 1524 | #ifdef CONFIG_NET_DMA |
1525 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1525 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1526 | tp->ucopy.dma_chan = get_softnet_dma(); | 1526 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1527 | 1527 | ||
1528 | if (tp->ucopy.dma_chan) { | 1528 | if (tp->ucopy.dma_chan) { |
1529 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( | 1529 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( |
@@ -1628,7 +1628,6 @@ skip_copy: | |||
1628 | 1628 | ||
1629 | /* Safe to free early-copied skbs now */ | 1629 | /* Safe to free early-copied skbs now */ |
1630 | __skb_queue_purge(&sk->sk_async_wait_queue); | 1630 | __skb_queue_purge(&sk->sk_async_wait_queue); |
1631 | dma_chan_put(tp->ucopy.dma_chan); | ||
1632 | tp->ucopy.dma_chan = NULL; | 1631 | tp->ucopy.dma_chan = NULL; |
1633 | } | 1632 | } |
1634 | if (tp->ucopy.pinned_list) { | 1633 | if (tp->ucopy.pinned_list) { |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 99b7ecbe8893..a6961d75c7ea 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, | |||
5005 | return 0; | 5005 | return 0; |
5006 | 5006 | ||
5007 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 5007 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
5008 | tp->ucopy.dma_chan = get_softnet_dma(); | 5008 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
5009 | 5009 | ||
5010 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { | 5010 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { |
5011 | 5011 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9d839fa9331e..19d7b429a262 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1594,7 +1594,7 @@ process: | |||
1594 | #ifdef CONFIG_NET_DMA | 1594 | #ifdef CONFIG_NET_DMA |
1595 | struct tcp_sock *tp = tcp_sk(sk); | 1595 | struct tcp_sock *tp = tcp_sk(sk); |
1596 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1596 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1597 | tp->ucopy.dma_chan = get_softnet_dma(); | 1597 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1598 | if (tp->ucopy.dma_chan) | 1598 | if (tp->ucopy.dma_chan) |
1599 | ret = tcp_v4_do_rcv(sk, skb); | 1599 | ret = tcp_v4_do_rcv(sk, skb); |
1600 | else | 1600 | else |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 1297306d729c..e5b85d45bee8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1675,7 +1675,7 @@ process: | |||
1675 | #ifdef CONFIG_NET_DMA | 1675 | #ifdef CONFIG_NET_DMA |
1676 | struct tcp_sock *tp = tcp_sk(sk); | 1676 | struct tcp_sock *tp = tcp_sk(sk); |
1677 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1677 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1678 | tp->ucopy.dma_chan = get_softnet_dma(); | 1678 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1679 | if (tp->ucopy.dma_chan) | 1679 | if (tp->ucopy.dma_chan) |
1680 | ret = tcp_v6_do_rcv(sk, skb); | 1680 | ret = tcp_v6_do_rcv(sk, skb); |
1681 | else | 1681 | else |