-rw-r--r--  Documentation/acpi/enumeration.txt | 2
-rw-r--r--  Documentation/cpu-freq/intel-pstate.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt | 110
-rw-r--r--  Documentation/devicetree/bindings/pci/versatile.txt | 59
-rw-r--r--  Documentation/filesystems/xfs.txt | 22
-rw-r--r--  Documentation/kernel-parameters.txt | 3
-rw-r--r--  Documentation/power/s2ram.txt | 4
-rw-r--r--  MAINTAINERS | 12
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/boot/dts/versatile-pb.dts | 37
-rw-r--r--  arch/arm/include/asm/mach/pci.h | 6
-rw-r--r--  arch/arm/include/asm/pci.h | 7
-rw-r--r--  arch/arm/include/asm/xen/page.h | 2
-rw-r--r--  arch/arm/kernel/bios32.c | 8
-rw-r--r--  arch/arm/mach-cns3xxx/pcie.c | 92
-rw-r--r--  arch/arm/mach-integrator/pci_v3.c | 62
-rw-r--r--  arch/arm/mach-ks8695/pci.c | 77
-rw-r--r--  arch/arm/mach-sa1100/pci-nanoengine.c | 94
-rw-r--r--  arch/arm/xen/enlighten.c | 4
-rw-r--r--  arch/arm/xen/mm.c | 2
-rw-r--r--  arch/arm/xen/p2m.c | 2
-rw-r--r--  arch/arm64/kernel/pci.c | 22
-rw-r--r--  arch/frv/mb93090-mb00/pci-vdk.c | 4
-rw-r--r--  arch/ia64/kernel/acpi-ext.c | 6
-rw-r--r--  arch/ia64/kernel/acpi.c | 6
-rw-r--r--  arch/ia64/pci/pci.c | 14
-rw-r--r--  arch/m68k/atari/atakeyb.c | 72
-rw-r--r--  arch/m68k/atari/stdma.c | 2
-rw-r--r--  arch/m68k/atari/time.c | 3
-rw-r--r--  arch/m68k/configs/amiga_defconfig | 73
-rw-r--r--  arch/m68k/configs/apollo_defconfig | 73
-rw-r--r--  arch/m68k/configs/atari_defconfig | 78
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig | 73
-rw-r--r--  arch/m68k/configs/hp300_defconfig | 73
-rw-r--r--  arch/m68k/configs/mac_defconfig | 72
-rw-r--r--  arch/m68k/configs/multi_defconfig | 78
-rw-r--r--  arch/m68k/configs/mvme147_defconfig | 73
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig | 72
-rw-r--r--  arch/m68k/configs/q40_defconfig | 73
-rw-r--r--  arch/m68k/configs/sun3_defconfig | 72
-rw-r--r--  arch/m68k/configs/sun3x_defconfig | 73
-rw-r--r--  arch/m68k/include/asm/Kbuild | 1
-rw-r--r--  arch/m68k/include/asm/atariints.h | 5
-rw-r--r--  arch/m68k/include/asm/futex.h | 94
-rw-r--r--  arch/m68k/include/asm/macintosh.h | 2
-rw-r--r--  arch/m68k/mac/config.c | 32
-rw-r--r--  arch/m68k/mvme147/config.c | 46
-rw-r--r--  arch/m68k/mvme16x/rtc.c | 2
-rw-r--r--  arch/microblaze/boot/Makefile | 3
-rw-r--r--  arch/microblaze/boot/dts/Makefile | 2
-rw-r--r--  arch/microblaze/include/asm/delay.h | 4
-rw-r--r--  arch/microblaze/include/asm/kgdb.h | 3
-rw-r--r--  arch/microblaze/include/asm/linkage.h | 16
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h | 14
-rw-r--r--  arch/microblaze/include/asm/syscall.h | 2
-rw-r--r--  arch/microblaze/include/asm/uaccess.h | 6
-rw-r--r--  arch/microblaze/include/asm/unistd.h | 2
-rw-r--r--  arch/microblaze/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/microblaze/kernel/Makefile | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c | 6
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-static.c | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c | 7
-rw-r--r--  arch/microblaze/kernel/intc.c | 8
-rw-r--r--  arch/microblaze/kernel/kgdb.c | 10
-rw-r--r--  arch/microblaze/kernel/prom_parse.c | 35
-rw-r--r--  arch/microblaze/kernel/ptrace.c | 4
-rw-r--r--  arch/microblaze/kernel/reset.c | 1
-rw-r--r--  arch/microblaze/kernel/signal.c | 4
-rw-r--r--  arch/microblaze/kernel/syscall_table.S | 1
-rw-r--r--  arch/microblaze/kernel/unwind.c | 2
-rw-r--r--  arch/mips/pci/pci-bcm1480.c | 4
-rw-r--r--  arch/mips/pci/pci-octeon.c | 4
-rw-r--r--  arch/mips/pci/pcie-octeon.c | 12
-rw-r--r--  arch/mn10300/unit-asb2305/pci.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_pciex.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 209
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 46
-rw-r--r--  arch/tile/kernel/pci.c | 4
-rw-r--r--  arch/x86/Kconfig | 11
-rw-r--r--  arch/x86/include/asm/pci_x86.h | 2
-rw-r--r--  arch/x86/include/asm/xen/page.h | 20
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 5
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 16
-rw-r--r--  arch/x86/pci/acpi.c | 293
-rw-r--r--  arch/x86/pci/bus_numa.c | 4
-rw-r--r--  arch/x86/pci/common.c | 34
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 4
-rw-r--r--  arch/x86/pci/irq.c | 15
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 6
-rw-r--r--  arch/x86/pci/xen.c | 4
-rw-r--r--  arch/x86/xen/mmu.c | 17
-rw-r--r--  arch/x86/xen/p2m.c | 267
-rw-r--r--  arch/x86/xen/setup.c | 37
-rw-r--r--  arch/x86/xen/smp.c | 2
-rw-r--r--  arch/x86/xen/time.c | 4
-rw-r--r--  arch/x86/xen/xen-ops.h | 6
-rw-r--r--  drivers/acpi/Kconfig | 6
-rw-r--r--  drivers/acpi/Makefile | 3
-rw-r--r--  drivers/acpi/acpi_apd.c | 150
-rw-r--r--  drivers/acpi/acpi_lpss.c | 12
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 8
-rw-r--r--  drivers/acpi/acpi_platform.c | 4
-rw-r--r--  drivers/acpi/acpica/acapps.h | 4
-rw-r--r--  drivers/acpi/acpica/accommon.h | 2
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 2
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 2
-rw-r--r--  drivers/acpi/acpica/acevents.h | 4
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 2
-rw-r--r--  drivers/acpi/acpica/achware.h | 2
-rw-r--r--  drivers/acpi/acpica/acinterp.h | 2
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 2
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r--  drivers/acpi/acpica/acobject.h | 2
-rw-r--r--  drivers/acpi/acpica/acopcode.h | 2
-rw-r--r--  drivers/acpi/acpica/acparser.h | 2
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 2
-rw-r--r--  drivers/acpi/acpica/acresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 2
-rw-r--r--  drivers/acpi/acpica/actables.h | 2
-rw-r--r--  drivers/acpi/acpica/acutils.h | 2
-rw-r--r--  drivers/acpi/acpica/amlcode.h | 2
-rw-r--r--  drivers/acpi/acpica/amlresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/dsargs.c | 2
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 2
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 2
-rw-r--r--  drivers/acpi/acpica/dsmthdat.c | 2
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 2
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/dswexec.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 2
-rw-r--r--  drivers/acpi/acpica/dswscope.c | 2
-rw-r--r--  drivers/acpi/acpica/dswstate.c | 2
-rw-r--r--  drivers/acpi/acpica/evevent.c | 2
-rw-r--r--  drivers/acpi/acpica/evglock.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 164
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 10
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 10
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c | 61
-rw-r--r--  drivers/acpi/acpica/evhandler.c | 2
-rw-r--r--  drivers/acpi/acpica/evmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/evregion.c | 2
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r--  drivers/acpi/acpica/evsci.c | 2
-rw-r--r--  drivers/acpi/acpica/evxface.c | 132
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 123
-rw-r--r--  drivers/acpi/acpica/evxfregn.c | 2
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 2
-rw-r--r--  drivers/acpi/acpica/exconvrt.c | 2
-rw-r--r--  drivers/acpi/acpica/excreate.c | 2
-rw-r--r--  drivers/acpi/acpica/exdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/exdump.c | 2
-rw-r--r--  drivers/acpi/acpica/exfield.c | 2
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 2
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/exmutex.c | 2
-rw-r--r--  drivers/acpi/acpica/exnames.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg1.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r--  drivers/acpi/acpica/exprep.c | 2
-rw-r--r--  drivers/acpi/acpica/exregion.c | 2
-rw-r--r--  drivers/acpi/acpica/exresnte.c | 2
-rw-r--r--  drivers/acpi/acpica/exresolv.c | 2
-rw-r--r--  drivers/acpi/acpica/exresop.c | 2
-rw-r--r--  drivers/acpi/acpica/exstore.c | 2
-rw-r--r--  drivers/acpi/acpica/exstoren.c | 2
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 2
-rw-r--r--  drivers/acpi/acpica/exsystem.c | 2
-rw-r--r--  drivers/acpi/acpica/exutils.c | 2
-rw-r--r--  drivers/acpi/acpica/hwacpi.c | 2
-rw-r--r--  drivers/acpi/acpica/hwesleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 10
-rw-r--r--  drivers/acpi/acpica/hwpci.c | 2
-rw-r--r--  drivers/acpi/acpica/hwregs.c | 2
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwtimer.c | 2
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxface.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/nsaccess.c | 2
-rw-r--r--  drivers/acpi/acpica/nsalloc.c | 2
-rw-r--r--  drivers/acpi/acpica/nsarguments.c | 2
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdumpdv.c | 2
-rw-r--r--  drivers/acpi/acpica/nseval.c | 2
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/nsload.c | 2
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 2
-rw-r--r--  drivers/acpi/acpica/nsobject.c | 2
-rw-r--r--  drivers/acpi/acpica/nsparse.c | 2
-rw-r--r--  drivers/acpi/acpica/nspredef.c | 2
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 2
-rw-r--r--  drivers/acpi/acpica/nssearch.c | 2
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/nswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/nsxfeval.c | 2
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 2
-rw-r--r--  drivers/acpi/acpica/nsxfobj.c | 46
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/psloop.c | 2
-rw-r--r--  drivers/acpi/acpica/psobject.c | 2
-rw-r--r--  drivers/acpi/acpica/psopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/psopinfo.c | 2
-rw-r--r--  drivers/acpi/acpica/psparse.c | 2
-rw-r--r--  drivers/acpi/acpica/psscope.c | 2
-rw-r--r--  drivers/acpi/acpica/pstree.c | 2
-rw-r--r--  drivers/acpi/acpica/psutils.c | 2
-rw-r--r--  drivers/acpi/acpica/pswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/psxface.c | 2
-rw-r--r--  drivers/acpi/acpica/rsaddr.c | 11
-rw-r--r--  drivers/acpi/acpica/rscalc.c | 2
-rw-r--r--  drivers/acpi/acpica/rscreate.c | 2
-rw-r--r--  drivers/acpi/acpica/rsdump.c | 2
-rw-r--r--  drivers/acpi/acpica/rsdumpinfo.c | 61
-rw-r--r--  drivers/acpi/acpica/rsinfo.c | 2
-rw-r--r--  drivers/acpi/acpica/rsio.c | 2
-rw-r--r--  drivers/acpi/acpica/rsirq.c | 2
-rw-r--r--  drivers/acpi/acpica/rslist.c | 2
-rw-r--r--  drivers/acpi/acpica/rsmemory.c | 2
-rw-r--r--  drivers/acpi/acpica/rsmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/rsserial.c | 2
-rw-r--r--  drivers/acpi/acpica/rsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/rsxface.c | 12
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 2
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 2
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 41
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 2
-rw-r--r--  drivers/acpi/acpica/utaddress.c | 2
-rw-r--r--  drivers/acpi/acpica/utalloc.c | 2
-rw-r--r--  drivers/acpi/acpica/utbuffer.c | 2
-rw-r--r--  drivers/acpi/acpica/utcache.c | 2
-rw-r--r--  drivers/acpi/acpica/utcopy.c | 2
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 2
-rw-r--r--  drivers/acpi/acpica/utdelete.c | 2
-rw-r--r--  drivers/acpi/acpica/uterror.c | 2
-rw-r--r--  drivers/acpi/acpica/uteval.c | 2
-rw-r--r--  drivers/acpi/acpica/utexcep.c | 2
-rw-r--r--  drivers/acpi/acpica/utfileio.c | 2
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 2
-rw-r--r--  drivers/acpi/acpica/uthex.c | 2
-rw-r--r--  drivers/acpi/acpica/utids.c | 2
-rw-r--r--  drivers/acpi/acpica/utinit.c | 2
-rw-r--r--  drivers/acpi/acpica/utlock.c | 2
-rw-r--r--  drivers/acpi/acpica/utmath.c | 2
-rw-r--r--  drivers/acpi/acpica/utmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/utmutex.c | 2
-rw-r--r--  drivers/acpi/acpica/utobject.c | 2
-rw-r--r--  drivers/acpi/acpica/utosi.c | 2
-rw-r--r--  drivers/acpi/acpica/utownerid.c | 2
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 2
-rw-r--r--  drivers/acpi/acpica/utprint.c | 2
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 2
-rw-r--r--  drivers/acpi/acpica/utstate.c | 2
-rw-r--r--  drivers/acpi/acpica/utstring.c | 2
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 2
-rw-r--r--  drivers/acpi/acpica/utuuid.c | 2
-rw-r--r--  drivers/acpi/acpica/utxface.c | 2
-rw-r--r--  drivers/acpi/acpica/utxferror.c | 2
-rw-r--r--  drivers/acpi/acpica/utxfinit.c | 2
-rw-r--r--  drivers/acpi/acpica/utxfmutex.c | 2
-rw-r--r--  drivers/acpi/device_pm.c | 2
-rw-r--r--  drivers/acpi/ec.c | 548
-rw-r--r--  drivers/acpi/internal.h | 11
-rw-r--r--  drivers/acpi/ioapic.c | 229
-rw-r--r--  drivers/acpi/numa.c | 12
-rw-r--r--  drivers/acpi/pci_irq.c | 9
-rw-r--r--  drivers/acpi/pci_root.c | 9
-rw-r--r--  drivers/acpi/processor_core.c | 123
-rw-r--r--  drivers/acpi/processor_idle.c | 182
-rw-r--r--  drivers/acpi/resource.c | 353
-rw-r--r--  drivers/acpi/scan.c | 1
-rw-r--r--  drivers/acpi/sleep.c | 2
-rw-r--r--  drivers/acpi/video.c | 18
-rw-r--r--  drivers/base/power/common.c | 18
-rw-r--r--  drivers/base/power/domain.c | 157
-rw-r--r--  drivers/base/power/opp.c | 194
-rw-r--r--  drivers/base/power/qos.c | 4
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 177
-rw-r--r--  drivers/block/xen-blkback/common.h | 3
-rw-r--r--  drivers/char/hpet.c | 4
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 10
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq.c | 174
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 219
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 55
-rw-r--r--  drivers/cpufreq/ls1x-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/sfi-cpufreq.c | 136
-rw-r--r--  drivers/cpuidle/cpuidle-big_little.c | 4
-rw-r--r--  drivers/devfreq/Kconfig | 12
-rw-r--r--  drivers/devfreq/Makefile | 5
-rw-r--r--  drivers/devfreq/devfreq-event.c | 494
-rw-r--r--  drivers/devfreq/event/Kconfig | 25
-rw-r--r--  drivers/devfreq/event/Makefile | 2
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 374
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.h | 93
-rw-r--r--  drivers/devfreq/tegra-devfreq.c | 718
-rw-r--r--  drivers/dma/acpi-dma.c | 10
-rw-r--r--  drivers/hv/vmbus_drv.c | 4
-rw-r--r--  drivers/mailbox/pcc.c | 4
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 8
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 6
-rw-r--r--  drivers/net/xen-netback/interface.c | 7
-rw-r--r--  drivers/net/xen-netback/netback.c | 106
-rw-r--r--  drivers/of/of_pci.c | 4
-rw-r--r--  drivers/parport/parport_atari.c | 4
-rw-r--r--  drivers/pci/access.c | 87
-rw-r--r--  drivers/pci/bus.c | 18
-rw-r--r--  drivers/pci/host-bridge.c | 8
-rw-r--r--  drivers/pci/host/Kconfig | 4
-rw-r--r--  drivers/pci/host/Makefile | 1
-rw-r--r--  drivers/pci/host/pci-host-generic.c | 55
-rw-r--r--  drivers/pci/host/pci-keystone.c | 4
-rw-r--r--  drivers/pci/host/pci-layerscape.c | 1
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 15
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c | 51
-rw-r--r--  drivers/pci/host/pci-tegra.c | 68
-rw-r--r--  drivers/pci/host/pci-versatile.c | 237
-rw-r--r--  drivers/pci/host/pci-xgene.c | 156
-rw-r--r--  drivers/pci/host/pcie-designware.c | 3
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 7
-rw-r--r--  drivers/pci/host/pcie-xilinx.c | 96
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 3
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 2
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c | 13
-rw-r--r--  drivers/pci/msi.c | 5
-rw-r--r--  drivers/pci/pci-acpi.c | 17
-rw-r--r--  drivers/pci/pci-driver.c | 13
-rw-r--r--  drivers/pci/pci.c | 77
-rw-r--r--  drivers/pci/pci.h | 6
-rw-r--r--  drivers/pci/pcie/aspm.c | 12
-rw-r--r--  drivers/pci/probe.c | 10
-rw-r--r--  drivers/pci/quirks.c | 64
-rw-r--r--  drivers/pci/rom.c | 7
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 45
-rw-r--r--  drivers/rapidio/devices/tsi721.c | 2
-rw-r--r--  drivers/rapidio/devices/tsi721.h | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c | 5
-rw-r--r--  drivers/sfi/sfi_core.c | 4
-rw-r--r--  drivers/usb/core/hub.c | 12
-rw-r--r--  drivers/video/fbdev/atafb.c | 3
-rw-r--r--  drivers/xen/balloon.c | 86
-rw-r--r--  drivers/xen/gntdev.c | 143
-rw-r--r--  drivers/xen/grant-table.c | 120
-rw-r--r--  drivers/xen/manage.c | 8
-rw-r--r--  drivers/xen/tmem.c | 2
-rw-r--r--  drivers/xen/xen-acpi-memhotplug.c | 8
-rw-r--r--  drivers/xen/xen-scsiback.c | 6
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 11
-rw-r--r--  fs/ceph/locks.c | 64
-rw-r--r--  fs/ceph/mds_client.c | 4
-rw-r--r--  fs/cifs/file.c | 34
-rw-r--r--  fs/ext3/super.c | 2
-rw-r--r--  fs/ext4/super.c | 42
-rw-r--r--  fs/gfs2/acl.c | 2
-rw-r--r--  fs/gfs2/dir.c | 3
-rw-r--r--  fs/gfs2/glock.c | 13
-rw-r--r--  fs/gfs2/inode.c | 3
-rw-r--r--  fs/gfs2/recovery.c | 2
-rw-r--r--  fs/gfs2/sys.c | 2
-rw-r--r--  fs/inode.c | 3
-rw-r--r--  fs/isofs/util.c | 18
-rw-r--r--  fs/lockd/svcsubs.c | 26
-rw-r--r--  fs/locks.c | 569
-rw-r--r--  fs/nfs/delegation.c | 23
-rw-r--r--  fs/nfs/nfs4state.c | 70
-rw-r--r--  fs/nfs/pagelist.c | 6
-rw-r--r--  fs/nfs/write.c | 41
-rw-r--r--  fs/nfsd/nfs4state.c | 21
-rw-r--r--  fs/ocfs2/quota.h | 1
-rw-r--r--  fs/ocfs2/quota_local.c | 14
-rw-r--r--  fs/ocfs2/super.c | 32
-rw-r--r--  fs/quota/dquot.c | 107
-rw-r--r--  fs/quota/quota.c | 52
-rw-r--r--  fs/quota/quota_v1.c | 4
-rw-r--r--  fs/quota/quota_v2.c | 16
-rw-r--r--  fs/read_write.c | 2
-rw-r--r--  fs/udf/Kconfig | 10
-rw-r--r--  fs/udf/inode.c | 32
-rw-r--r--  fs/udf/super.c | 5
-rw-r--r--  fs/xfs/kmem.c | 10
-rw-r--r--  fs/xfs/kmem.h | 5
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 20
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 33
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 24
-rw-r--r--  fs/xfs/libxfs/xfs_fs.h (renamed from fs/xfs/xfs_fs.h) | 0
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 320
-rw-r--r--  fs/xfs/libxfs/xfs_sb.h | 11
-rw-r--r--  fs/xfs/libxfs/xfs_shared.h | 33
-rw-r--r--  fs/xfs/libxfs/xfs_symlink_remote.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_trans_resv.c | 14
-rw-r--r--  fs/xfs/libxfs/xfs_trans_resv.h | 1
-rw-r--r--  fs/xfs/libxfs/xfs_types.h (renamed from fs/xfs/xfs_types.h) | 0
-rw-r--r--  fs/xfs/xfs_aops.c | 149
-rw-r--r--  fs/xfs/xfs_aops.h | 3
-rw-r--r--  fs/xfs/xfs_bmap_util.h | 37
-rw-r--r--  fs/xfs/xfs_buf_item.c | 6
-rw-r--r--  fs/xfs/xfs_dquot.h | 2
-rw-r--r--  fs/xfs/xfs_file.c | 64
-rw-r--r--  fs/xfs/xfs_fsops.c | 34
-rw-r--r--  fs/xfs/xfs_inode.c | 136
-rw-r--r--  fs/xfs/xfs_inode.h | 11
-rw-r--r--  fs/xfs/xfs_ioctl.c | 501
-rw-r--r--  fs/xfs/xfs_ioctl32.c | 2
-rw-r--r--  fs/xfs/xfs_iomap.c | 2
-rw-r--r--  fs/xfs/xfs_iomap.h | 2
-rw-r--r--  fs/xfs/xfs_iops.c | 21
-rw-r--r--  fs/xfs/xfs_log.c | 28
-rw-r--r--  fs/xfs/xfs_mount.c | 107
-rw-r--r--  fs/xfs/xfs_mount.h | 5
-rw-r--r--  fs/xfs/xfs_qm.c | 43
-rw-r--r--  fs/xfs/xfs_qm.h | 1
-rw-r--r--  fs/xfs/xfs_qm_syscalls.c | 88
-rw-r--r--  fs/xfs/xfs_quotaops.c | 59
-rw-r--r--  fs/xfs/xfs_super.c | 20
-rw-r--r--  fs/xfs/xfs_sysctl.c | 18
-rw-r--r--  fs/xfs/xfs_trans.c | 1
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 5
-rw-r--r--  include/acpi/acbuffer.h | 2
-rw-r--r--  include/acpi/acconfig.h | 2
-rw-r--r--  include/acpi/acexcep.h | 2
-rw-r--r--  include/acpi/acnames.h | 2
-rw-r--r--  include/acpi/acoutput.h | 2
-rw-r--r--  include/acpi/acpi.h | 2
-rw-r--r--  include/acpi/acpiosxf.h | 2
-rw-r--r--  include/acpi/acpixf.h | 18
-rw-r--r--  include/acpi/acrestyp.h | 42
-rw-r--r--  include/acpi/actbl.h | 2
-rw-r--r--  include/acpi/actbl1.h | 2
-rw-r--r--  include/acpi/actbl2.h | 2
-rw-r--r--  include/acpi/actbl3.h | 2
-rw-r--r--  include/acpi/actypes.h | 14
-rw-r--r--  include/acpi/platform/acenv.h | 2
-rw-r--r--  include/acpi/platform/acenvex.h | 2
-rw-r--r--  include/acpi/platform/acgcc.h | 2
-rw-r--r--  include/acpi/platform/aclinux.h | 2
-rw-r--r--  include/acpi/platform/aclinuxex.h | 2
-rw-r--r--  include/linux/acpi.h | 22
-rw-r--r--  include/linux/cpufreq.h | 10
-rw-r--r--  include/linux/devfreq-event.h | 196
-rw-r--r--  include/linux/dqblk_v1.h | 3
-rw-r--r--  include/linux/fs.h | 52
-rw-r--r--  include/linux/jbd.h | 9
-rw-r--r--  include/linux/jbd2.h | 9
-rw-r--r--  include/linux/mm.h | 7
-rw-r--r--  include/linux/page-flags.h | 5
-rw-r--r--  include/linux/pci.h | 24
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/pm_domain.h | 4
-rw-r--r--  include/linux/quota.h | 22
-rw-r--r--  include/linux/quotaops.h | 3
-rw-r--r--  include/linux/resource_ext.h | 77
-rw-r--r--  include/uapi/linux/pci_regs.h | 4
-rw-r--r--  include/uapi/linux/quota.h | 14
-rw-r--r--  include/xen/grant_table.h | 43
-rw-r--r--  include/xen/interface/features.h | 6
-rw-r--r--  include/xen/interface/grant_table.h | 7
-rw-r--r--  kernel/power/qos.c | 91
-rw-r--r--  kernel/power/snapshot.c | 11
-rw-r--r--  kernel/resource.c | 25
-rw-r--r--  kernel/trace/power-traces.c | 1
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  sound/oss/dmasound/dmasound_atari.c | 2
-rw-r--r--  tools/power/acpi/common/cmfsize.c | 2
-rw-r--r--  tools/power/acpi/common/getopt.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/oslibcfs.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/oslinuxtbl.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/osunixdir.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/osunixmap.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/osunixxf.c | 2
-rw-r--r--  tools/power/acpi/tools/acpidump/acpidump.h | 2
-rw-r--r--  tools/power/acpi/tools/acpidump/apdump.c | 2
-rw-r--r--  tools/power/acpi/tools/acpidump/apfiles.c | 2
-rw-r--r--  tools/power/acpi/tools/acpidump/apmain.c | 2
-rw-r--r--  tools/power/cpupower/Makefile | 2
-rw-r--r--  tools/power/x86/turbostat/turbostat.8 | 66
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 338
495 files changed, 9014 insertions, 5419 deletions
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index b60d2ab69497..9b121a569ab4 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -243,7 +243,7 @@ input driver:
243 .owner = THIS_MODULE, 243 .owner = THIS_MODULE,
244 .pm = &mpu3050_pm, 244 .pm = &mpu3050_pm,
245 .of_match_table = mpu3050_of_match, 245 .of_match_table = mpu3050_of_match,
246 .acpi_match_table ACPI_PTR(mpu3050_acpi_match), 246 .acpi_match_table = ACPI_PTR(mpu3050_acpi_match),
247 }, 247 },
248 .probe = mpu3050_probe, 248 .probe = mpu3050_probe,
249 .remove = mpu3050_remove, 249 .remove = mpu3050_remove,
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index 765d7fc0e692..655750743fb0 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -37,6 +37,14 @@ controlling P state selection. These files have been added to
37 no_turbo: limits the driver to selecting P states below the turbo 37 no_turbo: limits the driver to selecting P states below the turbo
38 frequency range. 38 frequency range.
39 39
40 turbo_pct: displays the percentage of the total performance that
41 is supported by hardware that is in the turbo range. This number
42 is independent of whether turbo has been disabled or not.
43
44 num_pstates: displays the number of pstates that are supported
45 by hardware. This number is independent of whether turbo has
46 been disabled or not.
47
40For contemporary Intel processors, the frequency is controlled by the 48For contemporary Intel processors, the frequency is controlled by the
41processor itself and the P-states exposed to software are related to 49processor itself and the P-states exposed to software are related to
42performance levels. The idea that frequency can be set to a single 50performance levels. The idea that frequency can be set to a single
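
The turbo_pct and num_pstates attributes added above are plain sysfs files. A minimal user-space sketch for reading them follows; the /sys/devices/system/cpu/intel_pstate/ location is the driver's usual sysfs directory and is assumed here rather than taken from this hunk.

    /* Read intel_pstate's turbo_pct and num_pstates attributes.
     * The sysfs base path is an assumption; adjust if needed. */
    #include <stdio.h>

    static long read_attr(const char *path)
    {
            FILE *f = fopen(path, "r");
            long v = -1;

            if (f) {
                    if (fscanf(f, "%ld", &v) != 1)
                            v = -1;
                    fclose(f);
            }
            return v;
    }

    int main(void)
    {
            printf("turbo_pct=%ld num_pstates=%ld\n",
                   read_attr("/sys/devices/system/cpu/intel_pstate/turbo_pct"),
                   read_attr("/sys/devices/system/cpu/intel_pstate/num_pstates"));
            return 0;
    }
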
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
new file mode 100644
index 000000000000..b54bf3a2ff57
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
@@ -0,0 +1,110 @@
1
2* Samsung Exynos PPMU (Platform Performance Monitoring Unit) device
3
4The Samsung Exynos SoC has PPMU (Platform Performance Monitoring Unit) for
5each IP. PPMU provides the primitive values to get performance data. These
6PPMU events provide information of the SoC's behaviors so that you may
7use to analyze system performance, to make behaviors visible and to count
8usages of each IP (DMC, CPU, RIGHTBUS, LEFTBUS, CAM interface, LCD, G3D, MFC).
9The Exynos PPMU driver uses the devfreq-event class to provide event data
10to various devfreq devices. The devfreq devices would use the event data when
11derterming the current state of each IP.
12
13Required properties:
14- compatible: Should be "samsung,exynos-ppmu".
15- reg: physical base address of each PPMU and length of memory mapped region.
16
17Optional properties:
18- clock-names : the name of clock used by the PPMU, "ppmu"
19- clocks : phandles for clock specified in "clock-names" property
20- #clock-cells: should be 1.
21
22Example1 : PPMU nodes in exynos3250.dtsi are listed below.
23
24 ppmu_dmc0: ppmu_dmc0@106a0000 {
25 compatible = "samsung,exynos-ppmu";
26 reg = <0x106a0000 0x2000>;
27 status = "disabled";
28 };
29
30 ppmu_dmc1: ppmu_dmc1@106b0000 {
31 compatible = "samsung,exynos-ppmu";
32 reg = <0x106b0000 0x2000>;
33 status = "disabled";
34 };
35
36 ppmu_cpu: ppmu_cpu@106c0000 {
37 compatible = "samsung,exynos-ppmu";
38 reg = <0x106c0000 0x2000>;
39 status = "disabled";
40 };
41
42 ppmu_rightbus: ppmu_rightbus@112a0000 {
43 compatible = "samsung,exynos-ppmu";
44 reg = <0x112a0000 0x2000>;
45 clocks = <&cmu CLK_PPMURIGHT>;
46 clock-names = "ppmu";
47 status = "disabled";
48 };
49
50 ppmu_leftbus: ppmu_leftbus0@116a0000 {
51 compatible = "samsung,exynos-ppmu";
52 reg = <0x116a0000 0x2000>;
53 clocks = <&cmu CLK_PPMULEFT>;
54 clock-names = "ppmu";
55 status = "disabled";
56 };
57
58Example2 : Events of each PPMU node in exynos3250-rinato.dts are listed below.
59
60 &ppmu_dmc0 {
61 status = "okay";
62
63 events {
64 ppmu_dmc0_3: ppmu-event3-dmc0 {
65 event-name = "ppmu-event3-dmc0";
66 };
67
68 ppmu_dmc0_2: ppmu-event2-dmc0 {
69 event-name = "ppmu-event2-dmc0";
70 };
71
72 ppmu_dmc0_1: ppmu-event1-dmc0 {
73 event-name = "ppmu-event1-dmc0";
74 };
75
76 ppmu_dmc0_0: ppmu-event0-dmc0 {
77 event-name = "ppmu-event0-dmc0";
78 };
79 };
80 };
81
82 &ppmu_dmc1 {
83 status = "okay";
84
85 events {
86 ppmu_dmc1_3: ppmu-event3-dmc1 {
87 event-name = "ppmu-event3-dmc1";
88 };
89 };
90 };
91
92 &ppmu_leftbus {
93 status = "okay";
94
95 events {
96 ppmu_leftbus_3: ppmu-event3-leftbus {
97 event-name = "ppmu-event3-leftbus";
98 };
99 };
100 };
101
102 &ppmu_rightbus {
103 status = "okay";
104
105 events {
106 ppmu_rightbus_3: ppmu-event3-rightbus {
107 event-name = "ppmu-event3-rightbus";
108 };
109 };
110 };
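
The binding above covers the producer side only. Consumers are expected to go through the devfreq-event class that this merge introduces (include/linux/devfreq-event.h in the diffstat); the kernel-context sketch below shows the intended usage pattern. It is a hedged illustration, not code from the patch, and the exact signatures should be checked against that header.

    /* Hedged sketch: a devfreq driver reading PPMU counters through
     * the devfreq-event API.  Assumes a "devfreq-events" phandle in
     * the consumer's DT node, as used by the new class driver. */
    #include <linux/devfreq-event.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_read_ppmu_load(struct device *dev)
    {
            struct devfreq_event_dev *edev;
            struct devfreq_event_data edata;
            int ret;

            edev = devfreq_event_get_edev_by_phandle(dev, 0);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            ret = devfreq_event_enable_edev(edev);
            if (ret < 0)
                    return ret;

            ret = devfreq_event_get_event(edev, &edata);
            if (ret < 0)
                    return ret;

            /* busy vs. total cycles reported by the PPMU counters */
            dev_info(dev, "load %lu / total %lu\n",
                     edata.load_count, edata.total_count);

            return devfreq_event_disable_edev(edev);
    }
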
diff --git a/Documentation/devicetree/bindings/pci/versatile.txt b/Documentation/devicetree/bindings/pci/versatile.txt
new file mode 100644
index 000000000000..ebd1e7d0403e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/versatile.txt
@@ -0,0 +1,59 @@
1* ARM Versatile Platform Baseboard PCI interface
2
3PCI host controller found on the ARM Versatile PB board's FPGA.
4
5Required properties:
6- compatible: should contain "arm,versatile-pci" to identify the Versatile PCI
7 controller.
8- reg: base addresses and lengths of the pci controller. There must be 3
9 entries:
10 - Versatile-specific registers
11 - Self Config space
12 - Config space
13- #address-cells: set to <3>
14- #size-cells: set to <2>
15- device_type: set to "pci"
16- bus-range: set to <0 0xff>
17- ranges: ranges for the PCI memory and I/O regions
18- #interrupt-cells: set to <1>
19- interrupt-map-mask and interrupt-map: standard PCI properties to define
20 the mapping of the PCI interface to interrupt numbers.
21
22Example:
23
24pci-controller@10001000 {
25 compatible = "arm,versatile-pci";
26 device_type = "pci";
27 reg = <0x10001000 0x1000
28 0x41000000 0x10000
29 0x42000000 0x100000>;
30 bus-range = <0 0xff>;
31 #address-cells = <3>;
32 #size-cells = <2>;
33 #interrupt-cells = <1>;
34
35 ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000 /* downstream I/O */
36 0x02000000 0 0x50000000 0x50000000 0 0x10000000 /* non-prefetchable memory */
37 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
38
39 interrupt-map-mask = <0x1800 0 0 7>;
40 interrupt-map = <0x1800 0 0 1 &sic 28
41 0x1800 0 0 2 &sic 29
42 0x1800 0 0 3 &sic 30
43 0x1800 0 0 4 &sic 27
44
45 0x1000 0 0 1 &sic 27
46 0x1000 0 0 2 &sic 28
47 0x1000 0 0 3 &sic 29
48 0x1000 0 0 4 &sic 30
49
50 0x0800 0 0 1 &sic 30
51 0x0800 0 0 2 &sic 27
52 0x0800 0 0 3 &sic 28
53 0x0800 0 0 4 &sic 29
54
55 0x0000 0 0 1 &sic 29
56 0x0000 0 0 2 &sic 30
57 0x0000 0 0 3 &sic 27
58 0x0000 0 0 4 &sic 28>;
59};
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 5be51fd888bd..0bfafe108357 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -287,9 +287,9 @@ The following sysctls are available for the XFS filesystem:
287 XFS_ERRLEVEL_LOW: 1 287 XFS_ERRLEVEL_LOW: 1
288 XFS_ERRLEVEL_HIGH: 5 288 XFS_ERRLEVEL_HIGH: 5
289 289
290 fs.xfs.panic_mask (Min: 0 Default: 0 Max: 127) 290 fs.xfs.panic_mask (Min: 0 Default: 0 Max: 255)
291 Causes certain error conditions to call BUG(). Value is a bitmask; 291 Causes certain error conditions to call BUG(). Value is a bitmask;
292 AND together the tags which represent errors which should cause panics: 292 OR together the tags which represent errors which should cause panics:
293 293
294 XFS_NO_PTAG 0 294 XFS_NO_PTAG 0
295 XFS_PTAG_IFLUSH 0x00000001 295 XFS_PTAG_IFLUSH 0x00000001
@@ -299,6 +299,7 @@ The following sysctls are available for the XFS filesystem:
299 XFS_PTAG_SHUTDOWN_CORRUPT 0x00000010 299 XFS_PTAG_SHUTDOWN_CORRUPT 0x00000010
300 XFS_PTAG_SHUTDOWN_IOERROR 0x00000020 300 XFS_PTAG_SHUTDOWN_IOERROR 0x00000020
301 XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040 301 XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040
302 XFS_PTAG_FSBLOCK_ZERO 0x00000080
302 303
303 This option is intended for debugging only. 304 This option is intended for debugging only.
304 305
@@ -348,16 +349,13 @@ The following sysctls are available for the XFS filesystem:
348Deprecated Sysctls 349Deprecated Sysctls
349================== 350==================
350 351
351 fs.xfs.xfsbufd_centisecs (Min: 50 Default: 100 Max: 3000) 352None at present.
352 Dirty metadata is now tracked by the log subsystem and
353 flushing is driven by log space and idling demands. The
354 xfsbufd no longer exists, so this syctl does nothing.
355 353
356 Due for removal in 3.14.
357 354
358 fs.xfs.age_buffer_centisecs (Min: 100 Default: 1500 Max: 720000) 355Removed Sysctls
359 Dirty metadata is now tracked by the log subsystem and 356===============
360 flushing is driven by log space and idling demands. The
361 xfsbufd no longer exists, so this syctl does nothing.
362 357
363 Due for removal in 3.14. 358 Name Removed
359 ---- -------
360 fs.xfs.xfsbufd_centisec v3.20
361 fs.xfs.age_buffer_centisecs v3.20
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 176d4fe4f076..f06f1f609cb7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1470,6 +1470,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1470 no_hwp 1470 no_hwp
1471 Do not enable hardware P state control (HWP) 1471 Do not enable hardware P state control (HWP)
1472 if available. 1472 if available.
1473 hwp_only
1474 Only load intel_pstate on systems which support
1475 hardware P state control (HWP) if available.
1473 1476
1474 intremap= [X86-64, Intel-IOMMU] 1477 intremap= [X86-64, Intel-IOMMU]
1475 on enable Interrupt Remapping (default) 1478 on enable Interrupt Remapping (default)
diff --git a/Documentation/power/s2ram.txt b/Documentation/power/s2ram.txt
index 1bdfa0443773..4685aee197fd 100644
--- a/Documentation/power/s2ram.txt
+++ b/Documentation/power/s2ram.txt
@@ -69,6 +69,10 @@ Reason for this is that the RTC is the only reliably available piece of
69hardware during resume operations where a value can be set that will 69hardware during resume operations where a value can be set that will
70survive a reboot. 70survive a reboot.
71 71
72pm_trace is not compatible with asynchronous suspend, so it turns
73asynchronous suspend off (which may work around timing or
74ordering-sensitive bugs).
75
72Consequence is that after a resume (even if it is successful) your system 76Consequence is that after a resume (even if it is successful) your system
73clock will have a value corresponding to the magic number instead of the 77clock will have a value corresponding to the magic number instead of the
74correct date/time! It is therefore advisable to use a program like ntp-date 78correct date/time! It is therefore advisable to use a program like ntp-date
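
For reference, pm_trace itself is toggled from user space; a trivial sketch follows, assuming the conventional /sys/power/pm_trace control file (the path is not shown in this hunk).

    /* Turn on pm_trace before a test suspend.  As noted above, the
     * RTC will hold a hash, not the real time, after resume. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/power/pm_trace", "w");

            if (!f) {
                    perror("pm_trace");
                    return 1;
            }
            fputs("1\n", f);
            fclose(f);
            return 0;
    }
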
diff --git a/MAINTAINERS b/MAINTAINERS
index 26557b74a8ef..54c7ce00d85f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -270,12 +270,12 @@ F: drivers/acpi/
270F: drivers/pnp/pnpacpi/ 270F: drivers/pnp/pnpacpi/
271F: include/linux/acpi.h 271F: include/linux/acpi.h
272F: include/acpi/ 272F: include/acpi/
273F: Documentation/acpi 273F: Documentation/acpi/
274F: Documentation/ABI/testing/sysfs-bus-acpi 274F: Documentation/ABI/testing/sysfs-bus-acpi
275F: drivers/pci/*acpi* 275F: drivers/pci/*acpi*
276F: drivers/pci/*/*acpi* 276F: drivers/pci/*/*acpi*
277F: drivers/pci/*/*/*acpi* 277F: drivers/pci/*/*/*acpi*
278F: tools/power/acpi 278F: tools/power/acpi/
279 279
280ACPI COMPONENT ARCHITECTURE (ACPICA) 280ACPI COMPONENT ARCHITECTURE (ACPICA)
281M: Robert Moore <robert.moore@intel.com> 281M: Robert Moore <robert.moore@intel.com>
@@ -7277,6 +7277,14 @@ F: include/linux/pci*
7277F: arch/x86/pci/ 7277F: arch/x86/pci/
7278F: arch/x86/kernel/quirks.c 7278F: arch/x86/kernel/quirks.c
7279 7279
7280PCI DRIVER FOR ARM VERSATILE PLATFORM
7281M: Rob Herring <robh@kernel.org>
7282L: linux-pci@vger.kernel.org
7283L: linux-arm-kernel@lists.infradead.org
7284S: Maintained
7285F: Documentation/devicetree/bindings/pci/versatile.txt
7286F: drivers/pci/host/pci-versatile.c
7287
7280PCI DRIVER FOR APPLIEDMICRO XGENE 7288PCI DRIVER FOR APPLIEDMICRO XGENE
7281M: Tanmay Inamdar <tinamdar@apm.com> 7289M: Tanmay Inamdar <tinamdar@apm.com>
7282L: linux-pci@vger.kernel.org 7290L: linux-pci@vger.kernel.org
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97d07ed60a0b..dcb2e0c55be4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1279,6 +1279,9 @@ config PCI_DOMAINS
1279 bool 1279 bool
1280 depends on PCI 1280 depends on PCI
1281 1281
1282config PCI_DOMAINS_GENERIC
1283 def_bool PCI_DOMAINS
1284
1282config PCI_NANOENGINE 1285config PCI_NANOENGINE
1283 bool "BSE nanoEngine PCI support" 1286 bool "BSE nanoEngine PCI support"
1284 depends on SA1100_NANOENGINE 1287 depends on SA1100_NANOENGINE
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index e36c1e82fea7..b83137f66034 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -29,6 +29,43 @@
29 clock-names = "apb_pclk"; 29 clock-names = "apb_pclk";
30 }; 30 };
31 31
32 pci-controller@10001000 {
33 compatible = "arm,versatile-pci";
34 device_type = "pci";
35 reg = <0x10001000 0x1000
36 0x41000000 0x10000
37 0x42000000 0x100000>;
38 bus-range = <0 0xff>;
39 #address-cells = <3>;
40 #size-cells = <2>;
41 #interrupt-cells = <1>;
42
43 ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000 /* downstream I/O */
44 0x02000000 0 0x50000000 0x50000000 0 0x10000000 /* non-prefetchable memory */
45 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
46
47 interrupt-map-mask = <0x1800 0 0 7>;
48 interrupt-map = <0x1800 0 0 1 &sic 28
49 0x1800 0 0 2 &sic 29
50 0x1800 0 0 3 &sic 30
51 0x1800 0 0 4 &sic 27
52
53 0x1000 0 0 1 &sic 27
54 0x1000 0 0 2 &sic 28
55 0x1000 0 0 3 &sic 29
56 0x1000 0 0 4 &sic 30
57
58 0x0800 0 0 1 &sic 30
59 0x0800 0 0 2 &sic 27
60 0x0800 0 0 3 &sic 28
61 0x0800 0 0 4 &sic 29
62
63 0x0000 0 0 1 &sic 29
64 0x0000 0 0 2 &sic 30
65 0x0000 0 0 3 &sic 27
66 0x0000 0 0 4 &sic 28>;
67 };
68
32 fpga { 69 fpga {
33 uart@9000 { 70 uart@9000 {
34 compatible = "arm,pl011", "arm,primecell"; 71 compatible = "arm,pl011", "arm,primecell";
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 8292b5f81e23..28b9bb35949e 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -19,9 +19,6 @@ struct pci_bus;
19struct device; 19struct device;
20 20
21struct hw_pci { 21struct hw_pci {
22#ifdef CONFIG_PCI_DOMAINS
23 int domain;
24#endif
25#ifdef CONFIG_PCI_MSI 22#ifdef CONFIG_PCI_MSI
26 struct msi_controller *msi_ctrl; 23 struct msi_controller *msi_ctrl;
27#endif 24#endif
@@ -45,9 +42,6 @@ struct hw_pci {
45 * Per-controller structure 42 * Per-controller structure
46 */ 43 */
47struct pci_sys_data { 44struct pci_sys_data {
48#ifdef CONFIG_PCI_DOMAINS
49 int domain;
50#endif
51#ifdef CONFIG_PCI_MSI 45#ifdef CONFIG_PCI_MSI
52 struct msi_controller *msi_ctrl; 46 struct msi_controller *msi_ctrl;
53#endif 47#endif
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 7e95d8535e24..585dc33a7a24 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void)
18} 18}
19 19
20#ifdef CONFIG_PCI_DOMAINS 20#ifdef CONFIG_PCI_DOMAINS
21static inline int pci_domain_nr(struct pci_bus *bus)
22{
23 struct pci_sys_data *root = bus->sysdata;
24
25 return root->domain;
26}
27
28static inline int pci_proc_domain(struct pci_bus *bus) 21static inline int pci_proc_domain(struct pci_bus *bus)
29{ 22{
30 return pci_domain_nr(bus); 23 return pci_domain_nr(bus);
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 68c739b3fdf4..2f7e6ff67d51 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -92,7 +92,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
92 struct page **pages, unsigned int count); 92 struct page **pages, unsigned int count);
93 93
94extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 94extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
95 struct gnttab_map_grant_ref *kmap_ops, 95 struct gnttab_unmap_grant_ref *kunmap_ops,
96 struct page **pages, unsigned int count); 96 struct page **pages, unsigned int count);
97 97
98bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 98bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index a4effd6d8f2f..ab19b7c03423 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -422,17 +422,16 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
422static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) 422static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
423{ 423{
424 int ret; 424 int ret;
425 struct pci_host_bridge_window *window; 425 struct resource_entry *window;
426 426
427 if (list_empty(&sys->resources)) { 427 if (list_empty(&sys->resources)) {
428 pci_add_resource_offset(&sys->resources, 428 pci_add_resource_offset(&sys->resources,
429 &iomem_resource, sys->mem_offset); 429 &iomem_resource, sys->mem_offset);
430 } 430 }
431 431
432 list_for_each_entry(window, &sys->resources, list) { 432 resource_list_for_each_entry(window, &sys->resources)
433 if (resource_type(window->res) == IORESOURCE_IO) 433 if (resource_type(window->res) == IORESOURCE_IO)
434 return 0; 434 return 0;
435 }
436 435
437 sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io; 436 sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
438 sys->io_res.end = (busnr + 1) * SZ_64K - 1; 437 sys->io_res.end = (busnr + 1) * SZ_64K - 1;
@@ -463,9 +462,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
463 if (!sys) 462 if (!sys)
464 panic("PCI: unable to allocate sys data!"); 463 panic("PCI: unable to allocate sys data!");
465 464
466#ifdef CONFIG_PCI_DOMAINS
467 sys->domain = hw->domain;
468#endif
469#ifdef CONFIG_PCI_MSI 465#ifdef CONFIG_PCI_MSI
470 sys->msi_ctrl = hw->msi_ctrl; 466 sys->msi_ctrl = hw->msi_ctrl;
471#endif 467#endif
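
The bios32.c hunk above is one instance of a tree-wide switch from struct pci_host_bridge_window to the generic struct resource_entry helpers (see include/linux/resource_ext.h in the diffstat). A hedged kernel-context sketch of the new iteration pattern:

    /* Walk a host-bridge resource list with the resource_entry API.
     * "resources" is any list populated via pci_add_resource_offset()
     * and friends, e.g. pci_sys_data->resources above. */
    #include <linux/ioport.h>
    #include <linux/pci.h>
    #include <linux/resource_ext.h>
    #include <linux/types.h>

    static bool example_has_io_window(struct list_head *resources)
    {
            struct resource_entry *window;

            resource_list_for_each_entry(window, resources) {
                    if (resource_type(window->res) == IORESOURCE_IO)
                            return true;
            }
            return false;
    }
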
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 45d6bd09e6ef..c622c306c390 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -30,18 +30,15 @@ struct cns3xxx_pcie {
30 unsigned int irqs[2]; 30 unsigned int irqs[2];
31 struct resource res_io; 31 struct resource res_io;
32 struct resource res_mem; 32 struct resource res_mem;
33 struct hw_pci hw_pci; 33 int port;
34
35 bool linked; 34 bool linked;
36}; 35};
37 36
38static struct cns3xxx_pcie cns3xxx_pcie[]; /* forward decl. */
39
40static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata) 37static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
41{ 38{
42 struct pci_sys_data *root = sysdata; 39 struct pci_sys_data *root = sysdata;
43 40
44 return &cns3xxx_pcie[root->domain]; 41 return root->private_data;
45} 42}
46 43
47static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev) 44static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
@@ -54,8 +51,8 @@ static struct cns3xxx_pcie *pbus_to_cnspci(struct pci_bus *bus)
54 return sysdata_to_cnspci(bus->sysdata); 51 return sysdata_to_cnspci(bus->sysdata);
55} 52}
56 53
57static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus, 54static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
58 unsigned int devfn, int where) 55 unsigned int devfn, int where)
59{ 56{
60 struct cns3xxx_pcie *cnspci = pbus_to_cnspci(bus); 57 struct cns3xxx_pcie *cnspci = pbus_to_cnspci(bus);
61 int busno = bus->number; 58 int busno = bus->number;
@@ -91,55 +88,22 @@ static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus,
91static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, 88static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
92 int where, int size, u32 *val) 89 int where, int size, u32 *val)
93{ 90{
94 u32 v; 91 int ret;
95 void __iomem *base;
96 u32 mask = (0x1ull << (size * 8)) - 1; 92 u32 mask = (0x1ull << (size * 8)) - 1;
97 int shift = (where % 4) * 8; 93 int shift = (where % 4) * 8;
98 94
99 base = cns3xxx_pci_cfg_base(bus, devfn, where); 95 ret = pci_generic_config_read32(bus, devfn, where, size, val);
100 if (!base) {
101 *val = 0xffffffff;
102 return PCIBIOS_SUCCESSFUL;
103 }
104
105 v = __raw_readl(base);
106 96
107 if (bus->number == 0 && devfn == 0 && 97 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
108 (where & 0xffc) == PCI_CLASS_REVISION) { 98 (where & 0xffc) == PCI_CLASS_REVISION)
109 /* 99 /*
110 * RC's class is 0xb, but Linux PCI driver needs 0x604 100 * RC's class is 0xb, but Linux PCI driver needs 0x604
111 * for a PCIe bridge. So we must fixup the class code 101 * for a PCIe bridge. So we must fixup the class code
112 * to 0x604 here. 102 * to 0x604 here.
113 */ 103 */
114 v &= 0xff; 104 *val = ((((*val << shift) & 0xff) | (0x604 << 16)) >> shift) & mask;
115 v |= 0x604 << 16;
116 }
117 105
118 *val = (v >> shift) & mask; 106 return ret;
119
120 return PCIBIOS_SUCCESSFUL;
121}
122
123static int cns3xxx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
124 int where, int size, u32 val)
125{
126 u32 v;
127 void __iomem *base;
128 u32 mask = (0x1ull << (size * 8)) - 1;
129 int shift = (where % 4) * 8;
130
131 base = cns3xxx_pci_cfg_base(bus, devfn, where);
132 if (!base)
133 return PCIBIOS_SUCCESSFUL;
134
135 v = __raw_readl(base);
136
137 v &= ~(mask << shift);
138 v |= (val & mask) << shift;
139
140 __raw_writel(v, base);
141
142 return PCIBIOS_SUCCESSFUL;
143} 107}
144 108
145static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys) 109static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
@@ -158,8 +122,9 @@ static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
158} 122}
159 123
160static struct pci_ops cns3xxx_pcie_ops = { 124static struct pci_ops cns3xxx_pcie_ops = {
125 .map_bus = cns3xxx_pci_map_bus,
161 .read = cns3xxx_pci_read_config, 126 .read = cns3xxx_pci_read_config,
162 .write = cns3xxx_pci_write_config, 127 .write = pci_generic_config_write,
163}; 128};
164 129
165static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 130static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -192,13 +157,7 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
192 .flags = IORESOURCE_MEM, 157 .flags = IORESOURCE_MEM,
193 }, 158 },
194 .irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, }, 159 .irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
195 .hw_pci = { 160 .port = 0,
196 .domain = 0,
197 .nr_controllers = 1,
198 .ops = &cns3xxx_pcie_ops,
199 .setup = cns3xxx_pci_setup,
200 .map_irq = cns3xxx_pcie_map_irq,
201 },
202 }, 161 },
203 [1] = { 162 [1] = {
204 .host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT, 163 .host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT,
@@ -217,19 +176,13 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
217 .flags = IORESOURCE_MEM, 176 .flags = IORESOURCE_MEM,
218 }, 177 },
219 .irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, }, 178 .irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
220 .hw_pci = { 179 .port = 1,
221 .domain = 1,
222 .nr_controllers = 1,
223 .ops = &cns3xxx_pcie_ops,
224 .setup = cns3xxx_pci_setup,
225 .map_irq = cns3xxx_pcie_map_irq,
226 },
227 }, 180 },
228}; 181};
229 182
230static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci) 183static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
231{ 184{
232 int port = cnspci->hw_pci.domain; 185 int port = cnspci->port;
233 u32 reg; 186 u32 reg;
234 unsigned long time; 187 unsigned long time;
235 188
@@ -260,9 +213,9 @@ static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
260 213
261static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci) 214static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
262{ 215{
263 int port = cnspci->hw_pci.domain; 216 int port = cnspci->port;
264 struct pci_sys_data sd = { 217 struct pci_sys_data sd = {
265 .domain = port, 218 .private_data = cnspci,
266 }; 219 };
267 struct pci_bus bus = { 220 struct pci_bus bus = {
268 .number = 0, 221 .number = 0,
@@ -323,6 +276,14 @@ static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
323void __init cns3xxx_pcie_init_late(void) 276void __init cns3xxx_pcie_init_late(void)
324{ 277{
325 int i; 278 int i;
279 void *private_data;
280 struct hw_pci hw_pci = {
281 .nr_controllers = 1,
282 .ops = &cns3xxx_pcie_ops,
283 .setup = cns3xxx_pci_setup,
284 .map_irq = cns3xxx_pcie_map_irq,
285 .private_data = &private_data,
286 };
326 287
327 pcibios_min_io = 0; 288 pcibios_min_io = 0;
328 pcibios_min_mem = 0; 289 pcibios_min_mem = 0;
@@ -335,7 +296,8 @@ void __init cns3xxx_pcie_init_late(void)
335 cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i)); 296 cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
336 cns3xxx_pcie_check_link(&cns3xxx_pcie[i]); 297 cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
337 cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]); 298 cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
338 pci_common_init(&cns3xxx_pcie[i].hw_pci); 299 private_data = &cns3xxx_pcie[i];
300 pci_common_init(&hw_pci);
339 } 301 }
340 302
341 pci_assign_unassigned_resources(); 303 pci_assign_unassigned_resources();
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index c186a17c2cff..2565f0e7b5cf 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -356,7 +356,6 @@ static u64 pre_mem_pci_sz;
356 * 7:2 register number 356 * 7:2 register number
357 * 357 *
358 */ 358 */
359static DEFINE_RAW_SPINLOCK(v3_lock);
360 359
361#undef V3_LB_BASE_PREFETCH 360#undef V3_LB_BASE_PREFETCH
362#define V3_LB_BASE_PREFETCH 0 361#define V3_LB_BASE_PREFETCH 0
@@ -457,67 +456,21 @@ static void v3_close_config_window(void)
457static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where, 456static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
458 int size, u32 *val) 457 int size, u32 *val)
459{ 458{
460 void __iomem *addr; 459 int ret = pci_generic_config_read(bus, devfn, where, size, val);
461 unsigned long flags;
462 u32 v;
463
464 raw_spin_lock_irqsave(&v3_lock, flags);
465 addr = v3_open_config_window(bus, devfn, where);
466
467 switch (size) {
468 case 1:
469 v = __raw_readb(addr);
470 break;
471
472 case 2:
473 v = __raw_readw(addr);
474 break;
475
476 default:
477 v = __raw_readl(addr);
478 break;
479 }
480
481 v3_close_config_window(); 460 v3_close_config_window();
482 raw_spin_unlock_irqrestore(&v3_lock, flags); 461 return ret;
483
484 *val = v;
485 return PCIBIOS_SUCCESSFUL;
486} 462}
487 463
488static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where, 464static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
489 int size, u32 val) 465 int size, u32 val)
490{ 466{
491 void __iomem *addr; 467 int ret = pci_generic_config_write(bus, devfn, where, size, val);
492 unsigned long flags;
493
494 raw_spin_lock_irqsave(&v3_lock, flags);
495 addr = v3_open_config_window(bus, devfn, where);
496
497 switch (size) {
498 case 1:
499 __raw_writeb((u8)val, addr);
500 __raw_readb(addr);
501 break;
502
503 case 2:
504 __raw_writew((u16)val, addr);
505 __raw_readw(addr);
506 break;
507
508 case 4:
509 __raw_writel(val, addr);
510 __raw_readl(addr);
511 break;
512 }
513
514 v3_close_config_window(); 468 v3_close_config_window();
515 raw_spin_unlock_irqrestore(&v3_lock, flags); 469 return ret;
516
517 return PCIBIOS_SUCCESSFUL;
518} 470}
519 471
520static struct pci_ops pci_v3_ops = { 472static struct pci_ops pci_v3_ops = {
473 .map_bus = v3_open_config_window,
521 .read = v3_read_config, 474 .read = v3_read_config,
522 .write = v3_write_config, 475 .write = v3_write_config,
523}; 476};
@@ -658,7 +611,6 @@ static int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
658 */ 611 */
659static void __init pci_v3_preinit(void) 612static void __init pci_v3_preinit(void)
660{ 613{
661 unsigned long flags;
662 unsigned int temp; 614 unsigned int temp;
663 phys_addr_t io_address = pci_pio_to_address(io_mem.start); 615 phys_addr_t io_address = pci_pio_to_address(io_mem.start);
664 616
@@ -672,8 +624,6 @@ static void __init pci_v3_preinit(void)
672 hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); 624 hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
673 hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); 625 hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
674 626
675 raw_spin_lock_irqsave(&v3_lock, flags);
676
677 /* 627 /*
678 * Unlock V3 registers, but only if they were previously locked. 628 * Unlock V3 registers, but only if they were previously locked.
679 */ 629 */
@@ -736,8 +686,6 @@ static void __init pci_v3_preinit(void)
736 v3_writew(V3_LB_CFG, v3_readw(V3_LB_CFG) | (1 << 10)); 686 v3_writew(V3_LB_CFG, v3_readw(V3_LB_CFG) | (1 << 10));
737 v3_writeb(V3_LB_IMASK, 0x28); 687 v3_writeb(V3_LB_IMASK, 0x28);
738 __raw_writel(3, ap_syscon_base + INTEGRATOR_SC_PCIENABLE_OFFSET); 688 __raw_writel(3, ap_syscon_base + INTEGRATOR_SC_PCIENABLE_OFFSET);
739
740 raw_spin_unlock_irqrestore(&v3_lock, flags);
741} 689}
742 690
743static void __init pci_v3_postinit(void) 691static void __init pci_v3_postinit(void)
diff --git a/arch/arm/mach-ks8695/pci.c b/arch/arm/mach-ks8695/pci.c
index bb18193b4bac..c1bc4c3716ed 100644
--- a/arch/arm/mach-ks8695/pci.c
+++ b/arch/arm/mach-ks8695/pci.c
@@ -38,8 +38,6 @@
38 38
39 39
40static int pci_dbg; 40static int pci_dbg;
41static int pci_cfg_dbg;
42
43 41
44static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsigned int where) 42static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsigned int where)
45{ 43{
@@ -59,75 +57,11 @@ static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsi
59 } 57 }
60} 58}
61 59
62 60static void __iomem *ks8695_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
63/* 61 int where)
64 * The KS8695 datasheet prohibits anything other than 32bit accesses
65 * to the IO registers, so all our configuration must be done with
66 * 32bit operations, and the correct bit masking and shifting.
67 */
68
69static int ks8695_pci_readconfig(struct pci_bus *bus,
70 unsigned int devfn, int where, int size, u32 *value)
71{
72 ks8695_pci_setupconfig(bus->number, devfn, where);
73
74 *value = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
75
76 switch (size) {
77 case 4:
78 break;
79 case 2:
80 *value = *value >> ((where & 2) * 8);
81 *value &= 0xffff;
82 break;
83 case 1:
84 *value = *value >> ((where & 3) * 8);
85 *value &= 0xff;
86 break;
87 }
88
89 if (pci_cfg_dbg) {
90 printk("read: %d,%08x,%02x,%d: %08x (%08x)\n",
91 bus->number, devfn, where, size, *value,
92 __raw_readl(KS8695_PCI_VA + KS8695_PBCD));
93 }
94
95 return PCIBIOS_SUCCESSFUL;
96}
97
98static int ks8695_pci_writeconfig(struct pci_bus *bus,
99 unsigned int devfn, int where, int size, u32 value)
100{ 62{
101 unsigned long tmp;
102
103 if (pci_cfg_dbg) {
104 printk("write: %d,%08x,%02x,%d: %08x\n",
105 bus->number, devfn, where, size, value);
106 }
107
108 ks8695_pci_setupconfig(bus->number, devfn, where); 63 ks8695_pci_setupconfig(bus->number, devfn, where);
109 64 return KS8695_PCI_VA + KS8695_PBCD;
110 switch (size) {
111 case 4:
112 __raw_writel(value, KS8695_PCI_VA + KS8695_PBCD);
113 break;
114 case 2:
115 tmp = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
116 tmp &= ~(0xffff << ((where & 2) * 8));
117 tmp |= value << ((where & 2) * 8);
118
119 __raw_writel(tmp, KS8695_PCI_VA + KS8695_PBCD);
120 break;
121 case 1:
122 tmp = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
123 tmp &= ~(0xff << ((where & 3) * 8));
124 tmp |= value << ((where & 3) * 8);
125
126 __raw_writel(tmp, KS8695_PCI_VA + KS8695_PBCD);
127 break;
128 }
129
130 return PCIBIOS_SUCCESSFUL;
131} 65}
132 66
133static void ks8695_local_writeconfig(int where, u32 value) 67static void ks8695_local_writeconfig(int where, u32 value)
@@ -137,8 +71,9 @@ static void ks8695_local_writeconfig(int where, u32 value)
137} 71}
138 72
139static struct pci_ops ks8695_pci_ops = { 73static struct pci_ops ks8695_pci_ops = {
140 .read = ks8695_pci_readconfig, 74 .map_bus = ks8695_pci_map_bus,
141 .write = ks8695_pci_writeconfig, 75 .read = pci_generic_config_read32,
76 .write = pci_generic_config_write32,
142}; 77};
143 78
144static struct resource pci_mem = { 79static struct resource pci_mem = {
diff --git a/arch/arm/mach-sa1100/pci-nanoengine.c b/arch/arm/mach-sa1100/pci-nanoengine.c
index b704433c529c..d7ae8d50f6d8 100644
--- a/arch/arm/mach-sa1100/pci-nanoengine.c
+++ b/arch/arm/mach-sa1100/pci-nanoengine.c
@@ -22,7 +22,6 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/spinlock.h>
26 25
27#include <asm/mach/pci.h> 26#include <asm/mach/pci.h>
28#include <asm/mach-types.h> 27#include <asm/mach-types.h>
@@ -30,97 +29,20 @@
30#include <mach/nanoengine.h> 29#include <mach/nanoengine.h>
31#include <mach/hardware.h> 30#include <mach/hardware.h>
32 31
33static DEFINE_SPINLOCK(nano_lock); 32static void __iomem *nanoengine_pci_map_bus(struct pci_bus *bus,
34 33 unsigned int devfn, int where)
35static int nanoengine_get_pci_address(struct pci_bus *bus,
36 unsigned int devfn, int where, void __iomem **address)
37{ 34{
38 int ret = PCIBIOS_DEVICE_NOT_FOUND; 35 if (bus->number != 0 || (devfn >> 3) != 0)
39 unsigned int busnr = bus->number; 36 return NULL;
40 37
41 *address = (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT + 38 return (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT +
42 ((bus->number << 16) | (devfn << 8) | (where & ~3)); 39 ((bus->number << 16) | (devfn << 8) | (where & ~3));
43
44 ret = (busnr > 255 || devfn > 255 || where > 255) ?
45 PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
46
47 return ret;
48}
49
50static int nanoengine_read_config(struct pci_bus *bus, unsigned int devfn, int where,
51 int size, u32 *val)
52{
53 int ret;
54 void __iomem *address;
55 unsigned long flags;
56 u32 v;
57
58 /* nanoEngine PCI bridge does not return -1 for a non-existing
59 * device. We must fake the answer. We know that the only valid
60 * device is device zero at bus 0, which is the network chip. */
61 if (bus->number != 0 || (devfn >> 3) != 0) {
62 v = -1;
63 nanoengine_get_pci_address(bus, devfn, where, &address);
64 goto exit_function;
65 }
66
67 spin_lock_irqsave(&nano_lock, flags);
68
69 ret = nanoengine_get_pci_address(bus, devfn, where, &address);
70 if (ret != PCIBIOS_SUCCESSFUL)
71 return ret;
72 v = __raw_readl(address);
73
74 spin_unlock_irqrestore(&nano_lock, flags);
75
76 v >>= ((where & 3) * 8);
77 v &= (unsigned long)(-1) >> ((4 - size) * 8);
78
79exit_function:
80 *val = v;
81 return PCIBIOS_SUCCESSFUL;
82}
83
84static int nanoengine_write_config(struct pci_bus *bus, unsigned int devfn, int where,
85 int size, u32 val)
86{
87 int ret;
88 void __iomem *address;
89 unsigned long flags;
90 unsigned shift;
91 u32 v;
92
93 shift = (where & 3) * 8;
94
95 spin_lock_irqsave(&nano_lock, flags);
96
97 ret = nanoengine_get_pci_address(bus, devfn, where, &address);
98 if (ret != PCIBIOS_SUCCESSFUL)
99 return ret;
100 v = __raw_readl(address);
101 switch (size) {
102 case 1:
103 v &= ~(0xFF << shift);
104 v |= val << shift;
105 break;
106 case 2:
107 v &= ~(0xFFFF << shift);
108 v |= val << shift;
109 break;
110 case 4:
111 v = val;
112 break;
113 }
114 __raw_writel(v, address);
115
116 spin_unlock_irqrestore(&nano_lock, flags);
117
118 return PCIBIOS_SUCCESSFUL;
119} 40}
120 41
121static struct pci_ops pci_nano_ops = { 42static struct pci_ops pci_nano_ops = {
122 .read = nanoengine_read_config, 43 .map_bus = nanoengine_pci_map_bus,
123 .write = nanoengine_write_config, 44 .read = pci_generic_config_read32,
45 .write = pci_generic_config_write32,
124}; 46};
125 47
126static int __init pci_nanoengine_map_irq(const struct pci_dev *dev, u8 slot, 48static int __init pci_nanoengine_map_irq(const struct pci_dev *dev, u8 slot,
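
The cns3xxx, Integrator/V3, KS8695 and nanoEngine conversions above all follow the same recipe: the driver keeps only a .map_bus() hook and delegates the actual accesses to the new pci_generic_config_* helpers in drivers/pci/access.c. A hedged kernel-context sketch of the resulting minimal pci_ops (the config-window math is made up for illustration):

    /* Minimal host-driver ops using the generic config accessors.
     * example_cfg_base stands in for an ioremap()ed config window. */
    #include <linux/pci.h>

    static void __iomem *example_cfg_base;

    static void __iomem *example_map_bus(struct pci_bus *bus,
                                         unsigned int devfn, int where)
    {
            /* Returning NULL makes the generic accessors fill in ~0
             * and report PCIBIOS_DEVICE_NOT_FOUND. */
            if (bus->number > 0)
                    return NULL;

            return example_cfg_base + (devfn << 8) + (where & ~3);
    }

    static struct pci_ops example_pci_ops = {
            .map_bus = example_map_bus,
            .read    = pci_generic_config_read32,  /* 32-bit-only hardware */
            .write   = pci_generic_config_write32,
    };
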
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c7ca936ebd99..263a2044c65b 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -29,10 +29,10 @@
29 29
30struct start_info _xen_start_info; 30struct start_info _xen_start_info;
31struct start_info *xen_start_info = &_xen_start_info; 31struct start_info *xen_start_info = &_xen_start_info;
32EXPORT_SYMBOL_GPL(xen_start_info); 32EXPORT_SYMBOL(xen_start_info);
33 33
34enum xen_domain_type xen_domain_type = XEN_NATIVE; 34enum xen_domain_type xen_domain_type = XEN_NATIVE;
35EXPORT_SYMBOL_GPL(xen_domain_type); 35EXPORT_SYMBOL(xen_domain_type);
36 36
37struct shared_info xen_dummy_shared_info; 37struct shared_info xen_dummy_shared_info;
38struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info; 38struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 351b24a979d4..793551d15f1d 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -149,7 +149,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
149EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); 149EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
150 150
151struct dma_map_ops *xen_dma_ops; 151struct dma_map_ops *xen_dma_ops;
152EXPORT_SYMBOL_GPL(xen_dma_ops); 152EXPORT_SYMBOL(xen_dma_ops);
153 153
154static struct dma_map_ops xen_swiotlb_dma_ops = { 154static struct dma_map_ops xen_swiotlb_dma_ops = {
155 .mapping_error = xen_swiotlb_dma_mapping_error, 155 .mapping_error = xen_swiotlb_dma_mapping_error,
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 054857776254..cb7a14c5cd69 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -102,7 +102,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
102EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); 102EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
103 103
104int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 104int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
105 struct gnttab_map_grant_ref *kmap_ops, 105 struct gnttab_unmap_grant_ref *kunmap_ops,
106 struct page **pages, unsigned int count) 106 struct page **pages, unsigned int count)
107{ 107{
108 int i; 108 int i;
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index ce5836c14ec1..6f93c24ca801 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -46,25 +46,3 @@ int pcibios_add_device(struct pci_dev *dev)
46 46
47 return 0; 47 return 0;
48} 48}
49
50
51#ifdef CONFIG_PCI_DOMAINS_GENERIC
52static bool dt_domain_found = false;
53
54void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
55{
56 int domain = of_get_pci_domain_nr(parent->of_node);
57
58 if (domain >= 0) {
59 dt_domain_found = true;
60 } else if (dt_domain_found == true) {
61 dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
62 parent->of_node->full_name);
63 return;
64 } else {
65 domain = pci_get_new_domain_nr();
66 }
67
68 bus->domain_nr = domain;
69}
70#endif
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index efa5d65b0007..b073f4d771a5 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -168,8 +168,8 @@ static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int whe
168} 168}
169 169
170static struct pci_ops pci_direct_frv = { 170static struct pci_ops pci_direct_frv = {
171 pci_frv_read_config, 171 .read = pci_frv_read_config,
172 pci_frv_write_config, 172 .write = pci_frv_write_config,
173}; 173};
174 174
175/* 175/*
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 8b9318d311a0..bd09bf74f187 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -69,10 +69,10 @@ static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
69 status = acpi_resource_to_address64(resource, &addr); 69 status = acpi_resource_to_address64(resource, &addr);
70 if (ACPI_SUCCESS(status) && 70 if (ACPI_SUCCESS(status) &&
71 addr.resource_type == ACPI_MEMORY_RANGE && 71 addr.resource_type == ACPI_MEMORY_RANGE &&
72 addr.address_length && 72 addr.address.address_length &&
73 addr.producer_consumer == ACPI_CONSUMER) { 73 addr.producer_consumer == ACPI_CONSUMER) {
74 space->base = addr.minimum; 74 space->base = addr.address.minimum;
75 space->length = addr.address_length; 75 space->length = addr.address.address_length;
76 return AE_CTRL_TERMINATE; 76 return AE_CTRL_TERMINATE;
77 } 77 }
78 return AE_OK; /* keep looking */ 78 return AE_OK; /* keep looking */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index e795cb848154..2c4498919d3c 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -380,9 +380,6 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
380 380
381static int __init acpi_parse_madt(struct acpi_table_header *table) 381static int __init acpi_parse_madt(struct acpi_table_header *table)
382{ 382{
383 if (!table)
384 return -EINVAL;
385
386 acpi_madt = (struct acpi_table_madt *)table; 383 acpi_madt = (struct acpi_table_madt *)table;
387 384
388 acpi_madt_rev = acpi_madt->header.revision; 385 acpi_madt_rev = acpi_madt->header.revision;
@@ -645,9 +642,6 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
645 struct acpi_table_header *fadt_header; 642 struct acpi_table_header *fadt_header;
646 struct acpi_table_fadt *fadt; 643 struct acpi_table_fadt *fadt;
647 644
648 if (!table)
649 return -EINVAL;
650
651 fadt_header = (struct acpi_table_header *)table; 645 fadt_header = (struct acpi_table_header *)table;
652 if (fadt_header->revision != 3) 646 if (fadt_header->revision != 3)
653 return -ENODEV; /* Only deal with ACPI 2.0 FADT */ 647 return -ENODEV; /* Only deal with ACPI 2.0 FADT */
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 900cc93e5409..48cc65705db4 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -188,12 +188,12 @@ static u64 add_io_space(struct pci_root_info *info,
188 188
189 name = (char *)(iospace + 1); 189 name = (char *)(iospace + 1);
190 190
191 min = addr->minimum; 191 min = addr->address.minimum;
192 max = min + addr->address_length - 1; 192 max = min + addr->address.address_length - 1;
193 if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION) 193 if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
194 sparse = 1; 194 sparse = 1;
195 195
196 space_nr = new_space(addr->translation_offset, sparse); 196 space_nr = new_space(addr->address.translation_offset, sparse);
197 if (space_nr == ~0) 197 if (space_nr == ~0)
198 goto free_resource; 198 goto free_resource;
199 199
@@ -247,7 +247,7 @@ static acpi_status resource_to_window(struct acpi_resource *resource,
247 if (ACPI_SUCCESS(status) && 247 if (ACPI_SUCCESS(status) &&
248 (addr->resource_type == ACPI_MEMORY_RANGE || 248 (addr->resource_type == ACPI_MEMORY_RANGE ||
249 addr->resource_type == ACPI_IO_RANGE) && 249 addr->resource_type == ACPI_IO_RANGE) &&
250 addr->address_length && 250 addr->address.address_length &&
251 addr->producer_consumer == ACPI_PRODUCER) 251 addr->producer_consumer == ACPI_PRODUCER)
252 return AE_OK; 252 return AE_OK;
253 253
@@ -284,7 +284,7 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
284 if (addr.resource_type == ACPI_MEMORY_RANGE) { 284 if (addr.resource_type == ACPI_MEMORY_RANGE) {
285 flags = IORESOURCE_MEM; 285 flags = IORESOURCE_MEM;
286 root = &iomem_resource; 286 root = &iomem_resource;
287 offset = addr.translation_offset; 287 offset = addr.address.translation_offset;
288 } else if (addr.resource_type == ACPI_IO_RANGE) { 288 } else if (addr.resource_type == ACPI_IO_RANGE) {
289 flags = IORESOURCE_IO; 289 flags = IORESOURCE_IO;
290 root = &ioport_resource; 290 root = &ioport_resource;
@@ -297,8 +297,8 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
297 resource = &info->res[info->res_num]; 297 resource = &info->res[info->res_num];
298 resource->name = info->name; 298 resource->name = info->name;
299 resource->flags = flags; 299 resource->flags = flags;
300 resource->start = addr.minimum + offset; 300 resource->start = addr.address.minimum + offset;
301 resource->end = resource->start + addr.address_length - 1; 301 resource->end = resource->start + addr.address.address_length - 1;
302 info->res_offset[info->res_num] = offset; 302 info->res_offset[info->res_num] = offset;
303 303
304 if (insert_resource(root, resource)) { 304 if (insert_resource(root, resource)) {
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index 95022b04b62d..264db1126803 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -170,7 +170,6 @@ repeat:
170 if (acia_stat & ACIA_RDRF) { 170 if (acia_stat & ACIA_RDRF) {
171 /* received a character */ 171 /* received a character */
172 scancode = acia.key_data; /* get it or reset the ACIA, I'll get it! */ 172 scancode = acia.key_data; /* get it or reset the ACIA, I'll get it! */
173 tasklet_schedule(&keyboard_tasklet);
174 interpret_scancode: 173 interpret_scancode:
175 switch (kb_state.state) { 174 switch (kb_state.state) {
176 case KEYBOARD: 175 case KEYBOARD:
@@ -430,14 +429,6 @@ void ikbd_mouse_y0_top(void)
430} 429}
431EXPORT_SYMBOL(ikbd_mouse_y0_top); 430EXPORT_SYMBOL(ikbd_mouse_y0_top);
432 431
433/* Resume */
434void ikbd_resume(void)
435{
436 static const char cmd[1] = { 0x11 };
437
438 ikbd_write(cmd, 1);
439}
440
441/* Disable mouse */ 432/* Disable mouse */
442void ikbd_mouse_disable(void) 433void ikbd_mouse_disable(void)
443{ 434{
@@ -447,14 +438,6 @@ void ikbd_mouse_disable(void)
447} 438}
448EXPORT_SYMBOL(ikbd_mouse_disable); 439EXPORT_SYMBOL(ikbd_mouse_disable);
449 440
450/* Pause output */
451void ikbd_pause(void)
452{
453 static const char cmd[1] = { 0x13 };
454
455 ikbd_write(cmd, 1);
456}
457
458/* Set joystick event reporting */ 441/* Set joystick event reporting */
459void ikbd_joystick_event_on(void) 442void ikbd_joystick_event_on(void)
460{ 443{
@@ -502,56 +485,6 @@ void ikbd_joystick_disable(void)
502 ikbd_write(cmd, 1); 485 ikbd_write(cmd, 1);
503} 486}
504 487
505/* Time-of-day clock set */
506void ikbd_clock_set(int year, int month, int day, int hour, int minute, int second)
507{
508 char cmd[7] = { 0x1B, year, month, day, hour, minute, second };
509
510 ikbd_write(cmd, 7);
511}
512
513/* Interrogate time-of-day clock */
514void ikbd_clock_get(int *year, int *month, int *day, int *hour, int *minute, int second)
515{
516 static const char cmd[1] = { 0x1C };
517
518 ikbd_write(cmd, 1);
519}
520
521/* Memory load */
522void ikbd_mem_write(int address, int size, char *data)
523{
524 panic("Attempt to write data into keyboard memory");
525}
526
527/* Memory read */
528void ikbd_mem_read(int address, char data[6])
529{
530 char cmd[3] = { 0x21, address>>8, address&0xFF };
531
532 ikbd_write(cmd, 3);
533
534 /* receive data and put it in data */
535}
536
537/* Controller execute */
538void ikbd_exec(int address)
539{
540 char cmd[3] = { 0x22, address>>8, address&0xFF };
541
542 ikbd_write(cmd, 3);
543}
544
545/* Status inquiries (0x87-0x9A) not yet implemented */
546
547/* Set the state of the caps lock led. */
548void atari_kbd_leds(unsigned int leds)
549{
550 char cmd[6] = {32, 0, 4, 1, 254 + ((leds & 4) != 0), 0};
551
552 ikbd_write(cmd, 6);
553}
554
555/* 488/*
556 * The original code sometimes left the interrupt line of 489 * The original code sometimes left the interrupt line of
557 * the ACIAs low forever. I hope, it is fixed now. 490 * the ACIAs low forever. I hope, it is fixed now.
@@ -571,9 +504,8 @@ int atari_keyb_init(void)
571 kb_state.state = KEYBOARD; 504 kb_state.state = KEYBOARD;
572 kb_state.len = 0; 505 kb_state.len = 0;
573 506
574 error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, 507 error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, 0,
575 IRQ_TYPE_SLOW, "keyboard,mouse,MIDI", 508 "keyboard,mouse,MIDI", atari_keyboard_interrupt);
576 atari_keyboard_interrupt);
577 if (error) 509 if (error)
578 return error; 510 return error;
579 511
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index e5a66596b116..ba65f942d0c7 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(stdma_islocked);
198void __init stdma_init(void) 198void __init stdma_init(void)
199{ 199{
200 stdma_isr = NULL; 200 stdma_isr = NULL;
201 if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, 201 if (request_irq(IRQ_MFP_FDC, stdma_int, IRQF_SHARED,
202 "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) 202 "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int))
203 pr_err("Couldn't register ST-DMA interrupt\n"); 203 pr_err("Couldn't register ST-DMA interrupt\n");
204} 204}
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index da8f981c36d6..c549b48174ec 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -32,8 +32,7 @@ atari_sched_init(irq_handler_t timer_routine)
32 /* start timer C, div = 1:100 */ 32 /* start timer C, div = 1:100 */
33 st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60; 33 st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
34 /* install interrupt service routine for MFP Timer C */ 34 /* install interrupt service routine for MFP Timer C */
35 if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW, 35 if (request_irq(IRQ_MFP_TIMC, timer_routine, 0, "timer", timer_routine))
36 "timer", timer_routine))
37 pr_err("Couldn't register timer interrupt\n"); 36 pr_err("Couldn't register timer interrupt\n");
38} 37}
39 38
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 399df883c8bb..1a10a08ebec7 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -36,6 +36,7 @@ CONFIG_AMIGA_PCMCIA=y
36CONFIG_ZORRO_NAMES=y 36CONFIG_ZORRO_NAMES=y
37# CONFIG_COMPACTION is not set 37# CONFIG_COMPACTION is not set
38CONFIG_CLEANCACHE=y 38CONFIG_CLEANCACHE=y
39CONFIG_ZPOOL=m
39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 40# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
40CONFIG_BINFMT_AOUT=m 41CONFIG_BINFMT_AOUT=m
41CONFIG_BINFMT_MISC=m 42CONFIG_BINFMT_MISC=m
@@ -55,6 +56,8 @@ CONFIG_NET_IPIP=m
55CONFIG_NET_IPGRE_DEMUX=m 56CONFIG_NET_IPGRE_DEMUX=m
56CONFIG_NET_IPGRE=m 57CONFIG_NET_IPGRE=m
57CONFIG_NET_IPVTI=m 58CONFIG_NET_IPVTI=m
59CONFIG_NET_FOU_IP_TUNNELS=y
60CONFIG_GENEVE=m
58CONFIG_INET_AH=m 61CONFIG_INET_AH=m
59CONFIG_INET_ESP=m 62CONFIG_INET_ESP=m
60CONFIG_INET_IPCOMP=m 63CONFIG_INET_IPCOMP=m
@@ -96,6 +99,8 @@ CONFIG_NFT_HASH=m
96CONFIG_NFT_COUNTER=m 99CONFIG_NFT_COUNTER=m
97CONFIG_NFT_LOG=m 100CONFIG_NFT_LOG=m
98CONFIG_NFT_LIMIT=m 101CONFIG_NFT_LIMIT=m
102CONFIG_NFT_MASQ=m
103CONFIG_NFT_REDIR=m
99CONFIG_NFT_NAT=m 104CONFIG_NFT_NAT=m
100CONFIG_NFT_QUEUE=m 105CONFIG_NFT_QUEUE=m
101CONFIG_NFT_REJECT=m 106CONFIG_NFT_REJECT=m
@@ -142,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
142CONFIG_NETFILTER_XT_MATCH_OSF=m 147CONFIG_NETFILTER_XT_MATCH_OSF=m
143CONFIG_NETFILTER_XT_MATCH_OWNER=m 148CONFIG_NETFILTER_XT_MATCH_OWNER=m
144CONFIG_NETFILTER_XT_MATCH_POLICY=m 149CONFIG_NETFILTER_XT_MATCH_POLICY=m
150CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
145CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 151CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
146CONFIG_NETFILTER_XT_MATCH_QUOTA=m 152CONFIG_NETFILTER_XT_MATCH_QUOTA=m
147CONFIG_NETFILTER_XT_MATCH_RATEEST=m 153CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -163,6 +169,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
163CONFIG_IP_SET_HASH_IPPORT=m 169CONFIG_IP_SET_HASH_IPPORT=m
164CONFIG_IP_SET_HASH_IPPORTIP=m 170CONFIG_IP_SET_HASH_IPPORTIP=m
165CONFIG_IP_SET_HASH_IPPORTNET=m 171CONFIG_IP_SET_HASH_IPPORTNET=m
172CONFIG_IP_SET_HASH_MAC=m
166CONFIG_IP_SET_HASH_NETPORTNET=m 173CONFIG_IP_SET_HASH_NETPORTNET=m
167CONFIG_IP_SET_HASH_NET=m 174CONFIG_IP_SET_HASH_NET=m
168CONFIG_IP_SET_HASH_NETNET=m 175CONFIG_IP_SET_HASH_NETNET=m
@@ -170,9 +177,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
170CONFIG_IP_SET_HASH_NETIFACE=m 177CONFIG_IP_SET_HASH_NETIFACE=m
171CONFIG_IP_SET_LIST_SET=m 178CONFIG_IP_SET_LIST_SET=m
172CONFIG_NF_CONNTRACK_IPV4=m 179CONFIG_NF_CONNTRACK_IPV4=m
180CONFIG_NF_LOG_ARP=m
173CONFIG_NFT_CHAIN_ROUTE_IPV4=m 181CONFIG_NFT_CHAIN_ROUTE_IPV4=m
174CONFIG_NFT_CHAIN_NAT_IPV4=m
175CONFIG_NF_TABLES_ARP=m 182CONFIG_NF_TABLES_ARP=m
183CONFIG_NFT_CHAIN_NAT_IPV4=m
184CONFIG_NFT_MASQ_IPV4=m
185CONFIG_NFT_REDIR_IPV4=m
176CONFIG_IP_NF_IPTABLES=m 186CONFIG_IP_NF_IPTABLES=m
177CONFIG_IP_NF_MATCH_AH=m 187CONFIG_IP_NF_MATCH_AH=m
178CONFIG_IP_NF_MATCH_ECN=m 188CONFIG_IP_NF_MATCH_ECN=m
@@ -181,8 +191,7 @@ CONFIG_IP_NF_MATCH_TTL=m
181CONFIG_IP_NF_FILTER=m 191CONFIG_IP_NF_FILTER=m
182CONFIG_IP_NF_TARGET_REJECT=m 192CONFIG_IP_NF_TARGET_REJECT=m
183CONFIG_IP_NF_TARGET_SYNPROXY=m 193CONFIG_IP_NF_TARGET_SYNPROXY=m
184CONFIG_IP_NF_TARGET_ULOG=m 194CONFIG_IP_NF_NAT=m
185CONFIG_NF_NAT_IPV4=m
186CONFIG_IP_NF_TARGET_MASQUERADE=m 195CONFIG_IP_NF_TARGET_MASQUERADE=m
187CONFIG_IP_NF_TARGET_NETMAP=m 196CONFIG_IP_NF_TARGET_NETMAP=m
188CONFIG_IP_NF_TARGET_REDIRECT=m 197CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -197,6 +206,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
197CONFIG_NF_CONNTRACK_IPV6=m 206CONFIG_NF_CONNTRACK_IPV6=m
198CONFIG_NFT_CHAIN_ROUTE_IPV6=m 207CONFIG_NFT_CHAIN_ROUTE_IPV6=m
199CONFIG_NFT_CHAIN_NAT_IPV6=m 208CONFIG_NFT_CHAIN_NAT_IPV6=m
209CONFIG_NFT_MASQ_IPV6=m
210CONFIG_NFT_REDIR_IPV6=m
200CONFIG_IP6_NF_IPTABLES=m 211CONFIG_IP6_NF_IPTABLES=m
201CONFIG_IP6_NF_MATCH_AH=m 212CONFIG_IP6_NF_MATCH_AH=m
202CONFIG_IP6_NF_MATCH_EUI64=m 213CONFIG_IP6_NF_MATCH_EUI64=m
@@ -213,17 +224,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
213CONFIG_IP6_NF_TARGET_SYNPROXY=m 224CONFIG_IP6_NF_TARGET_SYNPROXY=m
214CONFIG_IP6_NF_MANGLE=m 225CONFIG_IP6_NF_MANGLE=m
215CONFIG_IP6_NF_RAW=m 226CONFIG_IP6_NF_RAW=m
216CONFIG_NF_NAT_IPV6=m 227CONFIG_IP6_NF_NAT=m
217CONFIG_IP6_NF_TARGET_MASQUERADE=m 228CONFIG_IP6_NF_TARGET_MASQUERADE=m
218CONFIG_IP6_NF_TARGET_NPT=m 229CONFIG_IP6_NF_TARGET_NPT=m
219CONFIG_NF_TABLES_BRIDGE=m 230CONFIG_NF_TABLES_BRIDGE=m
231CONFIG_NFT_BRIDGE_META=m
232CONFIG_NFT_BRIDGE_REJECT=m
233CONFIG_NF_LOG_BRIDGE=m
234CONFIG_BRIDGE_NF_EBTABLES=m
235CONFIG_BRIDGE_EBT_BROUTE=m
236CONFIG_BRIDGE_EBT_T_FILTER=m
237CONFIG_BRIDGE_EBT_T_NAT=m
238CONFIG_BRIDGE_EBT_802_3=m
239CONFIG_BRIDGE_EBT_AMONG=m
240CONFIG_BRIDGE_EBT_ARP=m
241CONFIG_BRIDGE_EBT_IP=m
242CONFIG_BRIDGE_EBT_IP6=m
243CONFIG_BRIDGE_EBT_LIMIT=m
244CONFIG_BRIDGE_EBT_MARK=m
245CONFIG_BRIDGE_EBT_PKTTYPE=m
246CONFIG_BRIDGE_EBT_STP=m
247CONFIG_BRIDGE_EBT_VLAN=m
248CONFIG_BRIDGE_EBT_ARPREPLY=m
249CONFIG_BRIDGE_EBT_DNAT=m
250CONFIG_BRIDGE_EBT_MARK_T=m
251CONFIG_BRIDGE_EBT_REDIRECT=m
252CONFIG_BRIDGE_EBT_SNAT=m
253CONFIG_BRIDGE_EBT_LOG=m
254CONFIG_BRIDGE_EBT_NFLOG=m
220CONFIG_IP_DCCP=m 255CONFIG_IP_DCCP=m
221# CONFIG_IP_DCCP_CCID3 is not set 256# CONFIG_IP_DCCP_CCID3 is not set
222CONFIG_SCTP_COOKIE_HMAC_SHA1=y 257CONFIG_SCTP_COOKIE_HMAC_SHA1=y
223CONFIG_RDS=m 258CONFIG_RDS=m
224CONFIG_RDS_TCP=m 259CONFIG_RDS_TCP=m
225CONFIG_L2TP=m 260CONFIG_L2TP=m
261CONFIG_BRIDGE=m
226CONFIG_ATALK=m 262CONFIG_ATALK=m
263CONFIG_6LOWPAN=m
227CONFIG_DNS_RESOLVER=y 264CONFIG_DNS_RESOLVER=y
228CONFIG_BATMAN_ADV=m 265CONFIG_BATMAN_ADV=m
229CONFIG_BATMAN_ADV_DAT=y 266CONFIG_BATMAN_ADV_DAT=y
@@ -232,9 +269,10 @@ CONFIG_BATMAN_ADV_MCAST=y
232CONFIG_NETLINK_DIAG=m 269CONFIG_NETLINK_DIAG=m
233CONFIG_NET_MPLS_GSO=m 270CONFIG_NET_MPLS_GSO=m
234# CONFIG_WIRELESS is not set 271# CONFIG_WIRELESS is not set
272# CONFIG_UEVENT_HELPER is not set
235CONFIG_DEVTMPFS=y 273CONFIG_DEVTMPFS=y
274CONFIG_DEVTMPFS_MOUNT=y
236# CONFIG_FIRMWARE_IN_KERNEL is not set 275# CONFIG_FIRMWARE_IN_KERNEL is not set
237# CONFIG_FW_LOADER_USER_HELPER is not set
238CONFIG_CONNECTOR=m 276CONFIG_CONNECTOR=m
239CONFIG_PARPORT=m 277CONFIG_PARPORT=m
240CONFIG_PARPORT_AMIGA=m 278CONFIG_PARPORT_AMIGA=m
@@ -299,6 +337,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
299CONFIG_NET_TEAM_MODE_RANDOM=m 337CONFIG_NET_TEAM_MODE_RANDOM=m
300CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 338CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
301CONFIG_NET_TEAM_MODE_LOADBALANCE=m 339CONFIG_NET_TEAM_MODE_LOADBALANCE=m
340CONFIG_MACVLAN=m
341CONFIG_MACVTAP=m
342CONFIG_IPVLAN=m
302CONFIG_VXLAN=m 343CONFIG_VXLAN=m
303CONFIG_NETCONSOLE=m 344CONFIG_NETCONSOLE=m
304CONFIG_NETCONSOLE_DYNAMIC=y 345CONFIG_NETCONSOLE_DYNAMIC=y
@@ -316,6 +357,8 @@ CONFIG_ARIADNE=y
316CONFIG_HYDRA=y 357CONFIG_HYDRA=y
317CONFIG_APNE=y 358CONFIG_APNE=y
318CONFIG_ZORRO8390=y 359CONFIG_ZORRO8390=y
360# CONFIG_NET_VENDOR_QUALCOMM is not set
361# CONFIG_NET_VENDOR_ROCKER is not set
319# CONFIG_NET_VENDOR_SAMSUNG is not set 362# CONFIG_NET_VENDOR_SAMSUNG is not set
320# CONFIG_NET_VENDOR_SEEQ is not set 363# CONFIG_NET_VENDOR_SEEQ is not set
321# CONFIG_NET_VENDOR_SMSC is not set 364# CONFIG_NET_VENDOR_SMSC is not set
@@ -371,6 +414,7 @@ CONFIG_HID=m
371CONFIG_HIDRAW=y 414CONFIG_HIDRAW=y
372CONFIG_UHID=m 415CONFIG_UHID=m
373# CONFIG_HID_GENERIC is not set 416# CONFIG_HID_GENERIC is not set
417# CONFIG_HID_PLANTRONICS is not set
374# CONFIG_USB_SUPPORT is not set 418# CONFIG_USB_SUPPORT is not set
375CONFIG_RTC_CLASS=y 419CONFIG_RTC_CLASS=y
376CONFIG_RTC_DRV_MSM6242=m 420CONFIG_RTC_DRV_MSM6242=m
@@ -392,6 +436,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
392CONFIG_AUTOFS4_FS=m 436CONFIG_AUTOFS4_FS=m
393CONFIG_FUSE_FS=m 437CONFIG_FUSE_FS=m
394CONFIG_CUSE=m 438CONFIG_CUSE=m
439CONFIG_OVERLAY_FS=m
395CONFIG_ISO9660_FS=y 440CONFIG_ISO9660_FS=y
396CONFIG_JOLIET=y 441CONFIG_JOLIET=y
397CONFIG_ZISOFS=y 442CONFIG_ZISOFS=y
@@ -407,6 +452,7 @@ CONFIG_HFS_FS=m
407CONFIG_HFSPLUS_FS=m 452CONFIG_HFSPLUS_FS=m
408CONFIG_CRAMFS=m 453CONFIG_CRAMFS=m
409CONFIG_SQUASHFS=m 454CONFIG_SQUASHFS=m
455CONFIG_SQUASHFS_LZ4=y
410CONFIG_SQUASHFS_LZO=y 456CONFIG_SQUASHFS_LZO=y
411CONFIG_MINIX_FS=m 457CONFIG_MINIX_FS=m
412CONFIG_OMFS_FS=m 458CONFIG_OMFS_FS=m
@@ -476,10 +522,18 @@ CONFIG_DLM=m
476CONFIG_MAGIC_SYSRQ=y 522CONFIG_MAGIC_SYSRQ=y
477CONFIG_ASYNC_RAID6_TEST=m 523CONFIG_ASYNC_RAID6_TEST=m
478CONFIG_TEST_STRING_HELPERS=m 524CONFIG_TEST_STRING_HELPERS=m
525CONFIG_TEST_KSTRTOX=m
526CONFIG_TEST_LKM=m
527CONFIG_TEST_USER_COPY=m
528CONFIG_TEST_BPF=m
529CONFIG_TEST_FIRMWARE=m
530CONFIG_TEST_UDELAY=m
531CONFIG_EARLY_PRINTK=y
479CONFIG_ENCRYPTED_KEYS=m 532CONFIG_ENCRYPTED_KEYS=m
480CONFIG_CRYPTO_MANAGER=y 533CONFIG_CRYPTO_MANAGER=y
481CONFIG_CRYPTO_USER=m 534CONFIG_CRYPTO_USER=m
482CONFIG_CRYPTO_CRYPTD=m 535CONFIG_CRYPTO_CRYPTD=m
536CONFIG_CRYPTO_MCRYPTD=m
483CONFIG_CRYPTO_TEST=m 537CONFIG_CRYPTO_TEST=m
484CONFIG_CRYPTO_CCM=m 538CONFIG_CRYPTO_CCM=m
485CONFIG_CRYPTO_GCM=m 539CONFIG_CRYPTO_GCM=m
@@ -514,13 +568,10 @@ CONFIG_CRYPTO_LZO=m
514CONFIG_CRYPTO_LZ4=m 568CONFIG_CRYPTO_LZ4=m
515CONFIG_CRYPTO_LZ4HC=m 569CONFIG_CRYPTO_LZ4HC=m
516# CONFIG_CRYPTO_ANSI_CPRNG is not set 570# CONFIG_CRYPTO_ANSI_CPRNG is not set
571CONFIG_CRYPTO_DRBG_MENU=m
572CONFIG_CRYPTO_DRBG_HASH=y
573CONFIG_CRYPTO_DRBG_CTR=y
517CONFIG_CRYPTO_USER_API_HASH=m 574CONFIG_CRYPTO_USER_API_HASH=m
518CONFIG_CRYPTO_USER_API_SKCIPHER=m 575CONFIG_CRYPTO_USER_API_SKCIPHER=m
519# CONFIG_CRYPTO_HW is not set 576# CONFIG_CRYPTO_HW is not set
520CONFIG_XZ_DEC_X86=y
521CONFIG_XZ_DEC_POWERPC=y
522CONFIG_XZ_DEC_IA64=y
523CONFIG_XZ_DEC_ARM=y
524CONFIG_XZ_DEC_ARMTHUMB=y
525CONFIG_XZ_DEC_SPARC=y
526CONFIG_XZ_DEC_TEST=m 577CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index be16740c0749..7859a738c81e 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -34,6 +34,7 @@ CONFIG_M68060=y
34CONFIG_APOLLO=y 34CONFIG_APOLLO=y
35# CONFIG_COMPACTION is not set 35# CONFIG_COMPACTION is not set
36CONFIG_CLEANCACHE=y 36CONFIG_CLEANCACHE=y
37CONFIG_ZPOOL=m
37# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
38CONFIG_BINFMT_AOUT=m 39CONFIG_BINFMT_AOUT=m
39CONFIG_BINFMT_MISC=m 40CONFIG_BINFMT_MISC=m
@@ -53,6 +54,8 @@ CONFIG_NET_IPIP=m
53CONFIG_NET_IPGRE_DEMUX=m 54CONFIG_NET_IPGRE_DEMUX=m
54CONFIG_NET_IPGRE=m 55CONFIG_NET_IPGRE=m
55CONFIG_NET_IPVTI=m 56CONFIG_NET_IPVTI=m
57CONFIG_NET_FOU_IP_TUNNELS=y
58CONFIG_GENEVE=m
56CONFIG_INET_AH=m 59CONFIG_INET_AH=m
57CONFIG_INET_ESP=m 60CONFIG_INET_ESP=m
58CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
@@ -94,6 +97,8 @@ CONFIG_NFT_HASH=m
94CONFIG_NFT_COUNTER=m 97CONFIG_NFT_COUNTER=m
95CONFIG_NFT_LOG=m 98CONFIG_NFT_LOG=m
96CONFIG_NFT_LIMIT=m 99CONFIG_NFT_LIMIT=m
100CONFIG_NFT_MASQ=m
101CONFIG_NFT_REDIR=m
97CONFIG_NFT_NAT=m 102CONFIG_NFT_NAT=m
98CONFIG_NFT_QUEUE=m 103CONFIG_NFT_QUEUE=m
99CONFIG_NFT_REJECT=m 104CONFIG_NFT_REJECT=m
@@ -140,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
140CONFIG_NETFILTER_XT_MATCH_OSF=m 145CONFIG_NETFILTER_XT_MATCH_OSF=m
141CONFIG_NETFILTER_XT_MATCH_OWNER=m 146CONFIG_NETFILTER_XT_MATCH_OWNER=m
142CONFIG_NETFILTER_XT_MATCH_POLICY=m 147CONFIG_NETFILTER_XT_MATCH_POLICY=m
148CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
143CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 149CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
144CONFIG_NETFILTER_XT_MATCH_QUOTA=m 150CONFIG_NETFILTER_XT_MATCH_QUOTA=m
145CONFIG_NETFILTER_XT_MATCH_RATEEST=m 151CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -161,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
161CONFIG_IP_SET_HASH_IPPORT=m 167CONFIG_IP_SET_HASH_IPPORT=m
162CONFIG_IP_SET_HASH_IPPORTIP=m 168CONFIG_IP_SET_HASH_IPPORTIP=m
163CONFIG_IP_SET_HASH_IPPORTNET=m 169CONFIG_IP_SET_HASH_IPPORTNET=m
170CONFIG_IP_SET_HASH_MAC=m
164CONFIG_IP_SET_HASH_NETPORTNET=m 171CONFIG_IP_SET_HASH_NETPORTNET=m
165CONFIG_IP_SET_HASH_NET=m 172CONFIG_IP_SET_HASH_NET=m
166CONFIG_IP_SET_HASH_NETNET=m 173CONFIG_IP_SET_HASH_NETNET=m
@@ -168,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
168CONFIG_IP_SET_HASH_NETIFACE=m 175CONFIG_IP_SET_HASH_NETIFACE=m
169CONFIG_IP_SET_LIST_SET=m 176CONFIG_IP_SET_LIST_SET=m
170CONFIG_NF_CONNTRACK_IPV4=m 177CONFIG_NF_CONNTRACK_IPV4=m
178CONFIG_NF_LOG_ARP=m
171CONFIG_NFT_CHAIN_ROUTE_IPV4=m 179CONFIG_NFT_CHAIN_ROUTE_IPV4=m
172CONFIG_NFT_CHAIN_NAT_IPV4=m
173CONFIG_NF_TABLES_ARP=m 180CONFIG_NF_TABLES_ARP=m
181CONFIG_NFT_CHAIN_NAT_IPV4=m
182CONFIG_NFT_MASQ_IPV4=m
183CONFIG_NFT_REDIR_IPV4=m
174CONFIG_IP_NF_IPTABLES=m 184CONFIG_IP_NF_IPTABLES=m
175CONFIG_IP_NF_MATCH_AH=m 185CONFIG_IP_NF_MATCH_AH=m
176CONFIG_IP_NF_MATCH_ECN=m 186CONFIG_IP_NF_MATCH_ECN=m
@@ -179,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
179CONFIG_IP_NF_FILTER=m 189CONFIG_IP_NF_FILTER=m
180CONFIG_IP_NF_TARGET_REJECT=m 190CONFIG_IP_NF_TARGET_REJECT=m
181CONFIG_IP_NF_TARGET_SYNPROXY=m 191CONFIG_IP_NF_TARGET_SYNPROXY=m
182CONFIG_IP_NF_TARGET_ULOG=m 192CONFIG_IP_NF_NAT=m
183CONFIG_NF_NAT_IPV4=m
184CONFIG_IP_NF_TARGET_MASQUERADE=m 193CONFIG_IP_NF_TARGET_MASQUERADE=m
185CONFIG_IP_NF_TARGET_NETMAP=m 194CONFIG_IP_NF_TARGET_NETMAP=m
186CONFIG_IP_NF_TARGET_REDIRECT=m 195CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -195,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
195CONFIG_NF_CONNTRACK_IPV6=m 204CONFIG_NF_CONNTRACK_IPV6=m
196CONFIG_NFT_CHAIN_ROUTE_IPV6=m 205CONFIG_NFT_CHAIN_ROUTE_IPV6=m
197CONFIG_NFT_CHAIN_NAT_IPV6=m 206CONFIG_NFT_CHAIN_NAT_IPV6=m
207CONFIG_NFT_MASQ_IPV6=m
208CONFIG_NFT_REDIR_IPV6=m
198CONFIG_IP6_NF_IPTABLES=m 209CONFIG_IP6_NF_IPTABLES=m
199CONFIG_IP6_NF_MATCH_AH=m 210CONFIG_IP6_NF_MATCH_AH=m
200CONFIG_IP6_NF_MATCH_EUI64=m 211CONFIG_IP6_NF_MATCH_EUI64=m
@@ -211,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
211CONFIG_IP6_NF_TARGET_SYNPROXY=m 222CONFIG_IP6_NF_TARGET_SYNPROXY=m
212CONFIG_IP6_NF_MANGLE=m 223CONFIG_IP6_NF_MANGLE=m
213CONFIG_IP6_NF_RAW=m 224CONFIG_IP6_NF_RAW=m
214CONFIG_NF_NAT_IPV6=m 225CONFIG_IP6_NF_NAT=m
215CONFIG_IP6_NF_TARGET_MASQUERADE=m 226CONFIG_IP6_NF_TARGET_MASQUERADE=m
216CONFIG_IP6_NF_TARGET_NPT=m 227CONFIG_IP6_NF_TARGET_NPT=m
217CONFIG_NF_TABLES_BRIDGE=m 228CONFIG_NF_TABLES_BRIDGE=m
229CONFIG_NFT_BRIDGE_META=m
230CONFIG_NFT_BRIDGE_REJECT=m
231CONFIG_NF_LOG_BRIDGE=m
232CONFIG_BRIDGE_NF_EBTABLES=m
233CONFIG_BRIDGE_EBT_BROUTE=m
234CONFIG_BRIDGE_EBT_T_FILTER=m
235CONFIG_BRIDGE_EBT_T_NAT=m
236CONFIG_BRIDGE_EBT_802_3=m
237CONFIG_BRIDGE_EBT_AMONG=m
238CONFIG_BRIDGE_EBT_ARP=m
239CONFIG_BRIDGE_EBT_IP=m
240CONFIG_BRIDGE_EBT_IP6=m
241CONFIG_BRIDGE_EBT_LIMIT=m
242CONFIG_BRIDGE_EBT_MARK=m
243CONFIG_BRIDGE_EBT_PKTTYPE=m
244CONFIG_BRIDGE_EBT_STP=m
245CONFIG_BRIDGE_EBT_VLAN=m
246CONFIG_BRIDGE_EBT_ARPREPLY=m
247CONFIG_BRIDGE_EBT_DNAT=m
248CONFIG_BRIDGE_EBT_MARK_T=m
249CONFIG_BRIDGE_EBT_REDIRECT=m
250CONFIG_BRIDGE_EBT_SNAT=m
251CONFIG_BRIDGE_EBT_LOG=m
252CONFIG_BRIDGE_EBT_NFLOG=m
218CONFIG_IP_DCCP=m 253CONFIG_IP_DCCP=m
219# CONFIG_IP_DCCP_CCID3 is not set 254# CONFIG_IP_DCCP_CCID3 is not set
220CONFIG_SCTP_COOKIE_HMAC_SHA1=y 255CONFIG_SCTP_COOKIE_HMAC_SHA1=y
221CONFIG_RDS=m 256CONFIG_RDS=m
222CONFIG_RDS_TCP=m 257CONFIG_RDS_TCP=m
223CONFIG_L2TP=m 258CONFIG_L2TP=m
259CONFIG_BRIDGE=m
224CONFIG_ATALK=m 260CONFIG_ATALK=m
261CONFIG_6LOWPAN=m
225CONFIG_DNS_RESOLVER=y 262CONFIG_DNS_RESOLVER=y
226CONFIG_BATMAN_ADV=m 263CONFIG_BATMAN_ADV=m
227CONFIG_BATMAN_ADV_DAT=y 264CONFIG_BATMAN_ADV_DAT=y
@@ -230,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
230CONFIG_NETLINK_DIAG=m 267CONFIG_NETLINK_DIAG=m
231CONFIG_NET_MPLS_GSO=m 268CONFIG_NET_MPLS_GSO=m
232# CONFIG_WIRELESS is not set 269# CONFIG_WIRELESS is not set
270# CONFIG_UEVENT_HELPER is not set
233CONFIG_DEVTMPFS=y 271CONFIG_DEVTMPFS=y
272CONFIG_DEVTMPFS_MOUNT=y
234# CONFIG_FIRMWARE_IN_KERNEL is not set 273# CONFIG_FIRMWARE_IN_KERNEL is not set
235# CONFIG_FW_LOADER_USER_HELPER is not set
236CONFIG_CONNECTOR=m 274CONFIG_CONNECTOR=m
237CONFIG_BLK_DEV_LOOP=y 275CONFIG_BLK_DEV_LOOP=y
238CONFIG_BLK_DEV_CRYPTOLOOP=m 276CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -281,6 +319,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
281CONFIG_NET_TEAM_MODE_RANDOM=m 319CONFIG_NET_TEAM_MODE_RANDOM=m
282CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 320CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
283CONFIG_NET_TEAM_MODE_LOADBALANCE=m 321CONFIG_NET_TEAM_MODE_LOADBALANCE=m
322CONFIG_MACVLAN=m
323CONFIG_MACVTAP=m
324CONFIG_IPVLAN=m
284CONFIG_VXLAN=m 325CONFIG_VXLAN=m
285CONFIG_NETCONSOLE=m 326CONFIG_NETCONSOLE=m
286CONFIG_NETCONSOLE_DYNAMIC=y 327CONFIG_NETCONSOLE_DYNAMIC=y
@@ -291,6 +332,8 @@ CONFIG_VETH=m
291# CONFIG_NET_VENDOR_MARVELL is not set 332# CONFIG_NET_VENDOR_MARVELL is not set
292# CONFIG_NET_VENDOR_MICREL is not set 333# CONFIG_NET_VENDOR_MICREL is not set
293# CONFIG_NET_VENDOR_NATSEMI is not set 334# CONFIG_NET_VENDOR_NATSEMI is not set
335# CONFIG_NET_VENDOR_QUALCOMM is not set
336# CONFIG_NET_VENDOR_ROCKER is not set
294# CONFIG_NET_VENDOR_SAMSUNG is not set 337# CONFIG_NET_VENDOR_SAMSUNG is not set
295# CONFIG_NET_VENDOR_SEEQ is not set 338# CONFIG_NET_VENDOR_SEEQ is not set
296# CONFIG_NET_VENDOR_STMICRO is not set 339# CONFIG_NET_VENDOR_STMICRO is not set
@@ -332,6 +375,7 @@ CONFIG_HID=m
332CONFIG_HIDRAW=y 375CONFIG_HIDRAW=y
333CONFIG_UHID=m 376CONFIG_UHID=m
334# CONFIG_HID_GENERIC is not set 377# CONFIG_HID_GENERIC is not set
378# CONFIG_HID_PLANTRONICS is not set
335# CONFIG_USB_SUPPORT is not set 379# CONFIG_USB_SUPPORT is not set
336CONFIG_RTC_CLASS=y 380CONFIG_RTC_CLASS=y
337CONFIG_RTC_DRV_GENERIC=m 381CONFIG_RTC_DRV_GENERIC=m
@@ -350,6 +394,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
350CONFIG_AUTOFS4_FS=m 394CONFIG_AUTOFS4_FS=m
351CONFIG_FUSE_FS=m 395CONFIG_FUSE_FS=m
352CONFIG_CUSE=m 396CONFIG_CUSE=m
397CONFIG_OVERLAY_FS=m
353CONFIG_ISO9660_FS=y 398CONFIG_ISO9660_FS=y
354CONFIG_JOLIET=y 399CONFIG_JOLIET=y
355CONFIG_ZISOFS=y 400CONFIG_ZISOFS=y
@@ -365,6 +410,7 @@ CONFIG_HFS_FS=m
365CONFIG_HFSPLUS_FS=m 410CONFIG_HFSPLUS_FS=m
366CONFIG_CRAMFS=m 411CONFIG_CRAMFS=m
367CONFIG_SQUASHFS=m 412CONFIG_SQUASHFS=m
413CONFIG_SQUASHFS_LZ4=y
368CONFIG_SQUASHFS_LZO=y 414CONFIG_SQUASHFS_LZO=y
369CONFIG_MINIX_FS=m 415CONFIG_MINIX_FS=m
370CONFIG_OMFS_FS=m 416CONFIG_OMFS_FS=m
@@ -434,10 +480,18 @@ CONFIG_DLM=m
434CONFIG_MAGIC_SYSRQ=y 480CONFIG_MAGIC_SYSRQ=y
435CONFIG_ASYNC_RAID6_TEST=m 481CONFIG_ASYNC_RAID6_TEST=m
436CONFIG_TEST_STRING_HELPERS=m 482CONFIG_TEST_STRING_HELPERS=m
483CONFIG_TEST_KSTRTOX=m
484CONFIG_TEST_LKM=m
485CONFIG_TEST_USER_COPY=m
486CONFIG_TEST_BPF=m
487CONFIG_TEST_FIRMWARE=m
488CONFIG_TEST_UDELAY=m
489CONFIG_EARLY_PRINTK=y
437CONFIG_ENCRYPTED_KEYS=m 490CONFIG_ENCRYPTED_KEYS=m
438CONFIG_CRYPTO_MANAGER=y 491CONFIG_CRYPTO_MANAGER=y
439CONFIG_CRYPTO_USER=m 492CONFIG_CRYPTO_USER=m
440CONFIG_CRYPTO_CRYPTD=m 493CONFIG_CRYPTO_CRYPTD=m
494CONFIG_CRYPTO_MCRYPTD=m
441CONFIG_CRYPTO_TEST=m 495CONFIG_CRYPTO_TEST=m
442CONFIG_CRYPTO_CCM=m 496CONFIG_CRYPTO_CCM=m
443CONFIG_CRYPTO_GCM=m 497CONFIG_CRYPTO_GCM=m
@@ -472,13 +526,10 @@ CONFIG_CRYPTO_LZO=m
472CONFIG_CRYPTO_LZ4=m 526CONFIG_CRYPTO_LZ4=m
473CONFIG_CRYPTO_LZ4HC=m 527CONFIG_CRYPTO_LZ4HC=m
474# CONFIG_CRYPTO_ANSI_CPRNG is not set 528# CONFIG_CRYPTO_ANSI_CPRNG is not set
529CONFIG_CRYPTO_DRBG_MENU=m
530CONFIG_CRYPTO_DRBG_HASH=y
531CONFIG_CRYPTO_DRBG_CTR=y
475CONFIG_CRYPTO_USER_API_HASH=m 532CONFIG_CRYPTO_USER_API_HASH=m
476CONFIG_CRYPTO_USER_API_SKCIPHER=m 533CONFIG_CRYPTO_USER_API_SKCIPHER=m
477# CONFIG_CRYPTO_HW is not set 534# CONFIG_CRYPTO_HW is not set
478CONFIG_XZ_DEC_X86=y
479CONFIG_XZ_DEC_POWERPC=y
480CONFIG_XZ_DEC_IA64=y
481CONFIG_XZ_DEC_ARM=y
482CONFIG_XZ_DEC_ARMTHUMB=y
483CONFIG_XZ_DEC_SPARC=y
484CONFIG_XZ_DEC_TEST=m 535CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 391e185d73be..372593a3b398 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -31,8 +31,10 @@ CONFIG_M68030=y
31CONFIG_M68040=y 31CONFIG_M68040=y
32CONFIG_M68060=y 32CONFIG_M68060=y
33CONFIG_ATARI=y 33CONFIG_ATARI=y
34CONFIG_ATARI_ROM_ISA=y
34# CONFIG_COMPACTION is not set 35# CONFIG_COMPACTION is not set
35CONFIG_CLEANCACHE=y 36CONFIG_CLEANCACHE=y
37CONFIG_ZPOOL=m
36# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
37CONFIG_BINFMT_AOUT=m 39CONFIG_BINFMT_AOUT=m
38CONFIG_BINFMT_MISC=m 40CONFIG_BINFMT_MISC=m
@@ -52,6 +54,8 @@ CONFIG_NET_IPIP=m
52CONFIG_NET_IPGRE_DEMUX=m 54CONFIG_NET_IPGRE_DEMUX=m
53CONFIG_NET_IPGRE=m 55CONFIG_NET_IPGRE=m
54CONFIG_NET_IPVTI=m 56CONFIG_NET_IPVTI=m
57CONFIG_NET_FOU_IP_TUNNELS=y
58CONFIG_GENEVE=m
55CONFIG_INET_AH=m 59CONFIG_INET_AH=m
56CONFIG_INET_ESP=m 60CONFIG_INET_ESP=m
57CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
@@ -93,6 +97,8 @@ CONFIG_NFT_HASH=m
93CONFIG_NFT_COUNTER=m 97CONFIG_NFT_COUNTER=m
94CONFIG_NFT_LOG=m 98CONFIG_NFT_LOG=m
95CONFIG_NFT_LIMIT=m 99CONFIG_NFT_LIMIT=m
100CONFIG_NFT_MASQ=m
101CONFIG_NFT_REDIR=m
96CONFIG_NFT_NAT=m 102CONFIG_NFT_NAT=m
97CONFIG_NFT_QUEUE=m 103CONFIG_NFT_QUEUE=m
98CONFIG_NFT_REJECT=m 104CONFIG_NFT_REJECT=m
@@ -139,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
139CONFIG_NETFILTER_XT_MATCH_OSF=m 145CONFIG_NETFILTER_XT_MATCH_OSF=m
140CONFIG_NETFILTER_XT_MATCH_OWNER=m 146CONFIG_NETFILTER_XT_MATCH_OWNER=m
141CONFIG_NETFILTER_XT_MATCH_POLICY=m 147CONFIG_NETFILTER_XT_MATCH_POLICY=m
148CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
142CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 149CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
143CONFIG_NETFILTER_XT_MATCH_QUOTA=m 150CONFIG_NETFILTER_XT_MATCH_QUOTA=m
144CONFIG_NETFILTER_XT_MATCH_RATEEST=m 151CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -160,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
160CONFIG_IP_SET_HASH_IPPORT=m 167CONFIG_IP_SET_HASH_IPPORT=m
161CONFIG_IP_SET_HASH_IPPORTIP=m 168CONFIG_IP_SET_HASH_IPPORTIP=m
162CONFIG_IP_SET_HASH_IPPORTNET=m 169CONFIG_IP_SET_HASH_IPPORTNET=m
170CONFIG_IP_SET_HASH_MAC=m
163CONFIG_IP_SET_HASH_NETPORTNET=m 171CONFIG_IP_SET_HASH_NETPORTNET=m
164CONFIG_IP_SET_HASH_NET=m 172CONFIG_IP_SET_HASH_NET=m
165CONFIG_IP_SET_HASH_NETNET=m 173CONFIG_IP_SET_HASH_NETNET=m
@@ -167,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
167CONFIG_IP_SET_HASH_NETIFACE=m 175CONFIG_IP_SET_HASH_NETIFACE=m
168CONFIG_IP_SET_LIST_SET=m 176CONFIG_IP_SET_LIST_SET=m
169CONFIG_NF_CONNTRACK_IPV4=m 177CONFIG_NF_CONNTRACK_IPV4=m
178CONFIG_NF_LOG_ARP=m
170CONFIG_NFT_CHAIN_ROUTE_IPV4=m 179CONFIG_NFT_CHAIN_ROUTE_IPV4=m
171CONFIG_NFT_CHAIN_NAT_IPV4=m
172CONFIG_NF_TABLES_ARP=m 180CONFIG_NF_TABLES_ARP=m
181CONFIG_NFT_CHAIN_NAT_IPV4=m
182CONFIG_NFT_MASQ_IPV4=m
183CONFIG_NFT_REDIR_IPV4=m
173CONFIG_IP_NF_IPTABLES=m 184CONFIG_IP_NF_IPTABLES=m
174CONFIG_IP_NF_MATCH_AH=m 185CONFIG_IP_NF_MATCH_AH=m
175CONFIG_IP_NF_MATCH_ECN=m 186CONFIG_IP_NF_MATCH_ECN=m
@@ -178,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
178CONFIG_IP_NF_FILTER=m 189CONFIG_IP_NF_FILTER=m
179CONFIG_IP_NF_TARGET_REJECT=m 190CONFIG_IP_NF_TARGET_REJECT=m
180CONFIG_IP_NF_TARGET_SYNPROXY=m 191CONFIG_IP_NF_TARGET_SYNPROXY=m
181CONFIG_IP_NF_TARGET_ULOG=m 192CONFIG_IP_NF_NAT=m
182CONFIG_NF_NAT_IPV4=m
183CONFIG_IP_NF_TARGET_MASQUERADE=m 193CONFIG_IP_NF_TARGET_MASQUERADE=m
184CONFIG_IP_NF_TARGET_NETMAP=m 194CONFIG_IP_NF_TARGET_NETMAP=m
185CONFIG_IP_NF_TARGET_REDIRECT=m 195CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -194,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
194CONFIG_NF_CONNTRACK_IPV6=m 204CONFIG_NF_CONNTRACK_IPV6=m
195CONFIG_NFT_CHAIN_ROUTE_IPV6=m 205CONFIG_NFT_CHAIN_ROUTE_IPV6=m
196CONFIG_NFT_CHAIN_NAT_IPV6=m 206CONFIG_NFT_CHAIN_NAT_IPV6=m
207CONFIG_NFT_MASQ_IPV6=m
208CONFIG_NFT_REDIR_IPV6=m
197CONFIG_IP6_NF_IPTABLES=m 209CONFIG_IP6_NF_IPTABLES=m
198CONFIG_IP6_NF_MATCH_AH=m 210CONFIG_IP6_NF_MATCH_AH=m
199CONFIG_IP6_NF_MATCH_EUI64=m 211CONFIG_IP6_NF_MATCH_EUI64=m
@@ -210,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
210CONFIG_IP6_NF_TARGET_SYNPROXY=m 222CONFIG_IP6_NF_TARGET_SYNPROXY=m
211CONFIG_IP6_NF_MANGLE=m 223CONFIG_IP6_NF_MANGLE=m
212CONFIG_IP6_NF_RAW=m 224CONFIG_IP6_NF_RAW=m
213CONFIG_NF_NAT_IPV6=m 225CONFIG_IP6_NF_NAT=m
214CONFIG_IP6_NF_TARGET_MASQUERADE=m 226CONFIG_IP6_NF_TARGET_MASQUERADE=m
215CONFIG_IP6_NF_TARGET_NPT=m 227CONFIG_IP6_NF_TARGET_NPT=m
216CONFIG_NF_TABLES_BRIDGE=m 228CONFIG_NF_TABLES_BRIDGE=m
229CONFIG_NFT_BRIDGE_META=m
230CONFIG_NFT_BRIDGE_REJECT=m
231CONFIG_NF_LOG_BRIDGE=m
232CONFIG_BRIDGE_NF_EBTABLES=m
233CONFIG_BRIDGE_EBT_BROUTE=m
234CONFIG_BRIDGE_EBT_T_FILTER=m
235CONFIG_BRIDGE_EBT_T_NAT=m
236CONFIG_BRIDGE_EBT_802_3=m
237CONFIG_BRIDGE_EBT_AMONG=m
238CONFIG_BRIDGE_EBT_ARP=m
239CONFIG_BRIDGE_EBT_IP=m
240CONFIG_BRIDGE_EBT_IP6=m
241CONFIG_BRIDGE_EBT_LIMIT=m
242CONFIG_BRIDGE_EBT_MARK=m
243CONFIG_BRIDGE_EBT_PKTTYPE=m
244CONFIG_BRIDGE_EBT_STP=m
245CONFIG_BRIDGE_EBT_VLAN=m
246CONFIG_BRIDGE_EBT_ARPREPLY=m
247CONFIG_BRIDGE_EBT_DNAT=m
248CONFIG_BRIDGE_EBT_MARK_T=m
249CONFIG_BRIDGE_EBT_REDIRECT=m
250CONFIG_BRIDGE_EBT_SNAT=m
251CONFIG_BRIDGE_EBT_LOG=m
252CONFIG_BRIDGE_EBT_NFLOG=m
217CONFIG_IP_DCCP=m 253CONFIG_IP_DCCP=m
218# CONFIG_IP_DCCP_CCID3 is not set 254# CONFIG_IP_DCCP_CCID3 is not set
219CONFIG_SCTP_COOKIE_HMAC_SHA1=y 255CONFIG_SCTP_COOKIE_HMAC_SHA1=y
220CONFIG_RDS=m 256CONFIG_RDS=m
221CONFIG_RDS_TCP=m 257CONFIG_RDS_TCP=m
222CONFIG_L2TP=m 258CONFIG_L2TP=m
259CONFIG_BRIDGE=m
223CONFIG_ATALK=m 260CONFIG_ATALK=m
261CONFIG_6LOWPAN=m
224CONFIG_DNS_RESOLVER=y 262CONFIG_DNS_RESOLVER=y
225CONFIG_BATMAN_ADV=m 263CONFIG_BATMAN_ADV=m
226CONFIG_BATMAN_ADV_DAT=y 264CONFIG_BATMAN_ADV_DAT=y
@@ -229,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
229CONFIG_NETLINK_DIAG=m 267CONFIG_NETLINK_DIAG=m
230CONFIG_NET_MPLS_GSO=m 268CONFIG_NET_MPLS_GSO=m
231# CONFIG_WIRELESS is not set 269# CONFIG_WIRELESS is not set
270# CONFIG_UEVENT_HELPER is not set
232CONFIG_DEVTMPFS=y 271CONFIG_DEVTMPFS=y
272CONFIG_DEVTMPFS_MOUNT=y
233# CONFIG_FIRMWARE_IN_KERNEL is not set 273# CONFIG_FIRMWARE_IN_KERNEL is not set
234# CONFIG_FW_LOADER_USER_HELPER is not set
235CONFIG_CONNECTOR=m 274CONFIG_CONNECTOR=m
236CONFIG_PARPORT=m 275CONFIG_PARPORT=m
237CONFIG_PARPORT_ATARI=m 276CONFIG_PARPORT_ATARI=m
@@ -289,6 +328,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
289CONFIG_NET_TEAM_MODE_RANDOM=m 328CONFIG_NET_TEAM_MODE_RANDOM=m
290CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 329CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
291CONFIG_NET_TEAM_MODE_LOADBALANCE=m 330CONFIG_NET_TEAM_MODE_LOADBALANCE=m
331CONFIG_MACVLAN=m
332CONFIG_MACVTAP=m
333CONFIG_IPVLAN=m
292CONFIG_VXLAN=m 334CONFIG_VXLAN=m
293CONFIG_NETCONSOLE=m 335CONFIG_NETCONSOLE=m
294CONFIG_NETCONSOLE_DYNAMIC=y 336CONFIG_NETCONSOLE_DYNAMIC=y
@@ -299,8 +341,12 @@ CONFIG_ATARILANCE=y
299# CONFIG_NET_VENDOR_INTEL is not set 341# CONFIG_NET_VENDOR_INTEL is not set
300# CONFIG_NET_VENDOR_MARVELL is not set 342# CONFIG_NET_VENDOR_MARVELL is not set
301# CONFIG_NET_VENDOR_MICREL is not set 343# CONFIG_NET_VENDOR_MICREL is not set
344CONFIG_NE2000=y
345# CONFIG_NET_VENDOR_QUALCOMM is not set
346# CONFIG_NET_VENDOR_ROCKER is not set
302# CONFIG_NET_VENDOR_SAMSUNG is not set 347# CONFIG_NET_VENDOR_SAMSUNG is not set
303# CONFIG_NET_VENDOR_SEEQ is not set 348# CONFIG_NET_VENDOR_SEEQ is not set
349CONFIG_SMC91X=y
304# CONFIG_NET_VENDOR_STMICRO is not set 350# CONFIG_NET_VENDOR_STMICRO is not set
305# CONFIG_NET_VENDOR_VIA is not set 351# CONFIG_NET_VENDOR_VIA is not set
306# CONFIG_NET_VENDOR_WIZNET is not set 352# CONFIG_NET_VENDOR_WIZNET is not set
@@ -345,6 +391,7 @@ CONFIG_DMASOUND_ATARI=m
345CONFIG_HID=m 391CONFIG_HID=m
346CONFIG_HIDRAW=y 392CONFIG_HIDRAW=y
347CONFIG_UHID=m 393CONFIG_UHID=m
394# CONFIG_HID_PLANTRONICS is not set
348CONFIG_RTC_CLASS=y 395CONFIG_RTC_CLASS=y
349CONFIG_RTC_DRV_GENERIC=m 396CONFIG_RTC_DRV_GENERIC=m
350# CONFIG_IOMMU_SUPPORT is not set 397# CONFIG_IOMMU_SUPPORT is not set
@@ -354,6 +401,8 @@ CONFIG_NATFEAT=y
354CONFIG_NFBLOCK=y 401CONFIG_NFBLOCK=y
355CONFIG_NFCON=y 402CONFIG_NFCON=y
356CONFIG_NFETH=y 403CONFIG_NFETH=y
404CONFIG_ATARI_ETHERNAT=y
405CONFIG_ATARI_ETHERNEC=y
357CONFIG_ATARI_DSP56K=m 406CONFIG_ATARI_DSP56K=m
358CONFIG_EXT4_FS=y 407CONFIG_EXT4_FS=y
359CONFIG_REISERFS_FS=m 408CONFIG_REISERFS_FS=m
@@ -367,6 +416,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
367CONFIG_AUTOFS4_FS=m 416CONFIG_AUTOFS4_FS=m
368CONFIG_FUSE_FS=m 417CONFIG_FUSE_FS=m
369CONFIG_CUSE=m 418CONFIG_CUSE=m
419CONFIG_OVERLAY_FS=m
370CONFIG_ISO9660_FS=y 420CONFIG_ISO9660_FS=y
371CONFIG_JOLIET=y 421CONFIG_JOLIET=y
372CONFIG_ZISOFS=y 422CONFIG_ZISOFS=y
@@ -382,6 +432,7 @@ CONFIG_HFS_FS=m
382CONFIG_HFSPLUS_FS=m 432CONFIG_HFSPLUS_FS=m
383CONFIG_CRAMFS=m 433CONFIG_CRAMFS=m
384CONFIG_SQUASHFS=m 434CONFIG_SQUASHFS=m
435CONFIG_SQUASHFS_LZ4=y
385CONFIG_SQUASHFS_LZO=y 436CONFIG_SQUASHFS_LZO=y
386CONFIG_MINIX_FS=m 437CONFIG_MINIX_FS=m
387CONFIG_OMFS_FS=m 438CONFIG_OMFS_FS=m
@@ -451,10 +502,18 @@ CONFIG_DLM=m
451CONFIG_MAGIC_SYSRQ=y 502CONFIG_MAGIC_SYSRQ=y
452CONFIG_ASYNC_RAID6_TEST=m 503CONFIG_ASYNC_RAID6_TEST=m
453CONFIG_TEST_STRING_HELPERS=m 504CONFIG_TEST_STRING_HELPERS=m
505CONFIG_TEST_KSTRTOX=m
506CONFIG_TEST_LKM=m
507CONFIG_TEST_USER_COPY=m
508CONFIG_TEST_BPF=m
509CONFIG_TEST_FIRMWARE=m
510CONFIG_TEST_UDELAY=m
511CONFIG_EARLY_PRINTK=y
454CONFIG_ENCRYPTED_KEYS=m 512CONFIG_ENCRYPTED_KEYS=m
455CONFIG_CRYPTO_MANAGER=y 513CONFIG_CRYPTO_MANAGER=y
456CONFIG_CRYPTO_USER=m 514CONFIG_CRYPTO_USER=m
457CONFIG_CRYPTO_CRYPTD=m 515CONFIG_CRYPTO_CRYPTD=m
516CONFIG_CRYPTO_MCRYPTD=m
458CONFIG_CRYPTO_TEST=m 517CONFIG_CRYPTO_TEST=m
459CONFIG_CRYPTO_CCM=m 518CONFIG_CRYPTO_CCM=m
460CONFIG_CRYPTO_GCM=m 519CONFIG_CRYPTO_GCM=m
@@ -489,13 +548,10 @@ CONFIG_CRYPTO_LZO=m
489CONFIG_CRYPTO_LZ4=m 548CONFIG_CRYPTO_LZ4=m
490CONFIG_CRYPTO_LZ4HC=m 549CONFIG_CRYPTO_LZ4HC=m
491# CONFIG_CRYPTO_ANSI_CPRNG is not set 550# CONFIG_CRYPTO_ANSI_CPRNG is not set
551CONFIG_CRYPTO_DRBG_MENU=m
552CONFIG_CRYPTO_DRBG_HASH=y
553CONFIG_CRYPTO_DRBG_CTR=y
492CONFIG_CRYPTO_USER_API_HASH=m 554CONFIG_CRYPTO_USER_API_HASH=m
493CONFIG_CRYPTO_USER_API_SKCIPHER=m 555CONFIG_CRYPTO_USER_API_SKCIPHER=m
494# CONFIG_CRYPTO_HW is not set 556# CONFIG_CRYPTO_HW is not set
495CONFIG_XZ_DEC_X86=y
496CONFIG_XZ_DEC_POWERPC=y
497CONFIG_XZ_DEC_IA64=y
498CONFIG_XZ_DEC_ARM=y
499CONFIG_XZ_DEC_ARMTHUMB=y
500CONFIG_XZ_DEC_SPARC=y
501CONFIG_XZ_DEC_TEST=m 557CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index d0e705d1a063..f3bd35e76ea4 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -32,6 +32,7 @@ CONFIG_VME=y
32CONFIG_BVME6000=y 32CONFIG_BVME6000=y
33# CONFIG_COMPACTION is not set 33# CONFIG_COMPACTION is not set
34CONFIG_CLEANCACHE=y 34CONFIG_CLEANCACHE=y
35CONFIG_ZPOOL=m
35# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 36# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
36CONFIG_BINFMT_AOUT=m 37CONFIG_BINFMT_AOUT=m
37CONFIG_BINFMT_MISC=m 38CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
51CONFIG_NET_IPGRE_DEMUX=m 52CONFIG_NET_IPGRE_DEMUX=m
52CONFIG_NET_IPGRE=m 53CONFIG_NET_IPGRE=m
53CONFIG_NET_IPVTI=m 54CONFIG_NET_IPVTI=m
55CONFIG_NET_FOU_IP_TUNNELS=y
56CONFIG_GENEVE=m
54CONFIG_INET_AH=m 57CONFIG_INET_AH=m
55CONFIG_INET_ESP=m 58CONFIG_INET_ESP=m
56CONFIG_INET_IPCOMP=m 59CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
92CONFIG_NFT_COUNTER=m 95CONFIG_NFT_COUNTER=m
93CONFIG_NFT_LOG=m 96CONFIG_NFT_LOG=m
94CONFIG_NFT_LIMIT=m 97CONFIG_NFT_LIMIT=m
98CONFIG_NFT_MASQ=m
99CONFIG_NFT_REDIR=m
95CONFIG_NFT_NAT=m 100CONFIG_NFT_NAT=m
96CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
97CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
138CONFIG_NETFILTER_XT_MATCH_OSF=m 143CONFIG_NETFILTER_XT_MATCH_OSF=m
139CONFIG_NETFILTER_XT_MATCH_OWNER=m 144CONFIG_NETFILTER_XT_MATCH_OWNER=m
140CONFIG_NETFILTER_XT_MATCH_POLICY=m 145CONFIG_NETFILTER_XT_MATCH_POLICY=m
146CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
141CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 147CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
142CONFIG_NETFILTER_XT_MATCH_QUOTA=m 148CONFIG_NETFILTER_XT_MATCH_QUOTA=m
143CONFIG_NETFILTER_XT_MATCH_RATEEST=m 149CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
159CONFIG_IP_SET_HASH_IPPORT=m 165CONFIG_IP_SET_HASH_IPPORT=m
160CONFIG_IP_SET_HASH_IPPORTIP=m 166CONFIG_IP_SET_HASH_IPPORTIP=m
161CONFIG_IP_SET_HASH_IPPORTNET=m 167CONFIG_IP_SET_HASH_IPPORTNET=m
168CONFIG_IP_SET_HASH_MAC=m
162CONFIG_IP_SET_HASH_NETPORTNET=m 169CONFIG_IP_SET_HASH_NETPORTNET=m
163CONFIG_IP_SET_HASH_NET=m 170CONFIG_IP_SET_HASH_NET=m
164CONFIG_IP_SET_HASH_NETNET=m 171CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
166CONFIG_IP_SET_HASH_NETIFACE=m 173CONFIG_IP_SET_HASH_NETIFACE=m
167CONFIG_IP_SET_LIST_SET=m 174CONFIG_IP_SET_LIST_SET=m
168CONFIG_NF_CONNTRACK_IPV4=m 175CONFIG_NF_CONNTRACK_IPV4=m
176CONFIG_NF_LOG_ARP=m
169CONFIG_NFT_CHAIN_ROUTE_IPV4=m 177CONFIG_NFT_CHAIN_ROUTE_IPV4=m
170CONFIG_NFT_CHAIN_NAT_IPV4=m
171CONFIG_NF_TABLES_ARP=m 178CONFIG_NF_TABLES_ARP=m
179CONFIG_NFT_CHAIN_NAT_IPV4=m
180CONFIG_NFT_MASQ_IPV4=m
181CONFIG_NFT_REDIR_IPV4=m
172CONFIG_IP_NF_IPTABLES=m 182CONFIG_IP_NF_IPTABLES=m
173CONFIG_IP_NF_MATCH_AH=m 183CONFIG_IP_NF_MATCH_AH=m
174CONFIG_IP_NF_MATCH_ECN=m 184CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
177CONFIG_IP_NF_FILTER=m 187CONFIG_IP_NF_FILTER=m
178CONFIG_IP_NF_TARGET_REJECT=m 188CONFIG_IP_NF_TARGET_REJECT=m
179CONFIG_IP_NF_TARGET_SYNPROXY=m 189CONFIG_IP_NF_TARGET_SYNPROXY=m
180CONFIG_IP_NF_TARGET_ULOG=m 190CONFIG_IP_NF_NAT=m
181CONFIG_NF_NAT_IPV4=m
182CONFIG_IP_NF_TARGET_MASQUERADE=m 191CONFIG_IP_NF_TARGET_MASQUERADE=m
183CONFIG_IP_NF_TARGET_NETMAP=m 192CONFIG_IP_NF_TARGET_NETMAP=m
184CONFIG_IP_NF_TARGET_REDIRECT=m 193CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
193CONFIG_NF_CONNTRACK_IPV6=m 202CONFIG_NF_CONNTRACK_IPV6=m
194CONFIG_NFT_CHAIN_ROUTE_IPV6=m 203CONFIG_NFT_CHAIN_ROUTE_IPV6=m
195CONFIG_NFT_CHAIN_NAT_IPV6=m 204CONFIG_NFT_CHAIN_NAT_IPV6=m
205CONFIG_NFT_MASQ_IPV6=m
206CONFIG_NFT_REDIR_IPV6=m
196CONFIG_IP6_NF_IPTABLES=m 207CONFIG_IP6_NF_IPTABLES=m
197CONFIG_IP6_NF_MATCH_AH=m 208CONFIG_IP6_NF_MATCH_AH=m
198CONFIG_IP6_NF_MATCH_EUI64=m 209CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
209CONFIG_IP6_NF_TARGET_SYNPROXY=m 220CONFIG_IP6_NF_TARGET_SYNPROXY=m
210CONFIG_IP6_NF_MANGLE=m 221CONFIG_IP6_NF_MANGLE=m
211CONFIG_IP6_NF_RAW=m 222CONFIG_IP6_NF_RAW=m
212CONFIG_NF_NAT_IPV6=m 223CONFIG_IP6_NF_NAT=m
213CONFIG_IP6_NF_TARGET_MASQUERADE=m 224CONFIG_IP6_NF_TARGET_MASQUERADE=m
214CONFIG_IP6_NF_TARGET_NPT=m 225CONFIG_IP6_NF_TARGET_NPT=m
215CONFIG_NF_TABLES_BRIDGE=m 226CONFIG_NF_TABLES_BRIDGE=m
227CONFIG_NFT_BRIDGE_META=m
228CONFIG_NFT_BRIDGE_REJECT=m
229CONFIG_NF_LOG_BRIDGE=m
230CONFIG_BRIDGE_NF_EBTABLES=m
231CONFIG_BRIDGE_EBT_BROUTE=m
232CONFIG_BRIDGE_EBT_T_FILTER=m
233CONFIG_BRIDGE_EBT_T_NAT=m
234CONFIG_BRIDGE_EBT_802_3=m
235CONFIG_BRIDGE_EBT_AMONG=m
236CONFIG_BRIDGE_EBT_ARP=m
237CONFIG_BRIDGE_EBT_IP=m
238CONFIG_BRIDGE_EBT_IP6=m
239CONFIG_BRIDGE_EBT_LIMIT=m
240CONFIG_BRIDGE_EBT_MARK=m
241CONFIG_BRIDGE_EBT_PKTTYPE=m
242CONFIG_BRIDGE_EBT_STP=m
243CONFIG_BRIDGE_EBT_VLAN=m
244CONFIG_BRIDGE_EBT_ARPREPLY=m
245CONFIG_BRIDGE_EBT_DNAT=m
246CONFIG_BRIDGE_EBT_MARK_T=m
247CONFIG_BRIDGE_EBT_REDIRECT=m
248CONFIG_BRIDGE_EBT_SNAT=m
249CONFIG_BRIDGE_EBT_LOG=m
250CONFIG_BRIDGE_EBT_NFLOG=m
216CONFIG_IP_DCCP=m 251CONFIG_IP_DCCP=m
217# CONFIG_IP_DCCP_CCID3 is not set 252# CONFIG_IP_DCCP_CCID3 is not set
218CONFIG_SCTP_COOKIE_HMAC_SHA1=y 253CONFIG_SCTP_COOKIE_HMAC_SHA1=y
219CONFIG_RDS=m 254CONFIG_RDS=m
220CONFIG_RDS_TCP=m 255CONFIG_RDS_TCP=m
221CONFIG_L2TP=m 256CONFIG_L2TP=m
257CONFIG_BRIDGE=m
222CONFIG_ATALK=m 258CONFIG_ATALK=m
259CONFIG_6LOWPAN=m
223CONFIG_DNS_RESOLVER=y 260CONFIG_DNS_RESOLVER=y
224CONFIG_BATMAN_ADV=m 261CONFIG_BATMAN_ADV=m
225CONFIG_BATMAN_ADV_DAT=y 262CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
228CONFIG_NETLINK_DIAG=m 265CONFIG_NETLINK_DIAG=m
229CONFIG_NET_MPLS_GSO=m 266CONFIG_NET_MPLS_GSO=m
230# CONFIG_WIRELESS is not set 267# CONFIG_WIRELESS is not set
268# CONFIG_UEVENT_HELPER is not set
231CONFIG_DEVTMPFS=y 269CONFIG_DEVTMPFS=y
270CONFIG_DEVTMPFS_MOUNT=y
232# CONFIG_FIRMWARE_IN_KERNEL is not set 271# CONFIG_FIRMWARE_IN_KERNEL is not set
233# CONFIG_FW_LOADER_USER_HELPER is not set
234CONFIG_CONNECTOR=m 272CONFIG_CONNECTOR=m
235CONFIG_BLK_DEV_LOOP=y 273CONFIG_BLK_DEV_LOOP=y
236CONFIG_BLK_DEV_CRYPTOLOOP=m 274CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -280,6 +318,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
280CONFIG_NET_TEAM_MODE_RANDOM=m 318CONFIG_NET_TEAM_MODE_RANDOM=m
281CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 319CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
282CONFIG_NET_TEAM_MODE_LOADBALANCE=m 320CONFIG_NET_TEAM_MODE_LOADBALANCE=m
321CONFIG_MACVLAN=m
322CONFIG_MACVTAP=m
323CONFIG_IPVLAN=m
283CONFIG_VXLAN=m 324CONFIG_VXLAN=m
284CONFIG_NETCONSOLE=m 325CONFIG_NETCONSOLE=m
285CONFIG_NETCONSOLE_DYNAMIC=y 326CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_BVME6000_NET=y
290# CONFIG_NET_VENDOR_MARVELL is not set 331# CONFIG_NET_VENDOR_MARVELL is not set
291# CONFIG_NET_VENDOR_MICREL is not set 332# CONFIG_NET_VENDOR_MICREL is not set
292# CONFIG_NET_VENDOR_NATSEMI is not set 333# CONFIG_NET_VENDOR_NATSEMI is not set
334# CONFIG_NET_VENDOR_QUALCOMM is not set
335# CONFIG_NET_VENDOR_ROCKER is not set
293# CONFIG_NET_VENDOR_SAMSUNG is not set 336# CONFIG_NET_VENDOR_SAMSUNG is not set
294# CONFIG_NET_VENDOR_SEEQ is not set 337# CONFIG_NET_VENDOR_SEEQ is not set
295# CONFIG_NET_VENDOR_STMICRO is not set 338# CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
326CONFIG_HIDRAW=y 369CONFIG_HIDRAW=y
327CONFIG_UHID=m 370CONFIG_UHID=m
328# CONFIG_HID_GENERIC is not set 371# CONFIG_HID_GENERIC is not set
372# CONFIG_HID_PLANTRONICS is not set
329# CONFIG_USB_SUPPORT is not set 373# CONFIG_USB_SUPPORT is not set
330CONFIG_RTC_CLASS=y 374CONFIG_RTC_CLASS=y
331CONFIG_RTC_DRV_GENERIC=m 375CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
343CONFIG_AUTOFS4_FS=m 387CONFIG_AUTOFS4_FS=m
344CONFIG_FUSE_FS=m 388CONFIG_FUSE_FS=m
345CONFIG_CUSE=m 389CONFIG_CUSE=m
390CONFIG_OVERLAY_FS=m
346CONFIG_ISO9660_FS=y 391CONFIG_ISO9660_FS=y
347CONFIG_JOLIET=y 392CONFIG_JOLIET=y
348CONFIG_ZISOFS=y 393CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
358CONFIG_HFSPLUS_FS=m 403CONFIG_HFSPLUS_FS=m
359CONFIG_CRAMFS=m 404CONFIG_CRAMFS=m
360CONFIG_SQUASHFS=m 405CONFIG_SQUASHFS=m
406CONFIG_SQUASHFS_LZ4=y
361CONFIG_SQUASHFS_LZO=y 407CONFIG_SQUASHFS_LZO=y
362CONFIG_MINIX_FS=m 408CONFIG_MINIX_FS=m
363CONFIG_OMFS_FS=m 409CONFIG_OMFS_FS=m
@@ -427,10 +473,18 @@ CONFIG_DLM=m
427CONFIG_MAGIC_SYSRQ=y 473CONFIG_MAGIC_SYSRQ=y
428CONFIG_ASYNC_RAID6_TEST=m 474CONFIG_ASYNC_RAID6_TEST=m
429CONFIG_TEST_STRING_HELPERS=m 475CONFIG_TEST_STRING_HELPERS=m
476CONFIG_TEST_KSTRTOX=m
477CONFIG_TEST_LKM=m
478CONFIG_TEST_USER_COPY=m
479CONFIG_TEST_BPF=m
480CONFIG_TEST_FIRMWARE=m
481CONFIG_TEST_UDELAY=m
482CONFIG_EARLY_PRINTK=y
430CONFIG_ENCRYPTED_KEYS=m 483CONFIG_ENCRYPTED_KEYS=m
431CONFIG_CRYPTO_MANAGER=y 484CONFIG_CRYPTO_MANAGER=y
432CONFIG_CRYPTO_USER=m 485CONFIG_CRYPTO_USER=m
433CONFIG_CRYPTO_CRYPTD=m 486CONFIG_CRYPTO_CRYPTD=m
487CONFIG_CRYPTO_MCRYPTD=m
434CONFIG_CRYPTO_TEST=m 488CONFIG_CRYPTO_TEST=m
435CONFIG_CRYPTO_CCM=m 489CONFIG_CRYPTO_CCM=m
436CONFIG_CRYPTO_GCM=m 490CONFIG_CRYPTO_GCM=m
@@ -465,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
465CONFIG_CRYPTO_LZ4=m 519CONFIG_CRYPTO_LZ4=m
466CONFIG_CRYPTO_LZ4HC=m 520CONFIG_CRYPTO_LZ4HC=m
467# CONFIG_CRYPTO_ANSI_CPRNG is not set 521# CONFIG_CRYPTO_ANSI_CPRNG is not set
522CONFIG_CRYPTO_DRBG_MENU=m
523CONFIG_CRYPTO_DRBG_HASH=y
524CONFIG_CRYPTO_DRBG_CTR=y
468CONFIG_CRYPTO_USER_API_HASH=m 525CONFIG_CRYPTO_USER_API_HASH=m
469CONFIG_CRYPTO_USER_API_SKCIPHER=m 526CONFIG_CRYPTO_USER_API_SKCIPHER=m
470# CONFIG_CRYPTO_HW is not set 527# CONFIG_CRYPTO_HW is not set
471CONFIG_XZ_DEC_X86=y
472CONFIG_XZ_DEC_POWERPC=y
473CONFIG_XZ_DEC_IA64=y
474CONFIG_XZ_DEC_ARM=y
475CONFIG_XZ_DEC_ARMTHUMB=y
476CONFIG_XZ_DEC_SPARC=y
477CONFIG_XZ_DEC_TEST=m 528CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index fdc7e9672249..9f9793fb2b73 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -34,6 +34,7 @@ CONFIG_M68060=y
34CONFIG_HP300=y 34CONFIG_HP300=y
35# CONFIG_COMPACTION is not set 35# CONFIG_COMPACTION is not set
36CONFIG_CLEANCACHE=y 36CONFIG_CLEANCACHE=y
37CONFIG_ZPOOL=m
37# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
38CONFIG_BINFMT_AOUT=m 39CONFIG_BINFMT_AOUT=m
39CONFIG_BINFMT_MISC=m 40CONFIG_BINFMT_MISC=m
@@ -53,6 +54,8 @@ CONFIG_NET_IPIP=m
53CONFIG_NET_IPGRE_DEMUX=m 54CONFIG_NET_IPGRE_DEMUX=m
54CONFIG_NET_IPGRE=m 55CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -94,6 +97,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -140,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -161,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -168,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -179,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -195,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -211,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -230,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -281,6 +319,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -292,6 +333,8 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -335,6 +378,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -352,6 +396,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -367,6 +412,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -436,10 +482,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -474,13 +528,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3d345641d5a0..89f225c01a0b 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -33,6 +33,7 @@ CONFIG_M68KFPU_EMU=y
 CONFIG_MAC=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -52,6 +53,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -93,6 +96,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -139,6 +144,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -160,6 +166,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -167,9 +174,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -178,8 +188,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -194,6 +203,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -210,20 +221,46 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -232,9 +269,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_SWIM=m
 CONFIG_BLK_DEV_LOOP=y
@@ -297,6 +335,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -310,6 +351,8 @@ CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
 CONFIG_MAC8390=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -357,6 +400,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -374,6 +418,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -389,6 +434,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -458,11 +504,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -497,13 +550,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 59aa42096000..d3cdb5447a2c 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -39,9 +39,11 @@ CONFIG_SUN3X=y
 CONFIG_Q40=y
 CONFIG_ZORRO=y
 CONFIG_AMIGA_PCMCIA=y
+CONFIG_ATARI_ROM_ISA=y
 CONFIG_ZORRO_NAMES=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -61,6 +63,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -102,6 +106,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -148,6 +154,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -169,6 +176,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -176,9 +184,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -187,8 +198,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -203,6 +213,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -219,20 +231,46 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -241,9 +279,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
@@ -329,6 +368,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -352,11 +394,14 @@ CONFIG_MVME16x_NET=y
 CONFIG_MACSONIC=y
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
-CONFIG_NE2000=m
+CONFIG_NE2000=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
@@ -423,6 +468,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MSM6242=m
@@ -435,6 +481,8 @@ CONFIG_NATFEAT=y
 CONFIG_NFBLOCK=y
 CONFIG_NFCON=y
 CONFIG_NFETH=y
+CONFIG_ATARI_ETHERNAT=y
+CONFIG_ATARI_ETHERNEC=y
 CONFIG_ATARI_DSP56K=m
 CONFIG_AMIGA_BUILTIN_SERIAL=y
 CONFIG_SERIAL_CONSOLE=y
@@ -450,6 +498,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -465,6 +514,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -534,11 +584,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -573,13 +630,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 066b24af095e..b4c76640973e 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -31,6 +31,7 @@ CONFIG_VME=y
 CONFIG_MVME147=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -50,6 +51,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -91,6 +94,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -137,6 +142,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -158,6 +164,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -165,9 +172,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -176,8 +186,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -192,6 +201,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -208,17 +219,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -227,9 +264,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -279,6 +317,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -427,10 +473,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -465,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 9326ea664a5b..0d4a26f9b58c 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -32,6 +32,7 @@ CONFIG_VME=y
 CONFIG_MVME16x=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -280,6 +318,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -427,11 +473,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -466,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index d7d1101e31b5..5d581c503fa3 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -32,6 +32,7 @@ CONFIG_M68060=y
 CONFIG_Q40=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
@@ -286,6 +324,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -300,6 +341,8 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_NE2000=m
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -347,6 +390,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -365,6 +409,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -380,6 +425,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -449,10 +495,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -487,13 +541,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 98522e8fb852..c6b49a4a887c 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -29,6 +29,7 @@ CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -48,6 +49,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -89,6 +92,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -135,6 +140,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -156,6 +162,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -163,9 +170,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -174,8 +184,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -190,6 +199,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -206,17 +217,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -225,9 +262,10 @@ CONFIG_BATMAN_ADV_MCAST=y
225CONFIG_NETLINK_DIAG=m 262CONFIG_NETLINK_DIAG=m
226CONFIG_NET_MPLS_GSO=m 263CONFIG_NET_MPLS_GSO=m
227# CONFIG_WIRELESS is not set 264# CONFIG_WIRELESS is not set
265# CONFIG_UEVENT_HELPER is not set
228CONFIG_DEVTMPFS=y 266CONFIG_DEVTMPFS=y
267CONFIG_DEVTMPFS_MOUNT=y
229# CONFIG_FIRMWARE_IN_KERNEL is not set 268# CONFIG_FIRMWARE_IN_KERNEL is not set
230# CONFIG_FW_LOADER_USER_HELPER is not set
231CONFIG_CONNECTOR=m 269CONFIG_CONNECTOR=m
232CONFIG_BLK_DEV_LOOP=y 270CONFIG_BLK_DEV_LOOP=y
233CONFIG_BLK_DEV_CRYPTOLOOP=m 271CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -277,6 +315,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
277CONFIG_NET_TEAM_MODE_RANDOM=m 315CONFIG_NET_TEAM_MODE_RANDOM=m
278CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 316CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
279CONFIG_NET_TEAM_MODE_LOADBALANCE=m 317CONFIG_NET_TEAM_MODE_LOADBALANCE=m
318CONFIG_MACVLAN=m
319CONFIG_MACVTAP=m
320CONFIG_IPVLAN=m
280CONFIG_VXLAN=m 321CONFIG_VXLAN=m
281CONFIG_NETCONSOLE=m 322CONFIG_NETCONSOLE=m
282CONFIG_NETCONSOLE_DYNAMIC=y 323CONFIG_NETCONSOLE_DYNAMIC=y
@@ -287,6 +328,8 @@ CONFIG_SUN3_82586=y
287# CONFIG_NET_VENDOR_MARVELL is not set 328# CONFIG_NET_VENDOR_MARVELL is not set
288# CONFIG_NET_VENDOR_MICREL is not set 329# CONFIG_NET_VENDOR_MICREL is not set
289# CONFIG_NET_VENDOR_NATSEMI is not set 330# CONFIG_NET_VENDOR_NATSEMI is not set
331# CONFIG_NET_VENDOR_QUALCOMM is not set
332# CONFIG_NET_VENDOR_ROCKER is not set
290# CONFIG_NET_VENDOR_SAMSUNG is not set 333# CONFIG_NET_VENDOR_SAMSUNG is not set
291# CONFIG_NET_VENDOR_SEEQ is not set 334# CONFIG_NET_VENDOR_SEEQ is not set
292# CONFIG_NET_VENDOR_STMICRO is not set 335# CONFIG_NET_VENDOR_STMICRO is not set
@@ -327,6 +370,7 @@ CONFIG_HID=m
327CONFIG_HIDRAW=y 370CONFIG_HIDRAW=y
328CONFIG_UHID=m 371CONFIG_UHID=m
329# CONFIG_HID_GENERIC is not set 372# CONFIG_HID_GENERIC is not set
373# CONFIG_HID_PLANTRONICS is not set
330# CONFIG_USB_SUPPORT is not set 374# CONFIG_USB_SUPPORT is not set
331CONFIG_RTC_CLASS=y 375CONFIG_RTC_CLASS=y
332CONFIG_RTC_DRV_GENERIC=m 376CONFIG_RTC_DRV_GENERIC=m
@@ -344,6 +388,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
344CONFIG_AUTOFS4_FS=m 388CONFIG_AUTOFS4_FS=m
345CONFIG_FUSE_FS=m 389CONFIG_FUSE_FS=m
346CONFIG_CUSE=m 390CONFIG_CUSE=m
391CONFIG_OVERLAY_FS=m
347CONFIG_ISO9660_FS=y 392CONFIG_ISO9660_FS=y
348CONFIG_JOLIET=y 393CONFIG_JOLIET=y
349CONFIG_ZISOFS=y 394CONFIG_ZISOFS=y
@@ -359,6 +404,7 @@ CONFIG_HFS_FS=m
359CONFIG_HFSPLUS_FS=m 404CONFIG_HFSPLUS_FS=m
360CONFIG_CRAMFS=m 405CONFIG_CRAMFS=m
361CONFIG_SQUASHFS=m 406CONFIG_SQUASHFS=m
407CONFIG_SQUASHFS_LZ4=y
362CONFIG_SQUASHFS_LZO=y 408CONFIG_SQUASHFS_LZO=y
363CONFIG_MINIX_FS=m 409CONFIG_MINIX_FS=m
364CONFIG_OMFS_FS=m 410CONFIG_OMFS_FS=m
@@ -428,10 +474,17 @@ CONFIG_DLM=m
428CONFIG_MAGIC_SYSRQ=y 474CONFIG_MAGIC_SYSRQ=y
429CONFIG_ASYNC_RAID6_TEST=m 475CONFIG_ASYNC_RAID6_TEST=m
430CONFIG_TEST_STRING_HELPERS=m 476CONFIG_TEST_STRING_HELPERS=m
477CONFIG_TEST_KSTRTOX=m
478CONFIG_TEST_LKM=m
479CONFIG_TEST_USER_COPY=m
480CONFIG_TEST_BPF=m
481CONFIG_TEST_FIRMWARE=m
482CONFIG_TEST_UDELAY=m
431CONFIG_ENCRYPTED_KEYS=m 483CONFIG_ENCRYPTED_KEYS=m
432CONFIG_CRYPTO_MANAGER=y 484CONFIG_CRYPTO_MANAGER=y
433CONFIG_CRYPTO_USER=m 485CONFIG_CRYPTO_USER=m
434CONFIG_CRYPTO_CRYPTD=m 486CONFIG_CRYPTO_CRYPTD=m
487CONFIG_CRYPTO_MCRYPTD=m
435CONFIG_CRYPTO_TEST=m 488CONFIG_CRYPTO_TEST=m
436CONFIG_CRYPTO_CCM=m 489CONFIG_CRYPTO_CCM=m
437CONFIG_CRYPTO_GCM=m 490CONFIG_CRYPTO_GCM=m
@@ -466,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
466CONFIG_CRYPTO_LZ4=m 519CONFIG_CRYPTO_LZ4=m
467CONFIG_CRYPTO_LZ4HC=m 520CONFIG_CRYPTO_LZ4HC=m
468# CONFIG_CRYPTO_ANSI_CPRNG is not set 521# CONFIG_CRYPTO_ANSI_CPRNG is not set
522CONFIG_CRYPTO_DRBG_MENU=m
523CONFIG_CRYPTO_DRBG_HASH=y
524CONFIG_CRYPTO_DRBG_CTR=y
469CONFIG_CRYPTO_USER_API_HASH=m 525CONFIG_CRYPTO_USER_API_HASH=m
470CONFIG_CRYPTO_USER_API_SKCIPHER=m 526CONFIG_CRYPTO_USER_API_SKCIPHER=m
471# CONFIG_CRYPTO_HW is not set 527# CONFIG_CRYPTO_HW is not set
472CONFIG_XZ_DEC_X86=y
473CONFIG_XZ_DEC_POWERPC=y
474CONFIG_XZ_DEC_IA64=y
475CONFIG_XZ_DEC_ARM=y
476CONFIG_XZ_DEC_ARMTHUMB=y
477CONFIG_XZ_DEC_SPARC=y
478CONFIG_XZ_DEC_TEST=m 528CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 5128a8c3f4e3..b65785eaff8d 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -29,6 +29,7 @@ CONFIG_BOOTINFO_PROC=y
29CONFIG_SUN3X=y 29CONFIG_SUN3X=y
30# CONFIG_COMPACTION is not set 30# CONFIG_COMPACTION is not set
31CONFIG_CLEANCACHE=y 31CONFIG_CLEANCACHE=y
32CONFIG_ZPOOL=m
32# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 33# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
33CONFIG_BINFMT_AOUT=m 34CONFIG_BINFMT_AOUT=m
34CONFIG_BINFMT_MISC=m 35CONFIG_BINFMT_MISC=m
@@ -48,6 +49,8 @@ CONFIG_NET_IPIP=m
48CONFIG_NET_IPGRE_DEMUX=m 49CONFIG_NET_IPGRE_DEMUX=m
49CONFIG_NET_IPGRE=m 50CONFIG_NET_IPGRE=m
50CONFIG_NET_IPVTI=m 51CONFIG_NET_IPVTI=m
52CONFIG_NET_FOU_IP_TUNNELS=y
53CONFIG_GENEVE=m
51CONFIG_INET_AH=m 54CONFIG_INET_AH=m
52CONFIG_INET_ESP=m 55CONFIG_INET_ESP=m
53CONFIG_INET_IPCOMP=m 56CONFIG_INET_IPCOMP=m
@@ -89,6 +92,8 @@ CONFIG_NFT_HASH=m
89CONFIG_NFT_COUNTER=m 92CONFIG_NFT_COUNTER=m
90CONFIG_NFT_LOG=m 93CONFIG_NFT_LOG=m
91CONFIG_NFT_LIMIT=m 94CONFIG_NFT_LIMIT=m
95CONFIG_NFT_MASQ=m
96CONFIG_NFT_REDIR=m
92CONFIG_NFT_NAT=m 97CONFIG_NFT_NAT=m
93CONFIG_NFT_QUEUE=m 98CONFIG_NFT_QUEUE=m
94CONFIG_NFT_REJECT=m 99CONFIG_NFT_REJECT=m
@@ -135,6 +140,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
135CONFIG_NETFILTER_XT_MATCH_OSF=m 140CONFIG_NETFILTER_XT_MATCH_OSF=m
136CONFIG_NETFILTER_XT_MATCH_OWNER=m 141CONFIG_NETFILTER_XT_MATCH_OWNER=m
137CONFIG_NETFILTER_XT_MATCH_POLICY=m 142CONFIG_NETFILTER_XT_MATCH_POLICY=m
143CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
138CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 144CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
139CONFIG_NETFILTER_XT_MATCH_QUOTA=m 145CONFIG_NETFILTER_XT_MATCH_QUOTA=m
140CONFIG_NETFILTER_XT_MATCH_RATEEST=m 146CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -156,6 +162,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
156CONFIG_IP_SET_HASH_IPPORT=m 162CONFIG_IP_SET_HASH_IPPORT=m
157CONFIG_IP_SET_HASH_IPPORTIP=m 163CONFIG_IP_SET_HASH_IPPORTIP=m
158CONFIG_IP_SET_HASH_IPPORTNET=m 164CONFIG_IP_SET_HASH_IPPORTNET=m
165CONFIG_IP_SET_HASH_MAC=m
159CONFIG_IP_SET_HASH_NETPORTNET=m 166CONFIG_IP_SET_HASH_NETPORTNET=m
160CONFIG_IP_SET_HASH_NET=m 167CONFIG_IP_SET_HASH_NET=m
161CONFIG_IP_SET_HASH_NETNET=m 168CONFIG_IP_SET_HASH_NETNET=m
@@ -163,9 +170,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
163CONFIG_IP_SET_HASH_NETIFACE=m 170CONFIG_IP_SET_HASH_NETIFACE=m
164CONFIG_IP_SET_LIST_SET=m 171CONFIG_IP_SET_LIST_SET=m
165CONFIG_NF_CONNTRACK_IPV4=m 172CONFIG_NF_CONNTRACK_IPV4=m
173CONFIG_NF_LOG_ARP=m
166CONFIG_NFT_CHAIN_ROUTE_IPV4=m 174CONFIG_NFT_CHAIN_ROUTE_IPV4=m
167CONFIG_NFT_CHAIN_NAT_IPV4=m
168CONFIG_NF_TABLES_ARP=m 175CONFIG_NF_TABLES_ARP=m
176CONFIG_NFT_CHAIN_NAT_IPV4=m
177CONFIG_NFT_MASQ_IPV4=m
178CONFIG_NFT_REDIR_IPV4=m
169CONFIG_IP_NF_IPTABLES=m 179CONFIG_IP_NF_IPTABLES=m
170CONFIG_IP_NF_MATCH_AH=m 180CONFIG_IP_NF_MATCH_AH=m
171CONFIG_IP_NF_MATCH_ECN=m 181CONFIG_IP_NF_MATCH_ECN=m
@@ -174,8 +184,7 @@ CONFIG_IP_NF_MATCH_TTL=m
174CONFIG_IP_NF_FILTER=m 184CONFIG_IP_NF_FILTER=m
175CONFIG_IP_NF_TARGET_REJECT=m 185CONFIG_IP_NF_TARGET_REJECT=m
176CONFIG_IP_NF_TARGET_SYNPROXY=m 186CONFIG_IP_NF_TARGET_SYNPROXY=m
177CONFIG_IP_NF_TARGET_ULOG=m 187CONFIG_IP_NF_NAT=m
178CONFIG_NF_NAT_IPV4=m
179CONFIG_IP_NF_TARGET_MASQUERADE=m 188CONFIG_IP_NF_TARGET_MASQUERADE=m
180CONFIG_IP_NF_TARGET_NETMAP=m 189CONFIG_IP_NF_TARGET_NETMAP=m
181CONFIG_IP_NF_TARGET_REDIRECT=m 190CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -190,6 +199,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
190CONFIG_NF_CONNTRACK_IPV6=m 199CONFIG_NF_CONNTRACK_IPV6=m
191CONFIG_NFT_CHAIN_ROUTE_IPV6=m 200CONFIG_NFT_CHAIN_ROUTE_IPV6=m
192CONFIG_NFT_CHAIN_NAT_IPV6=m 201CONFIG_NFT_CHAIN_NAT_IPV6=m
202CONFIG_NFT_MASQ_IPV6=m
203CONFIG_NFT_REDIR_IPV6=m
193CONFIG_IP6_NF_IPTABLES=m 204CONFIG_IP6_NF_IPTABLES=m
194CONFIG_IP6_NF_MATCH_AH=m 205CONFIG_IP6_NF_MATCH_AH=m
195CONFIG_IP6_NF_MATCH_EUI64=m 206CONFIG_IP6_NF_MATCH_EUI64=m
@@ -206,17 +217,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
206CONFIG_IP6_NF_TARGET_SYNPROXY=m 217CONFIG_IP6_NF_TARGET_SYNPROXY=m
207CONFIG_IP6_NF_MANGLE=m 218CONFIG_IP6_NF_MANGLE=m
208CONFIG_IP6_NF_RAW=m 219CONFIG_IP6_NF_RAW=m
209CONFIG_NF_NAT_IPV6=m 220CONFIG_IP6_NF_NAT=m
210CONFIG_IP6_NF_TARGET_MASQUERADE=m 221CONFIG_IP6_NF_TARGET_MASQUERADE=m
211CONFIG_IP6_NF_TARGET_NPT=m 222CONFIG_IP6_NF_TARGET_NPT=m
212CONFIG_NF_TABLES_BRIDGE=m 223CONFIG_NF_TABLES_BRIDGE=m
224CONFIG_NFT_BRIDGE_META=m
225CONFIG_NFT_BRIDGE_REJECT=m
226CONFIG_NF_LOG_BRIDGE=m
227CONFIG_BRIDGE_NF_EBTABLES=m
228CONFIG_BRIDGE_EBT_BROUTE=m
229CONFIG_BRIDGE_EBT_T_FILTER=m
230CONFIG_BRIDGE_EBT_T_NAT=m
231CONFIG_BRIDGE_EBT_802_3=m
232CONFIG_BRIDGE_EBT_AMONG=m
233CONFIG_BRIDGE_EBT_ARP=m
234CONFIG_BRIDGE_EBT_IP=m
235CONFIG_BRIDGE_EBT_IP6=m
236CONFIG_BRIDGE_EBT_LIMIT=m
237CONFIG_BRIDGE_EBT_MARK=m
238CONFIG_BRIDGE_EBT_PKTTYPE=m
239CONFIG_BRIDGE_EBT_STP=m
240CONFIG_BRIDGE_EBT_VLAN=m
241CONFIG_BRIDGE_EBT_ARPREPLY=m
242CONFIG_BRIDGE_EBT_DNAT=m
243CONFIG_BRIDGE_EBT_MARK_T=m
244CONFIG_BRIDGE_EBT_REDIRECT=m
245CONFIG_BRIDGE_EBT_SNAT=m
246CONFIG_BRIDGE_EBT_LOG=m
247CONFIG_BRIDGE_EBT_NFLOG=m
213CONFIG_IP_DCCP=m 248CONFIG_IP_DCCP=m
214# CONFIG_IP_DCCP_CCID3 is not set 249# CONFIG_IP_DCCP_CCID3 is not set
215CONFIG_SCTP_COOKIE_HMAC_SHA1=y 250CONFIG_SCTP_COOKIE_HMAC_SHA1=y
216CONFIG_RDS=m 251CONFIG_RDS=m
217CONFIG_RDS_TCP=m 252CONFIG_RDS_TCP=m
218CONFIG_L2TP=m 253CONFIG_L2TP=m
254CONFIG_BRIDGE=m
219CONFIG_ATALK=m 255CONFIG_ATALK=m
256CONFIG_6LOWPAN=m
220CONFIG_DNS_RESOLVER=y 257CONFIG_DNS_RESOLVER=y
221CONFIG_BATMAN_ADV=m 258CONFIG_BATMAN_ADV=m
222CONFIG_BATMAN_ADV_DAT=y 259CONFIG_BATMAN_ADV_DAT=y
@@ -225,9 +262,10 @@ CONFIG_BATMAN_ADV_MCAST=y
225CONFIG_NETLINK_DIAG=m 262CONFIG_NETLINK_DIAG=m
226CONFIG_NET_MPLS_GSO=m 263CONFIG_NET_MPLS_GSO=m
227# CONFIG_WIRELESS is not set 264# CONFIG_WIRELESS is not set
265# CONFIG_UEVENT_HELPER is not set
228CONFIG_DEVTMPFS=y 266CONFIG_DEVTMPFS=y
267CONFIG_DEVTMPFS_MOUNT=y
229# CONFIG_FIRMWARE_IN_KERNEL is not set 268# CONFIG_FIRMWARE_IN_KERNEL is not set
230# CONFIG_FW_LOADER_USER_HELPER is not set
231CONFIG_CONNECTOR=m 269CONFIG_CONNECTOR=m
232CONFIG_BLK_DEV_LOOP=y 270CONFIG_BLK_DEV_LOOP=y
233CONFIG_BLK_DEV_CRYPTOLOOP=m 271CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -277,6 +315,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
277CONFIG_NET_TEAM_MODE_RANDOM=m 315CONFIG_NET_TEAM_MODE_RANDOM=m
278CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 316CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
279CONFIG_NET_TEAM_MODE_LOADBALANCE=m 317CONFIG_NET_TEAM_MODE_LOADBALANCE=m
318CONFIG_MACVLAN=m
319CONFIG_MACVTAP=m
320CONFIG_IPVLAN=m
280CONFIG_VXLAN=m 321CONFIG_VXLAN=m
281CONFIG_NETCONSOLE=m 322CONFIG_NETCONSOLE=m
282CONFIG_NETCONSOLE_DYNAMIC=y 323CONFIG_NETCONSOLE_DYNAMIC=y
@@ -288,6 +329,8 @@ CONFIG_SUN3LANCE=y
288# CONFIG_NET_VENDOR_MARVELL is not set 329# CONFIG_NET_VENDOR_MARVELL is not set
289# CONFIG_NET_VENDOR_MICREL is not set 330# CONFIG_NET_VENDOR_MICREL is not set
290# CONFIG_NET_VENDOR_NATSEMI is not set 331# CONFIG_NET_VENDOR_NATSEMI is not set
332# CONFIG_NET_VENDOR_QUALCOMM is not set
333# CONFIG_NET_VENDOR_ROCKER is not set
291# CONFIG_NET_VENDOR_SAMSUNG is not set 334# CONFIG_NET_VENDOR_SAMSUNG is not set
292# CONFIG_NET_VENDOR_SEEQ is not set 335# CONFIG_NET_VENDOR_SEEQ is not set
293# CONFIG_NET_VENDOR_STMICRO is not set 336# CONFIG_NET_VENDOR_STMICRO is not set
@@ -327,6 +370,7 @@ CONFIG_HID=m
327CONFIG_HIDRAW=y 370CONFIG_HIDRAW=y
328CONFIG_UHID=m 371CONFIG_UHID=m
329# CONFIG_HID_GENERIC is not set 372# CONFIG_HID_GENERIC is not set
373# CONFIG_HID_PLANTRONICS is not set
330# CONFIG_USB_SUPPORT is not set 374# CONFIG_USB_SUPPORT is not set
331CONFIG_RTC_CLASS=y 375CONFIG_RTC_CLASS=y
332CONFIG_RTC_DRV_GENERIC=m 376CONFIG_RTC_DRV_GENERIC=m
@@ -344,6 +388,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
344CONFIG_AUTOFS4_FS=m 388CONFIG_AUTOFS4_FS=m
345CONFIG_FUSE_FS=m 389CONFIG_FUSE_FS=m
346CONFIG_CUSE=m 390CONFIG_CUSE=m
391CONFIG_OVERLAY_FS=m
347CONFIG_ISO9660_FS=y 392CONFIG_ISO9660_FS=y
348CONFIG_JOLIET=y 393CONFIG_JOLIET=y
349CONFIG_ZISOFS=y 394CONFIG_ZISOFS=y
@@ -359,6 +404,7 @@ CONFIG_HFS_FS=m
359CONFIG_HFSPLUS_FS=m 404CONFIG_HFSPLUS_FS=m
360CONFIG_CRAMFS=m 405CONFIG_CRAMFS=m
361CONFIG_SQUASHFS=m 406CONFIG_SQUASHFS=m
407CONFIG_SQUASHFS_LZ4=y
362CONFIG_SQUASHFS_LZO=y 408CONFIG_SQUASHFS_LZO=y
363CONFIG_MINIX_FS=m 409CONFIG_MINIX_FS=m
364CONFIG_OMFS_FS=m 410CONFIG_OMFS_FS=m
@@ -428,10 +474,18 @@ CONFIG_DLM=m
428CONFIG_MAGIC_SYSRQ=y 474CONFIG_MAGIC_SYSRQ=y
429CONFIG_ASYNC_RAID6_TEST=m 475CONFIG_ASYNC_RAID6_TEST=m
430CONFIG_TEST_STRING_HELPERS=m 476CONFIG_TEST_STRING_HELPERS=m
477CONFIG_TEST_KSTRTOX=m
478CONFIG_TEST_LKM=m
479CONFIG_TEST_USER_COPY=m
480CONFIG_TEST_BPF=m
481CONFIG_TEST_FIRMWARE=m
482CONFIG_TEST_UDELAY=m
483CONFIG_EARLY_PRINTK=y
431CONFIG_ENCRYPTED_KEYS=m 484CONFIG_ENCRYPTED_KEYS=m
432CONFIG_CRYPTO_MANAGER=y 485CONFIG_CRYPTO_MANAGER=y
433CONFIG_CRYPTO_USER=m 486CONFIG_CRYPTO_USER=m
434CONFIG_CRYPTO_CRYPTD=m 487CONFIG_CRYPTO_CRYPTD=m
488CONFIG_CRYPTO_MCRYPTD=m
435CONFIG_CRYPTO_TEST=m 489CONFIG_CRYPTO_TEST=m
436CONFIG_CRYPTO_CCM=m 490CONFIG_CRYPTO_CCM=m
437CONFIG_CRYPTO_GCM=m 491CONFIG_CRYPTO_GCM=m
@@ -466,13 +520,10 @@ CONFIG_CRYPTO_LZO=m
466CONFIG_CRYPTO_LZ4=m 520CONFIG_CRYPTO_LZ4=m
467CONFIG_CRYPTO_LZ4HC=m 521CONFIG_CRYPTO_LZ4HC=m
468# CONFIG_CRYPTO_ANSI_CPRNG is not set 522# CONFIG_CRYPTO_ANSI_CPRNG is not set
523CONFIG_CRYPTO_DRBG_MENU=m
524CONFIG_CRYPTO_DRBG_HASH=y
525CONFIG_CRYPTO_DRBG_CTR=y
469CONFIG_CRYPTO_USER_API_HASH=m 526CONFIG_CRYPTO_USER_API_HASH=m
470CONFIG_CRYPTO_USER_API_SKCIPHER=m 527CONFIG_CRYPTO_USER_API_SKCIPHER=m
471# CONFIG_CRYPTO_HW is not set 528# CONFIG_CRYPTO_HW is not set
472CONFIG_XZ_DEC_X86=y
473CONFIG_XZ_DEC_POWERPC=y
474CONFIG_XZ_DEC_IA64=y
475CONFIG_XZ_DEC_ARM=y
476CONFIG_XZ_DEC_ARMTHUMB=y
477CONFIG_XZ_DEC_SPARC=y
478CONFIG_XZ_DEC_TEST=m 529CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 9b6c691874bd..1517ed1c6471 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += device.h
6generic-y += emergency-restart.h 6generic-y += emergency-restart.h
7generic-y += errno.h 7generic-y += errno.h
8generic-y += exec.h 8generic-y += exec.h
9generic-y += futex.h
9generic-y += hw_irq.h 10generic-y += hw_irq.h
10generic-y += ioctl.h 11generic-y += ioctl.h
11generic-y += ipcbuf.h 12generic-y += ipcbuf.h
diff --git a/arch/m68k/include/asm/atariints.h b/arch/m68k/include/asm/atariints.h
index 953e0ac6855e..6321c4495620 100644
--- a/arch/m68k/include/asm/atariints.h
+++ b/arch/m68k/include/asm/atariints.h
@@ -40,11 +40,6 @@
40/* convert irq_handler index to vector number */ 40/* convert irq_handler index to vector number */
41#define IRQ_SOURCE_TO_VECTOR(i) ((i) + ((i) < 8 ? 0x18 : (0x40-8))) 41#define IRQ_SOURCE_TO_VECTOR(i) ((i) + ((i) < 8 ? 0x18 : (0x40-8)))
42 42
43/* interrupt service types */
44#define IRQ_TYPE_SLOW 0
45#define IRQ_TYPE_FAST 1
46#define IRQ_TYPE_PRIO 2
47
48/* ST-MFP interrupts */ 43/* ST-MFP interrupts */
49#define IRQ_MFP_BUSY (8) 44#define IRQ_MFP_BUSY (8)
50#define IRQ_MFP_DCD (9) 45#define IRQ_MFP_DCD (9)
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644
index bc868af10c96..000000000000
--- a/arch/m68k/include/asm/futex.h
+++ /dev/null
@@ -1,94 +0,0 @@
1#ifndef _ASM_M68K_FUTEX_H
2#define _ASM_M68K_FUTEX_H
3
4#ifdef __KERNEL__
5#if !defined(CONFIG_MMU)
6#include <asm-generic/futex.h>
7#else /* CONFIG_MMU */
8
9#include <linux/futex.h>
10#include <linux/uaccess.h>
11#include <asm/errno.h>
12
13static inline int
14futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
15 u32 oldval, u32 newval)
16{
17 u32 val;
18
19 if (unlikely(get_user(val, uaddr) != 0))
20 return -EFAULT;
21
22 if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
23 return -EFAULT;
24
25 *uval = val;
26
27 return 0;
28}
29
30static inline int
31futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
32{
33 int op = (encoded_op >> 28) & 7;
34 int cmp = (encoded_op >> 24) & 15;
35 int oparg = (encoded_op << 8) >> 20;
36 int cmparg = (encoded_op << 20) >> 20;
37 int oldval, ret;
38 u32 tmp;
39
40 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
41 oparg = 1 << oparg;
42
43 pagefault_disable(); /* implies preempt_disable() */
44
45 ret = -EFAULT;
46 if (unlikely(get_user(oldval, uaddr) != 0))
47 goto out_pagefault_enable;
48
49 ret = 0;
50 tmp = oldval;
51
52 switch (op) {
53 case FUTEX_OP_SET:
54 tmp = oparg;
55 break;
56 case FUTEX_OP_ADD:
57 tmp += oparg;
58 break;
59 case FUTEX_OP_OR:
60 tmp |= oparg;
61 break;
62 case FUTEX_OP_ANDN:
63 tmp &= ~oparg;
64 break;
65 case FUTEX_OP_XOR:
66 tmp ^= oparg;
67 break;
68 default:
69 ret = -ENOSYS;
70 }
71
72 if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
73 ret = -EFAULT;
74
75out_pagefault_enable:
76 pagefault_enable(); /* subsumes preempt_enable() */
77
78 if (ret == 0) {
79 switch (cmp) {
80 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
81 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
82 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
83 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
84 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
85 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
86 default: ret = -ENOSYS;
87 }
88 }
89 return ret;
90}
91
92#endif /* CONFIG_MMU */
93#endif /* __KERNEL__ */
94#endif /* _ASM_M68K_FUTEX_H */
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 29c7c6c3a5f2..42235e7fbeed 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -55,7 +55,7 @@ struct mac_model
55#define MAC_SCSI_QUADRA3 4 55#define MAC_SCSI_QUADRA3 4
56#define MAC_SCSI_IIFX 5 56#define MAC_SCSI_IIFX 5
57#define MAC_SCSI_DUO 6 57#define MAC_SCSI_DUO 6
58#define MAC_SCSI_CCL 7 58#define MAC_SCSI_LC 7
59#define MAC_SCSI_LATE 8 59#define MAC_SCSI_LATE 8
60 60
61#define MAC_IDE_NONE 0 61#define MAC_IDE_NONE 0
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index e9c3756139fc..689b47d292ac 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -296,7 +296,7 @@ static struct mac_model mac_data_table[] = {
296 .name = "IIvi", 296 .name = "IIvi",
297 .adb_type = MAC_ADB_IISI, 297 .adb_type = MAC_ADB_IISI,
298 .via_type = MAC_VIA_IICI, 298 .via_type = MAC_VIA_IICI,
299 .scsi_type = MAC_SCSI_OLD, 299 .scsi_type = MAC_SCSI_LC,
300 .scc_type = MAC_SCC_II, 300 .scc_type = MAC_SCC_II,
301 .nubus_type = MAC_NUBUS, 301 .nubus_type = MAC_NUBUS,
302 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 302 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -305,7 +305,7 @@ static struct mac_model mac_data_table[] = {
305 .name = "IIvx", 305 .name = "IIvx",
306 .adb_type = MAC_ADB_IISI, 306 .adb_type = MAC_ADB_IISI,
307 .via_type = MAC_VIA_IICI, 307 .via_type = MAC_VIA_IICI,
308 .scsi_type = MAC_SCSI_OLD, 308 .scsi_type = MAC_SCSI_LC,
309 .scc_type = MAC_SCC_II, 309 .scc_type = MAC_SCC_II,
310 .nubus_type = MAC_NUBUS, 310 .nubus_type = MAC_NUBUS,
311 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 311 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -320,7 +320,7 @@ static struct mac_model mac_data_table[] = {
320 .name = "Classic II", 320 .name = "Classic II",
321 .adb_type = MAC_ADB_IISI, 321 .adb_type = MAC_ADB_IISI,
322 .via_type = MAC_VIA_IICI, 322 .via_type = MAC_VIA_IICI,
323 .scsi_type = MAC_SCSI_OLD, 323 .scsi_type = MAC_SCSI_LC,
324 .scc_type = MAC_SCC_II, 324 .scc_type = MAC_SCC_II,
325 .nubus_type = MAC_NUBUS, 325 .nubus_type = MAC_NUBUS,
326 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 326 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -329,7 +329,7 @@ static struct mac_model mac_data_table[] = {
329 .name = "Color Classic", 329 .name = "Color Classic",
330 .adb_type = MAC_ADB_CUDA, 330 .adb_type = MAC_ADB_CUDA,
331 .via_type = MAC_VIA_IICI, 331 .via_type = MAC_VIA_IICI,
332 .scsi_type = MAC_SCSI_CCL, 332 .scsi_type = MAC_SCSI_LC,
333 .scc_type = MAC_SCC_II, 333 .scc_type = MAC_SCC_II,
334 .nubus_type = MAC_NUBUS, 334 .nubus_type = MAC_NUBUS,
335 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 335 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -338,7 +338,7 @@ static struct mac_model mac_data_table[] = {
338 .name = "Color Classic II", 338 .name = "Color Classic II",
339 .adb_type = MAC_ADB_CUDA, 339 .adb_type = MAC_ADB_CUDA,
340 .via_type = MAC_VIA_IICI, 340 .via_type = MAC_VIA_IICI,
341 .scsi_type = MAC_SCSI_CCL, 341 .scsi_type = MAC_SCSI_LC,
342 .scc_type = MAC_SCC_II, 342 .scc_type = MAC_SCC_II,
343 .nubus_type = MAC_NUBUS, 343 .nubus_type = MAC_NUBUS,
344 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 344 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -353,7 +353,7 @@ static struct mac_model mac_data_table[] = {
353 .name = "LC", 353 .name = "LC",
354 .adb_type = MAC_ADB_IISI, 354 .adb_type = MAC_ADB_IISI,
355 .via_type = MAC_VIA_IICI, 355 .via_type = MAC_VIA_IICI,
356 .scsi_type = MAC_SCSI_OLD, 356 .scsi_type = MAC_SCSI_LC,
357 .scc_type = MAC_SCC_II, 357 .scc_type = MAC_SCC_II,
358 .nubus_type = MAC_NUBUS, 358 .nubus_type = MAC_NUBUS,
359 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 359 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -362,7 +362,7 @@ static struct mac_model mac_data_table[] = {
362 .name = "LC II", 362 .name = "LC II",
363 .adb_type = MAC_ADB_IISI, 363 .adb_type = MAC_ADB_IISI,
364 .via_type = MAC_VIA_IICI, 364 .via_type = MAC_VIA_IICI,
365 .scsi_type = MAC_SCSI_OLD, 365 .scsi_type = MAC_SCSI_LC,
366 .scc_type = MAC_SCC_II, 366 .scc_type = MAC_SCC_II,
367 .nubus_type = MAC_NUBUS, 367 .nubus_type = MAC_NUBUS,
368 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 368 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -371,7 +371,7 @@ static struct mac_model mac_data_table[] = {
371 .name = "LC III", 371 .name = "LC III",
372 .adb_type = MAC_ADB_IISI, 372 .adb_type = MAC_ADB_IISI,
373 .via_type = MAC_VIA_IICI, 373 .via_type = MAC_VIA_IICI,
374 .scsi_type = MAC_SCSI_OLD, 374 .scsi_type = MAC_SCSI_LC,
375 .scc_type = MAC_SCC_II, 375 .scc_type = MAC_SCC_II,
376 .nubus_type = MAC_NUBUS, 376 .nubus_type = MAC_NUBUS,
377 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 377 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -499,7 +499,7 @@ static struct mac_model mac_data_table[] = {
499 .name = "Performa 460", 499 .name = "Performa 460",
500 .adb_type = MAC_ADB_IISI, 500 .adb_type = MAC_ADB_IISI,
501 .via_type = MAC_VIA_IICI, 501 .via_type = MAC_VIA_IICI,
502 .scsi_type = MAC_SCSI_OLD, 502 .scsi_type = MAC_SCSI_LC,
503 .scc_type = MAC_SCC_II, 503 .scc_type = MAC_SCC_II,
504 .nubus_type = MAC_NUBUS, 504 .nubus_type = MAC_NUBUS,
505 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 505 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -526,7 +526,7 @@ static struct mac_model mac_data_table[] = {
526 .name = "Performa 520", 526 .name = "Performa 520",
527 .adb_type = MAC_ADB_CUDA, 527 .adb_type = MAC_ADB_CUDA,
528 .via_type = MAC_VIA_IICI, 528 .via_type = MAC_VIA_IICI,
529 .scsi_type = MAC_SCSI_CCL, 529 .scsi_type = MAC_SCSI_LC,
530 .scc_type = MAC_SCC_II, 530 .scc_type = MAC_SCC_II,
531 .nubus_type = MAC_NUBUS, 531 .nubus_type = MAC_NUBUS,
532 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 532 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -535,7 +535,7 @@ static struct mac_model mac_data_table[] = {
535 .name = "Performa 550", 535 .name = "Performa 550",
536 .adb_type = MAC_ADB_CUDA, 536 .adb_type = MAC_ADB_CUDA,
537 .via_type = MAC_VIA_IICI, 537 .via_type = MAC_VIA_IICI,
538 .scsi_type = MAC_SCSI_CCL, 538 .scsi_type = MAC_SCSI_LC,
539 .scc_type = MAC_SCC_II, 539 .scc_type = MAC_SCC_II,
540 .nubus_type = MAC_NUBUS, 540 .nubus_type = MAC_NUBUS,
541 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 541 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -567,7 +567,7 @@ static struct mac_model mac_data_table[] = {
567 .name = "TV", 567 .name = "TV",
568 .adb_type = MAC_ADB_CUDA, 568 .adb_type = MAC_ADB_CUDA,
569 .via_type = MAC_VIA_IICI, 569 .via_type = MAC_VIA_IICI,
570 .scsi_type = MAC_SCSI_CCL, 570 .scsi_type = MAC_SCSI_LC,
571 .scc_type = MAC_SCC_II, 571 .scc_type = MAC_SCC_II,
572 .nubus_type = MAC_NUBUS, 572 .nubus_type = MAC_NUBUS,
573 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 573 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -576,7 +576,7 @@ static struct mac_model mac_data_table[] = {
576 .name = "Performa 600", 576 .name = "Performa 600",
577 .adb_type = MAC_ADB_IISI, 577 .adb_type = MAC_ADB_IISI,
578 .via_type = MAC_VIA_IICI, 578 .via_type = MAC_VIA_IICI,
579 .scsi_type = MAC_SCSI_OLD, 579 .scsi_type = MAC_SCSI_LC,
580 .scc_type = MAC_SCC_II, 580 .scc_type = MAC_SCC_II,
581 .nubus_type = MAC_NUBUS, 581 .nubus_type = MAC_NUBUS,
582 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 582 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -1109,8 +1109,10 @@ int __init mac_platform_init(void)
1109 platform_device_register_simple("mac_scsi", 0, 1109 platform_device_register_simple("mac_scsi", 0,
1110 mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc)); 1110 mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
1111 break; 1111 break;
1112 case MAC_SCSI_CCL: 1112 case MAC_SCSI_LC:
1113 /* Addresses from the Color Classic Developer Note. 1113 /* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
1114 * Also from the Developer Notes for Classic II, LC III,
1115 * Color Classic and IIvx.
1114 * $50F0 6000 - $50F0 7FFF: SCSI handshake 1116 * $50F0 6000 - $50F0 7FFF: SCSI handshake
1115 * $50F1 0000 - $50F1 1FFF: SCSI 1117 * $50F1 0000 - $50F1 1FFF: SCSI
1116 * $50F1 2000 - $50F1 3FFF: SCSI DMA 1118 * $50F1 2000 - $50F1 3FFF: SCSI DMA
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 1bb3ce6634d3..e6a3b56c6481 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -168,49 +168,3 @@ int mvme147_set_clock_mmss (unsigned long nowtime)
168{ 168{
169 return 0; 169 return 0;
170} 170}
171
172/*------------------- Serial console stuff ------------------------*/
173
174static void scc_delay (void)
175{
176 int n;
177 volatile int trash;
178
179 for (n = 0; n < 20; n++)
180 trash = n;
181}
182
183static void scc_write (char ch)
184{
185 volatile char *p = (volatile char *)M147_SCC_A_ADDR;
186
187 do {
188 scc_delay();
189 }
190 while (!(*p & 4));
191 scc_delay();
192 *p = 8;
193 scc_delay();
194 *p = ch;
195}
196
197
198void m147_scc_write (struct console *co, const char *str, unsigned count)
199{
200 unsigned long flags;
201
202 local_irq_save(flags);
203
204 while (count--)
205 {
206 if (*str == '\n')
207 scc_write ('\r');
208 scc_write (*str++);
209 }
210 local_irq_restore(flags);
211}
212
213void mvme147_init_console_port (struct console *co, int cflag)
214{
215 co->write = m147_scc_write;
216}
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 6ef7a81a3b12..1755e2f7137d 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -161,4 +161,4 @@ static int __init rtc_MK48T08_init(void)
161 printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION); 161 printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
162 return misc_register(&rtc_dev); 162 return misc_register(&rtc_dev);
163} 163}
164module_init(rtc_MK48T08_init); 164device_initcall(rtc_MK48T08_init);
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 8e211cc28dac..91d2068da1b9 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -34,5 +34,4 @@ $(obj)/simpleImage.%: vmlinux FORCE
34 $(call if_changed,strip) 34 $(call if_changed,strip)
35 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 35 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
36 36
37 37clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
38clean-files += simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile
index c4982d16e555..a3d2e42c3c97 100644
--- a/arch/microblaze/boot/dts/Makefile
+++ b/arch/microblaze/boot/dts/Makefile
@@ -16,5 +16,3 @@ quiet_cmd_cp = CP $< $@$2
16 16
17# Rule to build device tree blobs 17# Rule to build device tree blobs
18DTC_FLAGS := -p 1024 18DTC_FLAGS := -p 1024
19
20clean-files += *.dtb
diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h
index 60cb39deb533..ea2a9cd9b159 100644
--- a/arch/microblaze/include/asm/delay.h
+++ b/arch/microblaze/include/asm/delay.h
@@ -15,7 +15,7 @@
15 15
16#include <linux/param.h> 16#include <linux/param.h>
17 17
18extern inline void __delay(unsigned long loops) 18static inline void __delay(unsigned long loops)
19{ 19{
20 asm volatile ("# __delay \n\t" \ 20 asm volatile ("# __delay \n\t" \
21 "1: addi %0, %0, -1\t\n" \ 21 "1: addi %0, %0, -1\t\n" \
@@ -43,7 +43,7 @@ extern inline void __delay(unsigned long loops)
43 43
44extern unsigned long loops_per_jiffy; 44extern unsigned long loops_per_jiffy;
45 45
46extern inline void __udelay(unsigned int x) 46static inline void __udelay(unsigned int x)
47{ 47{
48 48
49 unsigned long long tmp = 49 unsigned long long tmp =
diff --git a/arch/microblaze/include/asm/kgdb.h b/arch/microblaze/include/asm/kgdb.h
index 78b17d40b235..ad27acb2b15f 100644
--- a/arch/microblaze/include/asm/kgdb.h
+++ b/arch/microblaze/include/asm/kgdb.h
@@ -23,6 +23,9 @@ static inline void arch_kgdb_breakpoint(void)
23 __asm__ __volatile__("brki r16, 0x18;"); 23 __asm__ __volatile__("brki r16, 0x18;");
24} 24}
25 25
26struct pt_regs;
27asmlinkage void microblaze_kgdb_break(struct pt_regs *regs);
28
26#endif /* __ASSEMBLY__ */ 29#endif /* __ASSEMBLY__ */
27#endif /* __MICROBLAZE_KGDB_H__ */ 30#endif /* __MICROBLAZE_KGDB_H__ */
28#endif /* __KERNEL__ */ 31#endif /* __KERNEL__ */
diff --git a/arch/microblaze/include/asm/linkage.h b/arch/microblaze/include/asm/linkage.h
index 3a8e36d057eb..0540bbaad897 100644
--- a/arch/microblaze/include/asm/linkage.h
+++ b/arch/microblaze/include/asm/linkage.h
@@ -1,15 +1 @@
1/* #include <asm-generic/linkage.h>
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_LINKAGE_H
10#define _ASM_MICROBLAZE_LINKAGE_H
11
12#define __ALIGN .align 4
13#define __ALIGN_STR ".align 4"
14
15#endif /* _ASM_MICROBLAZE_LINKAGE_H */
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 7fdf7fabc7d7..61436d69775c 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -60,7 +60,7 @@ extern unsigned long get_zero_page_fast(void);
60 60
61extern void __bad_pte(pmd_t *pmd); 61extern void __bad_pte(pmd_t *pmd);
62 62
63extern inline pgd_t *get_pgd_slow(void) 63static inline pgd_t *get_pgd_slow(void)
64{ 64{
65 pgd_t *ret; 65 pgd_t *ret;
66 66
@@ -70,7 +70,7 @@ extern inline pgd_t *get_pgd_slow(void)
70 return ret; 70 return ret;
71} 71}
72 72
73extern inline pgd_t *get_pgd_fast(void) 73static inline pgd_t *get_pgd_fast(void)
74{ 74{
75 unsigned long *ret; 75 unsigned long *ret;
76 76
@@ -84,14 +84,14 @@ extern inline pgd_t *get_pgd_fast(void)
84 return (pgd_t *)ret; 84 return (pgd_t *)ret;
85} 85}
86 86
87extern inline void free_pgd_fast(pgd_t *pgd) 87static inline void free_pgd_fast(pgd_t *pgd)
88{ 88{
89 *(unsigned long **)pgd = pgd_quicklist; 89 *(unsigned long **)pgd = pgd_quicklist;
90 pgd_quicklist = (unsigned long *) pgd; 90 pgd_quicklist = (unsigned long *) pgd;
91 pgtable_cache_size++; 91 pgtable_cache_size++;
92} 92}
93 93
94extern inline void free_pgd_slow(pgd_t *pgd) 94static inline void free_pgd_slow(pgd_t *pgd)
95{ 95{
96 free_page((unsigned long)pgd); 96 free_page((unsigned long)pgd);
97} 97}
@@ -146,19 +146,19 @@ static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
146 return (pte_t *)ret; 146 return (pte_t *)ret;
147} 147}
148 148
149extern inline void pte_free_fast(pte_t *pte) 149static inline void pte_free_fast(pte_t *pte)
150{ 150{
151 *(unsigned long **)pte = pte_quicklist; 151 *(unsigned long **)pte = pte_quicklist;
152 pte_quicklist = (unsigned long *) pte; 152 pte_quicklist = (unsigned long *) pte;
153 pgtable_cache_size++; 153 pgtable_cache_size++;
154} 154}
155 155
156extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 156static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
157{ 157{
158 free_page((unsigned long)pte); 158 free_page((unsigned long)pte);
159} 159}
160 160
161extern inline void pte_free_slow(struct page *ptepage) 161static inline void pte_free_slow(struct page *ptepage)
162{ 162{
163 __free_page(ptepage); 163 __free_page(ptepage);
164} 164}
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h
index 53cfaf34c343..04a5bece8168 100644
--- a/arch/microblaze/include/asm/syscall.h
+++ b/arch/microblaze/include/asm/syscall.h
@@ -97,7 +97,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
97 microblaze_set_syscall_arg(regs, i++, *args++); 97 microblaze_set_syscall_arg(regs, i++, *args++);
98} 98}
99 99
100asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 100asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs);
101asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); 101asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
102 102
103static inline int syscall_get_arch(void) 103static inline int syscall_get_arch(void)
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 59a89a64a865..62942fd12672 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -220,7 +220,7 @@ extern long __user_bad(void);
220 } else { \ 220 } else { \
221 __gu_err = -EFAULT; \ 221 __gu_err = -EFAULT; \
222 } \ 222 } \
223 x = (typeof(*(ptr)))__gu_val; \ 223 x = (__force typeof(*(ptr)))__gu_val; \
224 __gu_err; \ 224 __gu_err; \
225}) 225})
226 226
@@ -242,7 +242,7 @@ extern long __user_bad(void);
242 default: \ 242 default: \
243 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\ 243 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
244 } \ 244 } \
245 x = (__typeof__(*(ptr))) __gu_val; \ 245 x = (__force __typeof__(*(ptr))) __gu_val; \
246 __gu_err; \ 246 __gu_err; \
247}) 247})
248 248
@@ -306,7 +306,7 @@ extern long __user_bad(void);
306 306
307#define __put_user_check(x, ptr, size) \ 307#define __put_user_check(x, ptr, size) \
308({ \ 308({ \
309 typeof(*(ptr)) volatile __pu_val = x; \ 309 typeof(*(ptr)) volatile __pu_val = x; \
310 typeof(*(ptr)) __user *__pu_addr = (ptr); \ 310 typeof(*(ptr)) __user *__pu_addr = (ptr); \
311 int __pu_err = 0; \ 311 int __pu_err = 0; \
312 \ 312 \
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 0a53362d5548..76ed17b56fea 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
38 38
39#endif /* __ASSEMBLY__ */ 39#endif /* __ASSEMBLY__ */
40 40
41#define __NR_syscalls 388 41#define __NR_syscalls 389
42 42
43#endif /* _ASM_MICROBLAZE_UNISTD_H */ 43#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index c712677f8a2a..32850c73be09 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -403,5 +403,6 @@
403#define __NR_getrandom 385 403#define __NR_getrandom 385
404#define __NR_memfd_create 386 404#define __NR_memfd_create 386
405#define __NR_bpf 387 405#define __NR_bpf 387
406#define __NR_execveat 388
406 407
407#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ 408#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index 08d50cc55e7d..f08bacaf8a95 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -16,7 +16,7 @@ extra-y := head.o vmlinux.lds
16 16
17obj-y += dma.o exceptions.o \ 17obj-y += dma.o exceptions.o \
18 hw_exception_handler.o intc.o irq.o \ 18 hw_exception_handler.o intc.o irq.o \
19 platform.o process.o prom.o prom_parse.o ptrace.o \ 19 platform.o process.o prom.o ptrace.o \
20 reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o 20 reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
21 21
22obj-y += cpu/ 22obj-y += cpu/
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index a6e44410672d..0bde47e4fa69 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -140,10 +140,10 @@ do { \
140/* It is used only first parameter for OP - for wic, wdc */ 140/* It is used only first parameter for OP - for wic, wdc */
141#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ 141#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
142do { \ 142do { \
143 int volatile temp = 0; \ 143 unsigned int volatile temp = 0; \
144 int align = ~(line_length - 1); \ 144 unsigned int align = ~(line_length - 1); \
145 end = ((end & align) == end) ? end - line_length : end & align; \ 145 end = ((end & align) == end) ? end - line_length : end & align; \
146 WARN_ON(end - start < 0); \ 146 WARN_ON(end < start); \
147 \ 147 \
148 __asm__ __volatile__ (" 1: " #op " %1, r0;" \ 148 __asm__ __volatile__ (" 1: " #op " %1, r0;" \
149 "cmpu %0, %1, %2;" \ 149 "cmpu %0, %1, %2;" \
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index 93c26cf50de5..a32daec96c12 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -33,7 +33,7 @@
33void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) 33void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
34{ 34{
35 struct pvr_s pvr; 35 struct pvr_s pvr;
36 int temp; /* for saving temp value */ 36 u32 temp; /* for saving temp value */
37 get_pvr(&pvr); 37 get_pvr(&pvr);
38 38
39 CI(ver_code, VERSION); 39 CI(ver_code, VERSION);
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 4854285b26e7..85dbda4a08a8 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -22,7 +22,7 @@ static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
22 22
23void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) 23void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
24{ 24{
25 int i = 0; 25 u32 i = 0;
26 26
27 ci->use_instr = 27 ci->use_instr =
28 (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) | 28 (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 234acad79b9e..d1dd6e83d59b 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -41,8 +41,12 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
41 {"8.40.a", 0x18}, 41 {"8.40.a", 0x18},
42 {"8.40.b", 0x19}, 42 {"8.40.b", 0x19},
43 {"8.50.a", 0x1a}, 43 {"8.50.a", 0x1a},
44 {"8.50.b", 0x1c},
45 {"8.50.c", 0x1e},
44 {"9.0", 0x1b}, 46 {"9.0", 0x1b},
45 {"9.1", 0x1d}, 47 {"9.1", 0x1d},
48 {"9.2", 0x1f},
49 {"9.3", 0x20},
46 {NULL, 0}, 50 {NULL, 0},
47}; 51};
48 52
@@ -61,11 +65,14 @@ const struct family_string_key family_string_lookup[] = {
61 {"spartan3adsp", 0xc}, 65 {"spartan3adsp", 0xc},
62 {"spartan6", 0xd}, 66 {"spartan6", 0xd},
63 {"virtex6", 0xe}, 67 {"virtex6", 0xe},
68 {"virtex7", 0xf},
64 /* FIXME There is no key code defined for spartan2 */ 69 /* FIXME There is no key code defined for spartan2 */
65 {"spartan2", 0xf0}, 70 {"spartan2", 0xf0},
66 {"kintex7", 0x10}, 71 {"kintex7", 0x10},
67 {"artix7", 0x11}, 72 {"artix7", 0x11},
68 {"zynq7000", 0x12}, 73 {"zynq7000", 0x12},
74 {"UltraScale Virtex", 0x13},
75 {"UltraScale Kintex", 0x14},
69 {NULL, 0}, 76 {NULL, 0},
70}; 77};
71 78
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 15c7c12ea0e7..719feee1e043 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -148,17 +148,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
148 ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq); 148 ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
149 if (ret < 0) { 149 if (ret < 0) {
150 pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__); 150 pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__);
151 return -EINVAL; 151 return ret;
152 } 152 }
153 153
154 ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask); 154 ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask);
155 if (ret < 0) { 155 if (ret < 0) {
156 pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__); 156 pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__);
157 return -EINVAL; 157 return ret;
158 } 158 }
159 159
160 if (intr_mask > (u32)((1ULL << nr_irq) - 1)) 160 if (intr_mask >> nr_irq)
161 pr_info(" ERROR: Mismatch in kind-of-intr param\n"); 161 pr_warn("%s: mismatch in kind-of-intr param\n", __func__);
162 162
163 pr_info("%s: num_irq=%d, edge=0x%x\n", 163 pr_info("%s: num_irq=%d, edge=0x%x\n",
164 intc->full_name, nr_irq, intr_mask); 164 intc->full_name, nr_irq, intr_mask);
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c
index 09a5e8286137..8736af5806ae 100644
--- a/arch/microblaze/kernel/kgdb.c
+++ b/arch/microblaze/kernel/kgdb.c
@@ -12,6 +12,7 @@
12#include <linux/io.h> 12#include <linux/io.h>
13#include <asm/cacheflush.h> 13#include <asm/cacheflush.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/kgdb.h>
15#include <asm/pvr.h> 16#include <asm/pvr.h>
16 17
17#define GDB_REG 0 18#define GDB_REG 0
@@ -35,9 +36,10 @@ struct pvr_s pvr;
35 36
36void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) 37void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
37{ 38{
38 int i; 39 unsigned int i;
39 unsigned long *pt_regb = (unsigned long *)regs; 40 unsigned long *pt_regb = (unsigned long *)regs;
40 int temp; 41 int temp;
42
41 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ 43 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
42 for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) 44 for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
43 gdb_regs[i] = pt_regb[i]; 45 gdb_regs[i] = pt_regb[i];
@@ -67,7 +69,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
67 69
68void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) 70void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
69{ 71{
70 int i; 72 unsigned int i;
71 unsigned long *pt_regb = (unsigned long *)regs; 73 unsigned long *pt_regb = (unsigned long *)regs;
72 74
73 /* pt_regs and gdb_regs have the same 37 values. 75 /* pt_regs and gdb_regs have the same 37 values.
@@ -77,7 +79,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
77 pt_regb[i] = gdb_regs[i]; 79 pt_regb[i] = gdb_regs[i];
78} 80}
79 81
80void microblaze_kgdb_break(struct pt_regs *regs) 82asmlinkage void microblaze_kgdb_break(struct pt_regs *regs)
81{ 83{
82 if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) 84 if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
83 return; 85 return;
@@ -91,7 +93,7 @@ void microblaze_kgdb_break(struct pt_regs *regs)
91/* untested */ 93/* untested */
92void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) 94void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
93{ 95{
94 int i; 96 unsigned int i;
95 unsigned long *pt_regb = (unsigned long *)(p->thread.regs); 97 unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
96 98
97 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ 99 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
deleted file mode 100644
index 068762f55fd6..000000000000
--- a/arch/microblaze/kernel/prom_parse.c
+++ /dev/null
@@ -1,35 +0,0 @@
1#undef DEBUG
2
3#include <linux/export.h>
4#include <linux/kernel.h>
5#include <linux/string.h>
6#include <linux/ioport.h>
7#include <linux/etherdevice.h>
8#include <linux/of_address.h>
9#include <asm/prom.h>
10
11void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
12 unsigned long *busno, unsigned long *phys, unsigned long *size)
13{
14 const u32 *dma_window;
15 u32 cells;
16 const unsigned char *prop;
17
18 dma_window = dma_window_prop;
19
20 /* busno is always one cell */
21 *busno = *(dma_window++);
22
23 prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
24 if (!prop)
25 prop = of_get_property(dn, "#address-cells", NULL);
26
27 cells = prop ? *(u32 *)prop : of_n_addr_cells(dn);
28 *phys = of_read_number(dma_window, cells);
29
30 dma_window += cells;
31
32 prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
33 cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
34 *size = of_read_number(dma_window, cells);
35}
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index bb10637ce688..8cfa98cadf3d 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -132,9 +132,9 @@ long arch_ptrace(struct task_struct *child, long request,
132 return rval; 132 return rval;
133} 133}
134 134
135asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) 135asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs)
136{ 136{
137 long ret = 0; 137 unsigned long ret = 0;
138 138
139 secure_computing_strict(regs->r12); 139 secure_computing_strict(regs->r12);
140 140
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index fbe58c6554a8..bab4c8330ef4 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/of_platform.h> 11#include <linux/of_platform.h>
12#include <asm/prom.h>
13 12
14/* Trigger specific functions */ 13/* Trigger specific functions */
15#ifdef CONFIG_GPIOLIB 14#ifdef CONFIG_GPIOLIB
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 8955a3829cf0..235706055b7f 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -158,7 +158,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
158{ 158{
159 struct rt_sigframe __user *frame; 159 struct rt_sigframe __user *frame;
160 int err = 0, sig = ksig->sig; 160 int err = 0, sig = ksig->sig;
161 int signal; 161 unsigned long signal;
162 unsigned long address = 0; 162 unsigned long address = 0;
163#ifdef CONFIG_MMU 163#ifdef CONFIG_MMU
164 pmd_t *pmdp; 164 pmd_t *pmdp;
@@ -174,7 +174,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
174 && current_thread_info()->exec_domain->signal_invmap 174 && current_thread_info()->exec_domain->signal_invmap
175 && sig < 32 175 && sig < 32
176 ? current_thread_info()->exec_domain->signal_invmap[sig] 176 ? current_thread_info()->exec_domain->signal_invmap[sig]
177 : sig; 177 : (unsigned long)sig;
178 178
179 if (ksig->ka.sa.sa_flags & SA_SIGINFO) 179 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
180 err |= copy_siginfo_to_user(&frame->info, &ksig->info); 180 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 0166e890486c..29c8568ec55c 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -388,3 +388,4 @@ ENTRY(sys_call_table)
388 .long sys_getrandom /* 385 */ 388 .long sys_getrandom /* 385 */
389 .long sys_memfd_create 389 .long sys_memfd_create
390 .long sys_bpf 390 .long sys_bpf
391 .long sys_execveat
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 1f7b8d449668..61c04eed14d5 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -59,7 +59,7 @@ struct stack_trace;
59 * 59 *
60 * Return - Number of stack bytes the instruction reserves or reclaims 60 * Return - Number of stack bytes the instruction reserves or reclaims
61 */ 61 */
62inline long get_frame_size(unsigned long instr) 62static inline long get_frame_size(unsigned long instr)
63{ 63{
64 return abs((s16)(instr & 0xFFFF)); 64 return abs((s16)(instr & 0xFFFF));
65} 65}
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index 5ec2a7bae02c..f2355e3e65a1 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
173} 173}
174 174
175struct pci_ops bcm1480_pci_ops = { 175struct pci_ops bcm1480_pci_ops = {
176 bcm1480_pcibios_read, 176 .read = bcm1480_pcibios_read,
177 bcm1480_pcibios_write, 177 .write = bcm1480_pcibios_write,
178}; 178};
179 179
180static struct resource bcm1480_mem_resource = { 180static struct resource bcm1480_mem_resource = {
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index d07e04121cc6..bedb72bd3a27 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
327 327
328 328
329static struct pci_ops octeon_pci_ops = { 329static struct pci_ops octeon_pci_ops = {
330 octeon_read_config, 330 .read = octeon_read_config,
331 octeon_write_config, 331 .write = octeon_write_config,
332}; 332};
333 333
334static struct resource octeon_pci_mem_resource = { 334static struct resource octeon_pci_mem_resource = {
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 5e36c33e5543..eb4a17ba4a53 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
1792} 1792}
1793 1793
1794static struct pci_ops octeon_pcie0_ops = { 1794static struct pci_ops octeon_pcie0_ops = {
1795 octeon_pcie0_read_config, 1795 .read = octeon_pcie0_read_config,
1796 octeon_pcie0_write_config, 1796 .write = octeon_pcie0_write_config,
1797}; 1797};
1798 1798
1799static struct resource octeon_pcie0_mem_resource = { 1799static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
1813}; 1813};
1814 1814
1815static struct pci_ops octeon_pcie1_ops = { 1815static struct pci_ops octeon_pcie1_ops = {
1816 octeon_pcie1_read_config, 1816 .read = octeon_pcie1_read_config,
1817 octeon_pcie1_write_config, 1817 .write = octeon_pcie1_write_config,
1818}; 1818};
1819 1819
1820static struct resource octeon_pcie1_mem_resource = { 1820static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
1834}; 1834};
1835 1835
1836static struct pci_ops octeon_dummy_ops = { 1836static struct pci_ops octeon_dummy_ops = {
1837 octeon_dummy_read_config, 1837 .read = octeon_dummy_read_config,
1838 octeon_dummy_write_config, 1838 .write = octeon_dummy_write_config,
1839}; 1839};
1840 1840
1841static struct resource octeon_dummy_mem_resource = { 1841static struct resource octeon_dummy_mem_resource = {
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 471ff398090c..613ca1e55b4b 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -228,8 +228,8 @@ static int pci_ampci_write_config(struct pci_bus *bus, unsigned int devfn,
228} 228}
229 229
230static struct pci_ops pci_direct_ampci = { 230static struct pci_ops pci_direct_ampci = {
231 pci_ampci_read_config, 231 .read = pci_ampci_read_config,
232 pci_ampci_write_config, 232 .write = pci_ampci_write_config,
233}; 233};
234 234
235/* 235/*
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index f22387598040..94170e4f2ce7 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
399} 399}
400 400
401static struct pci_ops scc_pciex_pci_ops = { 401static struct pci_ops scc_pciex_pci_ops = {
402 scc_pciex_read_config, 402 .read = scc_pciex_read_config,
403 scc_pciex_write_config, 403 .write = scc_pciex_write_config,
404}; 404};
405 405
406static void pciex_clear_intr_all(unsigned int __iomem *base) 406static void pciex_clear_intr_all(unsigned int __iomem *base)
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 04702db35d45..f4071a67ad00 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -133,17 +133,23 @@ static void __init fixup_bus_range(struct device_node *bridge)
133 |(((unsigned int)(off)) & 0xFCUL) \ 133 |(((unsigned int)(off)) & 0xFCUL) \
134 |1UL) 134 |1UL)
135 135
136static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose, 136static void __iomem *macrisc_cfg_map_bus(struct pci_bus *bus,
137 u8 bus, u8 dev_fn, u8 offset) 137 unsigned int dev_fn,
138 int offset)
138{ 139{
139 unsigned int caddr; 140 unsigned int caddr;
141 struct pci_controller *hose;
140 142
141 if (bus == hose->first_busno) { 143 hose = pci_bus_to_host(bus);
144 if (hose == NULL)
145 return NULL;
146
147 if (bus->number == hose->first_busno) {
142 if (dev_fn < (11 << 3)) 148 if (dev_fn < (11 << 3))
143 return NULL; 149 return NULL;
144 caddr = MACRISC_CFA0(dev_fn, offset); 150 caddr = MACRISC_CFA0(dev_fn, offset);
145 } else 151 } else
146 caddr = MACRISC_CFA1(bus, dev_fn, offset); 152 caddr = MACRISC_CFA1(bus->number, dev_fn, offset);
147 153
148 /* Uninorth will return garbage if we don't read back the value ! */ 154 /* Uninorth will return garbage if we don't read back the value ! */
149 do { 155 do {
@@ -154,129 +160,46 @@ static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose,
154 return hose->cfg_data + offset; 160 return hose->cfg_data + offset;
155} 161}
156 162
157static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
158 int offset, int len, u32 *val)
159{
160 struct pci_controller *hose;
161 volatile void __iomem *addr;
162
163 hose = pci_bus_to_host(bus);
164 if (hose == NULL)
165 return PCIBIOS_DEVICE_NOT_FOUND;
166 if (offset >= 0x100)
167 return PCIBIOS_BAD_REGISTER_NUMBER;
168 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
169 if (!addr)
170 return PCIBIOS_DEVICE_NOT_FOUND;
171 /*
172 * Note: the caller has already checked that offset is
173 * suitably aligned and that len is 1, 2 or 4.
174 */
175 switch (len) {
176 case 1:
177 *val = in_8(addr);
178 break;
179 case 2:
180 *val = in_le16(addr);
181 break;
182 default:
183 *val = in_le32(addr);
184 break;
185 }
186 return PCIBIOS_SUCCESSFUL;
187}
188
189static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
190 int offset, int len, u32 val)
191{
192 struct pci_controller *hose;
193 volatile void __iomem *addr;
194
195 hose = pci_bus_to_host(bus);
196 if (hose == NULL)
197 return PCIBIOS_DEVICE_NOT_FOUND;
198 if (offset >= 0x100)
199 return PCIBIOS_BAD_REGISTER_NUMBER;
200 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
201 if (!addr)
202 return PCIBIOS_DEVICE_NOT_FOUND;
203 /*
204 * Note: the caller has already checked that offset is
205 * suitably aligned and that len is 1, 2 or 4.
206 */
207 switch (len) {
208 case 1:
209 out_8(addr, val);
210 break;
211 case 2:
212 out_le16(addr, val);
213 break;
214 default:
215 out_le32(addr, val);
216 break;
217 }
218 return PCIBIOS_SUCCESSFUL;
219}
220
221static struct pci_ops macrisc_pci_ops = 163static struct pci_ops macrisc_pci_ops =
222{ 164{
223 .read = macrisc_read_config, 165 .map_bus = macrisc_cfg_map_bus,
224 .write = macrisc_write_config, 166 .read = pci_generic_config_read,
167 .write = pci_generic_config_write,
225}; 168};
226 169
227#ifdef CONFIG_PPC32 170#ifdef CONFIG_PPC32
228/* 171/*
229 * Verify that a specific (bus, dev_fn) exists on chaos 172 * Verify that a specific (bus, dev_fn) exists on chaos
230 */ 173 */
231static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) 174static void __iomem *chaos_map_bus(struct pci_bus *bus, unsigned int devfn,
175 int offset)
232{ 176{
233 struct device_node *np; 177 struct device_node *np;
234 const u32 *vendor, *device; 178 const u32 *vendor, *device;
235 179
236 if (offset >= 0x100) 180 if (offset >= 0x100)
237 return PCIBIOS_BAD_REGISTER_NUMBER; 181 return NULL;
238 np = of_pci_find_child_device(bus->dev.of_node, devfn); 182 np = of_pci_find_child_device(bus->dev.of_node, devfn);
239 if (np == NULL) 183 if (np == NULL)
240 return PCIBIOS_DEVICE_NOT_FOUND; 184 return NULL;
241 185
242 vendor = of_get_property(np, "vendor-id", NULL); 186 vendor = of_get_property(np, "vendor-id", NULL);
243 device = of_get_property(np, "device-id", NULL); 187 device = of_get_property(np, "device-id", NULL);
244 if (vendor == NULL || device == NULL) 188 if (vendor == NULL || device == NULL)
245 return PCIBIOS_DEVICE_NOT_FOUND; 189 return NULL;
246 190
247 if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10) 191 if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
248 && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24)) 192 && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
249 return PCIBIOS_BAD_REGISTER_NUMBER; 193 return NULL;
250
251 return PCIBIOS_SUCCESSFUL;
252}
253 194
254static int 195 return macrisc_cfg_map_bus(bus, devfn, offset);
255chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
256 int len, u32 *val)
257{
258 int result = chaos_validate_dev(bus, devfn, offset);
259 if (result == PCIBIOS_BAD_REGISTER_NUMBER)
260 *val = ~0U;
261 if (result != PCIBIOS_SUCCESSFUL)
262 return result;
263 return macrisc_read_config(bus, devfn, offset, len, val);
264}
265
266static int
267chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
268 int len, u32 val)
269{
270 int result = chaos_validate_dev(bus, devfn, offset);
271 if (result != PCIBIOS_SUCCESSFUL)
272 return result;
273 return macrisc_write_config(bus, devfn, offset, len, val);
274} 196}
275 197
276static struct pci_ops chaos_pci_ops = 198static struct pci_ops chaos_pci_ops =
277{ 199{
278 .read = chaos_read_config, 200 .map_bus = chaos_map_bus,
279 .write = chaos_write_config, 201 .read = pci_generic_config_read,
202 .write = pci_generic_config_write,
280}; 203};
281 204
282static void __init setup_chaos(struct pci_controller *hose, 205static void __init setup_chaos(struct pci_controller *hose,
@@ -471,15 +394,24 @@ static struct pci_ops u3_ht_pci_ops =
471 |(((unsigned int)(off)) & 0xfcU) \ 394 |(((unsigned int)(off)) & 0xfcU) \
472 |1UL) 395 |1UL)
473 396
474static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose, 397static void __iomem *u4_pcie_cfg_map_bus(struct pci_bus *bus,
475 u8 bus, u8 dev_fn, int offset) 398 unsigned int dev_fn,
399 int offset)
476{ 400{
401 struct pci_controller *hose;
477 unsigned int caddr; 402 unsigned int caddr;
478 403
479 if (bus == hose->first_busno) { 404 if (offset >= 0x1000)
405 return NULL;
406
407 hose = pci_bus_to_host(bus);
408 if (!hose)
409 return NULL;
410
411 if (bus->number == hose->first_busno) {
480 caddr = U4_PCIE_CFA0(dev_fn, offset); 412 caddr = U4_PCIE_CFA0(dev_fn, offset);
481 } else 413 } else
482 caddr = U4_PCIE_CFA1(bus, dev_fn, offset); 414 caddr = U4_PCIE_CFA1(bus->number, dev_fn, offset);
483 415
484 /* Uninorth will return garbage if we don't read back the value ! */ 416 /* Uninorth will return garbage if we don't read back the value ! */
485 do { 417 do {
@@ -490,74 +422,11 @@ static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
490 return hose->cfg_data + offset; 422 return hose->cfg_data + offset;
491} 423}
492 424
493static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
494 int offset, int len, u32 *val)
495{
496 struct pci_controller *hose;
497 volatile void __iomem *addr;
498
499 hose = pci_bus_to_host(bus);
500 if (hose == NULL)
501 return PCIBIOS_DEVICE_NOT_FOUND;
502 if (offset >= 0x1000)
503 return PCIBIOS_BAD_REGISTER_NUMBER;
504 addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
505 if (!addr)
506 return PCIBIOS_DEVICE_NOT_FOUND;
507 /*
508 * Note: the caller has already checked that offset is
509 * suitably aligned and that len is 1, 2 or 4.
510 */
511 switch (len) {
512 case 1:
513 *val = in_8(addr);
514 break;
515 case 2:
516 *val = in_le16(addr);
517 break;
518 default:
519 *val = in_le32(addr);
520 break;
521 }
522 return PCIBIOS_SUCCESSFUL;
523}
524
525static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
526 int offset, int len, u32 val)
527{
528 struct pci_controller *hose;
529 volatile void __iomem *addr;
530
531 hose = pci_bus_to_host(bus);
532 if (hose == NULL)
533 return PCIBIOS_DEVICE_NOT_FOUND;
534 if (offset >= 0x1000)
535 return PCIBIOS_BAD_REGISTER_NUMBER;
536 addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
537 if (!addr)
538 return PCIBIOS_DEVICE_NOT_FOUND;
539 /*
540 * Note: the caller has already checked that offset is
541 * suitably aligned and that len is 1, 2 or 4.
542 */
543 switch (len) {
544 case 1:
545 out_8(addr, val);
546 break;
547 case 2:
548 out_le16(addr, val);
549 break;
550 default:
551 out_le32(addr, val);
552 break;
553 }
554 return PCIBIOS_SUCCESSFUL;
555}
556
557static struct pci_ops u4_pcie_pci_ops = 425static struct pci_ops u4_pcie_pci_ops =
558{ 426{
559 .read = u4_pcie_read_config, 427 .map_bus = u4_pcie_cfg_map_bus,
560 .write = u4_pcie_write_config, 428 .read = pci_generic_config_read,
429 .write = pci_generic_config_write,
561}; 430};
562 431
563static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev) 432static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
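
The powermac hunks above replace each controller's hand-rolled config read/write accessors with a .map_bus hook plus the shared pci_generic_config_read/write helpers: the controller code only turns (bus, devfn, offset) into a config-space address, or NULL on error, and the generic helpers perform the sized access and the error return. Below is a user-space model of that split, not the kernel helpers themselves; the names toy_map_bus and cfg_space are invented, and locking and byte order are ignored:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Fake 256-byte config space standing in for the MMIO window. */
    static uint8_t cfg_space[256];

    /* Controller-specific part: map (devfn, offset) to an address, or NULL. */
    static void *toy_map_bus(unsigned int devfn, int offset)
    {
        if (offset < 0 || offset >= 0x100)  /* range check, as in macrisc/chaos */
            return NULL;
        return &cfg_space[offset];
    }

    /* Generic part: sized access at whatever address map_bus returned. */
    static int generic_config_read(unsigned int devfn, int offset, int len,
                                   uint32_t *val)
    {
        void *addr = toy_map_bus(devfn, offset);
        uint8_t v8;
        uint16_t v16;
        uint32_t v32;

        if (!addr) {
            *val = ~0U;
            return -1;          /* stands in for PCIBIOS_DEVICE_NOT_FOUND */
        }
        switch (len) {
        case 1:
            memcpy(&v8, addr, 1);
            *val = v8;
            break;
        case 2:
            memcpy(&v16, addr, 2);
            *val = v16;
            break;
        default:
            memcpy(&v32, addr, 4);
            *val = v32;
            break;
        }
        return 0;
    }

    int main(void)
    {
        uint32_t val;

        memset(cfg_space, 0xab, sizeof(cfg_space));
        generic_config_read(0, 0x10, 4, &val);
        printf("config[0x10] = %#x\n", (unsigned int)val);
        return 0;
    }
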
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 6455c1eada1a..271b67e7670c 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -645,61 +645,21 @@ mapped:
645 return pcie->cfg_type1 + offset; 645 return pcie->cfg_type1 + offset;
646} 646}
647 647
648static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
649 int offset, int len, u32 *val)
650{
651 void __iomem *cfg_addr;
652
653 cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
654 if (!cfg_addr)
655 return PCIBIOS_DEVICE_NOT_FOUND;
656
657 switch (len) {
658 case 1:
659 *val = in_8(cfg_addr);
660 break;
661 case 2:
662 *val = in_le16(cfg_addr);
663 break;
664 default:
665 *val = in_le32(cfg_addr);
666 break;
667 }
668
669 return PCIBIOS_SUCCESSFUL;
670}
671
672static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, 648static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
673 int offset, int len, u32 val) 649 int offset, int len, u32 val)
674{ 650{
675 struct pci_controller *hose = pci_bus_to_host(bus); 651 struct pci_controller *hose = pci_bus_to_host(bus);
676 void __iomem *cfg_addr;
677
678 cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
679 if (!cfg_addr)
680 return PCIBIOS_DEVICE_NOT_FOUND;
681 652
682 /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */ 653 /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
683 if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno) 654 if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
684 val &= 0xffffff00; 655 val &= 0xffffff00;
685 656
686 switch (len) { 657 return pci_generic_config_write(bus, devfn, offset, len, val);
687 case 1:
688 out_8(cfg_addr, val);
689 break;
690 case 2:
691 out_le16(cfg_addr, val);
692 break;
693 default:
694 out_le32(cfg_addr, val);
695 break;
696 }
697
698 return PCIBIOS_SUCCESSFUL;
699} 658}
700 659
701static struct pci_ops mpc83xx_pcie_ops = { 660static struct pci_ops mpc83xx_pcie_ops = {
702 .read = mpc83xx_pcie_read_config, 661 .map_bus = mpc83xx_pcie_remap_cfg,
662 .read = pci_generic_config_read,
703 .write = mpc83xx_pcie_write_config, 663 .write = mpc83xx_pcie_write_config,
704}; 664};
705 665
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index f70c7892fa25..325df47f114d 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -245,7 +245,7 @@ static void fixup_read_and_payload_sizes(void)
245{ 245{
246 struct pci_dev *dev = NULL; 246 struct pci_dev *dev = NULL;
247 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */ 247 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
248 int max_read_size = 0x2; /* Limit to 512 byte reads. */ 248 int max_read_size = PCI_EXP_DEVCTL_READRQ_512B;
249 u16 new_values; 249 u16 new_values;
250 250
251 /* Scan for the smallest maximum payload size. */ 251 /* Scan for the smallest maximum payload size. */
@@ -258,7 +258,7 @@ static void fixup_read_and_payload_sizes(void)
258 } 258 }
259 259
260 /* Now, set the max_payload_size for all devices to that value. */ 260 /* Now, set the max_payload_size for all devices to that value. */
261 new_values = (max_read_size << 12) | (smallest_max_payload << 5); 261 new_values = max_read_size | (smallest_max_payload << 5);
262 for_each_pci_dev(dev) 262 for_each_pci_dev(dev)
263 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 263 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
264 PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ, 264 PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
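
The tile change above swaps the open-coded max_read_size << 12 for the named PCI_EXP_DEVCTL_READRQ_512B constant. The Max_Read_Request_Size field sits in bits 14:12 of the PCIe Device Control register, so the encoding 0x2 shifted into place is the same 0x2000 the macro spells out. A quick stand-alone check of that arithmetic; the two macro values below are hand-copied and should be cross-checked against include/uapi/linux/pci_regs.h:

    #include <assert.h>
    #include <stdio.h>

    #define DEVCTL_READRQ_MASK   0x7000  /* read-request field, bits 14:12 */
    #define DEVCTL_READRQ_512B   0x2000  /* encoding 0x2 in that field */

    int main(void)
    {
        int max_read_size = 0x2;         /* 512-byte reads */
        int smallest_max_payload = 0x1;  /* 256-byte payload */

        unsigned int old_style = (max_read_size << 12) |
                                 (smallest_max_payload << 5);
        unsigned int new_style = DEVCTL_READRQ_512B |
                                 (smallest_max_payload << 5);

        assert(old_style == new_style);
        assert((new_style & DEVCTL_READRQ_MASK) == DEVCTL_READRQ_512B);
        printf("DEVCTL value: %#x\n", new_style);
        return 0;
    }
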
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5e28e2be3a41..019f4e5c2b75 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -497,6 +497,17 @@ config X86_INTEL_LPSS
497 things like clock tree (common clock framework) and pincontrol 497 things like clock tree (common clock framework) and pincontrol
498 which are needed by the LPSS peripheral drivers. 498 which are needed by the LPSS peripheral drivers.
499 499
500config X86_AMD_PLATFORM_DEVICE
501 bool "AMD ACPI2Platform devices support"
502 depends on ACPI
503 select COMMON_CLK
504 select PINCTRL
505 ---help---
506	  Select this option to enumerate AMD-specific ACPI devices, such
507	  as the I2C, UART and GPIO controllers on AMD Carrizo and later
508	  chipsets, as platform devices. I2C and UART depend on COMMON_CLK
509	  to set up clocks; the GPIO driver lives under the PINCTRL subsystem.
510
500config IOSF_MBI 511config IOSF_MBI
501 tristate "Intel SoC IOSF Sideband support for SoC platforms" 512 tristate "Intel SoC IOSF Sideband support for SoC platforms"
502 depends on PCI 513 depends on PCI
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 164e3f8d3c3d..fa1195dae425 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,8 +93,6 @@ extern raw_spinlock_t pci_config_lock;
93extern int (*pcibios_enable_irq)(struct pci_dev *dev); 93extern int (*pcibios_enable_irq)(struct pci_dev *dev);
94extern void (*pcibios_disable_irq)(struct pci_dev *dev); 94extern void (*pcibios_disable_irq)(struct pci_dev *dev);
95 95
96extern bool mp_should_keep_irq(struct device *dev);
97
98struct pci_raw_ops { 96struct pci_raw_ops {
99 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, 97 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
100 int reg, int len, u32 *val); 98 int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 5eea09915a15..358dcd338915 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -55,9 +55,8 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
55 struct gnttab_map_grant_ref *kmap_ops, 55 struct gnttab_map_grant_ref *kmap_ops,
56 struct page **pages, unsigned int count); 56 struct page **pages, unsigned int count);
57extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 57extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
58 struct gnttab_map_grant_ref *kmap_ops, 58 struct gnttab_unmap_grant_ref *kunmap_ops,
59 struct page **pages, unsigned int count); 59 struct page **pages, unsigned int count);
60extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
61 60
62/* 61/*
63 * Helper functions to write or read unsigned long values to/from 62 * Helper functions to write or read unsigned long values to/from
@@ -154,21 +153,12 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
154 return mfn; 153 return mfn;
155 154
156 pfn = mfn_to_pfn_no_overrides(mfn); 155 pfn = mfn_to_pfn_no_overrides(mfn);
157 if (__pfn_to_mfn(pfn) != mfn) { 156 if (__pfn_to_mfn(pfn) != mfn)
158 /* 157 pfn = ~0;
159 * If this appears to be a foreign mfn (because the pfn
160 * doesn't map back to the mfn), then check the local override
161 * table to see if there's a better pfn to use.
162 *
163 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
164 */
165 pfn = m2p_find_override_pfn(mfn, ~0);
166 }
167 158
168 /* 159 /*
169 * pfn is ~0 if there are no entries in the m2p for mfn or if the 160 * pfn is ~0 if there are no entries in the m2p for mfn or the
170 * entry doesn't map back to the mfn and m2p_override doesn't have a 161 * entry doesn't map back to the mfn.
171 * valid entry for it.
172 */ 162 */
173 if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn)) 163 if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
174 pfn = mfn; 164 pfn = mfn;
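
With the m2p override table gone, mfn_to_pfn() reduces to: translate through the m2p, and if the p2m does not map the result back to the same mfn, treat the frame as foreign (pfn = ~0) unless the p2m marks it identity-mapped. A toy user-space model of that check follows; the table contents, sizes and the IDENTITY_BIT flag are invented for illustration and are not the kernel's representation:

    #include <stdio.h>

    #define INVALID       (~0UL)
    #define IDENTITY_BIT  (1UL << 31)   /* invented stand-in for IDENTITY_FRAME() */
    #define NR_FRAMES     4

    /* Tiny fake translation tables (toy: callers keep mfn < 16). */
    static unsigned long p2m[NR_FRAMES] = { 7, 5, IDENTITY_BIT | 2, 9 };
    static unsigned long m2p[16];

    static unsigned long toy_mfn_to_pfn(unsigned long mfn)
    {
        unsigned long pfn = m2p[mfn];           /* mfn_to_pfn_no_overrides() */

        /* Foreign unless the p2m maps the pfn back to this mfn. */
        if (pfn >= NR_FRAMES || p2m[pfn] != mfn)
            pfn = INVALID;

        /* Identity-mapped frames still resolve to themselves. */
        if (pfn == INVALID && mfn < NR_FRAMES &&
            p2m[mfn] == (IDENTITY_BIT | mfn))
            pfn = mfn;

        return pfn;
    }

    int main(void)
    {
        m2p[7] = 0;   /* consistent: p2m[0] == 7             */
        m2p[9] = 1;   /* stale: p2m[1] == 5, so 9 is foreign */

        printf("mfn 7 -> pfn %ld\n", (long)toy_mfn_to_pfn(7));   /* 0  */
        printf("mfn 9 -> pfn %ld\n", (long)toy_mfn_to_pfn(9));   /* -1 */
        printf("mfn 2 -> pfn %ld\n", (long)toy_mfn_to_pfn(2));   /* 2  */
        return 0;
    }
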
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index d979e5abae55..536240fa9a95 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -152,6 +152,10 @@
152#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 152#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
153#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 153#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
154 154
155#define MSR_CORE_PERF_LIMIT_REASONS 0x00000690
156#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
157#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
158
155/* Hardware P state interface */ 159/* Hardware P state interface */
156#define MSR_PPERF 0x0000064e 160#define MSR_PPERF 0x0000064e
157#define MSR_PERF_LIMIT_REASONS 0x0000064f 161#define MSR_PERF_LIMIT_REASONS 0x0000064f
@@ -362,6 +366,7 @@
362 366
363#define MSR_IA32_PERF_STATUS 0x00000198 367#define MSR_IA32_PERF_STATUS 0x00000198
364#define MSR_IA32_PERF_CTL 0x00000199 368#define MSR_IA32_PERF_CTL 0x00000199
369#define INTEL_PERF_CTL_MASK 0xffff
365#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 370#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
366#define MSR_AMD_PERF_STATUS 0xc0010063 371#define MSR_AMD_PERF_STATUS 0xc0010063
367#define MSR_AMD_PERF_CTL 0xc0010062 372#define MSR_AMD_PERF_CTL 0xc0010062
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a18fff361c7f..ae97ed0873c6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -845,13 +845,7 @@ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
845 845
846static int __init acpi_parse_sbf(struct acpi_table_header *table) 846static int __init acpi_parse_sbf(struct acpi_table_header *table)
847{ 847{
848 struct acpi_table_boot *sb; 848 struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
849
850 sb = (struct acpi_table_boot *)table;
851 if (!sb) {
852 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
853 return -ENODEV;
854 }
855 849
856 sbf_port = sb->cmos_index; /* Save CMOS port */ 850 sbf_port = sb->cmos_index; /* Save CMOS port */
857 851
@@ -865,13 +859,7 @@ static struct resource *hpet_res __initdata;
865 859
866static int __init acpi_parse_hpet(struct acpi_table_header *table) 860static int __init acpi_parse_hpet(struct acpi_table_header *table)
867{ 861{
868 struct acpi_table_hpet *hpet_tbl; 862 struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
869
870 hpet_tbl = (struct acpi_table_hpet *)table;
871 if (!hpet_tbl) {
872 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
873 return -ENODEV;
874 }
875 863
876 if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { 864 if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
877 printk(KERN_WARNING PREFIX "HPET timers must be located in " 865 printk(KERN_WARNING PREFIX "HPET timers must be located in "
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index cfd1b132b8e3..6ac273832f28 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -10,9 +10,6 @@
10struct pci_root_info { 10struct pci_root_info {
11 struct acpi_device *bridge; 11 struct acpi_device *bridge;
12 char name[16]; 12 char name[16];
13 unsigned int res_num;
14 struct resource *res;
15 resource_size_t *res_offset;
16 struct pci_sysdata sd; 13 struct pci_sysdata sd;
17#ifdef CONFIG_PCI_MMCONFIG 14#ifdef CONFIG_PCI_MMCONFIG
18 bool mcfg_added; 15 bool mcfg_added;
@@ -218,130 +215,41 @@ static void teardown_mcfg_map(struct pci_root_info *info)
218} 215}
219#endif 216#endif
220 217
221static acpi_status resource_to_addr(struct acpi_resource *resource, 218static void validate_resources(struct device *dev, struct list_head *crs_res,
222 struct acpi_resource_address64 *addr) 219 unsigned long type)
223{
224 acpi_status status;
225 struct acpi_resource_memory24 *memory24;
226 struct acpi_resource_memory32 *memory32;
227 struct acpi_resource_fixed_memory32 *fixed_memory32;
228
229 memset(addr, 0, sizeof(*addr));
230 switch (resource->type) {
231 case ACPI_RESOURCE_TYPE_MEMORY24:
232 memory24 = &resource->data.memory24;
233 addr->resource_type = ACPI_MEMORY_RANGE;
234 addr->minimum = memory24->minimum;
235 addr->address_length = memory24->address_length;
236 addr->maximum = addr->minimum + addr->address_length - 1;
237 return AE_OK;
238 case ACPI_RESOURCE_TYPE_MEMORY32:
239 memory32 = &resource->data.memory32;
240 addr->resource_type = ACPI_MEMORY_RANGE;
241 addr->minimum = memory32->minimum;
242 addr->address_length = memory32->address_length;
243 addr->maximum = addr->minimum + addr->address_length - 1;
244 return AE_OK;
245 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
246 fixed_memory32 = &resource->data.fixed_memory32;
247 addr->resource_type = ACPI_MEMORY_RANGE;
248 addr->minimum = fixed_memory32->address;
249 addr->address_length = fixed_memory32->address_length;
250 addr->maximum = addr->minimum + addr->address_length - 1;
251 return AE_OK;
252 case ACPI_RESOURCE_TYPE_ADDRESS16:
253 case ACPI_RESOURCE_TYPE_ADDRESS32:
254 case ACPI_RESOURCE_TYPE_ADDRESS64:
255 status = acpi_resource_to_address64(resource, addr);
256 if (ACPI_SUCCESS(status) &&
257 (addr->resource_type == ACPI_MEMORY_RANGE ||
258 addr->resource_type == ACPI_IO_RANGE) &&
259 addr->address_length > 0) {
260 return AE_OK;
261 }
262 break;
263 }
264 return AE_ERROR;
265}
266
267static acpi_status count_resource(struct acpi_resource *acpi_res, void *data)
268{ 220{
269 struct pci_root_info *info = data; 221 LIST_HEAD(list);
270 struct acpi_resource_address64 addr; 222 struct resource *res1, *res2, *root = NULL;
271 acpi_status status; 223 struct resource_entry *tmp, *entry, *entry2;
272
273 status = resource_to_addr(acpi_res, &addr);
274 if (ACPI_SUCCESS(status))
275 info->res_num++;
276 return AE_OK;
277}
278
279static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
280{
281 struct pci_root_info *info = data;
282 struct resource *res;
283 struct acpi_resource_address64 addr;
284 acpi_status status;
285 unsigned long flags;
286 u64 start, orig_end, end;
287
288 status = resource_to_addr(acpi_res, &addr);
289 if (!ACPI_SUCCESS(status))
290 return AE_OK;
291
292 if (addr.resource_type == ACPI_MEMORY_RANGE) {
293 flags = IORESOURCE_MEM;
294 if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
295 flags |= IORESOURCE_PREFETCH;
296 } else if (addr.resource_type == ACPI_IO_RANGE) {
297 flags = IORESOURCE_IO;
298 } else
299 return AE_OK;
300
301 start = addr.minimum + addr.translation_offset;
302 orig_end = end = addr.maximum + addr.translation_offset;
303
304 /* Exclude non-addressable range or non-addressable portion of range */
305 end = min(end, (u64)iomem_resource.end);
306 if (end <= start) {
307 dev_info(&info->bridge->dev,
308 "host bridge window [%#llx-%#llx] "
309 "(ignored, not CPU addressable)\n", start, orig_end);
310 return AE_OK;
311 } else if (orig_end != end) {
312 dev_info(&info->bridge->dev,
313 "host bridge window [%#llx-%#llx] "
314 "([%#llx-%#llx] ignored, not CPU addressable)\n",
315 start, orig_end, end + 1, orig_end);
316 }
317 224
318 res = &info->res[info->res_num]; 225 BUG_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0);
319 res->name = info->name; 226 root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource;
320 res->flags = flags;
321 res->start = start;
322 res->end = end;
323 info->res_offset[info->res_num] = addr.translation_offset;
324 info->res_num++;
325 227
326 if (!pci_use_crs) 228 list_splice_init(crs_res, &list);
327 dev_printk(KERN_DEBUG, &info->bridge->dev, 229 resource_list_for_each_entry_safe(entry, tmp, &list) {
328 "host bridge window %pR (ignored)\n", res); 230 bool free = false;
231 resource_size_t end;
329 232
330 return AE_OK; 233 res1 = entry->res;
331}
332
333static void coalesce_windows(struct pci_root_info *info, unsigned long type)
334{
335 int i, j;
336 struct resource *res1, *res2;
337
338 for (i = 0; i < info->res_num; i++) {
339 res1 = &info->res[i];
340 if (!(res1->flags & type)) 234 if (!(res1->flags & type))
341 continue; 235 goto next;
236
237 /* Exclude non-addressable range or non-addressable portion */
238 end = min(res1->end, root->end);
239 if (end <= res1->start) {
240 dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n",
241 res1);
242 free = true;
243 goto next;
244 } else if (res1->end != end) {
245 dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n",
246 res1, (unsigned long long)end + 1,
247 (unsigned long long)res1->end);
248 res1->end = end;
249 }
342 250
343 for (j = i + 1; j < info->res_num; j++) { 251 resource_list_for_each_entry(entry2, crs_res) {
344 res2 = &info->res[j]; 252 res2 = entry2->res;
345 if (!(res2->flags & type)) 253 if (!(res2->flags & type))
346 continue; 254 continue;
347 255
@@ -353,118 +261,92 @@ static void coalesce_windows(struct pci_root_info *info, unsigned long type)
353 if (resource_overlaps(res1, res2)) { 261 if (resource_overlaps(res1, res2)) {
354 res2->start = min(res1->start, res2->start); 262 res2->start = min(res1->start, res2->start);
355 res2->end = max(res1->end, res2->end); 263 res2->end = max(res1->end, res2->end);
356 dev_info(&info->bridge->dev, 264 dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
357 "host bridge window expanded to %pR; %pR ignored\n",
358 res2, res1); 265 res2, res1);
359 res1->flags = 0; 266 free = true;
267 goto next;
360 } 268 }
361 } 269 }
270
271next:
272 resource_list_del(entry);
273 if (free)
274 resource_list_free_entry(entry);
275 else
276 resource_list_add_tail(entry, crs_res);
362 } 277 }
363} 278}
364 279
365static void add_resources(struct pci_root_info *info, 280static void add_resources(struct pci_root_info *info,
366 struct list_head *resources) 281 struct list_head *resources,
282 struct list_head *crs_res)
367{ 283{
368 int i; 284 struct resource_entry *entry, *tmp;
369 struct resource *res, *root, *conflict; 285 struct resource *res, *conflict, *root = NULL;
370
371 coalesce_windows(info, IORESOURCE_MEM);
372 coalesce_windows(info, IORESOURCE_IO);
373 286
374 for (i = 0; i < info->res_num; i++) { 287 validate_resources(&info->bridge->dev, crs_res, IORESOURCE_MEM);
375 res = &info->res[i]; 288 validate_resources(&info->bridge->dev, crs_res, IORESOURCE_IO);
376 289
290 resource_list_for_each_entry_safe(entry, tmp, crs_res) {
291 res = entry->res;
377 if (res->flags & IORESOURCE_MEM) 292 if (res->flags & IORESOURCE_MEM)
378 root = &iomem_resource; 293 root = &iomem_resource;
379 else if (res->flags & IORESOURCE_IO) 294 else if (res->flags & IORESOURCE_IO)
380 root = &ioport_resource; 295 root = &ioport_resource;
381 else 296 else
382 continue; 297 BUG_ON(res);
383 298
384 conflict = insert_resource_conflict(root, res); 299 conflict = insert_resource_conflict(root, res);
385 if (conflict) 300 if (conflict) {
386 dev_info(&info->bridge->dev, 301 dev_info(&info->bridge->dev,
387 "ignoring host bridge window %pR (conflicts with %s %pR)\n", 302 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
388 res, conflict->name, conflict); 303 res, conflict->name, conflict);
389 else 304 resource_list_destroy_entry(entry);
390 pci_add_resource_offset(resources, res, 305 }
391 info->res_offset[i]);
392 } 306 }
393}
394 307
395static void free_pci_root_info_res(struct pci_root_info *info) 308 list_splice_tail(crs_res, resources);
396{
397 kfree(info->res);
398 info->res = NULL;
399 kfree(info->res_offset);
400 info->res_offset = NULL;
401 info->res_num = 0;
402} 309}
403 310
404static void __release_pci_root_info(struct pci_root_info *info) 311static void release_pci_root_info(struct pci_host_bridge *bridge)
405{ 312{
406 int i;
407 struct resource *res; 313 struct resource *res;
314 struct resource_entry *entry;
315 struct pci_root_info *info = bridge->release_data;
408 316
409 for (i = 0; i < info->res_num; i++) { 317 resource_list_for_each_entry(entry, &bridge->windows) {
410 res = &info->res[i]; 318 res = entry->res;
411 319 if (res->parent &&
412 if (!res->parent) 320 (res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
413 continue; 321 release_resource(res);
414
415 if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
416 continue;
417
418 release_resource(res);
419 } 322 }
420 323
421 free_pci_root_info_res(info);
422
423 teardown_mcfg_map(info); 324 teardown_mcfg_map(info);
424
425 kfree(info); 325 kfree(info);
426} 326}
427 327
428static void release_pci_root_info(struct pci_host_bridge *bridge)
429{
430 struct pci_root_info *info = bridge->release_data;
431
432 __release_pci_root_info(info);
433}
434
435static void probe_pci_root_info(struct pci_root_info *info, 328static void probe_pci_root_info(struct pci_root_info *info,
436 struct acpi_device *device, 329 struct acpi_device *device,
437 int busnum, int domain) 330 int busnum, int domain,
331 struct list_head *list)
438{ 332{
439 size_t size; 333 int ret;
334 struct resource_entry *entry;
440 335
441 sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); 336 sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
442 info->bridge = device; 337 info->bridge = device;
443 338 ret = acpi_dev_get_resources(device, list,
444 info->res_num = 0; 339 acpi_dev_filter_resource_type_cb,
445 acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource, 340 (void *)(IORESOURCE_IO | IORESOURCE_MEM));
446 info); 341 if (ret < 0)
447 if (!info->res_num) 342 dev_warn(&device->dev,
448 return; 343 "failed to parse _CRS method, error code %d\n", ret);
449 344 else if (ret == 0)
450 size = sizeof(*info->res) * info->res_num; 345 dev_dbg(&device->dev,
451 info->res = kzalloc_node(size, GFP_KERNEL, info->sd.node); 346 "no IO and memory resources present in _CRS\n");
452 if (!info->res) { 347 else
453 info->res_num = 0; 348 resource_list_for_each_entry(entry, list)
454 return; 349 entry->res->name = info->name;
455 }
456
457 size = sizeof(*info->res_offset) * info->res_num;
458 info->res_num = 0;
459 info->res_offset = kzalloc_node(size, GFP_KERNEL, info->sd.node);
460 if (!info->res_offset) {
461 kfree(info->res);
462 info->res = NULL;
463 return;
464 }
465
466 acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
467 info);
468} 350}
469 351
470struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) 352struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
@@ -473,6 +355,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
473 struct pci_root_info *info; 355 struct pci_root_info *info;
474 int domain = root->segment; 356 int domain = root->segment;
475 int busnum = root->secondary.start; 357 int busnum = root->secondary.start;
358 struct resource_entry *res_entry;
359 LIST_HEAD(crs_res);
476 LIST_HEAD(resources); 360 LIST_HEAD(resources);
477 struct pci_bus *bus; 361 struct pci_bus *bus;
478 struct pci_sysdata *sd; 362 struct pci_sysdata *sd;
@@ -520,18 +404,22 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
520 memcpy(bus->sysdata, sd, sizeof(*sd)); 404 memcpy(bus->sysdata, sd, sizeof(*sd));
521 kfree(info); 405 kfree(info);
522 } else { 406 } else {
523 probe_pci_root_info(info, device, busnum, domain);
524
525 /* insert busn res at first */ 407 /* insert busn res at first */
526 pci_add_resource(&resources, &root->secondary); 408 pci_add_resource(&resources, &root->secondary);
409
527 /* 410 /*
528 * _CRS with no apertures is normal, so only fall back to 411 * _CRS with no apertures is normal, so only fall back to
529 * defaults or native bridge info if we're ignoring _CRS. 412 * defaults or native bridge info if we're ignoring _CRS.
530 */ 413 */
531 if (pci_use_crs) 414 probe_pci_root_info(info, device, busnum, domain, &crs_res);
532 add_resources(info, &resources); 415 if (pci_use_crs) {
533 else { 416 add_resources(info, &resources, &crs_res);
534 free_pci_root_info_res(info); 417 } else {
418 resource_list_for_each_entry(res_entry, &crs_res)
419 dev_printk(KERN_DEBUG, &device->dev,
420 "host bridge window %pR (ignored)\n",
421 res_entry->res);
422 resource_list_free(&crs_res);
535 x86_pci_root_bus_resources(busnum, &resources); 423 x86_pci_root_bus_resources(busnum, &resources);
536 } 424 }
537 425
@@ -546,8 +434,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
546 to_pci_host_bridge(bus->bridge), 434 to_pci_host_bridge(bus->bridge),
547 release_pci_root_info, info); 435 release_pci_root_info, info);
548 } else { 436 } else {
549 pci_free_resource_list(&resources); 437 resource_list_free(&resources);
550 __release_pci_root_info(info); 438 teardown_mcfg_map(info);
439 kfree(info);
551 } 440 }
552 } 441 }
553 442
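
The acpi.c rework above drops the counted res/res_offset arrays and operates directly on the resource_entry list produced by acpi_dev_get_resources(): each _CRS window is clipped to what the CPU can address, windows that become empty are dropped, and overlapping windows are merged before being handed to the PCI core. A compact model of that clip-drop-merge logic over a plain array, with invented names and none of the resource_list plumbing:

    #include <stdio.h>

    struct win { unsigned long start, end; int valid; };

    /* Clip each window to root_end, drop empty ones, merge overlaps. */
    static void validate(struct win *w, int n, unsigned long root_end)
    {
        int i, j;

        for (i = 0; i < n; i++) {
            if (!w[i].valid)
                continue;
            if (w[i].end > root_end)
                w[i].end = root_end;        /* not CPU addressable past here */
            if (w[i].end <= w[i].start) {
                w[i].valid = 0;             /* nothing left, drop it */
                continue;
            }
            for (j = 0; j < n; j++) {
                if (j == i || !w[j].valid)
                    continue;
                if (w[i].start <= w[j].end && w[j].start <= w[i].end) {
                    /* expand j to cover both, then drop i */
                    if (w[i].start < w[j].start)
                        w[j].start = w[i].start;
                    if (w[i].end > w[j].end)
                        w[j].end = w[i].end;
                    w[i].valid = 0;
                    break;
                }
            }
        }
    }

    int main(void)
    {
        struct win w[] = {
            { 0x1000,     0x1fff,     1 },
            { 0x1800,     0x2fff,     1 },  /* overlaps the first */
            { 0xf0000000, 0xffffffff, 1 },  /* entirely above root_end */
        };
        int i;

        validate(w, 3, 0x7fffffff);
        for (i = 0; i < 3; i++)
            if (w[i].valid)
                printf("window [%#lx-%#lx]\n", w[i].start, w[i].end);
        return 0;
    }
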
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index f3a2cfc14125..7bcf06a7cd12 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
31{ 31{
32 struct pci_root_info *info = x86_find_pci_root_info(bus); 32 struct pci_root_info *info = x86_find_pci_root_info(bus);
33 struct pci_root_res *root_res; 33 struct pci_root_res *root_res;
34 struct pci_host_bridge_window *window; 34 struct resource_entry *window;
35 bool found = false; 35 bool found = false;
36 36
37 if (!info) 37 if (!info)
@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
41 bus); 41 bus);
42 42
43 /* already added by acpi ? */ 43 /* already added by acpi ? */
44 list_for_each_entry(window, resources, list) 44 resource_list_for_each_entry(window, resources)
45 if (window->res->flags & IORESOURCE_BUS) { 45 if (window->res->flags & IORESOURCE_BUS) {
46 found = true; 46 found = true;
47 break; 47 break;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2fb384724ebb..3d2612b68694 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -513,6 +513,31 @@ void __init pcibios_set_cache_line_size(void)
513 } 513 }
514} 514}
515 515
516/*
517 * Some device drivers assume dev->irq won't change after calling
518 * pci_disable_device(), so delay releasing the IRQ resource until
519 * driver unbind time. Releasing it earlier breaks the PM subsystem
520 * and drivers such as xen-pciback.
521 */
522static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
523 void *data)
524{
525 struct pci_dev *dev = to_pci_dev(data);
526
527 if (action != BUS_NOTIFY_UNBOUND_DRIVER)
528 return NOTIFY_DONE;
529
530 if (pcibios_disable_irq)
531 pcibios_disable_irq(dev);
532
533 return NOTIFY_OK;
534}
535
536static struct notifier_block pci_irq_nb = {
537 .notifier_call = pci_irq_notifier,
538 .priority = INT_MIN,
539};
540
516int __init pcibios_init(void) 541int __init pcibios_init(void)
517{ 542{
518 if (!raw_pci_ops) { 543 if (!raw_pci_ops) {
@@ -525,6 +550,9 @@ int __init pcibios_init(void)
525 550
526 if (pci_bf_sort >= pci_force_bf) 551 if (pci_bf_sort >= pci_force_bf)
527 pci_sort_breadthfirst(); 552 pci_sort_breadthfirst();
553
554 bus_register_notifier(&pci_bus_type, &pci_irq_nb);
555
528 return 0; 556 return 0;
529} 557}
530 558
@@ -683,12 +711,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
683 return 0; 711 return 0;
684} 712}
685 713
686void pcibios_disable_device (struct pci_dev *dev)
687{
688 if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
689 pcibios_disable_irq(dev);
690}
691
692int pci_ext_cfg_avail(void) 714int pci_ext_cfg_avail(void)
693{ 715{
694 if (raw_pci_ext_ops) 716 if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 852aa4c92da0..efb849323c74 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
234 234
235static void intel_mid_pci_irq_disable(struct pci_dev *dev) 235static void intel_mid_pci_irq_disable(struct pci_dev *dev)
236{ 236{
237 if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && 237 if (dev->irq_managed && dev->irq > 0) {
238 dev->irq > 0) {
239 mp_unmap_irq(dev->irq); 238 mp_unmap_irq(dev->irq);
240 dev->irq_managed = 0; 239 dev->irq_managed = 0;
240 dev->irq = 0;
241 } 241 }
242} 242}
243 243
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 5dc6ca5e1741..e71b3dbd87b8 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1256,22 +1256,9 @@ static int pirq_enable_irq(struct pci_dev *dev)
1256 return 0; 1256 return 0;
1257} 1257}
1258 1258
1259bool mp_should_keep_irq(struct device *dev)
1260{
1261 if (dev->power.is_prepared)
1262 return true;
1263#ifdef CONFIG_PM
1264 if (dev->power.runtime_status == RPM_SUSPENDING)
1265 return true;
1266#endif
1267
1268 return false;
1269}
1270
1271static void pirq_disable_irq(struct pci_dev *dev) 1259static void pirq_disable_irq(struct pci_dev *dev)
1272{ 1260{
1273 if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && 1261 if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
1274 dev->irq_managed && dev->irq) {
1275 mp_unmap_irq(dev->irq); 1262 mp_unmap_irq(dev->irq);
1276 dev->irq = 0; 1263 dev->irq = 0;
1277 dev->irq_managed = 0; 1264 dev->irq_managed = 0;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 676e5e04e4d4..dd30b7e08bc2 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -397,12 +397,12 @@ static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
397 397
398 status = acpi_resource_to_address64(res, &address); 398 status = acpi_resource_to_address64(res, &address);
399 if (ACPI_FAILURE(status) || 399 if (ACPI_FAILURE(status) ||
400 (address.address_length <= 0) || 400 (address.address.address_length <= 0) ||
401 (address.resource_type != ACPI_MEMORY_RANGE)) 401 (address.resource_type != ACPI_MEMORY_RANGE))
402 return AE_OK; 402 return AE_OK;
403 403
404 if ((mcfg_res->start >= address.minimum) && 404 if ((mcfg_res->start >= address.address.minimum) &&
405 (mcfg_res->end < (address.minimum + address.address_length))) { 405 (mcfg_res->end < (address.address.minimum + address.address.address_length))) {
406 mcfg_res->flags = 1; 406 mcfg_res->flags = 1;
407 return AE_CTRL_TERMINATE; 407 return AE_CTRL_TERMINATE;
408 } 408 }
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 9098d880c476..d22f4b5bbc04 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -298,12 +298,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
298 map_irq.entry_nr = nvec; 298 map_irq.entry_nr = nvec;
299 } else if (type == PCI_CAP_ID_MSIX) { 299 } else if (type == PCI_CAP_ID_MSIX) {
300 int pos; 300 int pos;
301 unsigned long flags;
301 u32 table_offset, bir; 302 u32 table_offset, bir;
302 303
303 pos = dev->msix_cap; 304 pos = dev->msix_cap;
304 pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, 305 pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
305 &table_offset); 306 &table_offset);
306 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); 307 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
308 flags = pci_resource_flags(dev, bir);
309 if (!flags || (flags & IORESOURCE_UNSET))
310 return -EINVAL;
307 311
308 map_irq.table_base = pci_resource_start(dev, bir); 312 map_irq.table_base = pci_resource_start(dev, bir);
309 map_irq.entry_nr = msidesc->msi_attrib.entry_nr; 313 map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5c1f9ace7ae7..adca9e2b6553 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1489,7 +1489,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1489 native_set_pte(ptep, pte); 1489 native_set_pte(ptep, pte);
1490} 1490}
1491 1491
1492static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) 1492static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1493{ 1493{
1494 struct mmuext_op op; 1494 struct mmuext_op op;
1495 op.cmd = cmd; 1495 op.cmd = cmd;
@@ -1657,7 +1657,7 @@ void __init xen_reserve_top(void)
1657 * Like __va(), but returns address in the kernel mapping (which is 1657 * Like __va(), but returns address in the kernel mapping (which is
1658 * all we have until the physical memory mapping has been set up. 1658 * all we have until the physical memory mapping has been set up.
1659 */ 1659 */
1660static void *__ka(phys_addr_t paddr) 1660static void * __init __ka(phys_addr_t paddr)
1661{ 1661{
1662#ifdef CONFIG_X86_64 1662#ifdef CONFIG_X86_64
1663 return (void *)(paddr + __START_KERNEL_map); 1663 return (void *)(paddr + __START_KERNEL_map);
@@ -1667,7 +1667,7 @@ static void *__ka(phys_addr_t paddr)
1667} 1667}
1668 1668
1669/* Convert a machine address to physical address */ 1669/* Convert a machine address to physical address */
1670static unsigned long m2p(phys_addr_t maddr) 1670static unsigned long __init m2p(phys_addr_t maddr)
1671{ 1671{
1672 phys_addr_t paddr; 1672 phys_addr_t paddr;
1673 1673
@@ -1678,13 +1678,14 @@ static unsigned long m2p(phys_addr_t maddr)
1678} 1678}
1679 1679
1680/* Convert a machine address to kernel virtual */ 1680/* Convert a machine address to kernel virtual */
1681static void *m2v(phys_addr_t maddr) 1681static void * __init m2v(phys_addr_t maddr)
1682{ 1682{
1683 return __ka(m2p(maddr)); 1683 return __ka(m2p(maddr));
1684} 1684}
1685 1685
1686/* Set the page permissions on an identity-mapped pages */ 1686/* Set the page permissions on an identity-mapped pages */
1687static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) 1687static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1688 unsigned long flags)
1688{ 1689{
1689 unsigned long pfn = __pa(addr) >> PAGE_SHIFT; 1690 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1690 pte_t pte = pfn_pte(pfn, prot); 1691 pte_t pte = pfn_pte(pfn, prot);
@@ -1696,7 +1697,7 @@ static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1696 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) 1697 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1697 BUG(); 1698 BUG();
1698} 1699}
1699static void set_page_prot(void *addr, pgprot_t prot) 1700static void __init set_page_prot(void *addr, pgprot_t prot)
1700{ 1701{
1701 return set_page_prot_flags(addr, prot, UVMF_NONE); 1702 return set_page_prot_flags(addr, prot, UVMF_NONE);
1702} 1703}
@@ -1733,10 +1734,8 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1733 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { 1734 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1734 pte_t pte; 1735 pte_t pte;
1735 1736
1736#ifdef CONFIG_X86_32
1737 if (pfn > max_pfn_mapped) 1737 if (pfn > max_pfn_mapped)
1738 max_pfn_mapped = pfn; 1738 max_pfn_mapped = pfn;
1739#endif
1740 1739
1741 if (!pte_none(pte_page[pteidx])) 1740 if (!pte_none(pte_page[pteidx]))
1742 continue; 1741 continue;
@@ -1769,7 +1768,7 @@ void __init xen_setup_machphys_mapping(void)
1769} 1768}
1770 1769
1771#ifdef CONFIG_X86_64 1770#ifdef CONFIG_X86_64
1772static void convert_pfn_mfn(void *v) 1771static void __init convert_pfn_mfn(void *v)
1773{ 1772{
1774 pte_t *pte = v; 1773 pte_t *pte = v;
1775 int i; 1774 int i;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 70fb5075c901..f18fd1d411f6 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -84,8 +84,6 @@
84 84
85#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE) 85#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE)
86 86
87static void __init m2p_override_init(void);
88
89unsigned long *xen_p2m_addr __read_mostly; 87unsigned long *xen_p2m_addr __read_mostly;
90EXPORT_SYMBOL_GPL(xen_p2m_addr); 88EXPORT_SYMBOL_GPL(xen_p2m_addr);
91unsigned long xen_p2m_size __read_mostly; 89unsigned long xen_p2m_size __read_mostly;
@@ -402,8 +400,6 @@ void __init xen_vmalloc_p2m_tree(void)
402 xen_p2m_size = xen_max_p2m_pfn; 400 xen_p2m_size = xen_max_p2m_pfn;
403 401
404 xen_inv_extra_mem(); 402 xen_inv_extra_mem();
405
406 m2p_override_init();
407} 403}
408 404
409unsigned long get_phys_to_machine(unsigned long pfn) 405unsigned long get_phys_to_machine(unsigned long pfn)
@@ -652,100 +648,21 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
652 return true; 648 return true;
653} 649}
654 650
655#define M2P_OVERRIDE_HASH_SHIFT 10
656#define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT)
657
658static struct list_head *m2p_overrides;
659static DEFINE_SPINLOCK(m2p_override_lock);
660
661static void __init m2p_override_init(void)
662{
663 unsigned i;
664
665 m2p_overrides = alloc_bootmem_align(
666 sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
667 sizeof(unsigned long));
668
669 for (i = 0; i < M2P_OVERRIDE_HASH; i++)
670 INIT_LIST_HEAD(&m2p_overrides[i]);
671}
672
673static unsigned long mfn_hash(unsigned long mfn)
674{
675 return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
676}
677
678/* Add an MFN override for a particular page */
679static int m2p_add_override(unsigned long mfn, struct page *page,
680 struct gnttab_map_grant_ref *kmap_op)
681{
682 unsigned long flags;
683 unsigned long pfn;
684 unsigned long uninitialized_var(address);
685 unsigned level;
686 pte_t *ptep = NULL;
687
688 pfn = page_to_pfn(page);
689 if (!PageHighMem(page)) {
690 address = (unsigned long)__va(pfn << PAGE_SHIFT);
691 ptep = lookup_address(address, &level);
692 if (WARN(ptep == NULL || level != PG_LEVEL_4K,
693 "m2p_add_override: pfn %lx not mapped", pfn))
694 return -EINVAL;
695 }
696
697 if (kmap_op != NULL) {
698 if (!PageHighMem(page)) {
699 struct multicall_space mcs =
700 xen_mc_entry(sizeof(*kmap_op));
701
702 MULTI_grant_table_op(mcs.mc,
703 GNTTABOP_map_grant_ref, kmap_op, 1);
704
705 xen_mc_issue(PARAVIRT_LAZY_MMU);
706 }
707 }
708 spin_lock_irqsave(&m2p_override_lock, flags);
709 list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
710 spin_unlock_irqrestore(&m2p_override_lock, flags);
711
712 /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
713 * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
714 * pfn so that the following mfn_to_pfn(mfn) calls will return the
715 * pfn from the m2p_override (the backend pfn) instead.
716 * We need to do this because the pages shared by the frontend
717 * (xen-blkfront) can be already locked (lock_page, called by
718 * do_read_cache_page); when the userspace backend tries to use them
719 * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
720 * do_blockdev_direct_IO is going to try to lock the same pages
721 * again resulting in a deadlock.
722 * As a side effect get_user_pages_fast might not be safe on the
723 * frontend pages while they are being shared with the backend,
724 * because mfn_to_pfn (that ends up being called by GUPF) will
725 * return the backend pfn rather than the frontend pfn. */
726 pfn = mfn_to_pfn_no_overrides(mfn);
727 if (__pfn_to_mfn(pfn) == mfn)
728 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
729
730 return 0;
731}
732
733int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 651int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
734 struct gnttab_map_grant_ref *kmap_ops, 652 struct gnttab_map_grant_ref *kmap_ops,
735 struct page **pages, unsigned int count) 653 struct page **pages, unsigned int count)
736{ 654{
737 int i, ret = 0; 655 int i, ret = 0;
738 bool lazy = false;
739 pte_t *pte; 656 pte_t *pte;
740 657
741 if (xen_feature(XENFEAT_auto_translated_physmap)) 658 if (xen_feature(XENFEAT_auto_translated_physmap))
742 return 0; 659 return 0;
743 660
744 if (kmap_ops && 661 if (kmap_ops) {
745 !in_interrupt() && 662 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
746 paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 663 kmap_ops, count);
747 arch_enter_lazy_mmu_mode(); 664 if (ret)
748 lazy = true; 665 goto out;
749 } 666 }
750 667
751 for (i = 0; i < count; i++) { 668 for (i = 0; i < count; i++) {
@@ -764,170 +681,28 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
764 } 681 }
765 pfn = page_to_pfn(pages[i]); 682 pfn = page_to_pfn(pages[i]);
766 683
767 WARN_ON(PagePrivate(pages[i])); 684 WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
768 SetPagePrivate(pages[i]);
769 set_page_private(pages[i], mfn);
770 pages[i]->index = pfn_to_mfn(pfn);
771 685
772 if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { 686 if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
773 ret = -ENOMEM; 687 ret = -ENOMEM;
774 goto out; 688 goto out;
775 } 689 }
776
777 if (kmap_ops) {
778 ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
779 if (ret)
780 goto out;
781 }
782 } 690 }
783 691
784out: 692out:
785 if (lazy)
786 arch_leave_lazy_mmu_mode();
787
788 return ret; 693 return ret;
789} 694}
790EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); 695EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
791 696
792static struct page *m2p_find_override(unsigned long mfn)
793{
794 unsigned long flags;
795 struct list_head *bucket;
796 struct page *p, *ret;
797
798 if (unlikely(!m2p_overrides))
799 return NULL;
800
801 ret = NULL;
802 bucket = &m2p_overrides[mfn_hash(mfn)];
803
804 spin_lock_irqsave(&m2p_override_lock, flags);
805
806 list_for_each_entry(p, bucket, lru) {
807 if (page_private(p) == mfn) {
808 ret = p;
809 break;
810 }
811 }
812
813 spin_unlock_irqrestore(&m2p_override_lock, flags);
814
815 return ret;
816}
817
818static int m2p_remove_override(struct page *page,
819 struct gnttab_map_grant_ref *kmap_op,
820 unsigned long mfn)
821{
822 unsigned long flags;
823 unsigned long pfn;
824 unsigned long uninitialized_var(address);
825 unsigned level;
826 pte_t *ptep = NULL;
827
828 pfn = page_to_pfn(page);
829
830 if (!PageHighMem(page)) {
831 address = (unsigned long)__va(pfn << PAGE_SHIFT);
832 ptep = lookup_address(address, &level);
833
834 if (WARN(ptep == NULL || level != PG_LEVEL_4K,
835 "m2p_remove_override: pfn %lx not mapped", pfn))
836 return -EINVAL;
837 }
838
839 spin_lock_irqsave(&m2p_override_lock, flags);
840 list_del(&page->lru);
841 spin_unlock_irqrestore(&m2p_override_lock, flags);
842
843 if (kmap_op != NULL) {
844 if (!PageHighMem(page)) {
845 struct multicall_space mcs;
846 struct gnttab_unmap_and_replace *unmap_op;
847 struct page *scratch_page = get_balloon_scratch_page();
848 unsigned long scratch_page_address = (unsigned long)
849 __va(page_to_pfn(scratch_page) << PAGE_SHIFT);
850
851 /*
852 * It might be that we queued all the m2p grant table
853 * hypercalls in a multicall, then m2p_remove_override
854 * get called before the multicall has actually been
855 * issued. In this case handle is going to -1 because
856 * it hasn't been modified yet.
857 */
858 if (kmap_op->handle == -1)
859 xen_mc_flush();
860 /*
861 * Now if kmap_op->handle is negative it means that the
862 * hypercall actually returned an error.
863 */
864 if (kmap_op->handle == GNTST_general_error) {
865 pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
866 pfn, mfn);
867 put_balloon_scratch_page();
868 return -1;
869 }
870
871 xen_mc_batch();
872
873 mcs = __xen_mc_entry(
874 sizeof(struct gnttab_unmap_and_replace));
875 unmap_op = mcs.args;
876 unmap_op->host_addr = kmap_op->host_addr;
877 unmap_op->new_addr = scratch_page_address;
878 unmap_op->handle = kmap_op->handle;
879
880 MULTI_grant_table_op(mcs.mc,
881 GNTTABOP_unmap_and_replace, unmap_op, 1);
882
883 mcs = __xen_mc_entry(0);
884 MULTI_update_va_mapping(mcs.mc, scratch_page_address,
885 pfn_pte(page_to_pfn(scratch_page),
886 PAGE_KERNEL_RO), 0);
887
888 xen_mc_issue(PARAVIRT_LAZY_MMU);
889
890 kmap_op->host_addr = 0;
891 put_balloon_scratch_page();
892 }
893 }
894
895 /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
896 * somewhere in this domain, even before being added to the
897 * m2p_override (see comment above in m2p_add_override).
898 * If there are no other entries in the m2p_override corresponding
899 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
900 * the original pfn (the one shared by the frontend): the backend
901 * cannot do any IO on this page anymore because it has been
902 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
903 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
904 * pfn again. */
905 mfn &= ~FOREIGN_FRAME_BIT;
906 pfn = mfn_to_pfn_no_overrides(mfn);
907 if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
908 m2p_find_override(mfn) == NULL)
909 set_phys_to_machine(pfn, mfn);
910
911 return 0;
912}
913
914int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 697int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
915 struct gnttab_map_grant_ref *kmap_ops, 698 struct gnttab_unmap_grant_ref *kunmap_ops,
916 struct page **pages, unsigned int count) 699 struct page **pages, unsigned int count)
917{ 700{
918 int i, ret = 0; 701 int i, ret = 0;
919 bool lazy = false;
920 702
921 if (xen_feature(XENFEAT_auto_translated_physmap)) 703 if (xen_feature(XENFEAT_auto_translated_physmap))
922 return 0; 704 return 0;
923 705
924 if (kmap_ops &&
925 !in_interrupt() &&
926 paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
927 arch_enter_lazy_mmu_mode();
928 lazy = true;
929 }
930
931 for (i = 0; i < count; i++) { 706 for (i = 0; i < count; i++) {
932 unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); 707 unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
933 unsigned long pfn = page_to_pfn(pages[i]); 708 unsigned long pfn = page_to_pfn(pages[i]);
@@ -937,36 +712,16 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
937 goto out; 712 goto out;
938 } 713 }
939 714
940 set_page_private(pages[i], INVALID_P2M_ENTRY); 715 set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
941 WARN_ON(!PagePrivate(pages[i]));
942 ClearPagePrivate(pages[i]);
943 set_phys_to_machine(pfn, pages[i]->index);
944
945 if (kmap_ops)
946 ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
947 if (ret)
948 goto out;
949 } 716 }
950 717 if (kunmap_ops)
718 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
719 kunmap_ops, count);
951out: 720out:
952 if (lazy)
953 arch_leave_lazy_mmu_mode();
954 return ret; 721 return ret;
955} 722}
956EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); 723EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
957 724
958unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
959{
960 struct page *p = m2p_find_override(mfn);
961 unsigned long ret = pfn;
962
963 if (p)
964 ret = page_to_pfn(p);
965
966 return ret;
967}
968EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
969
970#ifdef CONFIG_XEN_DEBUG_FS 725#ifdef CONFIG_XEN_DEBUG_FS
971#include <linux/debugfs.h> 726#include <linux/debugfs.h>
972#include "debugfs.h" 727#include "debugfs.h"
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 865e56cea7a0..55f388ef481a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -32,16 +32,6 @@
32#include "p2m.h" 32#include "p2m.h"
33#include "mmu.h" 33#include "mmu.h"
34 34
35/* These are code, but not functions. Defined in entry.S */
36extern const char xen_hypervisor_callback[];
37extern const char xen_failsafe_callback[];
38#ifdef CONFIG_X86_64
39extern asmlinkage void nmi(void);
40#endif
41extern void xen_sysenter_target(void);
42extern void xen_syscall_target(void);
43extern void xen_syscall32_target(void);
44
45/* Amount of extra memory space we add to the e820 ranges */ 35/* Amount of extra memory space we add to the e820 ranges */
46struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; 36struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
47 37
@@ -74,7 +64,7 @@ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
74 */ 64 */
75#define EXTRA_MEM_RATIO (10) 65#define EXTRA_MEM_RATIO (10)
76 66
77static void __init xen_add_extra_mem(u64 start, u64 size) 67static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
78{ 68{
79 int i; 69 int i;
80 70
@@ -97,10 +87,10 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
97 memblock_reserve(start, size); 87 memblock_reserve(start, size);
98} 88}
99 89
100static void __init xen_del_extra_mem(u64 start, u64 size) 90static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
101{ 91{
102 int i; 92 int i;
103 u64 start_r, size_r; 93 phys_addr_t start_r, size_r;
104 94
105 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { 95 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
106 start_r = xen_extra_mem[i].start; 96 start_r = xen_extra_mem[i].start;
@@ -267,7 +257,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
267static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn) 257static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
268{ 258{
269 struct mmu_update update = { 259 struct mmu_update update = {
270 .ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, 260 .ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
271 .val = pfn 261 .val = pfn
272 }; 262 };
273 263
@@ -545,20 +535,21 @@ static unsigned long __init xen_get_max_pages(void)
545 return min(max_pages, MAX_DOMAIN_PAGES); 535 return min(max_pages, MAX_DOMAIN_PAGES);
546} 536}
547 537
548static void xen_align_and_add_e820_region(u64 start, u64 size, int type) 538static void __init xen_align_and_add_e820_region(phys_addr_t start,
539 phys_addr_t size, int type)
549{ 540{
550 u64 end = start + size; 541 phys_addr_t end = start + size;
551 542
552 /* Align RAM regions to page boundaries. */ 543 /* Align RAM regions to page boundaries. */
553 if (type == E820_RAM) { 544 if (type == E820_RAM) {
554 start = PAGE_ALIGN(start); 545 start = PAGE_ALIGN(start);
555 end &= ~((u64)PAGE_SIZE - 1); 546 end &= ~((phys_addr_t)PAGE_SIZE - 1);
556 } 547 }
557 548
558 e820_add_region(start, end - start, type); 549 e820_add_region(start, end - start, type);
559} 550}
560 551
561void xen_ignore_unusable(struct e820entry *list, size_t map_size) 552static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
562{ 553{
563 struct e820entry *entry; 554 struct e820entry *entry;
564 unsigned int i; 555 unsigned int i;
@@ -577,7 +568,7 @@ char * __init xen_memory_setup(void)
577 static struct e820entry map[E820MAX] __initdata; 568 static struct e820entry map[E820MAX] __initdata;
578 569
579 unsigned long max_pfn = xen_start_info->nr_pages; 570 unsigned long max_pfn = xen_start_info->nr_pages;
580 unsigned long long mem_end; 571 phys_addr_t mem_end;
581 int rc; 572 int rc;
582 struct xen_memory_map memmap; 573 struct xen_memory_map memmap;
583 unsigned long max_pages; 574 unsigned long max_pages;
@@ -652,16 +643,16 @@ char * __init xen_memory_setup(void)
652 extra_pages); 643 extra_pages);
653 i = 0; 644 i = 0;
654 while (i < memmap.nr_entries) { 645 while (i < memmap.nr_entries) {
655 u64 addr = map[i].addr; 646 phys_addr_t addr = map[i].addr;
656 u64 size = map[i].size; 647 phys_addr_t size = map[i].size;
657 u32 type = map[i].type; 648 u32 type = map[i].type;
658 649
659 if (type == E820_RAM) { 650 if (type == E820_RAM) {
660 if (addr < mem_end) { 651 if (addr < mem_end) {
661 size = min(size, mem_end - addr); 652 size = min(size, mem_end - addr);
662 } else if (extra_pages) { 653 } else if (extra_pages) {
663 size = min(size, (u64)extra_pages * PAGE_SIZE); 654 size = min(size, PFN_PHYS(extra_pages));
664 extra_pages -= size / PAGE_SIZE; 655 extra_pages -= PFN_DOWN(size);
665 xen_add_extra_mem(addr, size); 656 xen_add_extra_mem(addr, size);
666 xen_max_p2m_pfn = PFN_DOWN(addr + size); 657 xen_max_p2m_pfn = PFN_DOWN(addr + size);
667 } else 658 } else
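Aside: PFN_PHYS() and PFN_DOWN() from <linux/pfn.h> encode the same page arithmetic the old code spelled out by hand with PAGE_SIZE. A minimal sketch of the equivalence, not part of the patch; the helper names are invented.

    #include <linux/kernel.h>       /* min() */
    #include <linux/mm.h>           /* PAGE_SHIFT */
    #include <linux/pfn.h>          /* PFN_PHYS(), PFN_DOWN() */

    /* was: size = min(size, (u64)extra_pages * PAGE_SIZE); */
    static phys_addr_t clamp_to_extra(phys_addr_t size, unsigned long extra_pages)
    {
            return min(size, PFN_PHYS(extra_pages));
    }

    /* was: extra_pages -= size / PAGE_SIZE; */
    static unsigned long consume_extra(unsigned long extra_pages, phys_addr_t size)
    {
            return extra_pages - PFN_DOWN(size);
    }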
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4c071aeb8417..08e8489c47f1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -507,7 +507,7 @@ static int xen_cpu_disable(void)
507static void xen_cpu_die(unsigned int cpu) 507static void xen_cpu_die(unsigned int cpu)
508{ 508{
509 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { 509 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
510 current->state = TASK_UNINTERRUPTIBLE; 510 __set_current_state(TASK_UNINTERRUPTIBLE);
511 schedule_timeout(HZ/10); 511 schedule_timeout(HZ/10);
512 } 512 }
513 513
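The xen_cpu_die() hunk above switches from writing current->state directly to the __set_current_state() helper. A generic sketch of that poll-and-sleep idiom (not the patch itself), assuming the usual scheduler headers and an invented callback:

    #include <linux/sched.h>
    #include <linux/jiffies.h>      /* HZ */

    static void poll_until(bool (*done)(void *), void *arg)
    {
            while (!done(arg)) {
                    /* set the task state through the helper, never by hand */
                    __set_current_state(TASK_UNINTERRUPTIBLE);
                    schedule_timeout(HZ / 10);
            }
    }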
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 69087341d9ae..55da33b1d51c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -479,6 +479,10 @@ static void __init xen_time_init(void)
479 int cpu = smp_processor_id(); 479 int cpu = smp_processor_id();
480 struct timespec tp; 480 struct timespec tp;
481 481
482 /* As Dom0 is never moved, no penalty on using TSC there */
483 if (xen_initial_domain())
484 xen_clocksource.rating = 275;
485
482 clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC); 486 clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
483 487
484 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) { 488 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
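On the rating bump above: the timekeeping core picks the highest-rated registered clocksource, and the Xen clocksource normally outranks the TSC; setting it to 275 on the initial domain (which is never migrated) appears intended to let the TSC win there. A generic sketch of where .rating lives, with made-up values and a placeholder read callback:

    #include <linux/clocksource.h>

    static cycle_t example_read(struct clocksource *cs)
    {
            return 0;       /* placeholder counter read */
    }

    static struct clocksource example_cs = {
            .name   = "example",
            .rating = 275,  /* 200-299 is the "good" band per clocksource.h */
            .read   = example_read,
            .mask   = CLOCKSOURCE_MASK(64),
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };
    /* registered with clocksource_register_hz(&example_cs, NSEC_PER_SEC); */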
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 5686bd9d58cc..9e195c683549 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -10,6 +10,12 @@
10extern const char xen_hypervisor_callback[]; 10extern const char xen_hypervisor_callback[];
11extern const char xen_failsafe_callback[]; 11extern const char xen_failsafe_callback[];
12 12
13void xen_sysenter_target(void);
14#ifdef CONFIG_X86_64
15void xen_syscall_target(void);
16void xen_syscall32_target(void);
17#endif
18
13extern void *xen_initial_gdt; 19extern void *xen_initial_gdt;
14 20
15struct trap_info; 21struct trap_info;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 8951cefb0a96..e6c3ddd92665 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -315,6 +315,12 @@ config ACPI_HOTPLUG_MEMORY
315 To compile this driver as a module, choose M here: 315 To compile this driver as a module, choose M here:
316 the module will be called acpi_memhotplug. 316 the module will be called acpi_memhotplug.
317 317
318config ACPI_HOTPLUG_IOAPIC
319 bool
320 depends on PCI
321 depends on X86_IO_APIC
322 default y
323
318config ACPI_SBS 324config ACPI_SBS
319 tristate "Smart Battery System" 325 tristate "Smart Battery System"
320 depends on X86 326 depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index f74317cc1ca9..b18cd2151ddb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -40,7 +40,7 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
40acpi-y += ec.o 40acpi-y += ec.o
41acpi-$(CONFIG_ACPI_DOCK) += dock.o 41acpi-$(CONFIG_ACPI_DOCK) += dock.o
42acpi-y += pci_root.o pci_link.o pci_irq.o 42acpi-y += pci_root.o pci_link.o pci_irq.o
43acpi-y += acpi_lpss.o 43acpi-y += acpi_lpss.o acpi_apd.o
44acpi-y += acpi_platform.o 44acpi-y += acpi_platform.o
45acpi-y += acpi_pnp.o 45acpi-y += acpi_pnp.o
46acpi-y += int340x_thermal.o 46acpi-y += int340x_thermal.o
@@ -70,6 +70,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
70obj-y += container.o 70obj-y += container.o
71obj-$(CONFIG_ACPI_THERMAL) += thermal.o 71obj-$(CONFIG_ACPI_THERMAL) += thermal.o
72obj-y += acpi_memhotplug.o 72obj-y += acpi_memhotplug.o
73obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
73obj-$(CONFIG_ACPI_BATTERY) += battery.o 74obj-$(CONFIG_ACPI_BATTERY) += battery.o
74obj-$(CONFIG_ACPI_SBS) += sbshc.o 75obj-$(CONFIG_ACPI_SBS) += sbshc.o
75obj-$(CONFIG_ACPI_SBS) += sbs.o 76obj-$(CONFIG_ACPI_SBS) += sbs.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
new file mode 100644
index 000000000000..3984ea96e5f7
--- /dev/null
+++ b/drivers/acpi/acpi_apd.c
@@ -0,0 +1,150 @@
1/*
2 * AMD ACPI support for ACPI-to-platform device conversion.

3 *
4 * Copyright (c) 2014,2015 AMD Corporation.
5 * Authors: Ken Xue <Ken.Xue@amd.com>
6 * Wu, Jeff <Jeff.Wu@amd.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/platform_device.h>
15#include <linux/pm_domain.h>
16#include <linux/clkdev.h>
17#include <linux/acpi.h>
18#include <linux/err.h>
19#include <linux/clk.h>
20#include <linux/pm.h>
21
22#include "internal.h"
23
24ACPI_MODULE_NAME("acpi_apd");
25struct apd_private_data;
26
27/**
28 * ACPI_APD_SYSFS : add device attributes in sysfs
29 * ACPI_APD_PM : attach power domain to device
30 */
31#define ACPI_APD_SYSFS BIT(0)
32#define ACPI_APD_PM BIT(1)
33
34/**
35 * struct apd_device_desc - a descriptor for an APD device
36 * @flags: device flags like %ACPI_APD_SYSFS, %ACPI_APD_PM
37 * @fixed_clk_rate: fixed rate input clock source for the ACPI device;
38 * 0 means no fixed rate input clock source
39 * @setup: a hook routine to set up device resources when the platform device is created
40 *
41 * The device description is passed as acpi_device_id.driver_data
42 */
43struct apd_device_desc {
44 unsigned int flags;
45 unsigned int fixed_clk_rate;
46 int (*setup)(struct apd_private_data *pdata);
47};
48
49struct apd_private_data {
50 struct clk *clk;
51 struct acpi_device *adev;
52 const struct apd_device_desc *dev_desc;
53};
54
55#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
56#define APD_ADDR(desc) ((unsigned long)&desc)
57
58static int acpi_apd_setup(struct apd_private_data *pdata)
59{
60 const struct apd_device_desc *dev_desc = pdata->dev_desc;
61 struct clk *clk = ERR_PTR(-ENODEV);
62
63 if (dev_desc->fixed_clk_rate) {
64 clk = clk_register_fixed_rate(&pdata->adev->dev,
65 dev_name(&pdata->adev->dev),
66 NULL, CLK_IS_ROOT,
67 dev_desc->fixed_clk_rate);
68 clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev));
69 pdata->clk = clk;
70 }
71
72 return 0;
73}
74
75static struct apd_device_desc cz_i2c_desc = {
76 .setup = acpi_apd_setup,
77 .fixed_clk_rate = 133000000,
78};
79
80static struct apd_device_desc cz_uart_desc = {
81 .setup = acpi_apd_setup,
82 .fixed_clk_rate = 48000000,
83};
84
85#else
86
87#define APD_ADDR(desc) (0UL)
88
89#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */
90
91/**
92 * Create a platform device during the ACPI scan attach handling.
93 * Returns a value > 0 when a platform device was successfully created.
94 */
95static int acpi_apd_create_device(struct acpi_device *adev,
96 const struct acpi_device_id *id)
97{
98 const struct apd_device_desc *dev_desc = (void *)id->driver_data;
99 struct apd_private_data *pdata;
100 struct platform_device *pdev;
101 int ret;
102
103 if (!dev_desc) {
104 pdev = acpi_create_platform_device(adev);
105 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
106 }
107
108 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
109 if (!pdata)
110 return -ENOMEM;
111
112 pdata->adev = adev;
113 pdata->dev_desc = dev_desc;
114
115 if (dev_desc->setup) {
116 ret = dev_desc->setup(pdata);
117 if (ret)
118 goto err_out;
119 }
120
121 adev->driver_data = pdata;
122 pdev = acpi_create_platform_device(adev);
123 if (!IS_ERR_OR_NULL(pdev))
124 return 1;
125
126 ret = PTR_ERR(pdev);
127 adev->driver_data = NULL;
128
129 err_out:
130 kfree(pdata);
131 return ret;
132}
133
134static const struct acpi_device_id acpi_apd_device_ids[] = {
135 /* Generic apd devices */
136 { "AMD0010", APD_ADDR(cz_i2c_desc) },
137 { "AMD0020", APD_ADDR(cz_uart_desc) },
138 { "AMD0030", },
139 { }
140};
141
142static struct acpi_scan_handler apd_handler = {
143 .ids = acpi_apd_device_ids,
144 .attach = acpi_apd_create_device,
145};
146
147void __init acpi_apd_init(void)
148{
149 acpi_scan_add_handler(&apd_handler);
150}
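A purely hypothetical extension of the new driver above, to show how it is meant to be used: another AMD SoC device would get its own apd_device_desc plus a row in acpi_apd_device_ids[]. The "AMDxxxx" _HID and the 100 MHz rate are invented for illustration, and the snippet assumes it sits inside acpi_apd.c within the CONFIG_X86_AMD_PLATFORM_DEVICE block.

    /* hypothetical: one more fixed-clock AMD SoC device */
    static struct apd_device_desc example_spi_desc = {
            .setup          = acpi_apd_setup,
            .fixed_clk_rate = 100000000,    /* invented value */
    };

    /* with the matching (hypothetical) table entry:
     *      { "AMDxxxx", APD_ADDR(example_spi_desc) },
     */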
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index e75737fd7eef..02e835f3cf8a 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -125,7 +125,7 @@ static struct lpss_device_desc lpt_dev_desc = {
125}; 125};
126 126
127static struct lpss_device_desc lpt_i2c_dev_desc = { 127static struct lpss_device_desc lpt_i2c_dev_desc = {
128 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, 128 .flags = LPSS_CLK | LPSS_LTR,
129 .prv_offset = 0x800, 129 .prv_offset = 0x800,
130}; 130};
131 131
@@ -307,7 +307,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
307{ 307{
308 struct lpss_device_desc *dev_desc; 308 struct lpss_device_desc *dev_desc;
309 struct lpss_private_data *pdata; 309 struct lpss_private_data *pdata;
310 struct resource_list_entry *rentry; 310 struct resource_entry *rentry;
311 struct list_head resource_list; 311 struct list_head resource_list;
312 struct platform_device *pdev; 312 struct platform_device *pdev;
313 int ret; 313 int ret;
@@ -327,13 +327,15 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
327 goto err_out; 327 goto err_out;
328 328
329 list_for_each_entry(rentry, &resource_list, node) 329 list_for_each_entry(rentry, &resource_list, node)
330 if (resource_type(&rentry->res) == IORESOURCE_MEM) { 330 if (resource_type(rentry->res) == IORESOURCE_MEM) {
331 if (dev_desc->prv_size_override) 331 if (dev_desc->prv_size_override)
332 pdata->mmio_size = dev_desc->prv_size_override; 332 pdata->mmio_size = dev_desc->prv_size_override;
333 else 333 else
334 pdata->mmio_size = resource_size(&rentry->res); 334 pdata->mmio_size = resource_size(rentry->res);
335 pdata->mmio_base = ioremap(rentry->res.start, 335 pdata->mmio_base = ioremap(rentry->res->start,
336 pdata->mmio_size); 336 pdata->mmio_size);
337 if (!pdata->mmio_base)
338 goto err_out;
337 break; 339 break;
338 } 340 }
339 341
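The rentry->res changes above come from the switch to struct resource_entry, which carries a pointer to the resource instead of embedding it, so accesses go through rentry->res->... and failures of ioremap() are now checked. A minimal sketch of the new-style walk (assuming <linux/resource_ext.h>; the helper name is invented), not the driver code itself:

    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/list.h>
    #include <linux/resource_ext.h>

    static void __iomem *map_first_mem(struct list_head *resource_list)
    {
            struct resource_entry *rentry;

            list_for_each_entry(rentry, resource_list, node)
                    if (resource_type(rentry->res) == IORESOURCE_MEM)
                            return ioremap(rentry->res->start,
                                           resource_size(rentry->res));
            return NULL;    /* callers must handle no-MEM and ioremap failure */
    }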
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 23e2319ead41..ee28f4d15625 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -101,8 +101,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
101 /* Can we combine the resource range information? */ 101 /* Can we combine the resource range information? */
102 if ((info->caching == address64.info.mem.caching) && 102 if ((info->caching == address64.info.mem.caching) &&
103 (info->write_protect == address64.info.mem.write_protect) && 103 (info->write_protect == address64.info.mem.write_protect) &&
104 (info->start_addr + info->length == address64.minimum)) { 104 (info->start_addr + info->length == address64.address.minimum)) {
105 info->length += address64.address_length; 105 info->length += address64.address.address_length;
106 return AE_OK; 106 return AE_OK;
107 } 107 }
108 } 108 }
@@ -114,8 +114,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
114 INIT_LIST_HEAD(&new->list); 114 INIT_LIST_HEAD(&new->list);
115 new->caching = address64.info.mem.caching; 115 new->caching = address64.info.mem.caching;
116 new->write_protect = address64.info.mem.write_protect; 116 new->write_protect = address64.info.mem.write_protect;
117 new->start_addr = address64.minimum; 117 new->start_addr = address64.address.minimum;
118 new->length = address64.address_length; 118 new->length = address64.address.address_length;
119 list_add_tail(&new->list, &mem_device->res_list); 119 list_add_tail(&new->list, &mem_device->res_list);
120 120
121 return AE_OK; 121 return AE_OK;
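The address64.address.* accesses above reflect the restructured ACPICA struct acpi_resource_address64, where the range fields now live in a common address sub-structure. A one-line sketch of the new layout in use (assuming <linux/acpi.h>; the helper is invented):

    #include <linux/acpi.h>

    /* end of the decoded range, exclusive */
    static u64 address64_range_end(const struct acpi_resource_address64 *a)
    {
            return a->address.minimum + a->address.address_length;
    }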
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 6ba8beb6b9d2..1284138e42ab 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -45,7 +45,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
45 struct platform_device *pdev = NULL; 45 struct platform_device *pdev = NULL;
46 struct acpi_device *acpi_parent; 46 struct acpi_device *acpi_parent;
47 struct platform_device_info pdevinfo; 47 struct platform_device_info pdevinfo;
48 struct resource_list_entry *rentry; 48 struct resource_entry *rentry;
49 struct list_head resource_list; 49 struct list_head resource_list;
50 struct resource *resources = NULL; 50 struct resource *resources = NULL;
51 int count; 51 int count;
@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
71 } 71 }
72 count = 0; 72 count = 0;
73 list_for_each_entry(rentry, &resource_list, node) 73 list_for_each_entry(rentry, &resource_list, node)
74 resources[count++] = rentry->res; 74 resources[count++] = *rentry->res;
75 75
76 acpi_dev_free_resource_list(&resource_list); 76 acpi_dev_free_resource_list(&resource_list);
77 } 77 }
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 3d2c88289da9..d863016565b5 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
47/* Common info for tool signons */ 47/* Common info for tool signons */
48 48
49#define ACPICA_NAME "Intel ACPI Component Architecture" 49#define ACPICA_NAME "Intel ACPI Component Architecture"
50#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2014 Intel Corporation" 50#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2015 Intel Corporation"
51 51
52#if ACPI_MACHINE_WIDTH == 64 52#if ACPI_MACHINE_WIDTH == 64
53#define ACPI_WIDTH "-64" 53#define ACPI_WIDTH "-64"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 6f1c616910ac..853aa2dbdb61 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 1d026ff1683f..4169bb87a996 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index d3e2cc395d7f..408f04bcaab4 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 7a7811a9fc26..228704b78657 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -143,8 +143,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
143acpi_status 143acpi_status
144acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); 144acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
145 145
146u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
147
148acpi_status 146acpi_status
149acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 147acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
150 struct acpi_gpe_block_info *gpe_block, void *context); 148 struct acpi_gpe_block_info *gpe_block, void *context);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 7f60582d0c8c..a165d25343e8 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index c318d3e27893..196a55244559 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index b01f71ce0523..1886bde54b5d 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 680d23bbae7c..7add32e5d8c5 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 4bceb11c7380..cf607fe69dbd 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index ee1c040f321c..952fbe0b7231 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8abb393dafab..3e9720e1f34f 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index dda0e6affcf1..a5f17de45ac6 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6168b85463ed..74a390c6db16 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index bd3908d26c4f..a972d11c97c9 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 4b008e8884a1..efc4c7124ccc 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index cf7346110bd8..d14b547b7cd5 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 1afe46e44dac..1c127a43017b 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 486d342e74b6..c2f03e8774ad 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 5908ccec6aea..3a95068fc119 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2014, Intel Corp. 10 * Copyright (C) 2000 - 2015, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 3a0beeb86ba5..ee0cdd60b93d 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 720b1cdda711..3e6989738e85 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 8daf9de82b73..39da9da62bbf 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c57666196672..43b40de90484 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index aee5e45f6d35..bbe74bcebbae 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 3c7f7378b94d..d72565a3c646 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index b67522df01ac..2e4c42b377ec 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index a1e7e6b6fcf7..8a7b07b6adc8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 6c0759c0db47..77244182ff02 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 9f74795e2268..e5ff89bcb3f5 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index f7f5107e754d..df54d46225cd 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 15623da26200..843942fb4be5 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 2ac28d297305..fcaa30c611fb 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9d6e2c1de1f8..43b3ea40c0b6 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 24f7d5ea678a..89ac2022465e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c7bffff9ed32..bf6873f95e72 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 3393a73ca0d6..b78dc7c6d5d7 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index aa70154cf4fa..5ed064e8673c 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -114,17 +114,6 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
114 114
115 ACPI_FUNCTION_TRACE(ev_enable_gpe); 115 ACPI_FUNCTION_TRACE(ev_enable_gpe);
116 116
117 /*
118 * We will only allow a GPE to be enabled if it has either an associated
119 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
120 * feature. Otherwise, the GPE will be immediately disabled by
121 * acpi_ev_gpe_dispatch the first time it fires.
122 */
123 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
124 ACPI_GPE_DISPATCH_NONE) {
125 return_ACPI_STATUS(AE_NO_HANDLER);
126 }
127
128 /* Clear the GPE (of stale events) */ 117 /* Clear the GPE (of stale events) */
129 118
130 status = acpi_hw_clear_gpe(gpe_event_info); 119 status = acpi_hw_clear_gpe(gpe_event_info);
@@ -339,7 +328,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
339{ 328{
340 acpi_status status; 329 acpi_status status;
341 struct acpi_gpe_block_info *gpe_block; 330 struct acpi_gpe_block_info *gpe_block;
331 struct acpi_namespace_node *gpe_device;
342 struct acpi_gpe_register_info *gpe_register_info; 332 struct acpi_gpe_register_info *gpe_register_info;
333 struct acpi_gpe_event_info *gpe_event_info;
334 u32 gpe_number;
335 struct acpi_gpe_handler_info *gpe_handler_info;
343 u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; 336 u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
344 u8 enabled_status_byte; 337 u8 enabled_status_byte;
345 u32 status_reg; 338 u32 status_reg;
@@ -367,6 +360,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
367 360
368 gpe_block = gpe_xrupt_list->gpe_block_list_head; 361 gpe_block = gpe_xrupt_list->gpe_block_list_head;
369 while (gpe_block) { 362 while (gpe_block) {
363 gpe_device = gpe_block->node;
364
370 /* 365 /*
371 * Read all of the 8-bit GPE status and enable registers in this GPE 366 * Read all of the 8-bit GPE status and enable registers in this GPE
372 * block, saving all of them. Find all currently active GP events. 367 * block, saving all of them. Find all currently active GP events.
@@ -442,16 +437,68 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
442 437
443 /* Examine one GPE bit */ 438 /* Examine one GPE bit */
444 439
440 gpe_event_info =
441 &gpe_block->
442 event_info[((acpi_size) i *
443 ACPI_GPE_REGISTER_WIDTH) + j];
444 gpe_number =
445 j + gpe_register_info->base_gpe_number;
446
445 if (enabled_status_byte & (1 << j)) { 447 if (enabled_status_byte & (1 << j)) {
446 /* 448
447 * Found an active GPE. Dispatch the event to a handler 449 /* Invoke global event handler if present */
448 * or method. 450
449 */ 451 acpi_gpe_count++;
450 int_status |= 452 if (acpi_gbl_global_event_handler) {
451 acpi_ev_gpe_dispatch(gpe_block-> 453 acpi_gbl_global_event_handler
452 node, 454 (ACPI_EVENT_TYPE_GPE,
453 &gpe_block-> 455 gpe_device, gpe_number,
454 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); 456 acpi_gbl_global_event_handler_context);
457 }
458
459 /* Found an active GPE */
460
461 if (ACPI_GPE_DISPATCH_TYPE
462 (gpe_event_info->flags) ==
463 ACPI_GPE_DISPATCH_RAW_HANDLER) {
464
465 /* Dispatch the event to a raw handler */
466
467 gpe_handler_info =
468 gpe_event_info->dispatch.
469 handler;
470
471 /*
472 * There is no protection around the namespace node
473 * and the GPE handler to ensure a safe destruction
474 * because:
475 * 1. The namespace node is expected to always
476 * exist after loading a table.
477 * 2. The GPE handler is expected to be flushed by
478 * acpi_os_wait_events_complete() before the
479 * destruction.
480 */
481 acpi_os_release_lock
482 (acpi_gbl_gpe_lock, flags);
483 int_status |=
484 gpe_handler_info->
485 address(gpe_device,
486 gpe_number,
487 gpe_handler_info->
488 context);
489 flags =
490 acpi_os_acquire_lock
491 (acpi_gbl_gpe_lock);
492 } else {
493 /*
494 * Dispatch the event to a standard handler or
495 * method.
496 */
497 int_status |=
498 acpi_ev_gpe_dispatch
499 (gpe_device, gpe_event_info,
500 gpe_number);
501 }
455 } 502 }
456 } 503 }
457 } 504 }
@@ -484,52 +531,15 @@ unlock_and_exit:
484static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) 531static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
485{ 532{
486 struct acpi_gpe_event_info *gpe_event_info = context; 533 struct acpi_gpe_event_info *gpe_event_info = context;
487 acpi_status status; 534 acpi_status status = AE_OK;
488 struct acpi_gpe_event_info *local_gpe_event_info;
489 struct acpi_evaluate_info *info; 535 struct acpi_evaluate_info *info;
490 struct acpi_gpe_notify_info *notify; 536 struct acpi_gpe_notify_info *notify;
491 537
492 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 538 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
493 539
494 /* Allocate a local GPE block */
495
496 local_gpe_event_info =
497 ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
498 if (!local_gpe_event_info) {
499 ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
500 return_VOID;
501 }
502
503 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
504 if (ACPI_FAILURE(status)) {
505 ACPI_FREE(local_gpe_event_info);
506 return_VOID;
507 }
508
509 /* Must revalidate the gpe_number/gpe_block */
510
511 if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
512 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
513 ACPI_FREE(local_gpe_event_info);
514 return_VOID;
515 }
516
517 /*
518 * Take a snapshot of the GPE info for this level - we copy the info to
519 * prevent a race condition with remove_handler/remove_block.
520 */
521 ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
522 sizeof(struct acpi_gpe_event_info));
523
524 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
525 if (ACPI_FAILURE(status)) {
526 ACPI_FREE(local_gpe_event_info);
527 return_VOID;
528 }
529
530 /* Do the correct dispatch - normal method or implicit notify */ 540 /* Do the correct dispatch - normal method or implicit notify */
531 541
532 switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 542 switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
533 case ACPI_GPE_DISPATCH_NOTIFY: 543 case ACPI_GPE_DISPATCH_NOTIFY:
534 /* 544 /*
535 * Implicit notify. 545 * Implicit notify.
@@ -542,7 +552,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
542 * June 2012: Expand implicit notify mechanism to support 552 * June 2012: Expand implicit notify mechanism to support
543 * notifies on multiple device objects. 553 * notifies on multiple device objects.
544 */ 554 */
545 notify = local_gpe_event_info->dispatch.notify_list; 555 notify = gpe_event_info->dispatch.notify_list;
546 while (ACPI_SUCCESS(status) && notify) { 556 while (ACPI_SUCCESS(status) && notify) {
547 status = 557 status =
548 acpi_ev_queue_notify_request(notify->device_node, 558 acpi_ev_queue_notify_request(notify->device_node,
@@ -566,7 +576,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
566 * _Lxx/_Exx control method that corresponds to this GPE 576 * _Lxx/_Exx control method that corresponds to this GPE
567 */ 577 */
568 info->prefix_node = 578 info->prefix_node =
569 local_gpe_event_info->dispatch.method_node; 579 gpe_event_info->dispatch.method_node;
570 info->flags = ACPI_IGNORE_RETURN_VALUE; 580 info->flags = ACPI_IGNORE_RETURN_VALUE;
571 581
572 status = acpi_ns_evaluate(info); 582 status = acpi_ns_evaluate(info);
@@ -576,25 +586,27 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
576 if (ACPI_FAILURE(status)) { 586 if (ACPI_FAILURE(status)) {
577 ACPI_EXCEPTION((AE_INFO, status, 587 ACPI_EXCEPTION((AE_INFO, status,
578 "while evaluating GPE method [%4.4s]", 588 "while evaluating GPE method [%4.4s]",
579 acpi_ut_get_node_name 589 acpi_ut_get_node_name(gpe_event_info->
580 (local_gpe_event_info->dispatch. 590 dispatch.
581 method_node))); 591 method_node)));
582 } 592 }
583 break; 593 break;
584 594
585 default: 595 default:
586 596
587 return_VOID; /* Should never happen */ 597 goto error_exit; /* Should never happen */
588 } 598 }
589 599
590 /* Defer enabling of GPE until all notify handlers are done */ 600 /* Defer enabling of GPE until all notify handlers are done */
591 601
592 status = acpi_os_execute(OSL_NOTIFY_HANDLER, 602 status = acpi_os_execute(OSL_NOTIFY_HANDLER,
593 acpi_ev_asynch_enable_gpe, 603 acpi_ev_asynch_enable_gpe, gpe_event_info);
594 local_gpe_event_info); 604 if (ACPI_SUCCESS(status)) {
595 if (ACPI_FAILURE(status)) { 605 return_VOID;
596 ACPI_FREE(local_gpe_event_info);
597 } 606 }
607
608error_exit:
609 acpi_ev_asynch_enable_gpe(gpe_event_info);
598 return_VOID; 610 return_VOID;
599} 611}
600 612
@@ -622,7 +634,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
622 (void)acpi_ev_finish_gpe(gpe_event_info); 634 (void)acpi_ev_finish_gpe(gpe_event_info);
623 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 635 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
624 636
625 ACPI_FREE(gpe_event_info);
626 return; 637 return;
627} 638}
628 639
@@ -692,15 +703,6 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
692 703
693 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 704 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
694 705
695 /* Invoke global event handler if present */
696
697 acpi_gpe_count++;
698 if (acpi_gbl_global_event_handler) {
699 acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
700 gpe_number,
701 acpi_gbl_global_event_handler_context);
702 }
703
704 /* 706 /*
705 * Always disable the GPE so that it does not keep firing before 707 * Always disable the GPE so that it does not keep firing before
706 * any asynchronous activity completes (either from the execution 708 * any asynchronous activity completes (either from the execution
@@ -741,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
741 * If there is neither a handler nor a method, leave the GPE 743 * If there is neither a handler nor a method, leave the GPE
742 * disabled. 744 * disabled.
743 */ 745 */
744 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 746 switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
745 case ACPI_GPE_DISPATCH_HANDLER: 747 case ACPI_GPE_DISPATCH_HANDLER:
746 748
747 /* Invoke the installed handler (at interrupt level) */ 749 /* Invoke the installed handler (at interrupt level) */
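Throughout this file the open-coded (flags & ACPI_GPE_DISPATCH_MASK) tests become ACPI_GPE_DISPATCH_TYPE() comparisons, and the new ACPI_GPE_DISPATCH_RAW_HANDLER type joins HANDLER/METHOD/NOTIFY. A small sketch of the resulting test pattern; it assumes the ACPICA-internal headers and that the macro simply masks the flags, and the helper name is invented.

    /* presumably:  #define ACPI_GPE_DISPATCH_TYPE(flags) \
     *                      ((u8)((flags) & ACPI_GPE_DISPATCH_MASK))        */

    static u8 gpe_has_handler(struct acpi_gpe_event_info *gpe_event_info)
    {
            u8 type = ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags);

            return (type == ACPI_GPE_DISPATCH_HANDLER ||
                    type == ACPI_GPE_DISPATCH_RAW_HANDLER);
    }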
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index d86699eea33c..e0f24c504513 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -474,10 +474,12 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
474 * Ignore GPEs that have no corresponding _Lxx/_Exx method 474 * Ignore GPEs that have no corresponding _Lxx/_Exx method
475 * and GPEs that are used to wake the system 475 * and GPEs that are used to wake the system
476 */ 476 */
477 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 477 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
478 ACPI_GPE_DISPATCH_NONE) 478 ACPI_GPE_DISPATCH_NONE)
479 || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) 479 || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
480 == ACPI_GPE_DISPATCH_HANDLER) 480 ACPI_GPE_DISPATCH_HANDLER)
481 || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
482 ACPI_GPE_DISPATCH_RAW_HANDLER)
481 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 483 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
482 continue; 484 continue;
483 } 485 }
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 7be928379879..8840296d5b20 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -401,15 +401,17 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
401 return_ACPI_STATUS(AE_OK); 401 return_ACPI_STATUS(AE_OK);
402 } 402 }
403 403
404 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 404 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
405 ACPI_GPE_DISPATCH_HANDLER) { 405 ACPI_GPE_DISPATCH_HANDLER) ||
406 (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
407 ACPI_GPE_DISPATCH_RAW_HANDLER)) {
406 408
407 /* If there is already a handler, ignore this GPE method */ 409 /* If there is already a handler, ignore this GPE method */
408 410
409 return_ACPI_STATUS(AE_OK); 411 return_ACPI_STATUS(AE_OK);
410 } 412 }
411 413
412 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 414 if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
413 ACPI_GPE_DISPATCH_METHOD) { 415 ACPI_GPE_DISPATCH_METHOD) {
414 /* 416 /*
415 * If there is already a method, ignore this method. But check 417 * If there is already a method, ignore this method. But check
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 17e4bbfdb096..3a958f3612fe 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -108,53 +108,6 @@ unlock_and_exit:
 
 /*******************************************************************************
 *
- * FUNCTION: acpi_ev_valid_gpe_event
- *
- * PARAMETERS: gpe_event_info - Info for this GPE
- *
- * RETURN: TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- * Should be called only when the GPE lists are semaphore locked
- * and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
- struct acpi_gpe_xrupt_info *gpe_xrupt_block;
- struct acpi_gpe_block_info *gpe_block;
-
- ACPI_FUNCTION_ENTRY();
-
- /* No need for spin lock since we are not changing any list elements */
-
- /* Walk the GPE interrupt levels */
-
- gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_block) {
- gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
- /* Walk the GPE blocks on this interrupt level */
-
- while (gpe_block) {
- if ((&gpe_block->event_info[0] <= gpe_event_info) &&
- (&gpe_block->event_info[gpe_block->gpe_count] >
- gpe_event_info)) {
- return (TRUE);
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_block = gpe_xrupt_block->next;
- }
-
- return (FALSE);
-}
-
-/*******************************************************************************
- *
 * FUNCTION: acpi_ev_get_gpe_device
 *
 * PARAMETERS: GPE_WALK_CALLBACK
@@ -371,8 +324,10 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 ACPI_GPE_REGISTER_WIDTH)
 + j];
 
-if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-    ACPI_GPE_DISPATCH_HANDLER) {
+if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+     ACPI_GPE_DISPATCH_HANDLER) ||
+    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+     ACPI_GPE_DISPATCH_RAW_HANDLER)) {
 
 /* Delete an installed handler block */
 
@@ -380,10 +335,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 gpe_event_info->dispatch.handler = NULL;
 gpe_event_info->flags &=
 ~ACPI_GPE_DISPATCH_MASK;
-} else
-    if ((gpe_event_info->
-         flags & ACPI_GPE_DISPATCH_MASK) ==
-        ACPI_GPE_DISPATCH_NOTIFY) {
+} else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
+           == ACPI_GPE_DISPATCH_NOTIFY) {
 
 /* Delete the implicit notification device list */
 
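
The hunks above in evgpeinit.c and evgpeutil.c (and the similar conversions below) replace bit-tests on the ACPI_GPE_DISPATCH_* values with equality tests on the masked dispatch field via ACPI_GPE_DISPATCH_TYPE(). A minimal standalone sketch of why that matters follows; the GPE_DISPATCH_* values and the GPE_DISPATCH_TYPE() macro here are illustrative assumptions for the example, not the definitions from ACPICA's actypes.h.

/*
 * Illustrative sketch only: the dispatch values below are assumed for the
 * example. It shows why a bit-test such as (flags & DISPATCH_METHOD)
 * misfires once the dispatch kinds form an enumeration stored in a masked
 * field, which is what the ACPI_GPE_DISPATCH_TYPE() comparisons address.
 */
#include <stdio.h>

#define GPE_DISPATCH_NONE        0x00
#define GPE_DISPATCH_METHOD      0x01
#define GPE_DISPATCH_HANDLER     0x02
#define GPE_DISPATCH_NOTIFY      0x03
#define GPE_DISPATCH_RAW_HANDLER 0x04   /* the kind added by this series */
#define GPE_DISPATCH_MASK        0x07

#define GPE_DISPATCH_TYPE(flags) ((unsigned char)((flags) & GPE_DISPATCH_MASK))

int main(void)
{
	unsigned char flags = GPE_DISPATCH_NOTIFY;      /* 0x03 = 0x01 | 0x02 */

	/* Old style bit-test: NOTIFY (0x03) also "contains" METHOD (0x01) */
	printf("bit-test says METHOD:  %d\n",
	       (flags & GPE_DISPATCH_METHOD) != 0);

	/* New style: compare the whole masked dispatch field */
	printf("type-test says METHOD: %d\n",
	       GPE_DISPATCH_TYPE(flags) == GPE_DISPATCH_METHOD);

	return 0;
}

With a raw handler added as a further dispatch kind, only the masked comparison distinguishes handler, raw handler, method and notify reliably.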
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 78ac29351c9e..74e8595f5a2b 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 24ea3424981b..f7c9dfe7b990 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 8eb8575e8c16..9abace3401f9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 1b148a440d67..da323390bb70 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 29630e303829..0366703d2970 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 55a58f3ec8df..81f2d9e87fad 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,16 @@
 
 #define _COMPONENT ACPI_EVENTS
 ACPI_MODULE_NAME("evxface")
+#if (!ACPI_REDUCED_HARDWARE)
+/* Local prototypes */
+static acpi_status
+acpi_ev_install_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type,
+ u8 is_raw_handler,
+ acpi_gpe_handler address, void *context);
+
+#endif
 
 
 /*******************************************************************************
@@ -76,6 +86,7 @@ ACPI_MODULE_NAME("evxface")
 * handlers.
 *
 ******************************************************************************/
+
 acpi_status
 acpi_install_notify_handler(acpi_handle device,
 u32 handler_type,
@@ -717,32 +728,37 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
 
 /*******************************************************************************
 *
- * FUNCTION: acpi_install_gpe_handler
+ * FUNCTION: acpi_ev_install_gpe_handler
 *
 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
 * defined GPEs)
 * gpe_number - The GPE number within the GPE block
 * type - Whether this GPE should be treated as an
 * edge- or level-triggered interrupt.
+ * is_raw_handler - Whether this GPE should be handled using
+ * the special GPE handler mode.
 * address - Address of the handler
 * context - Value passed to the handler on each GPE
 *
 * RETURN: Status
 *
- * DESCRIPTION: Install a handler for a General Purpose Event.
+ * DESCRIPTION: Internal function to install a handler for a General Purpose
+ * Event.
 *
 ******************************************************************************/
-acpi_status
-acpi_install_gpe_handler(acpi_handle gpe_device,
+static acpi_status
+acpi_ev_install_gpe_handler(acpi_handle gpe_device,
 u32 gpe_number,
-u32 type, acpi_gpe_handler address, void *context)
+u32 type,
+u8 is_raw_handler,
+acpi_gpe_handler address, void *context)
 {
 struct acpi_gpe_event_info *gpe_event_info;
 struct acpi_gpe_handler_info *handler;
 acpi_status status;
 acpi_cpu_flags flags;
 
-ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+ACPI_FUNCTION_TRACE(ev_install_gpe_handler);
 
 /* Parameter validation */
 
@@ -775,8 +791,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
 
 /* Make sure that there isn't a handler there already */
 
-if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-    ACPI_GPE_DISPATCH_HANDLER) {
+if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+     ACPI_GPE_DISPATCH_HANDLER) ||
+    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+     ACPI_GPE_DISPATCH_RAW_HANDLER)) {
 status = AE_ALREADY_EXISTS;
 goto free_and_exit;
 }
@@ -793,9 +811,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
 * automatically during initialization, in which case it has to be
 * disabled now to avoid spurious execution of the handler.
 */
-if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
-     (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
-    gpe_event_info->runtime_count) {
+if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+      ACPI_GPE_DISPATCH_METHOD) ||
+     (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+      ACPI_GPE_DISPATCH_NOTIFY)) && gpe_event_info->runtime_count) {
 handler->originally_enabled = TRUE;
 (void)acpi_ev_remove_gpe_reference(gpe_event_info);
 
@@ -816,7 +835,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
 
 gpe_event_info->flags &=
 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
-gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_HANDLER);
+gpe_event_info->flags |=
+    (u8)(type |
+         (is_raw_handler ? ACPI_GPE_DISPATCH_RAW_HANDLER :
+          ACPI_GPE_DISPATCH_HANDLER));
 
 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 
@@ -830,10 +852,78 @@ free_and_exit:
 goto unlock_and_exit;
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_gpe_handler
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
+ * defined GPEs)
+ * gpe_number - The GPE number within the GPE block
+ * type - Whether this GPE should be treated as an
+ * edge- or level-triggered interrupt.
+ * address - Address of the handler
+ * context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_install_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type, acpi_gpe_handler address, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+
+ status =
+     acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, FALSE,
+                                 address, context);
+
+ return_ACPI_STATUS(status);
+}
+
 ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
 
 /*******************************************************************************
 *
+ * FUNCTION: acpi_install_gpe_raw_handler
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
+ * defined GPEs)
+ * gpe_number - The GPE number within the GPE block
+ * type - Whether this GPE should be treated as an
+ * edge- or level-triggered interrupt.
+ * address - Address of the handler
+ * context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_raw_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type, acpi_gpe_handler address, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_raw_handler);
+
+ status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, TRUE,
+                                      address, context);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_raw_handler)
+
+/*******************************************************************************
+ *
 * FUNCTION: acpi_remove_gpe_handler
 *
 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
@@ -880,8 +970,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
 
 /* Make sure that a handler is indeed installed */
 
-if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
-    ACPI_GPE_DISPATCH_HANDLER) {
+if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+     ACPI_GPE_DISPATCH_HANDLER) &&
+    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+     ACPI_GPE_DISPATCH_RAW_HANDLER)) {
 status = AE_NOT_EXIST;
 goto unlock_and_exit;
 }
@@ -896,6 +988,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
 /* Remove the handler */
 
 handler = gpe_event_info->dispatch.handler;
+gpe_event_info->dispatch.handler = NULL;
 
 /* Restore Method node (if any), set dispatch flags */
 
@@ -909,9 +1002,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
 * enabled, it should be enabled at this point to restore the
 * post-initialization configuration.
 */
-if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
-     (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
-    handler->originally_enabled) {
+if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+      ACPI_GPE_DISPATCH_METHOD) ||
+     (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+      ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
 (void)acpi_ev_add_gpe_reference(gpe_event_info);
 }
 
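
For context, a hedged usage sketch of the acpi_install_gpe_raw_handler() interface introduced above, written from a hypothetical driver's point of view; MY_GPE_NUMBER, the handler body and the error handling are invented for illustration, and the prototypes are assumed to match the acpixf.h/actypes.h changes elsewhere in this series.

#include <linux/acpi.h>
#include <linux/errno.h>

#define MY_GPE_NUMBER 0x17	/* hypothetical FADT-defined GPE */

/* With a raw handler, ACPICA does not clear or re-enable the GPE on the
 * driver's behalf; the driver does that itself (see acpi_set_gpe() and
 * acpi_finish_gpe() in evxfgpe.c below). */
static u32 my_raw_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			      void *context)
{
	/* ... service the hardware ... */
	return ACPI_INTERRUPT_HANDLED;
}

static int my_driver_init(void)
{
	acpi_status status;

	status = acpi_install_gpe_raw_handler(NULL, MY_GPE_NUMBER,
					      ACPI_GPE_LEVEL_TRIGGERED,
					      my_raw_gpe_handler, NULL);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	status = acpi_enable_gpe(NULL, MY_GPE_NUMBER);
	if (ACPI_FAILURE(status)) {
		acpi_remove_gpe_handler(NULL, MY_GPE_NUMBER,
					my_raw_gpe_handler);
		return -ENODEV;
	}

	return 0;
}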
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index bb8cbf5961bf..df06a23c4197 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index e889a5304abd..70eb47e3d724 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -132,7 +132,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
 */
 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
 if (gpe_event_info) {
-if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
 ACPI_GPE_DISPATCH_NONE) {
 status = acpi_ev_add_gpe_reference(gpe_event_info);
 } else {
@@ -183,6 +183,77 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
 
 ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
 
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
+ * the reference count mechanism used in the acpi_enable_gpe(),
+ * acpi_disable_gpe() interfaces.
+ * This API is typically used by the GPE raw handler mode driver
+ * to switch between the polling mode and the interrupt mode after
+ * the driver has enabled the GPE.
+ * The APIs should be invoked in this order:
+ * acpi_enable_gpe() <- Ensure the reference count > 0
+ * acpi_set_gpe(ACPI_GPE_DISABLE) <- Enter polling mode
+ * acpi_set_gpe(ACPI_GPE_ENABLE) <- Leave polling mode
+ * acpi_disable_gpe() <- Decrease the reference count
+ *
+ * Note: If a GPE is shared by 2 silicon components, then both the drivers
+ * should support GPE polling mode or disabling the GPE for long period
+ * for one driver may break the other. So use it with care since all
+ * firmware _Lxx/_Exx handlers currently rely on the GPE interrupt mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_set_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Perform the action */
+
+ switch (action) {
+ case ACPI_GPE_ENABLE:
+
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ break;
+
+ case ACPI_GPE_DISABLE:
+
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
+ break;
+
+ default:
+
+ status = AE_BAD_PARAMETER;
+ break;
+ }
+
+unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe)
 
 /*******************************************************************************
 *
@@ -313,7 +384,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
 * known as an "implicit notify". Note: The GPE is assumed to be
 * level-triggered (for windows compatibility).
 */
-if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
 ACPI_GPE_DISPATCH_NONE) {
 /*
 * This is the first device for implicit notify on this GPE.
@@ -327,7 +398,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
 * If we already have an implicit notify on this GPE, add
 * this device to the notify list.
 */
-if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
 ACPI_GPE_DISPATCH_NOTIFY) {
 
 /* Ensure that the device is not already in the list */
@@ -530,6 +601,49 @@ unlock_and_exit:
 
 ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
 
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_finish_gpe
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE Block
+ * (NULL for FADT defined GPEs)
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear and conditionally reenable a GPE. This completes the GPE
+ * processing. Intended for use by asynchronous host-installed
+ * GPE handlers. The GPE is only reenabled if the enable_for_run bit
+ * is set in the GPE info.
+ *
+ ******************************************************************************/
+acpi_status acpi_finish_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_finish_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ev_finish_gpe(gpe_event_info);
+
+unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_finish_gpe)
+
 /******************************************************************************
 *
 * FUNCTION: acpi_disable_all_gpes
@@ -604,7 +718,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
 * all GPE blocks.
 *
 ******************************************************************************/
-
 acpi_status acpi_enable_all_wakeup_gpes(void)
 {
 acpi_status status;
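
The comment block added to acpi_set_gpe() above documents the call order expected of a GPE raw-handler driver that switches between interrupt and polling mode, and acpi_finish_gpe() is the completion call for asynchronous handlers. A short sketch of that order follows; the my_* wrappers and MY_GPE_NUMBER are invented for illustration and reuse the assumptions from the sketch after evxface.c.

static void my_enter_polling_mode(void)
{
	/* acpi_enable_gpe() first, so the reference count is > 0 */
	acpi_enable_gpe(NULL, MY_GPE_NUMBER);

	/* Mask the interrupt while the hardware is polled directly */
	acpi_set_gpe(NULL, MY_GPE_NUMBER, ACPI_GPE_DISABLE);
}

static void my_leave_polling_mode(void)
{
	/* Back to interrupt mode, then drop the reference again */
	acpi_set_gpe(NULL, MY_GPE_NUMBER, ACPI_GPE_ENABLE);
	acpi_disable_gpe(NULL, MY_GPE_NUMBER);
}

/* From an asynchronous (raw) handler, complete GPE processing explicitly */
static void my_gpe_work_done(acpi_handle gpe_device, u32 gpe_number)
{
	/* Clears the event and re-enables it only if enabled for runtime */
	acpi_finish_gpe(gpe_device, gpe_number);
}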
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 2d6f187939c7..f21afbab03f7 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 7d2949420db7..6e0df2b9d5a4 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index c545386fee96..89a976b4ccf2 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 95d23dabcfbb..aaeea4840aaa 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 6fbfad47518c..e67d0aca3fe6 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 0f23c3f2678e..7c213b6b6472 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index b994845ed359..c161dd974f74 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 1d1b27a96c5b..49479927e7f7 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 2207e624f538..b56fc9d6f48e 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index b49ea2a95f4f..472030f2b5bb 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index dbb03b544e8c..453b00c30177 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 1b8e94104407..77930683ab7d 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 2ede656ee26a..fcc618aa2061 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 363767cf01e5..b813fed95e56 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 29e9e99f7fe3..c930edda3f65 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 118e942005e5..4c2836dc825b 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index cd5288a257a9..0fe188e238ef 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index ab060261b43e..c7e3b929aa85 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 3cde553bcbe1..b6b7f3af29e4 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3af8de3fcea4..d2964af9ad4d 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index daf49f7ea311..a7eee2400ce0 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 04bd16c08f9e..3101607b4efe 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index fd11018b0168..6fa3c8d8fc5f 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index f7da64123ed5..05450656fe3d 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index d9d72dff2a76..3f4225e95d93 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 1e66d960fc11..e5c5949f9081 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 858fdd6be598..e5599f610808 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 494027f5c067..84bc550f4f1d 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -54,6 +54,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 struct acpi_gpe_block_info *gpe_block,
 void *context);
 
+static acpi_status
+acpi_hw_gpe_enable_write(u8 enable_mask,
+ struct acpi_gpe_register_info *gpe_register_info);
+
 /******************************************************************************
 *
 * FUNCTION: acpi_hw_get_gpe_register_bit
@@ -146,7 +150,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
 
 status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
 if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
-gpe_register_info->enable_mask = enable_mask;
+gpe_register_info->enable_mask = (u8)enable_mask;
 }
 return (status);
 }
@@ -221,7 +225,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
 
 /* GPE currently handled? */
 
-if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
 ACPI_GPE_DISPATCH_NONE) {
 local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
 }
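
With the hwgpe.c change above, GPE status now reports ACPI_EVENT_FLAG_HAS_HANDLER for every dispatch type other than ACPI_GPE_DISPATCH_NONE, raw handlers included. A small sketch of how a caller might consult that bit through the public acpi_get_gpe_status() interface; the wrapper name and the GPE number are hypothetical.

static bool my_gpe_is_claimed(u32 gpe_number)
{
	acpi_event_status status = 0;

	if (ACPI_FAILURE(acpi_get_gpe_status(NULL, gpe_number, &status)))
		return false;

	/* Set for handler, raw handler, method and implicit-notify GPEs */
	return (status & ACPI_EVENT_FLAG_HAS_HANDLER) != 0;
}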
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 6aade8e1d2a1..c5214dec4988 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index a4c34d2c556b..3cf77afd142c 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d590693eb54e..7d21cae6d602 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 76ab5c1a814e..675c709a300b 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 6b919127cd9d..2bd33fe56cb3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 96d007df65ec..5f97468df8ff 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 6921c7f3d208..3b3767698827 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f1249e3463be..24fa19a76d70 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 607eb9e5150d..e107f929d9cf 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 80fcfc8c9c1b..5d347a71bd0b 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index b55642c4ee58..1a8b39c8d969 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -6,7 +6,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 3d88ef4a3e0d..80f097eb7381 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 42d37109aa5d..7dc367e6fe09 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index e634a05974db..7bcc68f57afa 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index a3fb7e4c0809..4a85c4517988 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 7c9d0181f341..bd6cd4a81316 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 7eee0a6f02f6..d293d9748036 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index a42ee9d6970d..677bc9330e64 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index e83cff31754b..c95a119767b5 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 392910ffbed9..0eb54315b4be 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 1b13b921dda9..8b79958b7aca 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 7e417aa5c91e..151fcd95ba84 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index b09e6bef72b8..c30672d23878 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index af1cc42a8aa1..4a9d4a66016e 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 4a5e3f5c0ff7..6ad02008c0c2 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 4758a1f2ce22..c68609a2bc1b 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 4bd558bf10d2..b6030a2deee1 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 8c6c11ce9760..d66c326485d8 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index dae9401be7a2..793383501f81 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -53,50 +53,6 @@ ACPI_MODULE_NAME("nsxfobj")
53 53
54/******************************************************************************* 54/*******************************************************************************
55 * 55 *
56 * FUNCTION: acpi_get_id
57 *
58 * PARAMETERS: Handle - Handle of object whose id is desired
59 * ret_id - Where the id will be placed
60 *
61 * RETURN: Status
62 *
63 * DESCRIPTION: This routine returns the owner id associated with a handle
64 *
65 ******************************************************************************/
66acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
67{
68 struct acpi_namespace_node *node;
69 acpi_status status;
70
71 /* Parameter Validation */
72
73 if (!ret_id) {
74 return (AE_BAD_PARAMETER);
75 }
76
77 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
78 if (ACPI_FAILURE(status)) {
79 return (status);
80 }
81
82 /* Convert and validate the handle */
83
84 node = acpi_ns_validate_handle(handle);
85 if (!node) {
86 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
87 return (AE_BAD_PARAMETER);
88 }
89
90 *ret_id = node->owner_id;
91
92 status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
93 return (status);
94}
95
96ACPI_EXPORT_SYMBOL(acpi_get_id)
97
98/*******************************************************************************
99 *
100 * FUNCTION: acpi_get_type 56 * FUNCTION: acpi_get_type
101 * 57 *
102 * PARAMETERS: handle - Handle of object whose type is desired 58 * PARAMETERS: handle - Handle of object whose type is desired
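
The removal above pairs with the deletion of acpi_unload_table_id() in the tbxface.c hunk further down: acpi_get_id() existed only to translate a namespace handle into the owner ID that interface consumed, and with that consumer gone the accessor is dead code. A minimal sketch of how the removed call pair maps onto the handle-based interface that remains (the wrapper name is hypothetical; acpi_unload_parent_table() is assumed to keep its existing signature):

    #include <linux/acpi.h>

    /* Sketch only: the removed two-step lookup versus the surviving
     * handle-based interface.  Helper name is illustrative.
     */
    static acpi_status unload_table_for_handle(acpi_handle handle)
    {
    	/*
    	 * Removed pattern:
    	 *	acpi_owner_id id;
    	 *	acpi_get_id(handle, &id);
    	 *	acpi_unload_table_id(id);
    	 *
    	 * The remaining interface resolves the owning table from the
    	 * handle itself, so owner IDs never leave ACPICA:
    	 */
    	return acpi_unload_parent_table(handle);
    }
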
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 314d314340ae..6d038770577b 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index b058e2390fdd..90437227d790 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index a6885077d59e..2f5ddd806c58 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1755d2ac5656..1af4a405e351 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 0d8d37ffd04d..e18e7c47f482 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 6d27b597394e..a555f7f7b9a2 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 32d250feea21..9d669cc6cb62 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 0b64181e7720..89984f30addc 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 3cd48802eede..960505ab409a 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 9cb07e1e76d9..ba5f69171288 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index e135acaa5e1c..841a5ea06094 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 916fd095ff34..66d406e8fe36 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -74,7 +74,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address16[5] = {
74 * Address Translation Offset 74 * Address Translation Offset
75 * Address Length 75 * Address Length
76 */ 76 */
77 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.granularity), 77 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.address.granularity),
78 AML_OFFSET(address16.granularity), 78 AML_OFFSET(address16.granularity),
79 5}, 79 5},
80 80
@@ -112,7 +112,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address32[5] = {
112 * Address Translation Offset 112 * Address Translation Offset
113 * Address Length 113 * Address Length
114 */ 114 */
115 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.granularity), 115 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.address.granularity),
116 AML_OFFSET(address32.granularity), 116 AML_OFFSET(address32.granularity),
117 5}, 117 5},
118 118
@@ -150,7 +150,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address64[5] = {
150 * Address Translation Offset 150 * Address Translation Offset
151 * Address Length 151 * Address Length
152 */ 152 */
153 {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.granularity), 153 {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.address.granularity),
154 AML_OFFSET(address64.granularity), 154 AML_OFFSET(address64.granularity),
155 5}, 155 5},
156 156
@@ -194,7 +194,8 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
194 * Address Length 194 * Address Length
195 * Type-Specific Attribute 195 * Type-Specific Attribute
196 */ 196 */
197 {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.ext_address64.granularity), 197 {ACPI_RSC_MOVE64,
198 ACPI_RS_OFFSET(data.ext_address64.address.granularity),
198 AML_OFFSET(ext_address64.granularity), 199 AML_OFFSET(ext_address64.granularity),
199 6} 200 6}
200}; 201};
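
The ACPI_RS_OFFSET() updates in this file track a reorganization of the address-space resource descriptors: the five fields shared by the 16/32/64-bit variants (granularity, minimum, maximum, translation offset, address length) move into a nested "address" member. A sketch of the assumed layout, shown for the 16-bit case (the 32/64-bit and extended variants follow the same pattern; quoted for context, not part of this hunk):

    struct acpi_address16_attribute {
    	u16 granularity;
    	u16 minimum;
    	u16 maximum;
    	u16 translation_offset;
    	u16 address_length;
    };

    struct acpi_resource_address16 {
    	/* common address-space header: resource type, decode, flags, ... */
    	struct acpi_address16_attribute address;	/* nested common part */
    	struct acpi_resource_source resource_source;
    };

Consumers therefore switch from res->data.address16.minimum to res->data.address16.address.minimum, which is exactly what the updated ACPI_RS_OFFSET() references encode.
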
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 689556744b03..cb739a694931 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 049d9c22a0f9..15434e4c9b34 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index c3c56b5a9788..1539394c8c52 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 2f9332d5c973..b29d9ec63d1b 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -183,15 +183,15 @@ struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
183 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16), 183 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
184 "16-Bit WORD Address Space", NULL}, 184 "16-Bit WORD Address Space", NULL},
185 {ACPI_RSD_ADDRESS, 0, NULL, NULL}, 185 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
186 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity", 186 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.granularity),
187 NULL}, 187 "Granularity", NULL},
188 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum", 188 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.minimum),
189 NULL}, 189 "Address Minimum", NULL},
190 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum", 190 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.maximum),
191 NULL}, 191 "Address Maximum", NULL},
192 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset), 192 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.translation_offset),
193 "Translation Offset", NULL}, 193 "Translation Offset", NULL},
194 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length), 194 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.address_length),
195 "Address Length", NULL}, 195 "Address Length", NULL},
196 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL} 196 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
197}; 197};
@@ -200,15 +200,15 @@ struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
200 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32), 200 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
201 "32-Bit DWORD Address Space", NULL}, 201 "32-Bit DWORD Address Space", NULL},
202 {ACPI_RSD_ADDRESS, 0, NULL, NULL}, 202 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
203 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity", 203 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.granularity),
204 NULL}, 204 "Granularity", NULL},
205 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum", 205 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.minimum),
206 NULL}, 206 "Address Minimum", NULL},
207 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum", 207 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.maximum),
208 NULL}, 208 "Address Maximum", NULL},
209 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset), 209 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.translation_offset),
210 "Translation Offset", NULL}, 210 "Translation Offset", NULL},
211 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length), 211 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.address_length),
212 "Address Length", NULL}, 212 "Address Length", NULL},
213 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL} 213 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
214}; 214};
@@ -217,15 +217,15 @@ struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
217 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64), 217 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
218 "64-Bit QWORD Address Space", NULL}, 218 "64-Bit QWORD Address Space", NULL},
219 {ACPI_RSD_ADDRESS, 0, NULL, NULL}, 219 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
220 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity", 220 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.granularity),
221 NULL}, 221 "Granularity", NULL},
222 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum", 222 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.minimum),
223 NULL}, 223 "Address Minimum", NULL},
224 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum", 224 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.maximum),
225 NULL}, 225 "Address Maximum", NULL},
226 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset), 226 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.translation_offset),
227 "Translation Offset", NULL}, 227 "Translation Offset", NULL},
228 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length), 228 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.address_length),
229 "Address Length", NULL}, 229 "Address Length", NULL},
230 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL} 230 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
231}; 231};
@@ -234,15 +234,16 @@ struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
234 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64), 234 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
235 "64-Bit Extended Address Space", NULL}, 235 "64-Bit Extended Address Space", NULL},
236 {ACPI_RSD_ADDRESS, 0, NULL, NULL}, 236 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
237 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity), 237 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.granularity),
238 "Granularity", NULL}, 238 "Granularity", NULL},
239 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum), 239 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.minimum),
240 "Address Minimum", NULL}, 240 "Address Minimum", NULL},
241 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum), 241 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.maximum),
242 "Address Maximum", NULL}, 242 "Address Maximum", NULL},
243 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset), 243 {ACPI_RSD_UINT64,
244 ACPI_RSD_OFFSET(ext_address64.address.translation_offset),
244 "Translation Offset", NULL}, 245 "Translation Offset", NULL},
245 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length), 246 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.address_length),
246 "Address Length", NULL}, 247 "Address Length", NULL},
247 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific), 248 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
248 "Type-Specific Attribute", NULL} 249 "Type-Specific Attribute", NULL}
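
The rsdumpinfo.c rows are the debug-dump side of the same layout change. Each row pairs a print opcode with a byte offset into union acpi_resource_data plus a label; the row format below is a sketch assumed from ACPICA's resource headers, with an illustrative name:

    struct acpi_rsdump_info_example {
    	u8 opcode;		/* e.g. ACPI_RSD_UINT16: print a 16-bit field */
    	u8 offset;		/* byte offset into union acpi_resource_data */
    	char *name;		/* label printed next to the value */
    	const char **pointer;	/* optional string table for decoding flags */
    };

ACPI_RSD_OFFSET() is essentially an offsetof() into the resource data union, so the edits here are mechanical: each row now points at data.addressNN.address.<field> instead of data.addressNN.<field>.
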
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 9d3f8a9a24bd..edecfc675979 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 19d64873290a..5adba018bab0 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 3461f7db26df..07cfa70a475b 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 77291293af64..50d5be2ee062 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index eab4483ff5f8..c6b80862030e 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 41eea4bc089c..1fe49d223663 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 9e8407223d95..4c8c6fe6ea74 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 897a5ceb0420..ece3cd60cc6a 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 877ab9202133..8e6276df0226 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -60,11 +60,11 @@ ACPI_MODULE_NAME("rsxface")
60 ACPI_COPY_FIELD(out, in, min_address_fixed); \ 60 ACPI_COPY_FIELD(out, in, min_address_fixed); \
61 ACPI_COPY_FIELD(out, in, max_address_fixed); \ 61 ACPI_COPY_FIELD(out, in, max_address_fixed); \
62 ACPI_COPY_FIELD(out, in, info); \ 62 ACPI_COPY_FIELD(out, in, info); \
63 ACPI_COPY_FIELD(out, in, granularity); \ 63 ACPI_COPY_FIELD(out, in, address.granularity); \
64 ACPI_COPY_FIELD(out, in, minimum); \ 64 ACPI_COPY_FIELD(out, in, address.minimum); \
65 ACPI_COPY_FIELD(out, in, maximum); \ 65 ACPI_COPY_FIELD(out, in, address.maximum); \
66 ACPI_COPY_FIELD(out, in, translation_offset); \ 66 ACPI_COPY_FIELD(out, in, address.translation_offset); \
67 ACPI_COPY_FIELD(out, in, address_length); \ 67 ACPI_COPY_FIELD(out, in, address.address_length); \
68 ACPI_COPY_FIELD(out, in, resource_source); 68 ACPI_COPY_FIELD(out, in, resource_source);
69/* Local prototypes */ 69/* Local prototypes */
70static acpi_status 70static acpi_status
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index f499c10ceb4a..6a144957aadd 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 41519a958083..7d2486005e3f 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index cb947700206c..0b879fcfef67 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 755b90c40ddf..9bad45e63a45 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index df3bb20ea325..ef16c06e5091 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 6b1ca9991b90..6559a58439c5 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 6482b0ded652..60e94f87f27a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -265,45 +265,6 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
265 265
266/******************************************************************************* 266/*******************************************************************************
267 * 267 *
268 * FUNCTION: acpi_unload_table_id
269 *
270 * PARAMETERS: id - Owner ID of the table to be removed.
271 *
272 * RETURN: Status
273 *
274 * DESCRIPTION: This routine is used to force the unload of a table (by id)
275 *
276 ******************************************************************************/
277acpi_status acpi_unload_table_id(acpi_owner_id id)
278{
279 int i;
280 acpi_status status = AE_NOT_EXIST;
281
282 ACPI_FUNCTION_TRACE(acpi_unload_table_id);
283
284 /* Find table in the global table list */
285 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
286 if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
287 continue;
288 }
289 /*
290 * Delete all namespace objects owned by this table. Note that these
291 * objects can appear anywhere in the namespace by virtue of the AML
292 * "Scope" operator. Thus, we need to track ownership by an ID, not
293 * simply a position within the hierarchy
294 */
295 acpi_tb_delete_namespace_by_owner(i);
296 status = acpi_tb_release_owner_id(i);
297 acpi_tb_set_table_loaded_flag(i, FALSE);
298 break;
299 }
300 return_ACPI_STATUS(status);
301}
302
303ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
304
305/*******************************************************************************
306 *
307 * FUNCTION: acpi_get_table_with_size 268 * FUNCTION: acpi_get_table_with_size
308 * 269 *
309 * PARAMETERS: signature - ACPI signature of needed table 270 * PARAMETERS: signature - ACPI signature of needed table
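
This is the consumer side of the acpi_get_id() removal earlier in the series: with no public by-ID unload left, owner IDs stay internal to ACPICA. For context, a sketch of the bookkeeping the deleted loop relied on (names below are illustrative, not ACPICA's):

    struct example_table_desc {
    	u16 owner_id;		/* assigned when the table is loaded */
    	u8 flags;		/* includes a "table loaded" flag */
    	/* ... physical address, length, signature ... */
    };

The removed function scanned the global root table list for a matching owner_id, deleted every namespace object tagged with that ID (such objects can sit anywhere in the namespace because of AML Scope operators), released the ID and cleared the loaded flag, which is the sequence visible in the deleted lines above. Handle-based unloading via acpi_unload_parent_table() is assumed to perform the same steps after resolving the owning table internally.
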
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index ab5308b81aa8..aadb3002a2dd 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 43a54af2b548..eac52cf14f1a 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index a1acec9d2ef3..1279f50da757 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index efac83c606dc..61d8f6d186d1 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 038ea887f562..242bd071f007 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 78fde0aac487..eacc5eee362e 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index ff601c0f7c7a..c37ec5035f4c 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index e516254c63b2..57078e3ea9b7 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 40e923e675fc..988e23b7795c 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index a3516de213fa..71fce389fd48 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 8e544d4688cd..9ef80f2828e3 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 8fed1482d228..6c738fa0cd42 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 0403dcaabaf2..743a0ae9fb17 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
index 4e263a8cc6f0..7e1168be39fa 100644
--- a/drivers/acpi/acpica/utfileio.c
+++ b/drivers/acpi/acpica/utfileio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 77ceac715f28..5e8df9177da4 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 9afa9441b183..aa448278ba28 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 4b12880e5b11..27431cfc1c44 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 77120ec9ea86..e402e07b4846 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index dc6e96547f18..089f78bbd59b 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index d44dee6ee10a..f9ff100f0159 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 2e2bb14e1099..56bbacd576f2 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 82717fff9ffc..37b8b58fcd56 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index dfa9009bfc87..7d83efe1ea29 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 685766fc6ca8..574cd3118313 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 36bec57ebd23..2959217067cb 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index db30caff130a..29e449935a82 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 0ce3f5a0dd67..82ca9142e10d 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index bc1ff820c7dd..b3505dbc715e 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 1cc97a752c15..8274cc16edc3 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 6dc54b3c28b0..83b6c52490dc 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 7d0ee969d781..130dd9f96f0f 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 4dc33130f134..c6149a212149 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 49c873c68756..0929187bdce0 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 88ef77f3cf88..306e785f9418 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index b1fd6886e439..083a76891889 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 2a0f9e04d3a4..f2606af3364c 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c0d44d394ca3..735db11a9b00 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1027,7 +1027,6 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
1027 1027
1028static struct dev_pm_domain acpi_general_pm_domain = { 1028static struct dev_pm_domain acpi_general_pm_domain = {
1029 .ops = { 1029 .ops = {
1030#ifdef CONFIG_PM
1031 .runtime_suspend = acpi_subsys_runtime_suspend, 1030 .runtime_suspend = acpi_subsys_runtime_suspend,
1032 .runtime_resume = acpi_subsys_runtime_resume, 1031 .runtime_resume = acpi_subsys_runtime_resume,
1033#ifdef CONFIG_PM_SLEEP 1032#ifdef CONFIG_PM_SLEEP
@@ -1041,7 +1040,6 @@ static struct dev_pm_domain acpi_general_pm_domain = {
1041 .poweroff_late = acpi_subsys_suspend_late, 1040 .poweroff_late = acpi_subsys_suspend_late,
1042 .restore_early = acpi_subsys_resume_early, 1041 .restore_early = acpi_subsys_resume_early,
1043#endif 1042#endif
1044#endif
1045 }, 1043 },
1046}; 1044};
1047 1045
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 1b5853f384e2..14d0c89ada2a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * ec.c - ACPI Embedded Controller Driver (v2.2) 2 * ec.c - ACPI Embedded Controller Driver (v3)
3 * 3 *
4 * Copyright (C) 2001-2014 Intel Corporation 4 * Copyright (C) 2001-2015 Intel Corporation
5 * Author: 2014 Lv Zheng <lv.zheng@intel.com> 5 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
6 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> 6 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
7 * 2006 Denis Sadykov <denis.m.sadykov@intel.com> 7 * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
8 * 2004 Luming Yu <luming.yu@intel.com> 8 * 2004 Luming Yu <luming.yu@intel.com>
@@ -31,6 +31,7 @@
31 31
32/* Uncomment next line to get verbose printout */ 32/* Uncomment next line to get verbose printout */
33/* #define DEBUG */ 33/* #define DEBUG */
34#define DEBUG_REF 0
34#define pr_fmt(fmt) "ACPI : EC: " fmt 35#define pr_fmt(fmt) "ACPI : EC: " fmt
35 36
36#include <linux/kernel.h> 37#include <linux/kernel.h>
@@ -71,20 +72,32 @@ enum ec_command {
71#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ 72#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
72#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 73#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
73#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ 74#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
75#define ACPI_EC_UDELAY_POLL 1000 /* Wait 1ms for EC transaction polling */
74#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query 76#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
75 * when trying to clear the EC */ 77 * when trying to clear the EC */
76 78
77enum { 79enum {
78 EC_FLAGS_QUERY_PENDING, /* Query is pending */ 80 EC_FLAGS_EVENT_ENABLED, /* Event is enabled */
79 EC_FLAGS_GPE_STORM, /* GPE storm detected */ 81 EC_FLAGS_EVENT_PENDING, /* Event is pending */
82 EC_FLAGS_EVENT_DETECTED, /* Event is detected */
80 EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and 83 EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
81 * OpReg are installed */ 84 * OpReg are installed */
82 EC_FLAGS_BLOCKED, /* Transactions are blocked */ 85 EC_FLAGS_STARTED, /* Driver is started */
86 EC_FLAGS_STOPPED, /* Driver is stopped */
87 EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
88 * current command processing */
83}; 89};
84 90
85#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ 91#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
86#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ 92#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
87 93
94#define ec_debug_ref(ec, fmt, ...) \
95 do { \
96 if (DEBUG_REF) \
97 pr_debug("%lu: " fmt, ec->reference_count, \
98 ## __VA_ARGS__); \
99 } while (0)
100
88/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ 101/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
89static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; 102static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
90module_param(ec_delay, uint, 0644); 103module_param(ec_delay, uint, 0644);
@@ -105,6 +118,7 @@ struct acpi_ec_query_handler {
105 acpi_handle handle; 118 acpi_handle handle;
106 void *data; 119 void *data;
107 u8 query_bit; 120 u8 query_bit;
121 struct kref kref;
108}; 122};
109 123
110struct transaction { 124struct transaction {
@@ -117,8 +131,12 @@ struct transaction {
117 u8 wlen; 131 u8 wlen;
118 u8 rlen; 132 u8 rlen;
119 u8 flags; 133 u8 flags;
134 unsigned long timestamp;
120}; 135};
121 136
137static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
138static void advance_transaction(struct acpi_ec *ec);
139
122struct acpi_ec *boot_ec, *first_ec; 140struct acpi_ec *boot_ec, *first_ec;
123EXPORT_SYMBOL(first_ec); 141EXPORT_SYMBOL(first_ec);
124 142
@@ -129,7 +147,28 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
129static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ 147static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
130 148
131/* -------------------------------------------------------------------------- 149/* --------------------------------------------------------------------------
132 * Transaction Management 150 * Device Flags
151 * -------------------------------------------------------------------------- */
152
153static bool acpi_ec_started(struct acpi_ec *ec)
154{
155 return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
156 !test_bit(EC_FLAGS_STOPPED, &ec->flags);
157}
158
159static bool acpi_ec_flushed(struct acpi_ec *ec)
160{
161 return ec->reference_count == 1;
162}
163
164static bool acpi_ec_has_pending_event(struct acpi_ec *ec)
165{
166 return test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
167 test_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
168}
169
170/* --------------------------------------------------------------------------
171 * EC Registers
133 * -------------------------------------------------------------------------- */ 172 * -------------------------------------------------------------------------- */
134 173
135static inline u8 acpi_ec_read_status(struct acpi_ec *ec) 174static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
@@ -151,6 +190,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
151{ 190{
152 u8 x = inb(ec->data_addr); 191 u8 x = inb(ec->data_addr);
153 192
193 ec->curr->timestamp = jiffies;
154 pr_debug("EC_DATA(R) = 0x%2.2x\n", x); 194 pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
155 return x; 195 return x;
156} 196}
@@ -159,12 +199,14 @@ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
159{ 199{
160 pr_debug("EC_SC(W) = 0x%2.2x\n", command); 200 pr_debug("EC_SC(W) = 0x%2.2x\n", command);
161 outb(command, ec->command_addr); 201 outb(command, ec->command_addr);
202 ec->curr->timestamp = jiffies;
162} 203}
163 204
164static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) 205static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
165{ 206{
166 pr_debug("EC_DATA(W) = 0x%2.2x\n", data); 207 pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
167 outb(data, ec->data_addr); 208 outb(data, ec->data_addr);
209 ec->curr->timestamp = jiffies;
168} 210}
169 211
170#ifdef DEBUG 212#ifdef DEBUG
@@ -188,6 +230,203 @@ static const char *acpi_ec_cmd_string(u8 cmd)
188#define acpi_ec_cmd_string(cmd) "UNDEF" 230#define acpi_ec_cmd_string(cmd) "UNDEF"
189#endif 231#endif
190 232
233/* --------------------------------------------------------------------------
234 * GPE Registers
235 * -------------------------------------------------------------------------- */
236
237static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
238{
239 acpi_event_status gpe_status = 0;
240
241 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
242 return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false;
243}
244
245static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
246{
247 if (open)
248 acpi_enable_gpe(NULL, ec->gpe);
249 else {
250 BUG_ON(ec->reference_count < 1);
251 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
252 }
253 if (acpi_ec_is_gpe_raised(ec)) {
254 /*
255 * On some platforms, EN=1 writes cannot trigger GPE. So
 256 * software needs to manually trigger a pseudo GPE event on
257 * EN=1 writes.
258 */
259 pr_debug("***** Polling quirk *****\n");
260 advance_transaction(ec);
261 }
262}
263
264static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
265{
266 if (close)
267 acpi_disable_gpe(NULL, ec->gpe);
268 else {
269 BUG_ON(ec->reference_count < 1);
270 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
271 }
272}
273
274static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
275{
276 /*
277 * GPE STS is a W1C register, which means:
278 * 1. Software can clear it without worrying about clearing other
279 * GPEs' STS bits when the hardware sets them in parallel.
 280 * 2. As long as software only clears it when it is set, hardware
 281 * won't set it in parallel.
 282 * So software can clear the GPE in any context.
283 * Warning: do not move the check into advance_transaction() as the
284 * EC commands will be sent without GPE raised.
285 */
286 if (!acpi_ec_is_gpe_raised(ec))
287 return;
288 acpi_clear_gpe(NULL, ec->gpe);
289}
290
291/* --------------------------------------------------------------------------
292 * Transaction Management
293 * -------------------------------------------------------------------------- */
294
295static void acpi_ec_submit_request(struct acpi_ec *ec)
296{
297 ec->reference_count++;
298 if (ec->reference_count == 1)
299 acpi_ec_enable_gpe(ec, true);
300}
301
302static void acpi_ec_complete_request(struct acpi_ec *ec)
303{
304 bool flushed = false;
305
306 ec->reference_count--;
307 if (ec->reference_count == 0)
308 acpi_ec_disable_gpe(ec, true);
309 flushed = acpi_ec_flushed(ec);
310 if (flushed)
311 wake_up(&ec->wait);
312}
313
314static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
315{
316 if (!test_bit(flag, &ec->flags)) {
317 acpi_ec_disable_gpe(ec, false);
318 pr_debug("+++++ Polling enabled +++++\n");
319 set_bit(flag, &ec->flags);
320 }
321}
322
323static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
324{
325 if (test_bit(flag, &ec->flags)) {
326 clear_bit(flag, &ec->flags);
327 acpi_ec_enable_gpe(ec, false);
328 pr_debug("+++++ Polling disabled +++++\n");
329 }
330}
331
332/*
 333 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 334 * a flush operation is in
 335 * progress
336 * @ec: the EC device
 337 * @allow_event: whether a pending event may still be handled during flushing
338 *
339 * This function must be used before taking a new action that should hold
340 * the reference count. If this function returns false, then the action
341 * must be discarded or it will prevent the flush operation from being
342 * completed.
343 *
 344 * During flushing, the QR_EC command needs to pass this check when there is
 345 * a pending event, so that the reference count held for the pending event
 346 * can be decreased when the QR_EC command completes.
347 */
348static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec,
349 bool allow_event)
350{
351 if (!acpi_ec_started(ec)) {
352 if (!allow_event || !acpi_ec_has_pending_event(ec))
353 return false;
354 }
355 acpi_ec_submit_request(ec);
356 return true;
357}
358
359static void acpi_ec_submit_event(struct acpi_ec *ec)
360{
361 if (!test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
362 !test_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags))
363 return;
364 /* Hold reference for pending event */
365 if (!acpi_ec_submit_flushable_request(ec, true))
366 return;
367 ec_debug_ref(ec, "Increase event\n");
368 if (!test_and_set_bit(EC_FLAGS_EVENT_PENDING, &ec->flags)) {
369 pr_debug("***** Event query started *****\n");
370 schedule_work(&ec->work);
371 return;
372 }
373 acpi_ec_complete_request(ec);
374 ec_debug_ref(ec, "Decrease event\n");
375}
376
377static void acpi_ec_complete_event(struct acpi_ec *ec)
378{
379 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
380 clear_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
381 pr_debug("***** Event query stopped *****\n");
382 /* Unhold reference for pending event */
383 acpi_ec_complete_request(ec);
384 ec_debug_ref(ec, "Decrease event\n");
385 /* Check if there is another SCI_EVT detected */
386 acpi_ec_submit_event(ec);
387 }
388}
389
390static void acpi_ec_submit_detection(struct acpi_ec *ec)
391{
392 /* Hold reference for query submission */
393 if (!acpi_ec_submit_flushable_request(ec, false))
394 return;
395 ec_debug_ref(ec, "Increase query\n");
396 if (!test_and_set_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags)) {
397 pr_debug("***** Event detection blocked *****\n");
398 acpi_ec_submit_event(ec);
399 return;
400 }
401 acpi_ec_complete_request(ec);
402 ec_debug_ref(ec, "Decrease query\n");
403}
404
405static void acpi_ec_complete_detection(struct acpi_ec *ec)
406{
407 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
408 clear_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags);
409 pr_debug("***** Event detetion unblocked *****\n");
410 /* Unhold reference for query submission */
411 acpi_ec_complete_request(ec);
412 ec_debug_ref(ec, "Decrease query\n");
413 }
414}
415
416static void acpi_ec_enable_event(struct acpi_ec *ec)
417{
418 unsigned long flags;
419
420 spin_lock_irqsave(&ec->lock, flags);
421 set_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags);
422 /*
423 * An event may be pending even with SCI_EVT=0, so QR_EC should
 424 * always be issued right after the EC is started.
425 */
426 acpi_ec_submit_detection(ec);
427 spin_unlock_irqrestore(&ec->lock, flags);
428}
429
191static int ec_transaction_completed(struct acpi_ec *ec) 430static int ec_transaction_completed(struct acpi_ec *ec)
192{ 431{
193 unsigned long flags; 432 unsigned long flags;
@@ -200,7 +439,7 @@ static int ec_transaction_completed(struct acpi_ec *ec)
200 return ret; 439 return ret;
201} 440}
202 441
203static bool advance_transaction(struct acpi_ec *ec) 442static void advance_transaction(struct acpi_ec *ec)
204{ 443{
205 struct transaction *t; 444 struct transaction *t;
206 u8 status; 445 u8 status;
@@ -208,6 +447,12 @@ static bool advance_transaction(struct acpi_ec *ec)
208 447
209 pr_debug("===== %s (%d) =====\n", 448 pr_debug("===== %s (%d) =====\n",
210 in_interrupt() ? "IRQ" : "TASK", smp_processor_id()); 449 in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
450 /*
451 * By always clearing STS before handling all indications, we can
 452 * ensure that a hardware STS 0->1 change after this clearing will
 453 * always trigger a GPE interrupt.
454 */
455 acpi_ec_clear_gpe(ec);
211 status = acpi_ec_read_status(ec); 456 status = acpi_ec_read_status(ec);
212 t = ec->curr; 457 t = ec->curr;
213 if (!t) 458 if (!t)
@@ -223,6 +468,7 @@ static bool advance_transaction(struct acpi_ec *ec)
223 t->rdata[t->ri++] = acpi_ec_read_data(ec); 468 t->rdata[t->ri++] = acpi_ec_read_data(ec);
224 if (t->rlen == t->ri) { 469 if (t->rlen == t->ri) {
225 t->flags |= ACPI_EC_COMMAND_COMPLETE; 470 t->flags |= ACPI_EC_COMMAND_COMPLETE;
471 acpi_ec_complete_event(ec);
226 if (t->command == ACPI_EC_COMMAND_QUERY) 472 if (t->command == ACPI_EC_COMMAND_QUERY)
227 pr_debug("***** Command(%s) hardware completion *****\n", 473 pr_debug("***** Command(%s) hardware completion *****\n",
228 acpi_ec_cmd_string(t->command)); 474 acpi_ec_cmd_string(t->command));
@@ -233,25 +479,29 @@ static bool advance_transaction(struct acpi_ec *ec)
233 } else if (t->wlen == t->wi && 479 } else if (t->wlen == t->wi &&
234 (status & ACPI_EC_FLAG_IBF) == 0) { 480 (status & ACPI_EC_FLAG_IBF) == 0) {
235 t->flags |= ACPI_EC_COMMAND_COMPLETE; 481 t->flags |= ACPI_EC_COMMAND_COMPLETE;
482 acpi_ec_complete_event(ec);
236 wakeup = true; 483 wakeup = true;
237 } 484 }
238 return wakeup; 485 goto out;
239 } else { 486 } else {
240 if (EC_FLAGS_QUERY_HANDSHAKE && 487 if (EC_FLAGS_QUERY_HANDSHAKE &&
241 !(status & ACPI_EC_FLAG_SCI) && 488 !(status & ACPI_EC_FLAG_SCI) &&
242 (t->command == ACPI_EC_COMMAND_QUERY)) { 489 (t->command == ACPI_EC_COMMAND_QUERY)) {
243 t->flags |= ACPI_EC_COMMAND_POLL; 490 t->flags |= ACPI_EC_COMMAND_POLL;
491 acpi_ec_complete_detection(ec);
244 t->rdata[t->ri++] = 0x00; 492 t->rdata[t->ri++] = 0x00;
245 t->flags |= ACPI_EC_COMMAND_COMPLETE; 493 t->flags |= ACPI_EC_COMMAND_COMPLETE;
494 acpi_ec_complete_event(ec);
246 pr_debug("***** Command(%s) software completion *****\n", 495 pr_debug("***** Command(%s) software completion *****\n",
247 acpi_ec_cmd_string(t->command)); 496 acpi_ec_cmd_string(t->command));
248 wakeup = true; 497 wakeup = true;
249 } else if ((status & ACPI_EC_FLAG_IBF) == 0) { 498 } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
250 acpi_ec_write_cmd(ec, t->command); 499 acpi_ec_write_cmd(ec, t->command);
251 t->flags |= ACPI_EC_COMMAND_POLL; 500 t->flags |= ACPI_EC_COMMAND_POLL;
501 acpi_ec_complete_detection(ec);
252 } else 502 } else
253 goto err; 503 goto err;
254 return wakeup; 504 goto out;
255 } 505 }
256err: 506err:
257 /* 507 /*
@@ -259,28 +509,27 @@ err:
259 * otherwise will take a not handled IRQ as a false one. 509 * otherwise will take a not handled IRQ as a false one.
260 */ 510 */
261 if (!(status & ACPI_EC_FLAG_SCI)) { 511 if (!(status & ACPI_EC_FLAG_SCI)) {
262 if (in_interrupt() && t) 512 if (in_interrupt() && t) {
263 ++t->irq_count; 513 if (t->irq_count < ec_storm_threshold)
514 ++t->irq_count;
515 /* Allow triggering on 0 threshold */
516 if (t->irq_count == ec_storm_threshold)
517 acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
518 }
264 } 519 }
265 return wakeup; 520out:
521 if (status & ACPI_EC_FLAG_SCI)
522 acpi_ec_submit_detection(ec);
523 if (wakeup && in_interrupt())
524 wake_up(&ec->wait);
266} 525}
267 526
268static void start_transaction(struct acpi_ec *ec) 527static void start_transaction(struct acpi_ec *ec)
269{ 528{
270 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; 529 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
271 ec->curr->flags = 0; 530 ec->curr->flags = 0;
272 (void)advance_transaction(ec); 531 ec->curr->timestamp = jiffies;
273} 532 advance_transaction(ec);
274
275static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
276
277static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
278{
279 if (state & ACPI_EC_FLAG_SCI) {
280 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
281 return acpi_ec_sync_query(ec, NULL);
282 }
283 return 0;
284} 533}
285 534
286static int ec_poll(struct acpi_ec *ec) 535static int ec_poll(struct acpi_ec *ec)
@@ -291,20 +540,25 @@ static int ec_poll(struct acpi_ec *ec)
291 while (repeat--) { 540 while (repeat--) {
292 unsigned long delay = jiffies + 541 unsigned long delay = jiffies +
293 msecs_to_jiffies(ec_delay); 542 msecs_to_jiffies(ec_delay);
543 unsigned long usecs = ACPI_EC_UDELAY_POLL;
294 do { 544 do {
295 /* don't sleep with disabled interrupts */ 545 /* don't sleep with disabled interrupts */
296 if (EC_FLAGS_MSI || irqs_disabled()) { 546 if (EC_FLAGS_MSI || irqs_disabled()) {
297 udelay(ACPI_EC_MSI_UDELAY); 547 usecs = ACPI_EC_MSI_UDELAY;
548 udelay(usecs);
298 if (ec_transaction_completed(ec)) 549 if (ec_transaction_completed(ec))
299 return 0; 550 return 0;
300 } else { 551 } else {
301 if (wait_event_timeout(ec->wait, 552 if (wait_event_timeout(ec->wait,
302 ec_transaction_completed(ec), 553 ec_transaction_completed(ec),
303 msecs_to_jiffies(1))) 554 usecs_to_jiffies(usecs)))
304 return 0; 555 return 0;
305 } 556 }
306 spin_lock_irqsave(&ec->lock, flags); 557 spin_lock_irqsave(&ec->lock, flags);
307 (void)advance_transaction(ec); 558 if (time_after(jiffies,
559 ec->curr->timestamp +
560 usecs_to_jiffies(usecs)))
561 advance_transaction(ec);
308 spin_unlock_irqrestore(&ec->lock, flags); 562 spin_unlock_irqrestore(&ec->lock, flags);
309 } while (time_before(jiffies, delay)); 563 } while (time_before(jiffies, delay));
310 pr_debug("controller reset, restart transaction\n"); 564 pr_debug("controller reset, restart transaction\n");
@@ -325,21 +579,29 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
325 udelay(ACPI_EC_MSI_UDELAY); 579 udelay(ACPI_EC_MSI_UDELAY);
326 /* start transaction */ 580 /* start transaction */
327 spin_lock_irqsave(&ec->lock, tmp); 581 spin_lock_irqsave(&ec->lock, tmp);
582 /* Enable GPE for command processing (IBF=0/OBF=1) */
583 if (!acpi_ec_submit_flushable_request(ec, true)) {
584 ret = -EINVAL;
585 goto unlock;
586 }
587 ec_debug_ref(ec, "Increase command\n");
328 /* following two actions should be kept atomic */ 588 /* following two actions should be kept atomic */
329 ec->curr = t; 589 ec->curr = t;
330 pr_debug("***** Command(%s) started *****\n", 590 pr_debug("***** Command(%s) started *****\n",
331 acpi_ec_cmd_string(t->command)); 591 acpi_ec_cmd_string(t->command));
332 start_transaction(ec); 592 start_transaction(ec);
333 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
334 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
335 pr_debug("***** Event stopped *****\n");
336 }
337 spin_unlock_irqrestore(&ec->lock, tmp); 593 spin_unlock_irqrestore(&ec->lock, tmp);
338 ret = ec_poll(ec); 594 ret = ec_poll(ec);
339 spin_lock_irqsave(&ec->lock, tmp); 595 spin_lock_irqsave(&ec->lock, tmp);
596 if (t->irq_count == ec_storm_threshold)
597 acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
340 pr_debug("***** Command(%s) stopped *****\n", 598 pr_debug("***** Command(%s) stopped *****\n",
341 acpi_ec_cmd_string(t->command)); 599 acpi_ec_cmd_string(t->command));
342 ec->curr = NULL; 600 ec->curr = NULL;
601 /* Disable GPE for command processing (IBF=0/OBF=1) */
602 acpi_ec_complete_request(ec);
603 ec_debug_ref(ec, "Decrease command\n");
604unlock:
343 spin_unlock_irqrestore(&ec->lock, tmp); 605 spin_unlock_irqrestore(&ec->lock, tmp);
344 return ret; 606 return ret;
345} 607}
@@ -354,10 +616,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
354 if (t->rdata) 616 if (t->rdata)
355 memset(t->rdata, 0, t->rlen); 617 memset(t->rdata, 0, t->rlen);
356 mutex_lock(&ec->mutex); 618 mutex_lock(&ec->mutex);
357 if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
358 status = -EINVAL;
359 goto unlock;
360 }
361 if (ec->global_lock) { 619 if (ec->global_lock) {
362 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 620 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
363 if (ACPI_FAILURE(status)) { 621 if (ACPI_FAILURE(status)) {
@@ -365,26 +623,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
365 goto unlock; 623 goto unlock;
366 } 624 }
367 } 625 }
368 /* disable GPE during transaction if storm is detected */
369 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
370 /* It has to be disabled, so that it doesn't trigger. */
371 acpi_disable_gpe(NULL, ec->gpe);
372 }
373 626
374 status = acpi_ec_transaction_unlocked(ec, t); 627 status = acpi_ec_transaction_unlocked(ec, t);
375 628
376 /* check if we received SCI during transaction */ 629 if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags))
377 ec_check_sci_sync(ec, acpi_ec_read_status(ec));
378 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
379 msleep(1); 630 msleep(1);
380 /* It is safe to enable the GPE outside of the transaction. */
381 acpi_enable_gpe(NULL, ec->gpe);
382 } else if (t->irq_count > ec_storm_threshold) {
383 pr_info("GPE storm detected(%d GPEs), "
384 "transactions will use polling mode\n",
385 t->irq_count);
386 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
387 }
388 if (ec->global_lock) 631 if (ec->global_lock)
389 acpi_release_global_lock(glk); 632 acpi_release_global_lock(glk);
390unlock: 633unlock:
@@ -500,7 +743,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
500 u8 value = 0; 743 u8 value = 0;
501 744
502 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { 745 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
503 status = acpi_ec_sync_query(ec, &value); 746 status = acpi_ec_query(ec, &value);
504 if (status || !value) 747 if (status || !value)
505 break; 748 break;
506 } 749 }
@@ -511,6 +754,57 @@ static void acpi_ec_clear(struct acpi_ec *ec)
511 pr_info("%d stale EC events cleared\n", i); 754 pr_info("%d stale EC events cleared\n", i);
512} 755}
513 756
757static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
758{
759 unsigned long flags;
760
761 spin_lock_irqsave(&ec->lock, flags);
762 if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
763 pr_debug("+++++ Starting EC +++++\n");
764 /* Enable GPE for event processing (SCI_EVT=1) */
765 if (!resuming) {
766 acpi_ec_submit_request(ec);
767 ec_debug_ref(ec, "Increase driver\n");
768 }
769 pr_info("+++++ EC started +++++\n");
770 }
771 spin_unlock_irqrestore(&ec->lock, flags);
772}
773
774static bool acpi_ec_stopped(struct acpi_ec *ec)
775{
776 unsigned long flags;
777 bool flushed;
778
779 spin_lock_irqsave(&ec->lock, flags);
780 flushed = acpi_ec_flushed(ec);
781 spin_unlock_irqrestore(&ec->lock, flags);
782 return flushed;
783}
784
785static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
786{
787 unsigned long flags;
788
789 spin_lock_irqsave(&ec->lock, flags);
790 if (acpi_ec_started(ec)) {
791 pr_debug("+++++ Stopping EC +++++\n");
792 set_bit(EC_FLAGS_STOPPED, &ec->flags);
793 spin_unlock_irqrestore(&ec->lock, flags);
794 wait_event(ec->wait, acpi_ec_stopped(ec));
795 spin_lock_irqsave(&ec->lock, flags);
796 /* Disable GPE for event processing (SCI_EVT=1) */
797 if (!suspending) {
798 acpi_ec_complete_request(ec);
799 ec_debug_ref(ec, "Decrease driver\n");
800 }
801 clear_bit(EC_FLAGS_STARTED, &ec->flags);
802 clear_bit(EC_FLAGS_STOPPED, &ec->flags);
803 pr_info("+++++ EC stopped +++++\n");
804 }
805 spin_unlock_irqrestore(&ec->lock, flags);
806}
807
514void acpi_ec_block_transactions(void) 808void acpi_ec_block_transactions(void)
515{ 809{
516 struct acpi_ec *ec = first_ec; 810 struct acpi_ec *ec = first_ec;
@@ -520,7 +814,7 @@ void acpi_ec_block_transactions(void)
520 814
521 mutex_lock(&ec->mutex); 815 mutex_lock(&ec->mutex);
522 /* Prevent transactions from being carried out */ 816 /* Prevent transactions from being carried out */
523 set_bit(EC_FLAGS_BLOCKED, &ec->flags); 817 acpi_ec_stop(ec, true);
524 mutex_unlock(&ec->mutex); 818 mutex_unlock(&ec->mutex);
525} 819}
526 820
@@ -531,14 +825,11 @@ void acpi_ec_unblock_transactions(void)
531 if (!ec) 825 if (!ec)
532 return; 826 return;
533 827
534 mutex_lock(&ec->mutex);
535 /* Allow transactions to be carried out again */ 828 /* Allow transactions to be carried out again */
536 clear_bit(EC_FLAGS_BLOCKED, &ec->flags); 829 acpi_ec_start(ec, true);
537 830
538 if (EC_FLAGS_CLEAR_ON_RESUME) 831 if (EC_FLAGS_CLEAR_ON_RESUME)
539 acpi_ec_clear(ec); 832 acpi_ec_clear(ec);
540
541 mutex_unlock(&ec->mutex);
542} 833}
543 834
544void acpi_ec_unblock_transactions_early(void) 835void acpi_ec_unblock_transactions_early(void)
@@ -548,36 +839,33 @@ void acpi_ec_unblock_transactions_early(void)
548 * atomic context during wakeup, so we don't need to acquire the mutex). 839 * atomic context during wakeup, so we don't need to acquire the mutex).
549 */ 840 */
550 if (first_ec) 841 if (first_ec)
551 clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags); 842 acpi_ec_start(first_ec, true);
552} 843}
553 844
554static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data) 845/* --------------------------------------------------------------------------
846 Event Management
847 -------------------------------------------------------------------------- */
848static struct acpi_ec_query_handler *
849acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
555{ 850{
556 int result; 851 if (handler)
557 u8 d; 852 kref_get(&handler->kref);
558 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, 853 return handler;
559 .wdata = NULL, .rdata = &d, 854}
560 .wlen = 0, .rlen = 1};
561 855
562 if (!ec || !data) 856static void acpi_ec_query_handler_release(struct kref *kref)
563 return -EINVAL; 857{
564 /* 858 struct acpi_ec_query_handler *handler =
565 * Query the EC to find out which _Qxx method we need to evaluate. 859 container_of(kref, struct acpi_ec_query_handler, kref);
566 * Note that successful completion of the query causes the ACPI_EC_SCI 860
567 * bit to be cleared (and thus clearing the interrupt source). 861 kfree(handler);
568 */ 862}
569 result = acpi_ec_transaction_unlocked(ec, &t); 863
570 if (result) 864static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
571 return result; 865{
572 if (!d) 866 kref_put(&handler->kref, acpi_ec_query_handler_release);
573 return -ENODATA;
574 *data = d;
575 return 0;
576} 867}
577 868
578/* --------------------------------------------------------------------------
579 Event Management
580 -------------------------------------------------------------------------- */
581int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, 869int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
582 acpi_handle handle, acpi_ec_query_func func, 870 acpi_handle handle, acpi_ec_query_func func,
583 void *data) 871 void *data)
@@ -593,6 +881,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
593 handler->func = func; 881 handler->func = func;
594 handler->data = data; 882 handler->data = data;
595 mutex_lock(&ec->mutex); 883 mutex_lock(&ec->mutex);
884 kref_init(&handler->kref);
596 list_add(&handler->node, &ec->list); 885 list_add(&handler->node, &ec->list);
597 mutex_unlock(&ec->mutex); 886 mutex_unlock(&ec->mutex);
598 return 0; 887 return 0;
@@ -602,15 +891,18 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
602void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) 891void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
603{ 892{
604 struct acpi_ec_query_handler *handler, *tmp; 893 struct acpi_ec_query_handler *handler, *tmp;
894 LIST_HEAD(free_list);
605 895
606 mutex_lock(&ec->mutex); 896 mutex_lock(&ec->mutex);
607 list_for_each_entry_safe(handler, tmp, &ec->list, node) { 897 list_for_each_entry_safe(handler, tmp, &ec->list, node) {
608 if (query_bit == handler->query_bit) { 898 if (query_bit == handler->query_bit) {
609 list_del(&handler->node); 899 list_del_init(&handler->node);
610 kfree(handler); 900 list_add(&handler->node, &free_list);
611 } 901 }
612 } 902 }
613 mutex_unlock(&ec->mutex); 903 mutex_unlock(&ec->mutex);
904 list_for_each_entry(handler, &free_list, node)
905 acpi_ec_put_query_handler(handler);
614} 906}
615EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); 907EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
616 908
@@ -626,59 +918,58 @@ static void acpi_ec_run(void *cxt)
626 else if (handler->handle) 918 else if (handler->handle)
627 acpi_evaluate_object(handler->handle, NULL, NULL, NULL); 919 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
628 pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit); 920 pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
629 kfree(handler); 921 acpi_ec_put_query_handler(handler);
630} 922}
631 923
632static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) 924static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
633{ 925{
634 u8 value = 0; 926 u8 value = 0;
635 int status; 927 int result;
636 struct acpi_ec_query_handler *handler, *copy; 928 acpi_status status;
929 struct acpi_ec_query_handler *handler;
930 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
931 .wdata = NULL, .rdata = &value,
932 .wlen = 0, .rlen = 1};
637 933
638 status = acpi_ec_query_unlocked(ec, &value); 934 /*
935 * Query the EC to find out which _Qxx method we need to evaluate.
936 * Note that successful completion of the query causes the ACPI_EC_SCI
937 * bit to be cleared (and thus clearing the interrupt source).
938 */
939 result = acpi_ec_transaction(ec, &t);
940 if (result)
941 return result;
639 if (data) 942 if (data)
640 *data = value; 943 *data = value;
641 if (status) 944 if (!value)
642 return status; 945 return -ENODATA;
643 946
947 mutex_lock(&ec->mutex);
644 list_for_each_entry(handler, &ec->list, node) { 948 list_for_each_entry(handler, &ec->list, node) {
645 if (value == handler->query_bit) { 949 if (value == handler->query_bit) {
646 /* have custom handler for this bit */ 950 /* have custom handler for this bit */
647 copy = kmalloc(sizeof(*handler), GFP_KERNEL); 951 handler = acpi_ec_get_query_handler(handler);
648 if (!copy)
649 return -ENOMEM;
650 memcpy(copy, handler, sizeof(*copy));
651 pr_debug("##### Query(0x%02x) scheduled #####\n", 952 pr_debug("##### Query(0x%02x) scheduled #####\n",
652 handler->query_bit); 953 handler->query_bit);
653 return acpi_os_execute((copy->func) ? 954 status = acpi_os_execute((handler->func) ?
654 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, 955 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
655 acpi_ec_run, copy); 956 acpi_ec_run, handler);
957 if (ACPI_FAILURE(status))
958 result = -EBUSY;
959 break;
656 } 960 }
657 } 961 }
658 return 0;
659}
660
661static void acpi_ec_gpe_query(void *ec_cxt)
662{
663 struct acpi_ec *ec = ec_cxt;
664
665 if (!ec)
666 return;
667 mutex_lock(&ec->mutex);
668 acpi_ec_sync_query(ec, NULL);
669 mutex_unlock(&ec->mutex); 962 mutex_unlock(&ec->mutex);
963 return result;
670} 964}
671 965
672static int ec_check_sci(struct acpi_ec *ec, u8 state) 966static void acpi_ec_gpe_poller(struct work_struct *work)
673{ 967{
674 if (state & ACPI_EC_FLAG_SCI) { 968 struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
675 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { 969
676 pr_debug("***** Event started *****\n"); 970 pr_debug("***** Event poller started *****\n");
677 return acpi_os_execute(OSL_NOTIFY_HANDLER, 971 acpi_ec_query(ec, NULL);
678 acpi_ec_gpe_query, ec); 972 pr_debug("***** Event poller stopped *****\n");
679 }
680 }
681 return 0;
682} 973}
683 974
684static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, 975static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -688,11 +979,9 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
688 struct acpi_ec *ec = data; 979 struct acpi_ec *ec = data;
689 980
690 spin_lock_irqsave(&ec->lock, flags); 981 spin_lock_irqsave(&ec->lock, flags);
691 if (advance_transaction(ec)) 982 advance_transaction(ec);
692 wake_up(&ec->wait);
693 spin_unlock_irqrestore(&ec->lock, flags); 983 spin_unlock_irqrestore(&ec->lock, flags);
694 ec_check_sci(ec, acpi_ec_read_status(ec)); 984 return ACPI_INTERRUPT_HANDLED;
695 return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
696} 985}
697 986
698/* -------------------------------------------------------------------------- 987/* --------------------------------------------------------------------------
@@ -750,11 +1039,11 @@ static struct acpi_ec *make_acpi_ec(void)
750 1039
751 if (!ec) 1040 if (!ec)
752 return NULL; 1041 return NULL;
753 ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
754 mutex_init(&ec->mutex); 1042 mutex_init(&ec->mutex);
755 init_waitqueue_head(&ec->wait); 1043 init_waitqueue_head(&ec->wait);
756 INIT_LIST_HEAD(&ec->list); 1044 INIT_LIST_HEAD(&ec->list);
757 spin_lock_init(&ec->lock); 1045 spin_lock_init(&ec->lock);
1046 INIT_WORK(&ec->work, acpi_ec_gpe_poller);
758 return ec; 1047 return ec;
759} 1048}
760 1049
@@ -810,13 +1099,13 @@ static int ec_install_handlers(struct acpi_ec *ec)
810 1099
811 if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) 1100 if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
812 return 0; 1101 return 0;
813 status = acpi_install_gpe_handler(NULL, ec->gpe, 1102 status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
814 ACPI_GPE_EDGE_TRIGGERED, 1103 ACPI_GPE_EDGE_TRIGGERED,
815 &acpi_ec_gpe_handler, ec); 1104 &acpi_ec_gpe_handler, ec);
816 if (ACPI_FAILURE(status)) 1105 if (ACPI_FAILURE(status))
817 return -ENODEV; 1106 return -ENODEV;
818 1107
819 acpi_enable_gpe(NULL, ec->gpe); 1108 acpi_ec_start(ec, false);
820 status = acpi_install_address_space_handler(ec->handle, 1109 status = acpi_install_address_space_handler(ec->handle,
821 ACPI_ADR_SPACE_EC, 1110 ACPI_ADR_SPACE_EC,
822 &acpi_ec_space_handler, 1111 &acpi_ec_space_handler,
@@ -831,7 +1120,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
831 pr_err("Fail in evaluating the _REG object" 1120 pr_err("Fail in evaluating the _REG object"
832 " of EC device. Broken bios is suspected.\n"); 1121 " of EC device. Broken bios is suspected.\n");
833 } else { 1122 } else {
834 acpi_disable_gpe(NULL, ec->gpe); 1123 acpi_ec_stop(ec, false);
835 acpi_remove_gpe_handler(NULL, ec->gpe, 1124 acpi_remove_gpe_handler(NULL, ec->gpe,
836 &acpi_ec_gpe_handler); 1125 &acpi_ec_gpe_handler);
837 return -ENODEV; 1126 return -ENODEV;
@@ -846,7 +1135,7 @@ static void ec_remove_handlers(struct acpi_ec *ec)
846{ 1135{
847 if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) 1136 if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
848 return; 1137 return;
849 acpi_disable_gpe(NULL, ec->gpe); 1138 acpi_ec_stop(ec, false);
850 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, 1139 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
851 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) 1140 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
852 pr_err("failed to remove space handler\n"); 1141 pr_err("failed to remove space handler\n");
@@ -900,14 +1189,11 @@ static int acpi_ec_add(struct acpi_device *device)
900 ret = ec_install_handlers(ec); 1189 ret = ec_install_handlers(ec);
901 1190
902 /* EC is fully operational, allow queries */ 1191 /* EC is fully operational, allow queries */
903 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 1192 acpi_ec_enable_event(ec);
904 1193
905 /* Clear stale _Q events if hardware might require that */ 1194 /* Clear stale _Q events if hardware might require that */
906 if (EC_FLAGS_CLEAR_ON_RESUME) { 1195 if (EC_FLAGS_CLEAR_ON_RESUME)
907 mutex_lock(&ec->mutex);
908 acpi_ec_clear(ec); 1196 acpi_ec_clear(ec);
909 mutex_unlock(&ec->mutex);
910 }
911 return ret; 1197 return ret;
912} 1198}
913 1199
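The ec.c rework above replaces the old EC_FLAGS_BLOCKED flag with a reference count: every in-flight command or event takes a reference via acpi_ec_submit_request(), drops it via acpi_ec_complete_request(), and acpi_ec_stop() waits until only the driver's own reference remains (acpi_ec_flushed()). The following is a minimal userspace sketch of that pattern, not part of the patch; it uses pthreads in place of the kernel spinlock and wait queue, and all names (ec_like, ec_stop_and_flush, ...) are hypothetical.

/* Illustrative sketch of the reference-count based flushing scheme. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ec_like {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	unsigned long reference_count;	/* 1 == only the driver holds it */
	bool started;
};

/* Take a reference for a new piece of in-flight work (command or event). */
static bool ec_submit_request(struct ec_like *ec)
{
	bool ok;

	pthread_mutex_lock(&ec->lock);
	ok = ec->started;
	if (ok)
		ec->reference_count++;
	pthread_mutex_unlock(&ec->lock);
	return ok;
}

/* Drop the reference; wake the flusher when only its own count remains. */
static void ec_complete_request(struct ec_like *ec)
{
	pthread_mutex_lock(&ec->lock);
	if (--ec->reference_count == 1)
		pthread_cond_broadcast(&ec->wait);
	pthread_mutex_unlock(&ec->lock);
}

/* Stop accepting work and wait for everything in flight to finish. */
static void ec_stop_and_flush(struct ec_like *ec)
{
	pthread_mutex_lock(&ec->lock);
	ec->started = false;
	while (ec->reference_count != 1)
		pthread_cond_wait(&ec->wait, &ec->lock);
	pthread_mutex_unlock(&ec->lock);
	printf("flushed\n");
}

int main(void)
{
	struct ec_like ec = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.reference_count = 1,	/* the driver's own reference */
		.started = true,
	};

	if (ec_submit_request(&ec))	/* a command starts... */
		ec_complete_request(&ec);	/* ...and finishes */
	ec_stop_and_flush(&ec);
	return 0;
}

The sketch deliberately omits the allow_event special case that acpi_ec_submit_flushable_request() adds so that a pending QR_EC can still complete while the driver is being flushed.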
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 163e82f536fa..56b321aa2b1c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -35,6 +35,13 @@ void acpi_int340x_thermal_init(void);
35int acpi_sysfs_init(void); 35int acpi_sysfs_init(void);
36void acpi_container_init(void); 36void acpi_container_init(void);
37void acpi_memory_hotplug_init(void); 37void acpi_memory_hotplug_init(void);
38#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
39int acpi_ioapic_add(struct acpi_pci_root *root);
40int acpi_ioapic_remove(struct acpi_pci_root *root);
41#else
42static inline int acpi_ioapic_add(struct acpi_pci_root *root) { return 0; }
43static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
44#endif
38#ifdef CONFIG_ACPI_DOCK 45#ifdef CONFIG_ACPI_DOCK
39void register_dock_dependent_device(struct acpi_device *adev, 46void register_dock_dependent_device(struct acpi_device *adev,
40 acpi_handle dshandle); 47 acpi_handle dshandle);
@@ -68,6 +75,8 @@ static inline void acpi_debugfs_init(void) { return; }
68#endif 75#endif
69void acpi_lpss_init(void); 76void acpi_lpss_init(void);
70 77
78void acpi_apd_init(void);
79
71acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src); 80acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
72bool acpi_queue_hotplug_work(struct work_struct *work); 81bool acpi_queue_hotplug_work(struct work_struct *work);
73void acpi_device_hotplug(struct acpi_device *adev, u32 src); 82void acpi_device_hotplug(struct acpi_device *adev, u32 src);
@@ -122,11 +131,13 @@ struct acpi_ec {
122 unsigned long data_addr; 131 unsigned long data_addr;
123 unsigned long global_lock; 132 unsigned long global_lock;
124 unsigned long flags; 133 unsigned long flags;
134 unsigned long reference_count;
125 struct mutex mutex; 135 struct mutex mutex;
126 wait_queue_head_t wait; 136 wait_queue_head_t wait;
127 struct list_head list; 137 struct list_head list;
128 struct transaction *curr; 138 struct transaction *curr;
129 spinlock_t lock; 139 spinlock_t lock;
140 struct work_struct work;
130}; 141};
131 142
132extern struct acpi_ec *first_ec; 143extern struct acpi_ec *first_ec;
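internal.h above grows the reference_count and work fields used by the ec.c rework, and ec.c now manages query handlers with a kref so that acpi_ec_remove_query_handler() can drop the list reference while a query that already grabbed the handler keeps it alive. Below is a rough single-threaded userspace sketch of that get/put lifetime, not part of the patch; the real code uses an atomic kref under ec->mutex, and the names here (query_handler, handler_get/put) are made up.

/* Illustrative sketch of the kref-style handler lifetime. */
#include <stdio.h>
#include <stdlib.h>

struct query_handler {
	unsigned int refcount;	/* stands in for struct kref */
	unsigned char query_bit;
};

static struct query_handler *handler_get(struct query_handler *h)
{
	if (h)
		h->refcount++;	/* kref_get() */
	return h;
}

static void handler_put(struct query_handler *h)
{
	if (h && --h->refcount == 0) {	/* kref_put() release callback */
		printf("releasing handler for query 0x%02x\n", h->query_bit);
		free(h);
	}
}

int main(void)
{
	struct query_handler *h = calloc(1, sizeof(*h));

	if (!h)
		return 1;
	h->refcount = 1;	/* kref_init(): the registration reference */
	h->query_bit = 0x50;

	handler_get(h);		/* a running query grabs the handler... */
	handler_put(h);		/* ...removal drops the list reference */
	handler_put(h);		/* ...the query finishes and frees it */
	return 0;
}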
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
new file mode 100644
index 000000000000..ccdc8db16bb8
--- /dev/null
+++ b/drivers/acpi/ioapic.c
@@ -0,0 +1,229 @@
1/*
2 * IOAPIC/IOxAPIC/IOSAPIC driver
3 *
4 * Copyright (C) 2009 Fujitsu Limited.
5 * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
6 *
7 * Copyright (C) 2014 Intel Corporation
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Based on original drivers/pci/ioapic.c
14 * Yinghai Lu <yinghai@kernel.org>
15 * Jiang Liu <jiang.liu@intel.com>
16 */
17
18/*
19 * This driver manages I/O APICs added by hotplug after boot.
20 * We try to claim all I/O APIC devices, but those present at boot were
21 * registered when we parsed the ACPI MADT.
22 */
23
24#define pr_fmt(fmt) "ACPI : IOAPIC: " fmt
25
26#include <linux/slab.h>
27#include <linux/acpi.h>
28#include <linux/pci.h>
29#include <acpi/acpi.h>
30
31struct acpi_pci_ioapic {
32 acpi_handle root_handle;
33 acpi_handle handle;
34 u32 gsi_base;
35 struct resource res;
36 struct pci_dev *pdev;
37 struct list_head list;
38};
39
40static LIST_HEAD(ioapic_list);
41static DEFINE_MUTEX(ioapic_list_lock);
42
43static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
44{
45 struct resource *res = data;
46 struct resource_win win;
47
48 res->flags = 0;
49 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0)
50 return AE_OK;
51
52 if (!acpi_dev_resource_memory(acpi_res, res)) {
53 if (acpi_dev_resource_address_space(acpi_res, &win) ||
54 acpi_dev_resource_ext_address_space(acpi_res, &win))
55 *res = win.res;
56 }
57 if ((res->flags & IORESOURCE_PREFETCH) ||
58 (res->flags & IORESOURCE_DISABLED))
59 res->flags = 0;
60
61 return AE_CTRL_TERMINATE;
62}
63
64static bool acpi_is_ioapic(acpi_handle handle, char **type)
65{
66 acpi_status status;
67 struct acpi_device_info *info;
68 char *hid = NULL;
69 bool match = false;
70
71 if (!acpi_has_method(handle, "_GSB"))
72 return false;
73
74 status = acpi_get_object_info(handle, &info);
75 if (ACPI_SUCCESS(status)) {
76 if (info->valid & ACPI_VALID_HID)
77 hid = info->hardware_id.string;
78 if (hid) {
79 if (strcmp(hid, "ACPI0009") == 0) {
80 *type = "IOxAPIC";
81 match = true;
82 } else if (strcmp(hid, "ACPI000A") == 0) {
83 *type = "IOAPIC";
84 match = true;
85 }
86 }
87 kfree(info);
88 }
89
90 return match;
91}
92
93static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl,
94 void *context, void **rv)
95{
96 acpi_status status;
97 unsigned long long gsi_base;
98 struct acpi_pci_ioapic *ioapic;
99 struct pci_dev *dev = NULL;
100 struct resource *res = NULL;
101 char *type = NULL;
102
103 if (!acpi_is_ioapic(handle, &type))
104 return AE_OK;
105
106 mutex_lock(&ioapic_list_lock);
107 list_for_each_entry(ioapic, &ioapic_list, list)
108 if (ioapic->handle == handle) {
109 mutex_unlock(&ioapic_list_lock);
110 return AE_OK;
111 }
112
113 status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsi_base);
114 if (ACPI_FAILURE(status)) {
115 acpi_handle_warn(handle, "failed to evaluate _GSB method\n");
116 goto exit;
117 }
118
119 ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
120 if (!ioapic) {
121 pr_err("cannot allocate memory for new IOAPIC\n");
122 goto exit;
123 } else {
124 ioapic->root_handle = (acpi_handle)context;
125 ioapic->handle = handle;
126 ioapic->gsi_base = (u32)gsi_base;
127 INIT_LIST_HEAD(&ioapic->list);
128 }
129
130 if (acpi_ioapic_registered(handle, (u32)gsi_base))
131 goto done;
132
133 dev = acpi_get_pci_dev(handle);
134 if (dev && pci_resource_len(dev, 0)) {
135 if (pci_enable_device(dev) < 0)
136 goto exit_put;
137 pci_set_master(dev);
138 if (pci_request_region(dev, 0, type))
139 goto exit_disable;
140 res = &dev->resource[0];
141 ioapic->pdev = dev;
142 } else {
143 pci_dev_put(dev);
144 dev = NULL;
145
146 res = &ioapic->res;
147 acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, res);
148 if (res->flags == 0) {
149 acpi_handle_warn(handle, "failed to get resource\n");
150 goto exit_free;
151 } else if (request_resource(&iomem_resource, res)) {
152 acpi_handle_warn(handle, "failed to insert resource\n");
153 goto exit_free;
154 }
155 }
156
157 if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) {
158 acpi_handle_warn(handle, "failed to register IOAPIC\n");
159 goto exit_release;
160 }
161done:
162 list_add(&ioapic->list, &ioapic_list);
163 mutex_unlock(&ioapic_list_lock);
164
165 if (dev)
166 dev_info(&dev->dev, "%s at %pR, GSI %u\n",
167 type, res, (u32)gsi_base);
168 else
169 acpi_handle_info(handle, "%s at %pR, GSI %u\n",
170 type, res, (u32)gsi_base);
171
172 return AE_OK;
173
174exit_release:
175 if (dev)
176 pci_release_region(dev, 0);
177 else
178 release_resource(res);
179exit_disable:
180 if (dev)
181 pci_disable_device(dev);
182exit_put:
183 pci_dev_put(dev);
184exit_free:
185 kfree(ioapic);
186exit:
187 mutex_unlock(&ioapic_list_lock);
188 *(acpi_status *)rv = AE_ERROR;
189 return AE_OK;
190}
191
192int acpi_ioapic_add(struct acpi_pci_root *root)
193{
194 acpi_status status, retval = AE_OK;
195
196 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root->device->handle,
197 UINT_MAX, handle_ioapic_add, NULL,
198 root->device->handle, (void **)&retval);
199
200 return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
201}
202
203int acpi_ioapic_remove(struct acpi_pci_root *root)
204{
205 int retval = 0;
206 struct acpi_pci_ioapic *ioapic, *tmp;
207
208 mutex_lock(&ioapic_list_lock);
209 list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
210 if (root->device->handle != ioapic->root_handle)
211 continue;
212
213 if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
214 retval = -EBUSY;
215
216 if (ioapic->pdev) {
217 pci_release_region(ioapic->pdev, 0);
218 pci_disable_device(ioapic->pdev);
219 pci_dev_put(ioapic->pdev);
220 } else if (ioapic->res.flags && ioapic->res.parent) {
221 release_resource(&ioapic->res);
222 }
223 list_del(&ioapic->list);
224 kfree(ioapic);
225 }
226 mutex_unlock(&ioapic_list_lock);
227
228 return retval;
229}
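handle_ioapic_add() follows the usual acpi_walk_namespace() convention: the callback returns AE_OK even on failure so the walk still visits the remaining devices, and it reports the failure through the rv out-parameter that acpi_ioapic_add() checks together with the walk status. The following is a small self-contained sketch of that convention with a mocked walk, not part of the patch; nothing here is ACPI API and all names are invented.

/* Illustrative sketch of the "keep walking, report via rv" convention. */
#include <stdio.h>

enum walk_status { WALK_OK, WALK_ERROR };

typedef enum walk_status (*walk_cb)(int node, void *context, void **rv);

/* Stand-in for acpi_walk_namespace(): visit a fixed set of nodes. */
static enum walk_status walk_nodes(walk_cb cb, void *context, void **rv)
{
	for (int node = 0; node < 4; node++)
		if (cb(node, context, rv) != WALK_OK)
			return WALK_ERROR;	/* callback aborted the walk */
	return WALK_OK;
}

static enum walk_status claim_node(int node, void *context, void **rv)
{
	(void)context;
	if (node == 2) {
		/* Record the failure but keep walking the remaining nodes. */
		*(enum walk_status *)rv = WALK_ERROR;
		fprintf(stderr, "node %d: claim failed\n", node);
	}
	return WALK_OK;
}

int main(void)
{
	enum walk_status status, retval = WALK_OK;

	status = walk_nodes(claim_node, NULL, (void **)&retval);
	/* Mirror acpi_ioapic_add(): succeed only if both walk and claims worked. */
	return (status == WALK_OK && retval == WALK_OK) ? 0 : 1;
}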
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 24b5476449a1..1333cbdc3ea2 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -177,12 +177,7 @@ static int __init slit_valid(struct acpi_table_slit *slit)
177 177
178static int __init acpi_parse_slit(struct acpi_table_header *table) 178static int __init acpi_parse_slit(struct acpi_table_header *table)
179{ 179{
180 struct acpi_table_slit *slit; 180 struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
181
182 if (!table)
183 return -EINVAL;
184
185 slit = (struct acpi_table_slit *)table;
186 181
187 if (!slit_valid(slit)) { 182 if (!slit_valid(slit)) {
188 printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n"); 183 printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
@@ -260,11 +255,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
260 255
261static int __init acpi_parse_srat(struct acpi_table_header *table) 256static int __init acpi_parse_srat(struct acpi_table_header *table)
262{ 257{
263 struct acpi_table_srat *srat; 258 struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
264 if (!table)
265 return -EINVAL;
266 259
267 srat = (struct acpi_table_srat *)table;
268 acpi_srat_revision = srat->header.revision; 260 acpi_srat_revision = srat->header.revision;
269 261
270 /* Real work done in acpi_table_parse_srat below. */ 262 /* Real work done in acpi_table_parse_srat below. */
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b1def411c0b8..e7f718d6918a 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -485,14 +485,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
485 if (!pin || !dev->irq_managed || dev->irq <= 0) 485 if (!pin || !dev->irq_managed || dev->irq <= 0)
486 return; 486 return;
487 487
488 /* Keep IOAPIC pin configuration when suspending */
489 if (dev->dev.power.is_prepared)
490 return;
491#ifdef CONFIG_PM
492 if (dev->dev.power.runtime_status == RPM_SUSPENDING)
493 return;
494#endif
495
496 entry = acpi_pci_irq_lookup(dev, pin); 488 entry = acpi_pci_irq_lookup(dev, pin);
497 if (!entry) 489 if (!entry)
498 return; 490 return;
@@ -513,5 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
513 if (gsi >= 0) { 505 if (gsi >= 0) {
514 acpi_unregister_gsi(gsi); 506 acpi_unregister_gsi(gsi);
515 dev->irq_managed = 0; 507 dev->irq_managed = 0;
508 dev->irq = 0;
516 } 509 }
517} 510}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c6bcb8c719d8..68a5f712cd19 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -112,10 +112,10 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
112 if (ACPI_FAILURE(status)) 112 if (ACPI_FAILURE(status))
113 return AE_OK; 113 return AE_OK;
114 114
115 if ((address.address_length > 0) && 115 if ((address.address.address_length > 0) &&
116 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) { 116 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
117 res->start = address.minimum; 117 res->start = address.address.minimum;
118 res->end = address.minimum + address.address_length - 1; 118 res->end = address.address.minimum + address.address.address_length - 1;
119 } 119 }
120 120
121 return AE_OK; 121 return AE_OK;
@@ -621,6 +621,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
621 if (hotadd) { 621 if (hotadd) {
622 pcibios_resource_survey_bus(root->bus); 622 pcibios_resource_survey_bus(root->bus);
623 pci_assign_unassigned_root_bus_resources(root->bus); 623 pci_assign_unassigned_root_bus_resources(root->bus);
624 acpi_ioapic_add(root);
624 } 625 }
625 626
626 pci_lock_rescan_remove(); 627 pci_lock_rescan_remove();
@@ -644,6 +645,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
644 645
645 pci_stop_root_bus(root->bus); 646 pci_stop_root_bus(root->bus);
646 647
648 WARN_ON(acpi_ioapic_remove(root));
649
647 device_set_run_wake(root->bus->bridge, false); 650 device_set_run_wake(root->bus->bridge, false);
648 pci_acpi_remove_bus_pm_notifier(device); 651 pci_acpi_remove_bus_pm_notifier(device);
649 652
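
The address.minimum → address.address.minimum changes track an ACPICA reorganization that moved the per-range fields of the address-space descriptors into an embedded attribute structure. Roughly, as a sketch rather than a verbatim copy of the ACPICA headers:

/* Sketch of the reorganized ACPICA layout; field names mirror the headers,
 * but this is not a verbatim copy. */
struct acpi_address64_attribute {
        u64 granularity;
        u64 minimum;
        u64 maximum;
        u64 translation_offset;
        u64 address_length;
};

struct acpi_resource_address64 {
        u8 resource_type;
        u8 producer_consumer;
        u8 decode;
        u8 min_address_fixed;
        u8 max_address_fixed;
        union acpi_resource_attribute info;     /* type-specific flags (ACPICA) */
        struct acpi_address64_attribute address;        /* previously flattened */
};
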
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 02e48394276c..7962651cdbd4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -4,6 +4,10 @@
4 * 4 *
5 * Alex Chiang <achiang@hp.com> 5 * Alex Chiang <achiang@hp.com>
6 * - Unified x86/ia64 implementations 6 * - Unified x86/ia64 implementations
7 *
8 * I/O APIC hotplug support
9 * Yinghai Lu <yinghai@kernel.org>
10 * Jiang Liu <jiang.liu@intel.com>
7 */ 11 */
8#include <linux/export.h> 12#include <linux/export.h>
9#include <linux/acpi.h> 13#include <linux/acpi.h>
@@ -12,6 +16,21 @@
12#define _COMPONENT ACPI_PROCESSOR_COMPONENT 16#define _COMPONENT ACPI_PROCESSOR_COMPONENT
13ACPI_MODULE_NAME("processor_core"); 17ACPI_MODULE_NAME("processor_core");
14 18
19static struct acpi_table_madt *get_madt_table(void)
20{
21 static struct acpi_table_madt *madt;
22 static int read_madt;
23
24 if (!read_madt) {
25 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
26 (struct acpi_table_header **)&madt)))
27 madt = NULL;
28 read_madt++;
29 }
30
31 return madt;
32}
33
15static int map_lapic_id(struct acpi_subtable_header *entry, 34static int map_lapic_id(struct acpi_subtable_header *entry,
16 u32 acpi_id, int *apic_id) 35 u32 acpi_id, int *apic_id)
17{ 36{
@@ -67,17 +86,10 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
67static int map_madt_entry(int type, u32 acpi_id) 86static int map_madt_entry(int type, u32 acpi_id)
68{ 87{
69 unsigned long madt_end, entry; 88 unsigned long madt_end, entry;
70 static struct acpi_table_madt *madt;
71 static int read_madt;
72 int phys_id = -1; /* CPU hardware ID */ 89 int phys_id = -1; /* CPU hardware ID */
90 struct acpi_table_madt *madt;
73 91
74 if (!read_madt) { 92 madt = get_madt_table();
75 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
76 (struct acpi_table_header **)&madt)))
77 madt = NULL;
78 read_madt++;
79 }
80
81 if (!madt) 93 if (!madt)
82 return phys_id; 94 return phys_id;
83 95
@@ -203,3 +215,96 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
203 return acpi_map_cpuid(phys_id, acpi_id); 215 return acpi_map_cpuid(phys_id, acpi_id);
204} 216}
205EXPORT_SYMBOL_GPL(acpi_get_cpuid); 217EXPORT_SYMBOL_GPL(acpi_get_cpuid);
218
219#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
220static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
221 u64 *phys_addr, int *ioapic_id)
222{
223 struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry;
224
225 if (ioapic->global_irq_base != gsi_base)
226 return 0;
227
228 *phys_addr = ioapic->address;
229 *ioapic_id = ioapic->id;
230 return 1;
231}
232
233static int parse_madt_ioapic_entry(u32 gsi_base, u64 *phys_addr)
234{
235 struct acpi_subtable_header *hdr;
236 unsigned long madt_end, entry;
237 struct acpi_table_madt *madt;
238 int apic_id = -1;
239
240 madt = get_madt_table();
241 if (!madt)
242 return apic_id;
243
244 entry = (unsigned long)madt;
245 madt_end = entry + madt->header.length;
246
247 /* Parse all entries looking for a match. */
248 entry += sizeof(struct acpi_table_madt);
249 while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
250 hdr = (struct acpi_subtable_header *)entry;
251 if (hdr->type == ACPI_MADT_TYPE_IO_APIC &&
252 get_ioapic_id(hdr, gsi_base, phys_addr, &apic_id))
253 break;
254 else
255 entry += hdr->length;
256 }
257
258 return apic_id;
259}
260
261static int parse_mat_ioapic_entry(acpi_handle handle, u32 gsi_base,
262 u64 *phys_addr)
263{
264 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
265 struct acpi_subtable_header *header;
266 union acpi_object *obj;
267 int apic_id = -1;
268
269 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
270 goto exit;
271
272 if (!buffer.length || !buffer.pointer)
273 goto exit;
274
275 obj = buffer.pointer;
276 if (obj->type != ACPI_TYPE_BUFFER ||
277 obj->buffer.length < sizeof(struct acpi_subtable_header))
278 goto exit;
279
280 header = (struct acpi_subtable_header *)obj->buffer.pointer;
281 if (header->type == ACPI_MADT_TYPE_IO_APIC)
282 get_ioapic_id(header, gsi_base, phys_addr, &apic_id);
283
284exit:
285 kfree(buffer.pointer);
286 return apic_id;
287}
288
289/**
290 * acpi_get_ioapic_id - Get IOAPIC ID and physical address matching @gsi_base
291 * @handle: ACPI object for IOAPIC device
292 * @gsi_base: GSI base to match with
293 * @phys_addr: Pointer to store physical address of matching IOAPIC record
294 *
295 * Walk resources returned by the ACPI _MAT method, then the ACPI MADT table,
296 * to search for an ACPI IOAPIC record matching @gsi_base.
297 * Return the IOAPIC ID and store the physical address in @phys_addr if a
298 * match is found, otherwise return <0.
299 */
300int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr)
301{
302 int apic_id;
303
304 apic_id = parse_mat_ioapic_entry(handle, gsi_base, phys_addr);
305 if (apic_id == -1)
306 apic_id = parse_madt_ioapic_entry(gsi_base, phys_addr);
307
308 return apic_id;
309}
310#endif /* CONFIG_ACPI_HOTPLUG_IOAPIC */
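
A hypothetical caller of the new helper, for instance from an IOAPIC hot-add path: resolve the IOAPIC ID and MMIO base for a GSI base and hand them to the existing acpi_register_ioapic() arch hook. The wrapper function below is illustrative only and not part of the patch:

#include <linux/acpi.h>

/* Illustrative only: register the IOAPIC matching @gsi_base. */
static int example_hot_add_ioapic(acpi_handle handle, u32 gsi_base)
{
        u64 phys_addr;
        int id;

        id = acpi_get_ioapic_id(handle, gsi_base, &phys_addr);
        if (id < 0)
                return -ENODEV;         /* no matching _MAT or MADT entry */

        return acpi_register_ioapic(handle, phys_addr, gsi_base);
}
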
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 87b704e41877..c256bd7fbd78 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -681,15 +681,13 @@ static int acpi_idle_bm_check(void)
681} 681}
682 682
683/** 683/**
684 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry 684 * acpi_idle_do_entry - enter idle state using the appropriate method
685 * @cx: cstate data 685 * @cx: cstate data
686 * 686 *
687 * Caller disables interrupt before call and enables interrupt after return. 687 * Caller disables interrupt before call and enables interrupt after return.
688 */ 688 */
689static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) 689static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
690{ 690{
691 /* Don't trace irqs off for idle */
692 stop_critical_timings();
693 if (cx->entry_method == ACPI_CSTATE_FFH) { 691 if (cx->entry_method == ACPI_CSTATE_FFH) {
694 /* Call into architectural FFH based C-state */ 692 /* Call into architectural FFH based C-state */
695 acpi_processor_ffh_cstate_enter(cx); 693 acpi_processor_ffh_cstate_enter(cx);
@@ -703,38 +701,9 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
703 gets asserted in time to freeze execution properly. */ 701 gets asserted in time to freeze execution properly. */
704 inl(acpi_gbl_FADT.xpm_timer_block.address); 702 inl(acpi_gbl_FADT.xpm_timer_block.address);
705 } 703 }
706 start_critical_timings();
707} 704}
708 705
709/** 706/**
710 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
711 * @dev: the target CPU
712 * @drv: cpuidle driver containing cpuidle state info
713 * @index: index of target state
714 *
715 * This is equivalent to the HALT instruction.
716 */
717static int acpi_idle_enter_c1(struct cpuidle_device *dev,
718 struct cpuidle_driver *drv, int index)
719{
720 struct acpi_processor *pr;
721 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
722
723 pr = __this_cpu_read(processors);
724
725 if (unlikely(!pr))
726 return -EINVAL;
727
728 lapic_timer_state_broadcast(pr, cx, 1);
729 acpi_idle_do_entry(cx);
730
731 lapic_timer_state_broadcast(pr, cx, 0);
732
733 return index;
734}
735
736
737/**
738 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) 707 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
739 * @dev: the target CPU 708 * @dev: the target CPU
740 * @index: the index of suggested state 709 * @index: the index of suggested state
@@ -761,47 +730,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
761 return 0; 730 return 0;
762} 731}
763 732
764/** 733static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
765 * acpi_idle_enter_simple - enters an ACPI state without BM handling
766 * @dev: the target CPU
767 * @drv: cpuidle driver with cpuidle state information
768 * @index: the index of suggested state
769 */
770static int acpi_idle_enter_simple(struct cpuidle_device *dev,
771 struct cpuidle_driver *drv, int index)
772{ 734{
773 struct acpi_processor *pr; 735 return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
774 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 736 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
775 737 !pr->flags.has_cst;
776 pr = __this_cpu_read(processors);
777
778 if (unlikely(!pr))
779 return -EINVAL;
780
781#ifdef CONFIG_HOTPLUG_CPU
782 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
783 !pr->flags.has_cst &&
784 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
785 return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
786#endif
787
788 /*
789 * Must be done before busmaster disable as we might need to
790 * access HPET !
791 */
792 lapic_timer_state_broadcast(pr, cx, 1);
793
794 if (cx->type == ACPI_STATE_C3)
795 ACPI_FLUSH_CPU_CACHE();
796
797 /* Tell the scheduler that we are going deep-idle: */
798 sched_clock_idle_sleep_event();
799 acpi_idle_do_entry(cx);
800
801 sched_clock_idle_wakeup_event(0);
802
803 lapic_timer_state_broadcast(pr, cx, 0);
804 return index;
805} 738}
806 739
807static int c3_cpu_count; 740static int c3_cpu_count;
@@ -809,44 +742,14 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
809 742
810/** 743/**
811 * acpi_idle_enter_bm - enters C3 with proper BM handling 744 * acpi_idle_enter_bm - enters C3 with proper BM handling
812 * @dev: the target CPU 745 * @pr: Target processor
813 * @drv: cpuidle driver containing state data 746 * @cx: Target state context
814 * @index: the index of suggested state
815 *
816 * If BM is detected, the deepest non-C3 idle state is entered instead.
817 */ 747 */
818static int acpi_idle_enter_bm(struct cpuidle_device *dev, 748static void acpi_idle_enter_bm(struct acpi_processor *pr,
819 struct cpuidle_driver *drv, int index) 749 struct acpi_processor_cx *cx)
820{ 750{
821 struct acpi_processor *pr;
822 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
823
824 pr = __this_cpu_read(processors);
825
826 if (unlikely(!pr))
827 return -EINVAL;
828
829#ifdef CONFIG_HOTPLUG_CPU
830 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
831 !pr->flags.has_cst &&
832 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
833 return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
834#endif
835
836 if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
837 if (drv->safe_state_index >= 0) {
838 return drv->states[drv->safe_state_index].enter(dev,
839 drv, drv->safe_state_index);
840 } else {
841 acpi_safe_halt();
842 return -EBUSY;
843 }
844 }
845
846 acpi_unlazy_tlb(smp_processor_id()); 751 acpi_unlazy_tlb(smp_processor_id());
847 752
848 /* Tell the scheduler that we are going deep-idle: */
849 sched_clock_idle_sleep_event();
850 /* 753 /*
851 * Must be done before busmaster disable as we might need to 754 * Must be done before busmaster disable as we might need to
852 * access HPET ! 755 * access HPET !
@@ -856,37 +759,71 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
856 /* 759 /*
857 * disable bus master 760 * disable bus master
858 * bm_check implies we need ARB_DIS 761 * bm_check implies we need ARB_DIS
859 * !bm_check implies we need cache flush
860 * bm_control implies whether we can do ARB_DIS 762 * bm_control implies whether we can do ARB_DIS
861 * 763 *
862 * That leaves a case where bm_check is set and bm_control is 764 * That leaves a case where bm_check is set and bm_control is
863 * not set. In that case we cannot do much, we enter C3 765 * not set. In that case we cannot do much, we enter C3
864 * without doing anything. 766 * without doing anything.
865 */ 767 */
866 if (pr->flags.bm_check && pr->flags.bm_control) { 768 if (pr->flags.bm_control) {
867 raw_spin_lock(&c3_lock); 769 raw_spin_lock(&c3_lock);
868 c3_cpu_count++; 770 c3_cpu_count++;
869 /* Disable bus master arbitration when all CPUs are in C3 */ 771 /* Disable bus master arbitration when all CPUs are in C3 */
870 if (c3_cpu_count == num_online_cpus()) 772 if (c3_cpu_count == num_online_cpus())
871 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); 773 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
872 raw_spin_unlock(&c3_lock); 774 raw_spin_unlock(&c3_lock);
873 } else if (!pr->flags.bm_check) {
874 ACPI_FLUSH_CPU_CACHE();
875 } 775 }
876 776
877 acpi_idle_do_entry(cx); 777 acpi_idle_do_entry(cx);
878 778
879 /* Re-enable bus master arbitration */ 779 /* Re-enable bus master arbitration */
880 if (pr->flags.bm_check && pr->flags.bm_control) { 780 if (pr->flags.bm_control) {
881 raw_spin_lock(&c3_lock); 781 raw_spin_lock(&c3_lock);
882 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); 782 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
883 c3_cpu_count--; 783 c3_cpu_count--;
884 raw_spin_unlock(&c3_lock); 784 raw_spin_unlock(&c3_lock);
885 } 785 }
886 786
887 sched_clock_idle_wakeup_event(0); 787 lapic_timer_state_broadcast(pr, cx, 0);
788}
789
790static int acpi_idle_enter(struct cpuidle_device *dev,
791 struct cpuidle_driver *drv, int index)
792{
793 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
794 struct acpi_processor *pr;
795
796 pr = __this_cpu_read(processors);
797 if (unlikely(!pr))
798 return -EINVAL;
799
800 if (cx->type != ACPI_STATE_C1) {
801 if (acpi_idle_fallback_to_c1(pr)) {
802 index = CPUIDLE_DRIVER_STATE_START;
803 cx = per_cpu(acpi_cstate[index], dev->cpu);
804 } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
805 if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
806 acpi_idle_enter_bm(pr, cx);
807 return index;
808 } else if (drv->safe_state_index >= 0) {
809 index = drv->safe_state_index;
810 cx = per_cpu(acpi_cstate[index], dev->cpu);
811 } else {
812 acpi_safe_halt();
813 return -EBUSY;
814 }
815 }
816 }
817
818 lapic_timer_state_broadcast(pr, cx, 1);
819
820 if (cx->type == ACPI_STATE_C3)
821 ACPI_FLUSH_CPU_CACHE();
822
823 acpi_idle_do_entry(cx);
888 824
889 lapic_timer_state_broadcast(pr, cx, 0); 825 lapic_timer_state_broadcast(pr, cx, 0);
826
890 return index; 827 return index;
891} 828}
892 829
@@ -981,27 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
981 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 918 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
982 state->exit_latency = cx->latency; 919 state->exit_latency = cx->latency;
983 state->target_residency = cx->latency * latency_factor; 920 state->target_residency = cx->latency * latency_factor;
921 state->enter = acpi_idle_enter;
984 922
985 state->flags = 0; 923 state->flags = 0;
986 switch (cx->type) { 924 if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
987 case ACPI_STATE_C1:
988
989 state->enter = acpi_idle_enter_c1;
990 state->enter_dead = acpi_idle_play_dead;
991 drv->safe_state_index = count;
992 break;
993
994 case ACPI_STATE_C2:
995 state->enter = acpi_idle_enter_simple;
996 state->enter_dead = acpi_idle_play_dead; 925 state->enter_dead = acpi_idle_play_dead;
997 drv->safe_state_index = count; 926 drv->safe_state_index = count;
998 break;
999
1000 case ACPI_STATE_C3:
1001 state->enter = pr->flags.bm_check ?
1002 acpi_idle_enter_bm :
1003 acpi_idle_enter_simple;
1004 break;
1005 } 927 }
1006 928
1007 count++; 929 count++;
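
The three separate ->enter callbacks collapse into acpi_idle_enter(), which may downgrade to C1 or to drv->safe_state_index and then reports the state it actually entered. For context, a simplified, non-verbatim sketch of how the cpuidle core consumes that return value:

#include <linux/cpuidle.h>

/* Simplified sketch of the cpuidle call site; the real core also measures
 * residency and handles errors. */
static int example_enter_state(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int index)
{
        int entered_state;

        entered_state = drv->states[index].enter(dev, drv, index);
        /* A driver may enter a shallower state and report its index,
         * e.g. acpi_idle_enter() falling back to C1 or safe_state_index. */
        return entered_state;
}
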
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 782a0d15c25f..4752b9939987 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -34,21 +34,34 @@
34#define valid_IRQ(i) (true) 34#define valid_IRQ(i) (true)
35#endif 35#endif
36 36
37static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect, 37static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
38 bool window)
39{ 38{
40 unsigned long flags = IORESOURCE_MEM; 39 u64 reslen = end - start + 1;
41 40
42 if (len == 0) 41 /*
43 flags |= IORESOURCE_DISABLED; 42 * CHECKME: len might also need to be checked against a minimum
43 * length. 1 is fine for io, but it makes no sense
44 * at all for memory.
45 */
46 if (len && reslen && reslen == len && start <= end)
47 return true;
44 48
45 if (write_protect == ACPI_READ_WRITE_MEMORY) 49 pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
46 flags |= IORESOURCE_MEM_WRITEABLE; 50 io ? "io" : "mem", start, end, len);
51
52 return false;
53}
54
55static void acpi_dev_memresource_flags(struct resource *res, u64 len,
56 u8 write_protect)
57{
58 res->flags = IORESOURCE_MEM;
47 59
48 if (window) 60 if (!acpi_dev_resource_len_valid(res->start, res->end, len, false))
49 flags |= IORESOURCE_WINDOW; 61 res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
50 62
51 return flags; 63 if (write_protect == ACPI_READ_WRITE_MEMORY)
64 res->flags |= IORESOURCE_MEM_WRITEABLE;
52} 65}
53 66
54static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, 67static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
@@ -56,7 +69,7 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
56{ 69{
57 res->start = start; 70 res->start = start;
58 res->end = start + len - 1; 71 res->end = start + len - 1;
59 res->flags = acpi_dev_memresource_flags(len, write_protect, false); 72 acpi_dev_memresource_flags(res, len, write_protect);
60} 73}
61 74
62/** 75/**
@@ -67,6 +80,11 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
67 * Check if the given ACPI resource object represents a memory resource and 80 * Check if the given ACPI resource object represents a memory resource and
68 * if that's the case, use the information in it to populate the generic 81 * if that's the case, use the information in it to populate the generic
69 * resource object pointed to by @res. 82 * resource object pointed to by @res.
83 *
84 * Return:
85 * 1) false with res->flags setting to zero: not the expected resource type
85 * 1) false with res->flags set to zero: not the expected resource type
87 * 3) true: valid assigned resource
70 */ 88 */
71bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) 89bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
72{ 90{
@@ -77,60 +95,52 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
77 switch (ares->type) { 95 switch (ares->type) {
78 case ACPI_RESOURCE_TYPE_MEMORY24: 96 case ACPI_RESOURCE_TYPE_MEMORY24:
79 memory24 = &ares->data.memory24; 97 memory24 = &ares->data.memory24;
80 if (!memory24->minimum && !memory24->address_length) 98 acpi_dev_get_memresource(res, memory24->minimum << 8,
81 return false; 99 memory24->address_length << 8,
82 acpi_dev_get_memresource(res, memory24->minimum,
83 memory24->address_length,
84 memory24->write_protect); 100 memory24->write_protect);
85 break; 101 break;
86 case ACPI_RESOURCE_TYPE_MEMORY32: 102 case ACPI_RESOURCE_TYPE_MEMORY32:
87 memory32 = &ares->data.memory32; 103 memory32 = &ares->data.memory32;
88 if (!memory32->minimum && !memory32->address_length)
89 return false;
90 acpi_dev_get_memresource(res, memory32->minimum, 104 acpi_dev_get_memresource(res, memory32->minimum,
91 memory32->address_length, 105 memory32->address_length,
92 memory32->write_protect); 106 memory32->write_protect);
93 break; 107 break;
94 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 108 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
95 fixed_memory32 = &ares->data.fixed_memory32; 109 fixed_memory32 = &ares->data.fixed_memory32;
96 if (!fixed_memory32->address && !fixed_memory32->address_length)
97 return false;
98 acpi_dev_get_memresource(res, fixed_memory32->address, 110 acpi_dev_get_memresource(res, fixed_memory32->address,
99 fixed_memory32->address_length, 111 fixed_memory32->address_length,
100 fixed_memory32->write_protect); 112 fixed_memory32->write_protect);
101 break; 113 break;
102 default: 114 default:
115 res->flags = 0;
103 return false; 116 return false;
104 } 117 }
105 return true; 118
119 return !(res->flags & IORESOURCE_DISABLED);
106} 120}
107EXPORT_SYMBOL_GPL(acpi_dev_resource_memory); 121EXPORT_SYMBOL_GPL(acpi_dev_resource_memory);
108 122
109static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode, 123static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
110 bool window) 124 u8 io_decode)
111{ 125{
112 int flags = IORESOURCE_IO; 126 res->flags = IORESOURCE_IO;
113 127
114 if (io_decode == ACPI_DECODE_16) 128 if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
115 flags |= IORESOURCE_IO_16BIT_ADDR; 129 res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
116 130
117 if (start > end || end >= 0x10003) 131 if (res->end >= 0x10003)
118 flags |= IORESOURCE_DISABLED; 132 res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
119 133
120 if (window) 134 if (io_decode == ACPI_DECODE_16)
121 flags |= IORESOURCE_WINDOW; 135 res->flags |= IORESOURCE_IO_16BIT_ADDR;
122
123 return flags;
124} 136}
125 137
126static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len, 138static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
127 u8 io_decode) 139 u8 io_decode)
128{ 140{
129 u64 end = start + len - 1;
130
131 res->start = start; 141 res->start = start;
132 res->end = end; 142 res->end = start + len - 1;
133 res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false); 143 acpi_dev_ioresource_flags(res, len, io_decode);
134} 144}
135 145
136/** 146/**
@@ -141,6 +151,11 @@ static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
141 * Check if the given ACPI resource object represents an I/O resource and 151 * Check if the given ACPI resource object represents an I/O resource and
142 * if that's the case, use the information in it to populate the generic 152 * if that's the case, use the information in it to populate the generic
143 * resource object pointed to by @res. 153 * resource object pointed to by @res.
154 *
155 * Return:
155 * 1) false with res->flags set to zero: not the expected resource type
157 * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
158 * 3) true: valid assigned resource
144 */ 159 */
145bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) 160bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
146{ 161{
@@ -150,135 +165,143 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
150 switch (ares->type) { 165 switch (ares->type) {
151 case ACPI_RESOURCE_TYPE_IO: 166 case ACPI_RESOURCE_TYPE_IO:
152 io = &ares->data.io; 167 io = &ares->data.io;
153 if (!io->minimum && !io->address_length)
154 return false;
155 acpi_dev_get_ioresource(res, io->minimum, 168 acpi_dev_get_ioresource(res, io->minimum,
156 io->address_length, 169 io->address_length,
157 io->io_decode); 170 io->io_decode);
158 break; 171 break;
159 case ACPI_RESOURCE_TYPE_FIXED_IO: 172 case ACPI_RESOURCE_TYPE_FIXED_IO:
160 fixed_io = &ares->data.fixed_io; 173 fixed_io = &ares->data.fixed_io;
161 if (!fixed_io->address && !fixed_io->address_length)
162 return false;
163 acpi_dev_get_ioresource(res, fixed_io->address, 174 acpi_dev_get_ioresource(res, fixed_io->address,
164 fixed_io->address_length, 175 fixed_io->address_length,
165 ACPI_DECODE_10); 176 ACPI_DECODE_10);
166 break; 177 break;
167 default: 178 default:
179 res->flags = 0;
168 return false; 180 return false;
169 } 181 }
170 return true; 182
183 return !(res->flags & IORESOURCE_DISABLED);
171} 184}
172EXPORT_SYMBOL_GPL(acpi_dev_resource_io); 185EXPORT_SYMBOL_GPL(acpi_dev_resource_io);
173 186
174/** 187static bool acpi_decode_space(struct resource_win *win,
175 * acpi_dev_resource_address_space - Extract ACPI address space information. 188 struct acpi_resource_address *addr,
176 * @ares: Input ACPI resource object. 189 struct acpi_address64_attribute *attr)
177 * @res: Output generic resource object.
178 *
179 * Check if the given ACPI resource object represents an address space resource
180 * and if that's the case, use the information in it to populate the generic
181 * resource object pointed to by @res.
182 */
183bool acpi_dev_resource_address_space(struct acpi_resource *ares,
184 struct resource *res)
185{ 190{
186 acpi_status status; 191 u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
187 struct acpi_resource_address64 addr; 192 bool wp = addr->info.mem.write_protect;
188 bool window; 193 u64 len = attr->address_length;
189 u64 len; 194 struct resource *res = &win->res;
190 u8 io_decode;
191 195
192 switch (ares->type) { 196 /*
193 case ACPI_RESOURCE_TYPE_ADDRESS16: 197 * Filter out invalid descriptor according to ACPI Spec 5.0, section
194 case ACPI_RESOURCE_TYPE_ADDRESS32: 198 * 6.4.3.5 Address Space Resource Descriptors.
195 case ACPI_RESOURCE_TYPE_ADDRESS64: 199 */
196 break; 200 if ((addr->min_address_fixed != addr->max_address_fixed && len) ||
197 default: 201 (addr->min_address_fixed && addr->max_address_fixed && !len))
198 return false; 202 pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
199 } 203 addr->min_address_fixed, addr->max_address_fixed, len);
200 204
201 status = acpi_resource_to_address64(ares, &addr); 205 res->start = attr->minimum;
202 if (ACPI_FAILURE(status)) 206 res->end = attr->maximum;
203 return false;
204 207
205 res->start = addr.minimum; 208 /*
206 res->end = addr.maximum; 209 * For bridges that translate addresses across the bridge,
207 window = addr.producer_consumer == ACPI_PRODUCER; 210 * translation_offset is the offset that must be added to the
211 * address on the secondary side to obtain the address on the
212 * primary side. Non-bridge devices must list 0 for all Address
213 * Translation offset bits.
214 */
215 if (addr->producer_consumer == ACPI_PRODUCER) {
216 res->start += attr->translation_offset;
217 res->end += attr->translation_offset;
218 } else if (attr->translation_offset) {
219 pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
220 attr->translation_offset);
221 }
208 222
209 switch(addr.resource_type) { 223 switch (addr->resource_type) {
210 case ACPI_MEMORY_RANGE: 224 case ACPI_MEMORY_RANGE:
211 len = addr.maximum - addr.minimum + 1; 225 acpi_dev_memresource_flags(res, len, wp);
212 res->flags = acpi_dev_memresource_flags(len,
213 addr.info.mem.write_protect,
214 window);
215 break; 226 break;
216 case ACPI_IO_RANGE: 227 case ACPI_IO_RANGE:
217 io_decode = addr.granularity == 0xfff ? 228 acpi_dev_ioresource_flags(res, len, iodec);
218 ACPI_DECODE_10 : ACPI_DECODE_16;
219 res->flags = acpi_dev_ioresource_flags(addr.minimum,
220 addr.maximum,
221 io_decode, window);
222 break; 229 break;
223 case ACPI_BUS_NUMBER_RANGE: 230 case ACPI_BUS_NUMBER_RANGE:
224 res->flags = IORESOURCE_BUS; 231 res->flags = IORESOURCE_BUS;
225 break; 232 break;
226 default: 233 default:
227 res->flags = 0; 234 return false;
228 } 235 }
229 236
230 return true; 237 win->offset = attr->translation_offset;
238
239 if (addr->producer_consumer == ACPI_PRODUCER)
240 res->flags |= IORESOURCE_WINDOW;
241
242 if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
243 res->flags |= IORESOURCE_PREFETCH;
244
245 return !(res->flags & IORESOURCE_DISABLED);
246}
247
248/**
249 * acpi_dev_resource_address_space - Extract ACPI address space information.
250 * @ares: Input ACPI resource object.
251 * @win: Output generic resource object.
252 *
253 * Check if the given ACPI resource object represents an address space resource
254 * and if that's the case, use the information in it to populate the generic
255 * resource object pointed to by @win.
256 *
257 * Return:
258 * 1) false with win->res.flags set to zero: not the expected resource type
259 * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
260 * resource
261 * 3) true: valid assigned resource
262 */
263bool acpi_dev_resource_address_space(struct acpi_resource *ares,
264 struct resource_win *win)
265{
266 struct acpi_resource_address64 addr;
267
268 win->res.flags = 0;
269 if (ACPI_FAILURE(acpi_resource_to_address64(ares, &addr)))
270 return false;
271
272 return acpi_decode_space(win, (struct acpi_resource_address *)&addr,
273 &addr.address);
231} 274}
232EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space); 275EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space);
233 276
234/** 277/**
235 * acpi_dev_resource_ext_address_space - Extract ACPI address space information. 278 * acpi_dev_resource_ext_address_space - Extract ACPI address space information.
236 * @ares: Input ACPI resource object. 279 * @ares: Input ACPI resource object.
237 * @res: Output generic resource object. 280 * @win: Output generic resource object.
238 * 281 *
239 * Check if the given ACPI resource object represents an extended address space 282 * Check if the given ACPI resource object represents an extended address space
240 * resource and if that's the case, use the information in it to populate the 283 * resource and if that's the case, use the information in it to populate the
241 * generic resource object pointed to by @res. 284 * generic resource object pointed to by @win.
285 *
286 * Return:
287 * 1) false with win->res.flags set to zero: not the expected resource type
288 * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
289 * resource
290 * 3) true: valid assigned resource
242 */ 291 */
243bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, 292bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
244 struct resource *res) 293 struct resource_win *win)
245{ 294{
246 struct acpi_resource_extended_address64 *ext_addr; 295 struct acpi_resource_extended_address64 *ext_addr;
247 bool window;
248 u64 len;
249 u8 io_decode;
250 296
297 win->res.flags = 0;
251 if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) 298 if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
252 return false; 299 return false;
253 300
254 ext_addr = &ares->data.ext_address64; 301 ext_addr = &ares->data.ext_address64;
255 302
256 res->start = ext_addr->minimum; 303 return acpi_decode_space(win, (struct acpi_resource_address *)ext_addr,
257 res->end = ext_addr->maximum; 304 &ext_addr->address);
258 window = ext_addr->producer_consumer == ACPI_PRODUCER;
259
260 switch(ext_addr->resource_type) {
261 case ACPI_MEMORY_RANGE:
262 len = ext_addr->maximum - ext_addr->minimum + 1;
263 res->flags = acpi_dev_memresource_flags(len,
264 ext_addr->info.mem.write_protect,
265 window);
266 break;
267 case ACPI_IO_RANGE:
268 io_decode = ext_addr->granularity == 0xfff ?
269 ACPI_DECODE_10 : ACPI_DECODE_16;
270 res->flags = acpi_dev_ioresource_flags(ext_addr->minimum,
271 ext_addr->maximum,
272 io_decode, window);
273 break;
274 case ACPI_BUS_NUMBER_RANGE:
275 res->flags = IORESOURCE_BUS;
276 break;
277 default:
278 res->flags = 0;
279 }
280
281 return true;
282} 305}
283EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space); 306EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
284 307
@@ -310,7 +333,7 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
310{ 333{
311 res->start = gsi; 334 res->start = gsi;
312 res->end = gsi; 335 res->end = gsi;
313 res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED; 336 res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
314} 337}
315 338
316static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, 339static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
@@ -369,6 +392,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
369 * represented by the resource and populate the generic resource object pointed 392 * represented by the resource and populate the generic resource object pointed
370 * to by @res accordingly. If the registration of the GSI is not successful, 393 * to by @res accordingly. If the registration of the GSI is not successful,
371 * IORESOURCE_DISABLED will be set in that object's flags. 394
395 *
396 * Return:
397 * 1) false with res->flags set to zero: not the expected resource type
398 * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
399 * 3) true: valid assigned resource
372 */ 400 */
373bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, 401bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
374 struct resource *res) 402 struct resource *res)
@@ -402,6 +430,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
402 ext_irq->sharable, false); 430 ext_irq->sharable, false);
403 break; 431 break;
404 default: 432 default:
433 res->flags = 0;
405 return false; 434 return false;
406 } 435 }
407 436
@@ -415,12 +444,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
415 */ 444 */
416void acpi_dev_free_resource_list(struct list_head *list) 445void acpi_dev_free_resource_list(struct list_head *list)
417{ 446{
418 struct resource_list_entry *rentry, *re; 447 resource_list_free(list);
419
420 list_for_each_entry_safe(rentry, re, list, node) {
421 list_del(&rentry->node);
422 kfree(rentry);
423 }
424} 448}
425EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); 449EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);
426 450
@@ -432,18 +456,19 @@ struct res_proc_context {
432 int error; 456 int error;
433}; 457};
434 458
435static acpi_status acpi_dev_new_resource_entry(struct resource *r, 459static acpi_status acpi_dev_new_resource_entry(struct resource_win *win,
436 struct res_proc_context *c) 460 struct res_proc_context *c)
437{ 461{
438 struct resource_list_entry *rentry; 462 struct resource_entry *rentry;
439 463
440 rentry = kmalloc(sizeof(*rentry), GFP_KERNEL); 464 rentry = resource_list_create_entry(NULL, 0);
441 if (!rentry) { 465 if (!rentry) {
442 c->error = -ENOMEM; 466 c->error = -ENOMEM;
443 return AE_NO_MEMORY; 467 return AE_NO_MEMORY;
444 } 468 }
445 rentry->res = *r; 469 *rentry->res = win->res;
446 list_add_tail(&rentry->node, c->list); 470 rentry->offset = win->offset;
471 resource_list_add_tail(rentry, c->list);
447 c->count++; 472 c->count++;
448 return AE_OK; 473 return AE_OK;
449} 474}
@@ -452,7 +477,8 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
452 void *context) 477 void *context)
453{ 478{
454 struct res_proc_context *c = context; 479 struct res_proc_context *c = context;
455 struct resource r; 480 struct resource_win win;
481 struct resource *res = &win.res;
456 int i; 482 int i;
457 483
458 if (c->preproc) { 484 if (c->preproc) {
@@ -467,18 +493,18 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
467 } 493 }
468 } 494 }
469 495
470 memset(&r, 0, sizeof(r)); 496 memset(&win, 0, sizeof(win));
471 497
472 if (acpi_dev_resource_memory(ares, &r) 498 if (acpi_dev_resource_memory(ares, res)
473 || acpi_dev_resource_io(ares, &r) 499 || acpi_dev_resource_io(ares, res)
474 || acpi_dev_resource_address_space(ares, &r) 500 || acpi_dev_resource_address_space(ares, &win)
475 || acpi_dev_resource_ext_address_space(ares, &r)) 501 || acpi_dev_resource_ext_address_space(ares, &win))
476 return acpi_dev_new_resource_entry(&r, c); 502 return acpi_dev_new_resource_entry(&win, c);
477 503
478 for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) { 504 for (i = 0; acpi_dev_resource_interrupt(ares, i, res); i++) {
479 acpi_status status; 505 acpi_status status;
480 506
481 status = acpi_dev_new_resource_entry(&r, c); 507 status = acpi_dev_new_resource_entry(&win, c);
482 if (ACPI_FAILURE(status)) 508 if (ACPI_FAILURE(status))
483 return status; 509 return status;
484 } 510 }
@@ -503,7 +529,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
503 * returned as the final error code. 529 * returned as the final error code.
504 * 530 *
505 * The resultant struct resource objects are put on the list pointed to by 531 * The resultant struct resource objects are put on the list pointed to by
506 * @list, that must be empty initially, as members of struct resource_list_entry 532 * @list, that must be empty initially, as members of struct resource_entry
507 * objects. Callers of this routine should use %acpi_dev_free_resource_list() to 533 * objects. Callers of this routine should use %acpi_dev_free_resource_list() to
508 * free that list. 534 * free that list.
509 * 535 *
@@ -538,3 +564,58 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
538 return c.count; 564 return c.count;
539} 565}
540EXPORT_SYMBOL_GPL(acpi_dev_get_resources); 566EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
567
568/**
569 * acpi_dev_filter_resource_type - Filter ACPI resource according to resource
570 * types
571 * @ares: Input ACPI resource object.
572 * @types: Valid resource types of IORESOURCE_XXX
573 *
574 * This is a helper function for acpi_dev_get_resources() that filters
575 * ACPI resource objects according to resource types.
576 */
577int acpi_dev_filter_resource_type(struct acpi_resource *ares,
578 unsigned long types)
579{
580 unsigned long type = 0;
581
582 switch (ares->type) {
583 case ACPI_RESOURCE_TYPE_MEMORY24:
584 case ACPI_RESOURCE_TYPE_MEMORY32:
585 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
586 type = IORESOURCE_MEM;
587 break;
588 case ACPI_RESOURCE_TYPE_IO:
589 case ACPI_RESOURCE_TYPE_FIXED_IO:
590 type = IORESOURCE_IO;
591 break;
592 case ACPI_RESOURCE_TYPE_IRQ:
593 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
594 type = IORESOURCE_IRQ;
595 break;
596 case ACPI_RESOURCE_TYPE_DMA:
597 case ACPI_RESOURCE_TYPE_FIXED_DMA:
598 type = IORESOURCE_DMA;
599 break;
600 case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
601 type = IORESOURCE_REG;
602 break;
603 case ACPI_RESOURCE_TYPE_ADDRESS16:
604 case ACPI_RESOURCE_TYPE_ADDRESS32:
605 case ACPI_RESOURCE_TYPE_ADDRESS64:
606 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
607 if (ares->data.address.resource_type == ACPI_MEMORY_RANGE)
608 type = IORESOURCE_MEM;
609 else if (ares->data.address.resource_type == ACPI_IO_RANGE)
610 type = IORESOURCE_IO;
611 else if (ares->data.address.resource_type ==
612 ACPI_BUS_NUMBER_RANGE)
613 type = IORESOURCE_BUS;
614 break;
615 default:
616 break;
617 }
618
619 return (type & types) ? 0 : 1;
620}
621EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
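
acpi_dev_filter_resource_type() returns 0 to keep a resource and 1 to skip it, matching the preproc convention of acpi_dev_get_resources(), so callers typically wrap it in a small adapter. A hypothetical example (the function names and the IORESOURCE_MEM choice are illustrative):

#include <linux/acpi.h>
#include <linux/ioport.h>

/* Illustrative adapter: keep only memory-type resources. */
static int example_filter_mem(struct acpi_resource *ares, void *data)
{
        return acpi_dev_filter_resource_type(ares, IORESOURCE_MEM);
}

static int example_count_mem_resources(struct acpi_device *adev)
{
        struct list_head list;
        int count;

        INIT_LIST_HEAD(&list);
        count = acpi_dev_get_resources(adev, &list, example_filter_mem, NULL);
        acpi_dev_free_resource_list(&list);     /* now just resource_list_free() */
        return count;
}
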
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index dc4d8960684a..bbca7830e18a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2544,6 +2544,7 @@ int __init acpi_scan_init(void)
2544 acpi_pci_link_init(); 2544 acpi_pci_link_init();
2545 acpi_processor_init(); 2545 acpi_processor_init();
2546 acpi_lpss_init(); 2546 acpi_lpss_init();
2547 acpi_apd_init();
2547 acpi_cmos_rtc_init(); 2548 acpi_cmos_rtc_init();
2548 acpi_container_init(); 2549 acpi_container_init();
2549 acpi_memory_hotplug_init(); 2550 acpi_memory_hotplug_init();
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8aa9254a387f..7f251dd1a687 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -321,7 +321,7 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
321 {}, 321 {},
322}; 322};
323 323
324static void acpi_sleep_dmi_check(void) 324static void __init acpi_sleep_dmi_check(void)
325{ 325{
326 int year; 326 int year;
327 327
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 032db459370f..88a4f99dd2a7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -522,6 +522,24 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
522 DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), 522 DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
523 }, 523 },
524 }, 524 },
525 {
526 /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */
527 .callback = video_disable_native_backlight,
528 .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV",
529 .matches = {
530 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
531 DMI_MATCH(DMI_PRODUCT_NAME, "3570R/370R/470R/450R/510R/4450RV"),
532 },
533 },
534 {
535 /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
536 .callback = video_disable_native_backlight,
537 .ident = "SAMSUNG 730U3E/740U3E",
538 .matches = {
539 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
540 DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
541 },
542 },
525 543
526 { 544 {
527 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ 545 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b0f138806bbc..f32b802b98f4 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -19,8 +19,8 @@
19 * @dev: Device to handle. 19 * @dev: Device to handle.
20 * 20 *
21 * If power.subsys_data is NULL, point it to a new object, otherwise increment 21 * If power.subsys_data is NULL, point it to a new object, otherwise increment
22 * its reference counter. Return 1 if a new object has been created, otherwise 22 * its reference counter. Return 0 if a new object has been created or the
23 * return 0 or error code. 23 * refcount was increased, otherwise a negative error code.
24 */ 24 */
25int dev_pm_get_subsys_data(struct device *dev) 25int dev_pm_get_subsys_data(struct device *dev)
26{ 26{
@@ -56,13 +56,11 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
56 * @dev: Device to handle. 56 * @dev: Device to handle.
57 * 57 *
58 * If the reference counter of power.subsys_data is zero after dropping the 58 * If the reference counter of power.subsys_data is zero after dropping the
59 * reference, power.subsys_data is removed. Return 1 if that happens or 0 59 * reference, power.subsys_data is removed.
60 * otherwise.
61 */ 60 */
62int dev_pm_put_subsys_data(struct device *dev) 61void dev_pm_put_subsys_data(struct device *dev)
63{ 62{
64 struct pm_subsys_data *psd; 63 struct pm_subsys_data *psd;
65 int ret = 1;
66 64
67 spin_lock_irq(&dev->power.lock); 65 spin_lock_irq(&dev->power.lock);
68 66
@@ -70,18 +68,14 @@ int dev_pm_put_subsys_data(struct device *dev)
70 if (!psd) 68 if (!psd)
71 goto out; 69 goto out;
72 70
73 if (--psd->refcount == 0) { 71 if (--psd->refcount == 0)
74 dev->power.subsys_data = NULL; 72 dev->power.subsys_data = NULL;
75 } else { 73 else
76 psd = NULL; 74 psd = NULL;
77 ret = 0;
78 }
79 75
80 out: 76 out:
81 spin_unlock_irq(&dev->power.lock); 77 spin_unlock_irq(&dev->power.lock);
82 kfree(psd); 78 kfree(psd);
83
84 return ret;
85} 79}
86EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); 80EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
87 81
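
Under the new conventions, dev_pm_get_subsys_data() returns 0 both when it allocates the object and when it only takes a reference, and dev_pm_put_subsys_data() no longer returns anything, so callers reduce to a plain get/put pair. A hypothetical caller:

#include <linux/device.h>
#include <linux/pm.h>

/* Illustrative get/put pairing under the new return conventions. */
static int example_attach(struct device *dev)
{
        int ret = dev_pm_get_subsys_data(dev);

        if (ret)                /* only negative error codes are returned now */
                return ret;

        /* ... use dev->power.subsys_data ... */

        dev_pm_put_subsys_data(dev);    /* void: frees on the last reference */
        return 0;
}
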
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0d8780c04a5e..ba4abbe4693c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -344,14 +344,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
344 struct device *dev; 344 struct device *dev;
345 345
346 gpd_data = container_of(nb, struct generic_pm_domain_data, nb); 346 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
347
348 mutex_lock(&gpd_data->lock);
349 dev = gpd_data->base.dev; 347 dev = gpd_data->base.dev;
350 if (!dev) {
351 mutex_unlock(&gpd_data->lock);
352 return NOTIFY_DONE;
353 }
354 mutex_unlock(&gpd_data->lock);
355 348
356 for (;;) { 349 for (;;) {
357 struct generic_pm_domain *genpd; 350 struct generic_pm_domain *genpd;
@@ -1384,25 +1377,66 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1384 1377
1385#endif /* CONFIG_PM_SLEEP */ 1378#endif /* CONFIG_PM_SLEEP */
1386 1379
1387static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev) 1380static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1381 struct generic_pm_domain *genpd,
1382 struct gpd_timing_data *td)
1388{ 1383{
1389 struct generic_pm_domain_data *gpd_data; 1384 struct generic_pm_domain_data *gpd_data;
1385 int ret;
1386
1387 ret = dev_pm_get_subsys_data(dev);
1388 if (ret)
1389 return ERR_PTR(ret);
1390 1390
1391 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1391 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1392 if (!gpd_data) 1392 if (!gpd_data) {
1393 return NULL; 1393 ret = -ENOMEM;
1394 goto err_put;
1395 }
1396
1397 if (td)
1398 gpd_data->td = *td;
1394 1399
1395 mutex_init(&gpd_data->lock); 1400 gpd_data->base.dev = dev;
1401 gpd_data->need_restore = -1;
1402 gpd_data->td.constraint_changed = true;
1403 gpd_data->td.effective_constraint_ns = -1;
1396 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1404 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1397 dev_pm_qos_add_notifier(dev, &gpd_data->nb); 1405
1406 spin_lock_irq(&dev->power.lock);
1407
1408 if (dev->power.subsys_data->domain_data) {
1409 ret = -EINVAL;
1410 goto err_free;
1411 }
1412
1413 dev->power.subsys_data->domain_data = &gpd_data->base;
1414 dev->pm_domain = &genpd->domain;
1415
1416 spin_unlock_irq(&dev->power.lock);
1417
1398 return gpd_data; 1418 return gpd_data;
1419
1420 err_free:
1421 spin_unlock_irq(&dev->power.lock);
1422 kfree(gpd_data);
1423 err_put:
1424 dev_pm_put_subsys_data(dev);
1425 return ERR_PTR(ret);
1399} 1426}
1400 1427
1401static void __pm_genpd_free_dev_data(struct device *dev, 1428static void genpd_free_dev_data(struct device *dev,
1402 struct generic_pm_domain_data *gpd_data) 1429 struct generic_pm_domain_data *gpd_data)
1403{ 1430{
1404 dev_pm_qos_remove_notifier(dev, &gpd_data->nb); 1431 spin_lock_irq(&dev->power.lock);
1432
1433 dev->pm_domain = NULL;
1434 dev->power.subsys_data->domain_data = NULL;
1435
1436 spin_unlock_irq(&dev->power.lock);
1437
1405 kfree(gpd_data); 1438 kfree(gpd_data);
1439 dev_pm_put_subsys_data(dev);
1406} 1440}
1407 1441
1408/** 1442/**
@@ -1414,8 +1448,7 @@ static void __pm_genpd_free_dev_data(struct device *dev,
1414int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1448int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1415 struct gpd_timing_data *td) 1449 struct gpd_timing_data *td)
1416{ 1450{
1417 struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; 1451 struct generic_pm_domain_data *gpd_data;
1418 struct pm_domain_data *pdd;
1419 int ret = 0; 1452 int ret = 0;
1420 1453
1421 dev_dbg(dev, "%s()\n", __func__); 1454 dev_dbg(dev, "%s()\n", __func__);
@@ -1423,9 +1456,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1423 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1456 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1424 return -EINVAL; 1457 return -EINVAL;
1425 1458
1426 gpd_data_new = __pm_genpd_alloc_dev_data(dev); 1459 gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1427 if (!gpd_data_new) 1460 if (IS_ERR(gpd_data))
1428 return -ENOMEM; 1461 return PTR_ERR(gpd_data);
1429 1462
1430 genpd_acquire_lock(genpd); 1463 genpd_acquire_lock(genpd);
1431 1464
@@ -1434,50 +1467,22 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1434 goto out; 1467 goto out;
1435 } 1468 }
1436 1469
1437 list_for_each_entry(pdd, &genpd->dev_list, list_node) 1470 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1438 if (pdd->dev == dev) {
1439 ret = -EINVAL;
1440 goto out;
1441 }
1442
1443 ret = dev_pm_get_subsys_data(dev);
1444 if (ret) 1471 if (ret)
1445 goto out; 1472 goto out;
1446 1473
1447 genpd->device_count++; 1474 genpd->device_count++;
1448 genpd->max_off_time_changed = true; 1475 genpd->max_off_time_changed = true;
1449 1476
1450 spin_lock_irq(&dev->power.lock);
1451
1452 dev->pm_domain = &genpd->domain;
1453 if (dev->power.subsys_data->domain_data) {
1454 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1455 } else {
1456 gpd_data = gpd_data_new;
1457 dev->power.subsys_data->domain_data = &gpd_data->base;
1458 }
1459 gpd_data->refcount++;
1460 if (td)
1461 gpd_data->td = *td;
1462
1463 spin_unlock_irq(&dev->power.lock);
1464
1465 if (genpd->attach_dev)
1466 genpd->attach_dev(genpd, dev);
1467
1468 mutex_lock(&gpd_data->lock);
1469 gpd_data->base.dev = dev;
1470 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1477 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1471 gpd_data->need_restore = -1;
1472 gpd_data->td.constraint_changed = true;
1473 gpd_data->td.effective_constraint_ns = -1;
1474 mutex_unlock(&gpd_data->lock);
1475 1478
1476 out: 1479 out:
1477 genpd_release_lock(genpd); 1480 genpd_release_lock(genpd);
1478 1481
1479 if (gpd_data != gpd_data_new) 1482 if (ret)
1480 __pm_genpd_free_dev_data(dev, gpd_data_new); 1483 genpd_free_dev_data(dev, gpd_data);
1484 else
1485 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1481 1486
1482 return ret; 1487 return ret;
1483} 1488}
@@ -1504,7 +1509,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1504{ 1509{
1505 struct generic_pm_domain_data *gpd_data; 1510 struct generic_pm_domain_data *gpd_data;
1506 struct pm_domain_data *pdd; 1511 struct pm_domain_data *pdd;
1507 bool remove = false;
1508 int ret = 0; 1512 int ret = 0;
1509 1513
1510 dev_dbg(dev, "%s()\n", __func__); 1514 dev_dbg(dev, "%s()\n", __func__);
@@ -1514,6 +1518,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1514 || pd_to_genpd(dev->pm_domain) != genpd) 1518 || pd_to_genpd(dev->pm_domain) != genpd)
1515 return -EINVAL; 1519 return -EINVAL;
1516 1520
1521 /* The above validation also means we have existing domain_data. */
1522 pdd = dev->power.subsys_data->domain_data;
1523 gpd_data = to_gpd_data(pdd);
1524 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1525
1517 genpd_acquire_lock(genpd); 1526 genpd_acquire_lock(genpd);
1518 1527
1519 if (genpd->prepared_count > 0) { 1528 if (genpd->prepared_count > 0) {
@@ -1527,58 +1536,22 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1527 if (genpd->detach_dev) 1536 if (genpd->detach_dev)
1528 genpd->detach_dev(genpd, dev); 1537 genpd->detach_dev(genpd, dev);
1529 1538
1530 spin_lock_irq(&dev->power.lock);
1531
1532 dev->pm_domain = NULL;
1533 pdd = dev->power.subsys_data->domain_data;
1534 list_del_init(&pdd->list_node); 1539 list_del_init(&pdd->list_node);
1535 gpd_data = to_gpd_data(pdd);
1536 if (--gpd_data->refcount == 0) {
1537 dev->power.subsys_data->domain_data = NULL;
1538 remove = true;
1539 }
1540
1541 spin_unlock_irq(&dev->power.lock);
1542
1543 mutex_lock(&gpd_data->lock);
1544 pdd->dev = NULL;
1545 mutex_unlock(&gpd_data->lock);
1546 1540
1547 genpd_release_lock(genpd); 1541 genpd_release_lock(genpd);
1548 1542
1549 dev_pm_put_subsys_data(dev); 1543 genpd_free_dev_data(dev, gpd_data);
1550 if (remove)
1551 __pm_genpd_free_dev_data(dev, gpd_data);
1552 1544
1553 return 0; 1545 return 0;
1554 1546
1555 out: 1547 out:
1556 genpd_release_lock(genpd); 1548 genpd_release_lock(genpd);
1549 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1557 1550
1558 return ret; 1551 return ret;
1559} 1552}
1560 1553
1561/** 1554/**
1562 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1563 * @dev: Device to set/unset the flag for.
1564 * @val: The new value of the device's "need restore" flag.
1565 */
1566void pm_genpd_dev_need_restore(struct device *dev, bool val)
1567{
1568 struct pm_subsys_data *psd;
1569 unsigned long flags;
1570
1571 spin_lock_irqsave(&dev->power.lock, flags);
1572
1573 psd = dev_to_psd(dev);
1574 if (psd && psd->domain_data)
1575 to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
1576
1577 spin_unlock_irqrestore(&dev->power.lock, flags);
1578}
1579EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
1580
1581/**
1582 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1555 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1583 * @genpd: Master PM domain to add the subdomain to. 1556 * @genpd: Master PM domain to add the subdomain to.
1584 * @subdomain: Subdomain to be added. 1557 * @subdomain: Subdomain to be added.
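
With gpd_data allocation, the domain_data hookup and the QoS notifier now handled inside genpd_alloc_dev_data()/genpd_free_dev_data(), attaching a device to a domain from platform code stays a simple pair of calls. A hypothetical sketch (error handling trimmed; passing NULL timing data selects the defaults initialized above):

#include <linux/pm_domain.h>

/* Hypothetical platform code attaching and detaching a device. */
static int example_bind(struct generic_pm_domain *genpd, struct device *dev)
{
        int ret;

        ret = __pm_genpd_add_device(genpd, dev, NULL);  /* NULL: default timing data */
        if (ret)
                return ret;
        /* ... device is now managed by the domain ... */
        return 0;
}

static void example_unbind(struct generic_pm_domain *genpd, struct device *dev)
{
        WARN_ON(pm_genpd_remove_device(genpd, dev));
}
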
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 106c69359306..15bf29974c31 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -117,20 +117,20 @@ do { \
117} while (0) 117} while (0)
118 118
119/** 119/**
120 * find_device_opp() - find device_opp struct using device pointer 120 * _find_device_opp() - find device_opp struct using device pointer
121 * @dev: device pointer used to lookup device OPPs 121 * @dev: device pointer used to lookup device OPPs
122 * 122 *
123 * Search list of device OPPs for one containing matching device. Does a RCU 123 * Search list of device OPPs for one containing matching device. Does a RCU
124 * reader operation to grab the pointer needed. 124 * reader operation to grab the pointer needed.
125 * 125 *
126 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or 126 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
127 * -EINVAL based on type of error. 127 * -EINVAL based on type of error.
128 * 128 *
129 * Locking: This function must be called under rcu_read_lock(). device_opp 129 * Locking: This function must be called under rcu_read_lock(). device_opp
130 * is a RCU protected pointer. This means that device_opp is valid as long 130 * is a RCU protected pointer. This means that device_opp is valid as long
131 * as we are under RCU lock. 131 * as we are under RCU lock.
132 */ 132 */
133static struct device_opp *find_device_opp(struct device *dev) 133static struct device_opp *_find_device_opp(struct device *dev)
134{ 134{
135 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); 135 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
136 136
@@ -153,7 +153,7 @@ static struct device_opp *find_device_opp(struct device *dev)
153 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp 153 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
154 * @opp: opp for which voltage has to be returned for 154 * @opp: opp for which voltage has to be returned for
155 * 155 *
156 * Return voltage in micro volt corresponding to the opp, else 156 * Return: voltage in micro volt corresponding to the opp, else
157 * return 0 157 * return 0
158 * 158 *
159 * Locking: This function must be called under rcu_read_lock(). opp is a rcu 159 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
@@ -169,6 +169,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
169 struct dev_pm_opp *tmp_opp; 169 struct dev_pm_opp *tmp_opp;
170 unsigned long v = 0; 170 unsigned long v = 0;
171 171
172 opp_rcu_lockdep_assert();
173
172 tmp_opp = rcu_dereference(opp); 174 tmp_opp = rcu_dereference(opp);
173 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) 175 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
174 pr_err("%s: Invalid parameters\n", __func__); 176 pr_err("%s: Invalid parameters\n", __func__);
@@ -183,7 +185,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
183 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp 185 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
184 * @opp: opp for which frequency has to be returned for 186 * @opp: opp for which frequency has to be returned for
185 * 187 *
186 * Return frequency in hertz corresponding to the opp, else 188 * Return: frequency in hertz corresponding to the opp, else
187 * return 0 189 * return 0
188 * 190 *
189 * Locking: This function must be called under rcu_read_lock(). opp is a rcu 191 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
@@ -199,6 +201,8 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
199 struct dev_pm_opp *tmp_opp; 201 struct dev_pm_opp *tmp_opp;
200 unsigned long f = 0; 202 unsigned long f = 0;
201 203
204 opp_rcu_lockdep_assert();
205
202 tmp_opp = rcu_dereference(opp); 206 tmp_opp = rcu_dereference(opp);
203 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) 207 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
204 pr_err("%s: Invalid parameters\n", __func__); 208 pr_err("%s: Invalid parameters\n", __func__);
@@ -213,7 +217,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
213 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list 217 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
214 * @dev: device for which we do this operation 218 * @dev: device for which we do this operation
215 * 219 *
216 * This function returns the number of available opps if there are any, 220 * Return: This function returns the number of available opps if there are any,
217 * else returns 0 if none or the corresponding error value. 221 * else returns 0 if none or the corresponding error value.
218 * 222 *
219 * Locking: This function takes rcu_read_lock(). 223 * Locking: This function takes rcu_read_lock().
@@ -226,7 +230,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
226 230
227 rcu_read_lock(); 231 rcu_read_lock();
228 232
229 dev_opp = find_device_opp(dev); 233 dev_opp = _find_device_opp(dev);
230 if (IS_ERR(dev_opp)) { 234 if (IS_ERR(dev_opp)) {
231 count = PTR_ERR(dev_opp); 235 count = PTR_ERR(dev_opp);
232 dev_err(dev, "%s: device OPP not found (%d)\n", 236 dev_err(dev, "%s: device OPP not found (%d)\n",
@@ -251,9 +255,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
251 * @freq: frequency to search for 255 * @freq: frequency to search for
252 * @available: true/false - match for available opp 256 * @available: true/false - match for available opp
253 * 257 *
254 * Searches for exact match in the opp list and returns pointer to the matching 258 * Return: Searches for exact match in the opp list and returns pointer to the
255 * opp if found, else returns ERR_PTR in case of error and should be handled 259 * matching opp if found, else returns ERR_PTR in case of error and should
256 * using IS_ERR. Error return values can be: 260 * be handled using IS_ERR. Error return values can be:
257 * EINVAL: for bad pointer 261 * EINVAL: for bad pointer
258 * ERANGE: no match found for search 262 * ERANGE: no match found for search
259 * ENODEV: if device not found in list of registered devices 263 * ENODEV: if device not found in list of registered devices
@@ -280,7 +284,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
280 284
281 opp_rcu_lockdep_assert(); 285 opp_rcu_lockdep_assert();
282 286
283 dev_opp = find_device_opp(dev); 287 dev_opp = _find_device_opp(dev);
284 if (IS_ERR(dev_opp)) { 288 if (IS_ERR(dev_opp)) {
285 int r = PTR_ERR(dev_opp); 289 int r = PTR_ERR(dev_opp);
286 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); 290 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
@@ -307,7 +311,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
307 * Search for the matching ceil *available* OPP from a starting freq 311 * Search for the matching ceil *available* OPP from a starting freq
308 * for a device. 312 * for a device.
309 * 313 *
310 * Returns matching *opp and refreshes *freq accordingly, else returns 314 * Return: matching *opp and refreshes *freq accordingly, else returns
311 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 315 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
312 * values can be: 316 * values can be:
313 * EINVAL: for bad pointer 317 * EINVAL: for bad pointer
@@ -333,7 +337,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
333 return ERR_PTR(-EINVAL); 337 return ERR_PTR(-EINVAL);
334 } 338 }
335 339
336 dev_opp = find_device_opp(dev); 340 dev_opp = _find_device_opp(dev);
337 if (IS_ERR(dev_opp)) 341 if (IS_ERR(dev_opp))
338 return ERR_CAST(dev_opp); 342 return ERR_CAST(dev_opp);
339 343
@@ -357,7 +361,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
357 * Search for the matching floor *available* OPP from a starting freq 361 * Search for the matching floor *available* OPP from a starting freq
358 * for a device. 362 * for a device.
359 * 363 *
360 * Returns matching *opp and refreshes *freq accordingly, else returns 364 * Return: matching *opp and refreshes *freq accordingly, else returns
361 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 365 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
362 * values can be: 366 * values can be:
363 * EINVAL: for bad pointer 367 * EINVAL: for bad pointer
@@ -383,7 +387,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
383 return ERR_PTR(-EINVAL); 387 return ERR_PTR(-EINVAL);
384 } 388 }
385 389
386 dev_opp = find_device_opp(dev); 390 dev_opp = _find_device_opp(dev);
387 if (IS_ERR(dev_opp)) 391 if (IS_ERR(dev_opp))
388 return ERR_CAST(dev_opp); 392 return ERR_CAST(dev_opp);
389 393
@@ -403,7 +407,16 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
403} 407}
404EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 408EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
405 409
406static struct device_opp *add_device_opp(struct device *dev) 410/**
411 * _add_device_opp() - Allocate a new device OPP table
412 * @dev: device for which we do this operation
413 *
414 * New device node which uses OPPs - used when multiple devices with OPP tables
415 * are maintained.
416 *
417 * Return: valid device_opp pointer if success, else NULL.
418 */
419static struct device_opp *_add_device_opp(struct device *dev)
407{ 420{
408 struct device_opp *dev_opp; 421 struct device_opp *dev_opp;
409 422
@@ -424,8 +437,35 @@ static struct device_opp *add_device_opp(struct device *dev)
424 return dev_opp; 437 return dev_opp;
425} 438}
426 439
427static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq, 440/**
428 unsigned long u_volt, bool dynamic) 441 * _opp_add_dynamic() - Allocate a dynamic OPP.
442 * @dev: device for which we do this operation
443 * @freq: Frequency in Hz for this OPP
444 * @u_volt: Voltage in uVolts for this OPP
445 * @dynamic: Dynamically added OPPs.
446 *
447 * This function adds an opp definition to the opp list and returns status.
448 * The opp is made available by default and it can be controlled using
449 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
450 *
451 * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
452 * freed by of_free_opp_table.
453 *
454 * Locking: The internal device_opp and opp structures are RCU protected.
455 * Hence this function internally uses RCU updater strategy with mutex locks
456 * to keep the integrity of the internal data structures. Callers should ensure
457 * that this function is *NOT* called under RCU protection or in contexts where
458 * mutex cannot be locked.
459 *
460 * Return:
461 * 0 On success OR
462 * Duplicate OPPs (both freq and volt are same) and opp->available
463 * -EEXIST Freq are same and volt are different OR
464 * Duplicate OPPs (both freq and volt are same) and !opp->available
465 * -ENOMEM Memory allocation failure
466 */
467static int _opp_add_dynamic(struct device *dev, unsigned long freq,
468 long u_volt, bool dynamic)
429{ 469{
430 struct device_opp *dev_opp = NULL; 470 struct device_opp *dev_opp = NULL;
431 struct dev_pm_opp *opp, *new_opp; 471 struct dev_pm_opp *opp, *new_opp;
@@ -449,9 +489,9 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
449 new_opp->dynamic = dynamic; 489 new_opp->dynamic = dynamic;
450 490
451 /* Check for existing list for 'dev' */ 491 /* Check for existing list for 'dev' */
452 dev_opp = find_device_opp(dev); 492 dev_opp = _find_device_opp(dev);
453 if (IS_ERR(dev_opp)) { 493 if (IS_ERR(dev_opp)) {
454 dev_opp = add_device_opp(dev); 494 dev_opp = _add_device_opp(dev);
455 if (!dev_opp) { 495 if (!dev_opp) {
456 ret = -ENOMEM; 496 ret = -ENOMEM;
457 goto free_opp; 497 goto free_opp;
@@ -519,34 +559,53 @@ free_opp:
519 * mutex cannot be locked. 559 * mutex cannot be locked.
520 * 560 *
521 * Return: 561 * Return:
522 * 0: On success OR 562 * 0 On success OR
523 * Duplicate OPPs (both freq and volt are same) and opp->available 563 * Duplicate OPPs (both freq and volt are same) and opp->available
524 * -EEXIST: Freq are same and volt are different OR 564 * -EEXIST Freq are same and volt are different OR
525 * Duplicate OPPs (both freq and volt are same) and !opp->available 565 * Duplicate OPPs (both freq and volt are same) and !opp->available
526 * -ENOMEM: Memory allocation failure 566 * -ENOMEM Memory allocation failure
527 */ 567 */
528int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) 568int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
529{ 569{
530 return dev_pm_opp_add_dynamic(dev, freq, u_volt, true); 570 return _opp_add_dynamic(dev, freq, u_volt, true);
531} 571}
532EXPORT_SYMBOL_GPL(dev_pm_opp_add); 572EXPORT_SYMBOL_GPL(dev_pm_opp_add);
533 573
534static void kfree_opp_rcu(struct rcu_head *head) 574/**
575 * _kfree_opp_rcu() - Free OPP RCU handler
576 * @head: RCU head
577 */
578static void _kfree_opp_rcu(struct rcu_head *head)
535{ 579{
536 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); 580 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
537 581
538 kfree_rcu(opp, rcu_head); 582 kfree_rcu(opp, rcu_head);
539} 583}
540 584
541static void kfree_device_rcu(struct rcu_head *head) 585/**
586 * _kfree_device_rcu() - Free device_opp RCU handler
587 * @head: RCU head
588 */
589static void _kfree_device_rcu(struct rcu_head *head)
542{ 590{
543 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); 591 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
544 592
545 kfree_rcu(device_opp, rcu_head); 593 kfree_rcu(device_opp, rcu_head);
546} 594}
547 595
548static void __dev_pm_opp_remove(struct device_opp *dev_opp, 596/**
549 struct dev_pm_opp *opp) 597 * _opp_remove() - Remove an OPP from a table definition
598 * @dev_opp: points back to the device_opp struct this opp belongs to
599 * @opp: pointer to the OPP to remove
600 *
601 * This function removes an opp definition from the opp list.
602 *
603 * Locking: The internal device_opp and opp structures are RCU protected.
604 * It is assumed that the caller holds required mutex for an RCU updater
605 * strategy.
606 */
607static void _opp_remove(struct device_opp *dev_opp,
608 struct dev_pm_opp *opp)
550{ 609{
551 /* 610 /*
552 * Notify the changes in the availability of the operable 611 * Notify the changes in the availability of the operable
@@ -554,12 +613,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp,
554 */ 613 */
555 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); 614 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
556 list_del_rcu(&opp->node); 615 list_del_rcu(&opp->node);
557 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu); 616 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
558 617
559 if (list_empty(&dev_opp->opp_list)) { 618 if (list_empty(&dev_opp->opp_list)) {
560 list_del_rcu(&dev_opp->node); 619 list_del_rcu(&dev_opp->node);
561 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, 620 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
562 kfree_device_rcu); 621 _kfree_device_rcu);
563 } 622 }
564} 623}
565 624
@@ -569,6 +628,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp,
569 * @freq: OPP to remove with matching 'freq' 628 * @freq: OPP to remove with matching 'freq'
570 * 629 *
571 * This function removes an opp from the opp list. 630 * This function removes an opp from the opp list.
631 *
632 * Locking: The internal device_opp and opp structures are RCU protected.
633 * Hence this function internally uses RCU updater strategy with mutex locks
634 * to keep the integrity of the internal data structures. Callers should ensure
635 * that this function is *NOT* called under RCU protection or in contexts where
636 * mutex cannot be locked.
572 */ 637 */
573void dev_pm_opp_remove(struct device *dev, unsigned long freq) 638void dev_pm_opp_remove(struct device *dev, unsigned long freq)
574{ 639{
@@ -579,7 +644,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
579 /* Hold our list modification lock here */ 644 /* Hold our list modification lock here */
580 mutex_lock(&dev_opp_list_lock); 645 mutex_lock(&dev_opp_list_lock);
581 646
582 dev_opp = find_device_opp(dev); 647 dev_opp = _find_device_opp(dev);
583 if (IS_ERR(dev_opp)) 648 if (IS_ERR(dev_opp))
584 goto unlock; 649 goto unlock;
585 650
@@ -596,14 +661,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
596 goto unlock; 661 goto unlock;
597 } 662 }
598 663
599 __dev_pm_opp_remove(dev_opp, opp); 664 _opp_remove(dev_opp, opp);
600unlock: 665unlock:
601 mutex_unlock(&dev_opp_list_lock); 666 mutex_unlock(&dev_opp_list_lock);
602} 667}
603EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 668EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
604 669
605/** 670/**
606 * opp_set_availability() - helper to set the availability of an opp 671 * _opp_set_availability() - helper to set the availability of an opp
607 * @dev: device for which we do this operation 672 * @dev: device for which we do this operation
608 * @freq: OPP frequency to modify availability 673 * @freq: OPP frequency to modify availability
609 * @availability_req: availability status requested for this opp 674 * @availability_req: availability status requested for this opp
@@ -611,7 +676,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
611 * Set the availability of an OPP with an RCU operation, opp_{enable,disable} 676 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
612 * share a common logic which is isolated here. 677 * share a common logic which is isolated here.
613 * 678 *
614 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the 679 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
615 * copy operation, returns 0 if no modification was done OR modification was 680 * copy operation, returns 0 if no modification was done OR modification was
616 * successful. 681 * successful.
617 * 682 *
@@ -621,8 +686,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
621 * that this function is *NOT* called under RCU protection or in contexts where 686 * that this function is *NOT* called under RCU protection or in contexts where
622 * mutex locking or synchronize_rcu() blocking calls cannot be used. 687 * mutex locking or synchronize_rcu() blocking calls cannot be used.
623 */ 688 */
624static int opp_set_availability(struct device *dev, unsigned long freq, 689static int _opp_set_availability(struct device *dev, unsigned long freq,
625 bool availability_req) 690 bool availability_req)
626{ 691{
627 struct device_opp *dev_opp; 692 struct device_opp *dev_opp;
628 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); 693 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
@@ -638,7 +703,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
638 mutex_lock(&dev_opp_list_lock); 703 mutex_lock(&dev_opp_list_lock);
639 704
640 /* Find the device_opp */ 705 /* Find the device_opp */
641 dev_opp = find_device_opp(dev); 706 dev_opp = _find_device_opp(dev);
642 if (IS_ERR(dev_opp)) { 707 if (IS_ERR(dev_opp)) {
643 r = PTR_ERR(dev_opp); 708 r = PTR_ERR(dev_opp);
644 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); 709 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
@@ -668,7 +733,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
668 733
669 list_replace_rcu(&opp->node, &new_opp->node); 734 list_replace_rcu(&opp->node, &new_opp->node);
670 mutex_unlock(&dev_opp_list_lock); 735 mutex_unlock(&dev_opp_list_lock);
671 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu); 736 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
672 737
673 /* Notify the change of the OPP availability */ 738 /* Notify the change of the OPP availability */
674 if (availability_req) 739 if (availability_req)
@@ -700,10 +765,14 @@ unlock:
700 * integrity of the internal data structures. Callers should ensure that 765 * integrity of the internal data structures. Callers should ensure that
701 * this function is *NOT* called under RCU protection or in contexts where 766 * this function is *NOT* called under RCU protection or in contexts where
702 * mutex locking or synchronize_rcu() blocking calls cannot be used. 767 * mutex locking or synchronize_rcu() blocking calls cannot be used.
768 *
769 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
770 * copy operation, returns 0 if no modification was done OR modification was
771 * successful.
703 */ 772 */
704int dev_pm_opp_enable(struct device *dev, unsigned long freq) 773int dev_pm_opp_enable(struct device *dev, unsigned long freq)
705{ 774{
706 return opp_set_availability(dev, freq, true); 775 return _opp_set_availability(dev, freq, true);
707} 776}
708EXPORT_SYMBOL_GPL(dev_pm_opp_enable); 777EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
709 778
@@ -722,26 +791,41 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
722 * integrity of the internal data structures. Callers should ensure that 791 * integrity of the internal data structures. Callers should ensure that
723 * this function is *NOT* called under RCU protection or in contexts where 792 * this function is *NOT* called under RCU protection or in contexts where
724 * mutex locking or synchronize_rcu() blocking calls cannot be used. 793 * mutex locking or synchronize_rcu() blocking calls cannot be used.
794 *
795 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
796 * copy operation, returns 0 if no modification was done OR modification was
797 * successful.
725 */ 798 */
726int dev_pm_opp_disable(struct device *dev, unsigned long freq) 799int dev_pm_opp_disable(struct device *dev, unsigned long freq)
727{ 800{
728 return opp_set_availability(dev, freq, false); 801 return _opp_set_availability(dev, freq, false);
729} 802}
730EXPORT_SYMBOL_GPL(dev_pm_opp_disable); 803EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
731 804
732/** 805/**
733 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp 806 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
734 * @dev: device pointer used to lookup device OPPs. 807 * @dev: device pointer used to lookup device OPPs.
808 *
809 * Return: pointer to notifier head if found, otherwise -ENODEV or
810 * -EINVAL based on type of error cast as a pointer. The value must be checked
811 * with IS_ERR to determine valid pointer or error result.
812 *
813 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
814 * protected pointer. The reason for the same is that the opp pointer which is
815 * returned will remain valid for use with opp_get_{voltage, freq} only while
816 * under the locked area. The pointer returned must be used prior to unlocking
817 * with rcu_read_unlock() to maintain the integrity of the pointer.
735 */ 818 */
736struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) 819struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
737{ 820{
738 struct device_opp *dev_opp = find_device_opp(dev); 821 struct device_opp *dev_opp = _find_device_opp(dev);
739 822
740 if (IS_ERR(dev_opp)) 823 if (IS_ERR(dev_opp))
741 return ERR_CAST(dev_opp); /* matching type */ 824 return ERR_CAST(dev_opp); /* matching type */
742 825
743 return &dev_opp->srcu_head; 826 return &dev_opp->srcu_head;
744} 827}
828EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
745 829
746#ifdef CONFIG_OF 830#ifdef CONFIG_OF
747/** 831/**
@@ -749,6 +833,22 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
749 * @dev: device pointer used to lookup device OPPs. 833 * @dev: device pointer used to lookup device OPPs.
750 * 834 *
751 * Register the initial OPP table with the OPP library for given device. 835 * Register the initial OPP table with the OPP library for given device.
836 *
837 * Locking: The internal device_opp and opp structures are RCU protected.
838 * Hence this function indirectly uses RCU updater strategy with mutex locks
839 * to keep the integrity of the internal data structures. Callers should ensure
840 * that this function is *NOT* called under RCU protection or in contexts where
841 * mutex cannot be locked.
842 *
843 * Return:
844 * 0 On success OR
845 * Duplicate OPPs (both freq and volt are same) and opp->available
846 * -EEXIST Freq are same and volt are different OR
847 * Duplicate OPPs (both freq and volt are same) and !opp->available
848 * -ENOMEM Memory allocation failure
849 * -ENODEV when 'operating-points' property is not found or is invalid data
850 * in device node.
851 * -ENODATA when empty 'operating-points' property is found
752 */ 852 */
753int of_init_opp_table(struct device *dev) 853int of_init_opp_table(struct device *dev)
754{ 854{
@@ -777,7 +877,7 @@ int of_init_opp_table(struct device *dev)
777 unsigned long freq = be32_to_cpup(val++) * 1000; 877 unsigned long freq = be32_to_cpup(val++) * 1000;
778 unsigned long volt = be32_to_cpup(val++); 878 unsigned long volt = be32_to_cpup(val++);
779 879
780 if (dev_pm_opp_add_dynamic(dev, freq, volt, false)) 880 if (_opp_add_dynamic(dev, freq, volt, false))
781 dev_warn(dev, "%s: Failed to add OPP %ld\n", 881 dev_warn(dev, "%s: Failed to add OPP %ld\n",
782 __func__, freq); 882 __func__, freq);
783 nr -= 2; 883 nr -= 2;
@@ -792,6 +892,12 @@ EXPORT_SYMBOL_GPL(of_init_opp_table);
792 * @dev: device pointer used to lookup device OPPs. 892 * @dev: device pointer used to lookup device OPPs.
793 * 893 *
794 * Free OPPs created using static entries present in DT. 894 * Free OPPs created using static entries present in DT.
895 *
896 * Locking: The internal device_opp and opp structures are RCU protected.
897 * Hence this function indirectly uses RCU updater strategy with mutex locks
898 * to keep the integrity of the internal data structures. Callers should ensure
899 * that this function is *NOT* called under RCU protection or in contexts where
900 * mutex cannot be locked.
795 */ 901 */
796void of_free_opp_table(struct device *dev) 902void of_free_opp_table(struct device *dev)
797{ 903{
@@ -799,7 +905,7 @@ void of_free_opp_table(struct device *dev)
799 struct dev_pm_opp *opp, *tmp; 905 struct dev_pm_opp *opp, *tmp;
800 906
801 /* Check for existing list for 'dev' */ 907 /* Check for existing list for 'dev' */
802 dev_opp = find_device_opp(dev); 908 dev_opp = _find_device_opp(dev);
803 if (IS_ERR(dev_opp)) { 909 if (IS_ERR(dev_opp)) {
804 int error = PTR_ERR(dev_opp); 910 int error = PTR_ERR(dev_opp);
805 if (error != -ENODEV) 911 if (error != -ENODEV)
@@ -816,7 +922,7 @@ void of_free_opp_table(struct device *dev)
816 /* Free static OPPs */ 922 /* Free static OPPs */
817 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { 923 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
818 if (!opp->dynamic) 924 if (!opp->dynamic)
819 __dev_pm_opp_remove(dev_opp, opp); 925 _opp_remove(dev_opp, opp);
820 } 926 }
821 927
822 mutex_unlock(&dev_opp_list_lock); 928 mutex_unlock(&dev_opp_list_lock);
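
To make the locking rules spelled out in the kernel-doc above concrete, here is a minimal consumer-side sketch (not part of this patch; the function name example_pick_opp() and the frequency/voltage values are purely illustrative). It shows the documented split: dev_pm_opp_add() runs outside RCU and may report an already-registered frequency via -EEXIST, while the find/get helpers must run under rcu_read_lock() and the returned opp pointer is only valid inside that read-side section.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int example_pick_opp(struct device *dev, unsigned long *freq,
			    unsigned long *volt)
{
	struct dev_pm_opp *opp;
	int ret;

	/* Registration path: no RCU read lock, mutexes are taken internally. */
	ret = dev_pm_opp_add(dev, 1000000000, 1100000);
	if (ret && ret != -EEXIST)	/* an already-present frequency is not fatal here */
		return ret;

	/* Lookup path: find and get must both happen under rcu_read_lock(). */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*volt = dev_pm_opp_get_voltage(opp);	/* valid only while under RCU */
	rcu_read_unlock();

	return 0;
}
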
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index a8fe4c1a8d07..e56d538d039e 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -64,6 +64,8 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
64 struct pm_qos_flags *pqf; 64 struct pm_qos_flags *pqf;
65 s32 val; 65 s32 val;
66 66
67 lockdep_assert_held(&dev->power.lock);
68
67 if (IS_ERR_OR_NULL(qos)) 69 if (IS_ERR_OR_NULL(qos))
68 return PM_QOS_FLAGS_UNDEFINED; 70 return PM_QOS_FLAGS_UNDEFINED;
69 71
@@ -104,6 +106,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
104 */ 106 */
105s32 __dev_pm_qos_read_value(struct device *dev) 107s32 __dev_pm_qos_read_value(struct device *dev)
106{ 108{
109 lockdep_assert_held(&dev->power.lock);
110
107 return IS_ERR_OR_NULL(dev->power.qos) ? 111 return IS_ERR_OR_NULL(dev->power.qos) ?
108 0 : pm_qos_read_value(&dev->power.qos->resume_latency); 112 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
109} 113}
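
The two lockdep_assert_held() additions document (and, with lockdep enabled, runtime-check) the existing contract that these __dev_pm_qos_* helpers run with the device's power lock held. A minimal sketch of a conforming caller, assuming the usual dev->power.lock spinlock (the function name is hypothetical and simply mirrors what dev_pm_qos_read_value() already does):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/device.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>

static s32 example_read_resume_latency(struct device *dev)
{
	unsigned long flags;
	s32 val;

	spin_lock_irqsave(&dev->power.lock, flags);
	val = __dev_pm_qos_read_value(dev);	/* lockdep now verifies this context */
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return val;
}
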
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 63fc7f06a014..2a04d341e598 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -47,6 +47,7 @@
47#include <asm/xen/hypervisor.h> 47#include <asm/xen/hypervisor.h>
48#include <asm/xen/hypercall.h> 48#include <asm/xen/hypercall.h>
49#include <xen/balloon.h> 49#include <xen/balloon.h>
50#include <xen/grant_table.h>
50#include "common.h" 51#include "common.h"
51 52
52/* 53/*
@@ -100,7 +101,7 @@ module_param(log_stats, int, 0644);
100 101
101#define BLKBACK_INVALID_HANDLE (~0) 102#define BLKBACK_INVALID_HANDLE (~0)
102 103
103/* Number of free pages to remove on each call to free_xenballooned_pages */ 104/* Number of free pages to remove on each call to gnttab_free_pages */
104#define NUM_BATCH_FREE_PAGES 10 105#define NUM_BATCH_FREE_PAGES 10
105 106
106static inline int get_free_page(struct xen_blkif *blkif, struct page **page) 107static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
@@ -111,7 +112,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
111 if (list_empty(&blkif->free_pages)) { 112 if (list_empty(&blkif->free_pages)) {
112 BUG_ON(blkif->free_pages_num != 0); 113 BUG_ON(blkif->free_pages_num != 0);
113 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); 114 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
114 return alloc_xenballooned_pages(1, page, false); 115 return gnttab_alloc_pages(1, page);
115 } 116 }
116 BUG_ON(blkif->free_pages_num == 0); 117 BUG_ON(blkif->free_pages_num == 0);
117 page[0] = list_first_entry(&blkif->free_pages, struct page, lru); 118 page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
@@ -151,14 +152,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
151 blkif->free_pages_num--; 152 blkif->free_pages_num--;
152 if (++num_pages == NUM_BATCH_FREE_PAGES) { 153 if (++num_pages == NUM_BATCH_FREE_PAGES) {
153 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); 154 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
154 free_xenballooned_pages(num_pages, page); 155 gnttab_free_pages(num_pages, page);
155 spin_lock_irqsave(&blkif->free_pages_lock, flags); 156 spin_lock_irqsave(&blkif->free_pages_lock, flags);
156 num_pages = 0; 157 num_pages = 0;
157 } 158 }
158 } 159 }
159 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); 160 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
160 if (num_pages != 0) 161 if (num_pages != 0)
161 free_xenballooned_pages(num_pages, page); 162 gnttab_free_pages(num_pages, page);
162} 163}
163 164
164#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) 165#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
@@ -262,6 +263,17 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
262 atomic_dec(&blkif->persistent_gnt_in_use); 263 atomic_dec(&blkif->persistent_gnt_in_use);
263} 264}
264 265
266static void free_persistent_gnts_unmap_callback(int result,
267 struct gntab_unmap_queue_data *data)
268{
269 struct completion *c = data->data;
270
271 /* BUG_ON used to reproduce existing behaviour,
272 but is this the best way to deal with this? */
273 BUG_ON(result);
274 complete(c);
275}
276
265static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, 277static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
266 unsigned int num) 278 unsigned int num)
267{ 279{
@@ -269,8 +281,17 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
269 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 281 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
270 struct persistent_gnt *persistent_gnt; 282 struct persistent_gnt *persistent_gnt;
271 struct rb_node *n; 283 struct rb_node *n;
272 int ret = 0;
273 int segs_to_unmap = 0; 284 int segs_to_unmap = 0;
285 struct gntab_unmap_queue_data unmap_data;
286 struct completion unmap_completion;
287
288 init_completion(&unmap_completion);
289
290 unmap_data.data = &unmap_completion;
291 unmap_data.done = &free_persistent_gnts_unmap_callback;
292 unmap_data.pages = pages;
293 unmap_data.unmap_ops = unmap;
294 unmap_data.kunmap_ops = NULL;
274 295
275 foreach_grant_safe(persistent_gnt, n, root, node) { 296 foreach_grant_safe(persistent_gnt, n, root, node) {
276 BUG_ON(persistent_gnt->handle == 297 BUG_ON(persistent_gnt->handle ==
@@ -285,9 +306,11 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
285 306
286 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || 307 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
287 !rb_next(&persistent_gnt->node)) { 308 !rb_next(&persistent_gnt->node)) {
288 ret = gnttab_unmap_refs(unmap, NULL, pages, 309
289 segs_to_unmap); 310 unmap_data.count = segs_to_unmap;
290 BUG_ON(ret); 311 gnttab_unmap_refs_async(&unmap_data);
312 wait_for_completion(&unmap_completion);
313
291 put_free_pages(blkif, pages, segs_to_unmap); 314 put_free_pages(blkif, pages, segs_to_unmap);
292 segs_to_unmap = 0; 315 segs_to_unmap = 0;
293 } 316 }
@@ -653,18 +676,14 @@ void xen_blkbk_free_caches(struct xen_blkif *blkif)
653 shrink_free_pagepool(blkif, 0 /* All */); 676 shrink_free_pagepool(blkif, 0 /* All */);
654} 677}
655 678
656/* 679static unsigned int xen_blkbk_unmap_prepare(
657 * Unmap the grant references, and also remove the M2P over-rides 680 struct xen_blkif *blkif,
658 * used in the 'pending_req'. 681 struct grant_page **pages,
659 */ 682 unsigned int num,
660static void xen_blkbk_unmap(struct xen_blkif *blkif, 683 struct gnttab_unmap_grant_ref *unmap_ops,
661 struct grant_page *pages[], 684 struct page **unmap_pages)
662 int num)
663{ 685{
664 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
665 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
666 unsigned int i, invcount = 0; 686 unsigned int i, invcount = 0;
667 int ret;
668 687
669 for (i = 0; i < num; i++) { 688 for (i = 0; i < num; i++) {
670 if (pages[i]->persistent_gnt != NULL) { 689 if (pages[i]->persistent_gnt != NULL) {
@@ -674,21 +693,95 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
674 if (pages[i]->handle == BLKBACK_INVALID_HANDLE) 693 if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
675 continue; 694 continue;
676 unmap_pages[invcount] = pages[i]->page; 695 unmap_pages[invcount] = pages[i]->page;
677 gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page), 696 gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
678 GNTMAP_host_map, pages[i]->handle); 697 GNTMAP_host_map, pages[i]->handle);
679 pages[i]->handle = BLKBACK_INVALID_HANDLE; 698 pages[i]->handle = BLKBACK_INVALID_HANDLE;
680 if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 699 invcount++;
681 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, 700 }
682 invcount); 701
702 return invcount;
703}
704
705static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
706{
707 struct pending_req* pending_req = (struct pending_req*) (data->data);
708 struct xen_blkif *blkif = pending_req->blkif;
709
710 /* BUG_ON used to reproduce existing behaviour,
711 but is this the best way to deal with this? */
712 BUG_ON(result);
713
714 put_free_pages(blkif, data->pages, data->count);
715 make_response(blkif, pending_req->id,
716 pending_req->operation, pending_req->status);
717 free_req(blkif, pending_req);
718 /*
719 * Make sure the request is freed before releasing blkif,
720 * or there could be a race between free_req and the
721 * cleanup done in xen_blkif_free during shutdown.
722 *
723 * NB: The fact that we might try to wake up pending_free_wq
724 * before drain_complete (in case there's a drain going on)
725 * it's not a problem with our current implementation
726 * because we can assure there's no thread waiting on
727 * pending_free_wq if there's a drain going on, but it has
728 * to be taken into account if the current model is changed.
729 */
730 if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
731 complete(&blkif->drain_complete);
732 }
733 xen_blkif_put(blkif);
734}
735
736static void xen_blkbk_unmap_and_respond(struct pending_req *req)
737{
738 struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
739 struct xen_blkif *blkif = req->blkif;
740 struct grant_page **pages = req->segments;
741 unsigned int invcount;
742
743 invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
744 req->unmap, req->unmap_pages);
745
746 work->data = req;
747 work->done = xen_blkbk_unmap_and_respond_callback;
748 work->unmap_ops = req->unmap;
749 work->kunmap_ops = NULL;
750 work->pages = req->unmap_pages;
751 work->count = invcount;
752
753 gnttab_unmap_refs_async(&req->gnttab_unmap_data);
754}
755
756
757/*
758 * Unmap the grant references.
759 *
760 * This could accumulate ops up to the batch size to reduce the number
761 * of hypercalls, but since this is only used in error paths there's
762 * no real need.
763 */
764static void xen_blkbk_unmap(struct xen_blkif *blkif,
765 struct grant_page *pages[],
766 int num)
767{
768 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
769 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
770 unsigned int invcount = 0;
771 int ret;
772
773 while (num) {
774 unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
775
776 invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
777 unmap, unmap_pages);
778 if (invcount) {
779 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
683 BUG_ON(ret); 780 BUG_ON(ret);
684 put_free_pages(blkif, unmap_pages, invcount); 781 put_free_pages(blkif, unmap_pages, invcount);
685 invcount = 0;
686 } 782 }
687 } 783 pages += batch;
688 if (invcount) { 784 num -= batch;
689 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
690 BUG_ON(ret);
691 put_free_pages(blkif, unmap_pages, invcount);
692 } 785 }
693} 786}
694 787
@@ -982,32 +1075,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
982 * the grant references associated with 'request' and provide 1075 * the grant references associated with 'request' and provide
983 * the proper response on the ring. 1076 * the proper response on the ring.
984 */ 1077 */
985 if (atomic_dec_and_test(&pending_req->pendcnt)) { 1078 if (atomic_dec_and_test(&pending_req->pendcnt))
986 struct xen_blkif *blkif = pending_req->blkif; 1079 xen_blkbk_unmap_and_respond(pending_req);
987
988 xen_blkbk_unmap(blkif,
989 pending_req->segments,
990 pending_req->nr_pages);
991 make_response(blkif, pending_req->id,
992 pending_req->operation, pending_req->status);
993 free_req(blkif, pending_req);
994 /*
995 * Make sure the request is freed before releasing blkif,
996 * or there could be a race between free_req and the
997 * cleanup done in xen_blkif_free during shutdown.
998 *
999 * NB: The fact that we might try to wake up pending_free_wq
1000 * before drain_complete (in case there's a drain going on)
1001 * it's not a problem with our current implementation
1002 * because we can assure there's no thread waiting on
1003 * pending_free_wq if there's a drain going on, but it has
1004 * to be taken into account if the current model is changed.
1005 */
1006 if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
1007 complete(&blkif->drain_complete);
1008 }
1009 xen_blkif_put(blkif);
1010 }
1011} 1080}
1012 1081
1013/* 1082/*
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index f65b807e3236..cc90a840e616 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -350,6 +350,9 @@ struct pending_req {
350 struct grant_page *indirect_pages[MAX_INDIRECT_PAGES]; 350 struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
351 struct seg_buf seg[MAX_INDIRECT_SEGMENTS]; 351 struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
352 struct bio *biolist[MAX_INDIRECT_SEGMENTS]; 352 struct bio *biolist[MAX_INDIRECT_SEGMENTS];
353 struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
354 struct page *unmap_pages[MAX_INDIRECT_SEGMENTS];
355 struct gntab_unmap_queue_data gnttab_unmap_data;
353}; 356};
354 357
355 358
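
The three fields added to struct pending_req above back the asynchronous unmap path introduced in blkback.c. Condensed to its core (the example_* names and the on-stack completion are illustrative; the real request path threads the callback through to make_response() instead of blocking), the idiom is: fill a gntab_unmap_queue_data, hand it to gnttab_unmap_refs_async(), and let the completion callback signal once the grant references are gone.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/bug.h>
#include <linux/completion.h>
#include <xen/grant_table.h>

static void example_unmap_done(int result, struct gntab_unmap_queue_data *data)
{
	BUG_ON(result);			/* mirrors the existing error handling */
	complete(data->data);		/* wake up the synchronous waiter */
}

static void example_unmap_and_wait(struct gnttab_unmap_grant_ref *unmap_ops,
				   struct page **pages, unsigned int count)
{
	struct gntab_unmap_queue_data unmap_data;
	DECLARE_COMPLETION_ONSTACK(done);

	unmap_data.data = &done;
	unmap_data.done = example_unmap_done;
	unmap_data.unmap_ops = unmap_ops;
	unmap_data.kunmap_ops = NULL;
	unmap_data.pages = pages;
	unmap_data.count = count;

	gnttab_unmap_refs_async(&unmap_data);
	wait_for_completion(&done);
}
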
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d5d4cd82b9f7..5c0baa9ffc64 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -976,8 +976,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
976 status = acpi_resource_to_address64(res, &addr); 976 status = acpi_resource_to_address64(res, &addr);
977 977
978 if (ACPI_SUCCESS(status)) { 978 if (ACPI_SUCCESS(status)) {
979 hdp->hd_phys_address = addr.minimum; 979 hdp->hd_phys_address = addr.address.minimum;
980 hdp->hd_address = ioremap(addr.minimum, addr.address_length); 980 hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
981 981
982 if (hpet_is_known(hdp)) { 982 if (hpet_is_known(hdp)) {
983 iounmap(hdp->hd_address); 983 iounmap(hdp->hd_address);
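
The hpet.c hunk tracks the ACPICA layout change that moved the 64-bit fields of struct acpi_resource_address64 under a nested 'address' member, so any other acpi_resource_to_address64() caller needs the same two-level access. A hedged sketch of the updated pattern (the example_* function is hypothetical):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/acpi.h>
#include <linux/errno.h>

static int example_read_address64(struct acpi_resource *res,
				  u64 *base, u64 *len)
{
	struct acpi_resource_address64 addr;
	acpi_status status = acpi_resource_to_address64(res, &addr);

	if (ACPI_FAILURE(status))
		return -EINVAL;

	*base = addr.address.minimum;		/* was addr.minimum */
	*len = addr.address.address_length;	/* was addr.address_length */
	return 0;
}
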
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 89ae88f91895..c59bdcb83217 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -57,6 +57,16 @@ config X86_ACPI_CPUFREQ_CPB
57 By enabling this option the acpi_cpufreq driver provides the old 57 By enabling this option the acpi_cpufreq driver provides the old
58 entry in addition to the new boost ones, for compatibility reasons. 58 entry in addition to the new boost ones, for compatibility reasons.
59 59
60config X86_SFI_CPUFREQ
61 tristate "SFI Performance-States driver"
62 depends on X86_INTEL_MID && SFI
63 help
64 This adds a CPUFreq driver for some Silvermont based Intel Atom
65 architectures like Z34xx and Z35xx which enumerate processor
66 performance states through SFI.
67
68 If in doubt, say N.
69
60config ELAN_CPUFREQ 70config ELAN_CPUFREQ
61 tristate "AMD Elan SC400 and SC410" 71 tristate "AMD Elan SC400 and SC410"
62 depends on MELAN 72 depends on MELAN
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index b3ca7b0b2c33..8b4220ac888b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
41obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o 41obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
42obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o 42obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
43obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o 43obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
44obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
44 45
45################################################################################## 46##################################################################################
46# ARM SoC drivers 47# ARM SoC drivers
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index fde97d6e31d6..bab67db54b7e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -320,8 +320,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
320{ 320{
321 struct private_data *priv = policy->driver_data; 321 struct private_data *priv = policy->driver_data;
322 322
323 if (priv->cdev) 323 cpufreq_cooling_unregister(priv->cdev);
324 cpufreq_cooling_unregister(priv->cdev);
325 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 324 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
326 of_free_opp_table(priv->cpu_dev); 325 of_free_opp_table(priv->cpu_dev);
327 clk_put(policy->clk); 326 clk_put(policy->clk);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 46bed4f81cde..28e59a48b35f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -27,9 +27,21 @@
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/suspend.h> 29#include <linux/suspend.h>
30#include <linux/syscore_ops.h>
30#include <linux/tick.h> 31#include <linux/tick.h>
31#include <trace/events/power.h> 32#include <trace/events/power.h>
32 33
34/* Macros to iterate over lists */
35/* Iterate over online CPUs policies */
36static LIST_HEAD(cpufreq_policy_list);
37#define for_each_policy(__policy) \
38 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
39
40/* Iterate over governors */
41static LIST_HEAD(cpufreq_governor_list);
42#define for_each_governor(__governor) \
43 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
44
33/** 45/**
34 * The "cpufreq driver" - the arch- or hardware-dependent low 46 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock 47 * level driver of CPUFreq support, and its spinlock. This lock
@@ -40,7 +52,6 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback); 52static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41static DEFINE_RWLOCK(cpufreq_driver_lock); 53static DEFINE_RWLOCK(cpufreq_driver_lock);
42DEFINE_MUTEX(cpufreq_governor_lock); 54DEFINE_MUTEX(cpufreq_governor_lock);
43static LIST_HEAD(cpufreq_policy_list);
44 55
45/* This one keeps track of the previously set governor of a removed CPU */ 56/* This one keeps track of the previously set governor of a removed CPU */
46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); 57static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
@@ -62,7 +73,7 @@ static DECLARE_RWSEM(cpufreq_rwsem);
62/* internal prototypes */ 73/* internal prototypes */
63static int __cpufreq_governor(struct cpufreq_policy *policy, 74static int __cpufreq_governor(struct cpufreq_policy *policy,
64 unsigned int event); 75 unsigned int event);
65static unsigned int __cpufreq_get(unsigned int cpu); 76static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
66static void handle_update(struct work_struct *work); 77static void handle_update(struct work_struct *work);
67 78
68/** 79/**
@@ -93,7 +104,6 @@ void disable_cpufreq(void)
93{ 104{
94 off = 1; 105 off = 1;
95} 106}
96static LIST_HEAD(cpufreq_governor_list);
97static DEFINE_MUTEX(cpufreq_governor_mutex); 107static DEFINE_MUTEX(cpufreq_governor_mutex);
98 108
99bool have_governor_per_policy(void) 109bool have_governor_per_policy(void)
@@ -202,7 +212,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
202 struct cpufreq_policy *policy = NULL; 212 struct cpufreq_policy *policy = NULL;
203 unsigned long flags; 213 unsigned long flags;
204 214
205 if (cpufreq_disabled() || (cpu >= nr_cpu_ids)) 215 if (cpu >= nr_cpu_ids)
206 return NULL; 216 return NULL;
207 217
208 if (!down_read_trylock(&cpufreq_rwsem)) 218 if (!down_read_trylock(&cpufreq_rwsem))
@@ -229,9 +239,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
229 239
230void cpufreq_cpu_put(struct cpufreq_policy *policy) 240void cpufreq_cpu_put(struct cpufreq_policy *policy)
231{ 241{
232 if (cpufreq_disabled())
233 return;
234
235 kobject_put(&policy->kobj); 242 kobject_put(&policy->kobj);
236 up_read(&cpufreq_rwsem); 243 up_read(&cpufreq_rwsem);
237} 244}
@@ -249,12 +256,12 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
249 * systems as each CPU might be scaled differently. So, use the arch 256 * systems as each CPU might be scaled differently. So, use the arch
250 * per-CPU loops_per_jiffy value wherever possible. 257 * per-CPU loops_per_jiffy value wherever possible.
251 */ 258 */
252#ifndef CONFIG_SMP
253static unsigned long l_p_j_ref;
254static unsigned int l_p_j_ref_freq;
255
256static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) 259static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
257{ 260{
261#ifndef CONFIG_SMP
262 static unsigned long l_p_j_ref;
263 static unsigned int l_p_j_ref_freq;
264
258 if (ci->flags & CPUFREQ_CONST_LOOPS) 265 if (ci->flags & CPUFREQ_CONST_LOOPS)
259 return; 266 return;
260 267
@@ -270,13 +277,8 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
270 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", 277 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
271 loops_per_jiffy, ci->new); 278 loops_per_jiffy, ci->new);
272 } 279 }
273}
274#else
275static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
276{
277 return;
278}
279#endif 280#endif
281}
280 282
281static void __cpufreq_notify_transition(struct cpufreq_policy *policy, 283static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
282 struct cpufreq_freqs *freqs, unsigned int state) 284 struct cpufreq_freqs *freqs, unsigned int state)
@@ -432,11 +434,11 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
432} 434}
433define_one_global_rw(boost); 435define_one_global_rw(boost);
434 436
435static struct cpufreq_governor *__find_governor(const char *str_governor) 437static struct cpufreq_governor *find_governor(const char *str_governor)
436{ 438{
437 struct cpufreq_governor *t; 439 struct cpufreq_governor *t;
438 440
439 list_for_each_entry(t, &cpufreq_governor_list, governor_list) 441 for_each_governor(t)
440 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) 442 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
441 return t; 443 return t;
442 444
@@ -463,12 +465,12 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
463 *policy = CPUFREQ_POLICY_POWERSAVE; 465 *policy = CPUFREQ_POLICY_POWERSAVE;
464 err = 0; 466 err = 0;
465 } 467 }
466 } else if (has_target()) { 468 } else {
467 struct cpufreq_governor *t; 469 struct cpufreq_governor *t;
468 470
469 mutex_lock(&cpufreq_governor_mutex); 471 mutex_lock(&cpufreq_governor_mutex);
470 472
471 t = __find_governor(str_governor); 473 t = find_governor(str_governor);
472 474
473 if (t == NULL) { 475 if (t == NULL) {
474 int ret; 476 int ret;
@@ -478,7 +480,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
478 mutex_lock(&cpufreq_governor_mutex); 480 mutex_lock(&cpufreq_governor_mutex);
479 481
480 if (ret == 0) 482 if (ret == 0)
481 t = __find_governor(str_governor); 483 t = find_governor(str_governor);
482 } 484 }
483 485
484 if (t != NULL) { 486 if (t != NULL) {
@@ -513,8 +515,7 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
513show_one(scaling_min_freq, min); 515show_one(scaling_min_freq, min);
514show_one(scaling_max_freq, max); 516show_one(scaling_max_freq, max);
515 517
516static ssize_t show_scaling_cur_freq( 518static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
517 struct cpufreq_policy *policy, char *buf)
518{ 519{
519 ssize_t ret; 520 ssize_t ret;
520 521
@@ -563,7 +564,7 @@ store_one(scaling_max_freq, max);
563static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, 564static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
564 char *buf) 565 char *buf)
565{ 566{
566 unsigned int cur_freq = __cpufreq_get(policy->cpu); 567 unsigned int cur_freq = __cpufreq_get(policy);
567 if (!cur_freq) 568 if (!cur_freq)
568 return sprintf(buf, "<unknown>"); 569 return sprintf(buf, "<unknown>");
569 return sprintf(buf, "%u\n", cur_freq); 570 return sprintf(buf, "%u\n", cur_freq);
@@ -639,7 +640,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
639 goto out; 640 goto out;
640 } 641 }
641 642
642 list_for_each_entry(t, &cpufreq_governor_list, governor_list) { 643 for_each_governor(t) {
643 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) 644 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
644 - (CPUFREQ_NAME_LEN + 2))) 645 - (CPUFREQ_NAME_LEN + 2)))
645 goto out; 646 goto out;
@@ -902,7 +903,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
902 903
903 /* set up files for this cpu device */ 904 /* set up files for this cpu device */
904 drv_attr = cpufreq_driver->attr; 905 drv_attr = cpufreq_driver->attr;
905 while ((drv_attr) && (*drv_attr)) { 906 while (drv_attr && *drv_attr) {
906 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 907 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
907 if (ret) 908 if (ret)
908 return ret; 909 return ret;
@@ -936,7 +937,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
936 memcpy(&new_policy, policy, sizeof(*policy)); 937 memcpy(&new_policy, policy, sizeof(*policy));
937 938
938 /* Update governor of new_policy to the governor used before hotplug */ 939 /* Update governor of new_policy to the governor used before hotplug */
939 gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); 940 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
940 if (gov) 941 if (gov)
941 pr_debug("Restoring governor %s for cpu %d\n", 942 pr_debug("Restoring governor %s for cpu %d\n",
942 policy->governor->name, policy->cpu); 943 policy->governor->name, policy->cpu);
@@ -958,7 +959,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
958 } 959 }
959} 960}
960 961
961#ifdef CONFIG_HOTPLUG_CPU
962static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 962static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
963 unsigned int cpu, struct device *dev) 963 unsigned int cpu, struct device *dev)
964{ 964{
@@ -996,7 +996,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
996 996
997 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 997 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
998} 998}
999#endif
1000 999
1001static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) 1000static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1002{ 1001{
@@ -1033,6 +1032,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
1033 init_rwsem(&policy->rwsem); 1032 init_rwsem(&policy->rwsem);
1034 spin_lock_init(&policy->transition_lock); 1033 spin_lock_init(&policy->transition_lock);
1035 init_waitqueue_head(&policy->transition_wait); 1034 init_waitqueue_head(&policy->transition_wait);
1035 init_completion(&policy->kobj_unregister);
1036 INIT_WORK(&policy->update, handle_update);
1036 1037
1037 return policy; 1038 return policy;
1038 1039
@@ -1091,15 +1092,9 @@ static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1091 } 1092 }
1092 1093
1093 down_write(&policy->rwsem); 1094 down_write(&policy->rwsem);
1094
1095 policy->last_cpu = policy->cpu;
1096 policy->cpu = cpu; 1095 policy->cpu = cpu;
1097
1098 up_write(&policy->rwsem); 1096 up_write(&policy->rwsem);
1099 1097
1100 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1101 CPUFREQ_UPDATE_POLICY_CPU, policy);
1102
1103 return 0; 1098 return 0;
1104} 1099}
1105 1100
@@ -1110,41 +1105,32 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1110 struct cpufreq_policy *policy; 1105 struct cpufreq_policy *policy;
1111 unsigned long flags; 1106 unsigned long flags;
1112 bool recover_policy = cpufreq_suspended; 1107 bool recover_policy = cpufreq_suspended;
1113#ifdef CONFIG_HOTPLUG_CPU
1114 struct cpufreq_policy *tpolicy;
1115#endif
1116 1108
1117 if (cpu_is_offline(cpu)) 1109 if (cpu_is_offline(cpu))
1118 return 0; 1110 return 0;
1119 1111
1120 pr_debug("adding CPU %u\n", cpu); 1112 pr_debug("adding CPU %u\n", cpu);
1121 1113
1122#ifdef CONFIG_SMP
1123 /* check whether a different CPU already registered this 1114 /* check whether a different CPU already registered this
1124 * CPU because it is in the same boat. */ 1115 * CPU because it is in the same boat. */
1125 policy = cpufreq_cpu_get(cpu); 1116 policy = cpufreq_cpu_get_raw(cpu);
1126 if (unlikely(policy)) { 1117 if (unlikely(policy))
1127 cpufreq_cpu_put(policy);
1128 return 0; 1118 return 0;
1129 }
1130#endif
1131 1119
1132 if (!down_read_trylock(&cpufreq_rwsem)) 1120 if (!down_read_trylock(&cpufreq_rwsem))
1133 return 0; 1121 return 0;
1134 1122
1135#ifdef CONFIG_HOTPLUG_CPU
1136 /* Check if this cpu was hot-unplugged earlier and has siblings */ 1123 /* Check if this cpu was hot-unplugged earlier and has siblings */
1137 read_lock_irqsave(&cpufreq_driver_lock, flags); 1124 read_lock_irqsave(&cpufreq_driver_lock, flags);
1138 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { 1125 for_each_policy(policy) {
1139 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { 1126 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
1140 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1127 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1141 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); 1128 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1142 up_read(&cpufreq_rwsem); 1129 up_read(&cpufreq_rwsem);
1143 return ret; 1130 return ret;
1144 } 1131 }
1145 } 1132 }
1146 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1133 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1147#endif
1148 1134
1149 /* 1135 /*
1150 * Restore the saved policy when doing light-weight init and fall back 1136 * Restore the saved policy when doing light-weight init and fall back
@@ -1171,9 +1157,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1171 1157
1172 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1158 cpumask_copy(policy->cpus, cpumask_of(cpu));
1173 1159
1174 init_completion(&policy->kobj_unregister);
1175 INIT_WORK(&policy->update, handle_update);
1176
1177 /* call driver. From then on the cpufreq must be able 1160 /* call driver. From then on the cpufreq must be able
1178 * to accept all calls to ->verify and ->setpolicy for this CPU 1161 * to accept all calls to ->verify and ->setpolicy for this CPU
1179 */ 1162 */
@@ -1371,11 +1354,10 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1371 pr_err("%s: Failed to stop governor\n", __func__); 1354 pr_err("%s: Failed to stop governor\n", __func__);
1372 return ret; 1355 return ret;
1373 } 1356 }
1374 }
1375 1357
1376 if (!cpufreq_driver->setpolicy)
1377 strncpy(per_cpu(cpufreq_cpu_governor, cpu), 1358 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1378 policy->governor->name, CPUFREQ_NAME_LEN); 1359 policy->governor->name, CPUFREQ_NAME_LEN);
1360 }
1379 1361
1380 down_read(&policy->rwsem); 1362 down_read(&policy->rwsem);
1381 cpus = cpumask_weight(policy->cpus); 1363 cpus = cpumask_weight(policy->cpus);
@@ -1416,9 +1398,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1416 unsigned long flags; 1398 unsigned long flags;
1417 struct cpufreq_policy *policy; 1399 struct cpufreq_policy *policy;
1418 1400
1419 read_lock_irqsave(&cpufreq_driver_lock, flags); 1401 write_lock_irqsave(&cpufreq_driver_lock, flags);
1420 policy = per_cpu(cpufreq_cpu_data, cpu); 1402 policy = per_cpu(cpufreq_cpu_data, cpu);
1421 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1403 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1404 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1422 1405
1423 if (!policy) { 1406 if (!policy) {
1424 pr_debug("%s: No cpu_data found\n", __func__); 1407 pr_debug("%s: No cpu_data found\n", __func__);
@@ -1473,7 +1456,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1473 } 1456 }
1474 } 1457 }
1475 1458
1476 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1477 return 0; 1459 return 0;
1478} 1460}
1479 1461
@@ -1510,30 +1492,23 @@ static void handle_update(struct work_struct *work)
1510/** 1492/**
1511 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1493 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1512 * in deep trouble. 1494 * in deep trouble.
1513 * @cpu: cpu number 1495 * @policy: policy managing CPUs
1514 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1515 * @new_freq: CPU frequency the CPU actually runs at 1496 * @new_freq: CPU frequency the CPU actually runs at
1516 * 1497 *
1517 * We adjust to current frequency first, and need to clean up later. 1498 * We adjust to current frequency first, and need to clean up later.
1518 * So either call to cpufreq_update_policy() or schedule handle_update()). 1499 * So either call to cpufreq_update_policy() or schedule handle_update()).
1519 */ 1500 */
1520static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, 1501static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1521 unsigned int new_freq) 1502 unsigned int new_freq)
1522{ 1503{
1523 struct cpufreq_policy *policy;
1524 struct cpufreq_freqs freqs; 1504 struct cpufreq_freqs freqs;
1525 unsigned long flags;
1526 1505
1527 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1506 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1528 old_freq, new_freq); 1507 policy->cur, new_freq);
1529 1508
1530 freqs.old = old_freq; 1509 freqs.old = policy->cur;
1531 freqs.new = new_freq; 1510 freqs.new = new_freq;
1532 1511
1533 read_lock_irqsave(&cpufreq_driver_lock, flags);
1534 policy = per_cpu(cpufreq_cpu_data, cpu);
1535 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1536
1537 cpufreq_freq_transition_begin(policy, &freqs); 1512 cpufreq_freq_transition_begin(policy, &freqs);
1538 cpufreq_freq_transition_end(policy, &freqs, 0); 1513 cpufreq_freq_transition_end(policy, &freqs, 0);
1539} 1514}
@@ -1583,22 +1558,21 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu)
1583} 1558}
1584EXPORT_SYMBOL(cpufreq_quick_get_max); 1559EXPORT_SYMBOL(cpufreq_quick_get_max);
1585 1560
1586static unsigned int __cpufreq_get(unsigned int cpu) 1561static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1587{ 1562{
1588 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1589 unsigned int ret_freq = 0; 1563 unsigned int ret_freq = 0;
1590 1564
1591 if (!cpufreq_driver->get) 1565 if (!cpufreq_driver->get)
1592 return ret_freq; 1566 return ret_freq;
1593 1567
1594 ret_freq = cpufreq_driver->get(cpu); 1568 ret_freq = cpufreq_driver->get(policy->cpu);
1595 1569
1596 if (ret_freq && policy->cur && 1570 if (ret_freq && policy->cur &&
1597 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1571 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1598 /* verify no discrepancy between actual and 1572 /* verify no discrepancy between actual and
1599 saved value exists */ 1573 saved value exists */
1600 if (unlikely(ret_freq != policy->cur)) { 1574 if (unlikely(ret_freq != policy->cur)) {
1601 cpufreq_out_of_sync(cpu, policy->cur, ret_freq); 1575 cpufreq_out_of_sync(policy, ret_freq);
1602 schedule_work(&policy->update); 1576 schedule_work(&policy->update);
1603 } 1577 }
1604 } 1578 }
@@ -1619,7 +1593,7 @@ unsigned int cpufreq_get(unsigned int cpu)
1619 1593
1620 if (policy) { 1594 if (policy) {
1621 down_read(&policy->rwsem); 1595 down_read(&policy->rwsem);
1622 ret_freq = __cpufreq_get(cpu); 1596 ret_freq = __cpufreq_get(policy);
1623 up_read(&policy->rwsem); 1597 up_read(&policy->rwsem);
1624 1598
1625 cpufreq_cpu_put(policy); 1599 cpufreq_cpu_put(policy);
@@ -1682,7 +1656,7 @@ void cpufreq_suspend(void)
1682 1656
1683 pr_debug("%s: Suspending Governors\n", __func__); 1657 pr_debug("%s: Suspending Governors\n", __func__);
1684 1658
1685 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { 1659 for_each_policy(policy) {
1686 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 1660 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1687 pr_err("%s: Failed to stop governor for policy: %p\n", 1661 pr_err("%s: Failed to stop governor for policy: %p\n",
1688 __func__, policy); 1662 __func__, policy);
@@ -1716,7 +1690,7 @@ void cpufreq_resume(void)
1716 1690
1717 pr_debug("%s: Resuming Governors\n", __func__); 1691 pr_debug("%s: Resuming Governors\n", __func__);
1718 1692
1719 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { 1693 for_each_policy(policy) {
1720 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 1694 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1721 pr_err("%s: Failed to resume driver: %p\n", __func__, 1695 pr_err("%s: Failed to resume driver: %p\n", __func__,
1722 policy); 1696 policy);
@@ -2006,10 +1980,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
2006} 1980}
2007EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1981EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2008 1982
2009/*
2010 * when "event" is CPUFREQ_GOV_LIMITS
2011 */
2012
2013static int __cpufreq_governor(struct cpufreq_policy *policy, 1983static int __cpufreq_governor(struct cpufreq_policy *policy,
2014 unsigned int event) 1984 unsigned int event)
2015{ 1985{
@@ -2107,7 +2077,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
2107 2077
2108 governor->initialized = 0; 2078 governor->initialized = 0;
2109 err = -EBUSY; 2079 err = -EBUSY;
2110 if (__find_governor(governor->name) == NULL) { 2080 if (!find_governor(governor->name)) {
2111 err = 0; 2081 err = 0;
2112 list_add(&governor->governor_list, &cpufreq_governor_list); 2082 list_add(&governor->governor_list, &cpufreq_governor_list);
2113 } 2083 }
@@ -2307,8 +2277,7 @@ int cpufreq_update_policy(unsigned int cpu)
2307 policy->cur = new_policy.cur; 2277 policy->cur = new_policy.cur;
2308 } else { 2278 } else {
2309 if (policy->cur != new_policy.cur && has_target()) 2279 if (policy->cur != new_policy.cur && has_target())
2310 cpufreq_out_of_sync(cpu, policy->cur, 2280 cpufreq_out_of_sync(policy, new_policy.cur);
2311 new_policy.cur);
2312 } 2281 }
2313 } 2282 }
2314 2283
@@ -2364,7 +2333,7 @@ static int cpufreq_boost_set_sw(int state)
2364 struct cpufreq_policy *policy; 2333 struct cpufreq_policy *policy;
2365 int ret = -EINVAL; 2334 int ret = -EINVAL;
2366 2335
2367 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { 2336 for_each_policy(policy) {
2368 freq_table = cpufreq_frequency_get_table(policy->cpu); 2337 freq_table = cpufreq_frequency_get_table(policy->cpu);
2369 if (freq_table) { 2338 if (freq_table) {
2370 ret = cpufreq_frequency_table_cpuinfo(policy, 2339 ret = cpufreq_frequency_table_cpuinfo(policy,
@@ -2454,9 +2423,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2454 2423
2455 pr_debug("trying to register driver %s\n", driver_data->name); 2424 pr_debug("trying to register driver %s\n", driver_data->name);
2456 2425
2457 if (driver_data->setpolicy)
2458 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2459
2460 write_lock_irqsave(&cpufreq_driver_lock, flags); 2426 write_lock_irqsave(&cpufreq_driver_lock, flags);
2461 if (cpufreq_driver) { 2427 if (cpufreq_driver) {
2462 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2428 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2465,6 +2431,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2465 cpufreq_driver = driver_data; 2431 cpufreq_driver = driver_data;
2466 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2432 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2467 2433
2434 if (driver_data->setpolicy)
2435 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2436
2468 if (cpufreq_boost_supported()) { 2437 if (cpufreq_boost_supported()) {
2469 /* 2438 /*
2470 * Check if driver provides function to enable boost - 2439 * Check if driver provides function to enable boost -
@@ -2485,23 +2454,12 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2485 if (ret) 2454 if (ret)
2486 goto err_boost_unreg; 2455 goto err_boost_unreg;
2487 2456
2488 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { 2457 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2489 int i; 2458 list_empty(&cpufreq_policy_list)) {
2490 ret = -ENODEV;
2491
2492 /* check for at least one working CPU */
2493 for (i = 0; i < nr_cpu_ids; i++)
2494 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2495 ret = 0;
2496 break;
2497 }
2498
2499 /* if all ->init() calls failed, unregister */ 2459 /* if all ->init() calls failed, unregister */
2500 if (ret) { 2460 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2501 pr_debug("no CPU initialized for driver %s\n", 2461 driver_data->name);
2502 driver_data->name); 2462 goto err_if_unreg;
2503 goto err_if_unreg;
2504 }
2505 } 2463 }
2506 2464
2507 register_hotcpu_notifier(&cpufreq_cpu_notifier); 2465 register_hotcpu_notifier(&cpufreq_cpu_notifier);
@@ -2556,6 +2514,14 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2556} 2514}
2557EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 2515EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2558 2516
2517/*
2518 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2519 * or mutexes when secondary CPUs are halted.
2520 */
2521static struct syscore_ops cpufreq_syscore_ops = {
2522 .shutdown = cpufreq_suspend,
2523};
2524
2559static int __init cpufreq_core_init(void) 2525static int __init cpufreq_core_init(void)
2560{ 2526{
2561 if (cpufreq_disabled()) 2527 if (cpufreq_disabled())
@@ -2564,6 +2530,8 @@ static int __init cpufreq_core_init(void)
2564 cpufreq_global_kobject = kobject_create(); 2530 cpufreq_global_kobject = kobject_create();
2565 BUG_ON(!cpufreq_global_kobject); 2531 BUG_ON(!cpufreq_global_kobject);
2566 2532
2533 register_syscore_ops(&cpufreq_syscore_ops);
2534
2567 return 0; 2535 return 0;
2568} 2536}
2569core_initcall(cpufreq_core_init); 2537core_initcall(cpufreq_core_init);
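The cpufreq.c hunks above replace the open-coded walks over cpufreq_policy_list with a for_each_policy() iterator, make cpufreq_out_of_sync() and __cpufreq_get() take the policy pointer instead of a CPU number, and clear per_cpu(cpufreq_cpu_data) under the write lock in __cpufreq_remove_dev_finish(). The iterator itself is added earlier in the patch and is not visible in this excerpt; a minimal sketch of what it amounts to, inferred from how it is used here:

	/* Sketch only (assumed definition): walk every active policy. */
	#define for_each_policy(__policy)				\
		list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

Passing the policy straight into cpufreq_out_of_sync() is what lets that function drop its own cpufreq_driver_lock read lock and per-CPU lookup.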
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 0cd9b4dcef99..5e370a30a964 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -18,7 +18,6 @@
18static spinlock_t cpufreq_stats_lock; 18static spinlock_t cpufreq_stats_lock;
19 19
20struct cpufreq_stats { 20struct cpufreq_stats {
21 unsigned int cpu;
22 unsigned int total_trans; 21 unsigned int total_trans;
23 unsigned long long last_time; 22 unsigned long long last_time;
24 unsigned int max_state; 23 unsigned int max_state;
@@ -31,50 +30,33 @@ struct cpufreq_stats {
31#endif 30#endif
32}; 31};
33 32
34static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table); 33static int cpufreq_stats_update(struct cpufreq_stats *stats)
35
36struct cpufreq_stats_attribute {
37 struct attribute attr;
38 ssize_t(*show) (struct cpufreq_stats *, char *);
39};
40
41static int cpufreq_stats_update(unsigned int cpu)
42{ 34{
43 struct cpufreq_stats *stat; 35 unsigned long long cur_time = get_jiffies_64();
44 unsigned long long cur_time;
45 36
46 cur_time = get_jiffies_64();
47 spin_lock(&cpufreq_stats_lock); 37 spin_lock(&cpufreq_stats_lock);
48 stat = per_cpu(cpufreq_stats_table, cpu); 38 stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
49 if (stat->time_in_state) 39 stats->last_time = cur_time;
50 stat->time_in_state[stat->last_index] +=
51 cur_time - stat->last_time;
52 stat->last_time = cur_time;
53 spin_unlock(&cpufreq_stats_lock); 40 spin_unlock(&cpufreq_stats_lock);
54 return 0; 41 return 0;
55} 42}
56 43
57static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) 44static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
58{ 45{
59 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 46 return sprintf(buf, "%d\n", policy->stats->total_trans);
60 if (!stat)
61 return 0;
62 return sprintf(buf, "%d\n",
63 per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
64} 47}
65 48
66static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) 49static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
67{ 50{
51 struct cpufreq_stats *stats = policy->stats;
68 ssize_t len = 0; 52 ssize_t len = 0;
69 int i; 53 int i;
70 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 54
71 if (!stat) 55 cpufreq_stats_update(stats);
72 return 0; 56 for (i = 0; i < stats->state_num; i++) {
73 cpufreq_stats_update(stat->cpu); 57 len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
74 for (i = 0; i < stat->state_num; i++) {
75 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
76 (unsigned long long) 58 (unsigned long long)
77 jiffies_64_to_clock_t(stat->time_in_state[i])); 59 jiffies_64_to_clock_t(stats->time_in_state[i]));
78 } 60 }
79 return len; 61 return len;
80} 62}
@@ -82,38 +64,35 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
82#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 64#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
83static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) 65static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
84{ 66{
67 struct cpufreq_stats *stats = policy->stats;
85 ssize_t len = 0; 68 ssize_t len = 0;
86 int i, j; 69 int i, j;
87 70
88 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
89 if (!stat)
90 return 0;
91 cpufreq_stats_update(stat->cpu);
92 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); 71 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
93 len += snprintf(buf + len, PAGE_SIZE - len, " : "); 72 len += snprintf(buf + len, PAGE_SIZE - len, " : ");
94 for (i = 0; i < stat->state_num; i++) { 73 for (i = 0; i < stats->state_num; i++) {
95 if (len >= PAGE_SIZE) 74 if (len >= PAGE_SIZE)
96 break; 75 break;
97 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", 76 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
98 stat->freq_table[i]); 77 stats->freq_table[i]);
99 } 78 }
100 if (len >= PAGE_SIZE) 79 if (len >= PAGE_SIZE)
101 return PAGE_SIZE; 80 return PAGE_SIZE;
102 81
103 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 82 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
104 83
105 for (i = 0; i < stat->state_num; i++) { 84 for (i = 0; i < stats->state_num; i++) {
106 if (len >= PAGE_SIZE) 85 if (len >= PAGE_SIZE)
107 break; 86 break;
108 87
109 len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ", 88 len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
110 stat->freq_table[i]); 89 stats->freq_table[i]);
111 90
112 for (j = 0; j < stat->state_num; j++) { 91 for (j = 0; j < stats->state_num; j++) {
113 if (len >= PAGE_SIZE) 92 if (len >= PAGE_SIZE)
114 break; 93 break;
115 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", 94 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
116 stat->trans_table[i*stat->max_state+j]); 95 stats->trans_table[i*stats->max_state+j]);
117 } 96 }
118 if (len >= PAGE_SIZE) 97 if (len >= PAGE_SIZE)
119 break; 98 break;
@@ -142,28 +121,29 @@ static struct attribute_group stats_attr_group = {
142 .name = "stats" 121 .name = "stats"
143}; 122};
144 123
145static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) 124static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
146{ 125{
147 int index; 126 int index;
148 for (index = 0; index < stat->max_state; index++) 127 for (index = 0; index < stats->max_state; index++)
149 if (stat->freq_table[index] == freq) 128 if (stats->freq_table[index] == freq)
150 return index; 129 return index;
151 return -1; 130 return -1;
152} 131}
153 132
154static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) 133static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
155{ 134{
156 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 135 struct cpufreq_stats *stats = policy->stats;
157 136
158 if (!stat) 137 /* Already freed */
138 if (!stats)
159 return; 139 return;
160 140
161 pr_debug("%s: Free stat table\n", __func__); 141 pr_debug("%s: Free stats table\n", __func__);
162 142
163 sysfs_remove_group(&policy->kobj, &stats_attr_group); 143 sysfs_remove_group(&policy->kobj, &stats_attr_group);
164 kfree(stat->time_in_state); 144 kfree(stats->time_in_state);
165 kfree(stat); 145 kfree(stats);
166 per_cpu(cpufreq_stats_table, policy->cpu) = NULL; 146 policy->stats = NULL;
167} 147}
168 148
169static void cpufreq_stats_free_table(unsigned int cpu) 149static void cpufreq_stats_free_table(unsigned int cpu)
@@ -174,37 +154,33 @@ static void cpufreq_stats_free_table(unsigned int cpu)
174 if (!policy) 154 if (!policy)
175 return; 155 return;
176 156
177 if (cpufreq_frequency_get_table(policy->cpu)) 157 __cpufreq_stats_free_table(policy);
178 __cpufreq_stats_free_table(policy);
179 158
180 cpufreq_cpu_put(policy); 159 cpufreq_cpu_put(policy);
181} 160}
182 161
183static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) 162static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
184{ 163{
185 unsigned int i, count = 0, ret = 0; 164 unsigned int i = 0, count = 0, ret = -ENOMEM;
186 struct cpufreq_stats *stat; 165 struct cpufreq_stats *stats;
187 unsigned int alloc_size; 166 unsigned int alloc_size;
188 unsigned int cpu = policy->cpu; 167 unsigned int cpu = policy->cpu;
189 struct cpufreq_frequency_table *pos, *table; 168 struct cpufreq_frequency_table *pos, *table;
190 169
 170 /* We need a cpufreq table to create the stats table */
191 table = cpufreq_frequency_get_table(cpu); 171 table = cpufreq_frequency_get_table(cpu);
192 if (unlikely(!table)) 172 if (unlikely(!table))
193 return 0; 173 return 0;
194 174
195 if (per_cpu(cpufreq_stats_table, cpu)) 175 /* stats already initialized */
196 return -EBUSY; 176 if (policy->stats)
197 stat = kzalloc(sizeof(*stat), GFP_KERNEL); 177 return -EEXIST;
198 if ((stat) == NULL)
199 return -ENOMEM;
200
201 ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
202 if (ret)
203 goto error_out;
204 178
205 stat->cpu = cpu; 179 stats = kzalloc(sizeof(*stats), GFP_KERNEL);
206 per_cpu(cpufreq_stats_table, cpu) = stat; 180 if (!stats)
181 return -ENOMEM;
207 182
183 /* Find total allocation size */
208 cpufreq_for_each_valid_entry(pos, table) 184 cpufreq_for_each_valid_entry(pos, table)
209 count++; 185 count++;
210 186
@@ -213,32 +189,40 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
213#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 189#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
214 alloc_size += count * count * sizeof(int); 190 alloc_size += count * count * sizeof(int);
215#endif 191#endif
216 stat->max_state = count; 192
217 stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); 193 /* Allocate memory for time_in_state/freq_table/trans_table in one go */
218 if (!stat->time_in_state) { 194 stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
219 ret = -ENOMEM; 195 if (!stats->time_in_state)
220 goto error_alloc; 196 goto free_stat;
221 } 197
222 stat->freq_table = (unsigned int *)(stat->time_in_state + count); 198 stats->freq_table = (unsigned int *)(stats->time_in_state + count);
223 199
224#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 200#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
225 stat->trans_table = stat->freq_table + count; 201 stats->trans_table = stats->freq_table + count;
226#endif 202#endif
227 i = 0; 203
204 stats->max_state = count;
205
206 /* Find valid-unique entries */
228 cpufreq_for_each_valid_entry(pos, table) 207 cpufreq_for_each_valid_entry(pos, table)
229 if (freq_table_get_index(stat, pos->frequency) == -1) 208 if (freq_table_get_index(stats, pos->frequency) == -1)
230 stat->freq_table[i++] = pos->frequency; 209 stats->freq_table[i++] = pos->frequency;
231 stat->state_num = i; 210
232 spin_lock(&cpufreq_stats_lock); 211 stats->state_num = i;
233 stat->last_time = get_jiffies_64(); 212 stats->last_time = get_jiffies_64();
234 stat->last_index = freq_table_get_index(stat, policy->cur); 213 stats->last_index = freq_table_get_index(stats, policy->cur);
235 spin_unlock(&cpufreq_stats_lock); 214
236 return 0; 215 policy->stats = stats;
237error_alloc: 216 ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
238 sysfs_remove_group(&policy->kobj, &stats_attr_group); 217 if (!ret)
239error_out: 218 return 0;
240 kfree(stat); 219
241 per_cpu(cpufreq_stats_table, cpu) = NULL; 220 /* We failed, release resources */
221 policy->stats = NULL;
222 kfree(stats->time_in_state);
223free_stat:
224 kfree(stats);
225
242 return ret; 226 return ret;
243} 227}
244 228
@@ -259,30 +243,12 @@ static void cpufreq_stats_create_table(unsigned int cpu)
259 cpufreq_cpu_put(policy); 243 cpufreq_cpu_put(policy);
260} 244}
261 245
262static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
263{
264 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
265 policy->last_cpu);
266
267 pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
268 policy->cpu, policy->last_cpu);
269 per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
270 policy->last_cpu);
271 per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
272 stat->cpu = policy->cpu;
273}
274
275static int cpufreq_stat_notifier_policy(struct notifier_block *nb, 246static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
276 unsigned long val, void *data) 247 unsigned long val, void *data)
277{ 248{
278 int ret = 0; 249 int ret = 0;
279 struct cpufreq_policy *policy = data; 250 struct cpufreq_policy *policy = data;
280 251
281 if (val == CPUFREQ_UPDATE_POLICY_CPU) {
282 cpufreq_stats_update_policy_cpu(policy);
283 return 0;
284 }
285
286 if (val == CPUFREQ_CREATE_POLICY) 252 if (val == CPUFREQ_CREATE_POLICY)
287 ret = __cpufreq_stats_create_table(policy); 253 ret = __cpufreq_stats_create_table(policy);
288 else if (val == CPUFREQ_REMOVE_POLICY) 254 else if (val == CPUFREQ_REMOVE_POLICY)
@@ -295,35 +261,45 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
295 unsigned long val, void *data) 261 unsigned long val, void *data)
296{ 262{
297 struct cpufreq_freqs *freq = data; 263 struct cpufreq_freqs *freq = data;
298 struct cpufreq_stats *stat; 264 struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
265 struct cpufreq_stats *stats;
299 int old_index, new_index; 266 int old_index, new_index;
300 267
301 if (val != CPUFREQ_POSTCHANGE) 268 if (!policy) {
269 pr_err("%s: No policy found\n", __func__);
302 return 0; 270 return 0;
271 }
303 272
304 stat = per_cpu(cpufreq_stats_table, freq->cpu); 273 if (val != CPUFREQ_POSTCHANGE)
305 if (!stat) 274 goto put_policy;
306 return 0;
307 275
308 old_index = stat->last_index; 276 if (!policy->stats) {
309 new_index = freq_table_get_index(stat, freq->new); 277 pr_debug("%s: No stats found\n", __func__);
278 goto put_policy;
279 }
310 280
311 /* We can't do stat->time_in_state[-1]= .. */ 281 stats = policy->stats;
312 if (old_index == -1 || new_index == -1) 282
313 return 0; 283 old_index = stats->last_index;
284 new_index = freq_table_get_index(stats, freq->new);
314 285
315 cpufreq_stats_update(freq->cpu); 286 /* We can't do stats->time_in_state[-1]= .. */
287 if (old_index == -1 || new_index == -1)
288 goto put_policy;
316 289
317 if (old_index == new_index) 290 if (old_index == new_index)
318 return 0; 291 goto put_policy;
319 292
320 spin_lock(&cpufreq_stats_lock); 293 cpufreq_stats_update(stats);
321 stat->last_index = new_index; 294
295 stats->last_index = new_index;
322#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 296#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
323 stat->trans_table[old_index * stat->max_state + new_index]++; 297 stats->trans_table[old_index * stats->max_state + new_index]++;
324#endif 298#endif
325 stat->total_trans++; 299 stats->total_trans++;
326 spin_unlock(&cpufreq_stats_lock); 300
301put_policy:
302 cpufreq_cpu_put(policy);
327 return 0; 303 return 0;
328} 304}
329 305
@@ -374,8 +350,7 @@ static void __exit cpufreq_stats_exit(void)
374} 350}
375 351
376MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>"); 352MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
377MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats " 353MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
378 "through sysfs filesystem");
379MODULE_LICENSE("GPL"); 354MODULE_LICENSE("GPL");
380 355
381module_init(cpufreq_stats_init); 356module_init(cpufreq_stats_init);
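In the cpufreq_stats.c changes above, the per-CPU stats table becomes a per-policy policy->stats pointer, and time_in_state, freq_table and (with CONFIG_CPU_FREQ_STAT_DETAILS) trans_table are carved out of one allocation. A stand-alone sketch of the same carving, in plain C outside the kernel (names and the use of calloc() are illustrative only):

	#include <stdlib.h>

	struct stats_layout {
		unsigned long long *time_in_state;	/* count entries         */
		unsigned int *freq_table;		/* count entries         */
		unsigned int *trans_table;		/* count * count entries */
	};

	static int stats_alloc(struct stats_layout *s, unsigned int count)
	{
		size_t sz = count * sizeof(*s->time_in_state) +
			    count * sizeof(*s->freq_table) +
			    count * count * sizeof(*s->trans_table);

		s->time_in_state = calloc(1, sz);
		if (!s->time_in_state)
			return -1;

		/* freq_table starts right after the time_in_state array... */
		s->freq_table = (unsigned int *)(s->time_in_state + count);
		/* ...and trans_table right after freq_table. */
		s->trans_table = s->freq_table + count;
		return 0;
	}

Keeping the three arrays contiguous lets __cpufreq_stats_free_table() release everything with a single kfree() of time_in_state plus the struct itself, as the hunks above show.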
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 742eefba12c2..872c5772c5d3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -148,6 +148,8 @@ struct perf_limits {
148 int32_t min_perf; 148 int32_t min_perf;
149 int max_policy_pct; 149 int max_policy_pct;
150 int max_sysfs_pct; 150 int max_sysfs_pct;
151 int min_policy_pct;
152 int min_sysfs_pct;
151}; 153};
152 154
153static struct perf_limits limits = { 155static struct perf_limits limits = {
@@ -159,6 +161,8 @@ static struct perf_limits limits = {
159 .min_perf = 0, 161 .min_perf = 0,
160 .max_policy_pct = 100, 162 .max_policy_pct = 100,
161 .max_sysfs_pct = 100, 163 .max_sysfs_pct = 100,
164 .min_policy_pct = 0,
165 .min_sysfs_pct = 0,
162}; 166};
163 167
164static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 168static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -338,6 +342,33 @@ static void __init intel_pstate_debug_expose_params(void)
338 return sprintf(buf, "%u\n", limits.object); \ 342 return sprintf(buf, "%u\n", limits.object); \
339 } 343 }
340 344
345static ssize_t show_turbo_pct(struct kobject *kobj,
346 struct attribute *attr, char *buf)
347{
348 struct cpudata *cpu;
349 int total, no_turbo, turbo_pct;
350 uint32_t turbo_fp;
351
352 cpu = all_cpu_data[0];
353
354 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
355 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
356 turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
357 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
358 return sprintf(buf, "%u\n", turbo_pct);
359}
360
361static ssize_t show_num_pstates(struct kobject *kobj,
362 struct attribute *attr, char *buf)
363{
364 struct cpudata *cpu;
365 int total;
366
367 cpu = all_cpu_data[0];
368 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
369 return sprintf(buf, "%u\n", total);
370}
371
341static ssize_t show_no_turbo(struct kobject *kobj, 372static ssize_t show_no_turbo(struct kobject *kobj,
342 struct attribute *attr, char *buf) 373 struct attribute *attr, char *buf)
343{ 374{
@@ -404,7 +435,9 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
404 ret = sscanf(buf, "%u", &input); 435 ret = sscanf(buf, "%u", &input);
405 if (ret != 1) 436 if (ret != 1)
406 return -EINVAL; 437 return -EINVAL;
407 limits.min_perf_pct = clamp_t(int, input, 0 , 100); 438
439 limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
440 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
408 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 441 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
409 442
410 if (hwp_active) 443 if (hwp_active)
@@ -418,11 +451,15 @@ show_one(min_perf_pct, min_perf_pct);
418define_one_global_rw(no_turbo); 451define_one_global_rw(no_turbo);
419define_one_global_rw(max_perf_pct); 452define_one_global_rw(max_perf_pct);
420define_one_global_rw(min_perf_pct); 453define_one_global_rw(min_perf_pct);
454define_one_global_ro(turbo_pct);
455define_one_global_ro(num_pstates);
421 456
422static struct attribute *intel_pstate_attributes[] = { 457static struct attribute *intel_pstate_attributes[] = {
423 &no_turbo.attr, 458 &no_turbo.attr,
424 &max_perf_pct.attr, 459 &max_perf_pct.attr,
425 &min_perf_pct.attr, 460 &min_perf_pct.attr,
461 &turbo_pct.attr,
462 &num_pstates.attr,
426 NULL 463 NULL
427}; 464};
428 465
@@ -825,6 +862,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
825 ICPU(0x46, core_params), 862 ICPU(0x46, core_params),
826 ICPU(0x47, core_params), 863 ICPU(0x47, core_params),
827 ICPU(0x4c, byt_params), 864 ICPU(0x4c, byt_params),
865 ICPU(0x4e, core_params),
828 ICPU(0x4f, core_params), 866 ICPU(0x4f, core_params),
829 ICPU(0x56, core_params), 867 ICPU(0x56, core_params),
830 {} 868 {}
@@ -887,7 +925,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
887 if (!policy->cpuinfo.max_freq) 925 if (!policy->cpuinfo.max_freq)
888 return -ENODEV; 926 return -ENODEV;
889 927
890 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 928 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
929 policy->max >= policy->cpuinfo.max_freq) {
930 limits.min_policy_pct = 100;
891 limits.min_perf_pct = 100; 931 limits.min_perf_pct = 100;
892 limits.min_perf = int_tofp(1); 932 limits.min_perf = int_tofp(1);
893 limits.max_policy_pct = 100; 933 limits.max_policy_pct = 100;
@@ -897,8 +937,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
897 return 0; 937 return 0;
898 } 938 }
899 939
900 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 940 limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
901 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 941 limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
942 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
902 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 943 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
903 944
904 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; 945 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
@@ -978,6 +1019,7 @@ static struct cpufreq_driver intel_pstate_driver = {
978 1019
979static int __initdata no_load; 1020static int __initdata no_load;
980static int __initdata no_hwp; 1021static int __initdata no_hwp;
1022static int __initdata hwp_only;
981static unsigned int force_load; 1023static unsigned int force_load;
982 1024
983static int intel_pstate_msrs_not_valid(void) 1025static int intel_pstate_msrs_not_valid(void)
@@ -1175,6 +1217,9 @@ static int __init intel_pstate_init(void)
1175 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp) 1217 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
1176 intel_pstate_hwp_enable(); 1218 intel_pstate_hwp_enable();
1177 1219
1220 if (!hwp_active && hwp_only)
1221 goto out;
1222
1178 rc = cpufreq_register_driver(&intel_pstate_driver); 1223 rc = cpufreq_register_driver(&intel_pstate_driver);
1179 if (rc) 1224 if (rc)
1180 goto out; 1225 goto out;
@@ -1209,6 +1254,8 @@ static int __init intel_pstate_setup(char *str)
1209 no_hwp = 1; 1254 no_hwp = 1;
1210 if (!strcmp(str, "force")) 1255 if (!strcmp(str, "force"))
1211 force_load = 1; 1256 force_load = 1;
1257 if (!strcmp(str, "hwp_only"))
1258 hwp_only = 1;
1212 return 0; 1259 return 0;
1213} 1260}
1214early_param("intel_pstate", intel_pstate_setup); 1261early_param("intel_pstate", intel_pstate_setup);
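The intel_pstate.c hunks above add read-only turbo_pct and num_pstates attributes and split the minimum limit into min_policy_pct (from the cpufreq policy) and min_sysfs_pct (from the min_perf_pct sysfs file), keeping whichever is higher. The driver does the percentage math with its fixed-point helpers (int_tofp()/div_fp()/mul_fp()); the stand-alone sketch below shows the same arithmetic with plain integers and made-up P-state numbers:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical P-state range for illustration. */
		int min_pstate = 8, max_pstate = 24, turbo_pstate = 32;

		int total    = turbo_pstate - min_pstate + 1;	/* num_pstates        */
		int no_turbo = max_pstate - min_pstate + 1;	/* non-turbo P-states */
		int turbo_pct = 100 - (no_turbo * 100) / total;

		/* min_perf_pct is the stricter of the policy and sysfs limits. */
		int min_policy_pct = 40, min_sysfs_pct = 25;
		int min_perf_pct = min_policy_pct > min_sysfs_pct ?
				   min_policy_pct : min_sysfs_pct;

		printf("num_pstates=%d turbo_pct=%d min_perf_pct=%d\n",
		       total, turbo_pct, min_perf_pct);
		return 0;
	}

With the values above this prints num_pstates=25 turbo_pct=32 min_perf_pct=40, i.e. 32% of the available P-states are turbo states.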
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
index 25fbd6a1374f..f0913eee2f50 100644
--- a/drivers/cpufreq/ls1x-cpufreq.c
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -210,7 +210,6 @@ out:
210static struct platform_driver ls1x_cpufreq_platdrv = { 210static struct platform_driver ls1x_cpufreq_platdrv = {
211 .driver = { 211 .driver = {
212 .name = "ls1x-cpufreq", 212 .name = "ls1x-cpufreq",
213 .owner = THIS_MODULE,
214 }, 213 },
215 .probe = ls1x_cpufreq_probe, 214 .probe = ls1x_cpufreq_probe,
216 .remove = ls1x_cpufreq_remove, 215 .remove = ls1x_cpufreq_remove,
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
new file mode 100644
index 000000000000..ffa3389e535b
--- /dev/null
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -0,0 +1,136 @@
1/*
2 * SFI Performance States Driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
14 * Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com>
15 */
16
17#include <linux/cpufreq.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/sfi.h>
22#include <linux/slab.h>
23#include <linux/smp.h>
24
25#include <asm/msr.h>
26
27struct cpufreq_frequency_table *freq_table;
28static struct sfi_freq_table_entry *sfi_cpufreq_array;
29static int num_freq_table_entries;
30
31static int sfi_parse_freq(struct sfi_table_header *table)
32{
33 struct sfi_table_simple *sb;
34 struct sfi_freq_table_entry *pentry;
35 int totallen;
36
37 sb = (struct sfi_table_simple *)table;
38 num_freq_table_entries = SFI_GET_NUM_ENTRIES(sb,
39 struct sfi_freq_table_entry);
40 if (num_freq_table_entries <= 1) {
41 pr_err("No p-states discovered\n");
42 return -ENODEV;
43 }
44
45 pentry = (struct sfi_freq_table_entry *)sb->pentry;
46 totallen = num_freq_table_entries * sizeof(*pentry);
47
48 sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
49 if (!sfi_cpufreq_array)
50 return -ENOMEM;
51
52 memcpy(sfi_cpufreq_array, pentry, totallen);
53
54 return 0;
55}
56
57static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
58{
59 unsigned int next_perf_state = 0; /* Index into perf table */
60 u32 lo, hi;
61
62 next_perf_state = policy->freq_table[index].driver_data;
63
64 rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
65 lo = (lo & ~INTEL_PERF_CTL_MASK) |
66 ((u32) sfi_cpufreq_array[next_perf_state].ctrl_val &
67 INTEL_PERF_CTL_MASK);
68 wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
69
70 return 0;
71}
72
73static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
74{
75 policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
76 policy->cpuinfo.transition_latency = 100000; /* 100us */
77
78 return cpufreq_table_validate_and_show(policy, freq_table);
79}
80
81static struct cpufreq_driver sfi_cpufreq_driver = {
82 .flags = CPUFREQ_CONST_LOOPS,
83 .verify = cpufreq_generic_frequency_table_verify,
84 .target_index = sfi_cpufreq_target,
85 .init = sfi_cpufreq_cpu_init,
86 .name = "sfi-cpufreq",
87 .attr = cpufreq_generic_attr,
88};
89
90static int __init sfi_cpufreq_init(void)
91{
92 int ret, i;
93
94 /* parse the freq table from SFI */
95 ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
96 if (ret)
97 return ret;
98
99 freq_table = kzalloc(sizeof(*freq_table) *
100 (num_freq_table_entries + 1), GFP_KERNEL);
101 if (!freq_table) {
102 ret = -ENOMEM;
103 goto err_free_array;
104 }
105
106 for (i = 0; i < num_freq_table_entries; i++) {
107 freq_table[i].driver_data = i;
108 freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000;
109 }
110 freq_table[i].frequency = CPUFREQ_TABLE_END;
111
112 ret = cpufreq_register_driver(&sfi_cpufreq_driver);
113 if (ret)
114 goto err_free_tbl;
115
116 return ret;
117
118err_free_tbl:
119 kfree(freq_table);
120err_free_array:
121 kfree(sfi_cpufreq_array);
122 return ret;
123}
124late_initcall(sfi_cpufreq_init);
125
126static void __exit sfi_cpufreq_exit(void)
127{
128 cpufreq_unregister_driver(&sfi_cpufreq_driver);
129 kfree(freq_table);
130 kfree(sfi_cpufreq_array);
131}
132module_exit(sfi_cpufreq_exit);
133
134MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>");
135MODULE_DESCRIPTION("SFI Performance-States Driver");
136MODULE_LICENSE("GPL");
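sfi_cpufreq_init() above builds the cpufreq table by converting the SFI frequency entries (reported in MHz) to kHz, keeping the entry index in driver_data so sfi_cpufreq_target() can look up the matching PERF_CTL value, and terminating the table with CPUFREQ_TABLE_END. A stand-alone sketch of that conversion with minimal stand-in types (the real definitions live in <linux/sfi.h> and <linux/cpufreq.h>):

	#include <stdlib.h>

	#define TABLE_END (~0u)	/* stand-in for CPUFREQ_TABLE_END */

	struct sfi_entry  { unsigned int freq_mhz; unsigned int ctrl_val; };
	struct freq_entry { unsigned int driver_data; unsigned int frequency; };

	static struct freq_entry *build_table(const struct sfi_entry *sfi, int n)
	{
		struct freq_entry *tbl = calloc(n + 1, sizeof(*tbl));
		int i;

		if (!tbl)
			return NULL;
		for (i = 0; i < n; i++) {
			tbl[i].driver_data = i;	/* index into the SFI/PERF_CTL array */
			tbl[i].frequency = sfi[i].freq_mhz * 1000;	/* MHz -> kHz */
		}
		tbl[i].frequency = TABLE_END;	/* sentinel */
		return tbl;
	}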
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index e3e225fe6b45..40c34faffe59 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -182,6 +182,10 @@ static int __init bl_idle_init(void)
182 */ 182 */
183 if (!of_match_node(compatible_machine_match, root)) 183 if (!of_match_node(compatible_machine_match, root))
184 return -ENODEV; 184 return -ENODEV;
185
186 if (!mcpm_is_available())
187 return -EUNATCH;
188
185 /* 189 /*
186 * For now the differentiation between little and big cores 190 * For now the differentiation between little and big cores
187 * is based on the part number. A7 cores are considered little 191 * is based on the part number. A7 cores are considered little
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3891f6781298..64281bb2f650 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -88,4 +88,16 @@ config ARM_EXYNOS5_BUS_DEVFREQ
88 It reads PPMU counters of memory controllers and adjusts the 88 It reads PPMU counters of memory controllers and adjusts the
89 operating frequencies and voltages with OPP support. 89 operating frequencies and voltages with OPP support.
90 90
91config ARM_TEGRA_DEVFREQ
92 tristate "Tegra DEVFREQ Driver"
93 depends on ARCH_TEGRA_124_SOC
94 select DEVFREQ_GOV_SIMPLE_ONDEMAND
95 select PM_OPP
96 help
97 This adds the DEVFREQ driver for the Tegra family of SoCs.
98 It reads ACTMON counters of memory controllers and adjusts the
99 operating frequencies and voltages with OPP support.
100
101source "drivers/devfreq/event/Kconfig"
102
91endif # PM_DEVFREQ 103endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 16138c9e0d58..5134f9ee983d 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_PM_DEVFREQ) += devfreq.o 1obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
2obj-$(CONFIG_PM_DEVFREQ_EVENT) += devfreq-event.o
2obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o 3obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
3obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o 4obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
4obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o 5obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
@@ -7,3 +8,7 @@ obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
7# DEVFREQ Drivers 8# DEVFREQ Drivers
8obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/ 9obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/
9obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/ 10obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/
11obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
12
13# DEVFREQ Event Drivers
14obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
new file mode 100644
index 000000000000..f304a0289eda
--- /dev/null
+++ b/drivers/devfreq/devfreq-event.c
@@ -0,0 +1,494 @@
1/*
2 * devfreq-event: a framework to provide raw data and events of devfreq devices
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This driver is based on drivers/devfreq/devfreq.c.
12 */
13
14#include <linux/devfreq-event.h>
15#include <linux/kernel.h>
16#include <linux/err.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/list.h>
21#include <linux/of.h>
22
23static struct class *devfreq_event_class;
24
 25/* The list of all devfreq-event devices */
26static LIST_HEAD(devfreq_event_list);
27static DEFINE_MUTEX(devfreq_event_list_lock);
28
29#define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev)
30
31/**
32 * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
33 * the enable_count of devfreq-event dev.
34 * @edev : the devfreq-event device
35 *
 36 * Note that this function increases the enable_count and enables the
 37 * devfreq-event device. The devfreq-event device should be enabled before
 38 * it is used by a devfreq device.
39 */
40int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
41{
42 int ret = 0;
43
44 if (!edev || !edev->desc)
45 return -EINVAL;
46
47 mutex_lock(&edev->lock);
48 if (edev->desc->ops && edev->desc->ops->enable
49 && edev->enable_count == 0) {
50 ret = edev->desc->ops->enable(edev);
51 if (ret < 0)
52 goto err;
53 }
54 edev->enable_count++;
55err:
56 mutex_unlock(&edev->lock);
57
58 return ret;
59}
60EXPORT_SYMBOL_GPL(devfreq_event_enable_edev);
61
62/**
63 * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
64 * the enable_count of the devfreq-event dev.
65 * @edev : the devfreq-event device
66 *
 67 * Note that this function decreases the enable_count and disables the
 68 * devfreq-event device. After the devfreq-event device is disabled, a
 69 * devfreq device can't use the devfreq-event device for get/set/reset
 70 * operations.
71 */
72int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
73{
74 int ret = 0;
75
76 if (!edev || !edev->desc)
77 return -EINVAL;
78
79 mutex_lock(&edev->lock);
80 if (edev->enable_count <= 0) {
81 dev_warn(&edev->dev, "unbalanced enable_count\n");
82 ret = -EIO;
83 goto err;
84 }
85
86 if (edev->desc->ops && edev->desc->ops->disable
87 && edev->enable_count == 1) {
88 ret = edev->desc->ops->disable(edev);
89 if (ret < 0)
90 goto err;
91 }
92 edev->enable_count--;
93err:
94 mutex_unlock(&edev->lock);
95
96 return ret;
97}
98EXPORT_SYMBOL_GPL(devfreq_event_disable_edev);
99
100/**
101 * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
102 * not.
103 * @edev : the devfreq-event device
104 *
 105 * Note that this function checks whether the devfreq-event dev is enabled.
 106 * It returns true if the devfreq-event dev is enabled and false if it is
 107 * disabled.
108 */
109bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
110{
111 bool enabled = false;
112
113 if (!edev || !edev->desc)
114 return enabled;
115
116 mutex_lock(&edev->lock);
117
118 if (edev->enable_count > 0)
119 enabled = true;
120
121 mutex_unlock(&edev->lock);
122
123 return enabled;
124}
125EXPORT_SYMBOL_GPL(devfreq_event_is_enabled);
126
127/**
128 * devfreq_event_set_event() - Set event to devfreq-event dev to start.
129 * @edev : the devfreq-event device
130 *
 131 * Note that this function sets the event on the devfreq-event device to
 132 * start collecting event data, which may be of various event types.
133 */
134int devfreq_event_set_event(struct devfreq_event_dev *edev)
135{
136 int ret;
137
138 if (!edev || !edev->desc)
139 return -EINVAL;
140
141 if (!edev->desc->ops || !edev->desc->ops->set_event)
142 return -EINVAL;
143
144 if (!devfreq_event_is_enabled(edev))
145 return -EPERM;
146
147 mutex_lock(&edev->lock);
148 ret = edev->desc->ops->set_event(edev);
149 mutex_unlock(&edev->lock);
150
151 return ret;
152}
153EXPORT_SYMBOL_GPL(devfreq_event_set_event);
154
155/**
156 * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
157 * @edev : the devfreq-event device
158 * @edata : the calculated data of devfreq-event device
159 *
 160 * Note that this function gets the calculated event data from the
 161 * devfreq-event dev after stopping the whole measurement sequence.
162 */
163int devfreq_event_get_event(struct devfreq_event_dev *edev,
164 struct devfreq_event_data *edata)
165{
166 int ret;
167
168 if (!edev || !edev->desc)
169 return -EINVAL;
170
171 if (!edev->desc->ops || !edev->desc->ops->get_event)
172 return -EINVAL;
173
174 if (!devfreq_event_is_enabled(edev))
175 return -EINVAL;
176
177 edata->total_count = edata->load_count = 0;
178
179 mutex_lock(&edev->lock);
180 ret = edev->desc->ops->get_event(edev, edata);
181 if (ret < 0)
182 edata->total_count = edata->load_count = 0;
183 mutex_unlock(&edev->lock);
184
185 return ret;
186}
187EXPORT_SYMBOL_GPL(devfreq_event_get_event);
188
189/**
 190 * devfreq_event_reset_event() - Reset all operations of the devfreq-event dev.
191 * @edev : the devfreq-event device
192 *
 193 * Note that this function stops all operations of the devfreq-event dev and
 194 * resets the current event data, restoring the device to its initial state.
195 */
196int devfreq_event_reset_event(struct devfreq_event_dev *edev)
197{
198 int ret = 0;
199
200 if (!edev || !edev->desc)
201 return -EINVAL;
202
203 if (!devfreq_event_is_enabled(edev))
204 return -EPERM;
205
206 mutex_lock(&edev->lock);
207 if (edev->desc->ops && edev->desc->ops->reset)
208 ret = edev->desc->ops->reset(edev);
209 mutex_unlock(&edev->lock);
210
211 return ret;
212}
213EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
214
215/**
216 * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
217 * devicetree.
218 * @dev : the pointer to the given device
 219 * @index : the index into the list of devfreq-event devices
220 *
 221 * Note that this function returns a pointer to the devfreq-event device.
222 */
223struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
224 int index)
225{
226 struct device_node *node;
227 struct devfreq_event_dev *edev;
228
229 if (!dev->of_node) {
230 dev_err(dev, "device does not have a device node entry\n");
231 return ERR_PTR(-EINVAL);
232 }
233
234 node = of_parse_phandle(dev->of_node, "devfreq-events", index);
235 if (!node) {
236 dev_err(dev, "failed to get phandle in %s node\n",
237 dev->of_node->full_name);
238 return ERR_PTR(-ENODEV);
239 }
240
241 mutex_lock(&devfreq_event_list_lock);
242 list_for_each_entry(edev, &devfreq_event_list, node) {
243 if (!strcmp(edev->desc->name, node->name))
244 goto out;
245 }
246 edev = NULL;
247out:
248 mutex_unlock(&devfreq_event_list_lock);
249
250 if (!edev) {
251 dev_err(dev, "unable to get devfreq-event device : %s\n",
252 node->name);
253 of_node_put(node);
254 return ERR_PTR(-ENODEV);
255 }
256
257 of_node_put(node);
258
259 return edev;
260}
261EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
262
263/**
264 * devfreq_event_get_edev_count() - Get the count of devfreq-event dev
265 * @dev : the pointer to the given device
266 *
 267 * Note that this function returns the count of devfreq-event devices.
268 */
269int devfreq_event_get_edev_count(struct device *dev)
270{
271 int count;
272
273 if (!dev->of_node) {
274 dev_err(dev, "device does not have a device node entry\n");
275 return -EINVAL;
276 }
277
278 count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
279 sizeof(u32));
280 if (count < 0 ) {
281 dev_err(dev,
282 "failed to get the count of devfreq-event in %s node\n",
283 dev->of_node->full_name);
284 return count;
285 }
286
287 return count;
288}
289EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count);
290
291static void devfreq_event_release_edev(struct device *dev)
292{
293 struct devfreq_event_dev *edev = to_devfreq_event(dev);
294
295 kfree(edev);
296}
297
298/**
299 * devfreq_event_add_edev() - Add new devfreq-event device.
300 * @dev : the device owning the devfreq-event device being created
 301 * @desc : the devfreq-event device's descriptor, which includes essential
 302 * data for the devfreq-event device.
303 *
 304 * Note that this function adds a new devfreq-event device to the
 305 * devfreq-event class list and registers its device.
306 */
307struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
308 struct devfreq_event_desc *desc)
309{
310 struct devfreq_event_dev *edev;
311 static atomic_t event_no = ATOMIC_INIT(0);
312 int ret;
313
314 if (!dev || !desc)
315 return ERR_PTR(-EINVAL);
316
317 if (!desc->name || !desc->ops)
318 return ERR_PTR(-EINVAL);
319
320 if (!desc->ops->set_event || !desc->ops->get_event)
321 return ERR_PTR(-EINVAL);
322
323 edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL);
324 if (!edev)
325 return ERR_PTR(-ENOMEM);
326
327 mutex_init(&edev->lock);
328 edev->desc = desc;
329 edev->enable_count = 0;
330 edev->dev.parent = dev;
331 edev->dev.class = devfreq_event_class;
332 edev->dev.release = devfreq_event_release_edev;
333
334 dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1);
335 ret = device_register(&edev->dev);
336 if (ret < 0) {
337 put_device(&edev->dev);
338 return ERR_PTR(ret);
339 }
340 dev_set_drvdata(&edev->dev, edev);
341
342 INIT_LIST_HEAD(&edev->node);
343
344 mutex_lock(&devfreq_event_list_lock);
345 list_add(&edev->node, &devfreq_event_list);
346 mutex_unlock(&devfreq_event_list_lock);
347
348 return edev;
349}
350EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
351
352/**
 353 * devfreq_event_remove_edev() - Remove the registered devfreq-event device.
 354 * @edev : the devfreq-event device
355 *
 356 * Note that this function removes the registered devfreq-event device.
357 */
358int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
359{
360 if (!edev)
361 return -EINVAL;
362
363 WARN_ON(edev->enable_count);
364
365 mutex_lock(&devfreq_event_list_lock);
366 list_del(&edev->node);
367 mutex_unlock(&devfreq_event_list_lock);
368
369 device_unregister(&edev->dev);
370
371 return 0;
372}
373EXPORT_SYMBOL_GPL(devfreq_event_remove_edev);
374
375static int devm_devfreq_event_match(struct device *dev, void *res, void *data)
376{
377 struct devfreq_event_dev **r = res;
378
379 if (WARN_ON(!r || !*r))
380 return 0;
381
382 return *r == data;
383}
384
385static void devm_devfreq_event_release(struct device *dev, void *res)
386{
387 devfreq_event_remove_edev(*(struct devfreq_event_dev **)res);
388}
389
390/**
391 * devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
392 * @dev : the device owning the devfreq-event device being created
 393 * @desc : the devfreq-event device's descriptor, which includes essential
 394 * data for the devfreq-event device.
395 *
 396 * Note that this function automatically manages the memory of the
 397 * devfreq-event device using device resource management, simplifying the
 398 * freeing of that memory.
399 */
400struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
401 struct devfreq_event_desc *desc)
402{
403 struct devfreq_event_dev **ptr, *edev;
404
405 ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr), GFP_KERNEL);
406 if (!ptr)
407 return ERR_PTR(-ENOMEM);
408
409 edev = devfreq_event_add_edev(dev, desc);
410 if (IS_ERR(edev)) {
411 devres_free(ptr);
412 return ERR_PTR(-ENOMEM);
413 }
414
415 *ptr = edev;
416 devres_add(dev, ptr);
417
418 return edev;
419}
420EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev);
421
422/**
423 * devm_devfreq_event_remove_edev()- Resource-managed devfreq_event_remove_edev()
 424 * @dev : the device owning the devfreq-event device being removed
425 * @edev : the devfreq-event device
426 *
 427 * Note that this function automatically manages the memory of the
 428 * devfreq-event device using device resource management.
429 */
430void devm_devfreq_event_remove_edev(struct device *dev,
431 struct devfreq_event_dev *edev)
432{
433 WARN_ON(devres_release(dev, devm_devfreq_event_release,
434 devm_devfreq_event_match, edev));
435}
436EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev);
437
438/*
439 * Device attributes for devfreq-event class.
440 */
441static ssize_t name_show(struct device *dev, struct device_attribute *attr,
442 char *buf)
443{
444 struct devfreq_event_dev *edev = to_devfreq_event(dev);
445
446 if (!edev || !edev->desc)
447 return -EINVAL;
448
449 return sprintf(buf, "%s\n", edev->desc->name);
450}
451static DEVICE_ATTR_RO(name);
452
453static ssize_t enable_count_show(struct device *dev,
454 struct device_attribute *attr, char *buf)
455{
456 struct devfreq_event_dev *edev = to_devfreq_event(dev);
457
458 if (!edev || !edev->desc)
459 return -EINVAL;
460
461 return sprintf(buf, "%d\n", edev->enable_count);
462}
463static DEVICE_ATTR_RO(enable_count);
464
465static struct attribute *devfreq_event_attrs[] = {
466 &dev_attr_name.attr,
467 &dev_attr_enable_count.attr,
468 NULL,
469};
470ATTRIBUTE_GROUPS(devfreq_event);
471
472static int __init devfreq_event_init(void)
473{
474 devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
475 if (IS_ERR(devfreq_event_class)) {
476 pr_err("%s: couldn't create class\n", __FILE__);
477 return PTR_ERR(devfreq_event_class);
478 }
479
480 devfreq_event_class->dev_groups = devfreq_event_groups;
481
482 return 0;
483}
484subsys_initcall(devfreq_event_init);
485
486static void __exit devfreq_event_exit(void)
487{
488 class_destroy(devfreq_event_class);
489}
490module_exit(devfreq_event_exit);
491
492MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
493MODULE_DESCRIPTION("DEVFREQ-Event class support");
494MODULE_LICENSE("GPL");
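The API above gives devfreq drivers a simple consumer pattern: look the event device up via the devfreq-events phandle, enable it, start a measurement with set_event(), and read it back later with get_event(). A hedged sketch of such a consumer (the example_* functions, the phandle index and the error handling are illustrative, not part of this patch):

	#include <linux/devfreq-event.h>
	#include <linux/err.h>
	#include <linux/kernel.h>

	/* 'dev' is assumed to be a devfreq driver's device whose DT node
	 * carries a "devfreq-events" phandle.
	 */
	static int example_start_monitoring(struct device *dev,
					    struct devfreq_event_dev **out)
	{
		struct devfreq_event_dev *edev;
		int ret;

		edev = devfreq_event_get_edev_by_phandle(dev, 0);
		if (IS_ERR(edev))
			return PTR_ERR(edev);

		ret = devfreq_event_enable_edev(edev);
		if (ret < 0)
			return ret;

		ret = devfreq_event_set_event(edev);	/* start counting */
		if (ret < 0) {
			devfreq_event_disable_edev(edev);
			return ret;
		}

		*out = edev;
		return 0;
	}

	static void example_read_and_stop(struct devfreq_event_dev *edev)
	{
		struct devfreq_event_data edata;

		if (!devfreq_event_get_event(edev, &edata))
			pr_info("load %lu / total %lu\n",
				(unsigned long)edata.load_count,
				(unsigned long)edata.total_count);

		devfreq_event_disable_edev(edev);
	}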
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
new file mode 100644
index 000000000000..a11720affc31
--- /dev/null
+++ b/drivers/devfreq/event/Kconfig
@@ -0,0 +1,25 @@
1menuconfig PM_DEVFREQ_EVENT
2 bool "DEVFREQ-Event device Support"
3 help
 4	  The devfreq-event device provides the raw data and events that
 5	  indicate the current state of the devfreq-event device. The data
 6	  provided by the devfreq-event device is used to monitor the state
 7	  of a device and to determine a suitable amount of resources,
 8	  reducing waste.
9
10	  The devfreq-event device can support various types of events
11	  (e.g., raw data, utilization, latency, bandwidth). The events
12	  may be used by devfreq governors and other subsystems.
13
14if PM_DEVFREQ_EVENT
15
16config DEVFREQ_EVENT_EXYNOS_PPMU
17 bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
18 depends on ARCH_EXYNOS
19 select PM_OPP
20 help
21	  This adds the devfreq-event driver for Exynos SoCs. It provides PPMU
22 (Platform Performance Monitoring Unit) counters to estimate the
23 utilization of each module.
24
25endif # PM_DEVFREQ_EVENT
diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile
new file mode 100644
index 000000000000..be146ead79cf
--- /dev/null
+++ b/drivers/devfreq/event/Makefile
@@ -0,0 +1,2 @@
1# Exynos DEVFREQ Event Drivers
2obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
new file mode 100644
index 000000000000..135be0aada9d
--- /dev/null
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -0,0 +1,374 @@
1/*
2 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author : Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
12 */
13
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/of_address.h>
20#include <linux/platform_device.h>
21#include <linux/suspend.h>
22#include <linux/devfreq-event.h>
23
24#include "exynos-ppmu.h"
25
26struct exynos_ppmu_data {
27 void __iomem *base;
28 struct clk *clk;
29};
30
31struct exynos_ppmu {
32 struct devfreq_event_dev **edev;
33 struct devfreq_event_desc *desc;
34 unsigned int num_events;
35
36 struct device *dev;
37 struct mutex lock;
38
39 struct exynos_ppmu_data ppmu;
40};
41
42#define PPMU_EVENT(name) \
43 { "ppmu-event0-"#name, PPMU_PMNCNT0 }, \
44 { "ppmu-event1-"#name, PPMU_PMNCNT1 }, \
45 { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \
46 { "ppmu-event3-"#name, PPMU_PMNCNT3 }
47
48struct __exynos_ppmu_events {
49 char *name;
50 int id;
51} ppmu_events[] = {
52 /* For Exynos3250, Exynos4 and Exynos5260 */
53 PPMU_EVENT(g3d),
54 PPMU_EVENT(fsys),
55
56 /* For Exynos4 SoCs and Exynos3250 */
57 PPMU_EVENT(dmc0),
58 PPMU_EVENT(dmc1),
59 PPMU_EVENT(cpu),
60 PPMU_EVENT(rightbus),
61 PPMU_EVENT(leftbus),
62 PPMU_EVENT(lcd0),
63 PPMU_EVENT(camif),
64
65 /* Only for Exynos3250 and Exynos5260 */
66 PPMU_EVENT(mfc),
67
68 /* Only for Exynos4 SoCs */
69 PPMU_EVENT(mfc-left),
70 PPMU_EVENT(mfc-right),
71
72 /* Only for Exynos5260 SoCs */
73 PPMU_EVENT(drex0-s0),
74 PPMU_EVENT(drex0-s1),
75 PPMU_EVENT(drex1-s0),
76 PPMU_EVENT(drex1-s1),
77 PPMU_EVENT(eagle),
78 PPMU_EVENT(kfc),
79 PPMU_EVENT(isp),
80 PPMU_EVENT(fimc),
81 PPMU_EVENT(gscl),
82 PPMU_EVENT(mscl),
83 PPMU_EVENT(fimd0x),
84 PPMU_EVENT(fimd1x),
85 { /* sentinel */ },
86};
87
88static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
89{
90 int i;
91
92 for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
93 if (!strcmp(edev->desc->name, ppmu_events[i].name))
94 return ppmu_events[i].id;
95
96 return -EINVAL;
97}
98
99static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
100{
101 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
102 u32 pmnc;
103
104 /* Disable all counters */
105 __raw_writel(PPMU_CCNT_MASK |
106 PPMU_PMCNT0_MASK |
107 PPMU_PMCNT1_MASK |
108 PPMU_PMCNT2_MASK |
109 PPMU_PMCNT3_MASK,
110 info->ppmu.base + PPMU_CNTENC);
111
112 /* Disable PPMU */
113 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
114 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
115 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
116
117 return 0;
118}
119
120static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
121{
122 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
123 int id = exynos_ppmu_find_ppmu_id(edev);
124 u32 pmnc, cntens;
125
126 if (id < 0)
127 return id;
128
129 /* Enable specific counter */
130 cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
131 cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
132 __raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);
133
134 /* Set the event of Read/Write data count */
135 __raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
136 info->ppmu.base + PPMU_BEVTxSEL(id));
137
138 /* Reset cycle counter/performance counter and enable PPMU */
139 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
140 pmnc &= ~(PPMU_PMNC_ENABLE_MASK
141 | PPMU_PMNC_COUNTER_RESET_MASK
142 | PPMU_PMNC_CC_RESET_MASK);
143 pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
144 pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
145 pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
146 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
147
148 return 0;
149}
150
151static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
152 struct devfreq_event_data *edata)
153{
154 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
155 int id = exynos_ppmu_find_ppmu_id(edev);
156 u32 pmnc, cntenc;
157
158 if (id < 0)
159 return -EINVAL;
160
161 /* Disable PPMU */
162 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
163 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
164 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
165
166 /* Read cycle count */
167 edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);
168
169 /* Read performance count */
170 switch (id) {
171 case PPMU_PMNCNT0:
172 case PPMU_PMNCNT1:
173 case PPMU_PMNCNT2:
174 edata->load_count
175 = __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
176 break;
177 case PPMU_PMNCNT3:
178 edata->load_count =
179 ((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
180 | __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
181 break;
182 default:
183 return -EINVAL;
184 }
185
186 /* Disable specific counter */
187 cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
188 cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
189 __raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);
190
191 dev_dbg(&edev->dev, "%s (event: %lu/%lu)\n", edev->desc->name,
192 edata->load_count, edata->total_count);
193
194 return 0;
195}
196
197static struct devfreq_event_ops exynos_ppmu_ops = {
198 .disable = exynos_ppmu_disable,
199 .set_event = exynos_ppmu_set_event,
200 .get_event = exynos_ppmu_get_event,
201};
202
203static int of_get_devfreq_events(struct device_node *np,
204 struct exynos_ppmu *info)
205{
206 struct devfreq_event_desc *desc;
207 struct device *dev = info->dev;
208 struct device_node *events_np, *node;
209 int i, j, count;
210
211 events_np = of_get_child_by_name(np, "events");
212 if (!events_np) {
213 dev_err(dev,
214 "failed to get child node of devfreq-event devices\n");
215 return -EINVAL;
216 }
217
218 count = of_get_child_count(events_np);
219 desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
220 if (!desc)
221 return -ENOMEM;
222 info->num_events = count;
223
224 j = 0;
225 for_each_child_of_node(events_np, node) {
226 for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
227 if (!ppmu_events[i].name)
228 continue;
229
230 if (!of_node_cmp(node->name, ppmu_events[i].name))
231 break;
232 }
233
234 if (i == ARRAY_SIZE(ppmu_events)) {
235 dev_warn(dev,
236 "don't know how to configure events: %s\n",
237 node->name);
238 continue;
239 }
240
241 desc[j].ops = &exynos_ppmu_ops;
242 desc[j].driver_data = info;
243
244 of_property_read_string(node, "event-name", &desc[j].name);
245
246 j++;
247
248 of_node_put(node);
249 }
250 info->desc = desc;
251
252 of_node_put(events_np);
253
254 return 0;
255}
256
257static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
258{
259 struct device *dev = info->dev;
260 struct device_node *np = dev->of_node;
261 int ret = 0;
262
263 if (!np) {
264 dev_err(dev, "failed to find devicetree node\n");
265 return -EINVAL;
266 }
267
268 /* Map the memory-mapped I/O region used to control the PPMU registers */
269 info->ppmu.base = of_iomap(np, 0);
270 if (IS_ERR_OR_NULL(info->ppmu.base)) {
271 dev_err(dev, "failed to map memory region\n");
272 return -ENOMEM;
273 }
274
275 info->ppmu.clk = devm_clk_get(dev, "ppmu");
276 if (IS_ERR(info->ppmu.clk)) {
277 info->ppmu.clk = NULL;
278 dev_warn(dev, "cannot get PPMU clock\n");
279 }
280
281 ret = of_get_devfreq_events(np, info);
282 if (ret < 0) {
283 dev_err(dev, "failed to parse exynos ppmu dt node\n");
284 goto err;
285 }
286
287 return 0;
288
289err:
290 iounmap(info->ppmu.base);
291
292 return ret;
293}
294
295static int exynos_ppmu_probe(struct platform_device *pdev)
296{
297 struct exynos_ppmu *info;
298 struct devfreq_event_dev **edev;
299 struct devfreq_event_desc *desc;
300 int i, ret = 0, size;
301
302 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
303 if (!info)
304 return -ENOMEM;
305
306 mutex_init(&info->lock);
307 info->dev = &pdev->dev;
308
309 /* Parse devicetree data to get resources */
310 ret = exynos_ppmu_parse_dt(info);
311 if (ret < 0) {
312 dev_err(&pdev->dev,
313 "failed to parse devicetree for resource\n");
314 return ret;
315 }
316 desc = info->desc;
317
318 size = sizeof(struct devfreq_event_dev *) * info->num_events;
319 info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
320 if (!info->edev) {
321 dev_err(&pdev->dev,
322 "failed to allocate memory for devfreq-event devices\n");
323 return -ENOMEM;
324 }
325 edev = info->edev;
326 platform_set_drvdata(pdev, info);
327
328 for (i = 0; i < info->num_events; i++) {
329 edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
330 if (IS_ERR(edev[i])) {
331 ret = PTR_ERR(edev[i]);
332 dev_err(&pdev->dev,
333 "failed to add devfreq-event device\n");
334 goto err;
335 }
336 }
337
338 clk_prepare_enable(info->ppmu.clk);
339
340 return 0;
341err:
342 iounmap(info->ppmu.base);
343
344 return ret;
345}
346
347static int exynos_ppmu_remove(struct platform_device *pdev)
348{
349 struct exynos_ppmu *info = platform_get_drvdata(pdev);
350
351 clk_disable_unprepare(info->ppmu.clk);
352 iounmap(info->ppmu.base);
353
354 return 0;
355}
356
357static struct of_device_id exynos_ppmu_id_match[] = {
358 { .compatible = "samsung,exynos-ppmu", },
359 { /* sentinel */ },
360};
361
362static struct platform_driver exynos_ppmu_driver = {
363 .probe = exynos_ppmu_probe,
364 .remove = exynos_ppmu_remove,
365 .driver = {
366 .name = "exynos-ppmu",
367 .of_match_table = exynos_ppmu_id_match,
368 },
369};
370module_platform_driver(exynos_ppmu_driver);
371
372MODULE_DESCRIPTION("Exynos PPMU (Platform Performance Monitoring Unit) driver");
373MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
374MODULE_LICENSE("GPL");
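
For context, a minimal sketch of how a consumer driver would read one of these PPMU counters through the devfreq-event framework added in the same series. The helper names follow include/linux/devfreq-event.h; the phandle index and the example function are illustrative assumptions, not part of this patch.

#include <linux/devfreq-event.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative consumer: sample one PPMU event over a measurement window. */
static int example_read_ppmu_load(struct device *dev)
{
	struct devfreq_event_dev *edev;
	struct devfreq_event_data edata;
	int ret;

	/* Index 0 of this device's "devfreq-events" phandle list (assumed). */
	edev = devfreq_event_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devfreq_event_enable_edev(edev);
	if (ret < 0)
		return ret;

	/* exynos_ppmu_set_event() resets the counters and starts the PPMU. */
	ret = devfreq_event_set_event(edev);
	if (ret < 0)
		goto out;

	/* ... run the workload for a sampling period ... */

	/* exynos_ppmu_get_event() stops the PPMU and reports the counts. */
	ret = devfreq_event_get_event(edev, &edata);
	if (ret < 0)
		goto out;

	dev_info(dev, "busy %lu of %lu bus cycles\n",
		 edata.load_count, edata.total_count);
out:
	devfreq_event_disable_edev(edev);
	return ret;
}

The load_count/total_count pair maps directly onto the PPMU_PMNCTx and PPMU_CCNT reads in exynos_ppmu_get_event() above.
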
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
new file mode 100644
index 000000000000..4e831d48c138
--- /dev/null
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -0,0 +1,93 @@
1/*
2 * exynos_ppmu.h - EXYNOS PPMU header file
3 *
4 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
5 * Author : Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __EXYNOS_PPMU_H__
13#define __EXYNOS_PPMU_H__
14
15enum ppmu_state {
16 PPMU_DISABLE = 0,
17 PPMU_ENABLE,
18};
19
20enum ppmu_counter {
21 PPMU_PMNCNT0 = 0,
22 PPMU_PMNCNT1,
23 PPMU_PMNCNT2,
24 PPMU_PMNCNT3,
25
26 PPMU_PMNCNT_MAX,
27};
28
29enum ppmu_event_type {
30 PPMU_RO_BUSY_CYCLE_CNT = 0x0,
31 PPMU_WO_BUSY_CYCLE_CNT = 0x1,
32 PPMU_RW_BUSY_CYCLE_CNT = 0x2,
33 PPMU_RO_REQUEST_CNT = 0x3,
34 PPMU_WO_REQUEST_CNT = 0x4,
35 PPMU_RO_DATA_CNT = 0x5,
36 PPMU_WO_DATA_CNT = 0x6,
37 PPMU_RO_LATENCY = 0x12,
38 PPMU_WO_LATENCY = 0x16,
39};
40
41enum ppmu_reg {
42 /* PPC control register */
43 PPMU_PMNC = 0x00,
44 PPMU_CNTENS = 0x10,
45 PPMU_CNTENC = 0x20,
46 PPMU_INTENS = 0x30,
47 PPMU_INTENC = 0x40,
48 PPMU_FLAG = 0x50,
49
50 /* Cycle Counter and Performance Event Counter Register */
51 PPMU_CCNT = 0x100,
52 PPMU_PMCNT0 = 0x110,
53 PPMU_PMCNT1 = 0x120,
54 PPMU_PMCNT2 = 0x130,
55 PPMU_PMCNT3_HIGH = 0x140,
56 PPMU_PMCNT3_LOW = 0x150,
57
58 /* Bus Event Generator */
59 PPMU_BEVT0SEL = 0x1000,
60 PPMU_BEVT1SEL = 0x1100,
61 PPMU_BEVT2SEL = 0x1200,
62 PPMU_BEVT3SEL = 0x1300,
63 PPMU_COUNTER_RESET = 0x1810,
64 PPMU_READ_OVERFLOW_CNT = 0x1810,
65 PPMU_READ_UNDERFLOW_CNT = 0x1814,
66 PPMU_WRITE_OVERFLOW_CNT = 0x1850,
67 PPMU_WRITE_UNDERFLOW_CNT = 0x1854,
68 PPMU_READ_PENDING_CNT = 0x1880,
69 PPMU_WRITE_PENDING_CNT = 0x1884
70};
71
72/* PMNC register */
73#define PPMU_PMNC_CC_RESET_SHIFT 2
74#define PPMU_PMNC_COUNTER_RESET_SHIFT 1
75#define PPMU_PMNC_ENABLE_SHIFT 0
76#define PPMU_PMNC_START_MODE_MASK BIT(16)
77#define PPMU_PMNC_CC_DIVIDER_MASK BIT(3)
78#define PPMU_PMNC_CC_RESET_MASK BIT(2)
79#define PPMU_PMNC_COUNTER_RESET_MASK BIT(1)
80#define PPMU_PMNC_ENABLE_MASK BIT(0)
81
82/* CNTENS/CNTENC/INTENS/INTENC/FLAG register */
83#define PPMU_CCNT_MASK BIT(31)
84#define PPMU_PMCNT3_MASK BIT(3)
85#define PPMU_PMCNT2_MASK BIT(2)
86#define PPMU_PMCNT1_MASK BIT(1)
87#define PPMU_PMCNT0_MASK BIT(0)
88
89/* PPMU_PMNCTx/PPMU_BEVTxSEL registers */
90#define PPMU_PMNCT(x) (PPMU_PMCNT0 + (0x10 * (x)))
91#define PPMU_BEVTxSEL(x) (PPMU_BEVT0SEL + (0x100 * (x)))
92
93#endif /* __EXYNOS_PPMU_H__ */
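
A quick sketch of how the two offset helpers at the end of the header expand, assuming only the enum values above (BUILD_BUG_ON is used here merely as compile-time arithmetic; this is not part of the patch). Note that PPMU_PMNCT(PPMU_PMNCNT3) would land on PPMU_PMCNT3_HIGH (0x140), which is why exynos_ppmu_get_event() special-cases counter 3 and stitches the HIGH/LOW halves together.

#include <linux/bug.h>
#include "exynos-ppmu.h"

static inline void ppmu_offset_examples(void)
{
	BUILD_BUG_ON(PPMU_PMNCT(PPMU_PMNCNT0) != PPMU_PMCNT0);      /* 0x110 + 0x10 * 0 */
	BUILD_BUG_ON(PPMU_PMNCT(PPMU_PMNCNT2) != PPMU_PMCNT2);      /* 0x110 + 0x10 * 2 */
	BUILD_BUG_ON(PPMU_BEVTxSEL(PPMU_PMNCNT3) != PPMU_BEVT3SEL); /* 0x1000 + 0x100 * 3 */
}
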
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
new file mode 100644
index 000000000000..34790961af5a
--- /dev/null
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -0,0 +1,718 @@
1/*
2 * A devfreq driver for NVIDIA Tegra SoCs
3 *
4 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
5 * Copyright (C) 2014 Google, Inc
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 */
20
21#include <linux/clk.h>
22#include <linux/cpufreq.h>
23#include <linux/devfreq.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/module.h>
27#include <linux/platform_device.h>
28#include <linux/pm_opp.h>
29#include <linux/reset.h>
30
31#include "governor.h"
32
33#define ACTMON_GLB_STATUS 0x0
34#define ACTMON_GLB_PERIOD_CTRL 0x4
35
36#define ACTMON_DEV_CTRL 0x0
37#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
38#define ACTMON_DEV_CTRL_ENB_PERIODIC BIT(18)
39#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN BIT(20)
40#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN BIT(21)
41#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT 23
42#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT 26
43#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN BIT(29)
44#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
45#define ACTMON_DEV_CTRL_ENB BIT(31)
46
47#define ACTMON_DEV_UPPER_WMARK 0x4
48#define ACTMON_DEV_LOWER_WMARK 0x8
49#define ACTMON_DEV_INIT_AVG 0xc
50#define ACTMON_DEV_AVG_UPPER_WMARK 0x10
51#define ACTMON_DEV_AVG_LOWER_WMARK 0x14
52#define ACTMON_DEV_COUNT_WEIGHT 0x18
53#define ACTMON_DEV_AVG_COUNT 0x20
54#define ACTMON_DEV_INTR_STATUS 0x24
55
56#define ACTMON_INTR_STATUS_CLEAR 0xffffffff
57
58#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER BIT(31)
59#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER BIT(30)
60
61#define ACTMON_ABOVE_WMARK_WINDOW 1
62#define ACTMON_BELOW_WMARK_WINDOW 3
63#define ACTMON_BOOST_FREQ_STEP 16000
64
65/* The activity counter is incremented every 256 memory transactions, and each
66 * transaction takes 4 EMC clocks for Tegra124, so the COUNT_WEIGHT is
67 * 4 * 256 = 1024.
68 */
69#define ACTMON_COUNT_WEIGHT 0x400
70
71/*
72 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
73 * translates to 2 ^ (K_VAL + 1), e.g. 2 ^ (6 + 1) = 128
74 */
75#define ACTMON_AVERAGE_WINDOW_LOG2 6
76#define ACTMON_SAMPLING_PERIOD 12 /* ms */
77#define ACTMON_DEFAULT_AVG_BAND 6 /* in tenths of a percent */
78
79#define KHZ 1000
80
81/* Assume that the bus is saturated if the utilization is 25% */
82#define BUS_SATURATION_RATIO 25
83
84/**
85 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
86 * device
87 *
88 * Coefficients and thresholds are in %
89 */
90struct tegra_devfreq_device_config {
91 u32 offset;
92 u32 irq_mask;
93
94 unsigned int boost_up_coeff;
95 unsigned int boost_down_coeff;
96 unsigned int boost_up_threshold;
97 unsigned int boost_down_threshold;
98 u32 avg_dependency_threshold;
99};
100
101enum tegra_actmon_device {
102 MCALL = 0,
103 MCCPU,
104};
105
106static struct tegra_devfreq_device_config actmon_device_configs[] = {
107 {
108 /* MCALL */
109 .offset = 0x1c0,
110 .irq_mask = 1 << 26,
111 .boost_up_coeff = 200,
112 .boost_down_coeff = 50,
113 .boost_up_threshold = 60,
114 .boost_down_threshold = 40,
115 },
116 {
117 /* MCCPU */
118 .offset = 0x200,
119 .irq_mask = 1 << 25,
120 .boost_up_coeff = 800,
121 .boost_down_coeff = 90,
122 .boost_up_threshold = 27,
123 .boost_down_threshold = 10,
124 .avg_dependency_threshold = 50000,
125 },
126};
127
128/**
129 * struct tegra_devfreq_device - state specific to an ACTMON device
130 *
131 * Frequencies are in kHz.
132 */
133struct tegra_devfreq_device {
134 const struct tegra_devfreq_device_config *config;
135
136 void __iomem *regs;
137 u32 avg_band_freq;
138 u32 avg_count;
139
140 unsigned long target_freq;
141 unsigned long boost_freq;
142};
143
144struct tegra_devfreq {
145 struct devfreq *devfreq;
146
147 struct platform_device *pdev;
148 struct reset_control *reset;
149 struct clk *clock;
150 void __iomem *regs;
151
152 spinlock_t lock;
153
154 struct clk *emc_clock;
155 unsigned long max_freq;
156 unsigned long cur_freq;
157 struct notifier_block rate_change_nb;
158
159 struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
160};
161
162struct tegra_actmon_emc_ratio {
163 unsigned long cpu_freq;
164 unsigned long emc_freq;
165};
166
167static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
168 { 1400000, ULONG_MAX },
169 { 1200000, 750000 },
170 { 1100000, 600000 },
171 { 1000000, 500000 },
172 { 800000, 375000 },
173 { 500000, 200000 },
174 { 250000, 100000 },
175};
176
177static unsigned long do_percent(unsigned long val, unsigned int pct)
178{
179 return val * pct / 100;
180}
181
182static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq_device *dev)
183{
184 u32 avg = dev->avg_count;
185 u32 band = dev->avg_band_freq * ACTMON_SAMPLING_PERIOD;
186
187 writel(avg + band, dev->regs + ACTMON_DEV_AVG_UPPER_WMARK);
188 avg = max(avg, band);
189 writel(avg - band, dev->regs + ACTMON_DEV_AVG_LOWER_WMARK);
190}
191
192static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
193 struct tegra_devfreq_device *dev)
194{
195 u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
196
197 writel(do_percent(val, dev->config->boost_up_threshold),
198 dev->regs + ACTMON_DEV_UPPER_WMARK);
199
200 writel(do_percent(val, dev->config->boost_down_threshold),
201 dev->regs + ACTMON_DEV_LOWER_WMARK);
202}
203
204static void actmon_write_barrier(struct tegra_devfreq *tegra)
205{
206 /* ensure the update has reached the ACTMON */
207 wmb();
208 readl(tegra->regs + ACTMON_GLB_STATUS);
209}
210
211static irqreturn_t actmon_isr(int irq, void *data)
212{
213 struct tegra_devfreq *tegra = data;
214 struct tegra_devfreq_device *dev = NULL;
215 unsigned long flags;
216 u32 val;
217 unsigned int i;
218
219 val = readl(tegra->regs + ACTMON_GLB_STATUS);
220
221 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
222 if (val & tegra->devices[i].config->irq_mask) {
223 dev = tegra->devices + i;
224 break;
225 }
226 }
227
228 if (!dev)
229 return IRQ_NONE;
230
231 spin_lock_irqsave(&tegra->lock, flags);
232
233 dev->avg_count = readl(dev->regs + ACTMON_DEV_AVG_COUNT);
234 tegra_devfreq_update_avg_wmark(dev);
235
236 val = readl(dev->regs + ACTMON_DEV_INTR_STATUS);
237 if (val & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
238 val = readl(dev->regs + ACTMON_DEV_CTRL) |
239 ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
240 ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
241
242 /*
243 * new_boost = min(old_boost * up_coef + step, max_freq)
244 */
245 dev->boost_freq = do_percent(dev->boost_freq,
246 dev->config->boost_up_coeff);
247 dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
248 if (dev->boost_freq >= tegra->max_freq) {
249 dev->boost_freq = tegra->max_freq;
250 val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
251 }
252 writel(val, dev->regs + ACTMON_DEV_CTRL);
253 } else if (val & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
254 val = readl(dev->regs + ACTMON_DEV_CTRL) |
255 ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
256 ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
257
258 /*
259 * new_boost = old_boost * down_coef
260 * or 0 if (old_boost * down_coef < step / 2)
261 */
262 dev->boost_freq = do_percent(dev->boost_freq,
263 dev->config->boost_down_coeff);
264 if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
265 dev->boost_freq = 0;
266 val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
267 }
268 writel(val, dev->regs + ACTMON_DEV_CTRL);
269 }
270
271 if (dev->config->avg_dependency_threshold) {
272 val = readl(dev->regs + ACTMON_DEV_CTRL);
273 if (dev->avg_count >= dev->config->avg_dependency_threshold)
274 val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
275 else if (dev->boost_freq == 0)
276 val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
277 writel(val, dev->regs + ACTMON_DEV_CTRL);
278 }
279
280 writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
281
282 actmon_write_barrier(tegra);
283
284 spin_unlock_irqrestore(&tegra->lock, flags);
285
286 return IRQ_WAKE_THREAD;
287}
288
289static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
290 unsigned long cpu_freq)
291{
292 unsigned int i;
293 struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
294
295 for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
296 if (cpu_freq >= ratio->cpu_freq) {
297 if (ratio->emc_freq >= tegra->max_freq)
298 return tegra->max_freq;
299 else
300 return ratio->emc_freq;
301 }
302 }
303
304 return 0;
305}
306
307static void actmon_update_target(struct tegra_devfreq *tegra,
308 struct tegra_devfreq_device *dev)
309{
310 unsigned long cpu_freq = 0;
311 unsigned long static_cpu_emc_freq = 0;
312 unsigned int avg_sustain_coef;
313 unsigned long flags;
314
315 if (dev->config->avg_dependency_threshold) {
316 cpu_freq = cpufreq_get(0);
317 static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
318 }
319
320 spin_lock_irqsave(&tegra->lock, flags);
321
322 dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
323 avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
324 dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
325 dev->target_freq += dev->boost_freq;
326
327 if (dev->avg_count >= dev->config->avg_dependency_threshold)
328 dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
329
330 spin_unlock_irqrestore(&tegra->lock, flags);
331}
332
333static irqreturn_t actmon_thread_isr(int irq, void *data)
334{
335 struct tegra_devfreq *tegra = data;
336
337 mutex_lock(&tegra->devfreq->lock);
338 update_devfreq(tegra->devfreq);
339 mutex_unlock(&tegra->devfreq->lock);
340
341 return IRQ_HANDLED;
342}
343
344static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
345 unsigned long action, void *ptr)
346{
347 struct clk_notifier_data *data = ptr;
348 struct tegra_devfreq *tegra = container_of(nb, struct tegra_devfreq,
349 rate_change_nb);
350 unsigned int i;
351 unsigned long flags;
352
353 spin_lock_irqsave(&tegra->lock, flags);
354
355 switch (action) {
356 case POST_RATE_CHANGE:
357 tegra->cur_freq = data->new_rate / KHZ;
358
359 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
360 tegra_devfreq_update_wmark(tegra, tegra->devices + i);
361
362 actmon_write_barrier(tegra);
363 break;
364 case PRE_RATE_CHANGE:
365 /* fall through */
366 case ABORT_RATE_CHANGE:
367 break;
368 }
369
370 spin_unlock_irqrestore(&tegra->lock, flags);
371
372 return NOTIFY_OK;
373}
374
375static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
376 struct tegra_devfreq_device *dev)
377{
378 u32 val;
379
380 dev->avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
381 dev->target_freq = tegra->cur_freq;
382
383 dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
384 writel(dev->avg_count, dev->regs + ACTMON_DEV_INIT_AVG);
385
386 tegra_devfreq_update_avg_wmark(dev);
387 tegra_devfreq_update_wmark(tegra, dev);
388
389 writel(ACTMON_COUNT_WEIGHT, dev->regs + ACTMON_DEV_COUNT_WEIGHT);
390 writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
391
392 val = 0;
393 val |= ACTMON_DEV_CTRL_ENB_PERIODIC |
394 ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN |
395 ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
396 val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
397 << ACTMON_DEV_CTRL_K_VAL_SHIFT;
398 val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
399 << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
400 val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
401 << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
402 val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN |
403 ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
404
405 writel(val, dev->regs + ACTMON_DEV_CTRL);
406
407 actmon_write_barrier(tegra);
408
409 val = readl(dev->regs + ACTMON_DEV_CTRL);
410 val |= ACTMON_DEV_CTRL_ENB;
411 writel(val, dev->regs + ACTMON_DEV_CTRL);
412
413 actmon_write_barrier(tegra);
414}
415
416static int tegra_devfreq_suspend(struct device *dev)
417{
418 struct platform_device *pdev;
419 struct tegra_devfreq *tegra;
420 struct tegra_devfreq_device *actmon_dev;
421 unsigned int i;
422 u32 val;
423
424 pdev = container_of(dev, struct platform_device, dev);
425 tegra = platform_get_drvdata(pdev);
426
427 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
428 actmon_dev = &tegra->devices[i];
429
430 val = readl(actmon_dev->regs + ACTMON_DEV_CTRL);
431 val &= ~ACTMON_DEV_CTRL_ENB;
432 writel(val, actmon_dev->regs + ACTMON_DEV_CTRL);
433
434 writel(ACTMON_INTR_STATUS_CLEAR,
435 actmon_dev->regs + ACTMON_DEV_INTR_STATUS);
436
437 actmon_write_barrier(tegra);
438 }
439
440 return 0;
441}
442
443static int tegra_devfreq_resume(struct device *dev)
444{
445 struct platform_device *pdev;
446 struct tegra_devfreq *tegra;
447 struct tegra_devfreq_device *actmon_dev;
448 unsigned int i;
449
450 pdev = container_of(dev, struct platform_device, dev);
451 tegra = platform_get_drvdata(pdev);
452
453 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
454 actmon_dev = &tegra->devices[i];
455
456 tegra_actmon_configure_device(tegra, actmon_dev);
457 }
458
459 return 0;
460}
461
462static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
463 u32 flags)
464{
465 struct platform_device *pdev;
466 struct tegra_devfreq *tegra;
467 struct dev_pm_opp *opp;
468 unsigned long rate = *freq * KHZ;
469
470 pdev = container_of(dev, struct platform_device, dev);
471 tegra = platform_get_drvdata(pdev);
472
473 rcu_read_lock();
474 opp = devfreq_recommended_opp(dev, &rate, flags);
475 if (IS_ERR(opp)) {
476 rcu_read_unlock();
477 dev_err(dev, "Failed to find opp for %lu kHz\n", *freq);
478 return PTR_ERR(opp);
479 }
480 rate = dev_pm_opp_get_freq(opp);
481 rcu_read_unlock();
482
483 /* TODO: Once we have per-user clk constraints, set a floor */
484 clk_set_rate(tegra->emc_clock, rate);
485
486 /* TODO: Set voltage as well */
487
488 return 0;
489}
490
491static int tegra_devfreq_get_dev_status(struct device *dev,
492 struct devfreq_dev_status *stat)
493{
494 struct platform_device *pdev;
495 struct tegra_devfreq *tegra;
496 struct tegra_devfreq_device *actmon_dev;
497
498 pdev = container_of(dev, struct platform_device, dev);
499 tegra = platform_get_drvdata(pdev);
500
501 stat->current_frequency = tegra->cur_freq;
502
503 /* To be used by the tegra governor */
504 stat->private_data = tegra;
505
506 /* The below are to be used by the other governors */
507
508 actmon_dev = &tegra->devices[MCALL];
509
510 /* Number of cycles spent on memory access */
511 stat->busy_time = actmon_dev->avg_count;
512
513 /* The bus can be considered to be saturated way before 100% */
514 stat->busy_time *= 100 / BUS_SATURATION_RATIO;
515
516 /* Number of cycles in a sampling period */
517 stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
518
519 return 0;
520}
521
522static int tegra_devfreq_get_target(struct devfreq *devfreq,
523 unsigned long *freq)
524{
525 struct devfreq_dev_status stat;
526 struct tegra_devfreq *tegra;
527 struct tegra_devfreq_device *dev;
528 unsigned long target_freq = 0;
529 unsigned int i;
530 int err;
531
532 err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat);
533 if (err)
534 return err;
535
536 tegra = stat.private_data;
537
538 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
539 dev = &tegra->devices[i];
540
541 actmon_update_target(tegra, dev);
542
543 target_freq = max(target_freq, dev->target_freq);
544 }
545
546 *freq = target_freq;
547
548 return 0;
549}
550
551static int tegra_devfreq_event_handler(struct devfreq *devfreq,
552 unsigned int event, void *data)
553{
554 return 0;
555}
556
557static struct devfreq_governor tegra_devfreq_governor = {
558 .name = "tegra",
559 .get_target_freq = tegra_devfreq_get_target,
560 .event_handler = tegra_devfreq_event_handler,
561};
562
563static struct devfreq_dev_profile tegra_devfreq_profile = {
564 .polling_ms = 0,
565 .target = tegra_devfreq_target,
566 .get_dev_status = tegra_devfreq_get_dev_status,
567};
568
569static int tegra_devfreq_probe(struct platform_device *pdev)
570{
571 struct tegra_devfreq *tegra;
572 struct tegra_devfreq_device *dev;
573 struct resource *res;
574 unsigned long max_freq;
575 unsigned int i;
576 int irq;
577 int err;
578
579 tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
580 if (!tegra)
581 return -ENOMEM;
582
583 spin_lock_init(&tegra->lock);
584
585 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
586 if (!res) {
587 dev_err(&pdev->dev, "Failed to get regs resource\n");
588 return -ENODEV;
589 }
590
591 tegra->regs = devm_ioremap_resource(&pdev->dev, res);
592 if (IS_ERR(tegra->regs)) {
593 dev_err(&pdev->dev, "Failed to get IO memory\n");
594 return PTR_ERR(tegra->regs);
595 }
596
597 tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
598 if (IS_ERR(tegra->reset)) {
599 dev_err(&pdev->dev, "Failed to get reset\n");
600 return PTR_ERR(tegra->reset);
601 }
602
603 tegra->clock = devm_clk_get(&pdev->dev, "actmon");
604 if (IS_ERR(tegra->clock)) {
605 dev_err(&pdev->dev, "Failed to get actmon clock\n");
606 return PTR_ERR(tegra->clock);
607 }
608
609 tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
610 if (IS_ERR(tegra->emc_clock)) {
611 dev_err(&pdev->dev, "Failed to get emc clock\n");
612 return PTR_ERR(tegra->emc_clock);
613 }
614
615 err = of_init_opp_table(&pdev->dev);
616 if (err) {
617 dev_err(&pdev->dev, "Failed to init operating point table\n");
618 return err;
619 }
620
621 tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
622 err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
623 if (err) {
624 dev_err(&pdev->dev,
625 "Failed to register rate change notifier\n");
626 return err;
627 }
628
629 reset_control_assert(tegra->reset);
630
631 err = clk_prepare_enable(tegra->clock);
632 if (err) {
633 reset_control_deassert(tegra->reset);
634 return err;
635 }
636
637 reset_control_deassert(tegra->reset);
638
639 max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX);
640 tegra->max_freq = max_freq / KHZ;
641
642 clk_set_rate(tegra->emc_clock, max_freq);
643
644 tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
645
646 writel(ACTMON_SAMPLING_PERIOD - 1,
647 tegra->regs + ACTMON_GLB_PERIOD_CTRL);
648
649 for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
650 dev = tegra->devices + i;
651 dev->config = actmon_device_configs + i;
652 dev->regs = tegra->regs + dev->config->offset;
653
654 tegra_actmon_configure_device(tegra, tegra->devices + i);
655 }
656
657 err = devfreq_add_governor(&tegra_devfreq_governor);
658 if (err) {
659 dev_err(&pdev->dev, "Failed to add governor\n");
660 return err;
661 }
662
663 tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
664 tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
665 &tegra_devfreq_profile,
666 "tegra",
667 NULL);
668
669 irq = platform_get_irq(pdev, 0);
670 err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
671 actmon_thread_isr, IRQF_SHARED,
672 "tegra-devfreq", tegra);
673 if (err) {
674 dev_err(&pdev->dev, "Interrupt request failed\n");
675 return err;
676 }
677
678 platform_set_drvdata(pdev, tegra);
679
680 return 0;
681}
682
683static int tegra_devfreq_remove(struct platform_device *pdev)
684{
685 struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
686
687 clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
688
689 clk_disable_unprepare(tegra->clock);
690
691 return 0;
692}
693
694static SIMPLE_DEV_PM_OPS(tegra_devfreq_pm_ops,
695 tegra_devfreq_suspend,
696 tegra_devfreq_resume);
697
698static struct of_device_id tegra_devfreq_of_match[] = {
699 { .compatible = "nvidia,tegra124-actmon" },
700 { },
701};
702
703static struct platform_driver tegra_devfreq_driver = {
704 .probe = tegra_devfreq_probe,
705 .remove = tegra_devfreq_remove,
706 .driver = {
707 .name = "tegra-devfreq",
708 .owner = THIS_MODULE,
709 .of_match_table = tegra_devfreq_of_match,
710 .pm = &tegra_devfreq_pm_ops,
711 },
712};
713module_platform_driver(tegra_devfreq_driver);
714
715MODULE_LICENSE("GPL");
716MODULE_DESCRIPTION("Tegra devfreq driver");
717MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
718MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
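
To make the integer math in actmon_update_target() concrete, here is the same computation written out for the MCALL configuration (boost_up_threshold = 60). This is a restatement for illustration, not driver code.

/* Sketch: target frequency for MCALL, in kHz, given one sampling window. */
static unsigned long example_mcall_target_khz(unsigned long avg_count,
					      unsigned long boost_freq)
{
	/* avg_count is accumulated over one ACTMON_SAMPLING_PERIOD (12 ms). */
	unsigned long freq = avg_count / ACTMON_SAMPLING_PERIOD;

	/* avg_sustain_coef = 100 * 100 / 60 = 166, so 60% busy maps to ~100%. */
	freq = freq * 166 / 100;

	/* Plus whatever boost the consecutive-watermark interrupts built up. */
	return freq + boost_freq;
}

For example, avg_count = 1200000 over a 12 ms window gives 100000 kHz, scaled to 166000 kHz before the boost term is added.
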
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index de361a156b34..5a635646e05c 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
43{ 43{
44 const struct acpi_csrt_shared_info *si; 44 const struct acpi_csrt_shared_info *si;
45 struct list_head resource_list; 45 struct list_head resource_list;
46 struct resource_list_entry *rentry; 46 struct resource_entry *rentry;
47 resource_size_t mem = 0, irq = 0; 47 resource_size_t mem = 0, irq = 0;
48 int ret; 48 int ret;
49 49
@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
56 return 0; 56 return 0;
57 57
58 list_for_each_entry(rentry, &resource_list, node) { 58 list_for_each_entry(rentry, &resource_list, node) {
59 if (resource_type(&rentry->res) == IORESOURCE_MEM) 59 if (resource_type(rentry->res) == IORESOURCE_MEM)
60 mem = rentry->res.start; 60 mem = rentry->res->start;
61 else if (resource_type(&rentry->res) == IORESOURCE_IRQ) 61 else if (resource_type(rentry->res) == IORESOURCE_IRQ)
62 irq = rentry->res.start; 62 irq = rentry->res->start;
63 } 63 }
64 64
65 acpi_dev_free_resource_list(&resource_list); 65 acpi_dev_free_resource_list(&resource_list);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4d6b26979fbd..bb3725b672cf 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -861,8 +861,8 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
861 break; 861 break;
862 862
863 case ACPI_RESOURCE_TYPE_ADDRESS64: 863 case ACPI_RESOURCE_TYPE_ADDRESS64:
864 hyperv_mmio.start = res->data.address64.minimum; 864 hyperv_mmio.start = res->data.address64.address.minimum;
865 hyperv_mmio.end = res->data.address64.maximum; 865 hyperv_mmio.end = res->data.address64.address.maximum;
866 break; 866 break;
867 } 867 }
868 868
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 6dbf6fcbdfaf..e8902f8dddfc 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -386,7 +386,7 @@ static int __init pcc_init(void)
386 ret = acpi_pcc_probe(); 386 ret = acpi_pcc_probe();
387 387
388 if (ret) { 388 if (ret) {
389 pr_err("ACPI PCC probe failed.\n"); 389 pr_debug("ACPI PCC probe failed.\n");
390 return -ENODEV; 390 return -ENODEV;
391 } 391 }
392 392
@@ -394,7 +394,7 @@ static int __init pcc_init(void)
394 pcc_mbox_probe, NULL, 0, NULL, 0); 394 pcc_mbox_probe, NULL, 0, NULL, 0);
395 395
396 if (!pcc_pdev) { 396 if (!pcc_pdev) {
397 pr_err("Err creating PCC platform bundle\n"); 397 pr_debug("Err creating PCC platform bundle\n");
398 return -ENODEV; 398 return -ENODEV;
399 } 399 }
400 400
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index e07ce5ff2d48..b10964e8cb54 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -553,8 +553,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
553 if (lp->cardtype == PAM_CARD || 553 if (lp->cardtype == PAM_CARD ||
554 memaddr == (unsigned short *)0xffe00000) { 554 memaddr == (unsigned short *)0xffe00000) {
555 /* PAMs card and Riebl on ST use level 5 autovector */ 555 /* PAMs card and Riebl on ST use level 5 autovector */
556 if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, 556 if (request_irq(IRQ_AUTO_5, lance_interrupt, 0,
557 "PAM,Riebl-ST Ethernet", dev)) { 557 "PAM,Riebl-ST Ethernet", dev)) {
558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); 558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
559 return 0; 559 return 0;
560 } 560 }
@@ -567,8 +567,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
567 printk( "Lance: request for VME interrupt failed\n" ); 567 printk( "Lance: request for VME interrupt failed\n" );
568 return 0; 568 return 0;
569 } 569 }
570 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO, 570 if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet",
571 "Riebl-VME Ethernet", dev)) { 571 dev)) {
572 printk( "Lance: request for irq %u failed\n", irq ); 572 printk( "Lance: request for irq %u failed\n", irq );
573 return 0; 573 return 0;
574 } 574 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 14a1c5cec3a5..fa274e0f47d7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4915,7 +4915,7 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4915 4915
4916 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); 4916 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4917 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); 4917 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4918 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); 4918 rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
4919} 4919}
4920 4920
4921static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) 4921static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4948,7 +4948,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4948 RTL_W8(MaxTxPacketSize, 0x3f); 4948 RTL_W8(MaxTxPacketSize, 0x3f);
4949 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); 4949 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4950 RTL_W8(Config4, RTL_R8(Config4) | 0x01); 4950 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4951 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); 4951 rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
4952} 4952}
4953 4953
4954static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) 4954static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4964,7 +4964,7 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4964static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) 4964static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4965{ 4965{
4966 rtl_tx_performance_tweak(tp->pci_dev, 4966 rtl_tx_performance_tweak(tp->pci_dev,
4967 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); 4967 PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
4968} 4968}
4969 4969
4970static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) 4970static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 037f74f0fcf6..12f9e2708afb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
483 * better enable it. The long term solution would be to use just a 483 * better enable it. The long term solution would be to use just a
484 * bunch of valid page descriptors, without dependency on ballooning 484 * bunch of valid page descriptors, without dependency on ballooning
485 */ 485 */
486 err = alloc_xenballooned_pages(MAX_PENDING_REQS, 486 err = gnttab_alloc_pages(MAX_PENDING_REQS,
487 queue->mmap_pages, 487 queue->mmap_pages);
488 false);
489 if (err) { 488 if (err) {
490 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); 489 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
491 return -ENOMEM; 490 return -ENOMEM;
@@ -664,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif)
664 */ 663 */
665void xenvif_deinit_queue(struct xenvif_queue *queue) 664void xenvif_deinit_queue(struct xenvif_queue *queue)
666{ 665{
667 free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages); 666 gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
668} 667}
669 668
670void xenvif_free(struct xenvif *vif) 669void xenvif_free(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c8ce701a7efb..7dc2d64db3cb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
314static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, 314static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
315 struct netrx_pending_operations *npo, 315 struct netrx_pending_operations *npo,
316 struct page *page, unsigned long size, 316 struct page *page, unsigned long size,
317 unsigned long offset, int *head, 317 unsigned long offset, int *head)
318 struct xenvif_queue *foreign_queue,
319 grant_ref_t foreign_gref)
320{ 318{
321 struct gnttab_copy *copy_gop; 319 struct gnttab_copy *copy_gop;
322 struct xenvif_rx_meta *meta; 320 struct xenvif_rx_meta *meta;
@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
333 offset &= ~PAGE_MASK; 331 offset &= ~PAGE_MASK;
334 332
335 while (size > 0) { 333 while (size > 0) {
334 struct xen_page_foreign *foreign;
335
336 BUG_ON(offset >= PAGE_SIZE); 336 BUG_ON(offset >= PAGE_SIZE);
337 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); 337 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
338 338
@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
361 copy_gop->flags = GNTCOPY_dest_gref; 361 copy_gop->flags = GNTCOPY_dest_gref;
362 copy_gop->len = bytes; 362 copy_gop->len = bytes;
363 363
364 if (foreign_queue) { 364 foreign = xen_page_foreign(page);
365 copy_gop->source.domid = foreign_queue->vif->domid; 365 if (foreign) {
366 copy_gop->source.u.ref = foreign_gref; 366 copy_gop->source.domid = foreign->domid;
367 copy_gop->source.u.ref = foreign->gref;
367 copy_gop->flags |= GNTCOPY_source_gref; 368 copy_gop->flags |= GNTCOPY_source_gref;
368 } else { 369 } else {
369 copy_gop->source.domid = DOMID_SELF; 370 copy_gop->source.domid = DOMID_SELF;
@@ -406,35 +407,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
406} 407}
407 408
408/* 409/*
409 * Find the grant ref for a given frag in a chain of struct ubuf_info's
410 * skb: the skb itself
411 * i: the frag's number
412 * ubuf: a pointer to an element in the chain. It should not be NULL
413 *
414 * Returns a pointer to the element in the chain where the page were found. If
415 * not found, returns NULL.
416 * See the definition of callback_struct in common.h for more details about
417 * the chain.
418 */
419static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
420 const int i,
421 const struct ubuf_info *ubuf)
422{
423 struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
424
425 do {
426 u16 pending_idx = ubuf->desc;
427
428 if (skb_shinfo(skb)->frags[i].page.p ==
429 foreign_queue->mmap_pages[pending_idx])
430 break;
431 ubuf = (struct ubuf_info *) ubuf->ctx;
432 } while (ubuf);
433
434 return ubuf;
435}
436
437/*
438 * Prepare an SKB to be transmitted to the frontend. 410 * Prepare an SKB to be transmitted to the frontend.
439 * 411 *
440 * This function is responsible for allocating grant operations, meta 412 * This function is responsible for allocating grant operations, meta
@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
459 int head = 1; 431 int head = 1;
460 int old_meta_prod; 432 int old_meta_prod;
461 int gso_type; 433 int gso_type;
462 const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
463 const struct ubuf_info *const head_ubuf = ubuf;
464 434
465 old_meta_prod = npo->meta_prod; 435 old_meta_prod = npo->meta_prod;
466 436
@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
507 len = skb_tail_pointer(skb) - data; 477 len = skb_tail_pointer(skb) - data;
508 478
509 xenvif_gop_frag_copy(queue, skb, npo, 479 xenvif_gop_frag_copy(queue, skb, npo,
510 virt_to_page(data), len, offset, &head, 480 virt_to_page(data), len, offset, &head);
511 NULL,
512 0);
513 data += len; 481 data += len;
514 } 482 }
515 483
516 for (i = 0; i < nr_frags; i++) { 484 for (i = 0; i < nr_frags; i++) {
517 /* This variable also signals whether foreign_gref has a real
518 * value or not.
519 */
520 struct xenvif_queue *foreign_queue = NULL;
521 grant_ref_t foreign_gref;
522
523 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
524 (ubuf->callback == &xenvif_zerocopy_callback)) {
525 const struct ubuf_info *const startpoint = ubuf;
526
527 /* Ideally ubuf points to the chain element which
528 * belongs to this frag. Or if frags were removed from
529 * the beginning, then shortly before it.
530 */
531 ubuf = xenvif_find_gref(skb, i, ubuf);
532
533 /* Try again from the beginning of the list, if we
534 * haven't tried from there. This only makes sense in
535 * the unlikely event of reordering the original frags.
536 * For injected local pages it's an unnecessary second
537 * run.
538 */
539 if (unlikely(!ubuf) && startpoint != head_ubuf)
540 ubuf = xenvif_find_gref(skb, i, head_ubuf);
541
542 if (likely(ubuf)) {
543 u16 pending_idx = ubuf->desc;
544
545 foreign_queue = ubuf_to_queue(ubuf);
546 foreign_gref =
547 foreign_queue->pending_tx_info[pending_idx].req.gref;
548 /* Just a safety measure. If this was the last
549 * element on the list, the for loop will
550 * iterate again if a local page were added to
551 * the end. Using head_ubuf here prevents the
552 * second search on the chain. Or the original
553 * frags changed order, but that's less likely.
554 * In any way, ubuf shouldn't be NULL.
555 */
556 ubuf = ubuf->ctx ?
557 (struct ubuf_info *) ubuf->ctx :
558 head_ubuf;
559 } else
560 /* This frag was a local page, added to the
561 * array after the skb left netback.
562 */
563 ubuf = head_ubuf;
564 }
565 xenvif_gop_frag_copy(queue, skb, npo, 485 xenvif_gop_frag_copy(queue, skb, npo,
566 skb_frag_page(&skb_shinfo(skb)->frags[i]), 486 skb_frag_page(&skb_shinfo(skb)->frags[i]),
567 skb_frag_size(&skb_shinfo(skb)->frags[i]), 487 skb_frag_size(&skb_shinfo(skb)->frags[i]),
568 skb_shinfo(skb)->frags[i].page_offset, 488 skb_shinfo(skb)->frags[i].page_offset,
569 &head, 489 &head);
570 foreign_queue,
571 foreign_queue ? foreign_gref : UINT_MAX);
572 } 490 }
573 491
574 return npo->meta_prod - old_meta_prod; 492 return npo->meta_prod - old_meta_prod;
@@ -1241,12 +1159,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1241 /* Take an extra reference to offset network stack's put_page */ 1159 /* Take an extra reference to offset network stack's put_page */
1242 get_page(queue->mmap_pages[pending_idx]); 1160 get_page(queue->mmap_pages[pending_idx]);
1243 } 1161 }
1244 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1245 * overlaps with "index", and "mapping" is not set. I think mapping
1246 * should be set. If delivered to local stack, it would drop this
1247 * skb in sk_filter unless the socket has the right to use it.
1248 */
1249 skb->pfmemalloc = false;
1250} 1162}
1251 1163
1252static int xenvif_get_extras(struct xenvif_queue *queue, 1164static int xenvif_get_extras(struct xenvif_queue *queue,
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 88471d3d98cd..110fece2ff53 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -140,6 +140,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
140 unsigned char busno, unsigned char bus_max, 140 unsigned char busno, unsigned char bus_max,
141 struct list_head *resources, resource_size_t *io_base) 141 struct list_head *resources, resource_size_t *io_base)
142{ 142{
143 struct resource_entry *window;
143 struct resource *res; 144 struct resource *res;
144 struct resource *bus_range; 145 struct resource *bus_range;
145 struct of_pci_range range; 146 struct of_pci_range range;
@@ -225,7 +226,10 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
225conversion_failed: 226conversion_failed:
226 kfree(res); 227 kfree(res);
227parse_failed: 228parse_failed:
229 resource_list_for_each_entry(window, resources)
230 kfree(window->res);
228 pci_free_resource_list(resources); 231 pci_free_resource_list(resources);
232 kfree(bus_range);
229 return err; 233 return err;
230} 234}
231EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); 235EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c
index 7ad59ac68cf6..a81cd2a2747f 100644
--- a/drivers/parport/parport_atari.c
+++ b/drivers/parport/parport_atari.c
@@ -192,8 +192,8 @@ static int __init parport_atari_init(void)
192 &parport_atari_ops); 192 &parport_atari_ops);
193 if (!p) 193 if (!p)
194 return -ENODEV; 194 return -ENODEV;
195 if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 195 if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 0, p->name,
196 IRQ_TYPE_SLOW, p->name, p)) { 196 p)) {
197 parport_put_port (p); 197 parport_put_port (p);
198 return -ENODEV; 198 return -ENODEV;
199 } 199 }
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 49dd766852ba..d9b64a175990 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
67EXPORT_SYMBOL(pci_bus_write_config_word); 67EXPORT_SYMBOL(pci_bus_write_config_word);
68EXPORT_SYMBOL(pci_bus_write_config_dword); 68EXPORT_SYMBOL(pci_bus_write_config_dword);
69 69
70int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
71 int where, int size, u32 *val)
72{
73 void __iomem *addr;
74
75 addr = bus->ops->map_bus(bus, devfn, where);
76 if (!addr) {
77 *val = ~0;
78 return PCIBIOS_DEVICE_NOT_FOUND;
79 }
80
81 if (size == 1)
82 *val = readb(addr);
83 else if (size == 2)
84 *val = readw(addr);
85 else
86 *val = readl(addr);
87
88 return PCIBIOS_SUCCESSFUL;
89}
90EXPORT_SYMBOL_GPL(pci_generic_config_read);
91
92int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
93 int where, int size, u32 val)
94{
95 void __iomem *addr;
96
97 addr = bus->ops->map_bus(bus, devfn, where);
98 if (!addr)
99 return PCIBIOS_DEVICE_NOT_FOUND;
100
101 if (size == 1)
102 writeb(val, addr);
103 else if (size == 2)
104 writew(val, addr);
105 else
106 writel(val, addr);
107
108 return PCIBIOS_SUCCESSFUL;
109}
110EXPORT_SYMBOL_GPL(pci_generic_config_write);
111
112int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
113 int where, int size, u32 *val)
114{
115 void __iomem *addr;
116
117 addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
118 if (!addr) {
119 *val = ~0;
120 return PCIBIOS_DEVICE_NOT_FOUND;
121 }
122
123 *val = readl(addr);
124
125 if (size <= 2)
126 *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
127
128 return PCIBIOS_SUCCESSFUL;
129}
130EXPORT_SYMBOL_GPL(pci_generic_config_read32);
131
132int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
133 int where, int size, u32 val)
134{
135 void __iomem *addr;
136 u32 mask, tmp;
137
138 addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
139 if (!addr)
140 return PCIBIOS_DEVICE_NOT_FOUND;
141
142 if (size == 4) {
143 writel(val, addr);
144 return PCIBIOS_SUCCESSFUL;
145 } else {
146 mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
147 }
148
149 tmp = readl(addr) & mask;
150 tmp |= val << ((where & 0x3) * 8);
151 writel(tmp, addr);
152
153 return PCIBIOS_SUCCESSFUL;
154}
155EXPORT_SYMBOL_GPL(pci_generic_config_write32);
156
70/** 157/**
71 * pci_bus_set_ops - Set raw operations of pci bus 158 * pci_bus_set_ops - Set raw operations of pci bus
72 * @bus: pci bus struct 159 * @bus: pci bus struct
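
The hunk above adds the pci_generic_config_{read,write} helpers that the host-controller conversions later in this series rely on: a driver supplies only a map_bus() callback and points its pci_ops at the generic accessors. A minimal sketch follows; the ECAM-style offset and the example_pcie structure are illustrative assumptions, not part of the patch.

#include <linux/pci.h>

struct example_pcie {
	void __iomem *cfg_base;		/* hypothetical config-space window */
};

static void __iomem *example_map_bus(struct pci_bus *bus, unsigned int devfn,
				     int where)
{
	struct example_pcie *pcie = bus->sysdata;

	/* Conventional ECAM layout: 1 MiB per bus, 4 KiB per function. */
	return pcie->cfg_base + (bus->number << 20) + (devfn << 12) + where;
}

static struct pci_ops example_pci_ops = {
	.map_bus = example_map_bus,
	.read	 = pci_generic_config_read,
	.write	 = pci_generic_config_write,
};

The pci-host-generic.c and pci-rcar-gen2.c hunks below adopt exactly this pattern.
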
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 8fb16188cd82..90fa3a78fb7c 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -20,17 +20,16 @@
20void pci_add_resource_offset(struct list_head *resources, struct resource *res, 20void pci_add_resource_offset(struct list_head *resources, struct resource *res,
21 resource_size_t offset) 21 resource_size_t offset)
22{ 22{
23 struct pci_host_bridge_window *window; 23 struct resource_entry *entry;
24 24
25 window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL); 25 entry = resource_list_create_entry(res, 0);
26 if (!window) { 26 if (!entry) {
27 printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); 27 printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
28 return; 28 return;
29 } 29 }
30 30
31 window->res = res; 31 entry->offset = offset;
32 window->offset = offset; 32 resource_list_add_tail(entry, resources);
33 list_add_tail(&window->list, resources);
34} 33}
35EXPORT_SYMBOL(pci_add_resource_offset); 34EXPORT_SYMBOL(pci_add_resource_offset);
36 35
@@ -42,12 +41,7 @@ EXPORT_SYMBOL(pci_add_resource);
42 41
43void pci_free_resource_list(struct list_head *resources) 42void pci_free_resource_list(struct list_head *resources)
44{ 43{
45 struct pci_host_bridge_window *window, *tmp; 44 resource_list_free(resources);
46
47 list_for_each_entry_safe(window, tmp, resources, list) {
48 list_del(&window->list);
49 kfree(window);
50 }
51} 45}
52EXPORT_SYMBOL(pci_free_resource_list); 46EXPORT_SYMBOL(pci_free_resource_list);
53 47
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 0e5f3c95af5b..39b2dbe585aa 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -35,10 +35,10 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
35 struct resource *res) 35 struct resource *res)
36{ 36{
37 struct pci_host_bridge *bridge = find_pci_host_bridge(bus); 37 struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
38 struct pci_host_bridge_window *window; 38 struct resource_entry *window;
39 resource_size_t offset = 0; 39 resource_size_t offset = 0;
40 40
41 list_for_each_entry(window, &bridge->windows, list) { 41 resource_list_for_each_entry(window, &bridge->windows) {
42 if (resource_contains(window->res, res)) { 42 if (resource_contains(window->res, res)) {
43 offset = window->offset; 43 offset = window->offset;
44 break; 44 break;
@@ -60,10 +60,10 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
60 struct pci_bus_region *region) 60 struct pci_bus_region *region)
61{ 61{
62 struct pci_host_bridge *bridge = find_pci_host_bridge(bus); 62 struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
63 struct pci_host_bridge_window *window; 63 struct resource_entry *window;
64 resource_size_t offset = 0; 64 resource_size_t offset = 0;
65 65
66 list_for_each_entry(window, &bridge->windows, list) { 66 resource_list_for_each_entry(window, &bridge->windows) {
67 struct pci_bus_region bus_region; 67 struct pci_bus_region bus_region;
68 68
69 if (resource_type(res) != resource_type(window->res)) 69 if (resource_type(res) != resource_type(window->res))
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index c4b6568e486d..7b892a9cc4fc 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -102,4 +102,8 @@ config PCI_LAYERSCAPE
102 help 102 help
103 Say Y here if you want PCIe controller support on Layerscape SoCs. 103 Say Y here if you want PCIe controller support on Layerscape SoCs.
104 104
105config PCI_VERSATILE
106 bool "ARM Versatile PB PCI controller"
107 depends on ARCH_VERSATILE
108
105endmenu 109endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 44c26998027f..e61d91c92bf1 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
12obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o 12obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
13obj-$(CONFIG_PCI_XGENE) += pci-xgene.o 13obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
14obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o 14obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
15obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 6eb1aa75bd37..ba46e581db99 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -76,55 +76,9 @@ static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
76 .map_bus = gen_pci_map_cfg_bus_ecam, 76 .map_bus = gen_pci_map_cfg_bus_ecam,
77}; 77};
78 78
79static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
80 int where, int size, u32 *val)
81{
82 void __iomem *addr;
83 struct pci_sys_data *sys = bus->sysdata;
84 struct gen_pci *pci = sys->private_data;
85
86 addr = pci->cfg.ops->map_bus(bus, devfn, where);
87
88 switch (size) {
89 case 1:
90 *val = readb(addr);
91 break;
92 case 2:
93 *val = readw(addr);
94 break;
95 default:
96 *val = readl(addr);
97 }
98
99 return PCIBIOS_SUCCESSFUL;
100}
101
102static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
103 int where, int size, u32 val)
104{
105 void __iomem *addr;
106 struct pci_sys_data *sys = bus->sysdata;
107 struct gen_pci *pci = sys->private_data;
108
109 addr = pci->cfg.ops->map_bus(bus, devfn, where);
110
111 switch (size) {
112 case 1:
113 writeb(val, addr);
114 break;
115 case 2:
116 writew(val, addr);
117 break;
118 default:
119 writel(val, addr);
120 }
121
122 return PCIBIOS_SUCCESSFUL;
123}
124
125static struct pci_ops gen_pci_ops = { 79static struct pci_ops gen_pci_ops = {
126 .read = gen_pci_config_read, 80 .read = pci_generic_config_read,
127 .write = gen_pci_config_write, 81 .write = pci_generic_config_write,
128}; 82};
129 83
130static const struct of_device_id gen_pci_of_match[] = { 84static const struct of_device_id gen_pci_of_match[] = {
@@ -149,14 +103,14 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
149 struct device *dev = pci->host.dev.parent; 103 struct device *dev = pci->host.dev.parent;
150 struct device_node *np = dev->of_node; 104 struct device_node *np = dev->of_node;
151 resource_size_t iobase; 105 resource_size_t iobase;
152 struct pci_host_bridge_window *win; 106 struct resource_entry *win;
153 107
154 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, 108 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
155 &iobase); 109 &iobase);
156 if (err) 110 if (err)
157 return err; 111 return err;
158 112
159 list_for_each_entry(win, &pci->resources, list) { 113 resource_list_for_each_entry(win, &pci->resources) {
160 struct resource *parent, *res = win->res; 114 struct resource *parent, *res = win->res;
161 115
162 switch (resource_type(res)) { 116 switch (resource_type(res)) {
@@ -287,6 +241,7 @@ static int gen_pci_probe(struct platform_device *pdev)
287 241
288 of_id = of_match_node(gen_pci_of_match, np); 242 of_id = of_match_node(gen_pci_of_match, np);
289 pci->cfg.ops = of_id->data; 243 pci->cfg.ops = of_id->data;
244 gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
290 pci->host.dev.parent = dev; 245 pci->host.dev.parent = dev;
291 INIT_LIST_HEAD(&pci->host.windows); 246 INIT_LIST_HEAD(&pci->host.windows);
292 INIT_LIST_HEAD(&pci->resources); 247 INIT_LIST_HEAD(&pci->resources);
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 78f79e31ac5c..75333b0c4f0a 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -119,7 +119,7 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
119 struct pcie_port *pp = &ks_pcie->pp; 119 struct pcie_port *pp = &ks_pcie->pp;
120 struct irq_chip *chip = irq_desc_get_chip(desc); 120 struct irq_chip *chip = irq_desc_get_chip(desc);
121 121
122 dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq); 122 dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
123 123
124 /* 124 /*
125 * The chained irq handler installation would have replaced normal 125 * The chained irq handler installation would have replaced normal
@@ -197,7 +197,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
197 */ 197 */
198 for (temp = 0; temp < max_host_irqs; temp++) { 198 for (temp = 0; temp < max_host_irqs; temp++) {
199 host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); 199 host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
200 if (host_irqs[temp] < 0) 200 if (!host_irqs[temp])
201 break; 201 break;
202 } 202 }
203 if (temp) { 203 if (temp) {
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 6697b1a4d4fa..68c9e5e9b0a8 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -167,7 +167,6 @@ MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
167static struct platform_driver ls_pcie_driver = { 167static struct platform_driver ls_pcie_driver = {
168 .driver = { 168 .driver = {
169 .name = "layerscape-pcie", 169 .name = "layerscape-pcie",
170 .owner = THIS_MODULE,
171 .of_match_table = ls_pcie_of_match, 170 .of_match_table = ls_pcie_of_match,
172 }, 171 },
173}; 172};
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 1dd759596b0a..1309cfbaa719 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -101,9 +101,7 @@ struct mvebu_pcie {
101 struct mvebu_pcie_port *ports; 101 struct mvebu_pcie_port *ports;
102 struct msi_controller *msi; 102 struct msi_controller *msi;
103 struct resource io; 103 struct resource io;
104 char io_name[30];
105 struct resource realio; 104 struct resource realio;
106 char mem_name[30];
107 struct resource mem; 105 struct resource mem;
108 struct resource busn; 106 struct resource busn;
109 int nports; 107 int nports;
@@ -723,18 +721,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
723{ 721{
724 struct mvebu_pcie *pcie = sys_to_pcie(sys); 722 struct mvebu_pcie *pcie = sys_to_pcie(sys);
725 int i; 723 int i;
726 int domain = 0;
727 724
728#ifdef CONFIG_PCI_DOMAINS 725 pcie->mem.name = "PCI MEM";
729 domain = sys->domain; 726 pcie->realio.name = "PCI I/O";
730#endif
731
732 snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x",
733 domain);
734 pcie->mem.name = pcie->mem_name;
735
736 snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain);
737 pcie->realio.name = pcie->io_name;
738 727
739 if (request_resource(&iomem_resource, &pcie->mem)) 728 if (request_resource(&iomem_resource, &pcie->mem))
740 return 0; 729 return 0;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index d9c042febb1a..dd6b84e6206c 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -131,52 +131,6 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
131 return priv->reg + (slot >> 1) * 0x100 + where; 131 return priv->reg + (slot >> 1) * 0x100 + where;
132} 132}
133 133
134static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
135 int where, int size, u32 *val)
136{
137 void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
138
139 if (!reg)
140 return PCIBIOS_DEVICE_NOT_FOUND;
141
142 switch (size) {
143 case 1:
144 *val = ioread8(reg);
145 break;
146 case 2:
147 *val = ioread16(reg);
148 break;
149 default:
150 *val = ioread32(reg);
151 break;
152 }
153
154 return PCIBIOS_SUCCESSFUL;
155}
156
157static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
158 int where, int size, u32 val)
159{
160 void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
161
162 if (!reg)
163 return PCIBIOS_DEVICE_NOT_FOUND;
164
165 switch (size) {
166 case 1:
167 iowrite8(val, reg);
168 break;
169 case 2:
170 iowrite16(val, reg);
171 break;
172 default:
173 iowrite32(val, reg);
174 break;
175 }
176
177 return PCIBIOS_SUCCESSFUL;
178}
179
180/* PCI interrupt mapping */ 134/* PCI interrupt mapping */
181static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 135static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
182{ 136{
@@ -325,8 +279,9 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
325} 279}
326 280
327static struct pci_ops rcar_pci_ops = { 281static struct pci_ops rcar_pci_ops = {
328 .read = rcar_pci_read_config, 282 .map_bus = rcar_pci_cfg_base,
329 .write = rcar_pci_write_config, 283 .read = pci_generic_config_read,
284 .write = pci_generic_config_write,
330}; 285};
331 286
332static int rcar_pci_probe(struct platform_device *pdev) 287static int rcar_pci_probe(struct platform_device *pdev)
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index a800ae916394..00e92720d7f7 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -480,59 +480,10 @@ static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
480 return addr; 480 return addr;
481} 481}
482 482
483static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
484 int where, int size, u32 *value)
485{
486 void __iomem *addr;
487
488 addr = tegra_pcie_conf_address(bus, devfn, where);
489 if (!addr) {
490 *value = 0xffffffff;
491 return PCIBIOS_DEVICE_NOT_FOUND;
492 }
493
494 *value = readl(addr);
495
496 if (size == 1)
497 *value = (*value >> (8 * (where & 3))) & 0xff;
498 else if (size == 2)
499 *value = (*value >> (8 * (where & 3))) & 0xffff;
500
501 return PCIBIOS_SUCCESSFUL;
502}
503
504static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
505 int where, int size, u32 value)
506{
507 void __iomem *addr;
508 u32 mask, tmp;
509
510 addr = tegra_pcie_conf_address(bus, devfn, where);
511 if (!addr)
512 return PCIBIOS_DEVICE_NOT_FOUND;
513
514 if (size == 4) {
515 writel(value, addr);
516 return PCIBIOS_SUCCESSFUL;
517 }
518
519 if (size == 2)
520 mask = ~(0xffff << ((where & 0x3) * 8));
521 else if (size == 1)
522 mask = ~(0xff << ((where & 0x3) * 8));
523 else
524 return PCIBIOS_BAD_REGISTER_NUMBER;
525
526 tmp = readl(addr) & mask;
527 tmp |= value << ((where & 0x3) * 8);
528 writel(tmp, addr);
529
530 return PCIBIOS_SUCCESSFUL;
531}
532
533static struct pci_ops tegra_pcie_ops = { 483static struct pci_ops tegra_pcie_ops = {
534 .read = tegra_pcie_read_conf, 484 .map_bus = tegra_pcie_conf_address,
535 .write = tegra_pcie_write_conf, 485 .read = pci_generic_config_read32,
486 .write = pci_generic_config_write32,
536}; 487};
537 488
538static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) 489static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
@@ -625,19 +576,6 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port)
625 devm_kfree(pcie->dev, port); 576 devm_kfree(pcie->dev, port);
626} 577}
627 578
628static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
629{
630 u16 reg;
631
632 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
633 pci_read_config_word(dev, PCI_COMMAND, &reg);
634 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
635 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
636 pci_write_config_word(dev, PCI_COMMAND, reg);
637 }
638}
639DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
640
641/* Tegra PCIE root complex wrongly reports device class */ 579/* Tegra PCIE root complex wrongly reports device class */
642static void tegra_pcie_fixup_class(struct pci_dev *dev) 580static void tegra_pcie_fixup_class(struct pci_dev *dev)
643{ 581{
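
Controllers such as Tegra, whose config space only tolerates aligned 32-bit accesses, use the pci_generic_config_read32()/pci_generic_config_write32() variants instead. For sub-dword writes those must fall back to a read-modify-write of the containing dword, much like the tegra_pcie_write_conf() removed above; note that such a merge can inadvertently clear adjacent RW1C (write-one-to-clear) bits. A sketch of that behaviour, modeled on the removed code (illustrative, not the in-tree body):

        static int example_config_write32(struct pci_bus *bus, unsigned int devfn,
                                          int where, int size, u32 val)
        {
                void __iomem *addr;
                u32 mask, tmp;

                addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
                if (!addr)
                        return PCIBIOS_DEVICE_NOT_FOUND;

                if (size == 4) {
                        writel(val, addr);
                        return PCIBIOS_SUCCESSFUL;
                }

                /* Merge the 8- or 16-bit value into the aligned dword. */
                mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
                tmp = readl(addr) & mask;
                tmp |= val << ((where & 0x3) * 8);
                writel(tmp, addr);

                return PCIBIOS_SUCCESSFUL;
        }
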
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
new file mode 100644
index 000000000000..1ec694a52379
--- /dev/null
+++ b/drivers/pci/host/pci-versatile.c
@@ -0,0 +1,237 @@
1/*
2 * Copyright 2004 Koninklijke Philips Electronics NV
3 *
4 * Conversion to platform driver and DT:
5 * Copyright 2014 Linaro Ltd.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * 14/04/2005 Initial version, colin.king@philips.com
17 */
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/of_address.h>
21#include <linux/of_pci.h>
22#include <linux/of_platform.h>
23#include <linux/pci.h>
24#include <linux/platform_device.h>
25
26static void __iomem *versatile_pci_base;
27static void __iomem *versatile_cfg_base[2];
28
29#define PCI_IMAP(m) (versatile_pci_base + ((m) * 4))
30#define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4))
31#define PCI_SELFID (versatile_pci_base + 0xc)
32
33#define VP_PCI_DEVICE_ID 0x030010ee
34#define VP_PCI_CLASS_ID 0x0b400000
35
36static u32 pci_slot_ignore;
37
38static int __init versatile_pci_slot_ignore(char *str)
39{
40 int retval;
41 int slot;
42
43 while ((retval = get_option(&str, &slot))) {
44 if ((slot < 0) || (slot > 31))
45 pr_err("Illegal slot value: %d\n", slot);
46 else
47 pci_slot_ignore |= (1 << slot);
48 }
49 return 1;
50}
51__setup("pci_slot_ignore=", versatile_pci_slot_ignore);
52
53
54static void __iomem *versatile_map_bus(struct pci_bus *bus,
55 unsigned int devfn, int offset)
56{
57 unsigned int busnr = bus->number;
58
59 if (pci_slot_ignore & (1 << PCI_SLOT(devfn)))
60 return NULL;
61
62 return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset);
63}
64
65static struct pci_ops pci_versatile_ops = {
66 .map_bus = versatile_map_bus,
67 .read = pci_generic_config_read32,
68 .write = pci_generic_config_write,
69};
70
71static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
72 struct list_head *res)
73{
74 int err, mem = 1, res_valid = 0;
75 struct device_node *np = dev->of_node;
76 resource_size_t iobase;
77 struct resource_entry *win;
78
79 err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
80 if (err)
81 return err;
82
83 resource_list_for_each_entry(win, res, list) {
84 struct resource *parent, *res = win->res;
85
86 switch (resource_type(res)) {
87 case IORESOURCE_IO:
88 parent = &ioport_resource;
89 err = pci_remap_iospace(res, iobase);
90 if (err) {
91 dev_warn(dev, "error %d: failed to map resource %pR\n",
92 err, res);
93 continue;
94 }
95 break;
96 case IORESOURCE_MEM:
97 parent = &iomem_resource;
98 res_valid |= !(res->flags & IORESOURCE_PREFETCH);
99
100 writel(res->start >> 28, PCI_IMAP(mem));
101 writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
102 mem++;
103
104 break;
105 case IORESOURCE_BUS:
106 default:
107 continue;
108 }
109
110 err = devm_request_resource(dev, parent, res);
111 if (err)
112 goto out_release_res;
113 }
114
115 if (!res_valid) {
116 dev_err(dev, "non-prefetchable memory resource required\n");
117 err = -EINVAL;
118 goto out_release_res;
119 }
120
121 return 0;
122
123out_release_res:
124 pci_free_resource_list(res);
125 return err;
126}
127
128/* Unused, temporary to satisfy ARM arch code */
129struct pci_sys_data sys;
130
131static int versatile_pci_probe(struct platform_device *pdev)
132{
133 struct resource *res;
134 int ret, i, myslot = -1;
135 u32 val;
136 void __iomem *local_pci_cfg_base;
137 struct pci_bus *bus;
138 LIST_HEAD(pci_res);
139
140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
141 if (!res)
142 return -ENODEV;
143 versatile_pci_base = devm_ioremap_resource(&pdev->dev, res);
144
145 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
146 if (!res)
147 return -ENODEV;
148 versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res);
149
150 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
151 if (!res)
152 return -ENODEV;
153 versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res);
154
155 ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res);
156 if (ret)
157 return ret;
158
159 /*
 160	 * We need to discover the PCI core first so it can configure itself
 161	 * before the main PCI probing is performed
162 */
163 for (i = 0; i < 32; i++) {
164 if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) &&
165 (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) {
166 myslot = i;
167 break;
168 }
169 }
170 if (myslot == -1) {
171 dev_err(&pdev->dev, "Cannot find PCI core!\n");
172 return -EIO;
173 }
174 /*
 175	 * Do not map the Versatile FPGA PCI device into memory space
176 */
177 pci_slot_ignore |= (1 << myslot);
178
179 dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot);
180
181 writel(myslot, PCI_SELFID);
182 local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
183
184 val = readl(local_pci_cfg_base + PCI_COMMAND);
185 val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
186 writel(val, local_pci_cfg_base + PCI_COMMAND);
187
188 /*
189 * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
190 */
191 writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
192 writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
193 writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
194
195 /*
196 * For many years the kernel and QEMU were symbiotically buggy
197 * in that they both assumed the same broken IRQ mapping.
198 * QEMU therefore attempts to auto-detect old broken kernels
199 * so that they still work on newer QEMU as they did on old
 200	 * QEMU. Since we now use the correct (i.e. matching-hardware)
 201	 * IRQ mapping, we write a definitely different value to the
202 * PCI_INTERRUPT_LINE register to tell QEMU that we expect
203 * real hardware behaviour and it need not be backwards
204 * compatible for us. This write is harmless on real hardware.
205 */
206 writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE);
207
208 pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
209 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
210
211 bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, &sys, &pci_res);
212 if (!bus)
213 return -ENOMEM;
214
215 pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
216 pci_assign_unassigned_bus_resources(bus);
217
218 return 0;
219}
220
221static const struct of_device_id versatile_pci_of_match[] = {
222 { .compatible = "arm,versatile-pci", },
223 { },
224};
225MODULE_DEVICE_TABLE(of, versatile_pci_of_match);
226
227static struct platform_driver versatile_pci_driver = {
228 .driver = {
229 .name = "versatile-pci",
230 .of_match_table = versatile_pci_of_match,
231 },
232 .probe = versatile_pci_probe,
233};
234module_platform_driver(versatile_pci_driver);
235
236MODULE_DESCRIPTION("Versatile PCI driver");
237MODULE_LICENSE("GPL v2");
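
The versatile_pci_slot_ignore() __setup() hook above also lets slots be hidden from config accesses via the kernel command line; get_option() parses a comma-separated list of slot numbers in the 0-31 range. For example (slot numbers purely illustrative):

        pci_slot_ignore=8,9

Ignored slots are masked in versatile_map_bus(), which returns NULL for them, so the generic accessors report those slots as empty.
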
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index b1d0596457c5..aab55474dd0d 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -16,7 +16,7 @@
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 */ 18 */
19#include <linux/clk-private.h> 19#include <linux/clk.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
@@ -74,92 +74,6 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
74 return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; 74 return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
75} 75}
76 76
77/* PCIe Configuration Out/In */
78static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val)
79{
80 writel(val, addr + offset);
81}
82
83static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val)
84{
85 u32 val32 = readl(addr + (offset & ~0x3));
86
87 switch (offset & 0x3) {
88 case 2:
89 val32 &= ~0xFFFF0000;
90 val32 |= (u32)val << 16;
91 break;
92 case 0:
93 default:
94 val32 &= ~0xFFFF;
95 val32 |= val;
96 break;
97 }
98 writel(val32, addr + (offset & ~0x3));
99}
100
101static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val)
102{
103 u32 val32 = readl(addr + (offset & ~0x3));
104
105 switch (offset & 0x3) {
106 case 0:
107 val32 &= ~0xFF;
108 val32 |= val;
109 break;
110 case 1:
111 val32 &= ~0xFF00;
112 val32 |= (u32)val << 8;
113 break;
114 case 2:
115 val32 &= ~0xFF0000;
116 val32 |= (u32)val << 16;
117 break;
118 case 3:
119 default:
120 val32 &= ~0xFF000000;
121 val32 |= (u32)val << 24;
122 break;
123 }
124 writel(val32, addr + (offset & ~0x3));
125}
126
127static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val)
128{
129 *val = readl(addr + offset);
130}
131
132static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val)
133{
134 *val = readl(addr + (offset & ~0x3));
135
136 switch (offset & 0x3) {
137 case 2:
138 *val >>= 16;
139 break;
140 }
141
142 *val &= 0xFFFF;
143}
144
145static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val)
146{
147 *val = readl(addr + (offset & ~0x3));
148
149 switch (offset & 0x3) {
150 case 3:
151 *val = *val >> 24;
152 break;
153 case 2:
154 *val = *val >> 16;
155 break;
156 case 1:
157 *val = *val >> 8;
158 break;
159 }
160 *val &= 0xFF;
161}
162
163/* 77/*
164 * When the address bit [17:16] is 2'b01, the Configuration access will be 78 * When the address bit [17:16] is 2'b01, the Configuration access will be
165 * treated as Type 1 and it will be forwarded to external PCIe device. 79 * treated as Type 1 and it will be forwarded to external PCIe device.
@@ -213,69 +127,23 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
213 return false; 127 return false;
214} 128}
215 129
216static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn, 130static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
217 int offset, int len, u32 *val) 131 int offset)
218{
219 struct xgene_pcie_port *port = bus->sysdata;
220 void __iomem *addr;
221
222 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
223 return PCIBIOS_DEVICE_NOT_FOUND;
224
225 if (xgene_pcie_hide_rc_bars(bus, offset)) {
226 *val = 0;
227 return PCIBIOS_SUCCESSFUL;
228 }
229
230 xgene_pcie_set_rtdid_reg(bus, devfn);
231 addr = xgene_pcie_get_cfg_base(bus);
232 switch (len) {
233 case 1:
234 xgene_pcie_cfg_in8(addr, offset, val);
235 break;
236 case 2:
237 xgene_pcie_cfg_in16(addr, offset, val);
238 break;
239 default:
240 xgene_pcie_cfg_in32(addr, offset, val);
241 break;
242 }
243
244 return PCIBIOS_SUCCESSFUL;
245}
246
247static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
248 int offset, int len, u32 val)
249{ 132{
250 struct xgene_pcie_port *port = bus->sysdata; 133 struct xgene_pcie_port *port = bus->sysdata;
251 void __iomem *addr;
252 134
253 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up) 135 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up ||
254 return PCIBIOS_DEVICE_NOT_FOUND; 136 xgene_pcie_hide_rc_bars(bus, offset))
255 137 return NULL;
256 if (xgene_pcie_hide_rc_bars(bus, offset))
257 return PCIBIOS_SUCCESSFUL;
258 138
259 xgene_pcie_set_rtdid_reg(bus, devfn); 139 xgene_pcie_set_rtdid_reg(bus, devfn);
260 addr = xgene_pcie_get_cfg_base(bus); 140 return xgene_pcie_get_cfg_base(bus);
261 switch (len) {
262 case 1:
263 xgene_pcie_cfg_out8(addr, offset, (u8)val);
264 break;
265 case 2:
266 xgene_pcie_cfg_out16(addr, offset, (u16)val);
267 break;
268 default:
269 xgene_pcie_cfg_out32(addr, offset, val);
270 break;
271 }
272
273 return PCIBIOS_SUCCESSFUL;
274} 141}
275 142
276static struct pci_ops xgene_pcie_ops = { 143static struct pci_ops xgene_pcie_ops = {
277 .read = xgene_pcie_read_config, 144 .map_bus = xgene_pcie_map_bus,
278 .write = xgene_pcie_write_config 145 .read = pci_generic_config_read32,
146 .write = pci_generic_config_write32,
279}; 147};
280 148
281static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr, 149static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
@@ -401,11 +269,11 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
401 struct list_head *res, 269 struct list_head *res,
402 resource_size_t io_base) 270 resource_size_t io_base)
403{ 271{
404 struct pci_host_bridge_window *window; 272 struct resource_entry *window;
405 struct device *dev = port->dev; 273 struct device *dev = port->dev;
406 int ret; 274 int ret;
407 275
408 list_for_each_entry(window, res, list) { 276 resource_list_for_each_entry(window, res) {
409 struct resource *res = window->res; 277 struct resource *res = window->res;
410 u64 restype = resource_type(res); 278 u64 restype = resource_type(res);
411 279
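
Note how the X-Gene conversion folds the "non-zero devfn on the root bus", "link down" and "hidden RC BAR" checks into the single map_bus() callback: returning NULL is sufficient, because pci_generic_config_read() then stores all-ones and returns PCIBIOS_DEVICE_NOT_FOUND, which the enumeration code treats as an empty slot. Roughly (the helper below is made up for illustration; the in-tree check lives in pci_bus_read_dev_vendor_id()):

        static bool slot_seems_populated(struct pci_bus *bus, unsigned int devfn)
        {
                u32 id;

                /* Goes through ->map_bus() via the generic accessor. */
                if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &id))
                        return false;

                /* All-ones (or all-zeroes) is what an absent device reads as. */
                return id != 0xffffffff && id != 0x00000000;
        }
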
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 17ca98657a28..1f4ea6f2d910 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -511,9 +511,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
511 dw_pci.private_data = (void **)&pp; 511 dw_pci.private_data = (void **)&pp;
512 512
513 pci_common_init_dev(pp->dev, &dw_pci); 513 pci_common_init_dev(pp->dev, &dw_pci);
514#ifdef CONFIG_PCI_DOMAINS
515 dw_pci.domain++;
516#endif
517 514
518 return 0; 515 return 0;
519} 516}
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 748786c402fc..c57bd0ac39a0 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -397,9 +397,6 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie)
397#endif 397#endif
398 398
399 pci_common_init_dev(&pdev->dev, &rcar_pci); 399 pci_common_init_dev(&pdev->dev, &rcar_pci);
400#ifdef CONFIG_PCI_DOMAINS
401 rcar_pci.domain++;
402#endif
403} 400}
404 401
405static int phy_wait_for_ack(struct rcar_pcie *pcie) 402static int phy_wait_for_ack(struct rcar_pcie *pcie)
@@ -757,7 +754,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
757 goto err_map_reg; 754 goto err_map_reg;
758 755
759 i = irq_of_parse_and_map(pdev->dev.of_node, 0); 756 i = irq_of_parse_and_map(pdev->dev.of_node, 0);
760 if (i < 0) { 757 if (!i) {
761 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); 758 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
762 err = -ENOENT; 759 err = -ENOENT;
763 goto err_map_reg; 760 goto err_map_reg;
@@ -765,7 +762,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
765 pcie->msi.irq1 = i; 762 pcie->msi.irq1 = i;
766 763
767 i = irq_of_parse_and_map(pdev->dev.of_node, 1); 764 i = irq_of_parse_and_map(pdev->dev.of_node, 1);
768 if (i < 0) { 765 if (!i) {
769 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); 766 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
770 err = -ENOENT; 767 err = -ENOENT;
771 goto err_map_reg; 768 goto err_map_reg;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index ef3ebaf9a738..f1a06a091ccb 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -148,10 +148,10 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
148 */ 148 */
149static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) 149static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
150{ 150{
151 u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR); 151 unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
152 152
153 if (val & XILINX_PCIE_RPEFR_ERR_VALID) { 153 if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
154 dev_dbg(port->dev, "Requester ID %d\n", 154 dev_dbg(port->dev, "Requester ID %lu\n",
155 val & XILINX_PCIE_RPEFR_REQ_ID); 155 val & XILINX_PCIE_RPEFR_REQ_ID);
156 pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, 156 pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
157 XILINX_PCIE_REG_RPEFR); 157 XILINX_PCIE_REG_RPEFR);
@@ -189,7 +189,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
189} 189}
190 190
191/** 191/**
192 * xilinx_pcie_config_base - Get configuration base 192 * xilinx_pcie_map_bus - Get configuration base
193 * @bus: PCI Bus structure 193 * @bus: PCI Bus structure
194 * @devfn: Device/function 194 * @devfn: Device/function
195 * @where: Offset from base 195 * @where: Offset from base
@@ -197,96 +197,26 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
197 * Return: Base address of the configuration space needed to be 197 * Return: Base address of the configuration space needed to be
198 * accessed. 198 * accessed.
199 */ 199 */
200static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus, 200static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
201 unsigned int devfn, int where) 201 unsigned int devfn, int where)
202{ 202{
203 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); 203 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
204 int relbus; 204 int relbus;
205 205
206 if (!xilinx_pcie_valid_device(bus, devfn))
207 return NULL;
208
206 relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | 209 relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
207 (devfn << ECAM_DEV_NUM_SHIFT); 210 (devfn << ECAM_DEV_NUM_SHIFT);
208 211
209 return port->reg_base + relbus + where; 212 return port->reg_base + relbus + where;
210} 213}
211 214
212/**
213 * xilinx_pcie_read_config - Read configuration space
214 * @bus: PCI Bus structure
215 * @devfn: Device/function
216 * @where: Offset from base
217 * @size: Byte/word/dword
218 * @val: Value to be read
219 *
220 * Return: PCIBIOS_SUCCESSFUL on success
221 * PCIBIOS_DEVICE_NOT_FOUND on failure
222 */
223static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
224 int where, int size, u32 *val)
225{
226 void __iomem *addr;
227
228 if (!xilinx_pcie_valid_device(bus, devfn)) {
229 *val = 0xFFFFFFFF;
230 return PCIBIOS_DEVICE_NOT_FOUND;
231 }
232
233 addr = xilinx_pcie_config_base(bus, devfn, where);
234
235 switch (size) {
236 case 1:
237 *val = readb(addr);
238 break;
239 case 2:
240 *val = readw(addr);
241 break;
242 default:
243 *val = readl(addr);
244 break;
245 }
246
247 return PCIBIOS_SUCCESSFUL;
248}
249
250/**
251 * xilinx_pcie_write_config - Write configuration space
252 * @bus: PCI Bus structure
253 * @devfn: Device/function
254 * @where: Offset from base
255 * @size: Byte/word/dword
256 * @val: Value to be written to device
257 *
258 * Return: PCIBIOS_SUCCESSFUL on success
259 * PCIBIOS_DEVICE_NOT_FOUND on failure
260 */
261static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
262 int where, int size, u32 val)
263{
264 void __iomem *addr;
265
266 if (!xilinx_pcie_valid_device(bus, devfn))
267 return PCIBIOS_DEVICE_NOT_FOUND;
268
269 addr = xilinx_pcie_config_base(bus, devfn, where);
270
271 switch (size) {
272 case 1:
273 writeb(val, addr);
274 break;
275 case 2:
276 writew(val, addr);
277 break;
278 default:
279 writel(val, addr);
280 break;
281 }
282
283 return PCIBIOS_SUCCESSFUL;
284}
285
286/* PCIe operations */ 215/* PCIe operations */
287static struct pci_ops xilinx_pcie_ops = { 216static struct pci_ops xilinx_pcie_ops = {
288 .read = xilinx_pcie_read_config, 217 .map_bus = xilinx_pcie_map_bus,
289 .write = xilinx_pcie_write_config, 218 .read = pci_generic_config_read,
219 .write = pci_generic_config_write,
290}; 220};
291 221
292/* MSI functions */ 222/* MSI functions */
@@ -737,7 +667,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
737 resource_size_t offset; 667 resource_size_t offset;
738 struct of_pci_range_parser parser; 668 struct of_pci_range_parser parser;
739 struct of_pci_range range; 669 struct of_pci_range range;
740 struct pci_host_bridge_window *win; 670 struct resource_entry *win;
741 int err = 0, mem_resno = 0; 671 int err = 0, mem_resno = 0;
742 672
743 /* Get the ranges */ 673 /* Get the ranges */
@@ -807,7 +737,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
807 737
808free_resources: 738free_resources:
809 release_child_resources(&iomem_resource); 739 release_child_resources(&iomem_resource);
810 list_for_each_entry(win, &port->resources, list) 740 resource_list_for_each_entry(win, &port->resources)
811 devm_kfree(dev, win->res); 741 devm_kfree(dev, win->res);
812 pci_free_resource_list(&port->resources); 742 pci_free_resource_list(&port->resources);
813 743
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index a5a7fd8332ac..46db29395a62 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -214,8 +214,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
214 214
215 kfree(slot->hotplug_slot->info); 215 kfree(slot->hotplug_slot->info);
216 kfree(slot->hotplug_slot); 216 kfree(slot->hotplug_slot);
217 if (slot->dev) 217 pci_dev_put(slot->dev);
218 pci_dev_put(slot->dev);
219 kfree(slot); 218 kfree(slot);
220} 219}
221 220
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ff32e85e1de6..f052e951b23e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -532,8 +532,6 @@ static void interrupt_event_handler(struct work_struct *work)
532 pciehp_green_led_off(p_slot); 532 pciehp_green_led_off(p_slot);
533 break; 533 break;
534 case INT_PRESENCE_ON: 534 case INT_PRESENCE_ON:
535 if (!HP_SUPR_RM(ctrl))
536 break;
537 ctrl_dbg(ctrl, "Surprise Insertion\n"); 535 ctrl_dbg(ctrl, "Surprise Insertion\n");
538 handle_surprise_event(p_slot); 536 handle_surprise_event(p_slot);
539 break; 537 break;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index bada20999870..c32fb786d48e 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -475,7 +475,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
475 struct slot *slot = bss_hotplug_slot->private; 475 struct slot *slot = bss_hotplug_slot->private;
476 struct pci_dev *dev, *temp; 476 struct pci_dev *dev, *temp;
477 int rc; 477 int rc;
478 acpi_owner_id ssdt_id = 0; 478 acpi_handle ssdt_hdl = NULL;
479 479
480 /* Acquire update access to the bus */ 480 /* Acquire update access to the bus */
481 mutex_lock(&sn_hotplug_mutex); 481 mutex_lock(&sn_hotplug_mutex);
@@ -522,7 +522,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
522 if (ACPI_SUCCESS(ret) && 522 if (ACPI_SUCCESS(ret) &&
523 (adr>>16) == (slot->device_num + 1)) { 523 (adr>>16) == (slot->device_num + 1)) {
524 /* retain the owner id */ 524 /* retain the owner id */
525 acpi_get_id(chandle, &ssdt_id); 525 ssdt_hdl = chandle;
526 526
527 ret = acpi_bus_get_device(chandle, 527 ret = acpi_bus_get_device(chandle,
528 &device); 528 &device);
@@ -547,12 +547,13 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
547 pci_unlock_rescan_remove(); 547 pci_unlock_rescan_remove();
548 548
549 /* Remove the SSDT for the slot from the ACPI namespace */ 549 /* Remove the SSDT for the slot from the ACPI namespace */
550 if (SN_ACPI_BASE_SUPPORT() && ssdt_id) { 550 if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) {
551 acpi_status ret; 551 acpi_status ret;
552 ret = acpi_unload_table_id(ssdt_id); 552 ret = acpi_unload_parent_table(ssdt_hdl);
553 if (ACPI_FAILURE(ret)) { 553 if (ACPI_FAILURE(ret)) {
554 printk(KERN_ERR "%s: acpi_unload_table_id failed (0x%x) for id %d\n", 554 acpi_handle_err(ssdt_hdl,
555 __func__, ret, ssdt_id); 555 "%s: acpi_unload_parent_table failed (0x%x)\n",
556 __func__, ret);
556 /* try to continue on */ 557 /* try to continue on */
557 } 558 }
558 } 559 }
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index fd60806d3fd0..c3e7dfcf9ff5 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -694,11 +694,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
694{ 694{
695 resource_size_t phys_addr; 695 resource_size_t phys_addr;
696 u32 table_offset; 696 u32 table_offset;
697 unsigned long flags;
697 u8 bir; 698 u8 bir;
698 699
699 pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, 700 pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
700 &table_offset); 701 &table_offset);
701 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); 702 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
703 flags = pci_resource_flags(dev, bir);
704 if (!flags || (flags & IORESOURCE_UNSET))
705 return NULL;
706
702 table_offset &= PCI_MSIX_TABLE_OFFSET; 707 table_offset &= PCI_MSIX_TABLE_OFFSET;
703 phys_addr = pci_resource_start(dev, bir) + table_offset; 708 phys_addr = pci_resource_start(dev, bir) + table_offset;
704 709
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3542150fc8a3..489063987325 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -501,12 +501,29 @@ static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
501 return 0; 501 return 0;
502} 502}
503 503
504static bool acpi_pci_need_resume(struct pci_dev *dev)
505{
506 struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
507
508 if (!adev || !acpi_device_power_manageable(adev))
509 return false;
510
511 if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
512 return true;
513
514 if (acpi_target_system_state() == ACPI_STATE_S0)
515 return false;
516
517 return !!adev->power.flags.dsw_present;
518}
519
504static struct pci_platform_pm_ops acpi_pci_platform_pm = { 520static struct pci_platform_pm_ops acpi_pci_platform_pm = {
505 .is_manageable = acpi_pci_power_manageable, 521 .is_manageable = acpi_pci_power_manageable,
506 .set_state = acpi_pci_set_power_state, 522 .set_state = acpi_pci_set_power_state,
507 .choose_state = acpi_pci_choose_state, 523 .choose_state = acpi_pci_choose_state,
508 .sleep_wake = acpi_pci_sleep_wake, 524 .sleep_wake = acpi_pci_sleep_wake,
509 .run_wake = acpi_pci_run_wake, 525 .run_wake = acpi_pci_run_wake,
526 .need_resume = acpi_pci_need_resume,
510}; 527};
511 528
512void acpi_pci_add_bus(struct pci_bus *bus) 529void acpi_pci_add_bus(struct pci_bus *bus)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 887e6bd95af7..3cb2210de553 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -653,7 +653,6 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
653static int pci_pm_prepare(struct device *dev) 653static int pci_pm_prepare(struct device *dev)
654{ 654{
655 struct device_driver *drv = dev->driver; 655 struct device_driver *drv = dev->driver;
656 int error = 0;
657 656
658 /* 657 /*
659 * Devices having power.ignore_children set may still be necessary for 658 * Devices having power.ignore_children set may still be necessary for
@@ -662,10 +661,12 @@ static int pci_pm_prepare(struct device *dev)
662 if (dev->power.ignore_children) 661 if (dev->power.ignore_children)
663 pm_runtime_resume(dev); 662 pm_runtime_resume(dev);
664 663
665 if (drv && drv->pm && drv->pm->prepare) 664 if (drv && drv->pm && drv->pm->prepare) {
666 error = drv->pm->prepare(dev); 665 int error = drv->pm->prepare(dev);
667 666 if (error)
668 return error; 667 return error;
668 }
669 return pci_dev_keep_suspended(to_pci_dev(dev));
669} 670}
670 671
671 672
@@ -1383,7 +1384,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
1383 if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) 1384 if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
1384 return -ENOMEM; 1385 return -ENOMEM;
1385 1386
1386 if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x", 1387 if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
1387 pdev->vendor, pdev->device, 1388 pdev->vendor, pdev->device,
1388 pdev->subsystem_vendor, pdev->subsystem_device, 1389 pdev->subsystem_vendor, pdev->subsystem_device,
1389 (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), 1390 (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e9d4fd861ba1..81f06e8dcc04 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -10,6 +10,8 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/of.h>
14#include <linux/of_pci.h>
13#include <linux/pci.h> 15#include <linux/pci.h>
14#include <linux/pm.h> 16#include <linux/pm.h>
15#include <linux/slab.h> 17#include <linux/slab.h>
@@ -521,6 +523,11 @@ static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
521 pci_platform_pm->run_wake(dev, enable) : -ENODEV; 523 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
522} 524}
523 525
526static inline bool platform_pci_need_resume(struct pci_dev *dev)
527{
528 return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
529}
530
524/** 531/**
525 * pci_raw_set_power_state - Use PCI PM registers to set the power state of 532 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
526 * given PCI device 533 * given PCI device
@@ -1999,6 +2006,27 @@ bool pci_dev_run_wake(struct pci_dev *dev)
1999} 2006}
2000EXPORT_SYMBOL_GPL(pci_dev_run_wake); 2007EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2001 2008
2009/**
2010 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2011 * @pci_dev: Device to check.
2012 *
2013 * Return 'true' if the device is runtime-suspended, does not have to be
2014 * reconfigured due to a wakeup settings difference between system and runtime
2015 * suspend, and its current power state is suitable for the upcoming
2016 * (system) transition.
2017 */
2018bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2019{
2020 struct device *dev = &pci_dev->dev;
2021
2022 if (!pm_runtime_suspended(dev)
2023 || (device_can_wakeup(dev) && !device_may_wakeup(dev))
2024 || platform_pci_need_resume(pci_dev))
2025 return false;
2026
2027 return pci_target_state(pci_dev) == pci_dev->current_state;
2028}
2029
2002void pci_config_pm_runtime_get(struct pci_dev *pdev) 2030void pci_config_pm_runtime_get(struct pci_dev *pdev)
2003{ 2031{
2004 struct device *dev = &pdev->dev; 2032 struct device *dev = &pdev->dev;
@@ -3197,7 +3225,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
3197{ 3225{
3198 u16 csr; 3226 u16 csr;
3199 3227
3200 if (!dev->pm_cap) 3228 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3201 return -ENOTTY; 3229 return -ENOTTY;
3202 3230
3203 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); 3231 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
@@ -4471,6 +4499,53 @@ int pci_get_new_domain_nr(void)
4471{ 4499{
4472 return atomic_inc_return(&__domain_nr); 4500 return atomic_inc_return(&__domain_nr);
4473} 4501}
4502
4503#ifdef CONFIG_PCI_DOMAINS_GENERIC
4504void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
4505{
4506 static int use_dt_domains = -1;
4507 int domain = of_get_pci_domain_nr(parent->of_node);
4508
4509 /*
4510 * Check DT domain and use_dt_domains values.
4511 *
4512 * If DT domain property is valid (domain >= 0) and
4513 * use_dt_domains != 0, the DT assignment is valid since this means
4514 * we have not previously allocated a domain number by using
4515 * pci_get_new_domain_nr(); we should also update use_dt_domains to
4516 * 1, to indicate that we have just assigned a domain number from
4517 * DT.
4518 *
4519 * If DT domain property value is not valid (ie domain < 0), and we
4520 * have not previously assigned a domain number from DT
4521 * (use_dt_domains != 1) we should assign a domain number by
4522 * using the:
4523 *
4524 * pci_get_new_domain_nr()
4525 *
4526 * API and update the use_dt_domains value to keep track of method we
4527 * are using to assign domain numbers (use_dt_domains = 0).
4528 *
4529 * All other combinations imply we have a platform that is trying
4530 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
4531 * which is a recipe for domain mishandling and it is prevented by
4532 * invalidating the domain value (domain = -1) and printing a
4533 * corresponding error.
4534 */
4535 if (domain >= 0 && use_dt_domains) {
4536 use_dt_domains = 1;
4537 } else if (domain < 0 && use_dt_domains != 1) {
4538 use_dt_domains = 0;
4539 domain = pci_get_new_domain_nr();
4540 } else {
4541 dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
4542 parent->of_node->full_name);
4543 domain = -1;
4544 }
4545
4546 bus->domain_nr = domain;
4547}
4548#endif
4474#endif 4549#endif
4475 4550
4476/** 4551/**
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d54632a1db43..4091f82239cd 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -50,6 +50,10 @@ int pci_probe_reset_function(struct pci_dev *dev);
50 * for given device (the device's wake-up capability has to be 50 * for given device (the device's wake-up capability has to be
51 * enabled by @sleep_wake for this feature to work) 51 * enabled by @sleep_wake for this feature to work)
52 * 52 *
53 * @need_resume: returns 'true' if the given device (which is currently
54 * suspended) needs to be resumed to be configured for system
55 * wakeup.
56 *
53 * If given platform is generally capable of power managing PCI devices, all of 57 * If given platform is generally capable of power managing PCI devices, all of
54 * these callbacks are mandatory. 58 * these callbacks are mandatory.
55 */ 59 */
@@ -59,6 +63,7 @@ struct pci_platform_pm_ops {
59 pci_power_t (*choose_state)(struct pci_dev *dev); 63 pci_power_t (*choose_state)(struct pci_dev *dev);
60 int (*sleep_wake)(struct pci_dev *dev, bool enable); 64 int (*sleep_wake)(struct pci_dev *dev, bool enable);
61 int (*run_wake)(struct pci_dev *dev, bool enable); 65 int (*run_wake)(struct pci_dev *dev, bool enable);
66 bool (*need_resume)(struct pci_dev *dev);
62}; 67};
63 68
64int pci_set_platform_pm(struct pci_platform_pm_ops *ops); 69int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
@@ -67,6 +72,7 @@ void pci_power_up(struct pci_dev *dev);
67void pci_disable_enabled_device(struct pci_dev *dev); 72void pci_disable_enabled_device(struct pci_dev *dev);
68int pci_finish_runtime_suspend(struct pci_dev *dev); 73int pci_finish_runtime_suspend(struct pci_dev *dev);
69int __pci_pme_wakeup(struct pci_dev *dev, void *ign); 74int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
75bool pci_dev_keep_suspended(struct pci_dev *dev);
70void pci_config_pm_runtime_get(struct pci_dev *dev); 76void pci_config_pm_runtime_get(struct pci_dev *dev);
71void pci_config_pm_runtime_put(struct pci_dev *dev); 77void pci_config_pm_runtime_put(struct pci_dev *dev);
72void pci_pm_init(struct pci_dev *dev); 78void pci_pm_init(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e1e7026b838d..820740a22e94 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -859,7 +859,10 @@ static ssize_t link_state_store(struct device *dev,
859{ 859{
860 struct pci_dev *pdev = to_pci_dev(dev); 860 struct pci_dev *pdev = to_pci_dev(dev);
861 struct pcie_link_state *link, *root = pdev->link_state->root; 861 struct pcie_link_state *link, *root = pdev->link_state->root;
862 u32 val = buf[0] - '0', state = 0; 862 u32 val, state = 0;
863
864 if (kstrtouint(buf, 10, &val))
865 return -EINVAL;
863 866
864 if (aspm_disabled) 867 if (aspm_disabled)
865 return -EPERM; 868 return -EPERM;
@@ -900,15 +903,14 @@ static ssize_t clk_ctl_store(struct device *dev,
900 size_t n) 903 size_t n)
901{ 904{
902 struct pci_dev *pdev = to_pci_dev(dev); 905 struct pci_dev *pdev = to_pci_dev(dev);
903 int state; 906 bool state;
904 907
905 if (n < 1) 908 if (strtobool(buf, &state))
906 return -EINVAL; 909 return -EINVAL;
907 state = buf[0]-'0';
908 910
909 down_read(&pci_bus_sem); 911 down_read(&pci_bus_sem);
910 mutex_lock(&aspm_lock); 912 mutex_lock(&aspm_lock);
911 pcie_set_clkpm_nocheck(pdev->link_state, !!state); 913 pcie_set_clkpm_nocheck(pdev->link_state, state);
912 mutex_unlock(&aspm_lock); 914 mutex_unlock(&aspm_lock);
913 up_read(&pci_bus_sem); 915 up_read(&pci_bus_sem);
914 916
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 23212f8ae09b..8d2f400e96cb 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1895,7 +1895,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1895 int error; 1895 int error;
1896 struct pci_host_bridge *bridge; 1896 struct pci_host_bridge *bridge;
1897 struct pci_bus *b, *b2; 1897 struct pci_bus *b, *b2;
1898 struct pci_host_bridge_window *window, *n; 1898 struct resource_entry *window, *n;
1899 struct resource *res; 1899 struct resource *res;
1900 resource_size_t offset; 1900 resource_size_t offset;
1901 char bus_addr[64]; 1901 char bus_addr[64];
@@ -1959,8 +1959,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1959 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); 1959 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1960 1960
1961 /* Add initial resources to the bus */ 1961 /* Add initial resources to the bus */
1962 list_for_each_entry_safe(window, n, resources, list) { 1962 resource_list_for_each_entry_safe(window, n, resources) {
1963 list_move_tail(&window->list, &bridge->windows); 1963 list_move_tail(&window->node, &bridge->windows);
1964 res = window->res; 1964 res = window->res;
1965 offset = window->offset; 1965 offset = window->offset;
1966 if (res->flags & IORESOURCE_BUS) 1966 if (res->flags & IORESOURCE_BUS)
@@ -2060,12 +2060,12 @@ void pci_bus_release_busn_res(struct pci_bus *b)
2060struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, 2060struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2061 struct pci_ops *ops, void *sysdata, struct list_head *resources) 2061 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2062{ 2062{
2063 struct pci_host_bridge_window *window; 2063 struct resource_entry *window;
2064 bool found = false; 2064 bool found = false;
2065 struct pci_bus *b; 2065 struct pci_bus *b;
2066 int max; 2066 int max;
2067 2067
2068 list_for_each_entry(window, resources, list) 2068 resource_list_for_each_entry(window, resources)
2069 if (window->res->flags & IORESOURCE_BUS) { 2069 if (window->res->flags & IORESOURCE_BUS) {
2070 found = true; 2070 found = true;
2071 break; 2071 break;
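
The struct pci_host_bridge_window -> struct resource_entry switch that runs through these hunks comes from the generic resource-list helpers; the list member is named "node" rather than "list", which is why the list_move_tail() call above changes as well. Roughly what include/linux/resource_ext.h provides (a sketch, not verbatim):

        struct resource_entry {
                struct list_head        node;
                struct resource         *res;   /* in CPU (master) address space */
                resource_size_t         offset; /* bridge translation offset */
                struct resource         __res;  /* default storage for *res */
        };

        #define resource_list_for_each_entry(entry, list)       \
                list_for_each_entry((entry), (list), node)

        #define resource_list_for_each_entry_safe(entry, tmp, list)     \
                list_for_each_entry_safe((entry), (tmp), (list), node)
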
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 903d5078b5ed..85f247e28a80 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3076,6 +3076,27 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
3076 */ 3076 */
3077DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); 3077DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
3078 3078
3079static void quirk_no_pm_reset(struct pci_dev *dev)
3080{
3081 /*
3082 * We can't do a bus reset on root bus devices, but an ineffective
3083 * PM reset may be better than nothing.
3084 */
3085 if (!pci_is_root_bus(dev->bus))
3086 dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
3087}
3088
3089/*
3091 * Some AMD/ATI GPUs (HD8570 - Oland) report that a D3hot->D0 transition
3091 * causes a reset (i.e., they advertise NoSoftRst-). This transition seems
3092 * to have no effect on the device: it retains the framebuffer contents and
3093 * monitor sync. Advertising this support makes other layers, like VFIO,
3094 * assume pci_reset_function() is viable for this device. Mark it as
3095 * unavailable to skip it when testing reset methods.
3096 */
3097DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
3098 PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3099
3079#ifdef CONFIG_ACPI 3100#ifdef CONFIG_ACPI
3080/* 3101/*
3081 * Apple: Shutdown Cactus Ridge Thunderbolt controller. 3102 * Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3576,6 +3597,44 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
3576 quirk_dma_func1_alias); 3597 quirk_dma_func1_alias);
3577 3598
3578/* 3599/*
3600 * Some devices DMA with the wrong devfn, not just the wrong function.
3601 * quirk_fixed_dma_alias() uses this table to create fixed aliases, where
3602 * the alias is "fixed" and independent of the device devfn.
3603 *
3604 * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O
3605 * processor. To software, this appears as a PCIe-to-PCI/X bridge with a
3606 * single device on the secondary bus. In reality, the single exposed
3607 * device at 0e.0 is the Address Translation Unit (ATU) of the controller
3608 * that provides a bridge to the internal bus of the I/O processor. The
3609 * controller supports private devices, which can be hidden from PCI config
3610 * space. In the case of the Adaptec 3405, a private device at 01.0
3611 * appears to be the DMA engine, which therefore needs to become a DMA
3612 * alias for the device.
3613 */
3614static const struct pci_device_id fixed_dma_alias_tbl[] = {
3615 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
3616 PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
3617 .driver_data = PCI_DEVFN(1, 0) },
3618 { 0 }
3619};
3620
3621static void quirk_fixed_dma_alias(struct pci_dev *dev)
3622{
3623 const struct pci_device_id *id;
3624
3625 id = pci_match_id(fixed_dma_alias_tbl, dev);
3626 if (id) {
3627 dev->dma_alias_devfn = id->driver_data;
3628 dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
3629 dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
3630 PCI_SLOT(dev->dma_alias_devfn),
3631 PCI_FUNC(dev->dma_alias_devfn));
3632 }
3633}
3634
3635DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
3636
3637/*
3579 * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in 3638 * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
3580 * using the wrong DMA alias for the device. Some of these devices can be 3639 * using the wrong DMA alias for the device. Some of these devices can be
3581 * used as either forward or reverse bridges, so we need to test whether the 3640 * used as either forward or reverse bridges, so we need to test whether the
@@ -3678,6 +3737,9 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = {
3678 0x9c98, 0x9c99, 0x9c9a, 0x9c9b, 3737 0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
3679 /* Patsburg (X79) PCH */ 3738 /* Patsburg (X79) PCH */
3680 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e, 3739 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
3740 /* Wellsburg (X99) PCH */
3741 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
3742 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
3681}; 3743};
3682 3744
3683static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) 3745static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
@@ -3761,6 +3823,8 @@ static const struct pci_dev_acs_enabled {
3761 { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs }, 3823 { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
3762 { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs }, 3824 { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
3763 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, 3825 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
3826 { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
3827 { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
3764 { 0 } 3828 { 0 }
3765}; 3829};
3766 3830
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index f955edb9bea7..eb0ad530dc43 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -71,6 +71,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
71{ 71{
72 void __iomem *image; 72 void __iomem *image;
73 int last_image; 73 int last_image;
74 unsigned length;
74 75
75 image = rom; 76 image = rom;
76 do { 77 do {
@@ -93,9 +94,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
93 if (readb(pds + 3) != 'R') 94 if (readb(pds + 3) != 'R')
94 break; 95 break;
95 last_image = readb(pds + 21) & 0x80; 96 last_image = readb(pds + 21) & 0x80;
96 /* this length is reliable */ 97 length = readw(pds + 16);
97 image += readw(pds + 16) * 512; 98 image += length * 512;
98 } while (!last_image); 99 } while (length && !last_image);
99 100
100 /* never return a size larger than the PCI resource window */ 101 /* never return a size larger than the PCI resource window */
101 /* there are known ROMs that get the size wrong */ 102 /* there are known ROMs that get the size wrong */
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 66977ebf13b3..ff0356fb378f 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -180,20 +180,21 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
180 struct pnp_dev *dev = data; 180 struct pnp_dev *dev = data;
181 struct acpi_resource_dma *dma; 181 struct acpi_resource_dma *dma;
182 struct acpi_resource_vendor_typed *vendor_typed; 182 struct acpi_resource_vendor_typed *vendor_typed;
183 struct resource r = {0}; 183 struct resource_win win = {{0}, 0};
184 struct resource *r = &win.res;
184 int i, flags; 185 int i, flags;
185 186
186 if (acpi_dev_resource_address_space(res, &r) 187 if (acpi_dev_resource_address_space(res, &win)
187 || acpi_dev_resource_ext_address_space(res, &r)) { 188 || acpi_dev_resource_ext_address_space(res, &win)) {
188 pnp_add_resource(dev, &r); 189 pnp_add_resource(dev, &win.res);
189 return AE_OK; 190 return AE_OK;
190 } 191 }
191 192
192 r.flags = 0; 193 r->flags = 0;
193 if (acpi_dev_resource_interrupt(res, 0, &r)) { 194 if (acpi_dev_resource_interrupt(res, 0, r)) {
194 pnpacpi_add_irqresource(dev, &r); 195 pnpacpi_add_irqresource(dev, r);
195 for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++) 196 for (i = 1; acpi_dev_resource_interrupt(res, i, r); i++)
196 pnpacpi_add_irqresource(dev, &r); 197 pnpacpi_add_irqresource(dev, r);
197 198
198 if (i > 1) { 199 if (i > 1) {
199 /* 200 /*
@@ -209,7 +210,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
209 } 210 }
210 } 211 }
211 return AE_OK; 212 return AE_OK;
212 } else if (r.flags & IORESOURCE_DISABLED) { 213 } else if (r->flags & IORESOURCE_DISABLED) {
213 pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); 214 pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
214 return AE_OK; 215 return AE_OK;
215 } 216 }
@@ -218,13 +219,13 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
218 case ACPI_RESOURCE_TYPE_MEMORY24: 219 case ACPI_RESOURCE_TYPE_MEMORY24:
219 case ACPI_RESOURCE_TYPE_MEMORY32: 220 case ACPI_RESOURCE_TYPE_MEMORY32:
220 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 221 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
221 if (acpi_dev_resource_memory(res, &r)) 222 if (acpi_dev_resource_memory(res, r))
222 pnp_add_resource(dev, &r); 223 pnp_add_resource(dev, r);
223 break; 224 break;
224 case ACPI_RESOURCE_TYPE_IO: 225 case ACPI_RESOURCE_TYPE_IO:
225 case ACPI_RESOURCE_TYPE_FIXED_IO: 226 case ACPI_RESOURCE_TYPE_FIXED_IO:
226 if (acpi_dev_resource_io(res, &r)) 227 if (acpi_dev_resource_io(res, r))
227 pnp_add_resource(dev, &r); 228 pnp_add_resource(dev, r);
228 break; 229 break;
229 case ACPI_RESOURCE_TYPE_DMA: 230 case ACPI_RESOURCE_TYPE_DMA:
230 dma = &res->data.dma; 231 dma = &res->data.dma;
@@ -410,12 +411,12 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
410 if (p->resource_type == ACPI_MEMORY_RANGE) { 411 if (p->resource_type == ACPI_MEMORY_RANGE) {
411 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) 412 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
412 flags = IORESOURCE_MEM_WRITEABLE; 413 flags = IORESOURCE_MEM_WRITEABLE;
413 pnp_register_mem_resource(dev, option_flags, p->minimum, 414 pnp_register_mem_resource(dev, option_flags, p->address.minimum,
414 p->minimum, 0, p->address_length, 415 p->address.minimum, 0, p->address.address_length,
415 flags); 416 flags);
416 } else if (p->resource_type == ACPI_IO_RANGE) 417 } else if (p->resource_type == ACPI_IO_RANGE)
417 pnp_register_port_resource(dev, option_flags, p->minimum, 418 pnp_register_port_resource(dev, option_flags, p->address.minimum,
418 p->minimum, 0, p->address_length, 419 p->address.minimum, 0, p->address.address_length,
419 IORESOURCE_IO_FIXED); 420 IORESOURCE_IO_FIXED);
420} 421}
421 422
@@ -429,12 +430,12 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
429 if (p->resource_type == ACPI_MEMORY_RANGE) { 430 if (p->resource_type == ACPI_MEMORY_RANGE) {
430 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) 431 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
431 flags = IORESOURCE_MEM_WRITEABLE; 432 flags = IORESOURCE_MEM_WRITEABLE;
432 pnp_register_mem_resource(dev, option_flags, p->minimum, 433 pnp_register_mem_resource(dev, option_flags, p->address.minimum,
433 p->minimum, 0, p->address_length, 434 p->address.minimum, 0, p->address.address_length,
434 flags); 435 flags);
435 } else if (p->resource_type == ACPI_IO_RANGE) 436 } else if (p->resource_type == ACPI_IO_RANGE)
436 pnp_register_port_resource(dev, option_flags, p->minimum, 437 pnp_register_port_resource(dev, option_flags, p->address.minimum,
437 p->minimum, 0, p->address_length, 438 p->address.minimum, 0, p->address.address_length,
438 IORESOURCE_IO_FIXED); 439 IORESOURCE_IO_FIXED);
439} 440}
440 441
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 8bcfecd66281..eeca70ddbf61 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2430,7 +2430,7 @@ static int tsi721_probe(struct pci_dev *pdev,
2430 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, 2430 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
2431 PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | 2431 PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
2432 PCI_EXP_DEVCTL_NOSNOOP_EN, 2432 PCI_EXP_DEVCTL_NOSNOOP_EN,
2433 0x2 << MAX_READ_REQUEST_SZ_SHIFT); 2433 PCI_EXP_DEVCTL_READRQ_512B);
2434 2434
2435 /* Adjust PCIe completion timeout. */ 2435 /* Adjust PCIe completion timeout. */
2436 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); 2436 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
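The hunk above replaces the open-coded "0x2 << MAX_READ_REQUEST_SZ_SHIFT" with
the named PCI_EXP_DEVCTL_READRQ_512B constant. As a sketch only (not part of
this commit; the helper name is made up, the capability accessor is the
standard kernel one), the same setting expressed as a standalone helper:

    #include <linux/pci.h>

    /* Clamp a device's Max_Read_Request_Size field to the 512-byte encoding,
     * using the named DEVCTL constants instead of a magic shift. */
    static int example_set_mrrs_512(struct pci_dev *pdev)
    {
            return pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
                                                      PCI_EXP_DEVCTL_READRQ,
                                                      PCI_EXP_DEVCTL_READRQ_512B);
    }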
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index a7b42680a06a..9d2502543ef6 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,8 +72,6 @@
72#define TSI721_MSIXPBA_OFFSET 0x2a000 72#define TSI721_MSIXPBA_OFFSET 0x2a000
73#define TSI721_PCIECFG_EPCTL 0x400 73#define TSI721_PCIECFG_EPCTL 0x400
74 74
75#define MAX_READ_REQUEST_SZ_SHIFT 12
76
77/* 75/*
78 * Event Management Registers 76 * Event Management Registers
79 */ 77 */
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 6776931e25d4..78ce4d61a69b 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -813,12 +813,13 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
813 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, 813 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
814 &devcontrol); 814 &devcontrol);
815 815
816 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) { 816 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
817 PCI_EXP_DEVCTL_READRQ_512B) {
817 esas2r_log(ESAS2R_LOG_INFO, 818 esas2r_log(ESAS2R_LOG_INFO,
818 "max read request size > 512B"); 819 "max read request size > 512B");
819 820
820 devcontrol &= ~PCI_EXP_DEVCTL_READRQ; 821 devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
821 devcontrol |= 0x2000; 822 devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
822 pci_write_config_word(a->pcid, 823 pci_write_config_word(a->pcid,
823 pcie_cap_reg + PCI_EXP_DEVCTL, 824 pcie_cap_reg + PCI_EXP_DEVCTL,
824 devcontrol); 825 devcontrol);
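The same named constant replaces the bare 0x2000 comparison and OR here. For
drivers that do not need to keep their own cached capability offset, the
generic pcie_get_readrq()/pcie_set_readrq() helpers express the identical
DEVCTL read-modify-write in bytes; a sketch for comparison only, not a change
made by this commit:

    #include <linux/pci.h>

    /* Cap Max_Read_Request_Size at 512 bytes via the generic helpers. */
    static int example_cap_readrq(struct pci_dev *pdev)
    {
            if (pcie_get_readrq(pdev) > 512)
                    return pcie_set_readrq(pdev, 512);
            return 0;
    }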
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index 1e824fb1649b..296db7a69c27 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -161,7 +161,7 @@ static int sfi_verify_table(struct sfi_table_header *table)
161 * Check for common case that we can re-use mapping to SYST, 161 * Check for common case that we can re-use mapping to SYST,
162 * which requires syst_pa, syst_va to be initialized. 162 * which requires syst_pa, syst_va to be initialized.
163 */ 163 */
164struct sfi_table_header *sfi_map_table(u64 pa) 164static struct sfi_table_header *sfi_map_table(u64 pa)
165{ 165{
166 struct sfi_table_header *th; 166 struct sfi_table_header *th;
167 u32 length; 167 u32 length;
@@ -189,7 +189,7 @@ struct sfi_table_header *sfi_map_table(u64 pa)
189 * Undoes effect of sfi_map_table() by unmapping table 189 * Undoes effect of sfi_map_table() by unmapping table
190 * if it did not completely fit on same page as SYST. 190 * if it did not completely fit on same page as SYST.
191 */ 191 */
192void sfi_unmap_table(struct sfi_table_header *th) 192static void sfi_unmap_table(struct sfi_table_header *th)
193{ 193{
194 if (!TABLE_ON_PAGE(syst_va, th, th->len)) 194 if (!TABLE_ON_PAGE(syst_va, th, th->len))
195 sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ? 195 sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ?
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index aeb50bb6ba9c..eaffb0248de1 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3452,8 +3452,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
3452 return status; 3452 return status;
3453} 3453}
3454 3454
3455#ifdef CONFIG_PM
3456
3457int usb_remote_wakeup(struct usb_device *udev) 3455int usb_remote_wakeup(struct usb_device *udev)
3458{ 3456{
3459 int status = 0; 3457 int status = 0;
@@ -3512,16 +3510,6 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
3512 return connect_change; 3510 return connect_change;
3513} 3511}
3514 3512
3515#else
3516
3517static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
3518 u16 portstatus, u16 portchange)
3519{
3520 return 0;
3521}
3522
3523#endif
3524
3525static int check_ports_changed(struct usb_hub *hub) 3513static int check_ports_changed(struct usb_hub *hub)
3526{ 3514{
3527 int port1; 3515 int port1;
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index 4953b657635e..cb9ee2556850 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -3118,8 +3118,7 @@ int __init atafb_init(void)
3118 printk("atafb_init: initializing Falcon hw\n"); 3118 printk("atafb_init: initializing Falcon hw\n");
3119 fbhw = &falcon_switch; 3119 fbhw = &falcon_switch;
3120 atafb_ops.fb_setcolreg = &falcon_setcolreg; 3120 atafb_ops.fb_setcolreg = &falcon_setcolreg;
3121 error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 3121 error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 0,
3122 IRQ_TYPE_PRIO,
3123 "framebuffer:modeswitch", 3122 "framebuffer:modeswitch",
3124 falcon_vbl_switcher); 3123 falcon_vbl_switcher);
3125 if (error) 3124 if (error)
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 3860d02729dc..0b52d92cb2e5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -92,7 +92,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
92 92
93/* We increase/decrease in batches which fit in a page */ 93/* We increase/decrease in batches which fit in a page */
94static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; 94static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
95static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
96 95
97 96
98/* List of ballooned pages, threaded through the mem_map array. */ 97/* List of ballooned pages, threaded through the mem_map array. */
@@ -423,22 +422,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
423 page = pfn_to_page(pfn); 422 page = pfn_to_page(pfn);
424 423
425#ifdef CONFIG_XEN_HAVE_PVMMU 424#ifdef CONFIG_XEN_HAVE_PVMMU
426 /*
427 * Ballooned out frames are effectively replaced with
428 * a scratch frame. Ensure direct mappings and the
429 * p2m are consistent.
430 */
431 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 425 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
432 if (!PageHighMem(page)) { 426 if (!PageHighMem(page)) {
433 struct page *scratch_page = get_balloon_scratch_page();
434
435 ret = HYPERVISOR_update_va_mapping( 427 ret = HYPERVISOR_update_va_mapping(
436 (unsigned long)__va(pfn << PAGE_SHIFT), 428 (unsigned long)__va(pfn << PAGE_SHIFT),
437 pfn_pte(page_to_pfn(scratch_page), 429 __pte_ma(0), 0);
438 PAGE_KERNEL_RO), 0);
439 BUG_ON(ret); 430 BUG_ON(ret);
440
441 put_balloon_scratch_page();
442 } 431 }
443 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 432 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
444 } 433 }
@@ -500,18 +489,6 @@ static void balloon_process(struct work_struct *work)
500 mutex_unlock(&balloon_mutex); 489 mutex_unlock(&balloon_mutex);
501} 490}
502 491
503struct page *get_balloon_scratch_page(void)
504{
505 struct page *ret = get_cpu_var(balloon_scratch_page);
506 BUG_ON(ret == NULL);
507 return ret;
508}
509
510void put_balloon_scratch_page(void)
511{
512 put_cpu_var(balloon_scratch_page);
513}
514
515/* Resets the Xen limit, sets new target, and kicks off processing. */ 492/* Resets the Xen limit, sets new target, and kicks off processing. */
516void balloon_set_new_target(unsigned long target) 493void balloon_set_new_target(unsigned long target)
517{ 494{
@@ -605,61 +582,13 @@ static void __init balloon_add_region(unsigned long start_pfn,
605 } 582 }
606} 583}
607 584
608static int alloc_balloon_scratch_page(int cpu)
609{
610 if (per_cpu(balloon_scratch_page, cpu) != NULL)
611 return 0;
612
613 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
614 if (per_cpu(balloon_scratch_page, cpu) == NULL) {
615 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
616 return -ENOMEM;
617 }
618
619 return 0;
620}
621
622
623static int balloon_cpu_notify(struct notifier_block *self,
624 unsigned long action, void *hcpu)
625{
626 int cpu = (long)hcpu;
627 switch (action) {
628 case CPU_UP_PREPARE:
629 if (alloc_balloon_scratch_page(cpu))
630 return NOTIFY_BAD;
631 break;
632 default:
633 break;
634 }
635 return NOTIFY_OK;
636}
637
638static struct notifier_block balloon_cpu_notifier = {
639 .notifier_call = balloon_cpu_notify,
640};
641
642static int __init balloon_init(void) 585static int __init balloon_init(void)
643{ 586{
644 int i, cpu; 587 int i;
645 588
646 if (!xen_domain()) 589 if (!xen_domain())
647 return -ENODEV; 590 return -ENODEV;
648 591
649 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
650 register_cpu_notifier(&balloon_cpu_notifier);
651
652 get_online_cpus();
653 for_each_online_cpu(cpu) {
654 if (alloc_balloon_scratch_page(cpu)) {
655 put_online_cpus();
656 unregister_cpu_notifier(&balloon_cpu_notifier);
657 return -ENOMEM;
658 }
659 }
660 put_online_cpus();
661 }
662
663 pr_info("Initialising balloon driver\n"); 592 pr_info("Initialising balloon driver\n");
664 593
665 balloon_stats.current_pages = xen_pv_domain() 594 balloon_stats.current_pages = xen_pv_domain()
@@ -696,15 +625,4 @@ static int __init balloon_init(void)
696 625
697subsys_initcall(balloon_init); 626subsys_initcall(balloon_init);
698 627
699static int __init balloon_clear(void)
700{
701 int cpu;
702
703 for_each_possible_cpu(cpu)
704 per_cpu(balloon_scratch_page, cpu) = NULL;
705
706 return 0;
707}
708early_initcall(balloon_clear);
709
710MODULE_LICENSE("GPL"); 628MODULE_LICENSE("GPL");
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 073b4a19a8b0..d5bb1a33d0a3 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -67,7 +67,7 @@ struct gntdev_priv {
67 * Only populated if populate_freeable_maps == 1 */ 67 * Only populated if populate_freeable_maps == 1 */
68 struct list_head freeable_maps; 68 struct list_head freeable_maps;
69 /* lock protects maps and freeable_maps */ 69 /* lock protects maps and freeable_maps */
70 spinlock_t lock; 70 struct mutex lock;
71 struct mm_struct *mm; 71 struct mm_struct *mm;
72 struct mmu_notifier mn; 72 struct mmu_notifier mn;
73}; 73};
@@ -91,7 +91,9 @@ struct grant_map {
91 struct gnttab_map_grant_ref *map_ops; 91 struct gnttab_map_grant_ref *map_ops;
92 struct gnttab_unmap_grant_ref *unmap_ops; 92 struct gnttab_unmap_grant_ref *unmap_ops;
93 struct gnttab_map_grant_ref *kmap_ops; 93 struct gnttab_map_grant_ref *kmap_ops;
94 struct gnttab_unmap_grant_ref *kunmap_ops;
94 struct page **pages; 95 struct page **pages;
96 unsigned long pages_vm_start;
95}; 97};
96 98
97static int unmap_grant_pages(struct grant_map *map, int offset, int pages); 99static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
@@ -118,12 +120,13 @@ static void gntdev_free_map(struct grant_map *map)
118 return; 120 return;
119 121
120 if (map->pages) 122 if (map->pages)
121 free_xenballooned_pages(map->count, map->pages); 123 gnttab_free_pages(map->count, map->pages);
122 kfree(map->pages); 124 kfree(map->pages);
123 kfree(map->grants); 125 kfree(map->grants);
124 kfree(map->map_ops); 126 kfree(map->map_ops);
125 kfree(map->unmap_ops); 127 kfree(map->unmap_ops);
126 kfree(map->kmap_ops); 128 kfree(map->kmap_ops);
129 kfree(map->kunmap_ops);
127 kfree(map); 130 kfree(map);
128} 131}
129 132
@@ -140,21 +143,24 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
140 add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL); 143 add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
141 add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); 144 add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
142 add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL); 145 add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
146 add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
143 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); 147 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
144 if (NULL == add->grants || 148 if (NULL == add->grants ||
145 NULL == add->map_ops || 149 NULL == add->map_ops ||
146 NULL == add->unmap_ops || 150 NULL == add->unmap_ops ||
147 NULL == add->kmap_ops || 151 NULL == add->kmap_ops ||
152 NULL == add->kunmap_ops ||
148 NULL == add->pages) 153 NULL == add->pages)
149 goto err; 154 goto err;
150 155
151 if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */)) 156 if (gnttab_alloc_pages(count, add->pages))
152 goto err; 157 goto err;
153 158
154 for (i = 0; i < count; i++) { 159 for (i = 0; i < count; i++) {
155 add->map_ops[i].handle = -1; 160 add->map_ops[i].handle = -1;
156 add->unmap_ops[i].handle = -1; 161 add->unmap_ops[i].handle = -1;
157 add->kmap_ops[i].handle = -1; 162 add->kmap_ops[i].handle = -1;
163 add->kunmap_ops[i].handle = -1;
158 } 164 }
159 165
160 add->index = 0; 166 add->index = 0;
@@ -216,9 +222,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
216 } 222 }
217 223
218 if (populate_freeable_maps && priv) { 224 if (populate_freeable_maps && priv) {
219 spin_lock(&priv->lock); 225 mutex_lock(&priv->lock);
220 list_del(&map->next); 226 list_del(&map->next);
221 spin_unlock(&priv->lock); 227 mutex_unlock(&priv->lock);
222 } 228 }
223 229
224 if (map->pages && !use_ptemod) 230 if (map->pages && !use_ptemod)
@@ -239,6 +245,14 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
239 BUG_ON(pgnr >= map->count); 245 BUG_ON(pgnr >= map->count);
240 pte_maddr = arbitrary_virt_to_machine(pte).maddr; 246 pte_maddr = arbitrary_virt_to_machine(pte).maddr;
241 247
248 /*
249 * Set the PTE as special to force get_user_pages_fast() to fall
250 * back to the slow path. If this is not supported as part of
251 * the grant map, it will be done afterwards.
252 */
253 if (xen_feature(XENFEAT_gnttab_map_avail_bits))
254 flags |= (1 << _GNTMAP_guest_avail0);
255
242 gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, 256 gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
243 map->grants[pgnr].ref, 257 map->grants[pgnr].ref,
244 map->grants[pgnr].domid); 258 map->grants[pgnr].domid);
@@ -247,6 +261,15 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
247 return 0; 261 return 0;
248} 262}
249 263
264#ifdef CONFIG_X86
265static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
266 unsigned long addr, void *data)
267{
268 set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
269 return 0;
270}
271#endif
272
250static int map_grant_pages(struct grant_map *map) 273static int map_grant_pages(struct grant_map *map)
251{ 274{
252 int i, err = 0; 275 int i, err = 0;
@@ -280,6 +303,8 @@ static int map_grant_pages(struct grant_map *map)
280 map->flags | GNTMAP_host_map, 303 map->flags | GNTMAP_host_map,
281 map->grants[i].ref, 304 map->grants[i].ref,
282 map->grants[i].domid); 305 map->grants[i].domid);
306 gnttab_set_unmap_op(&map->kunmap_ops[i], address,
307 map->flags | GNTMAP_host_map, -1);
283 } 308 }
284 } 309 }
285 310
@@ -290,20 +315,42 @@ static int map_grant_pages(struct grant_map *map)
290 return err; 315 return err;
291 316
292 for (i = 0; i < map->count; i++) { 317 for (i = 0; i < map->count; i++) {
293 if (map->map_ops[i].status) 318 if (map->map_ops[i].status) {
294 err = -EINVAL; 319 err = -EINVAL;
295 else { 320 continue;
296 BUG_ON(map->map_ops[i].handle == -1);
297 map->unmap_ops[i].handle = map->map_ops[i].handle;
298 pr_debug("map handle=%d\n", map->map_ops[i].handle);
299 } 321 }
322
323 map->unmap_ops[i].handle = map->map_ops[i].handle;
324 if (use_ptemod)
325 map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
300 } 326 }
301 return err; 327 return err;
302} 328}
303 329
330struct unmap_grant_pages_callback_data
331{
332 struct completion completion;
333 int result;
334};
335
336static void unmap_grant_callback(int result,
337 struct gntab_unmap_queue_data *data)
338{
339 struct unmap_grant_pages_callback_data* d = data->data;
340
341 d->result = result;
342 complete(&d->completion);
343}
344
304static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) 345static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
305{ 346{
306 int i, err = 0; 347 int i, err = 0;
348 struct gntab_unmap_queue_data unmap_data;
349 struct unmap_grant_pages_callback_data data;
350
351 init_completion(&data.completion);
352 unmap_data.data = &data;
353 unmap_data.done = &unmap_grant_callback;
307 354
308 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { 355 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
309 int pgno = (map->notify.addr >> PAGE_SHIFT); 356 int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -315,11 +362,16 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
315 } 362 }
316 } 363 }
317 364
318 err = gnttab_unmap_refs(map->unmap_ops + offset, 365 unmap_data.unmap_ops = map->unmap_ops + offset;
319 use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset, 366 unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
320 pages); 367 unmap_data.pages = map->pages + offset;
321 if (err) 368 unmap_data.count = pages;
322 return err; 369
370 gnttab_unmap_refs_async(&unmap_data);
371
372 wait_for_completion(&data.completion);
373 if (data.result)
374 return data.result;
323 375
324 for (i = 0; i < pages; i++) { 376 for (i = 0; i < pages; i++) {
325 if (map->unmap_ops[offset+i].status) 377 if (map->unmap_ops[offset+i].status)
@@ -387,17 +439,26 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
387 * not do any unmapping, since that has been done prior to 439 * not do any unmapping, since that has been done prior to
388 * closing the vma, but it may still iterate the unmap_ops list. 440 * closing the vma, but it may still iterate the unmap_ops list.
389 */ 441 */
390 spin_lock(&priv->lock); 442 mutex_lock(&priv->lock);
391 map->vma = NULL; 443 map->vma = NULL;
392 spin_unlock(&priv->lock); 444 mutex_unlock(&priv->lock);
393 } 445 }
394 vma->vm_private_data = NULL; 446 vma->vm_private_data = NULL;
395 gntdev_put_map(priv, map); 447 gntdev_put_map(priv, map);
396} 448}
397 449
450static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
451 unsigned long addr)
452{
453 struct grant_map *map = vma->vm_private_data;
454
455 return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
456}
457
398static struct vm_operations_struct gntdev_vmops = { 458static struct vm_operations_struct gntdev_vmops = {
399 .open = gntdev_vma_open, 459 .open = gntdev_vma_open,
400 .close = gntdev_vma_close, 460 .close = gntdev_vma_close,
461 .find_special_page = gntdev_vma_find_special_page,
401}; 462};
402 463
403/* ------------------------------------------------------------------ */ 464/* ------------------------------------------------------------------ */
@@ -433,14 +494,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
433 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); 494 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
434 struct grant_map *map; 495 struct grant_map *map;
435 496
436 spin_lock(&priv->lock); 497 mutex_lock(&priv->lock);
437 list_for_each_entry(map, &priv->maps, next) { 498 list_for_each_entry(map, &priv->maps, next) {
438 unmap_if_in_range(map, start, end); 499 unmap_if_in_range(map, start, end);
439 } 500 }
440 list_for_each_entry(map, &priv->freeable_maps, next) { 501 list_for_each_entry(map, &priv->freeable_maps, next) {
441 unmap_if_in_range(map, start, end); 502 unmap_if_in_range(map, start, end);
442 } 503 }
443 spin_unlock(&priv->lock); 504 mutex_unlock(&priv->lock);
444} 505}
445 506
446static void mn_invl_page(struct mmu_notifier *mn, 507static void mn_invl_page(struct mmu_notifier *mn,
@@ -457,7 +518,7 @@ static void mn_release(struct mmu_notifier *mn,
457 struct grant_map *map; 518 struct grant_map *map;
458 int err; 519 int err;
459 520
460 spin_lock(&priv->lock); 521 mutex_lock(&priv->lock);
461 list_for_each_entry(map, &priv->maps, next) { 522 list_for_each_entry(map, &priv->maps, next) {
462 if (!map->vma) 523 if (!map->vma)
463 continue; 524 continue;
@@ -476,7 +537,7 @@ static void mn_release(struct mmu_notifier *mn,
476 err = unmap_grant_pages(map, /* offset */ 0, map->count); 537 err = unmap_grant_pages(map, /* offset */ 0, map->count);
477 WARN_ON(err); 538 WARN_ON(err);
478 } 539 }
479 spin_unlock(&priv->lock); 540 mutex_unlock(&priv->lock);
480} 541}
481 542
482static struct mmu_notifier_ops gntdev_mmu_ops = { 543static struct mmu_notifier_ops gntdev_mmu_ops = {
@@ -498,7 +559,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
498 559
499 INIT_LIST_HEAD(&priv->maps); 560 INIT_LIST_HEAD(&priv->maps);
500 INIT_LIST_HEAD(&priv->freeable_maps); 561 INIT_LIST_HEAD(&priv->freeable_maps);
501 spin_lock_init(&priv->lock); 562 mutex_init(&priv->lock);
502 563
503 if (use_ptemod) { 564 if (use_ptemod) {
504 priv->mm = get_task_mm(current); 565 priv->mm = get_task_mm(current);
@@ -572,10 +633,10 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
572 return -EFAULT; 633 return -EFAULT;
573 } 634 }
574 635
575 spin_lock(&priv->lock); 636 mutex_lock(&priv->lock);
576 gntdev_add_map(priv, map); 637 gntdev_add_map(priv, map);
577 op.index = map->index << PAGE_SHIFT; 638 op.index = map->index << PAGE_SHIFT;
578 spin_unlock(&priv->lock); 639 mutex_unlock(&priv->lock);
579 640
580 if (copy_to_user(u, &op, sizeof(op)) != 0) 641 if (copy_to_user(u, &op, sizeof(op)) != 0)
581 return -EFAULT; 642 return -EFAULT;
@@ -594,7 +655,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
594 return -EFAULT; 655 return -EFAULT;
595 pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count); 656 pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
596 657
597 spin_lock(&priv->lock); 658 mutex_lock(&priv->lock);
598 map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); 659 map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
599 if (map) { 660 if (map) {
600 list_del(&map->next); 661 list_del(&map->next);
@@ -602,7 +663,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
602 list_add_tail(&map->next, &priv->freeable_maps); 663 list_add_tail(&map->next, &priv->freeable_maps);
603 err = 0; 664 err = 0;
604 } 665 }
605 spin_unlock(&priv->lock); 666 mutex_unlock(&priv->lock);
606 if (map) 667 if (map)
607 gntdev_put_map(priv, map); 668 gntdev_put_map(priv, map);
608 return err; 669 return err;
@@ -670,7 +731,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
670 out_flags = op.action; 731 out_flags = op.action;
671 out_event = op.event_channel_port; 732 out_event = op.event_channel_port;
672 733
673 spin_lock(&priv->lock); 734 mutex_lock(&priv->lock);
674 735
675 list_for_each_entry(map, &priv->maps, next) { 736 list_for_each_entry(map, &priv->maps, next) {
676 uint64_t begin = map->index << PAGE_SHIFT; 737 uint64_t begin = map->index << PAGE_SHIFT;
@@ -698,7 +759,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
698 rc = 0; 759 rc = 0;
699 760
700 unlock_out: 761 unlock_out:
701 spin_unlock(&priv->lock); 762 mutex_unlock(&priv->lock);
702 763
703 /* Drop the reference to the event channel we did not save in the map */ 764 /* Drop the reference to the event channel we did not save in the map */
704 if (out_flags & UNMAP_NOTIFY_SEND_EVENT) 765 if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
@@ -748,7 +809,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
748 pr_debug("map %d+%d at %lx (pgoff %lx)\n", 809 pr_debug("map %d+%d at %lx (pgoff %lx)\n",
749 index, count, vma->vm_start, vma->vm_pgoff); 810 index, count, vma->vm_start, vma->vm_pgoff);
750 811
751 spin_lock(&priv->lock); 812 mutex_lock(&priv->lock);
752 map = gntdev_find_map_index(priv, index, count); 813 map = gntdev_find_map_index(priv, index, count);
753 if (!map) 814 if (!map)
754 goto unlock_out; 815 goto unlock_out;
@@ -783,7 +844,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
783 map->flags |= GNTMAP_readonly; 844 map->flags |= GNTMAP_readonly;
784 } 845 }
785 846
786 spin_unlock(&priv->lock); 847 mutex_unlock(&priv->lock);
787 848
788 if (use_ptemod) { 849 if (use_ptemod) {
789 err = apply_to_page_range(vma->vm_mm, vma->vm_start, 850 err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -806,16 +867,34 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
806 if (err) 867 if (err)
807 goto out_put_map; 868 goto out_put_map;
808 } 869 }
870 } else {
871#ifdef CONFIG_X86
872 /*
873 * If the PTEs were not made special by the grant map
874 * hypercall, do so here.
875 *
876 * This is racy since the mapping is already visible
877 * to userspace but userspace should be well-behaved
878 * enough to not touch it until the mmap() call
879 * returns.
880 */
881 if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
882 apply_to_page_range(vma->vm_mm, vma->vm_start,
883 vma->vm_end - vma->vm_start,
884 set_grant_ptes_as_special, NULL);
885 }
886#endif
887 map->pages_vm_start = vma->vm_start;
809 } 888 }
810 889
811 return 0; 890 return 0;
812 891
813unlock_out: 892unlock_out:
814 spin_unlock(&priv->lock); 893 mutex_unlock(&priv->lock);
815 return err; 894 return err;
816 895
817out_unlock_put: 896out_unlock_put:
818 spin_unlock(&priv->lock); 897 mutex_unlock(&priv->lock);
819out_put_map: 898out_put_map:
820 if (use_ptemod) 899 if (use_ptemod)
821 map->vma = NULL; 900 map->vma = NULL;
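The gntdev hunks above wrap gnttab_unmap_refs_async() in a completion so that
__unmap_grant_pages() only returns once the deferred unmap has finished. The
same pattern can be reused by other callers; a sketch only (the wrapper and
its names are not part of this commit, the gntab_unmap_queue_data fields are
the ones used above):

    #include <linux/completion.h>
    #include <xen/grant_table.h>

    struct unmap_wait {
            struct completion done;
            int result;
    };

    /* Callback invoked by the grant-table code once the batch is unmapped. */
    static void example_unmap_done(int result, struct gntab_unmap_queue_data *data)
    {
            struct unmap_wait *w = data->data;

            w->result = result;
            complete(&w->done);
    }

    /* Synchronously unmap 'count' grants; the async machinery defers the
     * actual unmap while any of the pages still has extra references. */
    static int example_unmap_sync(struct gnttab_unmap_grant_ref *unmap_ops,
                                  struct gnttab_unmap_grant_ref *kunmap_ops,
                                  struct page **pages, unsigned int count)
    {
            struct gntab_unmap_queue_data q = {
                    .unmap_ops  = unmap_ops,
                    .kunmap_ops = kunmap_ops,
                    .pages      = pages,
                    .count      = count,
                    .done       = example_unmap_done,
            };
            struct unmap_wait w;

            init_completion(&w.done);
            q.data = &w;

            gnttab_unmap_refs_async(&q);
            wait_for_completion(&w.done);

            return w.result;
    }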
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7786291ba229..17972fbacddc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
42#include <linux/io.h> 42#include <linux/io.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/hardirq.h> 44#include <linux/hardirq.h>
45#include <linux/workqueue.h>
45 46
46#include <xen/xen.h> 47#include <xen/xen.h>
47#include <xen/interface/xen.h> 48#include <xen/interface/xen.h>
@@ -50,6 +51,7 @@
50#include <xen/interface/memory.h> 51#include <xen/interface/memory.h>
51#include <xen/hvc-console.h> 52#include <xen/hvc-console.h>
52#include <xen/swiotlb-xen.h> 53#include <xen/swiotlb-xen.h>
54#include <xen/balloon.h>
53#include <asm/xen/hypercall.h> 55#include <asm/xen/hypercall.h>
54#include <asm/xen/interface.h> 56#include <asm/xen/interface.h>
55 57
@@ -671,6 +673,59 @@ void gnttab_free_auto_xlat_frames(void)
671} 673}
672EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames); 674EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
673 675
676/**
677 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
678 * @nr_pages: number of pages to alloc
679 * @pages: returns the pages
680 */
681int gnttab_alloc_pages(int nr_pages, struct page **pages)
682{
683 int i;
684 int ret;
685
686 ret = alloc_xenballooned_pages(nr_pages, pages, false);
687 if (ret < 0)
688 return ret;
689
690 for (i = 0; i < nr_pages; i++) {
691#if BITS_PER_LONG < 64
692 struct xen_page_foreign *foreign;
693
694 foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
695 if (!foreign) {
696 gnttab_free_pages(nr_pages, pages);
697 return -ENOMEM;
698 }
699 set_page_private(pages[i], (unsigned long)foreign);
700#endif
701 SetPagePrivate(pages[i]);
702 }
703
704 return 0;
705}
706EXPORT_SYMBOL(gnttab_alloc_pages);
707
708/**
709 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
710 * @nr_pages: number of pages to free
711 * @pages: the pages
712 */
713void gnttab_free_pages(int nr_pages, struct page **pages)
714{
715 int i;
716
717 for (i = 0; i < nr_pages; i++) {
718 if (PagePrivate(pages[i])) {
719#if BITS_PER_LONG < 64
720 kfree((void *)page_private(pages[i]));
721#endif
722 ClearPagePrivate(pages[i]);
723 }
724 }
725 free_xenballooned_pages(nr_pages, pages);
726}
727EXPORT_SYMBOL(gnttab_free_pages);
728
674/* Handling of paged out grant targets (GNTST_eagain) */ 729/* Handling of paged out grant targets (GNTST_eagain) */
675#define MAX_DELAY 256 730#define MAX_DELAY 256
676static inline void 731static inline void
@@ -727,30 +782,87 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
727 if (ret) 782 if (ret)
728 return ret; 783 return ret;
729 784
730 /* Retry eagain maps */ 785 for (i = 0; i < count; i++) {
731 for (i = 0; i < count; i++) 786 /* Retry eagain maps */
732 if (map_ops[i].status == GNTST_eagain) 787 if (map_ops[i].status == GNTST_eagain)
733 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, 788 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
734 &map_ops[i].status, __func__); 789 &map_ops[i].status, __func__);
735 790
791 if (map_ops[i].status == GNTST_okay) {
792 struct xen_page_foreign *foreign;
793
794 SetPageForeign(pages[i]);
795 foreign = xen_page_foreign(pages[i]);
796 foreign->domid = map_ops[i].dom;
797 foreign->gref = map_ops[i].ref;
798 }
799 }
800
736 return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count); 801 return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
737} 802}
738EXPORT_SYMBOL_GPL(gnttab_map_refs); 803EXPORT_SYMBOL_GPL(gnttab_map_refs);
739 804
740int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 805int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
741 struct gnttab_map_grant_ref *kmap_ops, 806 struct gnttab_unmap_grant_ref *kunmap_ops,
742 struct page **pages, unsigned int count) 807 struct page **pages, unsigned int count)
743{ 808{
809 unsigned int i;
744 int ret; 810 int ret;
745 811
746 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); 812 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
747 if (ret) 813 if (ret)
748 return ret; 814 return ret;
749 815
750 return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count); 816 for (i = 0; i < count; i++)
817 ClearPageForeign(pages[i]);
818
819 return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
751} 820}
752EXPORT_SYMBOL_GPL(gnttab_unmap_refs); 821EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
753 822
823#define GNTTAB_UNMAP_REFS_DELAY 5
824
825static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
826
827static void gnttab_unmap_work(struct work_struct *work)
828{
829 struct gntab_unmap_queue_data
830 *unmap_data = container_of(work,
831 struct gntab_unmap_queue_data,
832 gnttab_work.work);
833 if (unmap_data->age != UINT_MAX)
834 unmap_data->age++;
835 __gnttab_unmap_refs_async(unmap_data);
836}
837
838static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
839{
840 int ret;
841 int pc;
842
843 for (pc = 0; pc < item->count; pc++) {
844 if (page_count(item->pages[pc]) > 1) {
845 unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
846 schedule_delayed_work(&item->gnttab_work,
847 msecs_to_jiffies(delay));
848 return;
849 }
850 }
851
852 ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
853 item->pages, item->count);
854 item->done(ret, item);
855}
856
857void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
858{
859 INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
860 item->age = 0;
861
862 __gnttab_unmap_refs_async(item);
863}
864EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
865
754static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) 866static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
755{ 867{
756 int rc; 868 int rc;
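gnttab_alloc_pages() and gnttab_free_pages(), added above, are the pair that
backends now use instead of calling alloc_xenballooned_pages() and
free_xenballooned_pages() directly (the gntdev and xen-scsiback hunks in this
diff switch over to them), so gnttab_map_refs() has somewhere to record the
owning domain and grant reference for each page. A usage sketch, with the
map_ops assumed to have been prepared by the caller via gnttab_set_map_op():

    #include <xen/grant_table.h>

    /* Allocate pages for a batch of grant mappings, map the prepared ops
     * into them, and give the pages back if the batch fails. */
    static int example_map_batch(struct gnttab_map_grant_ref *map_ops,
                                 struct page **pages, unsigned int count)
    {
            int err = gnttab_alloc_pages(count, pages);

            if (err)
                    return err;

            err = gnttab_map_refs(map_ops, NULL, pages, count);
            if (err)
                    gnttab_free_pages(count, pages);

            return err;
    }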
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index f8bb36f9d9ce..bf1940706422 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -105,10 +105,16 @@ static void do_suspend(void)
105 105
106 err = freeze_processes(); 106 err = freeze_processes();
107 if (err) { 107 if (err) {
108 pr_err("%s: freeze failed %d\n", __func__, err); 108 pr_err("%s: freeze processes failed %d\n", __func__, err);
109 goto out; 109 goto out;
110 } 110 }
111 111
112 err = freeze_kernel_threads();
113 if (err) {
114 pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
115 goto out_thaw;
116 }
117
112 err = dpm_suspend_start(PMSG_FREEZE); 118 err = dpm_suspend_start(PMSG_FREEZE);
113 if (err) { 119 if (err) {
114 pr_err("%s: dpm_suspend_start %d\n", __func__, err); 120 pr_err("%s: dpm_suspend_start %d\n", __func__, err);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 83b5c53bec6b..8a65423bc696 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -374,7 +374,7 @@ static struct frontswap_ops tmem_frontswap_ops = {
374}; 374};
375#endif 375#endif
376 376
377static int xen_tmem_init(void) 377static int __init xen_tmem_init(void)
378{ 378{
379 if (!xen_domain()) 379 if (!xen_domain())
380 return 0; 380 return 0;
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
index 34e40b733f9a..4fc886cd5586 100644
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -117,8 +117,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
117 list_for_each_entry(info, &mem_device->res_list, list) { 117 list_for_each_entry(info, &mem_device->res_list, list) {
118 if ((info->caching == address64.info.mem.caching) && 118 if ((info->caching == address64.info.mem.caching) &&
119 (info->write_protect == address64.info.mem.write_protect) && 119 (info->write_protect == address64.info.mem.write_protect) &&
120 (info->start_addr + info->length == address64.minimum)) { 120 (info->start_addr + info->length == address64.address.minimum)) {
121 info->length += address64.address_length; 121 info->length += address64.address.address_length;
122 return AE_OK; 122 return AE_OK;
123 } 123 }
124 } 124 }
@@ -130,8 +130,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
130 INIT_LIST_HEAD(&new->list); 130 INIT_LIST_HEAD(&new->list);
131 new->caching = address64.info.mem.caching; 131 new->caching = address64.info.mem.caching;
132 new->write_protect = address64.info.mem.write_protect; 132 new->write_protect = address64.info.mem.write_protect;
133 new->start_addr = address64.minimum; 133 new->start_addr = address64.address.minimum;
134 new->length = address64.address_length; 134 new->length = address64.address.address_length;
135 list_add_tail(&new->list, &mem_device->res_list); 135 list_add_tail(&new->list, &mem_device->res_list);
136 136
137 return AE_OK; 137 return AE_OK;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index e999496eda3e..ecd540a7a562 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -227,7 +227,7 @@ static void put_free_pages(struct page **page, int num)
227 return; 227 return;
228 if (i > scsiback_max_buffer_pages) { 228 if (i > scsiback_max_buffer_pages) {
229 n = min(num, i - scsiback_max_buffer_pages); 229 n = min(num, i - scsiback_max_buffer_pages);
230 free_xenballooned_pages(n, page + num - n); 230 gnttab_free_pages(n, page + num - n);
231 n = num - n; 231 n = num - n;
232 } 232 }
233 spin_lock_irqsave(&free_pages_lock, flags); 233 spin_lock_irqsave(&free_pages_lock, flags);
@@ -244,7 +244,7 @@ static int get_free_page(struct page **page)
244 spin_lock_irqsave(&free_pages_lock, flags); 244 spin_lock_irqsave(&free_pages_lock, flags);
245 if (list_empty(&scsiback_free_pages)) { 245 if (list_empty(&scsiback_free_pages)) {
246 spin_unlock_irqrestore(&free_pages_lock, flags); 246 spin_unlock_irqrestore(&free_pages_lock, flags);
247 return alloc_xenballooned_pages(1, page, false); 247 return gnttab_alloc_pages(1, page);
248 } 248 }
249 page[0] = list_first_entry(&scsiback_free_pages, struct page, lru); 249 page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
250 list_del(&page[0]->lru); 250 list_del(&page[0]->lru);
@@ -2106,7 +2106,7 @@ static void __exit scsiback_exit(void)
2106 while (free_pages_num) { 2106 while (free_pages_num) {
2107 if (get_free_page(&page)) 2107 if (get_free_page(&page))
2108 BUG(); 2108 BUG();
2109 free_xenballooned_pages(1, &page); 2109 gnttab_free_pages(1, &page);
2110 } 2110 }
2111 scsiback_deregister_configfs(); 2111 scsiback_deregister_configfs();
2112 xenbus_unregister_driver(&scsiback_driver); 2112 xenbus_unregister_driver(&scsiback_driver);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 85534ea63555..9433e46518c8 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -326,10 +326,13 @@ static int xenbus_write_transaction(unsigned msg_type,
326 } 326 }
327 327
328 if (msg_type == XS_TRANSACTION_START) { 328 if (msg_type == XS_TRANSACTION_START) {
329 trans->handle.id = simple_strtoul(reply, NULL, 0); 329 if (u->u.msg.type == XS_ERROR)
330 330 kfree(trans);
331 list_add(&trans->list, &u->transactions); 331 else {
332 } else if (msg_type == XS_TRANSACTION_END) { 332 trans->handle.id = simple_strtoul(reply, NULL, 0);
333 list_add(&trans->list, &u->transactions);
334 }
335 } else if (u->u.msg.type == XS_TRANSACTION_END) {
333 list_for_each_entry(trans, &u->transactions, list) 336 list_for_each_entry(trans, &u->transactions, list)
334 if (trans->handle.id == u->u.msg.tx_id) 337 if (trans->handle.id == u->u.msg.tx_id)
335 break; 338 break;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index c35c5c614e38..06ea5cd05cd9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -239,23 +239,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
239 return err; 239 return err;
240} 240}
241 241
242/** 242/*
243 * Must be called with lock_flocks() already held. Fills in the passed 243 * Fills in the passed counter variables, so you can prepare pagelist metadata
244 * counter variables, so you can prepare pagelist metadata before calling 244 * before calling ceph_encode_locks.
245 * ceph_encode_locks.
246 */ 245 */
247void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) 246void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
248{ 247{
249 struct file_lock *lock; 248 struct file_lock_context *ctx;
250 249
251 *fcntl_count = 0; 250 *fcntl_count = 0;
252 *flock_count = 0; 251 *flock_count = 0;
253 252
254 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 253 ctx = inode->i_flctx;
255 if (lock->fl_flags & FL_POSIX) 254 if (ctx) {
256 ++(*fcntl_count); 255 *fcntl_count = ctx->flc_posix_cnt;
257 else if (lock->fl_flags & FL_FLOCK) 256 *flock_count = ctx->flc_flock_cnt;
258 ++(*flock_count);
259 } 257 }
260 dout("counted %d flock locks and %d fcntl locks", 258 dout("counted %d flock locks and %d fcntl locks",
261 *flock_count, *fcntl_count); 259 *flock_count, *fcntl_count);
@@ -271,6 +269,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
271 int num_fcntl_locks, int num_flock_locks) 269 int num_fcntl_locks, int num_flock_locks)
272{ 270{
273 struct file_lock *lock; 271 struct file_lock *lock;
272 struct file_lock_context *ctx = inode->i_flctx;
274 int err = 0; 273 int err = 0;
275 int seen_fcntl = 0; 274 int seen_fcntl = 0;
276 int seen_flock = 0; 275 int seen_flock = 0;
@@ -279,33 +278,34 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
279 dout("encoding %d flock and %d fcntl locks", num_flock_locks, 278 dout("encoding %d flock and %d fcntl locks", num_flock_locks,
280 num_fcntl_locks); 279 num_fcntl_locks);
281 280
282 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 281 if (!ctx)
283 if (lock->fl_flags & FL_POSIX) { 282 return 0;
284 ++seen_fcntl; 283
285 if (seen_fcntl > num_fcntl_locks) { 284 spin_lock(&ctx->flc_lock);
286 err = -ENOSPC; 285 list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
287 goto fail; 286 ++seen_fcntl;
288 } 287 if (seen_fcntl > num_fcntl_locks) {
289 err = lock_to_ceph_filelock(lock, &flocks[l]); 288 err = -ENOSPC;
290 if (err) 289 goto fail;
291 goto fail;
292 ++l;
293 } 290 }
291 err = lock_to_ceph_filelock(lock, &flocks[l]);
292 if (err)
293 goto fail;
294 ++l;
294 } 295 }
295 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 296 list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
296 if (lock->fl_flags & FL_FLOCK) { 297 ++seen_flock;
297 ++seen_flock; 298 if (seen_flock > num_flock_locks) {
298 if (seen_flock > num_flock_locks) { 299 err = -ENOSPC;
299 err = -ENOSPC; 300 goto fail;
300 goto fail;
301 }
302 err = lock_to_ceph_filelock(lock, &flocks[l]);
303 if (err)
304 goto fail;
305 ++l;
306 } 301 }
302 err = lock_to_ceph_filelock(lock, &flocks[l]);
303 if (err)
304 goto fail;
305 ++l;
307 } 306 }
308fail: 307fail:
308 spin_unlock(&ctx->flc_lock);
309 return err; 309 return err;
310} 310}
311 311
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d2171f4a6980..5f62fb7a5d0a 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2700,20 +2700,16 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2700 struct ceph_filelock *flocks; 2700 struct ceph_filelock *flocks;
2701 2701
2702encode_again: 2702encode_again:
2703 spin_lock(&inode->i_lock);
2704 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); 2703 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2705 spin_unlock(&inode->i_lock);
2706 flocks = kmalloc((num_fcntl_locks+num_flock_locks) * 2704 flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2707 sizeof(struct ceph_filelock), GFP_NOFS); 2705 sizeof(struct ceph_filelock), GFP_NOFS);
2708 if (!flocks) { 2706 if (!flocks) {
2709 err = -ENOMEM; 2707 err = -ENOMEM;
2710 goto out_free; 2708 goto out_free;
2711 } 2709 }
2712 spin_lock(&inode->i_lock);
2713 err = ceph_encode_locks_to_buffer(inode, flocks, 2710 err = ceph_encode_locks_to_buffer(inode, flocks,
2714 num_fcntl_locks, 2711 num_fcntl_locks,
2715 num_flock_locks); 2712 num_flock_locks);
2716 spin_unlock(&inode->i_lock);
2717 if (err) { 2713 if (err) {
2718 kfree(flocks); 2714 kfree(flocks);
2719 if (err == -ENOSPC) 2715 if (err == -ENOSPC)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 294ff302a237..8fe1f7a21b3e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1113,11 +1113,6 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1113 return rc; 1113 return rc;
1114} 1114}
1115 1115
1116/* copied from fs/locks.c with a name change */
1117#define cifs_for_each_lock(inode, lockp) \
1118 for (lockp = &inode->i_flock; *lockp != NULL; \
1119 lockp = &(*lockp)->fl_next)
1120
1121struct lock_to_push { 1116struct lock_to_push {
1122 struct list_head llist; 1117 struct list_head llist;
1123 __u64 offset; 1118 __u64 offset;
@@ -1132,8 +1127,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
1132{ 1127{
1133 struct inode *inode = cfile->dentry->d_inode; 1128 struct inode *inode = cfile->dentry->d_inode;
1134 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1129 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1135 struct file_lock *flock, **before; 1130 struct file_lock *flock;
1136 unsigned int count = 0, i = 0; 1131 struct file_lock_context *flctx = inode->i_flctx;
1132 unsigned int i;
1137 int rc = 0, xid, type; 1133 int rc = 0, xid, type;
1138 struct list_head locks_to_send, *el; 1134 struct list_head locks_to_send, *el;
1139 struct lock_to_push *lck, *tmp; 1135 struct lock_to_push *lck, *tmp;
@@ -1141,21 +1137,17 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
1141 1137
1142 xid = get_xid(); 1138 xid = get_xid();
1143 1139
1144 spin_lock(&inode->i_lock); 1140 if (!flctx)
1145 cifs_for_each_lock(inode, before) { 1141 goto out;
1146 if ((*before)->fl_flags & FL_POSIX)
1147 count++;
1148 }
1149 spin_unlock(&inode->i_lock);
1150 1142
1151 INIT_LIST_HEAD(&locks_to_send); 1143 INIT_LIST_HEAD(&locks_to_send);
1152 1144
1153 /* 1145 /*
1154 * Allocating count locks is enough because no FL_POSIX locks can be 1146 * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks
1155 * added to the list while we are holding cinode->lock_sem that 1147 * can be added to the list while we are holding cinode->lock_sem that
1156 * protects locking operations of this inode. 1148 * protects locking operations of this inode.
1157 */ 1149 */
1158 for (; i < count; i++) { 1150 for (i = 0; i < flctx->flc_posix_cnt; i++) {
1159 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); 1151 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1160 if (!lck) { 1152 if (!lck) {
1161 rc = -ENOMEM; 1153 rc = -ENOMEM;
@@ -1165,11 +1157,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
1165 } 1157 }
1166 1158
1167 el = locks_to_send.next; 1159 el = locks_to_send.next;
1168 spin_lock(&inode->i_lock); 1160 spin_lock(&flctx->flc_lock);
1169 cifs_for_each_lock(inode, before) { 1161 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
1170 flock = *before;
1171 if ((flock->fl_flags & FL_POSIX) == 0)
1172 continue;
1173 if (el == &locks_to_send) { 1162 if (el == &locks_to_send) {
1174 /* 1163 /*
1175 * The list ended. We don't have enough allocated 1164 * The list ended. We don't have enough allocated
@@ -1189,9 +1178,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
1189 lck->length = length; 1178 lck->length = length;
1190 lck->type = type; 1179 lck->type = type;
1191 lck->offset = flock->fl_start; 1180 lck->offset = flock->fl_start;
1192 el = el->next;
1193 } 1181 }
1194 spin_unlock(&inode->i_lock); 1182 spin_unlock(&flctx->flc_lock);
1195 1183
1196 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { 1184 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1197 int stored_rc; 1185 int stored_rc;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 9b4e7d750d4f..d4dbf3c259b3 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -466,6 +466,8 @@ static void ext3_put_super (struct super_block * sb)
466 } 466 }
467 sb->s_fs_info = NULL; 467 sb->s_fs_info = NULL;
468 kfree(sbi->s_blockgroup_lock); 468 kfree(sbi->s_blockgroup_lock);
469 mutex_destroy(&sbi->s_orphan_lock);
470 mutex_destroy(&sbi->s_resize_lock);
469 kfree(sbi); 471 kfree(sbi);
470} 472}
471 473
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 74c5f53595fb..ac64edbe501d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1046,10 +1046,7 @@ static int ext4_mark_dquot_dirty(struct dquot *dquot);
1046static int ext4_write_info(struct super_block *sb, int type); 1046static int ext4_write_info(struct super_block *sb, int type);
1047static int ext4_quota_on(struct super_block *sb, int type, int format_id, 1047static int ext4_quota_on(struct super_block *sb, int type, int format_id,
1048 struct path *path); 1048 struct path *path);
1049static int ext4_quota_on_sysfile(struct super_block *sb, int type,
1050 int format_id);
1051static int ext4_quota_off(struct super_block *sb, int type); 1049static int ext4_quota_off(struct super_block *sb, int type);
1052static int ext4_quota_off_sysfile(struct super_block *sb, int type);
1053static int ext4_quota_on_mount(struct super_block *sb, int type); 1050static int ext4_quota_on_mount(struct super_block *sb, int type);
1054static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 1051static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1055 size_t len, loff_t off); 1052 size_t len, loff_t off);
@@ -1084,16 +1081,6 @@ static const struct quotactl_ops ext4_qctl_operations = {
1084 .get_dqblk = dquot_get_dqblk, 1081 .get_dqblk = dquot_get_dqblk,
1085 .set_dqblk = dquot_set_dqblk 1082 .set_dqblk = dquot_set_dqblk
1086}; 1083};
1087
1088static const struct quotactl_ops ext4_qctl_sysfile_operations = {
1089 .quota_on_meta = ext4_quota_on_sysfile,
1090 .quota_off = ext4_quota_off_sysfile,
1091 .quota_sync = dquot_quota_sync,
1092 .get_info = dquot_get_dqinfo,
1093 .set_info = dquot_set_dqinfo,
1094 .get_dqblk = dquot_get_dqblk,
1095 .set_dqblk = dquot_set_dqblk
1096};
1097#endif 1084#endif
1098 1085
1099static const struct super_operations ext4_sops = { 1086static const struct super_operations ext4_sops = {
@@ -3935,7 +3922,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3935#ifdef CONFIG_QUOTA 3922#ifdef CONFIG_QUOTA
3936 sb->dq_op = &ext4_quota_operations; 3923 sb->dq_op = &ext4_quota_operations;
3937 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) 3924 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
3938 sb->s_qcop = &ext4_qctl_sysfile_operations; 3925 sb->s_qcop = &dquot_quotactl_sysfile_ops;
3939 else 3926 else
3940 sb->s_qcop = &ext4_qctl_operations; 3927 sb->s_qcop = &ext4_qctl_operations;
3941 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; 3928 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
@@ -5288,21 +5275,6 @@ static int ext4_enable_quotas(struct super_block *sb)
5288 return 0; 5275 return 0;
5289} 5276}
5290 5277
5291/*
5292 * quota_on function that is used when QUOTA feature is set.
5293 */
5294static int ext4_quota_on_sysfile(struct super_block *sb, int type,
5295 int format_id)
5296{
5297 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
5298 return -EINVAL;
5299
5300 /*
5301 * USAGE was enabled at mount time. Only need to enable LIMITS now.
5302 */
5303 return ext4_quota_enable(sb, type, format_id, DQUOT_LIMITS_ENABLED);
5304}
5305
5306static int ext4_quota_off(struct super_block *sb, int type) 5278static int ext4_quota_off(struct super_block *sb, int type)
5307{ 5279{
5308 struct inode *inode = sb_dqopt(sb)->files[type]; 5280 struct inode *inode = sb_dqopt(sb)->files[type];
@@ -5329,18 +5301,6 @@ out:
5329 return dquot_quota_off(sb, type); 5301 return dquot_quota_off(sb, type);
5330} 5302}
5331 5303
5332/*
5333 * quota_off function that is used when QUOTA feature is set.
5334 */
5335static int ext4_quota_off_sysfile(struct super_block *sb, int type)
5336{
5337 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
5338 return -EINVAL;
5339
5340 /* Disable only the limits. */
5341 return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
5342}
5343
5344/* Read data from quotafile - avoid pagecache and such because we cannot afford 5304/* Read data from quotafile - avoid pagecache and such because we cannot afford
5345 * acquiring the locks... As quota files are never truncated and quota code 5305 * acquiring the locks... As quota files are never truncated and quota code
5346 * itself serializes the operations (and no one else should touch the files) 5306 * itself serializes the operations (and no one else should touch the files)
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3088e2a38e30..7b3143064af1 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -73,7 +73,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
73 73
74 BUG_ON(name == NULL); 74 BUG_ON(name == NULL);
75 75
76 if (acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode))) 76 if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
77 return -E2BIG; 77 return -E2BIG;
78 78
79 if (type == ACL_TYPE_ACCESS) { 79 if (type == ACL_TYPE_ACCESS) {
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c5a34f09e228..6371192961e2 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1896,7 +1896,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1896 1896
1897 ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN); 1897 ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
1898 if (ht == NULL) 1898 if (ht == NULL)
1899 ht = vzalloc(size); 1899 ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
1900 PAGE_KERNEL);
1900 if (!ht) 1901 if (!ht)
1901 return -ENOMEM; 1902 return -ENOMEM;
1902 1903
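The fallback above now passes GFP_NOFS explicitly: plain vzalloc() implies
GFP_KERNEL, which may recurse into filesystem reclaim while the directory
code holds locks. The shape of the kmalloc-then-vmalloc pattern, as a sketch:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Try a physically contiguous allocation first and fall back to vmalloc
     * space, keeping GFP_NOFS in both paths. */
    static void *example_alloc_nofs(size_t size)
    {
            void *p = kzalloc(size, GFP_NOFS | __GFP_NOWARN);

            if (!p)
                    p = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
                                  PAGE_KERNEL);
            return p;
    }

Memory obtained this way can be released with kvfree(), which handles both the
slab and the vmalloc case.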
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a23524aa3eac..aeb7bc958a18 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -173,19 +173,14 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
173 spin_unlock(&lru_lock); 173 spin_unlock(&lru_lock);
174} 174}
175 175
176static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 176static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
177{ 177{
178 spin_lock(&lru_lock);
178 if (!list_empty(&gl->gl_lru)) { 179 if (!list_empty(&gl->gl_lru)) {
179 list_del_init(&gl->gl_lru); 180 list_del_init(&gl->gl_lru);
180 atomic_dec(&lru_count); 181 atomic_dec(&lru_count);
181 clear_bit(GLF_LRU, &gl->gl_flags); 182 clear_bit(GLF_LRU, &gl->gl_flags);
182 } 183 }
183}
184
185static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
186{
187 spin_lock(&lru_lock);
188 __gfs2_glock_remove_from_lru(gl);
189 spin_unlock(&lru_lock); 184 spin_unlock(&lru_lock);
190} 185}
191 186
@@ -205,9 +200,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
205 200
206 lockref_mark_dead(&gl->gl_lockref); 201 lockref_mark_dead(&gl->gl_lockref);
207 202
208 spin_lock(&lru_lock); 203 gfs2_glock_remove_from_lru(gl);
209 __gfs2_glock_remove_from_lru(gl);
210 spin_unlock(&lru_lock);
211 spin_unlock(&gl->gl_lockref.lock); 204 spin_unlock(&gl->gl_lockref.lock);
212 spin_lock_bucket(gl->gl_hash); 205 spin_lock_bucket(gl->gl_hash);
213 hlist_bl_del_rcu(&gl->gl_list); 206 hlist_bl_del_rcu(&gl->gl_list);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 9054002ebe70..73c72253faac 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -543,10 +543,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
543 } 543 }
544 544
545 error = gfs2_dir_add(&dip->i_inode, name, ip, da); 545 error = gfs2_dir_add(&dip->i_inode, name, ip, da);
546 if (error)
547 goto fail_end_trans;
548 546
549fail_end_trans:
550 gfs2_trans_end(sdp); 547 gfs2_trans_end(sdp);
551fail_ipreserv: 548fail_ipreserv:
552 gfs2_inplace_release(dip); 549 gfs2_inplace_release(dip);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 573bd3b758fa..1b645773c98e 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -439,7 +439,7 @@ static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
439 439
440 ls->ls_recover_jid_done = jid; 440 ls->ls_recover_jid_done = jid;
441 ls->ls_recover_jid_status = message; 441 ls->ls_recover_jid_status = message;
442 sprintf(env_jid, "JID=%d", jid); 442 sprintf(env_jid, "JID=%u", jid);
443 sprintf(env_status, "RECOVERY=%s", 443 sprintf(env_status, "RECOVERY=%s",
444 message == LM_RD_SUCCESS ? "Done" : "Failed"); 444 message == LM_RD_SUCCESS ? "Done" : "Failed");
445 kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp); 445 kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 3ab566ba5696..ae8e8811f0e8 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -96,7 +96,7 @@ static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
96 struct super_block *sb = sdp->sd_vfs; 96 struct super_block *sb = sdp->sd_vfs;
97 int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1; 97 int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;
98 98
99 return snprintf(buf, PAGE_SIZE, "%u\n", frozen); 99 return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
100} 100}
101 101
102static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 102static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
diff --git a/fs/inode.c b/fs/inode.c
index c760fac33c92..3a53b1da3fb8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -194,7 +194,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
194#ifdef CONFIG_FSNOTIFY 194#ifdef CONFIG_FSNOTIFY
195 inode->i_fsnotify_mask = 0; 195 inode->i_fsnotify_mask = 0;
196#endif 196#endif
197 197 inode->i_flctx = NULL;
198 this_cpu_inc(nr_inodes); 198 this_cpu_inc(nr_inodes);
199 199
200 return 0; 200 return 0;
@@ -237,6 +237,7 @@ void __destroy_inode(struct inode *inode)
237 BUG_ON(inode_has_buffers(inode)); 237 BUG_ON(inode_has_buffers(inode));
238 security_inode_free(inode); 238 security_inode_free(inode);
239 fsnotify_inode_delete(inode); 239 fsnotify_inode_delete(inode);
240 locks_free_lock_context(inode->i_flctx);
240 if (!inode->i_nlink) { 241 if (!inode->i_nlink) {
241 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); 242 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
242 atomic_long_dec(&inode->i_sb->s_remove_count); 243 atomic_long_dec(&inode->i_sb->s_remove_count);
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index 01e1ee7a998b..005a15cfd30a 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -2,6 +2,7 @@
2 * linux/fs/isofs/util.c 2 * linux/fs/isofs/util.c
3 */ 3 */
4 4
5#include <linux/time.h>
5#include "isofs.h" 6#include "isofs.h"
6 7
7/* 8/*
@@ -17,9 +18,9 @@
17int iso_date(char * p, int flag) 18int iso_date(char * p, int flag)
18{ 19{
19 int year, month, day, hour, minute, second, tz; 20 int year, month, day, hour, minute, second, tz;
20 int crtime, days, i; 21 int crtime;
21 22
22 year = p[0] - 70; 23 year = p[0];
23 month = p[1]; 24 month = p[1];
24 day = p[2]; 25 day = p[2];
25 hour = p[3]; 26 hour = p[3];
@@ -31,18 +32,7 @@ int iso_date(char * p, int flag)
31 if (year < 0) { 32 if (year < 0) {
32 crtime = 0; 33 crtime = 0;
33 } else { 34 } else {
34 int monlen[12] = {31,28,31,30,31,30,31,31,30,31,30,31}; 35 crtime = mktime64(year+1900, month, day, hour, minute, second);
35
36 days = year * 365;
37 if (year > 2)
38 days += (year+1) / 4;
39 for (i = 1; i < month; i++)
40 days += monlen[i-1];
41 if (((year+2) % 4) == 0 && month > 2)
42 days++;
43 days += day - 1;
44 crtime = ((((days * 24) + hour) * 60 + minute) * 60)
45 + second;
46 36
47 /* sign extend */ 37 /* sign extend */
48 if (tz & 0x80) 38 if (tz & 0x80)
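
The isofs hunk replaces a hand-rolled days-since-1970 calculation (including the manual leap-year adjustment) with mktime64(), which takes a full calendar date (year, month 1..12, day, hour, minute, second) and returns seconds since the Unix epoch. A rough userspace analogue of what the new call computes, using the glibc/BSD extension timegm() instead of the kernel helper; the field values below are made up for illustration:

#define _GNU_SOURCE		/* for timegm() on glibc */
#include <stdio.h>
#include <time.h>

int main(void)
{
	/* ISO9660 stores the year as an offset from 1900 and month as 1..12 */
	int year = 115, month = 2, day = 14, hour = 12, minute = 30, second = 0;
	struct tm tm = {
		.tm_year = year,	/* years since 1900, as in the on-disk field */
		.tm_mon  = month - 1,	/* struct tm months run 0..11 */
		.tm_mday = day,
		.tm_hour = hour,
		.tm_min  = minute,
		.tm_sec  = second,
	};

	/* timegm() interprets tm as UTC, like mktime64(year + 1900, month, ...) */
	time_t crtime = timegm(&tm);
	printf("crtime = %lld\n", (long long)crtime);
	return 0;
}
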
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index d12ff4e2dbe7..665ef5a05183 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -164,12 +164,15 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
164{ 164{
165 struct inode *inode = nlmsvc_file_inode(file); 165 struct inode *inode = nlmsvc_file_inode(file);
166 struct file_lock *fl; 166 struct file_lock *fl;
167 struct file_lock_context *flctx = inode->i_flctx;
167 struct nlm_host *lockhost; 168 struct nlm_host *lockhost;
168 169
170 if (!flctx || list_empty_careful(&flctx->flc_posix))
171 return 0;
169again: 172again:
170 file->f_locks = 0; 173 file->f_locks = 0;
171 spin_lock(&inode->i_lock); 174 spin_lock(&flctx->flc_lock);
172 for (fl = inode->i_flock; fl; fl = fl->fl_next) { 175 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
173 if (fl->fl_lmops != &nlmsvc_lock_operations) 176 if (fl->fl_lmops != &nlmsvc_lock_operations)
174 continue; 177 continue;
175 178
@@ -180,7 +183,7 @@ again:
180 if (match(lockhost, host)) { 183 if (match(lockhost, host)) {
181 struct file_lock lock = *fl; 184 struct file_lock lock = *fl;
182 185
183 spin_unlock(&inode->i_lock); 186 spin_unlock(&flctx->flc_lock);
184 lock.fl_type = F_UNLCK; 187 lock.fl_type = F_UNLCK;
185 lock.fl_start = 0; 188 lock.fl_start = 0;
186 lock.fl_end = OFFSET_MAX; 189 lock.fl_end = OFFSET_MAX;
@@ -192,7 +195,7 @@ again:
192 goto again; 195 goto again;
193 } 196 }
194 } 197 }
195 spin_unlock(&inode->i_lock); 198 spin_unlock(&flctx->flc_lock);
196 199
197 return 0; 200 return 0;
198} 201}
@@ -223,18 +226,21 @@ nlm_file_inuse(struct nlm_file *file)
223{ 226{
224 struct inode *inode = nlmsvc_file_inode(file); 227 struct inode *inode = nlmsvc_file_inode(file);
225 struct file_lock *fl; 228 struct file_lock *fl;
229 struct file_lock_context *flctx = inode->i_flctx;
226 230
227 if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares) 231 if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
228 return 1; 232 return 1;
229 233
230 spin_lock(&inode->i_lock); 234 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
231 for (fl = inode->i_flock; fl; fl = fl->fl_next) { 235 spin_lock(&flctx->flc_lock);
232 if (fl->fl_lmops == &nlmsvc_lock_operations) { 236 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
233 spin_unlock(&inode->i_lock); 237 if (fl->fl_lmops == &nlmsvc_lock_operations) {
234 return 1; 238 spin_unlock(&flctx->flc_lock);
239 return 1;
240 }
235 } 241 }
242 spin_unlock(&flctx->flc_lock);
236 } 243 }
237 spin_unlock(&inode->i_lock);
238 file->f_locks = 0; 244 file->f_locks = 0;
239 return 0; 245 return 0;
240} 246}
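
The lockd changes show the traversal idiom this series introduces: instead of walking the single inode->i_flock chain under i_lock and filtering by lock type, callers pick the per-type list in inode->i_flctx and walk it under flc_lock, skipping the walk entirely when the list is (carefully) empty. Below is a hedged, kernel-style sketch of that idiom as another filesystem might use it; my_fs_count_posix_locks() is a made-up helper and the fragment is not a standalone program, but the list and locking calls are real kernel APIs:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical helper: count POSIX locks on an inode under the new scheme. */
static int my_fs_count_posix_locks(struct inode *inode)
{
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;
	int count = 0;

	/* Lockless fast path: no context, or no POSIX locks recorded. */
	if (!flctx || list_empty_careful(&flctx->flc_posix))
		return 0;

	/* The per-type lists are protected by flc_lock, not i_lock. */
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(fl, &flctx->flc_posix, fl_list)
		count++;
	spin_unlock(&flctx->flc_lock);

	return count;
}
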
diff --git a/fs/locks.c b/fs/locks.c
index 59e2f905e4ff..4d0d41163a50 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -157,14 +157,11 @@ static int target_leasetype(struct file_lock *fl)
157int leases_enable = 1; 157int leases_enable = 1;
158int lease_break_time = 45; 158int lease_break_time = 45;
159 159
160#define for_each_lock(inode, lockp) \
161 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
162
163/* 160/*
164 * The global file_lock_list is only used for displaying /proc/locks, so we 161 * The global file_lock_list is only used for displaying /proc/locks, so we
165 * keep a list on each CPU, with each list protected by its own spinlock via 162 * keep a list on each CPU, with each list protected by its own spinlock via
166 * the file_lock_lglock. Note that alterations to the list also require that 163 * the file_lock_lglock. Note that alterations to the list also require that
167 * the relevant i_lock is held. 164 * the relevant flc_lock is held.
168 */ 165 */
169DEFINE_STATIC_LGLOCK(file_lock_lglock); 166DEFINE_STATIC_LGLOCK(file_lock_lglock);
170static DEFINE_PER_CPU(struct hlist_head, file_lock_list); 167static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
@@ -192,21 +189,68 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
192 * contrast to those that are acting as records of acquired locks). 189 * contrast to those that are acting as records of acquired locks).
193 * 190 *
194 * Note that when we acquire this lock in order to change the above fields, 191 * Note that when we acquire this lock in order to change the above fields,
195 * we often hold the i_lock as well. In certain cases, when reading the fields 192 * we often hold the flc_lock as well. In certain cases, when reading the fields
196 * protected by this lock, we can skip acquiring it iff we already hold the 193 * protected by this lock, we can skip acquiring it iff we already hold the
197 * i_lock. 194 * flc_lock.
198 * 195 *
199 * In particular, adding an entry to the fl_block list requires that you hold 196 * In particular, adding an entry to the fl_block list requires that you hold
200 * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting 197 * both the flc_lock and the blocked_lock_lock (acquired in that order).
201 * an entry from the list however only requires the file_lock_lock. 198 * Deleting an entry from the list however only requires the file_lock_lock.
202 */ 199 */
203static DEFINE_SPINLOCK(blocked_lock_lock); 200static DEFINE_SPINLOCK(blocked_lock_lock);
204 201
202static struct kmem_cache *flctx_cache __read_mostly;
205static struct kmem_cache *filelock_cache __read_mostly; 203static struct kmem_cache *filelock_cache __read_mostly;
206 204
205static struct file_lock_context *
206locks_get_lock_context(struct inode *inode)
207{
208 struct file_lock_context *new;
209
210 if (likely(inode->i_flctx))
211 goto out;
212
213 new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
214 if (!new)
215 goto out;
216
217 spin_lock_init(&new->flc_lock);
218 INIT_LIST_HEAD(&new->flc_flock);
219 INIT_LIST_HEAD(&new->flc_posix);
220 INIT_LIST_HEAD(&new->flc_lease);
221
222 /*
223 * Assign the pointer if it's not already assigned. If it is, then
224 * free the context we just allocated.
225 */
226 spin_lock(&inode->i_lock);
227 if (likely(!inode->i_flctx)) {
228 inode->i_flctx = new;
229 new = NULL;
230 }
231 spin_unlock(&inode->i_lock);
232
233 if (new)
234 kmem_cache_free(flctx_cache, new);
235out:
236 return inode->i_flctx;
237}
238
239void
240locks_free_lock_context(struct file_lock_context *ctx)
241{
242 if (ctx) {
243 WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
244 WARN_ON_ONCE(!list_empty(&ctx->flc_posix));
245 WARN_ON_ONCE(!list_empty(&ctx->flc_lease));
246 kmem_cache_free(flctx_cache, ctx);
247 }
248}
249
207static void locks_init_lock_heads(struct file_lock *fl) 250static void locks_init_lock_heads(struct file_lock *fl)
208{ 251{
209 INIT_HLIST_NODE(&fl->fl_link); 252 INIT_HLIST_NODE(&fl->fl_link);
253 INIT_LIST_HEAD(&fl->fl_list);
210 INIT_LIST_HEAD(&fl->fl_block); 254 INIT_LIST_HEAD(&fl->fl_block);
211 init_waitqueue_head(&fl->fl_wait); 255 init_waitqueue_head(&fl->fl_wait);
212} 256}
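
Judging from this hunk, the new context carries its own spinlock plus one list per lock class (flc_flock, flc_posix, flc_lease), with per-list counters used further down. It is allocated lazily: two tasks can race into locks_get_lock_context(), so the pointer is only installed under i_lock if it is still NULL and the loser frees its allocation. A minimal userspace model of that double-checked pattern using pthreads; get_ctx() and the struct names are illustrative, not kernel code:

#include <pthread.h>
#include <stdlib.h>

struct ctx { int dummy; };

struct object {
	pthread_mutex_t lock;		/* stands in for inode->i_lock */
	struct ctx *ctx;		/* stands in for inode->i_flctx */
};

/* Lazily allocate obj->ctx; safe against concurrent callers. */
static struct ctx *get_ctx(struct object *obj)
{
	struct ctx *new;

	if (obj->ctx)			/* fast path: already set up */
		return obj->ctx;

	new = malloc(sizeof(*new));
	if (!new)
		return NULL;

	pthread_mutex_lock(&obj->lock);
	if (!obj->ctx) {		/* still unset: install ours */
		obj->ctx = new;
		new = NULL;
	}
	pthread_mutex_unlock(&obj->lock);

	free(new);			/* lost the race (or new is already NULL) */
	return obj->ctx;
}
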
@@ -243,6 +287,7 @@ EXPORT_SYMBOL_GPL(locks_release_private);
243void locks_free_lock(struct file_lock *fl) 287void locks_free_lock(struct file_lock *fl)
244{ 288{
245 BUG_ON(waitqueue_active(&fl->fl_wait)); 289 BUG_ON(waitqueue_active(&fl->fl_wait));
290 BUG_ON(!list_empty(&fl->fl_list));
246 BUG_ON(!list_empty(&fl->fl_block)); 291 BUG_ON(!list_empty(&fl->fl_block));
247 BUG_ON(!hlist_unhashed(&fl->fl_link)); 292 BUG_ON(!hlist_unhashed(&fl->fl_link));
248 293
@@ -257,8 +302,8 @@ locks_dispose_list(struct list_head *dispose)
257 struct file_lock *fl; 302 struct file_lock *fl;
258 303
259 while (!list_empty(dispose)) { 304 while (!list_empty(dispose)) {
260 fl = list_first_entry(dispose, struct file_lock, fl_block); 305 fl = list_first_entry(dispose, struct file_lock, fl_list);
261 list_del_init(&fl->fl_block); 306 list_del_init(&fl->fl_list);
262 locks_free_lock(fl); 307 locks_free_lock(fl);
263 } 308 }
264} 309}
@@ -513,7 +558,7 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
513 return fl1->fl_owner == fl2->fl_owner; 558 return fl1->fl_owner == fl2->fl_owner;
514} 559}
515 560
516/* Must be called with the i_lock held! */ 561/* Must be called with the flc_lock held! */
517static void locks_insert_global_locks(struct file_lock *fl) 562static void locks_insert_global_locks(struct file_lock *fl)
518{ 563{
519 lg_local_lock(&file_lock_lglock); 564 lg_local_lock(&file_lock_lglock);
@@ -522,12 +567,12 @@ static void locks_insert_global_locks(struct file_lock *fl)
522 lg_local_unlock(&file_lock_lglock); 567 lg_local_unlock(&file_lock_lglock);
523} 568}
524 569
525/* Must be called with the i_lock held! */ 570/* Must be called with the flc_lock held! */
526static void locks_delete_global_locks(struct file_lock *fl) 571static void locks_delete_global_locks(struct file_lock *fl)
527{ 572{
528 /* 573 /*
529 * Avoid taking lock if already unhashed. This is safe since this check 574 * Avoid taking lock if already unhashed. This is safe since this check
530 * is done while holding the i_lock, and new insertions into the list 575 * is done while holding the flc_lock, and new insertions into the list
531 * also require that it be held. 576 * also require that it be held.
532 */ 577 */
533 if (hlist_unhashed(&fl->fl_link)) 578 if (hlist_unhashed(&fl->fl_link))
@@ -579,10 +624,10 @@ static void locks_delete_block(struct file_lock *waiter)
579 * the order they blocked. The documentation doesn't require this but 624 * the order they blocked. The documentation doesn't require this but
580 * it seems like the reasonable thing to do. 625 * it seems like the reasonable thing to do.
581 * 626 *
582 * Must be called with both the i_lock and blocked_lock_lock held. The fl_block 627 * Must be called with both the flc_lock and blocked_lock_lock held. The
583 * list itself is protected by the blocked_lock_lock, but by ensuring that the 628 * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
584 * i_lock is also held on insertions we can avoid taking the blocked_lock_lock 629 * that the flc_lock is also held on insertions we can avoid taking the
585 * in some cases when we see that the fl_block list is empty. 630 * blocked_lock_lock in some cases when we see that the fl_block list is empty.
586 */ 631 */
587static void __locks_insert_block(struct file_lock *blocker, 632static void __locks_insert_block(struct file_lock *blocker,
588 struct file_lock *waiter) 633 struct file_lock *waiter)
@@ -594,7 +639,7 @@ static void __locks_insert_block(struct file_lock *blocker,
594 locks_insert_global_blocked(waiter); 639 locks_insert_global_blocked(waiter);
595} 640}
596 641
597/* Must be called with i_lock held. */ 642/* Must be called with flc_lock held. */
598static void locks_insert_block(struct file_lock *blocker, 643static void locks_insert_block(struct file_lock *blocker,
599 struct file_lock *waiter) 644 struct file_lock *waiter)
600{ 645{
@@ -606,15 +651,15 @@ static void locks_insert_block(struct file_lock *blocker,
606/* 651/*
607 * Wake up processes blocked waiting for blocker. 652 * Wake up processes blocked waiting for blocker.
608 * 653 *
609 * Must be called with the inode->i_lock held! 654 * Must be called with the inode->flc_lock held!
610 */ 655 */
611static void locks_wake_up_blocks(struct file_lock *blocker) 656static void locks_wake_up_blocks(struct file_lock *blocker)
612{ 657{
613 /* 658 /*
614 * Avoid taking global lock if list is empty. This is safe since new 659 * Avoid taking global lock if list is empty. This is safe since new
615 * blocked requests are only added to the list under the i_lock, and 660 * blocked requests are only added to the list under the flc_lock, and
616 * the i_lock is always held here. Note that removal from the fl_block 661 * the flc_lock is always held here. Note that removal from the fl_block
617 * list does not require the i_lock, so we must recheck list_empty() 662 * list does not require the flc_lock, so we must recheck list_empty()
618 * after acquiring the blocked_lock_lock. 663 * after acquiring the blocked_lock_lock.
619 */ 664 */
620 if (list_empty(&blocker->fl_block)) 665 if (list_empty(&blocker->fl_block))
@@ -635,63 +680,36 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
635 spin_unlock(&blocked_lock_lock); 680 spin_unlock(&blocked_lock_lock);
636} 681}
637 682
638/* Insert file lock fl into an inode's lock list at the position indicated 683static void
639 * by pos. At the same time add the lock to the global file lock list. 684locks_insert_lock_ctx(struct file_lock *fl, int *counter,
640 * 685 struct list_head *before)
641 * Must be called with the i_lock held!
642 */
643static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
644{ 686{
645 fl->fl_nspid = get_pid(task_tgid(current)); 687 fl->fl_nspid = get_pid(task_tgid(current));
646 688 list_add_tail(&fl->fl_list, before);
647 /* insert into file's list */ 689 ++*counter;
648 fl->fl_next = *pos;
649 *pos = fl;
650
651 locks_insert_global_locks(fl); 690 locks_insert_global_locks(fl);
652} 691}
653 692
654/** 693static void
655 * locks_delete_lock - Delete a lock and then free it. 694locks_unlink_lock_ctx(struct file_lock *fl, int *counter)
656 * @thisfl_p: pointer that points to the fl_next field of the previous
657 * inode->i_flock list entry
658 *
659 * Unlink a lock from all lists and free the namespace reference, but don't
660 * free it yet. Wake up processes that are blocked waiting for this lock and
661 * notify the FS that the lock has been cleared.
662 *
663 * Must be called with the i_lock held!
664 */
665static void locks_unlink_lock(struct file_lock **thisfl_p)
666{ 695{
667 struct file_lock *fl = *thisfl_p;
668
669 locks_delete_global_locks(fl); 696 locks_delete_global_locks(fl);
670 697 list_del_init(&fl->fl_list);
671 *thisfl_p = fl->fl_next; 698 --*counter;
672 fl->fl_next = NULL;
673
674 if (fl->fl_nspid) { 699 if (fl->fl_nspid) {
675 put_pid(fl->fl_nspid); 700 put_pid(fl->fl_nspid);
676 fl->fl_nspid = NULL; 701 fl->fl_nspid = NULL;
677 } 702 }
678
679 locks_wake_up_blocks(fl); 703 locks_wake_up_blocks(fl);
680} 704}
681 705
682/* 706static void
683 * Unlink a lock from all lists and free it. 707locks_delete_lock_ctx(struct file_lock *fl, int *counter,
684 * 708 struct list_head *dispose)
685 * Must be called with i_lock held!
686 */
687static void locks_delete_lock(struct file_lock **thisfl_p,
688 struct list_head *dispose)
689{ 709{
690 struct file_lock *fl = *thisfl_p; 710 locks_unlink_lock_ctx(fl, counter);
691
692 locks_unlink_lock(thisfl_p);
693 if (dispose) 711 if (dispose)
694 list_add(&fl->fl_block, dispose); 712 list_add(&fl->fl_list, dispose);
695 else 713 else
696 locks_free_lock(fl); 714 locks_free_lock(fl);
697} 715}
@@ -746,22 +764,27 @@ void
746posix_test_lock(struct file *filp, struct file_lock *fl) 764posix_test_lock(struct file *filp, struct file_lock *fl)
747{ 765{
748 struct file_lock *cfl; 766 struct file_lock *cfl;
767 struct file_lock_context *ctx;
749 struct inode *inode = file_inode(filp); 768 struct inode *inode = file_inode(filp);
750 769
751 spin_lock(&inode->i_lock); 770 ctx = inode->i_flctx;
752 for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) { 771 if (!ctx || list_empty_careful(&ctx->flc_posix)) {
753 if (!IS_POSIX(cfl))
754 continue;
755 if (posix_locks_conflict(fl, cfl))
756 break;
757 }
758 if (cfl) {
759 locks_copy_conflock(fl, cfl);
760 if (cfl->fl_nspid)
761 fl->fl_pid = pid_vnr(cfl->fl_nspid);
762 } else
763 fl->fl_type = F_UNLCK; 772 fl->fl_type = F_UNLCK;
764 spin_unlock(&inode->i_lock); 773 return;
774 }
775
776 spin_lock(&ctx->flc_lock);
777 list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
778 if (posix_locks_conflict(fl, cfl)) {
779 locks_copy_conflock(fl, cfl);
780 if (cfl->fl_nspid)
781 fl->fl_pid = pid_vnr(cfl->fl_nspid);
782 goto out;
783 }
784 }
785 fl->fl_type = F_UNLCK;
786out:
787 spin_unlock(&ctx->flc_lock);
765 return; 788 return;
766} 789}
767EXPORT_SYMBOL(posix_test_lock); 790EXPORT_SYMBOL(posix_test_lock);
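
posix_test_lock() is the helper behind fcntl(F_GETLK) for most filesystems; the rewrite walks only flc_posix and copies the first conflicting lock into the caller's request. A small userspace program that exercises this path by asking whether a byte range is locked (the path name is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/locktest", O_RDWR | O_CREAT, 0600);
	struct flock fl = {
		.l_type   = F_WRLCK,	/* "could I take a write lock ..." */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 100,	/* "... on the first 100 bytes?" */
	};

	if (fd < 0 || fcntl(fd, F_GETLK, &fl) < 0) {
		perror("F_GETLK");
		return 1;
	}

	if (fl.l_type == F_UNLCK)
		printf("no conflicting POSIX lock\n");
	else
		printf("conflict: pid %d holds a lock starting at %lld\n",
		       (int)fl.l_pid, (long long)fl.l_start);
	close(fd);
	return 0;
}
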
@@ -845,34 +868,34 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
845static int flock_lock_file(struct file *filp, struct file_lock *request) 868static int flock_lock_file(struct file *filp, struct file_lock *request)
846{ 869{
847 struct file_lock *new_fl = NULL; 870 struct file_lock *new_fl = NULL;
848 struct file_lock **before; 871 struct file_lock *fl;
849 struct inode * inode = file_inode(filp); 872 struct file_lock_context *ctx;
873 struct inode *inode = file_inode(filp);
850 int error = 0; 874 int error = 0;
851 int found = 0; 875 bool found = false;
852 LIST_HEAD(dispose); 876 LIST_HEAD(dispose);
853 877
878 ctx = locks_get_lock_context(inode);
879 if (!ctx)
880 return -ENOMEM;
881
854 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) { 882 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
855 new_fl = locks_alloc_lock(); 883 new_fl = locks_alloc_lock();
856 if (!new_fl) 884 if (!new_fl)
857 return -ENOMEM; 885 return -ENOMEM;
858 } 886 }
859 887
860 spin_lock(&inode->i_lock); 888 spin_lock(&ctx->flc_lock);
861 if (request->fl_flags & FL_ACCESS) 889 if (request->fl_flags & FL_ACCESS)
862 goto find_conflict; 890 goto find_conflict;
863 891
864 for_each_lock(inode, before) { 892 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
865 struct file_lock *fl = *before;
866 if (IS_POSIX(fl))
867 break;
868 if (IS_LEASE(fl))
869 continue;
870 if (filp != fl->fl_file) 893 if (filp != fl->fl_file)
871 continue; 894 continue;
872 if (request->fl_type == fl->fl_type) 895 if (request->fl_type == fl->fl_type)
873 goto out; 896 goto out;
874 found = 1; 897 found = true;
875 locks_delete_lock(before, &dispose); 898 locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose);
876 break; 899 break;
877 } 900 }
878 901
@@ -887,18 +910,13 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
887 * give it the opportunity to lock the file. 910 * give it the opportunity to lock the file.
888 */ 911 */
889 if (found) { 912 if (found) {
890 spin_unlock(&inode->i_lock); 913 spin_unlock(&ctx->flc_lock);
891 cond_resched(); 914 cond_resched();
892 spin_lock(&inode->i_lock); 915 spin_lock(&ctx->flc_lock);
893 } 916 }
894 917
895find_conflict: 918find_conflict:
896 for_each_lock(inode, before) { 919 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
897 struct file_lock *fl = *before;
898 if (IS_POSIX(fl))
899 break;
900 if (IS_LEASE(fl))
901 continue;
902 if (!flock_locks_conflict(request, fl)) 920 if (!flock_locks_conflict(request, fl))
903 continue; 921 continue;
904 error = -EAGAIN; 922 error = -EAGAIN;
@@ -911,12 +929,12 @@ find_conflict:
911 if (request->fl_flags & FL_ACCESS) 929 if (request->fl_flags & FL_ACCESS)
912 goto out; 930 goto out;
913 locks_copy_lock(new_fl, request); 931 locks_copy_lock(new_fl, request);
914 locks_insert_lock(before, new_fl); 932 locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock);
915 new_fl = NULL; 933 new_fl = NULL;
916 error = 0; 934 error = 0;
917 935
918out: 936out:
919 spin_unlock(&inode->i_lock); 937 spin_unlock(&ctx->flc_lock);
920 if (new_fl) 938 if (new_fl)
921 locks_free_lock(new_fl); 939 locks_free_lock(new_fl);
922 locks_dispose_list(&dispose); 940 locks_dispose_list(&dispose);
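
flock_lock_file() now walks only flc_flock, which is why the old "skip POSIX and lease entries" checks disappear. From userspace this is the path flock(2) normally takes when the filesystem supplies no ->flock operation; a short example of the semantics the function implements, including the type change on the same open file that hits the "found = true, delete old, insert new" case above (path name arbitrary):

#include <stdio.h>
#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/flocktest", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;

	/* First request inserts a lock on the inode's flc_flock list. */
	if (flock(fd, LOCK_EX) < 0)
		perror("LOCK_EX");

	/*
	 * Asking for a different type on the same struct file removes the
	 * old entry and inserts a new one rather than stacking two locks.
	 */
	if (flock(fd, LOCK_SH) < 0)
		perror("LOCK_SH");

	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}
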
@@ -925,16 +943,20 @@ out:
925 943
926static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock) 944static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
927{ 945{
928 struct file_lock *fl; 946 struct file_lock *fl, *tmp;
929 struct file_lock *new_fl = NULL; 947 struct file_lock *new_fl = NULL;
930 struct file_lock *new_fl2 = NULL; 948 struct file_lock *new_fl2 = NULL;
931 struct file_lock *left = NULL; 949 struct file_lock *left = NULL;
932 struct file_lock *right = NULL; 950 struct file_lock *right = NULL;
933 struct file_lock **before; 951 struct file_lock_context *ctx;
934 int error; 952 int error;
935 bool added = false; 953 bool added = false;
936 LIST_HEAD(dispose); 954 LIST_HEAD(dispose);
937 955
956 ctx = locks_get_lock_context(inode);
957 if (!ctx)
958 return -ENOMEM;
959
938 /* 960 /*
939 * We may need two file_lock structures for this operation, 961 * We may need two file_lock structures for this operation,
940 * so we get them in advance to avoid races. 962 * so we get them in advance to avoid races.
@@ -948,15 +970,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
948 new_fl2 = locks_alloc_lock(); 970 new_fl2 = locks_alloc_lock();
949 } 971 }
950 972
951 spin_lock(&inode->i_lock); 973 spin_lock(&ctx->flc_lock);
952 /* 974 /*
953 * New lock request. Walk all POSIX locks and look for conflicts. If 975 * New lock request. Walk all POSIX locks and look for conflicts. If
954 * there are any, either return error or put the request on the 976 * there are any, either return error or put the request on the
955 * blocker's list of waiters and the global blocked_hash. 977 * blocker's list of waiters and the global blocked_hash.
956 */ 978 */
957 if (request->fl_type != F_UNLCK) { 979 if (request->fl_type != F_UNLCK) {
958 for_each_lock(inode, before) { 980 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
959 fl = *before;
960 if (!IS_POSIX(fl)) 981 if (!IS_POSIX(fl))
961 continue; 982 continue;
962 if (!posix_locks_conflict(request, fl)) 983 if (!posix_locks_conflict(request, fl))
@@ -986,29 +1007,25 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
986 if (request->fl_flags & FL_ACCESS) 1007 if (request->fl_flags & FL_ACCESS)
987 goto out; 1008 goto out;
988 1009
989 /* 1010 /* Find the first old lock with the same owner as the new lock */
990 * Find the first old lock with the same owner as the new lock. 1011 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
991 */ 1012 if (posix_same_owner(request, fl))
992 1013 break;
993 before = &inode->i_flock;
994
995 /* First skip locks owned by other processes. */
996 while ((fl = *before) && (!IS_POSIX(fl) ||
997 !posix_same_owner(request, fl))) {
998 before = &fl->fl_next;
999 } 1014 }
1000 1015
1001 /* Process locks with this owner. */ 1016 /* Process locks with this owner. */
1002 while ((fl = *before) && posix_same_owner(request, fl)) { 1017 list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1003 /* Detect adjacent or overlapping regions (if same lock type) 1018 if (!posix_same_owner(request, fl))
1004 */ 1019 break;
1020
1021 /* Detect adjacent or overlapping regions (if same lock type) */
1005 if (request->fl_type == fl->fl_type) { 1022 if (request->fl_type == fl->fl_type) {
1006 /* In all comparisons of start vs end, use 1023 /* In all comparisons of start vs end, use
1007 * "start - 1" rather than "end + 1". If end 1024 * "start - 1" rather than "end + 1". If end
1008 * is OFFSET_MAX, end + 1 will become negative. 1025 * is OFFSET_MAX, end + 1 will become negative.
1009 */ 1026 */
1010 if (fl->fl_end < request->fl_start - 1) 1027 if (fl->fl_end < request->fl_start - 1)
1011 goto next_lock; 1028 continue;
1012 /* If the next lock in the list has entirely bigger 1029 /* If the next lock in the list has entirely bigger
1013 * addresses than the new one, insert the lock here. 1030 * addresses than the new one, insert the lock here.
1014 */ 1031 */
@@ -1029,18 +1046,18 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
1029 else 1046 else
1030 request->fl_end = fl->fl_end; 1047 request->fl_end = fl->fl_end;
1031 if (added) { 1048 if (added) {
1032 locks_delete_lock(before, &dispose); 1049 locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt,
1050 &dispose);
1033 continue; 1051 continue;
1034 } 1052 }
1035 request = fl; 1053 request = fl;
1036 added = true; 1054 added = true;
1037 } 1055 } else {
1038 else {
1039 /* Processing for different lock types is a bit 1056 /* Processing for different lock types is a bit
1040 * more complex. 1057 * more complex.
1041 */ 1058 */
1042 if (fl->fl_end < request->fl_start) 1059 if (fl->fl_end < request->fl_start)
1043 goto next_lock; 1060 continue;
1044 if (fl->fl_start > request->fl_end) 1061 if (fl->fl_start > request->fl_end)
1045 break; 1062 break;
1046 if (request->fl_type == F_UNLCK) 1063 if (request->fl_type == F_UNLCK)
@@ -1059,7 +1076,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
1059 * one (This may happen several times). 1076 * one (This may happen several times).
1060 */ 1077 */
1061 if (added) { 1078 if (added) {
1062 locks_delete_lock(before, &dispose); 1079 locks_delete_lock_ctx(fl,
1080 &ctx->flc_posix_cnt, &dispose);
1063 continue; 1081 continue;
1064 } 1082 }
1065 /* 1083 /*
@@ -1075,15 +1093,13 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
1075 locks_copy_lock(new_fl, request); 1093 locks_copy_lock(new_fl, request);
1076 request = new_fl; 1094 request = new_fl;
1077 new_fl = NULL; 1095 new_fl = NULL;
1078 locks_delete_lock(before, &dispose); 1096 locks_insert_lock_ctx(request,
1079 locks_insert_lock(before, request); 1097 &ctx->flc_posix_cnt, &fl->fl_list);
1098 locks_delete_lock_ctx(fl,
1099 &ctx->flc_posix_cnt, &dispose);
1080 added = true; 1100 added = true;
1081 } 1101 }
1082 } 1102 }
1083 /* Go on to next lock.
1084 */
1085 next_lock:
1086 before = &fl->fl_next;
1087 } 1103 }
1088 1104
1089 /* 1105 /*
@@ -1108,7 +1124,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
1108 goto out; 1124 goto out;
1109 } 1125 }
1110 locks_copy_lock(new_fl, request); 1126 locks_copy_lock(new_fl, request);
1111 locks_insert_lock(before, new_fl); 1127 locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt,
1128 &fl->fl_list);
1112 new_fl = NULL; 1129 new_fl = NULL;
1113 } 1130 }
1114 if (right) { 1131 if (right) {
@@ -1119,7 +1136,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
1119 left = new_fl2; 1136 left = new_fl2;
1120 new_fl2 = NULL; 1137 new_fl2 = NULL;
1121 locks_copy_lock(left, right); 1138 locks_copy_lock(left, right);
1122 locks_insert_lock(before, left); 1139 locks_insert_lock_ctx(left, &ctx->flc_posix_cnt,
1140 &fl->fl_list);
1123 } 1141 }
1124 right->fl_start = request->fl_end + 1; 1142 right->fl_start = request->fl_end + 1;
1125 locks_wake_up_blocks(right); 1143 locks_wake_up_blocks(right);
@@ -1129,7 +1147,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
1129 locks_wake_up_blocks(left); 1147 locks_wake_up_blocks(left);
1130 } 1148 }
1131 out: 1149 out:
1132 spin_unlock(&inode->i_lock); 1150 spin_unlock(&ctx->flc_lock);
1133 /* 1151 /*
1134 * Free any unused locks. 1152 * Free any unused locks.
1135 */ 1153 */
@@ -1199,22 +1217,29 @@ EXPORT_SYMBOL(posix_lock_file_wait);
1199 */ 1217 */
1200int locks_mandatory_locked(struct file *file) 1218int locks_mandatory_locked(struct file *file)
1201{ 1219{
1220 int ret;
1202 struct inode *inode = file_inode(file); 1221 struct inode *inode = file_inode(file);
1222 struct file_lock_context *ctx;
1203 struct file_lock *fl; 1223 struct file_lock *fl;
1204 1224
1225 ctx = inode->i_flctx;
1226 if (!ctx || list_empty_careful(&ctx->flc_posix))
1227 return 0;
1228
1205 /* 1229 /*
1206 * Search the lock list for this inode for any POSIX locks. 1230 * Search the lock list for this inode for any POSIX locks.
1207 */ 1231 */
1208 spin_lock(&inode->i_lock); 1232 spin_lock(&ctx->flc_lock);
1209 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1233 ret = 0;
1210 if (!IS_POSIX(fl)) 1234 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1211 continue;
1212 if (fl->fl_owner != current->files && 1235 if (fl->fl_owner != current->files &&
1213 fl->fl_owner != file) 1236 fl->fl_owner != file) {
1237 ret = -EAGAIN;
1214 break; 1238 break;
1239 }
1215 } 1240 }
1216 spin_unlock(&inode->i_lock); 1241 spin_unlock(&ctx->flc_lock);
1217 return fl ? -EAGAIN : 0; 1242 return ret;
1218} 1243}
1219 1244
1220/** 1245/**
@@ -1294,9 +1319,9 @@ static void lease_clear_pending(struct file_lock *fl, int arg)
1294} 1319}
1295 1320
1296/* We already had a lease on this file; just change its type */ 1321/* We already had a lease on this file; just change its type */
1297int lease_modify(struct file_lock **before, int arg, struct list_head *dispose) 1322int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1298{ 1323{
1299 struct file_lock *fl = *before; 1324 struct file_lock_context *flctx;
1300 int error = assign_type(fl, arg); 1325 int error = assign_type(fl, arg);
1301 1326
1302 if (error) 1327 if (error)
@@ -1306,6 +1331,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
1306 if (arg == F_UNLCK) { 1331 if (arg == F_UNLCK) {
1307 struct file *filp = fl->fl_file; 1332 struct file *filp = fl->fl_file;
1308 1333
1334 flctx = file_inode(filp)->i_flctx;
1309 f_delown(filp); 1335 f_delown(filp);
1310 filp->f_owner.signum = 0; 1336 filp->f_owner.signum = 0;
1311 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); 1337 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
@@ -1313,7 +1339,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
1313 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); 1339 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1314 fl->fl_fasync = NULL; 1340 fl->fl_fasync = NULL;
1315 } 1341 }
1316 locks_delete_lock(before, dispose); 1342 locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose);
1317 } 1343 }
1318 return 0; 1344 return 0;
1319} 1345}
@@ -1329,20 +1355,17 @@ static bool past_time(unsigned long then)
1329 1355
1330static void time_out_leases(struct inode *inode, struct list_head *dispose) 1356static void time_out_leases(struct inode *inode, struct list_head *dispose)
1331{ 1357{
1332 struct file_lock **before; 1358 struct file_lock_context *ctx = inode->i_flctx;
1333 struct file_lock *fl; 1359 struct file_lock *fl, *tmp;
1334 1360
1335 lockdep_assert_held(&inode->i_lock); 1361 lockdep_assert_held(&ctx->flc_lock);
1336 1362
1337 before = &inode->i_flock; 1363 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1338 while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
1339 trace_time_out_leases(inode, fl); 1364 trace_time_out_leases(inode, fl);
1340 if (past_time(fl->fl_downgrade_time)) 1365 if (past_time(fl->fl_downgrade_time))
1341 lease_modify(before, F_RDLCK, dispose); 1366 lease_modify(fl, F_RDLCK, dispose);
1342 if (past_time(fl->fl_break_time)) 1367 if (past_time(fl->fl_break_time))
1343 lease_modify(before, F_UNLCK, dispose); 1368 lease_modify(fl, F_UNLCK, dispose);
1344 if (fl == *before) /* lease_modify may have freed fl */
1345 before = &fl->fl_next;
1346 } 1369 }
1347} 1370}
1348 1371
@@ -1356,11 +1379,12 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1356static bool 1379static bool
1357any_leases_conflict(struct inode *inode, struct file_lock *breaker) 1380any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1358{ 1381{
1382 struct file_lock_context *ctx = inode->i_flctx;
1359 struct file_lock *fl; 1383 struct file_lock *fl;
1360 1384
1361 lockdep_assert_held(&inode->i_lock); 1385 lockdep_assert_held(&ctx->flc_lock);
1362 1386
1363 for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) { 1387 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1364 if (leases_conflict(fl, breaker)) 1388 if (leases_conflict(fl, breaker))
1365 return true; 1389 return true;
1366 } 1390 }
@@ -1384,7 +1408,8 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1384{ 1408{
1385 int error = 0; 1409 int error = 0;
1386 struct file_lock *new_fl; 1410 struct file_lock *new_fl;
1387 struct file_lock *fl, **before; 1411 struct file_lock_context *ctx = inode->i_flctx;
1412 struct file_lock *fl;
1388 unsigned long break_time; 1413 unsigned long break_time;
1389 int want_write = (mode & O_ACCMODE) != O_RDONLY; 1414 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1390 LIST_HEAD(dispose); 1415 LIST_HEAD(dispose);
@@ -1394,7 +1419,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1394 return PTR_ERR(new_fl); 1419 return PTR_ERR(new_fl);
1395 new_fl->fl_flags = type; 1420 new_fl->fl_flags = type;
1396 1421
1397 spin_lock(&inode->i_lock); 1422 /* typically we will check that ctx is non-NULL before calling */
1423 if (!ctx) {
1424 WARN_ON_ONCE(1);
1425 return error;
1426 }
1427
1428 spin_lock(&ctx->flc_lock);
1398 1429
1399 time_out_leases(inode, &dispose); 1430 time_out_leases(inode, &dispose);
1400 1431
@@ -1408,9 +1439,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1408 break_time++; /* so that 0 means no break time */ 1439 break_time++; /* so that 0 means no break time */
1409 } 1440 }
1410 1441
1411 for (before = &inode->i_flock; 1442 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1412 ((fl = *before) != NULL) && IS_LEASE(fl);
1413 before = &fl->fl_next) {
1414 if (!leases_conflict(fl, new_fl)) 1443 if (!leases_conflict(fl, new_fl))
1415 continue; 1444 continue;
1416 if (want_write) { 1445 if (want_write) {
@@ -1419,17 +1448,17 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1419 fl->fl_flags |= FL_UNLOCK_PENDING; 1448 fl->fl_flags |= FL_UNLOCK_PENDING;
1420 fl->fl_break_time = break_time; 1449 fl->fl_break_time = break_time;
1421 } else { 1450 } else {
1422 if (lease_breaking(inode->i_flock)) 1451 if (lease_breaking(fl))
1423 continue; 1452 continue;
1424 fl->fl_flags |= FL_DOWNGRADE_PENDING; 1453 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1425 fl->fl_downgrade_time = break_time; 1454 fl->fl_downgrade_time = break_time;
1426 } 1455 }
1427 if (fl->fl_lmops->lm_break(fl)) 1456 if (fl->fl_lmops->lm_break(fl))
1428 locks_delete_lock(before, &dispose); 1457 locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt,
1458 &dispose);
1429 } 1459 }
1430 1460
1431 fl = inode->i_flock; 1461 if (list_empty(&ctx->flc_lease))
1432 if (!fl || !IS_LEASE(fl))
1433 goto out; 1462 goto out;
1434 1463
1435 if (mode & O_NONBLOCK) { 1464 if (mode & O_NONBLOCK) {
@@ -1439,18 +1468,19 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1439 } 1468 }
1440 1469
1441restart: 1470restart:
1442 break_time = inode->i_flock->fl_break_time; 1471 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1472 break_time = fl->fl_break_time;
1443 if (break_time != 0) 1473 if (break_time != 0)
1444 break_time -= jiffies; 1474 break_time -= jiffies;
1445 if (break_time == 0) 1475 if (break_time == 0)
1446 break_time++; 1476 break_time++;
1447 locks_insert_block(inode->i_flock, new_fl); 1477 locks_insert_block(fl, new_fl);
1448 trace_break_lease_block(inode, new_fl); 1478 trace_break_lease_block(inode, new_fl);
1449 spin_unlock(&inode->i_lock); 1479 spin_unlock(&ctx->flc_lock);
1450 locks_dispose_list(&dispose); 1480 locks_dispose_list(&dispose);
1451 error = wait_event_interruptible_timeout(new_fl->fl_wait, 1481 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1452 !new_fl->fl_next, break_time); 1482 !new_fl->fl_next, break_time);
1453 spin_lock(&inode->i_lock); 1483 spin_lock(&ctx->flc_lock);
1454 trace_break_lease_unblock(inode, new_fl); 1484 trace_break_lease_unblock(inode, new_fl);
1455 locks_delete_block(new_fl); 1485 locks_delete_block(new_fl);
1456 if (error >= 0) { 1486 if (error >= 0) {
@@ -1462,12 +1492,10 @@ restart:
1462 time_out_leases(inode, &dispose); 1492 time_out_leases(inode, &dispose);
1463 if (any_leases_conflict(inode, new_fl)) 1493 if (any_leases_conflict(inode, new_fl))
1464 goto restart; 1494 goto restart;
1465
1466 error = 0; 1495 error = 0;
1467 } 1496 }
1468
1469out: 1497out:
1470 spin_unlock(&inode->i_lock); 1498 spin_unlock(&ctx->flc_lock);
1471 locks_dispose_list(&dispose); 1499 locks_dispose_list(&dispose);
1472 locks_free_lock(new_fl); 1500 locks_free_lock(new_fl);
1473 return error; 1501 return error;
@@ -1487,14 +1515,18 @@ EXPORT_SYMBOL(__break_lease);
1487void lease_get_mtime(struct inode *inode, struct timespec *time) 1515void lease_get_mtime(struct inode *inode, struct timespec *time)
1488{ 1516{
1489 bool has_lease = false; 1517 bool has_lease = false;
1490 struct file_lock *flock; 1518 struct file_lock_context *ctx = inode->i_flctx;
1519 struct file_lock *fl;
1491 1520
1492 if (inode->i_flock) { 1521 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1493 spin_lock(&inode->i_lock); 1522 spin_lock(&ctx->flc_lock);
1494 flock = inode->i_flock; 1523 if (!list_empty(&ctx->flc_lease)) {
1495 if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK)) 1524 fl = list_first_entry(&ctx->flc_lease,
1496 has_lease = true; 1525 struct file_lock, fl_list);
1497 spin_unlock(&inode->i_lock); 1526 if (fl->fl_type == F_WRLCK)
1527 has_lease = true;
1528 }
1529 spin_unlock(&ctx->flc_lock);
1498 } 1530 }
1499 1531
1500 if (has_lease) 1532 if (has_lease)
@@ -1532,20 +1564,22 @@ int fcntl_getlease(struct file *filp)
1532{ 1564{
1533 struct file_lock *fl; 1565 struct file_lock *fl;
1534 struct inode *inode = file_inode(filp); 1566 struct inode *inode = file_inode(filp);
1567 struct file_lock_context *ctx = inode->i_flctx;
1535 int type = F_UNLCK; 1568 int type = F_UNLCK;
1536 LIST_HEAD(dispose); 1569 LIST_HEAD(dispose);
1537 1570
1538 spin_lock(&inode->i_lock); 1571 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1539 time_out_leases(file_inode(filp), &dispose); 1572 spin_lock(&ctx->flc_lock);
1540 for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl); 1573 time_out_leases(file_inode(filp), &dispose);
1541 fl = fl->fl_next) { 1574 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1542 if (fl->fl_file == filp) { 1575 if (fl->fl_file != filp)
1576 continue;
1543 type = target_leasetype(fl); 1577 type = target_leasetype(fl);
1544 break; 1578 break;
1545 } 1579 }
1580 spin_unlock(&ctx->flc_lock);
1581 locks_dispose_list(&dispose);
1546 } 1582 }
1547 spin_unlock(&inode->i_lock);
1548 locks_dispose_list(&dispose);
1549 return type; 1583 return type;
1550} 1584}
1551 1585
@@ -1578,9 +1612,10 @@ check_conflicting_open(const struct dentry *dentry, const long arg)
1578static int 1612static int
1579generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv) 1613generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1580{ 1614{
1581 struct file_lock *fl, **before, **my_before = NULL, *lease; 1615 struct file_lock *fl, *my_fl = NULL, *lease;
1582 struct dentry *dentry = filp->f_path.dentry; 1616 struct dentry *dentry = filp->f_path.dentry;
1583 struct inode *inode = dentry->d_inode; 1617 struct inode *inode = dentry->d_inode;
1618 struct file_lock_context *ctx;
1584 bool is_deleg = (*flp)->fl_flags & FL_DELEG; 1619 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1585 int error; 1620 int error;
1586 LIST_HEAD(dispose); 1621 LIST_HEAD(dispose);
@@ -1588,6 +1623,10 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1588 lease = *flp; 1623 lease = *flp;
1589 trace_generic_add_lease(inode, lease); 1624 trace_generic_add_lease(inode, lease);
1590 1625
1626 ctx = locks_get_lock_context(inode);
1627 if (!ctx)
1628 return -ENOMEM;
1629
1591 /* 1630 /*
1592 * In the delegation case we need mutual exclusion with 1631 * In the delegation case we need mutual exclusion with
1593 * a number of operations that take the i_mutex. We trylock 1632 * a number of operations that take the i_mutex. We trylock
@@ -1606,7 +1645,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1606 return -EINVAL; 1645 return -EINVAL;
1607 } 1646 }
1608 1647
1609 spin_lock(&inode->i_lock); 1648 spin_lock(&ctx->flc_lock);
1610 time_out_leases(inode, &dispose); 1649 time_out_leases(inode, &dispose);
1611 error = check_conflicting_open(dentry, arg); 1650 error = check_conflicting_open(dentry, arg);
1612 if (error) 1651 if (error)
@@ -1621,13 +1660,12 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1621 * except for this filp. 1660 * except for this filp.
1622 */ 1661 */
1623 error = -EAGAIN; 1662 error = -EAGAIN;
1624 for (before = &inode->i_flock; 1663 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1625 ((fl = *before) != NULL) && IS_LEASE(fl);
1626 before = &fl->fl_next) {
1627 if (fl->fl_file == filp) { 1664 if (fl->fl_file == filp) {
1628 my_before = before; 1665 my_fl = fl;
1629 continue; 1666 continue;
1630 } 1667 }
1668
1631 /* 1669 /*
1632 * No exclusive leases if someone else has a lease on 1670 * No exclusive leases if someone else has a lease on
1633 * this file: 1671 * this file:
@@ -1642,9 +1680,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1642 goto out; 1680 goto out;
1643 } 1681 }
1644 1682
1645 if (my_before != NULL) { 1683 if (my_fl != NULL) {
1646 lease = *my_before; 1684 error = lease->fl_lmops->lm_change(my_fl, arg, &dispose);
1647 error = lease->fl_lmops->lm_change(my_before, arg, &dispose);
1648 if (error) 1685 if (error)
1649 goto out; 1686 goto out;
1650 goto out_setup; 1687 goto out_setup;
@@ -1654,7 +1691,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1654 if (!leases_enable) 1691 if (!leases_enable)
1655 goto out; 1692 goto out;
1656 1693
1657 locks_insert_lock(before, lease); 1694 locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease);
1658 /* 1695 /*
1659 * The check in break_lease() is lockless. It's possible for another 1696 * The check in break_lease() is lockless. It's possible for another
1660 * open to race in after we did the earlier check for a conflicting 1697 * open to race in after we did the earlier check for a conflicting
@@ -1666,45 +1703,49 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1666 */ 1703 */
1667 smp_mb(); 1704 smp_mb();
1668 error = check_conflicting_open(dentry, arg); 1705 error = check_conflicting_open(dentry, arg);
1669 if (error) 1706 if (error) {
1670 goto out_unlink; 1707 locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt);
1708 goto out;
1709 }
1671 1710
1672out_setup: 1711out_setup:
1673 if (lease->fl_lmops->lm_setup) 1712 if (lease->fl_lmops->lm_setup)
1674 lease->fl_lmops->lm_setup(lease, priv); 1713 lease->fl_lmops->lm_setup(lease, priv);
1675out: 1714out:
1676 spin_unlock(&inode->i_lock); 1715 spin_unlock(&ctx->flc_lock);
1677 locks_dispose_list(&dispose); 1716 locks_dispose_list(&dispose);
1678 if (is_deleg) 1717 if (is_deleg)
1679 mutex_unlock(&inode->i_mutex); 1718 mutex_unlock(&inode->i_mutex);
1680 if (!error && !my_before) 1719 if (!error && !my_fl)
1681 *flp = NULL; 1720 *flp = NULL;
1682 return error; 1721 return error;
1683out_unlink:
1684 locks_unlink_lock(before);
1685 goto out;
1686} 1722}
1687 1723
1688static int generic_delete_lease(struct file *filp) 1724static int generic_delete_lease(struct file *filp)
1689{ 1725{
1690 int error = -EAGAIN; 1726 int error = -EAGAIN;
1691 struct file_lock *fl, **before; 1727 struct file_lock *fl, *victim = NULL;
1692 struct dentry *dentry = filp->f_path.dentry; 1728 struct dentry *dentry = filp->f_path.dentry;
1693 struct inode *inode = dentry->d_inode; 1729 struct inode *inode = dentry->d_inode;
1730 struct file_lock_context *ctx = inode->i_flctx;
1694 LIST_HEAD(dispose); 1731 LIST_HEAD(dispose);
1695 1732
1696 spin_lock(&inode->i_lock); 1733 if (!ctx) {
1697 time_out_leases(inode, &dispose); 1734 trace_generic_delete_lease(inode, NULL);
1698 for (before = &inode->i_flock; 1735 return error;
1699 ((fl = *before) != NULL) && IS_LEASE(fl); 1736 }
1700 before = &fl->fl_next) { 1737
1701 if (fl->fl_file == filp) 1738 spin_lock(&ctx->flc_lock);
1739 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1740 if (fl->fl_file == filp) {
1741 victim = fl;
1702 break; 1742 break;
1743 }
1703 } 1744 }
1704 trace_generic_delete_lease(inode, fl); 1745 trace_generic_delete_lease(inode, fl);
1705 if (fl && IS_LEASE(fl)) 1746 if (victim)
1706 error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); 1747 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1707 spin_unlock(&inode->i_lock); 1748 spin_unlock(&ctx->flc_lock);
1708 locks_dispose_list(&dispose); 1749 locks_dispose_list(&dispose);
1709 return error; 1750 return error;
1710} 1751}
@@ -2171,7 +2212,7 @@ again:
2171 */ 2212 */
2172 /* 2213 /*
2173 * we need that spin_lock here - it prevents reordering between 2214 * we need that spin_lock here - it prevents reordering between
2174 * update of inode->i_flock and check for it done in close(). 2215 * update of i_flctx->flc_posix and check for it done in close().
2175 * rcu_read_lock() wouldn't do. 2216 * rcu_read_lock() wouldn't do.
2176 */ 2217 */
2177 spin_lock(&current->files->file_lock); 2218 spin_lock(&current->files->file_lock);
@@ -2331,13 +2372,14 @@ out:
2331void locks_remove_posix(struct file *filp, fl_owner_t owner) 2372void locks_remove_posix(struct file *filp, fl_owner_t owner)
2332{ 2373{
2333 struct file_lock lock; 2374 struct file_lock lock;
2375 struct file_lock_context *ctx = file_inode(filp)->i_flctx;
2334 2376
2335 /* 2377 /*
2336 * If there are no locks held on this file, we don't need to call 2378 * If there are no locks held on this file, we don't need to call
2337 * posix_lock_file(). Another process could be setting a lock on this 2379 * posix_lock_file(). Another process could be setting a lock on this
2338 * file at the same time, but we wouldn't remove that lock anyway. 2380 * file at the same time, but we wouldn't remove that lock anyway.
2339 */ 2381 */
2340 if (!file_inode(filp)->i_flock) 2382 if (!ctx || list_empty(&ctx->flc_posix))
2341 return; 2383 return;
2342 2384
2343 lock.fl_type = F_UNLCK; 2385 lock.fl_type = F_UNLCK;
@@ -2358,67 +2400,67 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
2358 2400
2359EXPORT_SYMBOL(locks_remove_posix); 2401EXPORT_SYMBOL(locks_remove_posix);
2360 2402
2403/* The i_flctx must be valid when calling into here */
2404static void
2405locks_remove_flock(struct file *filp)
2406{
2407 struct file_lock fl = {
2408 .fl_owner = filp,
2409 .fl_pid = current->tgid,
2410 .fl_file = filp,
2411 .fl_flags = FL_FLOCK,
2412 .fl_type = F_UNLCK,
2413 .fl_end = OFFSET_MAX,
2414 };
2415 struct file_lock_context *flctx = file_inode(filp)->i_flctx;
2416
2417 if (list_empty(&flctx->flc_flock))
2418 return;
2419
2420 if (filp->f_op->flock)
2421 filp->f_op->flock(filp, F_SETLKW, &fl);
2422 else
2423 flock_lock_file(filp, &fl);
2424
2425 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2426 fl.fl_ops->fl_release_private(&fl);
2427}
2428
2429/* The i_flctx must be valid when calling into here */
2430static void
2431locks_remove_lease(struct file *filp)
2432{
2433 struct inode *inode = file_inode(filp);
2434 struct file_lock_context *ctx = inode->i_flctx;
2435 struct file_lock *fl, *tmp;
2436 LIST_HEAD(dispose);
2437
2438 if (list_empty(&ctx->flc_lease))
2439 return;
2440
2441 spin_lock(&ctx->flc_lock);
2442 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2443 lease_modify(fl, F_UNLCK, &dispose);
2444 spin_unlock(&ctx->flc_lock);
2445 locks_dispose_list(&dispose);
2446}
2447
2361/* 2448/*
2362 * This function is called on the last close of an open file. 2449 * This function is called on the last close of an open file.
2363 */ 2450 */
2364void locks_remove_file(struct file *filp) 2451void locks_remove_file(struct file *filp)
2365{ 2452{
2366 struct inode * inode = file_inode(filp); 2453 if (!file_inode(filp)->i_flctx)
2367 struct file_lock *fl;
2368 struct file_lock **before;
2369 LIST_HEAD(dispose);
2370
2371 if (!inode->i_flock)
2372 return; 2454 return;
2373 2455
2456 /* remove any OFD locks */
2374 locks_remove_posix(filp, filp); 2457 locks_remove_posix(filp, filp);
2375 2458
2376 if (filp->f_op->flock) { 2459 /* remove flock locks */
2377 struct file_lock fl = { 2460 locks_remove_flock(filp);
2378 .fl_owner = filp,
2379 .fl_pid = current->tgid,
2380 .fl_file = filp,
2381 .fl_flags = FL_FLOCK,
2382 .fl_type = F_UNLCK,
2383 .fl_end = OFFSET_MAX,
2384 };
2385 filp->f_op->flock(filp, F_SETLKW, &fl);
2386 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2387 fl.fl_ops->fl_release_private(&fl);
2388 }
2389
2390 spin_lock(&inode->i_lock);
2391 before = &inode->i_flock;
2392
2393 while ((fl = *before) != NULL) {
2394 if (fl->fl_file == filp) {
2395 if (IS_LEASE(fl)) {
2396 lease_modify(before, F_UNLCK, &dispose);
2397 continue;
2398 }
2399
2400 /*
2401 * There's a leftover lock on the list of a type that
2402 * we didn't expect to see. Most likely a classic
2403 * POSIX lock that ended up not getting released
2404 * properly, or that raced onto the list somehow. Log
2405 * some info about it and then just remove it from
2406 * the list.
2407 */
2408 WARN(!IS_FLOCK(fl),
2409 "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
2410 MAJOR(inode->i_sb->s_dev),
2411 MINOR(inode->i_sb->s_dev), inode->i_ino,
2412 fl->fl_type, fl->fl_flags,
2413 fl->fl_start, fl->fl_end);
2414 2461
2415 locks_delete_lock(before, &dispose); 2462 /* remove any leases */
2416 continue; 2463 locks_remove_lease(filp);
2417 }
2418 before = &fl->fl_next;
2419 }
2420 spin_unlock(&inode->i_lock);
2421 locks_dispose_list(&dispose);
2422} 2464}
2423 2465
2424/** 2466/**
@@ -2621,6 +2663,9 @@ static int __init filelock_init(void)
2621{ 2663{
2622 int i; 2664 int i;
2623 2665
2666 flctx_cache = kmem_cache_create("file_lock_ctx",
2667 sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2668
2624 filelock_cache = kmem_cache_create("file_lock_cache", 2669 filelock_cache = kmem_cache_create("file_lock_cache",
2625 sizeof(struct file_lock), 0, SLAB_PANIC, NULL); 2670 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2626 2671
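
The lease hunks above (time_out_leases, __break_lease, generic_add_lease, generic_delete_lease, locks_remove_lease) all switch from scanning the head of i_flock to walking flc_lease under flc_lock. These entries normally arrive via the fcntl(F_SETLEASE) path; a compact userspace example of installing and dropping a read lease, which is roughly what generic_add_lease() links onto flc_lease and generic_delete_lease() later unlinks (path name arbitrary, error handling trimmed):

#define _GNU_SOURCE		/* F_SETLEASE/F_GETLEASE are Linux-specific */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/leasetest", O_RDONLY | O_CREAT, 0600);

	if (fd < 0)
		return 1;

	/* Install a read lease; it ends up on the inode's flc_lease list. */
	if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)
		perror("F_SETLEASE");

	printf("current lease type: %d (F_RDLCK is %d)\n",
	       fcntl(fd, F_GETLEASE), F_RDLCK);

	/* Drop it again; the lease entry is unlinked and freed. */
	fcntl(fd, F_SETLEASE, F_UNLCK);
	close(fd);
	return 0;
}
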
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 7f3f60641344..8cdb2b28a104 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -85,25 +85,30 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
85{ 85{
86 struct inode *inode = state->inode; 86 struct inode *inode = state->inode;
87 struct file_lock *fl; 87 struct file_lock *fl;
88 struct file_lock_context *flctx = inode->i_flctx;
89 struct list_head *list;
88 int status = 0; 90 int status = 0;
89 91
90 if (inode->i_flock == NULL) 92 if (flctx == NULL)
91 goto out; 93 goto out;
92 94
93 /* Protect inode->i_flock using the i_lock */ 95 list = &flctx->flc_posix;
94 spin_lock(&inode->i_lock); 96 spin_lock(&flctx->flc_lock);
95 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 97restart:
96 if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) 98 list_for_each_entry(fl, list, fl_list) {
97 continue;
98 if (nfs_file_open_context(fl->fl_file) != ctx) 99 if (nfs_file_open_context(fl->fl_file) != ctx)
99 continue; 100 continue;
100 spin_unlock(&inode->i_lock); 101 spin_unlock(&flctx->flc_lock);
101 status = nfs4_lock_delegation_recall(fl, state, stateid); 102 status = nfs4_lock_delegation_recall(fl, state, stateid);
102 if (status < 0) 103 if (status < 0)
103 goto out; 104 goto out;
104 spin_lock(&inode->i_lock); 105 spin_lock(&flctx->flc_lock);
105 } 106 }
106 spin_unlock(&inode->i_lock); 107 if (list == &flctx->flc_posix) {
108 list = &flctx->flc_flock;
109 goto restart;
110 }
111 spin_unlock(&flctx->flc_lock);
107out: 112out:
108 return status; 113 return status;
109} 114}
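
Both NFS recovery walkers now have to visit two lists (flc_posix, then flc_flock) where a single i_flock walk used to suffice; the diff expresses that with a list pointer and a goto restart. One way to read that control flow is the loop below over an array of list heads. This is purely an illustrative sketch, not the kernel's code: visit_all_locks() and process_one() are hypothetical names, the fragment assumes the kernel headers that define struct file_lock_context, and dropping flc_lock mid-walk mirrors the NFS code above, which relies on the caller serializing list changes (e.g. nfsi->rwsem):

/*
 * Sketch: visit every POSIX and flock lock on an inode.  process_one()
 * stands in for the per-lock work (e.g. nfs4_lock_delegation_recall).
 */
static int visit_all_locks(struct file_lock_context *flctx,
			   int (*process_one)(struct file_lock *fl))
{
	struct list_head *lists[] = { &flctx->flc_posix, &flctx->flc_flock };
	struct file_lock *fl;
	int i, status = 0;

	spin_lock(&flctx->flc_lock);
	for (i = 0; i < 2; i++) {
		list_for_each_entry(fl, lists[i], fl_list) {
			/* drop the lock around the (possibly sleeping) work */
			spin_unlock(&flctx->flc_lock);
			status = process_one(fl);
			spin_lock(&flctx->flc_lock);
			if (status < 0)
				goto out;
		}
	}
out:
	spin_unlock(&flctx->flc_lock);
	return status;
}
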
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5194933ed419..a3bb22ab68c5 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1366,49 +1366,55 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
1366 struct nfs_inode *nfsi = NFS_I(inode); 1366 struct nfs_inode *nfsi = NFS_I(inode);
1367 struct file_lock *fl; 1367 struct file_lock *fl;
1368 int status = 0; 1368 int status = 0;
1369 struct file_lock_context *flctx = inode->i_flctx;
1370 struct list_head *list;
1369 1371
1370 if (inode->i_flock == NULL) 1372 if (flctx == NULL)
1371 return 0; 1373 return 0;
1372 1374
1375 list = &flctx->flc_posix;
1376
1373 /* Guard against delegation returns and new lock/unlock calls */ 1377 /* Guard against delegation returns and new lock/unlock calls */
1374 down_write(&nfsi->rwsem); 1378 down_write(&nfsi->rwsem);
1375 /* Protect inode->i_flock using the BKL */ 1379 spin_lock(&flctx->flc_lock);
1376 spin_lock(&inode->i_lock); 1380restart:
1377 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1381 list_for_each_entry(fl, list, fl_list) {
1378 if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
1379 continue;
1380 if (nfs_file_open_context(fl->fl_file)->state != state) 1382 if (nfs_file_open_context(fl->fl_file)->state != state)
1381 continue; 1383 continue;
1382 spin_unlock(&inode->i_lock); 1384 spin_unlock(&flctx->flc_lock);
1383 status = ops->recover_lock(state, fl); 1385 status = ops->recover_lock(state, fl);
1384 switch (status) { 1386 switch (status) {
1385 case 0: 1387 case 0:
1386 break; 1388 break;
1387 case -ESTALE: 1389 case -ESTALE:
1388 case -NFS4ERR_ADMIN_REVOKED: 1390 case -NFS4ERR_ADMIN_REVOKED:
1389 case -NFS4ERR_STALE_STATEID: 1391 case -NFS4ERR_STALE_STATEID:
1390 case -NFS4ERR_BAD_STATEID: 1392 case -NFS4ERR_BAD_STATEID:
1391 case -NFS4ERR_EXPIRED: 1393 case -NFS4ERR_EXPIRED:
1392 case -NFS4ERR_NO_GRACE: 1394 case -NFS4ERR_NO_GRACE:
1393 case -NFS4ERR_STALE_CLIENTID: 1395 case -NFS4ERR_STALE_CLIENTID:
1394 case -NFS4ERR_BADSESSION: 1396 case -NFS4ERR_BADSESSION:
1395 case -NFS4ERR_BADSLOT: 1397 case -NFS4ERR_BADSLOT:
1396 case -NFS4ERR_BAD_HIGH_SLOT: 1398 case -NFS4ERR_BAD_HIGH_SLOT:
1397 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1399 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1398 goto out; 1400 goto out;
1399 default: 1401 default:
1400 printk(KERN_ERR "NFS: %s: unhandled error %d\n", 1402 pr_err("NFS: %s: unhandled error %d\n",
1401 __func__, status); 1403 __func__, status);
1402 case -ENOMEM: 1404 case -ENOMEM:
1403 case -NFS4ERR_DENIED: 1405 case -NFS4ERR_DENIED:
1404 case -NFS4ERR_RECLAIM_BAD: 1406 case -NFS4ERR_RECLAIM_BAD:
1405 case -NFS4ERR_RECLAIM_CONFLICT: 1407 case -NFS4ERR_RECLAIM_CONFLICT:
1406 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1408 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1407 status = 0; 1409 status = 0;
1408 } 1410 }
1409 spin_lock(&inode->i_lock); 1411 spin_lock(&flctx->flc_lock);
1410 } 1412 }
1411 spin_unlock(&inode->i_lock); 1413 if (list == &flctx->flc_posix) {
1414 list = &flctx->flc_flock;
1415 goto restart;
1416 }
1417 spin_unlock(&flctx->flc_lock);
1412out: 1418out:
1413 up_write(&nfsi->rwsem); 1419 up_write(&nfsi->rwsem);
1414 return status; 1420 return status;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 2b5e769beb16..29c7f33c9cf1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -826,11 +826,15 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
826 struct nfs_pageio_descriptor *pgio) 826 struct nfs_pageio_descriptor *pgio)
827{ 827{
828 size_t size; 828 size_t size;
829 struct file_lock_context *flctx;
829 830
830 if (prev) { 831 if (prev) {
831 if (!nfs_match_open_context(req->wb_context, prev->wb_context)) 832 if (!nfs_match_open_context(req->wb_context, prev->wb_context))
832 return false; 833 return false;
833 if (req->wb_context->dentry->d_inode->i_flock != NULL && 834 flctx = req->wb_context->dentry->d_inode->i_flctx;
835 if (flctx != NULL &&
836 !(list_empty_careful(&flctx->flc_posix) &&
837 list_empty_careful(&flctx->flc_flock)) &&
834 !nfs_match_lock_context(req->wb_lock_context, 838 !nfs_match_lock_context(req->wb_lock_context,
835 prev->wb_lock_context)) 839 prev->wb_lock_context))
836 return false; 840 return false;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af3af685a9e3..4ae66f416eb9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1091,6 +1091,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
1091{ 1091{
1092 struct nfs_open_context *ctx = nfs_file_open_context(file); 1092 struct nfs_open_context *ctx = nfs_file_open_context(file);
1093 struct nfs_lock_context *l_ctx; 1093 struct nfs_lock_context *l_ctx;
1094 struct file_lock_context *flctx = file_inode(file)->i_flctx;
1094 struct nfs_page *req; 1095 struct nfs_page *req;
1095 int do_flush, status; 1096 int do_flush, status;
1096 /* 1097 /*
@@ -1109,7 +1110,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
1109 do_flush = req->wb_page != page || req->wb_context != ctx; 1110 do_flush = req->wb_page != page || req->wb_context != ctx;
1110 /* for now, flush if more than 1 request in page_group */ 1111 /* for now, flush if more than 1 request in page_group */
1111 do_flush |= req->wb_this_page != req; 1112 do_flush |= req->wb_this_page != req;
1112 if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) { 1113 if (l_ctx && flctx &&
1114 !(list_empty_careful(&flctx->flc_posix) &&
1115 list_empty_careful(&flctx->flc_flock))) {
1113 do_flush |= l_ctx->lockowner.l_owner != current->files 1116 do_flush |= l_ctx->lockowner.l_owner != current->files
1114 || l_ctx->lockowner.l_pid != current->tgid; 1117 || l_ctx->lockowner.l_pid != current->tgid;
1115 } 1118 }
@@ -1170,6 +1173,13 @@ out:
1170 return PageUptodate(page) != 0; 1173 return PageUptodate(page) != 0;
1171} 1174}
1172 1175
1176static bool
1177is_whole_file_wrlock(struct file_lock *fl)
1178{
1179 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
1180 fl->fl_type == F_WRLCK;
1181}
1182
1173/* If we know the page is up to date, and we're not using byte range locks (or 1183/* If we know the page is up to date, and we're not using byte range locks (or
1174 * if we have the whole file locked for writing), it may be more efficient to 1184 * if we have the whole file locked for writing), it may be more efficient to
1175 * extend the write to cover the entire page in order to avoid fragmentation 1185 * extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1190,36 @@ out:
1180 */ 1190 */
1181static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode) 1191static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
1182{ 1192{
1193 int ret;
1194 struct file_lock_context *flctx = inode->i_flctx;
1195 struct file_lock *fl;
1196
1183 if (file->f_flags & O_DSYNC) 1197 if (file->f_flags & O_DSYNC)
1184 return 0; 1198 return 0;
1185 if (!nfs_write_pageuptodate(page, inode)) 1199 if (!nfs_write_pageuptodate(page, inode))
1186 return 0; 1200 return 0;
1187 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) 1201 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
1188 return 1; 1202 return 1;
1189 if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 && 1203 if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
1190 inode->i_flock->fl_end == OFFSET_MAX && 1204 list_empty_careful(&flctx->flc_posix)))
1191 inode->i_flock->fl_type != F_RDLCK)) 1205 return 0;
1192 return 1; 1206
1193 return 0; 1207 /* Check to see if there are whole file write locks */
1208 ret = 0;
1209 spin_lock(&flctx->flc_lock);
1210 if (!list_empty(&flctx->flc_posix)) {
1211 fl = list_first_entry(&flctx->flc_posix, struct file_lock,
1212 fl_list);
1213 if (is_whole_file_wrlock(fl))
1214 ret = 1;
1215 } else if (!list_empty(&flctx->flc_flock)) {
1216 fl = list_first_entry(&flctx->flc_flock, struct file_lock,
1217 fl_list);
1218 if (fl->fl_type == F_WRLCK)
1219 ret = 1;
1220 }
1221 spin_unlock(&flctx->flc_lock);
1222 return ret;
1194} 1223}
1195 1224
1196/* 1225/*
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c06a1ba80d73..532a60cca2fb 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3477,7 +3477,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
3477} 3477}
3478 3478
3479static int 3479static int
3480nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose) 3480nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3481 struct list_head *dispose)
3481{ 3482{
3482 if (arg & F_UNLCK) 3483 if (arg & F_UNLCK)
3483 return lease_modify(onlist, arg, dispose); 3484 return lease_modify(onlist, arg, dispose);
@@ -5556,10 +5557,11 @@ out_nfserr:
5556static bool 5557static bool
5557check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) 5558check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
5558{ 5559{
5559 struct file_lock **flpp; 5560 struct file_lock *fl;
5560 int status = false; 5561 int status = false;
5561 struct file *filp = find_any_file(fp); 5562 struct file *filp = find_any_file(fp);
5562 struct inode *inode; 5563 struct inode *inode;
5564 struct file_lock_context *flctx;
5563 5565
5564 if (!filp) { 5566 if (!filp) {
5565 /* Any valid lock stateid should have some sort of access */ 5567 /* Any valid lock stateid should have some sort of access */
@@ -5568,15 +5570,18 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
5568 } 5570 }
5569 5571
5570 inode = file_inode(filp); 5572 inode = file_inode(filp);
5573 flctx = inode->i_flctx;
5571 5574
5572 spin_lock(&inode->i_lock); 5575 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
5573 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { 5576 spin_lock(&flctx->flc_lock);
5574 if ((*flpp)->fl_owner == (fl_owner_t)lowner) { 5577 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
5575 status = true; 5578 if (fl->fl_owner == (fl_owner_t)lowner) {
5576 break; 5579 status = true;
5580 break;
5581 }
5577 } 5582 }
5583 spin_unlock(&flctx->flc_lock);
5578 } 5584 }
5579 spin_unlock(&inode->i_lock);
5580 fput(filp); 5585 fput(filp);
5581 return status; 5586 return status;
5582} 5587}
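
check_for_locks() above now walks the per-inode flc_posix list under flc_lock rather than chasing the old i_flock/fl_next chain. A condensed sketch of that traversal pattern, using only the fields the hunk itself touches (i_flctx, flc_lock, flc_posix, fl_list, fl_owner); illustrative only, not code from the patch.

#include <linux/fs.h>
#include <linux/list.h>

/* Sketch: does @owner hold any POSIX lock on @inode? */
static bool owner_has_posix_lock(struct inode *inode, fl_owner_t owner)
{
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;
	bool found = false;

	if (!flctx || list_empty_careful(&flctx->flc_posix))
		return false;

	spin_lock(&flctx->flc_lock);
	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
		if (fl->fl_owner == owner) {
			found = true;
			break;
		}
	}
	spin_unlock(&flctx->flc_lock);
	return found;
}
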
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 1eae330193a6..b6d51333ad02 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -48,6 +48,7 @@ struct ocfs2_quota_recovery {
48/* In-memory structure with quota header information */ 48/* In-memory structure with quota header information */
49struct ocfs2_mem_dqinfo { 49struct ocfs2_mem_dqinfo {
50 unsigned int dqi_type; /* Quota type this structure describes */ 50 unsigned int dqi_type; /* Quota type this structure describes */
51 unsigned int dqi_flags; /* Flags OLQF_* */
51 unsigned int dqi_chunks; /* Number of chunks in local quota file */ 52 unsigned int dqi_chunks; /* Number of chunks in local quota file */
52 unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */ 53 unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */
53 unsigned int dqi_syncms; /* How often should we sync with other nodes */ 54 unsigned int dqi_syncms; /* How often should we sync with other nodes */
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index bb72af344475..3d0b63d34225 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -286,7 +286,7 @@ static void olq_update_info(struct buffer_head *bh, void *private)
286 ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data + 286 ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
287 OCFS2_LOCAL_INFO_OFF); 287 OCFS2_LOCAL_INFO_OFF);
288 spin_lock(&dq_data_lock); 288 spin_lock(&dq_data_lock);
289 ldinfo->dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); 289 ldinfo->dqi_flags = cpu_to_le32(oinfo->dqi_flags);
290 ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks); 290 ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks);
291 ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks); 291 ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks);
292 spin_unlock(&dq_data_lock); 292 spin_unlock(&dq_data_lock);
@@ -695,8 +695,8 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
695 /* We don't need the lock and we have to acquire quota file locks 695 /* We don't need the lock and we have to acquire quota file locks
696 * which will later depend on this lock */ 696 * which will later depend on this lock */
697 mutex_unlock(&sb_dqopt(sb)->dqio_mutex); 697 mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
698 info->dqi_maxblimit = 0x7fffffffffffffffLL; 698 info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
699 info->dqi_maxilimit = 0x7fffffffffffffffLL; 699 info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
700 oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS); 700 oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
701 if (!oinfo) { 701 if (!oinfo) {
702 mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota" 702 mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
@@ -731,13 +731,13 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
731 } 731 }
732 ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data + 732 ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
733 OCFS2_LOCAL_INFO_OFF); 733 OCFS2_LOCAL_INFO_OFF);
734 info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags); 734 oinfo->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
735 oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks); 735 oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
736 oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks); 736 oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
737 oinfo->dqi_libh = bh; 737 oinfo->dqi_libh = bh;
738 738
739 /* We crashed when using local quota file? */ 739 /* We crashed when using local quota file? */
740 if (!(info->dqi_flags & OLQF_CLEAN)) { 740 if (!(oinfo->dqi_flags & OLQF_CLEAN)) {
741 rec = OCFS2_SB(sb)->quota_rec; 741 rec = OCFS2_SB(sb)->quota_rec;
742 if (!rec) { 742 if (!rec) {
743 rec = ocfs2_alloc_quota_recovery(); 743 rec = ocfs2_alloc_quota_recovery();
@@ -766,7 +766,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
766 } 766 }
767 767
768 /* Now mark quota file as used */ 768 /* Now mark quota file as used */
769 info->dqi_flags &= ~OLQF_CLEAN; 769 oinfo->dqi_flags &= ~OLQF_CLEAN;
770 status = ocfs2_modify_bh(lqinode, bh, olq_update_info, info); 770 status = ocfs2_modify_bh(lqinode, bh, olq_update_info, info);
771 if (status < 0) { 771 if (status < 0) {
772 mlog_errno(status); 772 mlog_errno(status);
@@ -851,7 +851,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
851 goto out; 851 goto out;
852 852
853 /* Mark local file as clean */ 853 /* Mark local file as clean */
854 info->dqi_flags |= OLQF_CLEAN; 854 oinfo->dqi_flags |= OLQF_CLEAN;
855 status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], 855 status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
856 oinfo->dqi_libh, 856 oinfo->dqi_libh,
857 olq_update_info, 857 olq_update_info,
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c09d6da23c3d..87a1f7679d9b 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1002,36 +1002,6 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
1002 } 1002 }
1003} 1003}
1004 1004
1005/* Handle quota on quotactl */
1006static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
1007{
1008 unsigned int feature[OCFS2_MAXQUOTAS] = {
1009 OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
1010 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
1011
1012 if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
1013 return -EINVAL;
1014
1015 return dquot_enable(sb_dqopt(sb)->files[type], type,
1016 format_id, DQUOT_LIMITS_ENABLED);
1017}
1018
1019/* Handle quota off quotactl */
1020static int ocfs2_quota_off(struct super_block *sb, int type)
1021{
1022 return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
1023}
1024
1025static const struct quotactl_ops ocfs2_quotactl_ops = {
1026 .quota_on_meta = ocfs2_quota_on,
1027 .quota_off = ocfs2_quota_off,
1028 .quota_sync = dquot_quota_sync,
1029 .get_info = dquot_get_dqinfo,
1030 .set_info = dquot_set_dqinfo,
1031 .get_dqblk = dquot_get_dqblk,
1032 .set_dqblk = dquot_set_dqblk,
1033};
1034
1035static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) 1005static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1036{ 1006{
1037 struct dentry *root; 1007 struct dentry *root;
@@ -2087,7 +2057,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
2087 sb->s_op = &ocfs2_sops; 2057 sb->s_op = &ocfs2_sops;
2088 sb->s_d_op = &ocfs2_dentry_ops; 2058 sb->s_d_op = &ocfs2_dentry_ops;
2089 sb->s_export_op = &ocfs2_export_ops; 2059 sb->s_export_op = &ocfs2_export_ops;
2090 sb->s_qcop = &ocfs2_quotactl_ops; 2060 sb->s_qcop = &dquot_quotactl_sysfile_ops;
2091 sb->dq_op = &ocfs2_quota_operations; 2061 sb->dq_op = &ocfs2_quota_operations;
2092 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; 2062 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
2093 sb->s_xattr = ocfs2_xattr_handlers; 2063 sb->s_xattr = ocfs2_xattr_handlers;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 69df5b239844..0ccd4ba3a246 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1248,7 +1248,7 @@ static int ignore_hardlimit(struct dquot *dquot)
1248 1248
1249 return capable(CAP_SYS_RESOURCE) && 1249 return capable(CAP_SYS_RESOURCE) &&
1250 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || 1250 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1251 !(info->dqi_flags & V1_DQF_RSQUASH)); 1251 !(info->dqi_flags & DQF_ROOT_SQUASH));
1252} 1252}
1253 1253
1254/* needs dq_data_lock */ 1254/* needs dq_data_lock */
@@ -2385,14 +2385,84 @@ out:
2385} 2385}
2386EXPORT_SYMBOL(dquot_quota_on_mount); 2386EXPORT_SYMBOL(dquot_quota_on_mount);
2387 2387
2388static inline qsize_t qbtos(qsize_t blocks) 2388static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
2389{ 2389{
2390 return blocks << QIF_DQBLKSIZE_BITS; 2390 int ret;
2391 int type;
2392 struct quota_info *dqopt = sb_dqopt(sb);
2393
2394 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
2395 return -ENOSYS;
2396 /* Accounting cannot be turned on while fs is mounted */
2397 flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
2398 if (!flags)
2399 return -EINVAL;
2400 for (type = 0; type < MAXQUOTAS; type++) {
2401 if (!(flags & qtype_enforce_flag(type)))
2402 continue;
2403 /* Can't enforce without accounting */
2404 if (!sb_has_quota_usage_enabled(sb, type))
2405 return -EINVAL;
2406 ret = dquot_enable(dqopt->files[type], type,
2407 dqopt->info[type].dqi_fmt_id,
2408 DQUOT_LIMITS_ENABLED);
2409 if (ret < 0)
2410 goto out_err;
2411 }
2412 return 0;
2413out_err:
2414 /* Backout enforcement enablement we already did */
2415 for (type--; type >= 0; type--) {
2416 if (flags & qtype_enforce_flag(type))
2417 dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
2418 }
2419 /* Error code translation for better compatibility with XFS */
2420 if (ret == -EBUSY)
2421 ret = -EEXIST;
2422 return ret;
2391} 2423}
2392 2424
2393static inline qsize_t stoqb(qsize_t space) 2425static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
2394{ 2426{
2395 return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; 2427 int ret;
2428 int type;
2429 struct quota_info *dqopt = sb_dqopt(sb);
2430
2431 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
2432 return -ENOSYS;
2433 /*
2434 * We don't support turning off accounting via quotactl. In principle
2435 * quota infrastructure can do this but filesystems don't expect
2436 * userspace to be able to do it.
2437 */
2438 if (flags &
2439 (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
2440 return -EOPNOTSUPP;
2441
2442 /* Filter out limits not enabled */
2443 for (type = 0; type < MAXQUOTAS; type++)
2444 if (!sb_has_quota_limits_enabled(sb, type))
2445 flags &= ~qtype_enforce_flag(type);
2446 /* Nothing left? */
2447 if (!flags)
2448 return -EEXIST;
2449 for (type = 0; type < MAXQUOTAS; type++) {
2450 if (flags & qtype_enforce_flag(type)) {
2451 ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
2452 if (ret < 0)
2453 goto out_err;
2454 }
2455 }
2456 return 0;
2457out_err:
2458 /* Backout enforcement disabling we already did */
2459 for (type--; type >= 0; type--) {
2460 if (flags & qtype_enforce_flag(type))
2461 dquot_enable(dqopt->files[type], type,
2462 dqopt->info[type].dqi_fmt_id,
2463 DQUOT_LIMITS_ENABLED);
2464 }
2465 return ret;
2396} 2466}
2397 2467
2398/* Generic routine for getting common part of quota structure */ 2468/* Generic routine for getting common part of quota structure */
@@ -2444,13 +2514,13 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2444 return -EINVAL; 2514 return -EINVAL;
2445 2515
2446 if (((di->d_fieldmask & QC_SPC_SOFT) && 2516 if (((di->d_fieldmask & QC_SPC_SOFT) &&
2447 stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) || 2517 di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
2448 ((di->d_fieldmask & QC_SPC_HARD) && 2518 ((di->d_fieldmask & QC_SPC_HARD) &&
2449 stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) || 2519 di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
2450 ((di->d_fieldmask & QC_INO_SOFT) && 2520 ((di->d_fieldmask & QC_INO_SOFT) &&
2451 (di->d_ino_softlimit > dqi->dqi_maxilimit)) || 2521 (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
2452 ((di->d_fieldmask & QC_INO_HARD) && 2522 ((di->d_fieldmask & QC_INO_HARD) &&
2453 (di->d_ino_hardlimit > dqi->dqi_maxilimit))) 2523 (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
2454 return -ERANGE; 2524 return -ERANGE;
2455 2525
2456 spin_lock(&dq_data_lock); 2526 spin_lock(&dq_data_lock);
@@ -2577,6 +2647,14 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2577 goto out; 2647 goto out;
2578 } 2648 }
2579 mi = sb_dqopt(sb)->info + type; 2649 mi = sb_dqopt(sb)->info + type;
2650 if (ii->dqi_valid & IIF_FLAGS) {
2651 if (ii->dqi_flags & ~DQF_SETINFO_MASK ||
2652 (ii->dqi_flags & DQF_ROOT_SQUASH &&
2653 mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) {
2654 err = -EINVAL;
2655 goto out;
2656 }
2657 }
2580 spin_lock(&dq_data_lock); 2658 spin_lock(&dq_data_lock);
2581 if (ii->dqi_valid & IIF_BGRACE) 2659 if (ii->dqi_valid & IIF_BGRACE)
2582 mi->dqi_bgrace = ii->dqi_bgrace; 2660 mi->dqi_bgrace = ii->dqi_bgrace;
@@ -2606,6 +2684,17 @@ const struct quotactl_ops dquot_quotactl_ops = {
2606}; 2684};
2607EXPORT_SYMBOL(dquot_quotactl_ops); 2685EXPORT_SYMBOL(dquot_quotactl_ops);
2608 2686
2687const struct quotactl_ops dquot_quotactl_sysfile_ops = {
2688 .quota_enable = dquot_quota_enable,
2689 .quota_disable = dquot_quota_disable,
2690 .quota_sync = dquot_quota_sync,
2691 .get_info = dquot_get_dqinfo,
2692 .set_info = dquot_set_dqinfo,
2693 .get_dqblk = dquot_get_dqblk,
2694 .set_dqblk = dquot_set_dqblk
2695};
2696EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
2697
2609static int do_proc_dqstats(struct ctl_table *table, int write, 2698static int do_proc_dqstats(struct ctl_table *table, int write,
2610 void __user *buffer, size_t *lenp, loff_t *ppos) 2699 void __user *buffer, size_t *lenp, loff_t *ppos)
2611{ 2700{
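
dquot_quota_enable() and dquot_quota_disable() above share one recovery idiom: walk the quota types forward applying a per-type change, and on failure walk back over the types already handled to undo them before returning (with -EBUSY translated to -EEXIST for XFS compatibility). A self-contained sketch of that idiom; enable_type() and disable_type() are stand-ins for dquot_enable()/dquot_disable(), not kernel APIs.

#include <stdio.h>

#define MAXQUOTAS 3

/* Stand-ins for the real per-type enable/disable operations. */
static int enable_type(int type)   { return type == 2 ? -1 : 0; }
static void disable_type(int type) { printf("backing out type %d\n", type); }

static int enable_selected(unsigned int mask)
{
	int type, ret = 0;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(mask & (1u << type)))
			continue;
		ret = enable_type(type);
		if (ret < 0)
			goto out_err;
	}
	return 0;
out_err:
	/* Back out the types we already enabled, in reverse order. */
	for (type--; type >= 0; type--)
		if (mask & (1u << type))
			disable_type(type);
	return ret;
}

int main(void)
{
	return enable_selected(0x7) ? 1 : 0;
}
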
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 6f3856328eea..d14a799c7785 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -66,18 +66,40 @@ static int quota_sync_all(int type)
66 return ret; 66 return ret;
67} 67}
68 68
69unsigned int qtype_enforce_flag(int type)
70{
71 switch (type) {
72 case USRQUOTA:
73 return FS_QUOTA_UDQ_ENFD;
74 case GRPQUOTA:
75 return FS_QUOTA_GDQ_ENFD;
76 case PRJQUOTA:
77 return FS_QUOTA_PDQ_ENFD;
78 }
79 return 0;
80}
81
69static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id, 82static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
70 struct path *path) 83 struct path *path)
71{ 84{
72 if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta) 85 if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
73 return -ENOSYS; 86 return -ENOSYS;
74 if (sb->s_qcop->quota_on_meta) 87 if (sb->s_qcop->quota_enable)
75 return sb->s_qcop->quota_on_meta(sb, type, id); 88 return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
76 if (IS_ERR(path)) 89 if (IS_ERR(path))
77 return PTR_ERR(path); 90 return PTR_ERR(path);
78 return sb->s_qcop->quota_on(sb, type, id, path); 91 return sb->s_qcop->quota_on(sb, type, id, path);
79} 92}
80 93
94static int quota_quotaoff(struct super_block *sb, int type)
95{
96 if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
97 return -ENOSYS;
98 if (sb->s_qcop->quota_disable)
99 return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
100 return sb->s_qcop->quota_off(sb, type);
101}
102
81static int quota_getfmt(struct super_block *sb, int type, void __user *addr) 103static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
82{ 104{
83 __u32 fmt; 105 __u32 fmt;
@@ -208,15 +230,26 @@ static int quota_setquota(struct super_block *sb, int type, qid_t id,
208 return sb->s_qcop->set_dqblk(sb, qid, &fdq); 230 return sb->s_qcop->set_dqblk(sb, qid, &fdq);
209} 231}
210 232
211static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr) 233static int quota_enable(struct super_block *sb, void __user *addr)
212{ 234{
213 __u32 flags; 235 __u32 flags;
214 236
215 if (copy_from_user(&flags, addr, sizeof(flags))) 237 if (copy_from_user(&flags, addr, sizeof(flags)))
216 return -EFAULT; 238 return -EFAULT;
217 if (!sb->s_qcop->set_xstate) 239 if (!sb->s_qcop->quota_enable)
218 return -ENOSYS; 240 return -ENOSYS;
219 return sb->s_qcop->set_xstate(sb, flags, cmd); 241 return sb->s_qcop->quota_enable(sb, flags);
242}
243
244static int quota_disable(struct super_block *sb, void __user *addr)
245{
246 __u32 flags;
247
248 if (copy_from_user(&flags, addr, sizeof(flags)))
249 return -EFAULT;
250 if (!sb->s_qcop->quota_disable)
251 return -ENOSYS;
252 return sb->s_qcop->quota_disable(sb, flags);
220} 253}
221 254
222static int quota_getxstate(struct super_block *sb, void __user *addr) 255static int quota_getxstate(struct super_block *sb, void __user *addr)
@@ -429,9 +462,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
429 case Q_QUOTAON: 462 case Q_QUOTAON:
430 return quota_quotaon(sb, type, cmd, id, path); 463 return quota_quotaon(sb, type, cmd, id, path);
431 case Q_QUOTAOFF: 464 case Q_QUOTAOFF:
432 if (!sb->s_qcop->quota_off) 465 return quota_quotaoff(sb, type);
433 return -ENOSYS;
434 return sb->s_qcop->quota_off(sb, type);
435 case Q_GETFMT: 466 case Q_GETFMT:
436 return quota_getfmt(sb, type, addr); 467 return quota_getfmt(sb, type, addr);
437 case Q_GETINFO: 468 case Q_GETINFO:
@@ -447,8 +478,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
447 return -ENOSYS; 478 return -ENOSYS;
448 return sb->s_qcop->quota_sync(sb, type); 479 return sb->s_qcop->quota_sync(sb, type);
449 case Q_XQUOTAON: 480 case Q_XQUOTAON:
481 return quota_enable(sb, addr);
450 case Q_XQUOTAOFF: 482 case Q_XQUOTAOFF:
451 return quota_setxstate(sb, cmd, addr); 483 return quota_disable(sb, addr);
452 case Q_XQUOTARM: 484 case Q_XQUOTARM:
453 return quota_rmxquota(sb, addr); 485 return quota_rmxquota(sb, addr);
454 case Q_XGETQSTAT: 486 case Q_XGETQSTAT:
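
With the change above, Q_XQUOTAON/Q_XQUOTAOFF are serviced through the new ->quota_enable()/->quota_disable() hooks for any filesystem that provides them (ocfs2 now does, via dquot_quotactl_sysfile_ops), not just XFS. A hedged userspace sketch of the request this path services; the device path is made up, and the filesystem must already have quota accounting enabled for the enforcement flag to be accepted.

/* cc -o xqon xqon.c  -- illustrative only */
#include <stdio.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>

int main(void)
{
	/* Ask the filesystem to start enforcing user quota limits. */
	unsigned int flags = FS_QUOTA_UDQ_ENFD;

	if (quotactl(QCMD(Q_XQUOTAON, USRQUOTA), "/dev/sdb1", 0,
		     (char *)&flags) != 0) {
		perror("Q_XQUOTAON");
		return 1;
	}
	return 0;
}
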
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 469c6848b322..8fe79beced5c 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -169,8 +169,8 @@ static int v1_read_file_info(struct super_block *sb, int type)
169 } 169 }
170 ret = 0; 170 ret = 0;
171 /* limits are stored as unsigned 32-bit data */ 171 /* limits are stored as unsigned 32-bit data */
172 dqopt->info[type].dqi_maxblimit = 0xffffffff; 172 dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
173 dqopt->info[type].dqi_maxilimit = 0xffffffff; 173 dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
174 dqopt->info[type].dqi_igrace = 174 dqopt->info[type].dqi_igrace =
175 dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; 175 dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
176 dqopt->info[type].dqi_bgrace = 176 dqopt->info[type].dqi_bgrace =
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 02751ec695c5..9cb10d7197f7 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -117,16 +117,17 @@ static int v2_read_file_info(struct super_block *sb, int type)
117 qinfo = info->dqi_priv; 117 qinfo = info->dqi_priv;
118 if (version == 0) { 118 if (version == 0) {
119 /* limits are stored as unsigned 32-bit data */ 119 /* limits are stored as unsigned 32-bit data */
120 info->dqi_maxblimit = 0xffffffff; 120 info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
121 info->dqi_maxilimit = 0xffffffff; 121 info->dqi_max_ino_limit = 0xffffffff;
122 } else { 122 } else {
123 /* used space is stored as unsigned 64-bit value */ 123 /* used space is stored as unsigned 64-bit value in bytes */
124 info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */ 124 info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */
125 info->dqi_maxilimit = 0xffffffffffffffffULL; 125 info->dqi_max_ino_limit = 0xffffffffffffffffULL;
126 } 126 }
127 info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); 127 info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
128 info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); 128 info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
129 info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); 129 /* No flags currently supported */
130 info->dqi_flags = 0;
130 qinfo->dqi_sb = sb; 131 qinfo->dqi_sb = sb;
131 qinfo->dqi_type = type; 132 qinfo->dqi_type = type;
132 qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); 133 qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
@@ -157,7 +158,8 @@ static int v2_write_file_info(struct super_block *sb, int type)
157 info->dqi_flags &= ~DQF_INFO_DIRTY; 158 info->dqi_flags &= ~DQF_INFO_DIRTY;
158 dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); 159 dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
159 dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); 160 dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
160 dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); 161 /* No flags currently supported */
162 dinfo.dqi_flags = cpu_to_le32(0);
161 spin_unlock(&dq_data_lock); 163 spin_unlock(&dq_data_lock);
162 dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks); 164 dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
163 dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk); 165 dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
diff --git a/fs/read_write.c b/fs/read_write.c
index c0805c93b6fa..4060691e78f7 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -358,7 +358,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
358 return retval; 358 return retval;
359 } 359 }
360 360
361 if (unlikely(inode->i_flock && mandatory_lock(inode))) { 361 if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
362 retval = locks_mandatory_area( 362 retval = locks_mandatory_area(
363 read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE, 363 read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
364 inode, file, pos, count); 364 inode, file, pos, count);
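
rw_verify_area() now keys the mandatory-locking slow path off i_flctx instead of i_flock; the mandatory_lock() test itself is unchanged. As a reminder of what that test means, here is a small sketch of the classic mode check behind it (setgid set, group-execute clear), written from memory rather than copied from the kernel:

#include <stdbool.h>
#include <sys/stat.h>

/*
 * Sketch: the traditional "mandatory locking candidate" mode test.
 * The filesystem must additionally be mounted with -o mand.
 */
static bool mode_marks_mandatory_locking(mode_t mode)
{
	return (mode & (S_ISGID | S_IXGRP)) == S_ISGID;
}
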
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index 0e0e99bd6bce..c6e17a744c3b 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -2,10 +2,12 @@ config UDF_FS
2 tristate "UDF file system support" 2 tristate "UDF file system support"
3 select CRC_ITU_T 3 select CRC_ITU_T
4 help 4 help
5 This is the new file system used on some CD-ROMs and DVDs. Say Y if 5 This is a file system used on some CD-ROMs and DVDs. Since the
6 you intend to mount DVD discs or CDRW's written in packet mode, or 6 file system is supported by multiple operating systems and is more
7 if written to by other UDF utilities, such as DirectCD. 7 compatible with standard unix file systems, it is also suitable for
8 Please read <file:Documentation/filesystems/udf.txt>. 8 removable USB disks. Say Y if you intend to mount DVD discs or CDRW's
9 written in packet mode, or if you want to use UDF for removable USB
10 disks. Please read <file:Documentation/filesystems/udf.txt>.
9 11
10 To compile this file system support as a module, choose M here: the 12 To compile this file system support as a module, choose M here: the
11 module will be called udf. 13 module will be called udf.
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 5bc71d9a674a..a445d599098d 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -750,7 +750,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
750 /* Are we beyond EOF? */ 750 /* Are we beyond EOF? */
751 if (etype == -1) { 751 if (etype == -1) {
752 int ret; 752 int ret;
753 isBeyondEOF = 1; 753 isBeyondEOF = true;
754 if (count) { 754 if (count) {
755 if (c) 755 if (c)
756 laarr[0] = laarr[1]; 756 laarr[0] = laarr[1];
@@ -792,7 +792,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
792 endnum = c + 1; 792 endnum = c + 1;
793 lastblock = 1; 793 lastblock = 1;
794 } else { 794 } else {
795 isBeyondEOF = 0; 795 isBeyondEOF = false;
796 endnum = startnum = ((count > 2) ? 2 : count); 796 endnum = startnum = ((count > 2) ? 2 : count);
797 797
798 /* if the current extent is in position 0, 798 /* if the current extent is in position 0,
@@ -1288,6 +1288,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
1288 struct kernel_lb_addr *iloc = &iinfo->i_location; 1288 struct kernel_lb_addr *iloc = &iinfo->i_location;
1289 unsigned int link_count; 1289 unsigned int link_count;
1290 unsigned int indirections = 0; 1290 unsigned int indirections = 0;
1291 int bs = inode->i_sb->s_blocksize;
1291 int ret = -EIO; 1292 int ret = -EIO;
1292 1293
1293reread: 1294reread:
@@ -1374,38 +1375,35 @@ reread:
1374 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { 1375 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1375 iinfo->i_efe = 1; 1376 iinfo->i_efe = 1;
1376 iinfo->i_use = 0; 1377 iinfo->i_use = 0;
1377 ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - 1378 ret = udf_alloc_i_data(inode, bs -
1378 sizeof(struct extendedFileEntry)); 1379 sizeof(struct extendedFileEntry));
1379 if (ret) 1380 if (ret)
1380 goto out; 1381 goto out;
1381 memcpy(iinfo->i_ext.i_data, 1382 memcpy(iinfo->i_ext.i_data,
1382 bh->b_data + sizeof(struct extendedFileEntry), 1383 bh->b_data + sizeof(struct extendedFileEntry),
1383 inode->i_sb->s_blocksize - 1384 bs - sizeof(struct extendedFileEntry));
1384 sizeof(struct extendedFileEntry));
1385 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { 1385 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1386 iinfo->i_efe = 0; 1386 iinfo->i_efe = 0;
1387 iinfo->i_use = 0; 1387 iinfo->i_use = 0;
1388 ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - 1388 ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
1389 sizeof(struct fileEntry));
1390 if (ret) 1389 if (ret)
1391 goto out; 1390 goto out;
1392 memcpy(iinfo->i_ext.i_data, 1391 memcpy(iinfo->i_ext.i_data,
1393 bh->b_data + sizeof(struct fileEntry), 1392 bh->b_data + sizeof(struct fileEntry),
1394 inode->i_sb->s_blocksize - sizeof(struct fileEntry)); 1393 bs - sizeof(struct fileEntry));
1395 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { 1394 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1396 iinfo->i_efe = 0; 1395 iinfo->i_efe = 0;
1397 iinfo->i_use = 1; 1396 iinfo->i_use = 1;
1398 iinfo->i_lenAlloc = le32_to_cpu( 1397 iinfo->i_lenAlloc = le32_to_cpu(
1399 ((struct unallocSpaceEntry *)bh->b_data)-> 1398 ((struct unallocSpaceEntry *)bh->b_data)->
1400 lengthAllocDescs); 1399 lengthAllocDescs);
1401 ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - 1400 ret = udf_alloc_i_data(inode, bs -
1402 sizeof(struct unallocSpaceEntry)); 1401 sizeof(struct unallocSpaceEntry));
1403 if (ret) 1402 if (ret)
1404 goto out; 1403 goto out;
1405 memcpy(iinfo->i_ext.i_data, 1404 memcpy(iinfo->i_ext.i_data,
1406 bh->b_data + sizeof(struct unallocSpaceEntry), 1405 bh->b_data + sizeof(struct unallocSpaceEntry),
1407 inode->i_sb->s_blocksize - 1406 bs - sizeof(struct unallocSpaceEntry));
1408 sizeof(struct unallocSpaceEntry));
1409 return 0; 1407 return 0;
1410 } 1408 }
1411 1409
@@ -1489,6 +1487,15 @@ reread:
1489 } 1487 }
1490 inode->i_generation = iinfo->i_unique; 1488 inode->i_generation = iinfo->i_unique;
1491 1489
1490 /*
1491 * Sanity check length of allocation descriptors and extended attrs to
1492 * avoid integer overflows
1493 */
1494 if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
1495 goto out;
1496 /* Now do exact checks */
1497 if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
1498 goto out;
1492 /* Sanity checks for files in ICB so that we don't get confused later */ 1499 /* Sanity checks for files in ICB so that we don't get confused later */
1493 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { 1500 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1494 /* 1501 /*
@@ -1498,8 +1505,7 @@ reread:
1498 if (iinfo->i_lenAlloc != inode->i_size) 1505 if (iinfo->i_lenAlloc != inode->i_size)
1499 goto out; 1506 goto out;
1500 /* File in ICB has to fit in there... */ 1507 /* File in ICB has to fit in there... */
1501 if (inode->i_size > inode->i_sb->s_blocksize - 1508 if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
1502 udf_file_entry_alloc_offset(inode))
1503 goto out; 1509 goto out;
1504 } 1510 }
1505 1511
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3ccb2f11fc76..f169411c4ea0 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1599,7 +1599,7 @@ static noinline int udf_process_sequence(
1599 struct udf_vds_record *curr; 1599 struct udf_vds_record *curr;
1600 struct generic_desc *gd; 1600 struct generic_desc *gd;
1601 struct volDescPtr *vdp; 1601 struct volDescPtr *vdp;
1602 int done = 0; 1602 bool done = false;
1603 uint32_t vdsn; 1603 uint32_t vdsn;
1604 uint16_t ident; 1604 uint16_t ident;
1605 long next_s = 0, next_e = 0; 1605 long next_s = 0, next_e = 0;
@@ -1680,7 +1680,7 @@ static noinline int udf_process_sequence(
1680 lastblock = next_e; 1680 lastblock = next_e;
1681 next_s = next_e = 0; 1681 next_s = next_e = 0;
1682 } else 1682 } else
1683 done = 1; 1683 done = true;
1684 break; 1684 break;
1685 } 1685 }
1686 brelse(bh); 1686 brelse(bh);
@@ -2300,6 +2300,7 @@ static void udf_put_super(struct super_block *sb)
2300 udf_close_lvid(sb); 2300 udf_close_lvid(sb);
2301 brelse(sbi->s_lvid_bh); 2301 brelse(sbi->s_lvid_bh);
2302 udf_sb_free_partitions(sb); 2302 udf_sb_free_partitions(sb);
2303 mutex_destroy(&sbi->s_alloc_mutex);
2303 kfree(sb->s_fs_info); 2304 kfree(sb->s_fs_info);
2304 sb->s_fs_info = NULL; 2305 sb->s_fs_info = NULL;
2305} 2306}
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 53e95b2a1369..a7a3a63bb360 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -91,16 +91,6 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
91 return ptr; 91 return ptr;
92} 92}
93 93
94void
95kmem_free(const void *ptr)
96{
97 if (!is_vmalloc_addr(ptr)) {
98 kfree(ptr);
99 } else {
100 vfree(ptr);
101 }
102}
103
104void * 94void *
105kmem_realloc(const void *ptr, size_t newsize, size_t oldsize, 95kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
106 xfs_km_flags_t flags) 96 xfs_km_flags_t flags)
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 64db0e53edea..cc6b768fc068 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -63,7 +63,10 @@ kmem_flags_convert(xfs_km_flags_t flags)
63extern void *kmem_alloc(size_t, xfs_km_flags_t); 63extern void *kmem_alloc(size_t, xfs_km_flags_t);
64extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t); 64extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
65extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t); 65extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
66extern void kmem_free(const void *); 66static inline void kmem_free(const void *ptr)
67{
68 kvfree(ptr);
69}
67 70
68 71
69extern void *kmem_zalloc_greedy(size_t *, size_t, size_t); 72extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
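
kmem_free() is now a trivial inline wrapper around kvfree(), which performs the same dispatch the removed kmem.c helper did: vfree() for vmalloc addresses, kfree() otherwise. A sketch of that dispatch, using only the primitives visible in the removed hunk:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Sketch: free memory that may have come from kmalloc() or vmalloc(). */
static void free_kmalloc_or_vmalloc(const void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}
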
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 5d38e8b8a913..15105dbc9e28 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -403,7 +403,7 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
403 if (!xfs_sb_version_hasattr2(&mp->m_sb)) { 403 if (!xfs_sb_version_hasattr2(&mp->m_sb)) {
404 xfs_sb_version_addattr2(&mp->m_sb); 404 xfs_sb_version_addattr2(&mp->m_sb);
405 spin_unlock(&mp->m_sb_lock); 405 spin_unlock(&mp->m_sb_lock);
406 xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); 406 xfs_log_sb(tp);
407 } else 407 } else
408 spin_unlock(&mp->m_sb_lock); 408 spin_unlock(&mp->m_sb_lock);
409 } 409 }
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index b5eb4743f75a..61ec015dca16 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -973,7 +973,11 @@ xfs_bmap_local_to_extents(
973 *firstblock = args.fsbno; 973 *firstblock = args.fsbno;
974 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); 974 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
975 975
976 /* initialise the block and copy the data */ 976 /*
977 * Initialise the block and copy the data
978 *
979 * Note: init_fn must set the buffer log item type correctly!
980 */
977 init_fn(tp, bp, ip, ifp); 981 init_fn(tp, bp, ip, ifp);
978 982
979 /* account for the change in fork size and log everything */ 983 /* account for the change in fork size and log everything */
@@ -1221,22 +1225,20 @@ xfs_bmap_add_attrfork(
1221 goto bmap_cancel; 1225 goto bmap_cancel;
1222 if (!xfs_sb_version_hasattr(&mp->m_sb) || 1226 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1223 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) { 1227 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1224 __int64_t sbfields = 0; 1228 bool log_sb = false;
1225 1229
1226 spin_lock(&mp->m_sb_lock); 1230 spin_lock(&mp->m_sb_lock);
1227 if (!xfs_sb_version_hasattr(&mp->m_sb)) { 1231 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1228 xfs_sb_version_addattr(&mp->m_sb); 1232 xfs_sb_version_addattr(&mp->m_sb);
1229 sbfields |= XFS_SB_VERSIONNUM; 1233 log_sb = true;
1230 } 1234 }
1231 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) { 1235 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1232 xfs_sb_version_addattr2(&mp->m_sb); 1236 xfs_sb_version_addattr2(&mp->m_sb);
1233 sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); 1237 log_sb = true;
1234 } 1238 }
1235 if (sbfields) { 1239 spin_unlock(&mp->m_sb_lock);
1236 spin_unlock(&mp->m_sb_lock); 1240 if (log_sb)
1237 xfs_mod_sb(tp, sbfields); 1241 xfs_log_sb(tp);
1238 } else
1239 spin_unlock(&mp->m_sb_lock);
1240 } 1242 }
1241 1243
1242 error = xfs_bmap_finish(&tp, &flist, &committed); 1244 error = xfs_bmap_finish(&tp, &flist, &committed);
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 44db6db86402..b9d8a499d2c4 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -28,6 +28,37 @@ struct xfs_trans;
28extern kmem_zone_t *xfs_bmap_free_item_zone; 28extern kmem_zone_t *xfs_bmap_free_item_zone;
29 29
30/* 30/*
31 * Argument structure for xfs_bmap_alloc.
32 */
33struct xfs_bmalloca {
34 xfs_fsblock_t *firstblock; /* i/o first block allocated */
35 struct xfs_bmap_free *flist; /* bmap freelist */
36 struct xfs_trans *tp; /* transaction pointer */
37 struct xfs_inode *ip; /* incore inode pointer */
38 struct xfs_bmbt_irec prev; /* extent before the new one */
39 struct xfs_bmbt_irec got; /* extent after, or delayed */
40
41 xfs_fileoff_t offset; /* offset in file filling in */
42 xfs_extlen_t length; /* i/o length asked/allocated */
43 xfs_fsblock_t blkno; /* starting block of new extent */
44
45 struct xfs_btree_cur *cur; /* btree cursor */
46 xfs_extnum_t idx; /* current extent index */
47 int nallocs;/* number of extents alloc'd */
48 int logflags;/* flags for transaction logging */
49
50 xfs_extlen_t total; /* total blocks needed for xaction */
51 xfs_extlen_t minlen; /* minimum allocation size (blocks) */
52 xfs_extlen_t minleft; /* amount must be left after alloc */
53 bool eof; /* set if allocating past last extent */
54 bool wasdel; /* replacing a delayed allocation */
55 bool userdata;/* set if is user data */
56 bool aeof; /* allocated space at eof */
57 bool conv; /* overwriting unwritten extents */
58 int flags;
59};
60
61/*
31 * List of extents to be free "later". 62 * List of extents to be free "later".
32 * The list is kept sorted on xbf_startblock. 63 * The list is kept sorted on xbf_startblock.
33 */ 64 */
@@ -149,6 +180,8 @@ void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
149void xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len, 180void xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
150 struct xfs_bmap_free *flist, struct xfs_mount *mp); 181 struct xfs_bmap_free *flist, struct xfs_mount *mp);
151void xfs_bmap_cancel(struct xfs_bmap_free *flist); 182void xfs_bmap_cancel(struct xfs_bmap_free *flist);
183int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
184 int *committed);
152void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork); 185void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
153int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip, 186int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
154 xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork); 187 xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index fbd6da263571..8eb718979383 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -151,10 +151,13 @@ typedef struct xfs_sb {
151 __uint32_t sb_features2; /* additional feature bits */ 151 __uint32_t sb_features2; /* additional feature bits */
152 152
153 /* 153 /*
154 * bad features2 field as a result of failing to pad the sb 154 * bad features2 field as a result of failing to pad the sb structure to
155 * structure to 64 bits. Some machines will be using this field 155 * 64 bits. Some machines will be using this field for features2 bits.
156 * for features2 bits. Easiest just to mark it bad and not use 156 * Easiest just to mark it bad and not use it for anything else.
157 * it for anything else. 157 *
158 * This is not kept up to date in memory; it is always overwritten by
159 * the value in sb_features2 when formatting the incore superblock to
160 * the disk buffer.
158 */ 161 */
159 __uint32_t sb_bad_features2; 162 __uint32_t sb_bad_features2;
160 163
@@ -304,8 +307,8 @@ typedef enum {
304#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT) 307#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
305#define XFS_SB_IFREE XFS_SB_MVAL(IFREE) 308#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
306#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS) 309#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
307#define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2) 310#define XFS_SB_FEATURES2 (XFS_SB_MVAL(FEATURES2) | \
308#define XFS_SB_BAD_FEATURES2 XFS_SB_MVAL(BAD_FEATURES2) 311 XFS_SB_MVAL(BAD_FEATURES2))
309#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT) 312#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
310#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT) 313#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
311#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT) 314#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
@@ -319,9 +322,9 @@ typedef enum {
319 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \ 322 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
320 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \ 323 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
321 XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \ 324 XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
322 XFS_SB_BAD_FEATURES2 | XFS_SB_FEATURES_COMPAT | \ 325 XFS_SB_FEATURES_COMPAT | XFS_SB_FEATURES_RO_COMPAT | \
323 XFS_SB_FEATURES_RO_COMPAT | XFS_SB_FEATURES_INCOMPAT | \ 326 XFS_SB_FEATURES_INCOMPAT | XFS_SB_FEATURES_LOG_INCOMPAT | \
324 XFS_SB_FEATURES_LOG_INCOMPAT | XFS_SB_PQUOTINO) 327 XFS_SB_PQUOTINO)
325 328
326 329
327/* 330/*
@@ -453,13 +456,11 @@ static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp)
453{ 456{
454 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT; 457 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
455 sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT; 458 sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
456 sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT;
457} 459}
458 460
459static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp) 461static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp)
460{ 462{
461 sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT; 463 sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
462 sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
463 if (!sbp->sb_features2) 464 if (!sbp->sb_features2)
464 sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT; 465 sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
465} 466}
@@ -475,7 +476,6 @@ static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
475{ 476{
476 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT; 477 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
477 sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT; 478 sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
478 sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
479} 479}
480 480
481/* 481/*
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 18dc721ca19f..18dc721ca19f 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 752915fa775a..b0a5fe95a3e2 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -40,69 +40,6 @@
40 * Physical superblock buffer manipulations. Shared with libxfs in userspace. 40 * Physical superblock buffer manipulations. Shared with libxfs in userspace.
41 */ 41 */
42 42
43static const struct {
44 short offset;
45 short type; /* 0 = integer
46 * 1 = binary / string (no translation)
47 */
48} xfs_sb_info[] = {
49 { offsetof(xfs_sb_t, sb_magicnum), 0 },
50 { offsetof(xfs_sb_t, sb_blocksize), 0 },
51 { offsetof(xfs_sb_t, sb_dblocks), 0 },
52 { offsetof(xfs_sb_t, sb_rblocks), 0 },
53 { offsetof(xfs_sb_t, sb_rextents), 0 },
54 { offsetof(xfs_sb_t, sb_uuid), 1 },
55 { offsetof(xfs_sb_t, sb_logstart), 0 },
56 { offsetof(xfs_sb_t, sb_rootino), 0 },
57 { offsetof(xfs_sb_t, sb_rbmino), 0 },
58 { offsetof(xfs_sb_t, sb_rsumino), 0 },
59 { offsetof(xfs_sb_t, sb_rextsize), 0 },
60 { offsetof(xfs_sb_t, sb_agblocks), 0 },
61 { offsetof(xfs_sb_t, sb_agcount), 0 },
62 { offsetof(xfs_sb_t, sb_rbmblocks), 0 },
63 { offsetof(xfs_sb_t, sb_logblocks), 0 },
64 { offsetof(xfs_sb_t, sb_versionnum), 0 },
65 { offsetof(xfs_sb_t, sb_sectsize), 0 },
66 { offsetof(xfs_sb_t, sb_inodesize), 0 },
67 { offsetof(xfs_sb_t, sb_inopblock), 0 },
68 { offsetof(xfs_sb_t, sb_fname[0]), 1 },
69 { offsetof(xfs_sb_t, sb_blocklog), 0 },
70 { offsetof(xfs_sb_t, sb_sectlog), 0 },
71 { offsetof(xfs_sb_t, sb_inodelog), 0 },
72 { offsetof(xfs_sb_t, sb_inopblog), 0 },
73 { offsetof(xfs_sb_t, sb_agblklog), 0 },
74 { offsetof(xfs_sb_t, sb_rextslog), 0 },
75 { offsetof(xfs_sb_t, sb_inprogress), 0 },
76 { offsetof(xfs_sb_t, sb_imax_pct), 0 },
77 { offsetof(xfs_sb_t, sb_icount), 0 },
78 { offsetof(xfs_sb_t, sb_ifree), 0 },
79 { offsetof(xfs_sb_t, sb_fdblocks), 0 },
80 { offsetof(xfs_sb_t, sb_frextents), 0 },
81 { offsetof(xfs_sb_t, sb_uquotino), 0 },
82 { offsetof(xfs_sb_t, sb_gquotino), 0 },
83 { offsetof(xfs_sb_t, sb_qflags), 0 },
84 { offsetof(xfs_sb_t, sb_flags), 0 },
85 { offsetof(xfs_sb_t, sb_shared_vn), 0 },
86 { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
87 { offsetof(xfs_sb_t, sb_unit), 0 },
88 { offsetof(xfs_sb_t, sb_width), 0 },
89 { offsetof(xfs_sb_t, sb_dirblklog), 0 },
90 { offsetof(xfs_sb_t, sb_logsectlog), 0 },
91 { offsetof(xfs_sb_t, sb_logsectsize), 0 },
92 { offsetof(xfs_sb_t, sb_logsunit), 0 },
93 { offsetof(xfs_sb_t, sb_features2), 0 },
94 { offsetof(xfs_sb_t, sb_bad_features2), 0 },
95 { offsetof(xfs_sb_t, sb_features_compat), 0 },
96 { offsetof(xfs_sb_t, sb_features_ro_compat), 0 },
97 { offsetof(xfs_sb_t, sb_features_incompat), 0 },
98 { offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
99 { offsetof(xfs_sb_t, sb_crc), 0 },
100 { offsetof(xfs_sb_t, sb_pad), 0 },
101 { offsetof(xfs_sb_t, sb_pquotino), 0 },
102 { offsetof(xfs_sb_t, sb_lsn), 0 },
103 { sizeof(xfs_sb_t), 0 }
104};
105
106/* 43/*
107 * Reference counting access wrappers to the perag structures. 44 * Reference counting access wrappers to the perag structures.
108 * Because we never free per-ag structures, the only thing we 45 * Because we never free per-ag structures, the only thing we
@@ -461,58 +398,49 @@ xfs_sb_from_disk(
461 __xfs_sb_from_disk(to, from, true); 398 __xfs_sb_from_disk(to, from, true);
462} 399}
463 400
464static inline void 401static void
465xfs_sb_quota_to_disk( 402xfs_sb_quota_to_disk(
466 xfs_dsb_t *to, 403 struct xfs_dsb *to,
467 xfs_sb_t *from, 404 struct xfs_sb *from)
468 __int64_t *fields)
469{ 405{
470 __uint16_t qflags = from->sb_qflags; 406 __uint16_t qflags = from->sb_qflags;
471 407
408 to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
409 if (xfs_sb_version_has_pquotino(from)) {
410 to->sb_qflags = cpu_to_be16(from->sb_qflags);
411 to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
412 to->sb_pquotino = cpu_to_be64(from->sb_pquotino);
413 return;
414 }
415
472 /* 416 /*
473 * We need to do these manipilations only if we are working 417 * The in-core version of sb_qflags do not have XFS_OQUOTA_*
474 * with an older version of on-disk superblock. 418 * flags, whereas the on-disk version does. So, convert incore
419 * XFS_{PG}QUOTA_* flags to on-disk XFS_OQUOTA_* flags.
475 */ 420 */
476 if (xfs_sb_version_has_pquotino(from)) 421 qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
477 return; 422 XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
478 423
479 if (*fields & XFS_SB_QFLAGS) { 424 if (from->sb_qflags &
480 /* 425 (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
481 * The in-core version of sb_qflags do not have 426 qflags |= XFS_OQUOTA_ENFD;
482 * XFS_OQUOTA_* flags, whereas the on-disk version 427 if (from->sb_qflags &
483 * does. So, convert incore XFS_{PG}QUOTA_* flags 428 (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
484 * to on-disk XFS_OQUOTA_* flags. 429 qflags |= XFS_OQUOTA_CHKD;
485 */ 430 to->sb_qflags = cpu_to_be16(qflags);
486 qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
487 XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
488
489 if (from->sb_qflags &
490 (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
491 qflags |= XFS_OQUOTA_ENFD;
492 if (from->sb_qflags &
493 (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
494 qflags |= XFS_OQUOTA_CHKD;
495 to->sb_qflags = cpu_to_be16(qflags);
496 *fields &= ~XFS_SB_QFLAGS;
497 }
498 431
499 /* 432 /*
500 * GQUOTINO and PQUOTINO cannot be used together in versions of 433 * GQUOTINO and PQUOTINO cannot be used together in versions
501 * superblock that do not have pquotino. from->sb_flags tells us which 434 * of superblock that do not have pquotino. from->sb_flags
502 * quota is active and should be copied to disk. If neither are active, 435 * tells us which quota is active and should be copied to
503 * make sure we write NULLFSINO to the sb_gquotino field as a quota 436 * disk. If neither are active, we should NULL the inode.
504 * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
505 * bit is set.
506 * 437 *
507 * Note that we don't need to handle the sb_uquotino or sb_pquotino here 438 * In all cases, the separate pquotino must remain 0 because it
 508 * as they do not require any translation. Hence the main sb field loop 439 * is beyond the "end" of the valid non-pquotino superblock.
509 * will write them appropriately from the in-core superblock.
510 */ 440 */
511 if ((*fields & XFS_SB_GQUOTINO) && 441 if (from->sb_qflags & XFS_GQUOTA_ACCT)
512 (from->sb_qflags & XFS_GQUOTA_ACCT))
513 to->sb_gquotino = cpu_to_be64(from->sb_gquotino); 442 to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
514 else if ((*fields & XFS_SB_PQUOTINO) && 443 else if (from->sb_qflags & XFS_PQUOTA_ACCT)
515 (from->sb_qflags & XFS_PQUOTA_ACCT))
516 to->sb_gquotino = cpu_to_be64(from->sb_pquotino); 444 to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
517 else { 445 else {
518 /* 446 /*
@@ -526,63 +454,78 @@ xfs_sb_quota_to_disk(
526 to->sb_gquotino = cpu_to_be64(NULLFSINO); 454 to->sb_gquotino = cpu_to_be64(NULLFSINO);
527 } 455 }
528 456
529 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); 457 to->sb_pquotino = 0;
530} 458}
531 459
532/*
533 * Copy in core superblock to ondisk one.
534 *
535 * The fields argument is mask of superblock fields to copy.
536 */
537void 460void
538xfs_sb_to_disk( 461xfs_sb_to_disk(
539 xfs_dsb_t *to, 462 struct xfs_dsb *to,
540 xfs_sb_t *from, 463 struct xfs_sb *from)
541 __int64_t fields)
542{ 464{
543 xfs_caddr_t to_ptr = (xfs_caddr_t)to; 465 xfs_sb_quota_to_disk(to, from);
544 xfs_caddr_t from_ptr = (xfs_caddr_t)from;
545 xfs_sb_field_t f;
546 int first;
547 int size;
548
549 ASSERT(fields);
550 if (!fields)
551 return;
552 466
553 /* We should never write the crc here, it's updated in the IO path */ 467 to->sb_magicnum = cpu_to_be32(from->sb_magicnum);
554 fields &= ~XFS_SB_CRC; 468 to->sb_blocksize = cpu_to_be32(from->sb_blocksize);
555 469 to->sb_dblocks = cpu_to_be64(from->sb_dblocks);
556 xfs_sb_quota_to_disk(to, from, &fields); 470 to->sb_rblocks = cpu_to_be64(from->sb_rblocks);
557 while (fields) { 471 to->sb_rextents = cpu_to_be64(from->sb_rextents);
558 f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); 472 memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
559 first = xfs_sb_info[f].offset; 473 to->sb_logstart = cpu_to_be64(from->sb_logstart);
560 size = xfs_sb_info[f + 1].offset - first; 474 to->sb_rootino = cpu_to_be64(from->sb_rootino);
561 475 to->sb_rbmino = cpu_to_be64(from->sb_rbmino);
562 ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1); 476 to->sb_rsumino = cpu_to_be64(from->sb_rsumino);
563 477 to->sb_rextsize = cpu_to_be32(from->sb_rextsize);
564 if (size == 1 || xfs_sb_info[f].type == 1) { 478 to->sb_agblocks = cpu_to_be32(from->sb_agblocks);
565 memcpy(to_ptr + first, from_ptr + first, size); 479 to->sb_agcount = cpu_to_be32(from->sb_agcount);
566 } else { 480 to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks);
567 switch (size) { 481 to->sb_logblocks = cpu_to_be32(from->sb_logblocks);
568 case 2: 482 to->sb_versionnum = cpu_to_be16(from->sb_versionnum);
569 *(__be16 *)(to_ptr + first) = 483 to->sb_sectsize = cpu_to_be16(from->sb_sectsize);
570 cpu_to_be16(*(__u16 *)(from_ptr + first)); 484 to->sb_inodesize = cpu_to_be16(from->sb_inodesize);
571 break; 485 to->sb_inopblock = cpu_to_be16(from->sb_inopblock);
572 case 4: 486 memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
573 *(__be32 *)(to_ptr + first) = 487 to->sb_blocklog = from->sb_blocklog;
574 cpu_to_be32(*(__u32 *)(from_ptr + first)); 488 to->sb_sectlog = from->sb_sectlog;
575 break; 489 to->sb_inodelog = from->sb_inodelog;
576 case 8: 490 to->sb_inopblog = from->sb_inopblog;
577 *(__be64 *)(to_ptr + first) = 491 to->sb_agblklog = from->sb_agblklog;
578 cpu_to_be64(*(__u64 *)(from_ptr + first)); 492 to->sb_rextslog = from->sb_rextslog;
579 break; 493 to->sb_inprogress = from->sb_inprogress;
580 default: 494 to->sb_imax_pct = from->sb_imax_pct;
581 ASSERT(0); 495 to->sb_icount = cpu_to_be64(from->sb_icount);
582 } 496 to->sb_ifree = cpu_to_be64(from->sb_ifree);
583 } 497 to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks);
498 to->sb_frextents = cpu_to_be64(from->sb_frextents);
584 499
585 fields &= ~(1LL << f); 500 to->sb_flags = from->sb_flags;
501 to->sb_shared_vn = from->sb_shared_vn;
502 to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt);
503 to->sb_unit = cpu_to_be32(from->sb_unit);
504 to->sb_width = cpu_to_be32(from->sb_width);
505 to->sb_dirblklog = from->sb_dirblklog;
506 to->sb_logsectlog = from->sb_logsectlog;
507 to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize);
508 to->sb_logsunit = cpu_to_be32(from->sb_logsunit);
509
510 /*
511 * We need to ensure that bad_features2 always matches features2.
512 * Hence we enforce that here rather than having to remember to do it
513 * everywhere else that updates features2.
514 */
515 from->sb_bad_features2 = from->sb_features2;
516 to->sb_features2 = cpu_to_be32(from->sb_features2);
517 to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2);
518
519 if (xfs_sb_version_hascrc(from)) {
520 to->sb_features_compat = cpu_to_be32(from->sb_features_compat);
521 to->sb_features_ro_compat =
522 cpu_to_be32(from->sb_features_ro_compat);
523 to->sb_features_incompat =
524 cpu_to_be32(from->sb_features_incompat);
525 to->sb_features_log_incompat =
526 cpu_to_be32(from->sb_features_log_incompat);
527 to->sb_pad = 0;
528 to->sb_lsn = cpu_to_be64(from->sb_lsn);
586 } 529 }
587} 530}
588 531
@@ -816,42 +759,51 @@ xfs_initialize_perag_data(
816} 759}
817 760
818/* 761/*
819 * xfs_mod_sb() can be used to copy arbitrary changes to the 762 * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock
820 * in-core superblock into the superblock buffer to be logged. 763 * into the superblock buffer to be logged. It does not provide the higher
821 * It does not provide the higher level of locking that is 764 * level of locking that is needed to protect the in-core superblock from
822 * needed to protect the in-core superblock from concurrent 765 * concurrent access.
823 * access.
824 */ 766 */
825void 767void
826xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) 768xfs_log_sb(
769 struct xfs_trans *tp)
827{ 770{
828 xfs_buf_t *bp; 771 struct xfs_mount *mp = tp->t_mountp;
829 int first; 772 struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0);
830 int last;
831 xfs_mount_t *mp;
832 xfs_sb_field_t f;
833
834 ASSERT(fields);
835 if (!fields)
836 return;
837 mp = tp->t_mountp;
838 bp = xfs_trans_getsb(tp, mp, 0);
839 first = sizeof(xfs_sb_t);
840 last = 0;
841
842 /* translate/copy */
843 773
844 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields); 774 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
775 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
776 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
777}
845 778
846 /* find modified range */ 779/*
847 f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields); 780 * xfs_sync_sb
848 ASSERT((1LL << f) & XFS_SB_MOD_BITS); 781 *
849 last = xfs_sb_info[f + 1].offset - 1; 782 * Sync the superblock to disk.
783 *
784 * Note that the caller is responsible for checking the frozen state of the
785 * filesystem. This procedure uses the non-blocking transaction allocator and
786 * thus will allow modifications to a frozen fs. This is required because this
787 * code can be called during the process of freezing where use of the high-level
788 * allocator would deadlock.
789 */
790int
791xfs_sync_sb(
792 struct xfs_mount *mp,
793 bool wait)
794{
795 struct xfs_trans *tp;
796 int error;
850 797
851 f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); 798 tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
852 ASSERT((1LL << f) & XFS_SB_MOD_BITS); 799 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
853 first = xfs_sb_info[f].offset; 800 if (error) {
801 xfs_trans_cancel(tp, 0);
802 return error;
803 }
854 804
855 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); 805 xfs_log_sb(tp);
856 xfs_trans_log_buf(tp, bp, first, last); 806 if (wait)
807 xfs_trans_set_sync(tp);
808 return xfs_trans_commit(tp, 0);
857} 809}
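
The hunk above replaces the old field-mask interface with xfs_log_sb(), which always logs the whole superblock, plus xfs_sync_sb() as a self-contained "modify in core, then write back" helper. A minimal sketch of how a caller could use the new pair after this change; the function name and the field being updated are illustrative only, not taken from the patch:

/* Hypothetical caller: update an in-core superblock field, then sync it. */
int
xfs_example_update_features2(
        struct xfs_mount        *mp,
        __uint32_t              features2)
{
        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_features2 = features2;      /* in-core change only */
        spin_unlock(&mp->m_sb_lock);

        /*
         * xfs_sync_sb() allocates an XFS_TRANS_SB_CHANGE transaction,
         * calls xfs_log_sb() and commits; 'true' makes the commit synchronous.
         */
        return xfs_sync_sb(mp, true);
}
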
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 8eb1c54bafbf..b25bb9a343f3 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -27,11 +27,12 @@ extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
27extern void xfs_perag_put(struct xfs_perag *pag); 27extern void xfs_perag_put(struct xfs_perag *pag);
28extern int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t); 28extern int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
29 29
30extern void xfs_sb_calc_crc(struct xfs_buf *); 30extern void xfs_sb_calc_crc(struct xfs_buf *bp);
31extern void xfs_mod_sb(struct xfs_trans *, __int64_t); 31extern void xfs_log_sb(struct xfs_trans *tp);
32extern void xfs_sb_mount_common(struct xfs_mount *, struct xfs_sb *); 32extern int xfs_sync_sb(struct xfs_mount *mp, bool wait);
33extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *); 33extern void xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp);
34extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t); 34extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from);
35extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
35extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp); 36extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp);
36 37
37#endif /* __XFS_SB_H__ */ 38#endif /* __XFS_SB_H__ */
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 82404da2ca67..8dda4b321343 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -82,7 +82,7 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
82#define XFS_TRANS_ATTR_RM 23 82#define XFS_TRANS_ATTR_RM 23
83#define XFS_TRANS_ATTR_FLAG 24 83#define XFS_TRANS_ATTR_FLAG 24
84#define XFS_TRANS_CLEAR_AGI_BUCKET 25 84#define XFS_TRANS_CLEAR_AGI_BUCKET 25
85#define XFS_TRANS_QM_SBCHANGE 26 85#define XFS_TRANS_SB_CHANGE 26
86/* 86/*
87 * Dummy entries since we use the transaction type to index into the 87 * Dummy entries since we use the transaction type to index into the
88 * trans_type[] in xlog_recover_print_trans_head() 88 * trans_type[] in xlog_recover_print_trans_head()
@@ -95,17 +95,15 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
95#define XFS_TRANS_QM_DQCLUSTER 32 95#define XFS_TRANS_QM_DQCLUSTER 32
96#define XFS_TRANS_QM_QINOCREATE 33 96#define XFS_TRANS_QM_QINOCREATE 33
97#define XFS_TRANS_QM_QUOTAOFF_END 34 97#define XFS_TRANS_QM_QUOTAOFF_END 34
98#define XFS_TRANS_SB_UNIT 35 98#define XFS_TRANS_FSYNC_TS 35
99#define XFS_TRANS_FSYNC_TS 36 99#define XFS_TRANS_GROWFSRT_ALLOC 36
100#define XFS_TRANS_GROWFSRT_ALLOC 37 100#define XFS_TRANS_GROWFSRT_ZERO 37
101#define XFS_TRANS_GROWFSRT_ZERO 38 101#define XFS_TRANS_GROWFSRT_FREE 38
102#define XFS_TRANS_GROWFSRT_FREE 39 102#define XFS_TRANS_SWAPEXT 39
103#define XFS_TRANS_SWAPEXT 40 103#define XFS_TRANS_CHECKPOINT 40
104#define XFS_TRANS_SB_COUNT 41 104#define XFS_TRANS_ICREATE 41
105#define XFS_TRANS_CHECKPOINT 42 105#define XFS_TRANS_CREATE_TMPFILE 42
106#define XFS_TRANS_ICREATE 43 106#define XFS_TRANS_TYPE_MAX 43
107#define XFS_TRANS_CREATE_TMPFILE 44
108#define XFS_TRANS_TYPE_MAX 44
109/* new transaction types need to be reflected in xfs_logprint(8) */ 107/* new transaction types need to be reflected in xfs_logprint(8) */
110 108
111#define XFS_TRANS_TYPES \ 109#define XFS_TRANS_TYPES \
@@ -113,7 +111,6 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
113 { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \ 111 { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
114 { XFS_TRANS_INACTIVE, "INACTIVE" }, \ 112 { XFS_TRANS_INACTIVE, "INACTIVE" }, \
115 { XFS_TRANS_CREATE, "CREATE" }, \ 113 { XFS_TRANS_CREATE, "CREATE" }, \
116 { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \
117 { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \ 114 { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
118 { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \ 115 { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
119 { XFS_TRANS_REMOVE, "REMOVE" }, \ 116 { XFS_TRANS_REMOVE, "REMOVE" }, \
@@ -134,23 +131,23 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
134 { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \ 131 { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
135 { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \ 132 { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
136 { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \ 133 { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
137 { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \ 134 { XFS_TRANS_SB_CHANGE, "SBCHANGE" }, \
135 { XFS_TRANS_DUMMY1, "DUMMY1" }, \
136 { XFS_TRANS_DUMMY2, "DUMMY2" }, \
138 { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \ 137 { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
139 { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \ 138 { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
140 { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \ 139 { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
141 { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \ 140 { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
142 { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \ 141 { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
143 { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \ 142 { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
144 { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
145 { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \ 143 { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
146 { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \ 144 { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
147 { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \ 145 { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
148 { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \ 146 { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
149 { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \ 147 { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
150 { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
151 { XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \ 148 { XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \
152 { XFS_TRANS_DUMMY1, "DUMMY1" }, \ 149 { XFS_TRANS_ICREATE, "ICREATE" }, \
153 { XFS_TRANS_DUMMY2, "DUMMY2" }, \ 150 { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \
154 { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" } 151 { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
155 152
156/* 153/*
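
The renumbering above keeps the transaction type space dense after dropping SB_UNIT and SB_COUNT, and the XFS_TRANS_TYPES pairs are what trace and log-print code uses to turn a type number back into a name. A hedged sketch of that kind of lookup; the helper itself is invented for illustration:

/* Hypothetical helper: map a transaction type number to its name string. */
static const char *
xfs_example_trans_type_name(
        uint                    type)
{
        static const struct {
                uint            value;
                const char      *name;
        } names[] = { XFS_TRANS_TYPES };
        unsigned int            i;

        for (i = 0; i < ARRAY_SIZE(names); i++)
                if (names[i].value == type)
                        return names[i].name;
        return "UNKNOWN";
}
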
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index c80c5236c3da..e7e26bd6468f 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -178,6 +178,8 @@ xfs_symlink_local_to_remote(
178 struct xfs_mount *mp = ip->i_mount; 178 struct xfs_mount *mp = ip->i_mount;
179 char *buf; 179 char *buf;
180 180
181 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
182
181 if (!xfs_sb_version_hascrc(&mp->m_sb)) { 183 if (!xfs_sb_version_hascrc(&mp->m_sb)) {
182 bp->b_ops = NULL; 184 bp->b_ops = NULL;
183 memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); 185 memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 6c1330f29050..68cb1e7bf2bb 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -716,17 +716,6 @@ xfs_calc_clear_agi_bucket_reservation(
716} 716}
717 717
718/* 718/*
719 * Clearing the quotaflags in the superblock.
720 * the super block for changing quota flags: sector size
721 */
722STATIC uint
723xfs_calc_qm_sbchange_reservation(
724 struct xfs_mount *mp)
725{
726 return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
727}
728
729/*
730 * Adjusting quota limits. 719 * Adjusting quota limits.
731 * the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot) 720 * the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
732 */ 721 */
@@ -864,9 +853,6 @@ xfs_trans_resv_calc(
864 * The following transactions are logged in logical format with 853 * The following transactions are logged in logical format with
865 * a default log count. 854 * a default log count.
866 */ 855 */
867 resp->tr_qm_sbchange.tr_logres = xfs_calc_qm_sbchange_reservation(mp);
868 resp->tr_qm_sbchange.tr_logcount = XFS_DEFAULT_LOG_COUNT;
869
870 resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp); 856 resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
871 resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT; 857 resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
872 858
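
With tr_qm_sbchange gone, superblock-only quota flag updates are expected to go through the generic superblock reservation instead, which (like the helper deleted above) covers a single sector-sized buffer. A hedged sketch of how such a reservation is computed; the function name is illustrative and xfs_calc_buf_res() is file-local to xfs_trans_resv.c:

/* Hypothetical reservation: one superblock sector logged in one buffer. */
STATIC uint
xfs_example_sb_change_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}
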
diff --git a/fs/xfs/libxfs/xfs_trans_resv.h b/fs/xfs/libxfs/xfs_trans_resv.h
index 1097d14cd583..2d5bdfce6d8f 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.h
+++ b/fs/xfs/libxfs/xfs_trans_resv.h
@@ -56,7 +56,6 @@ struct xfs_trans_resv {
56 struct xfs_trans_res tr_growrtalloc; /* grow realtime allocations */ 56 struct xfs_trans_res tr_growrtalloc; /* grow realtime allocations */
57 struct xfs_trans_res tr_growrtzero; /* grow realtime zeroing */ 57 struct xfs_trans_res tr_growrtzero; /* grow realtime zeroing */
58 struct xfs_trans_res tr_growrtfree; /* grow realtime freeing */ 58 struct xfs_trans_res tr_growrtfree; /* grow realtime freeing */
59 struct xfs_trans_res tr_qm_sbchange; /* change quota flags */
60 struct xfs_trans_res tr_qm_setqlim; /* adjust quota limits */ 59 struct xfs_trans_res tr_qm_setqlim; /* adjust quota limits */
61 struct xfs_trans_res tr_qm_dqalloc; /* allocate quota on disk */ 60 struct xfs_trans_res tr_qm_dqalloc; /* allocate quota on disk */
62 struct xfs_trans_res tr_qm_quotaoff; /* turn quota off */ 61 struct xfs_trans_res tr_qm_quotaoff; /* turn quota off */
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index b79dc66b2ecd..b79dc66b2ecd 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 18e2f3bbae5e..3a9b7a1b8704 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -135,30 +135,22 @@ xfs_setfilesize_trans_alloc(
135 */ 135 */
136STATIC int 136STATIC int
137xfs_setfilesize( 137xfs_setfilesize(
138 struct xfs_ioend *ioend) 138 struct xfs_inode *ip,
139 struct xfs_trans *tp,
140 xfs_off_t offset,
141 size_t size)
139{ 142{
140 struct xfs_inode *ip = XFS_I(ioend->io_inode);
141 struct xfs_trans *tp = ioend->io_append_trans;
142 xfs_fsize_t isize; 143 xfs_fsize_t isize;
143 144
144 /*
145 * The transaction may have been allocated in the I/O submission thread,
146 * thus we need to mark ourselves as beeing in a transaction manually.
147 * Similarly for freeze protection.
148 */
149 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
150 rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
151 0, 1, _THIS_IP_);
152
153 xfs_ilock(ip, XFS_ILOCK_EXCL); 145 xfs_ilock(ip, XFS_ILOCK_EXCL);
154 isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size); 146 isize = xfs_new_eof(ip, offset + size);
155 if (!isize) { 147 if (!isize) {
156 xfs_iunlock(ip, XFS_ILOCK_EXCL); 148 xfs_iunlock(ip, XFS_ILOCK_EXCL);
157 xfs_trans_cancel(tp, 0); 149 xfs_trans_cancel(tp, 0);
158 return 0; 150 return 0;
159 } 151 }
160 152
161 trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); 153 trace_xfs_setfilesize(ip, offset, size);
162 154
163 ip->i_d.di_size = isize; 155 ip->i_d.di_size = isize;
164 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 156 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -167,6 +159,25 @@ xfs_setfilesize(
167 return xfs_trans_commit(tp, 0); 159 return xfs_trans_commit(tp, 0);
168} 160}
169 161
162STATIC int
163xfs_setfilesize_ioend(
164 struct xfs_ioend *ioend)
165{
166 struct xfs_inode *ip = XFS_I(ioend->io_inode);
167 struct xfs_trans *tp = ioend->io_append_trans;
168
169 /*
170 * The transaction may have been allocated in the I/O submission thread,
171 * thus we need to mark ourselves as being in a transaction manually.
172 * Similarly for freeze protection.
173 */
174 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
175 rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
176 0, 1, _THIS_IP_);
177
178 return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
179}
180
170/* 181/*
171 * Schedule IO completion handling on the final put of an ioend. 182 * Schedule IO completion handling on the final put of an ioend.
172 * 183 *
@@ -182,8 +193,7 @@ xfs_finish_ioend(
182 193
183 if (ioend->io_type == XFS_IO_UNWRITTEN) 194 if (ioend->io_type == XFS_IO_UNWRITTEN)
184 queue_work(mp->m_unwritten_workqueue, &ioend->io_work); 195 queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
185 else if (ioend->io_append_trans || 196 else if (ioend->io_append_trans)
186 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
187 queue_work(mp->m_data_workqueue, &ioend->io_work); 197 queue_work(mp->m_data_workqueue, &ioend->io_work);
188 else 198 else
189 xfs_destroy_ioend(ioend); 199 xfs_destroy_ioend(ioend);
@@ -215,22 +225,8 @@ xfs_end_io(
215 if (ioend->io_type == XFS_IO_UNWRITTEN) { 225 if (ioend->io_type == XFS_IO_UNWRITTEN) {
216 error = xfs_iomap_write_unwritten(ip, ioend->io_offset, 226 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
217 ioend->io_size); 227 ioend->io_size);
218 } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
219 /*
220 * For direct I/O we do not know if we need to allocate blocks
221 * or not so we can't preallocate an append transaction as that
222 * results in nested reservations and log space deadlocks. Hence
223 * allocate the transaction here. While this is sub-optimal and
224 * can block IO completion for some time, we're stuck with doing
225 * it this way until we can pass the ioend to the direct IO
226 * allocation callbacks and avoid nesting that way.
227 */
228 error = xfs_setfilesize_trans_alloc(ioend);
229 if (error)
230 goto done;
231 error = xfs_setfilesize(ioend);
232 } else if (ioend->io_append_trans) { 228 } else if (ioend->io_append_trans) {
233 error = xfs_setfilesize(ioend); 229 error = xfs_setfilesize_ioend(ioend);
234 } else { 230 } else {
235 ASSERT(!xfs_ioend_is_append(ioend)); 231 ASSERT(!xfs_ioend_is_append(ioend));
236 } 232 }
@@ -242,17 +238,6 @@ done:
242} 238}
243 239
244/* 240/*
245 * Call IO completion handling in caller context on the final put of an ioend.
246 */
247STATIC void
248xfs_finish_ioend_sync(
249 struct xfs_ioend *ioend)
250{
251 if (atomic_dec_and_test(&ioend->io_remaining))
252 xfs_end_io(&ioend->io_work);
253}
254
255/*
256 * Allocate and initialise an IO completion structure. 241 * Allocate and initialise an IO completion structure.
257 * We need to track unwritten extent write completion here initially. 242 * We need to track unwritten extent write completion here initially.
258 * We'll need to extend this for updating the ondisk inode size later 243 * We'll need to extend this for updating the ondisk inode size later
@@ -273,7 +258,6 @@ xfs_alloc_ioend(
273 * all the I/O from calling the completion routine too early. 258 * all the I/O from calling the completion routine too early.
274 */ 259 */
275 atomic_set(&ioend->io_remaining, 1); 260 atomic_set(&ioend->io_remaining, 1);
276 ioend->io_isdirect = 0;
277 ioend->io_error = 0; 261 ioend->io_error = 0;
278 ioend->io_list = NULL; 262 ioend->io_list = NULL;
279 ioend->io_type = type; 263 ioend->io_type = type;
@@ -1459,11 +1443,7 @@ xfs_get_blocks_direct(
1459 * 1443 *
1460 * If the private argument is non-NULL __xfs_get_blocks signals us that we 1444 * If the private argument is non-NULL __xfs_get_blocks signals us that we
1461 * need to issue a transaction to convert the range from unwritten to written 1445 * need to issue a transaction to convert the range from unwritten to written
1462 * extents. In case this is regular synchronous I/O we just call xfs_end_io 1446 * extents.
1463 * to do this and we are done. But in case this was a successful AIO
1464 * request this handler is called from interrupt context, from which we
1465 * can't start transactions. In that case offload the I/O completion to
1466 * the workqueues we also use for buffered I/O completion.
1467 */ 1447 */
1468STATIC void 1448STATIC void
1469xfs_end_io_direct_write( 1449xfs_end_io_direct_write(
@@ -1472,7 +1452,12 @@ xfs_end_io_direct_write(
1472 ssize_t size, 1452 ssize_t size,
1473 void *private) 1453 void *private)
1474{ 1454{
1475 struct xfs_ioend *ioend = iocb->private; 1455 struct inode *inode = file_inode(iocb->ki_filp);
1456 struct xfs_inode *ip = XFS_I(inode);
1457 struct xfs_mount *mp = ip->i_mount;
1458
1459 if (XFS_FORCED_SHUTDOWN(mp))
1460 return;
1476 1461
1477 /* 1462 /*
1478 * While the generic direct I/O code updates the inode size, it does 1463 * While the generic direct I/O code updates the inode size, it does
@@ -1480,22 +1465,33 @@ xfs_end_io_direct_write(
1480 * end_io handler thinks the on-disk size is outside the in-core 1465 * end_io handler thinks the on-disk size is outside the in-core
1481 * size. To prevent this just update it a little bit earlier here. 1466 * size. To prevent this just update it a little bit earlier here.
1482 */ 1467 */
1483 if (offset + size > i_size_read(ioend->io_inode)) 1468 if (offset + size > i_size_read(inode))
1484 i_size_write(ioend->io_inode, offset + size); 1469 i_size_write(inode, offset + size);
1485 1470
1486 /* 1471 /*
1487 * blockdev_direct_IO can return an error even after the I/O 1472 * For direct I/O we do not know if we need to allocate blocks or not,
1488 * completion handler was called. Thus we need to protect 1473 * so we can't preallocate an append transaction, as that results in
1489 * against double-freeing. 1474 * nested reservations and log space deadlocks. Hence allocate the
1475 * transaction here. While this is sub-optimal and can block IO
1476 * completion for some time, we're stuck with doing it this way until
1477 * we can pass the ioend to the direct IO allocation callbacks and
1478 * avoid nesting that way.
1490 */ 1479 */
1491 iocb->private = NULL; 1480 if (private && size > 0) {
1492 1481 xfs_iomap_write_unwritten(ip, offset, size);
1493 ioend->io_offset = offset; 1482 } else if (offset + size > ip->i_d.di_size) {
1494 ioend->io_size = size; 1483 struct xfs_trans *tp;
1495 if (private && size > 0) 1484 int error;
1496 ioend->io_type = XFS_IO_UNWRITTEN; 1485
1486 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
1487 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
1488 if (error) {
1489 xfs_trans_cancel(tp, 0);
1490 return;
1491 }
1497 1492
1498 xfs_finish_ioend_sync(ioend); 1493 xfs_setfilesize(ip, tp, offset, size);
1494 }
1499} 1495}
1500 1496
1501STATIC ssize_t 1497STATIC ssize_t
@@ -1507,39 +1503,16 @@ xfs_vm_direct_IO(
1507{ 1503{
1508 struct inode *inode = iocb->ki_filp->f_mapping->host; 1504 struct inode *inode = iocb->ki_filp->f_mapping->host;
1509 struct block_device *bdev = xfs_find_bdev_for_inode(inode); 1505 struct block_device *bdev = xfs_find_bdev_for_inode(inode);
1510 struct xfs_ioend *ioend = NULL;
1511 ssize_t ret;
1512 1506
1513 if (rw & WRITE) { 1507 if (rw & WRITE) {
1514 size_t size = iov_iter_count(iter); 1508 return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
1515
1516 /*
1517 * We cannot preallocate a size update transaction here as we
1518 * don't know whether allocation is necessary or not. Hence we
1519 * can only tell IO completion that one is necessary if we are
1520 * not doing unwritten extent conversion.
1521 */
1522 iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
1523 if (offset + size > XFS_I(inode)->i_d.di_size)
1524 ioend->io_isdirect = 1;
1525
1526 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
1527 offset, xfs_get_blocks_direct, 1509 offset, xfs_get_blocks_direct,
1528 xfs_end_io_direct_write, NULL, 1510 xfs_end_io_direct_write, NULL,
1529 DIO_ASYNC_EXTEND); 1511 DIO_ASYNC_EXTEND);
1530 if (ret != -EIOCBQUEUED && iocb->private)
1531 goto out_destroy_ioend;
1532 } else {
1533 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
1534 offset, xfs_get_blocks_direct,
1535 NULL, NULL, 0);
1536 } 1512 }
1537 1513 return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
1538 return ret; 1514 offset, xfs_get_blocks_direct,
1539 1515 NULL, NULL, 0);
1540out_destroy_ioend:
1541 xfs_destroy_ioend(ioend);
1542 return ret;
1543} 1516}
1544 1517
1545/* 1518/*
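
With this rework xfs_setfilesize() takes an explicit inode, transaction, offset and length rather than digging them out of an ioend, so a completion path that has to allocate its own transaction can call it directly. A hedged sketch of that calling convention, mirroring the tr_fsyncts reservation used in the hunk above; mp, ip, offset and size are placeholders:

/* Sketch: update the on-disk size from a context that owns no transaction. */
struct xfs_trans        *tp;
int                     error;

tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
if (error) {
        xfs_trans_cancel(tp, 0);
        return error;
}
/* xfs_setfilesize() commits the transaction, or cancels it if no update. */
error = xfs_setfilesize(ip, tp, offset, size);
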
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index f94dd459dff9..ac644e0137a4 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -24,14 +24,12 @@ extern mempool_t *xfs_ioend_pool;
24 * Types of I/O for bmap clustering and I/O completion tracking. 24 * Types of I/O for bmap clustering and I/O completion tracking.
25 */ 25 */
26enum { 26enum {
27 XFS_IO_DIRECT = 0, /* special case for direct I/O ioends */
28 XFS_IO_DELALLOC, /* covers delalloc region */ 27 XFS_IO_DELALLOC, /* covers delalloc region */
29 XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */ 28 XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
30 XFS_IO_OVERWRITE, /* covers already allocated extent */ 29 XFS_IO_OVERWRITE, /* covers already allocated extent */
31}; 30};
32 31
33#define XFS_IO_TYPES \ 32#define XFS_IO_TYPES \
34 { 0, "" }, \
35 { XFS_IO_DELALLOC, "delalloc" }, \ 33 { XFS_IO_DELALLOC, "delalloc" }, \
36 { XFS_IO_UNWRITTEN, "unwritten" }, \ 34 { XFS_IO_UNWRITTEN, "unwritten" }, \
37 { XFS_IO_OVERWRITE, "overwrite" } 35 { XFS_IO_OVERWRITE, "overwrite" }
@@ -45,7 +43,6 @@ typedef struct xfs_ioend {
45 unsigned int io_type; /* delalloc / unwritten */ 43 unsigned int io_type; /* delalloc / unwritten */
46 int io_error; /* I/O error code */ 44 int io_error; /* I/O error code */
47 atomic_t io_remaining; /* hold count */ 45 atomic_t io_remaining; /* hold count */
48 unsigned int io_isdirect : 1;/* direct I/O */
49 struct inode *io_inode; /* file being written to */ 46 struct inode *io_inode; /* file being written to */
50 struct buffer_head *io_buffer_head;/* buffer linked list head */ 47 struct buffer_head *io_buffer_head;/* buffer linked list head */
51 struct buffer_head *io_buffer_tail;/* buffer linked list tail */ 48 struct buffer_head *io_buffer_tail;/* buffer linked list tail */
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 2fdb72d2c908..736429a72a12 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -26,43 +26,8 @@ struct xfs_ifork;
26struct xfs_inode; 26struct xfs_inode;
27struct xfs_mount; 27struct xfs_mount;
28struct xfs_trans; 28struct xfs_trans;
29struct xfs_bmalloca;
29 30
30/*
31 * Argument structure for xfs_bmap_alloc.
32 */
33struct xfs_bmalloca {
34 xfs_fsblock_t *firstblock; /* i/o first block allocated */
35 struct xfs_bmap_free *flist; /* bmap freelist */
36 struct xfs_trans *tp; /* transaction pointer */
37 struct xfs_inode *ip; /* incore inode pointer */
38 struct xfs_bmbt_irec prev; /* extent before the new one */
39 struct xfs_bmbt_irec got; /* extent after, or delayed */
40
41 xfs_fileoff_t offset; /* offset in file filling in */
42 xfs_extlen_t length; /* i/o length asked/allocated */
43 xfs_fsblock_t blkno; /* starting block of new extent */
44
45 struct xfs_btree_cur *cur; /* btree cursor */
46 xfs_extnum_t idx; /* current extent index */
47 int nallocs;/* number of extents alloc'd */
48 int logflags;/* flags for transaction logging */
49
50 xfs_extlen_t total; /* total blocks needed for xaction */
51 xfs_extlen_t minlen; /* minimum allocation size (blocks) */
52 xfs_extlen_t minleft; /* amount must be left after alloc */
53 bool eof; /* set if allocating past last extent */
54 bool wasdel; /* replacing a delayed allocation */
55 bool userdata;/* set if is user data */
56 bool aeof; /* allocated space at eof */
57 bool conv; /* overwriting unwritten extents */
58 int flags;
59 struct completion *done;
60 struct work_struct work;
61 int result;
62};
63
64int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
65 int *committed);
66int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 31int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
67int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 32int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
68 int whichfork, int *eof); 33 int whichfork, int *eof);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 3f9bd58edec7..507d96a57ac7 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -319,6 +319,10 @@ xfs_buf_item_format(
319 ASSERT(atomic_read(&bip->bli_refcount) > 0); 319 ASSERT(atomic_read(&bip->bli_refcount) > 0);
320 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || 320 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
321 (bip->bli_flags & XFS_BLI_STALE)); 321 (bip->bli_flags & XFS_BLI_STALE));
322 ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
323 (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
324 && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
325
322 326
323 /* 327 /*
324 * If it is an inode buffer, transfer the in-memory state to the 328 * If it is an inode buffer, transfer the in-memory state to the
@@ -535,7 +539,7 @@ xfs_buf_item_push(
535 if ((bp->b_flags & XBF_WRITE_FAIL) && 539 if ((bp->b_flags & XBF_WRITE_FAIL) &&
536 ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) { 540 ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
537 xfs_warn(bp->b_target->bt_mount, 541 xfs_warn(bp->b_target->bt_mount,
538"Detected failing async write on buffer block 0x%llx. Retrying async write.\n", 542"Detected failing async write on buffer block 0x%llx. Retrying async write.",
539 (long long)bp->b_bn); 543 (long long)bp->b_bn);
540 } 544 }
541 545
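
The assert added above means any buffer logged through a transaction must carry a log-format type before xfs_trans_log_buf() runs (stale buffers excepted), which is why the AGI, superblock and symlink hunks in this series add xfs_trans_buf_set_type() calls. A hedged sketch of the required ordering; mp, tp, agno and offset are placeholders:

/* Sketch: read, type, then log a range of an AGI buffer. */
struct xfs_buf  *agibp;
int             error;

error = xfs_read_agi(mp, tp, agno, &agibp);
if (error)
        return error;
xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
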
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index c24c67e22a2a..2f536f33cd26 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -86,7 +86,7 @@ static inline void xfs_dqflock(xfs_dquot_t *dqp)
86 wait_for_completion(&dqp->q_flush); 86 wait_for_completion(&dqp->q_flush);
87} 87}
88 88
89static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp) 89static inline bool xfs_dqflock_nowait(xfs_dquot_t *dqp)
90{ 90{
91 return try_wait_for_completion(&dqp->q_flush); 91 return try_wait_for_completion(&dqp->q_flush);
92} 92}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index ac7f1e8f92b3..f2d05a19d68c 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -127,6 +127,42 @@ xfs_iozero(
127 return (-status); 127 return (-status);
128} 128}
129 129
130int
131xfs_update_prealloc_flags(
132 struct xfs_inode *ip,
133 enum xfs_prealloc_flags flags)
134{
135 struct xfs_trans *tp;
136 int error;
137
138 tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
139 error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
140 if (error) {
141 xfs_trans_cancel(tp, 0);
142 return error;
143 }
144
145 xfs_ilock(ip, XFS_ILOCK_EXCL);
146 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
147
148 if (!(flags & XFS_PREALLOC_INVISIBLE)) {
149 ip->i_d.di_mode &= ~S_ISUID;
150 if (ip->i_d.di_mode & S_IXGRP)
151 ip->i_d.di_mode &= ~S_ISGID;
152 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
153 }
154
155 if (flags & XFS_PREALLOC_SET)
156 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
157 if (flags & XFS_PREALLOC_CLEAR)
158 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
159
160 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
161 if (flags & XFS_PREALLOC_SYNC)
162 xfs_trans_set_sync(tp);
163 return xfs_trans_commit(tp, 0);
164}
165
130/* 166/*
131 * Fsync operations on directories are much simpler than on regular files, 167 * Fsync operations on directories are much simpler than on regular files,
132 * as there is no file data to flush, and thus also no need for explicit 168 * as there is no file data to flush, and thus also no need for explicit
@@ -784,8 +820,8 @@ xfs_file_fallocate(
784{ 820{
785 struct inode *inode = file_inode(file); 821 struct inode *inode = file_inode(file);
786 struct xfs_inode *ip = XFS_I(inode); 822 struct xfs_inode *ip = XFS_I(inode);
787 struct xfs_trans *tp;
788 long error; 823 long error;
824 enum xfs_prealloc_flags flags = 0;
789 loff_t new_size = 0; 825 loff_t new_size = 0;
790 826
791 if (!S_ISREG(inode->i_mode)) 827 if (!S_ISREG(inode->i_mode))
@@ -822,6 +858,8 @@ xfs_file_fallocate(
822 if (error) 858 if (error)
823 goto out_unlock; 859 goto out_unlock;
824 } else { 860 } else {
861 flags |= XFS_PREALLOC_SET;
862
825 if (!(mode & FALLOC_FL_KEEP_SIZE) && 863 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
826 offset + len > i_size_read(inode)) { 864 offset + len > i_size_read(inode)) {
827 new_size = offset + len; 865 new_size = offset + len;
@@ -839,28 +877,10 @@ xfs_file_fallocate(
839 goto out_unlock; 877 goto out_unlock;
840 } 878 }
841 879
842 tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
843 error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
844 if (error) {
845 xfs_trans_cancel(tp, 0);
846 goto out_unlock;
847 }
848
849 xfs_ilock(ip, XFS_ILOCK_EXCL);
850 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
851 ip->i_d.di_mode &= ~S_ISUID;
852 if (ip->i_d.di_mode & S_IXGRP)
853 ip->i_d.di_mode &= ~S_ISGID;
854
855 if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)))
856 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
857
858 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
859 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
860
861 if (file->f_flags & O_DSYNC) 880 if (file->f_flags & O_DSYNC)
862 xfs_trans_set_sync(tp); 881 flags |= XFS_PREALLOC_SYNC;
863 error = xfs_trans_commit(tp, 0); 882
883 error = xfs_update_prealloc_flags(ip, flags);
864 if (error) 884 if (error)
865 goto out_unlock; 885 goto out_unlock;
866 886
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index fdc64220fcb0..fba6532efba4 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -488,6 +488,7 @@ xfs_growfs_data_private(
488 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree); 488 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
489 if (dpct) 489 if (dpct)
490 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct); 490 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
491 xfs_trans_set_sync(tp);
491 error = xfs_trans_commit(tp, 0); 492 error = xfs_trans_commit(tp, 0);
492 if (error) 493 if (error)
493 return error; 494 return error;
@@ -541,7 +542,7 @@ xfs_growfs_data_private(
541 saved_error = error; 542 saved_error = error;
542 continue; 543 continue;
543 } 544 }
544 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS); 545 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
545 546
546 error = xfs_bwrite(bp); 547 error = xfs_bwrite(bp);
547 xfs_buf_relse(bp); 548 xfs_buf_relse(bp);
@@ -756,37 +757,6 @@ out:
756 return 0; 757 return 0;
757} 758}
758 759
759/*
760 * Dump a transaction into the log that contains no real change. This is needed
761 * to be able to make the log dirty or stamp the current tail LSN into the log
762 * during the covering operation.
763 *
764 * We cannot use an inode here for this - that will push dirty state back up
765 * into the VFS and then periodic inode flushing will prevent log covering from
766 * making progress. Hence we log a field in the superblock instead and use a
767 * synchronous transaction to ensure the superblock is immediately unpinned
768 * and can be written back.
769 */
770int
771xfs_fs_log_dummy(
772 xfs_mount_t *mp)
773{
774 xfs_trans_t *tp;
775 int error;
776
777 tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
778 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
779 if (error) {
780 xfs_trans_cancel(tp, 0);
781 return error;
782 }
783
784 /* log the UUID because it is an unchanging field */
785 xfs_mod_sb(tp, XFS_SB_UUID);
786 xfs_trans_set_sync(tp);
787 return xfs_trans_commit(tp, 0);
788}
789
790int 760int
791xfs_fs_goingdown( 761xfs_fs_goingdown(
792 xfs_mount_t *mp, 762 xfs_mount_t *mp,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 41f804e740d7..daafa1f6d260 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1995,6 +1995,7 @@ xfs_iunlink(
1995 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); 1995 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1996 offset = offsetof(xfs_agi_t, agi_unlinked) + 1996 offset = offsetof(xfs_agi_t, agi_unlinked) +
1997 (sizeof(xfs_agino_t) * bucket_index); 1997 (sizeof(xfs_agino_t) * bucket_index);
1998 xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
1998 xfs_trans_log_buf(tp, agibp, offset, 1999 xfs_trans_log_buf(tp, agibp, offset,
1999 (offset + sizeof(xfs_agino_t) - 1)); 2000 (offset + sizeof(xfs_agino_t) - 1));
2000 return 0; 2001 return 0;
@@ -2086,6 +2087,7 @@ xfs_iunlink_remove(
2086 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); 2087 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2087 offset = offsetof(xfs_agi_t, agi_unlinked) + 2088 offset = offsetof(xfs_agi_t, agi_unlinked) +
2088 (sizeof(xfs_agino_t) * bucket_index); 2089 (sizeof(xfs_agino_t) * bucket_index);
2090 xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
2089 xfs_trans_log_buf(tp, agibp, offset, 2091 xfs_trans_log_buf(tp, agibp, offset,
2090 (offset + sizeof(xfs_agino_t) - 1)); 2092 (offset + sizeof(xfs_agino_t) - 1));
2091 } else { 2093 } else {
@@ -2656,6 +2658,124 @@ xfs_sort_for_rename(
2656} 2658}
2657 2659
2658/* 2660/*
2661 * xfs_cross_rename()
2662 *
 2663 * responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall
2664 */
2665STATIC int
2666xfs_cross_rename(
2667 struct xfs_trans *tp,
2668 struct xfs_inode *dp1,
2669 struct xfs_name *name1,
2670 struct xfs_inode *ip1,
2671 struct xfs_inode *dp2,
2672 struct xfs_name *name2,
2673 struct xfs_inode *ip2,
2674 struct xfs_bmap_free *free_list,
2675 xfs_fsblock_t *first_block,
2676 int spaceres)
2677{
2678 int error = 0;
2679 int ip1_flags = 0;
2680 int ip2_flags = 0;
2681 int dp2_flags = 0;
2682
2683 /* Swap inode number for dirent in first parent */
2684 error = xfs_dir_replace(tp, dp1, name1,
2685 ip2->i_ino,
2686 first_block, free_list, spaceres);
2687 if (error)
2688 goto out;
2689
2690 /* Swap inode number for dirent in second parent */
2691 error = xfs_dir_replace(tp, dp2, name2,
2692 ip1->i_ino,
2693 first_block, free_list, spaceres);
2694 if (error)
2695 goto out;
2696
2697 /*
2698 * If we're renaming one or more directories across different parents,
2699 * update the respective ".." entries (and link counts) to match the new
2700 * parents.
2701 */
2702 if (dp1 != dp2) {
2703 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2704
2705 if (S_ISDIR(ip2->i_d.di_mode)) {
2706 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2707 dp1->i_ino, first_block,
2708 free_list, spaceres);
2709 if (error)
2710 goto out;
2711
2712 /* transfer ip2 ".." reference to dp1 */
2713 if (!S_ISDIR(ip1->i_d.di_mode)) {
2714 error = xfs_droplink(tp, dp2);
2715 if (error)
2716 goto out;
2717 error = xfs_bumplink(tp, dp1);
2718 if (error)
2719 goto out;
2720 }
2721
2722 /*
2723 * Although ip1 isn't changed here, userspace needs
2724 * to be warned about the change, so that applications
2725 * relying on it (like backup ones), will properly
2726 * notify the change
2727 */
2728 ip1_flags |= XFS_ICHGTIME_CHG;
2729 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2730 }
2731
2732 if (S_ISDIR(ip1->i_d.di_mode)) {
2733 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2734 dp2->i_ino, first_block,
2735 free_list, spaceres);
2736 if (error)
2737 goto out;
2738
2739 /* transfer ip1 ".." reference to dp2 */
2740 if (!S_ISDIR(ip2->i_d.di_mode)) {
2741 error = xfs_droplink(tp, dp1);
2742 if (error)
2743 goto out;
2744 error = xfs_bumplink(tp, dp2);
2745 if (error)
2746 goto out;
2747 }
2748
2749 /*
2750 * Although ip2 isn't changed here, userspace needs
 2751 * to be warned about the change, so that applications
 2752 * relying on it (like backup ones) will properly
 2753 * notice the change.
2754 */
2755 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2756 ip2_flags |= XFS_ICHGTIME_CHG;
2757 }
2758 }
2759
2760 if (ip1_flags) {
2761 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2762 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2763 }
2764 if (ip2_flags) {
2765 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2766 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2767 }
2768 if (dp2_flags) {
2769 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2770 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2771 }
2772 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2773 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2774out:
2775 return error;
2776}
2777
2778/*
2659 * xfs_rename 2779 * xfs_rename
2660 */ 2780 */
2661int 2781int
@@ -2665,7 +2785,8 @@ xfs_rename(
2665 xfs_inode_t *src_ip, 2785 xfs_inode_t *src_ip,
2666 xfs_inode_t *target_dp, 2786 xfs_inode_t *target_dp,
2667 struct xfs_name *target_name, 2787 struct xfs_name *target_name,
2668 xfs_inode_t *target_ip) 2788 xfs_inode_t *target_ip,
2789 unsigned int flags)
2669{ 2790{
2670 xfs_trans_t *tp = NULL; 2791 xfs_trans_t *tp = NULL;
2671 xfs_mount_t *mp = src_dp->i_mount; 2792 xfs_mount_t *mp = src_dp->i_mount;
@@ -2743,6 +2864,18 @@ xfs_rename(
2743 } 2864 }
2744 2865
2745 /* 2866 /*
2867 * Handle RENAME_EXCHANGE flags
2868 */
2869 if (flags & RENAME_EXCHANGE) {
2870 error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
2871 target_dp, target_name, target_ip,
2872 &free_list, &first_block, spaceres);
2873 if (error)
2874 goto abort_return;
2875 goto finish_rename;
2876 }
2877
2878 /*
2746 * Set up the target. 2879 * Set up the target.
2747 */ 2880 */
2748 if (target_ip == NULL) { 2881 if (target_ip == NULL) {
@@ -2881,6 +3014,7 @@ xfs_rename(
2881 if (new_parent) 3014 if (new_parent)
2882 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); 3015 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
2883 3016
3017finish_rename:
2884 /* 3018 /*
2885 * If this is a synchronous mount, make sure that the 3019 * If this is a synchronous mount, make sure that the
2886 * rename transaction goes to disk before returning to 3020 * rename transaction goes to disk before returning to
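
xfs_rename() now takes a flags argument so the RENAME_EXCHANGE path above can be reached from renameat2(). A hedged sketch of an exchange-style caller; the wrapper is invented for illustration and assumes both inodes already exist, as RENAME_EXCHANGE requires:

/* Hypothetical wrapper: swap two existing directory entries atomically. */
STATIC int
xfs_example_exchange(
        struct xfs_inode        *dp1,
        struct xfs_name         *name1,
        struct xfs_inode        *ip1,
        struct xfs_inode        *dp2,
        struct xfs_name         *name2,
        struct xfs_inode        *ip2)
{
        return xfs_rename(dp1, name1, ip1, dp2, name2, ip2, RENAME_EXCHANGE);
}
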
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 4ed2ba9342dc..86cd6b39bed7 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -338,7 +338,7 @@ int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
338int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name, 338int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
339 struct xfs_inode *src_ip, struct xfs_inode *target_dp, 339 struct xfs_inode *src_ip, struct xfs_inode *target_dp,
340 struct xfs_name *target_name, 340 struct xfs_name *target_name,
341 struct xfs_inode *target_ip); 341 struct xfs_inode *target_ip, unsigned int flags);
342 342
343void xfs_ilock(xfs_inode_t *, uint); 343void xfs_ilock(xfs_inode_t *, uint);
344int xfs_ilock_nowait(xfs_inode_t *, uint); 344int xfs_ilock_nowait(xfs_inode_t *, uint);
@@ -377,6 +377,15 @@ int xfs_droplink(struct xfs_trans *, struct xfs_inode *);
377int xfs_bumplink(struct xfs_trans *, struct xfs_inode *); 377int xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
378 378
379/* from xfs_file.c */ 379/* from xfs_file.c */
380enum xfs_prealloc_flags {
381 XFS_PREALLOC_SET = (1 << 1),
382 XFS_PREALLOC_CLEAR = (1 << 2),
383 XFS_PREALLOC_SYNC = (1 << 3),
384 XFS_PREALLOC_INVISIBLE = (1 << 4),
385};
386
387int xfs_update_prealloc_flags(struct xfs_inode *,
388 enum xfs_prealloc_flags);
380int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); 389int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
381int xfs_iozero(struct xfs_inode *, loff_t, size_t); 390int xfs_iozero(struct xfs_inode *, loff_t, size_t);
382 391
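
The new enum lets a caller describe the whole prealloc-flag update as one bitmask and hand it to xfs_update_prealloc_flags(), which owns the transaction. A hedged sketch of a caller combining the bits, mirroring the fallocate and ioctl hunks; file, ip and error are placeholders:

/* Sketch: mark a range preallocated and honour O_DSYNC on the file. */
enum xfs_prealloc_flags flags = XFS_PREALLOC_SET;

if (file->f_flags & O_DSYNC)
        flags |= XFS_PREALLOC_SYNC;

error = xfs_update_prealloc_flags(ip, flags);
if (error)
        return error;
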
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index a1831980a68e..f7afb86c9148 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -606,11 +606,8 @@ xfs_ioc_space(
606 unsigned int cmd, 606 unsigned int cmd,
607 xfs_flock64_t *bf) 607 xfs_flock64_t *bf)
608{ 608{
609 struct xfs_mount *mp = ip->i_mount;
610 struct xfs_trans *tp;
611 struct iattr iattr; 609 struct iattr iattr;
612 bool setprealloc = false; 610 enum xfs_prealloc_flags flags = 0;
613 bool clrprealloc = false;
614 int error; 611 int error;
615 612
616 /* 613 /*
@@ -630,6 +627,11 @@ xfs_ioc_space(
630 if (!S_ISREG(inode->i_mode)) 627 if (!S_ISREG(inode->i_mode))
631 return -EINVAL; 628 return -EINVAL;
632 629
630 if (filp->f_flags & O_DSYNC)
631 flags |= XFS_PREALLOC_SYNC;
632 if (ioflags & XFS_IO_INVIS)
633 flags |= XFS_PREALLOC_INVISIBLE;
634
633 error = mnt_want_write_file(filp); 635 error = mnt_want_write_file(filp);
634 if (error) 636 if (error)
635 return error; 637 return error;
@@ -673,25 +675,23 @@ xfs_ioc_space(
673 } 675 }
674 676
675 if (bf->l_start < 0 || 677 if (bf->l_start < 0 ||
676 bf->l_start > mp->m_super->s_maxbytes || 678 bf->l_start > inode->i_sb->s_maxbytes ||
677 bf->l_start + bf->l_len < 0 || 679 bf->l_start + bf->l_len < 0 ||
678 bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) { 680 bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
679 error = -EINVAL; 681 error = -EINVAL;
680 goto out_unlock; 682 goto out_unlock;
681 } 683 }
682 684
683 switch (cmd) { 685 switch (cmd) {
684 case XFS_IOC_ZERO_RANGE: 686 case XFS_IOC_ZERO_RANGE:
687 flags |= XFS_PREALLOC_SET;
685 error = xfs_zero_file_space(ip, bf->l_start, bf->l_len); 688 error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
686 if (!error)
687 setprealloc = true;
688 break; 689 break;
689 case XFS_IOC_RESVSP: 690 case XFS_IOC_RESVSP:
690 case XFS_IOC_RESVSP64: 691 case XFS_IOC_RESVSP64:
692 flags |= XFS_PREALLOC_SET;
691 error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len, 693 error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
692 XFS_BMAPI_PREALLOC); 694 XFS_BMAPI_PREALLOC);
693 if (!error)
694 setprealloc = true;
695 break; 695 break;
696 case XFS_IOC_UNRESVSP: 696 case XFS_IOC_UNRESVSP:
697 case XFS_IOC_UNRESVSP64: 697 case XFS_IOC_UNRESVSP64:
@@ -701,6 +701,7 @@ xfs_ioc_space(
701 case XFS_IOC_ALLOCSP64: 701 case XFS_IOC_ALLOCSP64:
702 case XFS_IOC_FREESP: 702 case XFS_IOC_FREESP:
703 case XFS_IOC_FREESP64: 703 case XFS_IOC_FREESP64:
704 flags |= XFS_PREALLOC_CLEAR;
704 if (bf->l_start > XFS_ISIZE(ip)) { 705 if (bf->l_start > XFS_ISIZE(ip)) {
705 error = xfs_alloc_file_space(ip, XFS_ISIZE(ip), 706 error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
706 bf->l_start - XFS_ISIZE(ip), 0); 707 bf->l_start - XFS_ISIZE(ip), 0);
@@ -712,8 +713,6 @@ xfs_ioc_space(
712 iattr.ia_size = bf->l_start; 713 iattr.ia_size = bf->l_start;
713 714
714 error = xfs_setattr_size(ip, &iattr); 715 error = xfs_setattr_size(ip, &iattr);
715 if (!error)
716 clrprealloc = true;
717 break; 716 break;
718 default: 717 default:
719 ASSERT(0); 718 ASSERT(0);
@@ -723,32 +722,7 @@ xfs_ioc_space(
723 if (error) 722 if (error)
724 goto out_unlock; 723 goto out_unlock;
725 724
726 tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID); 725 error = xfs_update_prealloc_flags(ip, flags);
727 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
728 if (error) {
729 xfs_trans_cancel(tp, 0);
730 goto out_unlock;
731 }
732
733 xfs_ilock(ip, XFS_ILOCK_EXCL);
734 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
735
736 if (!(ioflags & XFS_IO_INVIS)) {
737 ip->i_d.di_mode &= ~S_ISUID;
738 if (ip->i_d.di_mode & S_IXGRP)
739 ip->i_d.di_mode &= ~S_ISGID;
740 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
741 }
742
743 if (setprealloc)
744 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
745 else if (clrprealloc)
746 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
747
748 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
749 if (filp->f_flags & O_DSYNC)
750 xfs_trans_set_sync(tp);
751 error = xfs_trans_commit(tp, 0);
752 726
753out_unlock: 727out_unlock:
754 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 728 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
@@ -1013,20 +987,182 @@ xfs_diflags_to_linux(
1013 inode->i_flags &= ~S_NOATIME; 987 inode->i_flags &= ~S_NOATIME;
1014} 988}
1015 989
1016#define FSX_PROJID 1 990static int
1017#define FSX_EXTSIZE 2 991xfs_ioctl_setattr_xflags(
1018#define FSX_XFLAGS 4 992 struct xfs_trans *tp,
1019#define FSX_NONBLOCK 8 993 struct xfs_inode *ip,
994 struct fsxattr *fa)
995{
996 struct xfs_mount *mp = ip->i_mount;
997
998 /* Can't change realtime flag if any extents are allocated. */
999 if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
1000 XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & XFS_XFLAG_REALTIME))
1001 return -EINVAL;
1002
1003 /* If realtime flag is set then must have realtime device */
1004 if (fa->fsx_xflags & XFS_XFLAG_REALTIME) {
1005 if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
1006 (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
1007 return -EINVAL;
1008 }
1009
1010 /*
1011 * Can't modify an immutable/append-only file unless
1012 * we have appropriate permission.
1013 */
1014 if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) ||
1015 (fa->fsx_xflags & (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
1016 !capable(CAP_LINUX_IMMUTABLE))
1017 return -EPERM;
1018
1019 xfs_set_diflags(ip, fa->fsx_xflags);
1020 xfs_diflags_to_linux(ip);
1021 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1022 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1023 XFS_STATS_INC(xs_ig_attrchg);
1024 return 0;
1025}
1026
1027/*
1028 * Set up the transaction structure for the setattr operation, checking that we
1029 * have permission to do so. On success, return a clean transaction and the
1030 * inode locked exclusively ready for further operation specific checks. On
1031 * failure, return an error without modifying or locking the inode.
1032 */
1033static struct xfs_trans *
1034xfs_ioctl_setattr_get_trans(
1035 struct xfs_inode *ip)
1036{
1037 struct xfs_mount *mp = ip->i_mount;
1038 struct xfs_trans *tp;
1039 int error;
1040
1041 if (mp->m_flags & XFS_MOUNT_RDONLY)
1042 return ERR_PTR(-EROFS);
1043 if (XFS_FORCED_SHUTDOWN(mp))
1044 return ERR_PTR(-EIO);
1045
1046 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
1047 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
1048 if (error)
1049 goto out_cancel;
1050
1051 xfs_ilock(ip, XFS_ILOCK_EXCL);
1052 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1053
1054 /*
1055 * CAP_FOWNER overrides the following restrictions:
1056 *
1057 * The user ID of the calling process must be equal to the file owner
1058 * ID, except in cases where the CAP_FSETID capability is applicable.
1059 */
1060 if (!inode_owner_or_capable(VFS_I(ip))) {
1061 error = -EPERM;
1062 goto out_cancel;
1063 }
1064
1065 if (mp->m_flags & XFS_MOUNT_WSYNC)
1066 xfs_trans_set_sync(tp);
1067
1068 return tp;
1069
1070out_cancel:
1071 xfs_trans_cancel(tp, 0);
1072 return ERR_PTR(error);
1073}
1074
1075/*
1076 * extent size hint validation is somewhat cumbersome. Rules are:
1077 *
1078 * 1. extent size hint is only valid for directories and regular files
1079 * 2. XFS_XFLAG_EXTSIZE is only valid for regular files
1080 * 3. XFS_XFLAG_EXTSZINHERIT is only valid for directories.
1081 * 4. can only be changed on regular files if no extents are allocated
1082 * 5. can be changed on directories at any time
1083 * 6. extsize hint of 0 turns off hints, clears inode flags.
1084 * 7. Extent size must be a multiple of the appropriate block size.
1085 * 8. for non-realtime files, the extent size hint must be limited
1086 * to half the AG size to avoid alignment extending the extent beyond the
1087 * limits of the AG.
1088 */
1089static int
1090xfs_ioctl_setattr_check_extsize(
1091 struct xfs_inode *ip,
1092 struct fsxattr *fa)
1093{
1094 struct xfs_mount *mp = ip->i_mount;
1095
1096 if ((fa->fsx_xflags & XFS_XFLAG_EXTSIZE) && !S_ISREG(ip->i_d.di_mode))
1097 return -EINVAL;
1098
1099 if ((fa->fsx_xflags & XFS_XFLAG_EXTSZINHERIT) &&
1100 !S_ISDIR(ip->i_d.di_mode))
1101 return -EINVAL;
1102
1103 if (S_ISREG(ip->i_d.di_mode) && ip->i_d.di_nextents &&
1104 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
1105 return -EINVAL;
1106
1107 if (fa->fsx_extsize != 0) {
1108 xfs_extlen_t size;
1109 xfs_fsblock_t extsize_fsb;
1110
1111 extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
1112 if (extsize_fsb > MAXEXTLEN)
1113 return -EINVAL;
1114
1115 if (XFS_IS_REALTIME_INODE(ip) ||
1116 (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
1117 size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
1118 } else {
1119 size = mp->m_sb.sb_blocksize;
1120 if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
1121 return -EINVAL;
1122 }
1123
1124 if (fa->fsx_extsize % size)
1125 return -EINVAL;
1126 } else
1127 fa->fsx_xflags &= ~(XFS_XFLAG_EXTSIZE | XFS_XFLAG_EXTSZINHERIT);
1128
1129 return 0;
1130}
1131
1132static int
1133xfs_ioctl_setattr_check_projid(
1134 struct xfs_inode *ip,
1135 struct fsxattr *fa)
1136{
1137 /* Disallow 32bit project ids if projid32bit feature is not enabled. */
1138 if (fa->fsx_projid > (__uint16_t)-1 &&
1139 !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
1140 return -EINVAL;
1141
1142 /*
1143 * Project Quota ID state is only allowed to change from within the init
1144 * namespace. Enforce that restriction only if we are trying to change
1145 * the quota ID state. Everything else is allowed in user namespaces.
1146 */
1147 if (current_user_ns() == &init_user_ns)
1148 return 0;
1149
1150 if (xfs_get_projid(ip) != fa->fsx_projid)
1151 return -EINVAL;
1152 if ((fa->fsx_xflags & XFS_XFLAG_PROJINHERIT) !=
1153 (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
1154 return -EINVAL;
1155
1156 return 0;
1157}
1020 1158
1021STATIC int 1159STATIC int
1022xfs_ioctl_setattr( 1160xfs_ioctl_setattr(
1023 xfs_inode_t *ip, 1161 xfs_inode_t *ip,
1024 struct fsxattr *fa, 1162 struct fsxattr *fa)
1025 int mask)
1026{ 1163{
1027 struct xfs_mount *mp = ip->i_mount; 1164 struct xfs_mount *mp = ip->i_mount;
1028 struct xfs_trans *tp; 1165 struct xfs_trans *tp;
1029 unsigned int lock_flags = 0;
1030 struct xfs_dquot *udqp = NULL; 1166 struct xfs_dquot *udqp = NULL;
1031 struct xfs_dquot *pdqp = NULL; 1167 struct xfs_dquot *pdqp = NULL;
1032 struct xfs_dquot *olddquot = NULL; 1168 struct xfs_dquot *olddquot = NULL;
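
To make the extent size hint rules above concrete: on a regular, non-realtime file with 4096-byte blocks and no extents allocated yet, a 1 MiB hint passes (a multiple of the block size and far below half an AG), while a 6000-byte hint fails rule 7. A hedged sketch of driving the new check from within xfs_ioctl.c; the fsxattr values are invented:

/* Sketch: exercise the extent size hint checks with made-up values. */
struct fsxattr  fa = {
        .fsx_xflags     = XFS_XFLAG_EXTSIZE,
        .fsx_extsize    = 1024 * 1024,  /* multiple of 4096: expect 0 */
};
int             error;

error = xfs_ioctl_setattr_check_extsize(ip, &fa);

fa.fsx_extsize = 6000;                  /* not block aligned: expect -EINVAL */
error = xfs_ioctl_setattr_check_extsize(ip, &fa);
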
@@ -1034,17 +1170,9 @@ xfs_ioctl_setattr(
1034 1170
1035 trace_xfs_ioctl_setattr(ip); 1171 trace_xfs_ioctl_setattr(ip);
1036 1172
1037 if (mp->m_flags & XFS_MOUNT_RDONLY) 1173 code = xfs_ioctl_setattr_check_projid(ip, fa);
1038 return -EROFS; 1174 if (code)
1039 if (XFS_FORCED_SHUTDOWN(mp)) 1175 return code;
1040 return -EIO;
1041
1042 /*
1043 * Disallow 32bit project ids when projid32bit feature is not enabled.
1044 */
1045 if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
1046 !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
1047 return -EINVAL;
1048 1176
1049 /* 1177 /*
1050 * If disk quotas is on, we make sure that the dquots do exist on disk, 1178 * If disk quotas is on, we make sure that the dquots do exist on disk,
@@ -1054,7 +1182,7 @@ xfs_ioctl_setattr(
1054 * If the IDs do change before we take the ilock, we're covered 1182 * If the IDs do change before we take the ilock, we're covered
1055 * because the i_*dquot fields will get updated anyway. 1183 * because the i_*dquot fields will get updated anyway.
1056 */ 1184 */
1057 if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) { 1185 if (XFS_IS_QUOTA_ON(mp)) {
1058 code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid, 1186 code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
1059 ip->i_d.di_gid, fa->fsx_projid, 1187 ip->i_d.di_gid, fa->fsx_projid,
1060 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp); 1188 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
@@ -1062,175 +1190,49 @@ xfs_ioctl_setattr(
1062 return code; 1190 return code;
1063 } 1191 }
1064 1192
1065 /* 1193 tp = xfs_ioctl_setattr_get_trans(ip);
1066 * For the other attributes, we acquire the inode lock and 1194 if (IS_ERR(tp)) {
1067 * first do an error checking pass. 1195 code = PTR_ERR(tp);
1068 */ 1196 goto error_free_dquots;
1069 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
1070 code = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
1071 if (code)
1072 goto error_return;
1073
1074 lock_flags = XFS_ILOCK_EXCL;
1075 xfs_ilock(ip, lock_flags);
1076
1077 /*
1078 * CAP_FOWNER overrides the following restrictions:
1079 *
1080 * The user ID of the calling process must be equal
1081 * to the file owner ID, except in cases where the
1082 * CAP_FSETID capability is applicable.
1083 */
1084 if (!inode_owner_or_capable(VFS_I(ip))) {
1085 code = -EPERM;
1086 goto error_return;
1087 }
1088
1089 /*
1090 * Do a quota reservation only if projid is actually going to change.
1091 * Only allow changing of projid from init_user_ns since it is a
1092 * non user namespace aware identifier.
1093 */
1094 if (mask & FSX_PROJID) {
1095 if (current_user_ns() != &init_user_ns) {
1096 code = -EINVAL;
1097 goto error_return;
1098 }
1099
1100 if (XFS_IS_QUOTA_RUNNING(mp) &&
1101 XFS_IS_PQUOTA_ON(mp) &&
1102 xfs_get_projid(ip) != fa->fsx_projid) {
1103 ASSERT(tp);
1104 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL,
1105 pdqp, capable(CAP_FOWNER) ?
1106 XFS_QMOPT_FORCE_RES : 0);
1107 if (code) /* out of quota */
1108 goto error_return;
1109 }
1110 } 1197 }
1111 1198
1112 if (mask & FSX_EXTSIZE) {
1113 /*
1114 * Can't change extent size if any extents are allocated.
1115 */
1116 if (ip->i_d.di_nextents &&
1117 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
1118 fa->fsx_extsize)) {
1119 code = -EINVAL; /* EFBIG? */
1120 goto error_return;
1121 }
1122 1199
1123 /* 1200 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
1124 * Extent size must be a multiple of the appropriate block 1201 xfs_get_projid(ip) != fa->fsx_projid) {
1125 * size, if set at all. It must also be smaller than the 1202 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
1126 * maximum extent size supported by the filesystem. 1203 capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
1127 * 1204 if (code) /* out of quota */
1128 * Also, for non-realtime files, limit the extent size hint to 1205 goto error_trans_cancel;
1129 * half the size of the AGs in the filesystem so alignment
1130 * doesn't result in extents larger than an AG.
1131 */
1132 if (fa->fsx_extsize != 0) {
1133 xfs_extlen_t size;
1134 xfs_fsblock_t extsize_fsb;
1135
1136 extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
1137 if (extsize_fsb > MAXEXTLEN) {
1138 code = -EINVAL;
1139 goto error_return;
1140 }
1141
1142 if (XFS_IS_REALTIME_INODE(ip) ||
1143 ((mask & FSX_XFLAGS) &&
1144 (fa->fsx_xflags & XFS_XFLAG_REALTIME))) {
1145 size = mp->m_sb.sb_rextsize <<
1146 mp->m_sb.sb_blocklog;
1147 } else {
1148 size = mp->m_sb.sb_blocksize;
1149 if (extsize_fsb > mp->m_sb.sb_agblocks / 2) {
1150 code = -EINVAL;
1151 goto error_return;
1152 }
1153 }
1154
1155 if (fa->fsx_extsize % size) {
1156 code = -EINVAL;
1157 goto error_return;
1158 }
1159 }
1160 } 1206 }
1161 1207
1208 code = xfs_ioctl_setattr_check_extsize(ip, fa);
1209 if (code)
1210 goto error_trans_cancel;
1162 1211
1163 if (mask & FSX_XFLAGS) { 1212 code = xfs_ioctl_setattr_xflags(tp, ip, fa);
1164 /* 1213 if (code)
1165 * Can't change realtime flag if any extents are allocated. 1214 goto error_trans_cancel;
1166 */
1167 if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
1168 (XFS_IS_REALTIME_INODE(ip)) !=
1169 (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
1170 code = -EINVAL; /* EFBIG? */
1171 goto error_return;
1172 }
1173
1174 /*
1175 * If realtime flag is set then must have realtime data.
1176 */
1177 if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
1178 if ((mp->m_sb.sb_rblocks == 0) ||
1179 (mp->m_sb.sb_rextsize == 0) ||
1180 (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
1181 code = -EINVAL;
1182 goto error_return;
1183 }
1184 }
1185
1186 /*
1187 * Can't modify an immutable/append-only file unless
1188 * we have appropriate permission.
1189 */
1190 if ((ip->i_d.di_flags &
1191 (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
1192 (fa->fsx_xflags &
1193 (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
1194 !capable(CAP_LINUX_IMMUTABLE)) {
1195 code = -EPERM;
1196 goto error_return;
1197 }
1198 }
1199
1200 xfs_trans_ijoin(tp, ip, 0);
1201 1215
1202 /* 1216 /*
1203 * Change file ownership. Must be the owner or privileged. 1217 * Change file ownership. Must be the owner or privileged. CAP_FSETID
1218 * overrides the following restrictions:
1219 *
1220 * The set-user-ID and set-group-ID bits of a file will be cleared upon
1221 * successful return from chown()
1204 */ 1222 */
1205 if (mask & FSX_PROJID) {
1206 /*
1207 * CAP_FSETID overrides the following restrictions:
1208 *
1209 * The set-user-ID and set-group-ID bits of a file will be
1210 * cleared upon successful return from chown()
1211 */
1212 if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
1213 !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
1214 ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
1215
1216 /*
1217 * Change the ownerships and register quota modifications
1218 * in the transaction.
1219 */
1220 if (xfs_get_projid(ip) != fa->fsx_projid) {
1221 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
1222 olddquot = xfs_qm_vop_chown(tp, ip,
1223 &ip->i_pdquot, pdqp);
1224 }
1225 ASSERT(ip->i_d.di_version > 1);
1226 xfs_set_projid(ip, fa->fsx_projid);
1227 }
1228 1223
1229 } 1224 if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
1225 !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
1226 ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
1230 1227
1231 if (mask & FSX_XFLAGS) { 1228 /* Change the ownerships and register project quota modifications */
1232 xfs_set_diflags(ip, fa->fsx_xflags); 1229 if (xfs_get_projid(ip) != fa->fsx_projid) {
1233 xfs_diflags_to_linux(ip); 1230 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
1231 olddquot = xfs_qm_vop_chown(tp, ip,
1232 &ip->i_pdquot, pdqp);
1233 }
1234 ASSERT(ip->i_d.di_version > 1);
1235 xfs_set_projid(ip, fa->fsx_projid);
1234 } 1236 }
1235 1237
1236 /* 1238 /*
@@ -1238,34 +1240,12 @@ xfs_ioctl_setattr(
1238 * extent size hint should be set on the inode. If no extent size flags 1240 * extent size hint should be set on the inode. If no extent size flags
1239 * are set on the inode then unconditionally clear the extent size hint. 1241 * are set on the inode then unconditionally clear the extent size hint.
1240 */ 1242 */
1241 if (mask & FSX_EXTSIZE) { 1243 if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
1242 int extsize = 0; 1244 ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
1243 1245 else
1244 if (ip->i_d.di_flags & 1246 ip->i_d.di_extsize = 0;
1245 (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
1246 extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
1247 ip->i_d.di_extsize = extsize;
1248 }
1249
1250 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1251 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1252
1253 XFS_STATS_INC(xs_ig_attrchg);
1254 1247
1255 /*
1256 * If this is a synchronous mount, make sure that the
1257 * transaction goes to disk before returning to the user.
1258 * This is slightly sub-optimal in that truncates require
1259 * two sync transactions instead of one for wsync filesystems.
1260 * One for the truncate and one for the timestamps since we
1261 * don't want to change the timestamps unless we're sure the
1262 * truncate worked. Truncates are less than 1% of the laddis
1263 * mix so this probably isn't worth the trouble to optimize.
1264 */
1265 if (mp->m_flags & XFS_MOUNT_WSYNC)
1266 xfs_trans_set_sync(tp);
1267 code = xfs_trans_commit(tp, 0); 1248 code = xfs_trans_commit(tp, 0);
1268 xfs_iunlock(ip, lock_flags);
1269 1249
1270 /* 1250 /*
1271 * Release any dquot(s) the inode had kept before chown. 1251 * Release any dquot(s) the inode had kept before chown.
@@ -1276,12 +1256,11 @@ xfs_ioctl_setattr(
1276 1256
1277 return code; 1257 return code;
1278 1258
1279 error_return: 1259error_trans_cancel:
1260 xfs_trans_cancel(tp, 0);
1261error_free_dquots:
1280 xfs_qm_dqrele(udqp); 1262 xfs_qm_dqrele(udqp);
1281 xfs_qm_dqrele(pdqp); 1263 xfs_qm_dqrele(pdqp);
1282 xfs_trans_cancel(tp, 0);
1283 if (lock_flags)
1284 xfs_iunlock(ip, lock_flags);
1285 return code; 1264 return code;
1286} 1265}
1287 1266
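Note: the reordered error labels above (error_trans_cancel first, then error_free_dquots) follow the usual stacked-goto unwind: cancel the transaction before dropping the dquot references that were taken before it. A minimal standalone C sketch of that pattern, with hypothetical helper names standing in for the real setup/teardown calls:

#include <stdio.h>

/* Hypothetical stand-ins for the real setup/teardown helpers. */
static int get_quota_refs(void)     { puts("get dquot refs");     return 0; }
static void put_quota_refs(void)    { puts("release dquot refs"); }
static int start_transaction(void)  { puts("start transaction");  return 0; }
static void cancel_transaction(void){ puts("cancel transaction"); }
static int do_checks(void)          { return -1; /* pretend a check fails */ }

static int setattr_like(void)
{
    int error;

    error = get_quota_refs();
    if (error)
        return error;

    error = start_transaction();
    if (error)
        goto out_free_dquots;

    error = do_checks();
    if (error)
        goto out_trans_cancel;

    puts("commit transaction");
    put_quota_refs();
    return 0;

out_trans_cancel:
    cancel_transaction();
out_free_dquots:
    put_quota_refs();
    return error;
}

int main(void) { return setattr_like() ? 1 : 0; }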
@@ -1292,20 +1271,15 @@ xfs_ioc_fssetxattr(
1292 void __user *arg) 1271 void __user *arg)
1293{ 1272{
1294 struct fsxattr fa; 1273 struct fsxattr fa;
1295 unsigned int mask;
1296 int error; 1274 int error;
1297 1275
1298 if (copy_from_user(&fa, arg, sizeof(fa))) 1276 if (copy_from_user(&fa, arg, sizeof(fa)))
1299 return -EFAULT; 1277 return -EFAULT;
1300 1278
1301 mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID;
1302 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1303 mask |= FSX_NONBLOCK;
1304
1305 error = mnt_want_write_file(filp); 1279 error = mnt_want_write_file(filp);
1306 if (error) 1280 if (error)
1307 return error; 1281 return error;
1308 error = xfs_ioctl_setattr(ip, &fa, mask); 1282 error = xfs_ioctl_setattr(ip, &fa);
1309 mnt_drop_write_file(filp); 1283 mnt_drop_write_file(filp);
1310 return error; 1284 return error;
1311} 1285}
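Note: with the mask argument gone, xfs_ioc_fssetxattr() now hands the whole fsxattr to xfs_ioctl_setattr() and lets the new helpers validate it. From userspace this path is still driven by the XFS_IOC_FSGETXATTR/XFS_IOC_FSSETXATTR ioctls on struct fsxattr; a hedged sketch, assuming the xfsprogs header <xfs/xfs_fs.h> is installed:

/* Hedged sketch: assumes <xfs/xfs_fs.h> provides XFS_IOC_FS[GS]ETXATTR
 * and struct fsxattr, as on typical xfsprogs installs. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>

int main(int argc, char **argv)
{
    struct fsxattr fsx;
    int fd;

    if (argc < 2)
        return 1;
    fd = open(argv[1], O_RDONLY);
    if (fd < 0)
        return 1;

    if (ioctl(fd, XFS_IOC_FSGETXATTR, &fsx) < 0) {
        perror("FSGETXATTR");
        return 1;
    }

    /* Request a 1 MiB extent size hint; the kernel-side helpers in this
     * patch (check_extsize, xflags) validate it before committing. */
    fsx.fsx_xflags |= XFS_XFLAG_EXTSIZE;
    fsx.fsx_extsize = 1024 * 1024;

    if (ioctl(fd, XFS_IOC_FSSETXATTR, &fsx) < 0)
        perror("FSSETXATTR");

    close(fd);
    return 0;
}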
@@ -1325,14 +1299,14 @@ xfs_ioc_getxflags(
1325 1299
1326STATIC int 1300STATIC int
1327xfs_ioc_setxflags( 1301xfs_ioc_setxflags(
1328 xfs_inode_t *ip, 1302 struct xfs_inode *ip,
1329 struct file *filp, 1303 struct file *filp,
1330 void __user *arg) 1304 void __user *arg)
1331{ 1305{
1306 struct xfs_trans *tp;
1332 struct fsxattr fa; 1307 struct fsxattr fa;
1333 unsigned int flags; 1308 unsigned int flags;
1334 unsigned int mask; 1309 int error;
1335 int error;
1336 1310
1337 if (copy_from_user(&flags, arg, sizeof(flags))) 1311 if (copy_from_user(&flags, arg, sizeof(flags)))
1338 return -EFAULT; 1312 return -EFAULT;
@@ -1342,15 +1316,26 @@ xfs_ioc_setxflags(
1342 FS_SYNC_FL)) 1316 FS_SYNC_FL))
1343 return -EOPNOTSUPP; 1317 return -EOPNOTSUPP;
1344 1318
1345 mask = FSX_XFLAGS;
1346 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1347 mask |= FSX_NONBLOCK;
1348 fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); 1319 fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
1349 1320
1350 error = mnt_want_write_file(filp); 1321 error = mnt_want_write_file(filp);
1351 if (error) 1322 if (error)
1352 return error; 1323 return error;
1353 error = xfs_ioctl_setattr(ip, &fa, mask); 1324
1325 tp = xfs_ioctl_setattr_get_trans(ip);
1326 if (IS_ERR(tp)) {
1327 error = PTR_ERR(tp);
1328 goto out_drop_write;
1329 }
1330
1331 error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
1332 if (error) {
1333 xfs_trans_cancel(tp, 0);
1334 goto out_drop_write;
1335 }
1336
1337 error = xfs_trans_commit(tp, 0);
1338out_drop_write:
1354 mnt_drop_write_file(filp); 1339 mnt_drop_write_file(filp);
1355 return error; 1340 return error;
1356} 1341}
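Note: the rewritten xfs_ioc_setxflags() now owns its transaction: it gets one from xfs_ioctl_setattr_get_trans(), applies only the xflags via xfs_ioctl_setattr_xflags(), and cancels or commits itself. This services the generic FS_IOC_SETFLAGS path; a small userspace sketch of the flags that end up here (unsigned int mirrors what the handler copies in):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
    unsigned int flags;
    int fd;

    if (argc < 2)
        return 1;
    fd = open(argv[1], O_RDONLY);
    if (fd < 0)
        return 1;

    if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
        perror("FS_IOC_GETFLAGS");
        return 1;
    }

    /* Only the IMMUTABLE/APPEND/NOATIME/SYNC family is accepted; anything
     * else is rejected with -EOPNOTSUPP by the check above. */
    flags |= FS_APPEND_FL;

    if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
        perror("FS_IOC_SETFLAGS");

    close(fd);
    return 0;
}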
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index ec6772866f3d..bfc7c7c8a0c8 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -423,7 +423,7 @@ xfs_compat_attrmulti_by_handle(
423 423
424 ops = memdup_user(compat_ptr(am_hreq.ops), size); 424 ops = memdup_user(compat_ptr(am_hreq.ops), size);
425 if (IS_ERR(ops)) { 425 if (IS_ERR(ops)) {
426 error = -PTR_ERR(ops); 426 error = PTR_ERR(ops);
427 goto out_dput; 427 goto out_dput;
428 } 428 }
429 429
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index c980e2a5086b..ccb1dd0d509e 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -802,7 +802,7 @@ int
802xfs_iomap_write_unwritten( 802xfs_iomap_write_unwritten(
803 xfs_inode_t *ip, 803 xfs_inode_t *ip,
804 xfs_off_t offset, 804 xfs_off_t offset,
805 size_t count) 805 xfs_off_t count)
806{ 806{
807 xfs_mount_t *mp = ip->i_mount; 807 xfs_mount_t *mp = ip->i_mount;
808 xfs_fileoff_t offset_fsb; 808 xfs_fileoff_t offset_fsb;
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 411fbb8919ef..8688e663d744 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -27,6 +27,6 @@ int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
27 struct xfs_bmbt_irec *); 27 struct xfs_bmbt_irec *);
28int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, 28int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
29 struct xfs_bmbt_irec *); 29 struct xfs_bmbt_irec *);
30int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t); 30int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
31 31
32#endif /* __XFS_IOMAP_H__*/ 32#endif /* __XFS_IOMAP_H__*/
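Note: widening the count argument from size_t to xfs_off_t matters on builds where size_t is 32 bits: a sufficiently large unwritten-extent conversion range would otherwise be silently truncated. A standalone demonstration of that truncation, with types chosen to mimic the 32-bit case:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Mimic a 32-bit size_t vs. the 64-bit xfs_off_t used after the patch. */
    uint32_t count32;
    int64_t  count64 = 6LL * 1024 * 1024 * 1024;   /* 6 GiB range */

    count32 = (uint32_t)count64;   /* what a 32-bit size_t would keep */

    printf("64-bit count: %lld bytes\n", (long long)count64);
    printf("32-bit count: %u bytes (truncated)\n", count32);
    return 0;
}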
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c50311cae1b1..ce80eeb8faa4 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -380,18 +380,27 @@ xfs_vn_rename(
380 struct inode *odir, 380 struct inode *odir,
381 struct dentry *odentry, 381 struct dentry *odentry,
382 struct inode *ndir, 382 struct inode *ndir,
383 struct dentry *ndentry) 383 struct dentry *ndentry,
384 unsigned int flags)
384{ 385{
385 struct inode *new_inode = ndentry->d_inode; 386 struct inode *new_inode = ndentry->d_inode;
387 int omode = 0;
386 struct xfs_name oname; 388 struct xfs_name oname;
387 struct xfs_name nname; 389 struct xfs_name nname;
388 390
389 xfs_dentry_to_name(&oname, odentry, 0); 391 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
392 return -EINVAL;
393
394 /* if we are exchanging files, we need to set i_mode of both files */
395 if (flags & RENAME_EXCHANGE)
396 omode = ndentry->d_inode->i_mode;
397
398 xfs_dentry_to_name(&oname, odentry, omode);
390 xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode); 399 xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode);
391 400
392 return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), 401 return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
393 XFS_I(ndir), &nname, new_inode ? 402 XFS_I(ndir), &nname,
394 XFS_I(new_inode) : NULL); 403 new_inode ? XFS_I(new_inode) : NULL, flags);
395} 404}
396 405
397/* 406/*
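Note: switching the directory inode_operations to .rename2 (see the two hunks below) lets XFS receive RENAME_NOREPLACE and RENAME_EXCHANGE from renameat2(2). A hedged userspace sketch that exercises the new path; the raw syscall is used because glibc wrappers for renameat2() were not yet universal:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/fs.h>

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0)   /* fallback for older UAPI headers */
#endif

/* Thin wrapper: older glibc versions have no renameat2() wrapper. */
static int my_renameat2(int olddir, const char *oldpath,
                        int newdir, const char *newpath, unsigned int flags)
{
    return syscall(SYS_renameat2, olddir, oldpath, newdir, newpath, flags);
}

int main(int argc, char **argv)
{
    if (argc < 3)
        return 1;

    /* Fail instead of overwriting an existing target; on XFS this now
     * reaches xfs_vn_rename() with flags == RENAME_NOREPLACE. */
    if (my_renameat2(AT_FDCWD, argv[1], AT_FDCWD, argv[2],
                     RENAME_NOREPLACE) < 0) {
        perror("renameat2");
        return 1;
    }
    return 0;
}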
@@ -1144,7 +1153,7 @@ static const struct inode_operations xfs_dir_inode_operations = {
1144 */ 1153 */
1145 .rmdir = xfs_vn_unlink, 1154 .rmdir = xfs_vn_unlink,
1146 .mknod = xfs_vn_mknod, 1155 .mknod = xfs_vn_mknod,
1147 .rename = xfs_vn_rename, 1156 .rename2 = xfs_vn_rename,
1148 .get_acl = xfs_get_acl, 1157 .get_acl = xfs_get_acl,
1149 .set_acl = xfs_set_acl, 1158 .set_acl = xfs_set_acl,
1150 .getattr = xfs_vn_getattr, 1159 .getattr = xfs_vn_getattr,
@@ -1172,7 +1181,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = {
1172 */ 1181 */
1173 .rmdir = xfs_vn_unlink, 1182 .rmdir = xfs_vn_unlink,
1174 .mknod = xfs_vn_mknod, 1183 .mknod = xfs_vn_mknod,
1175 .rename = xfs_vn_rename, 1184 .rename2 = xfs_vn_rename,
1176 .get_acl = xfs_get_acl, 1185 .get_acl = xfs_get_acl,
1177 .set_acl = xfs_set_acl, 1186 .set_acl = xfs_set_acl,
1178 .getattr = xfs_vn_getattr, 1187 .getattr = xfs_vn_getattr,
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e408bf5a3ff7..bcc7cfabb787 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -33,6 +33,7 @@
33#include "xfs_fsops.h" 33#include "xfs_fsops.h"
34#include "xfs_cksum.h" 34#include "xfs_cksum.h"
35#include "xfs_sysfs.h" 35#include "xfs_sysfs.h"
36#include "xfs_sb.h"
36 37
37kmem_zone_t *xfs_log_ticket_zone; 38kmem_zone_t *xfs_log_ticket_zone;
38 39
@@ -1290,9 +1291,20 @@ xfs_log_worker(
1290 struct xfs_mount *mp = log->l_mp; 1291 struct xfs_mount *mp = log->l_mp;
1291 1292
1292 /* dgc: errors ignored - not fatal and nowhere to report them */ 1293 /* dgc: errors ignored - not fatal and nowhere to report them */
1293 if (xfs_log_need_covered(mp)) 1294 if (xfs_log_need_covered(mp)) {
1294 xfs_fs_log_dummy(mp); 1295 /*
1295 else 1296 * Dump a transaction into the log that contains no real change.
1297 * This is needed to stamp the current tail LSN into the log
1298 * during the covering operation.
1299 *
1300 * We cannot use an inode here for this - that will push dirty
1301 * state back up into the VFS and then periodic inode flushing
1302 * will prevent log covering from making progress. Hence we
1303 * synchronously log the superblock instead to ensure the
1304 * superblock is immediately unpinned and can be written back.
1305 */
1306 xfs_sync_sb(mp, true);
1307 } else
1296 xfs_log_force(mp, 0); 1308 xfs_log_force(mp, 0);
1297 1309
1298 /* start pushing all the metadata that is currently dirty */ 1310 /* start pushing all the metadata that is currently dirty */
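Note: several hunks in this patch replace open-coded superblock transactions (xfs_mount_log_sb, xfs_qm_write_sb_changes, the body of xfs_log_sbcount, and xfs_fs_log_dummy in the covering/freeze paths) with a single xfs_sync_sb(mp, wait) call. The helper itself is introduced elsewhere in the series; the fragment below is only a sketch of the shape it plausibly takes, reusing calls visible in the removed code here. It is not meant to compile outside the tree, and the transaction type name is a guess:

/* Sketch only -- not the actual helper from the series.  The transaction
 * type (XFS_TRANS_SB_CHANGE) is a guessed name; everything else reuses
 * calls that appear in the removed code in this patch. */
int
xfs_sync_sb_sketch(
        struct xfs_mount        *mp,
        bool                    wait)
{
        struct xfs_trans        *tp;
        int                     error;

        tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        xfs_log_sb(tp);                 /* log the whole superblock ... */
        if (wait)
                xfs_trans_set_sync(tp); /* ... and force it out if asked to */
        return xfs_trans_commit(tp, 0);
}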
@@ -1395,6 +1407,8 @@ xlog_alloc_log(
1395 ASSERT(xfs_buf_islocked(bp)); 1407 ASSERT(xfs_buf_islocked(bp));
1396 xfs_buf_unlock(bp); 1408 xfs_buf_unlock(bp);
1397 1409
1410 /* use high priority wq for log I/O completion */
1411 bp->b_ioend_wq = mp->m_log_workqueue;
1398 bp->b_iodone = xlog_iodone; 1412 bp->b_iodone = xlog_iodone;
1399 log->l_xbuf = bp; 1413 log->l_xbuf = bp;
1400 1414
@@ -1427,6 +1441,8 @@ xlog_alloc_log(
1427 ASSERT(xfs_buf_islocked(bp)); 1441 ASSERT(xfs_buf_islocked(bp));
1428 xfs_buf_unlock(bp); 1442 xfs_buf_unlock(bp);
1429 1443
1444 /* use high priority wq for log I/O completion */
1445 bp->b_ioend_wq = mp->m_log_workqueue;
1430 bp->b_iodone = xlog_iodone; 1446 bp->b_iodone = xlog_iodone;
1431 iclog->ic_bp = bp; 1447 iclog->ic_bp = bp;
1432 iclog->ic_data = bp->b_addr; 1448 iclog->ic_data = bp->b_addr;
@@ -1806,8 +1822,6 @@ xlog_sync(
1806 XFS_BUF_ZEROFLAGS(bp); 1822 XFS_BUF_ZEROFLAGS(bp);
1807 XFS_BUF_ASYNC(bp); 1823 XFS_BUF_ASYNC(bp);
1808 bp->b_flags |= XBF_SYNCIO; 1824 bp->b_flags |= XBF_SYNCIO;
1809 /* use high priority completion wq */
1810 bp->b_ioend_wq = log->l_mp->m_log_workqueue;
1811 1825
1812 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { 1826 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1813 bp->b_flags |= XBF_FUA; 1827 bp->b_flags |= XBF_FUA;
@@ -1856,8 +1870,6 @@ xlog_sync(
1856 bp->b_flags |= XBF_SYNCIO; 1870 bp->b_flags |= XBF_SYNCIO;
1857 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1871 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1858 bp->b_flags |= XBF_FUA; 1872 bp->b_flags |= XBF_FUA;
1859 /* use high priority completion wq */
1860 bp->b_ioend_wq = log->l_mp->m_log_workqueue;
1861 1873
1862 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1874 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1863 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1875 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
@@ -2027,7 +2039,7 @@ xlog_print_tic_res(
2027 " total reg = %u bytes (o/flow = %u bytes)\n" 2039 " total reg = %u bytes (o/flow = %u bytes)\n"
2028 " ophdrs = %u (ophdr space = %u bytes)\n" 2040 " ophdrs = %u (ophdr space = %u bytes)\n"
2029 " ophdr + reg = %u bytes\n" 2041 " ophdr + reg = %u bytes\n"
2030 " num regions = %u\n", 2042 " num regions = %u",
2031 ((ticket->t_trans_type <= 0 || 2043 ((ticket->t_trans_type <= 0 ||
2032 ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ? 2044 ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
2033 "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]), 2045 "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d3d38836f87f..4fa80e63eea2 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -408,11 +408,11 @@ xfs_update_alignment(xfs_mount_t *mp)
408 if (xfs_sb_version_hasdalign(sbp)) { 408 if (xfs_sb_version_hasdalign(sbp)) {
409 if (sbp->sb_unit != mp->m_dalign) { 409 if (sbp->sb_unit != mp->m_dalign) {
410 sbp->sb_unit = mp->m_dalign; 410 sbp->sb_unit = mp->m_dalign;
411 mp->m_update_flags |= XFS_SB_UNIT; 411 mp->m_update_sb = true;
412 } 412 }
413 if (sbp->sb_width != mp->m_swidth) { 413 if (sbp->sb_width != mp->m_swidth) {
414 sbp->sb_width = mp->m_swidth; 414 sbp->sb_width = mp->m_swidth;
415 mp->m_update_flags |= XFS_SB_WIDTH; 415 mp->m_update_sb = true;
416 } 416 }
417 } else { 417 } else {
418 xfs_warn(mp, 418 xfs_warn(mp,
@@ -583,38 +583,19 @@ int
583xfs_mount_reset_sbqflags( 583xfs_mount_reset_sbqflags(
584 struct xfs_mount *mp) 584 struct xfs_mount *mp)
585{ 585{
586 int error;
587 struct xfs_trans *tp;
588
589 mp->m_qflags = 0; 586 mp->m_qflags = 0;
590 587
591 /* 588 /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
592 * It is OK to look at sb_qflags here in mount path,
593 * without m_sb_lock.
594 */
595 if (mp->m_sb.sb_qflags == 0) 589 if (mp->m_sb.sb_qflags == 0)
596 return 0; 590 return 0;
597 spin_lock(&mp->m_sb_lock); 591 spin_lock(&mp->m_sb_lock);
598 mp->m_sb.sb_qflags = 0; 592 mp->m_sb.sb_qflags = 0;
599 spin_unlock(&mp->m_sb_lock); 593 spin_unlock(&mp->m_sb_lock);
600 594
601 /* 595 if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
602 * If the fs is readonly, let the incore superblock run
603 * with quotas off but don't flush the update out to disk
604 */
605 if (mp->m_flags & XFS_MOUNT_RDONLY)
606 return 0; 596 return 0;
607 597
608 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); 598 return xfs_sync_sb(mp, false);
609 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
610 if (error) {
611 xfs_trans_cancel(tp, 0);
612 xfs_alert(mp, "%s: Superblock update failed!", __func__);
613 return error;
614 }
615
616 xfs_mod_sb(tp, XFS_SB_QFLAGS);
617 return xfs_trans_commit(tp, 0);
618} 599}
619 600
620__uint64_t 601__uint64_t
@@ -659,26 +640,25 @@ xfs_mountfs(
659 xfs_sb_mount_common(mp, sbp); 640 xfs_sb_mount_common(mp, sbp);
660 641
661 /* 642 /*
662 * Check for a mismatched features2 values. Older kernels 643 * Check for a mismatched features2 values. Older kernels read & wrote
663 * read & wrote into the wrong sb offset for sb_features2 644 * into the wrong sb offset for sb_features2 on some platforms due to
664 * on some platforms due to xfs_sb_t not being 64bit size aligned 645 * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
665 * when sb_features2 was added, which made older superblock 646 * which made older superblock reading/writing routines swap it as a
666 * reading/writing routines swap it as a 64-bit value. 647 * 64-bit value.
667 * 648 *
668 * For backwards compatibility, we make both slots equal. 649 * For backwards compatibility, we make both slots equal.
669 * 650 *
670 * If we detect a mismatched field, we OR the set bits into the 651 * If we detect a mismatched field, we OR the set bits into the existing
671 * existing features2 field in case it has already been modified; we 652 * features2 field in case it has already been modified; we don't want
672 * don't want to lose any features. We then update the bad location 653 * to lose any features. We then update the bad location with the ORed
673 * with the ORed value so that older kernels will see any features2 654 * value so that older kernels will see any features2 flags. The
674 * flags, and mark the two fields as needing updates once the 655 * superblock writeback code ensures the new sb_features2 is copied to
675 * transaction subsystem is online. 656 * sb_bad_features2 before it is logged or written to disk.
676 */ 657 */
677 if (xfs_sb_has_mismatched_features2(sbp)) { 658 if (xfs_sb_has_mismatched_features2(sbp)) {
678 xfs_warn(mp, "correcting sb_features alignment problem"); 659 xfs_warn(mp, "correcting sb_features alignment problem");
679 sbp->sb_features2 |= sbp->sb_bad_features2; 660 sbp->sb_features2 |= sbp->sb_bad_features2;
680 sbp->sb_bad_features2 = sbp->sb_features2; 661 mp->m_update_sb = true;
681 mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
682 662
683 /* 663 /*
684 * Re-check for ATTR2 in case it was found in bad_features2 664 * Re-check for ATTR2 in case it was found in bad_features2
@@ -692,17 +672,17 @@ xfs_mountfs(
692 if (xfs_sb_version_hasattr2(&mp->m_sb) && 672 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
693 (mp->m_flags & XFS_MOUNT_NOATTR2)) { 673 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
694 xfs_sb_version_removeattr2(&mp->m_sb); 674 xfs_sb_version_removeattr2(&mp->m_sb);
695 mp->m_update_flags |= XFS_SB_FEATURES2; 675 mp->m_update_sb = true;
696 676
697 /* update sb_versionnum for the clearing of the morebits */ 677 /* update sb_versionnum for the clearing of the morebits */
698 if (!sbp->sb_features2) 678 if (!sbp->sb_features2)
699 mp->m_update_flags |= XFS_SB_VERSIONNUM; 679 mp->m_update_sb = true;
700 } 680 }
701 681
702 /* always use v2 inodes by default now */ 682 /* always use v2 inodes by default now */
703 if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) { 683 if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
704 mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT; 684 mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
705 mp->m_update_flags |= XFS_SB_VERSIONNUM; 685 mp->m_update_sb = true;
706 } 686 }
707 687
708 /* 688 /*
@@ -895,8 +875,8 @@ xfs_mountfs(
895 * the next remount into writeable mode. Otherwise we would never 875 * the next remount into writeable mode. Otherwise we would never
896 * perform the update e.g. for the root filesystem. 876 * perform the update e.g. for the root filesystem.
897 */ 877 */
898 if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { 878 if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
899 error = xfs_mount_log_sb(mp, mp->m_update_flags); 879 error = xfs_sync_sb(mp, false);
900 if (error) { 880 if (error) {
901 xfs_warn(mp, "failed to write sb changes"); 881 xfs_warn(mp, "failed to write sb changes");
902 goto out_rtunmount; 882 goto out_rtunmount;
@@ -1103,9 +1083,6 @@ xfs_fs_writable(
1103int 1083int
1104xfs_log_sbcount(xfs_mount_t *mp) 1084xfs_log_sbcount(xfs_mount_t *mp)
1105{ 1085{
1106 xfs_trans_t *tp;
1107 int error;
1108
1109 /* allow this to proceed during the freeze sequence... */ 1086 /* allow this to proceed during the freeze sequence... */
1110 if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE)) 1087 if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
1111 return 0; 1088 return 0;
@@ -1119,17 +1096,7 @@ xfs_log_sbcount(xfs_mount_t *mp)
1119 if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) 1096 if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1120 return 0; 1097 return 0;
1121 1098
1122 tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); 1099 return xfs_sync_sb(mp, true);
1123 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
1124 if (error) {
1125 xfs_trans_cancel(tp, 0);
1126 return error;
1127 }
1128
1129 xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
1130 xfs_trans_set_sync(tp);
1131 error = xfs_trans_commit(tp, 0);
1132 return error;
1133} 1100}
1134 1101
1135/* 1102/*
@@ -1423,34 +1390,6 @@ xfs_freesb(
1423} 1390}
1424 1391
1425/* 1392/*
1426 * Used to log changes to the superblock unit and width fields which could
1427 * be altered by the mount options, as well as any potential sb_features2
1428 * fixup. Only the first superblock is updated.
1429 */
1430int
1431xfs_mount_log_sb(
1432 xfs_mount_t *mp,
1433 __int64_t fields)
1434{
1435 xfs_trans_t *tp;
1436 int error;
1437
1438 ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
1439 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
1440 XFS_SB_VERSIONNUM));
1441
1442 tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
1443 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
1444 if (error) {
1445 xfs_trans_cancel(tp, 0);
1446 return error;
1447 }
1448 xfs_mod_sb(tp, fields);
1449 error = xfs_trans_commit(tp, 0);
1450 return error;
1451}
1452
1453/*
1454 * If the underlying (data/log/rt) device is readonly, there are some 1393 * If the underlying (data/log/rt) device is readonly, there are some
1455 * operations that cannot proceed. 1394 * operations that cannot proceed.
1456 */ 1395 */
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 22ccf69d4d3c..a5b2ff822653 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -162,8 +162,7 @@ typedef struct xfs_mount {
162 struct delayed_work m_reclaim_work; /* background inode reclaim */ 162 struct delayed_work m_reclaim_work; /* background inode reclaim */
163 struct delayed_work m_eofblocks_work; /* background eof blocks 163 struct delayed_work m_eofblocks_work; /* background eof blocks
164 trimming */ 164 trimming */
165 __int64_t m_update_flags; /* sb flags we need to update 165 bool m_update_sb; /* sb needs update in mount */
166 on the next remount,rw */
167 int64_t m_low_space[XFS_LOWSP_MAX]; 166 int64_t m_low_space[XFS_LOWSP_MAX];
168 /* low free space thresholds */ 167 /* low free space thresholds */
169 struct xfs_kobj m_kobj; 168 struct xfs_kobj m_kobj;
@@ -378,7 +377,7 @@ extern void xfs_unmountfs(xfs_mount_t *);
378extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); 377extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
379extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, 378extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
380 uint, int); 379 uint, int);
381extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t); 380extern int xfs_mount_log_sb(xfs_mount_t *);
382extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); 381extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
383extern int xfs_readsb(xfs_mount_t *, int); 382extern int xfs_readsb(xfs_mount_t *, int);
384extern void xfs_freesb(xfs_mount_t *); 383extern void xfs_freesb(xfs_mount_t *);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 79fb19dd9c83..3e8186279541 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -714,7 +714,6 @@ STATIC int
714xfs_qm_qino_alloc( 714xfs_qm_qino_alloc(
715 xfs_mount_t *mp, 715 xfs_mount_t *mp,
716 xfs_inode_t **ip, 716 xfs_inode_t **ip,
717 __int64_t sbfields,
718 uint flags) 717 uint flags)
719{ 718{
720 xfs_trans_t *tp; 719 xfs_trans_t *tp;
@@ -777,11 +776,6 @@ xfs_qm_qino_alloc(
777 spin_lock(&mp->m_sb_lock); 776 spin_lock(&mp->m_sb_lock);
778 if (flags & XFS_QMOPT_SBVERSION) { 777 if (flags & XFS_QMOPT_SBVERSION) {
779 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); 778 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
780 ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
781 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
782 (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
783 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
784 XFS_SB_QFLAGS));
785 779
786 xfs_sb_version_addquota(&mp->m_sb); 780 xfs_sb_version_addquota(&mp->m_sb);
787 mp->m_sb.sb_uquotino = NULLFSINO; 781 mp->m_sb.sb_uquotino = NULLFSINO;
@@ -798,7 +792,7 @@ xfs_qm_qino_alloc(
798 else 792 else
799 mp->m_sb.sb_pquotino = (*ip)->i_ino; 793 mp->m_sb.sb_pquotino = (*ip)->i_ino;
800 spin_unlock(&mp->m_sb_lock); 794 spin_unlock(&mp->m_sb_lock);
801 xfs_mod_sb(tp, sbfields); 795 xfs_log_sb(tp);
802 796
803 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { 797 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
804 xfs_alert(mp, "%s failed (error %d)!", __func__, error); 798 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
@@ -1451,7 +1445,7 @@ xfs_qm_mount_quotas(
1451 spin_unlock(&mp->m_sb_lock); 1445 spin_unlock(&mp->m_sb_lock);
1452 1446
1453 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { 1447 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1454 if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { 1448 if (xfs_sync_sb(mp, false)) {
1455 /* 1449 /*
1456 * We could only have been turning quotas off. 1450 * We could only have been turning quotas off.
1457 * We aren't in very good shape actually because 1451 * We aren't in very good shape actually because
@@ -1482,7 +1476,6 @@ xfs_qm_init_quotainos(
1482 struct xfs_inode *gip = NULL; 1476 struct xfs_inode *gip = NULL;
1483 struct xfs_inode *pip = NULL; 1477 struct xfs_inode *pip = NULL;
1484 int error; 1478 int error;
1485 __int64_t sbflags = 0;
1486 uint flags = 0; 1479 uint flags = 0;
1487 1480
1488 ASSERT(mp->m_quotainfo); 1481 ASSERT(mp->m_quotainfo);
@@ -1517,9 +1510,6 @@ xfs_qm_init_quotainos(
1517 } 1510 }
1518 } else { 1511 } else {
1519 flags |= XFS_QMOPT_SBVERSION; 1512 flags |= XFS_QMOPT_SBVERSION;
1520 sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1521 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
1522 XFS_SB_QFLAGS);
1523 } 1513 }
1524 1514
1525 /* 1515 /*
@@ -1530,7 +1520,6 @@ xfs_qm_init_quotainos(
1530 */ 1520 */
1531 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { 1521 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1532 error = xfs_qm_qino_alloc(mp, &uip, 1522 error = xfs_qm_qino_alloc(mp, &uip,
1533 sbflags | XFS_SB_UQUOTINO,
1534 flags | XFS_QMOPT_UQUOTA); 1523 flags | XFS_QMOPT_UQUOTA);
1535 if (error) 1524 if (error)
1536 goto error_rele; 1525 goto error_rele;
@@ -1539,7 +1528,6 @@ xfs_qm_init_quotainos(
1539 } 1528 }
1540 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) { 1529 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1541 error = xfs_qm_qino_alloc(mp, &gip, 1530 error = xfs_qm_qino_alloc(mp, &gip,
1542 sbflags | XFS_SB_GQUOTINO,
1543 flags | XFS_QMOPT_GQUOTA); 1531 flags | XFS_QMOPT_GQUOTA);
1544 if (error) 1532 if (error)
1545 goto error_rele; 1533 goto error_rele;
@@ -1548,7 +1536,6 @@ xfs_qm_init_quotainos(
1548 } 1536 }
1549 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) { 1537 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1550 error = xfs_qm_qino_alloc(mp, &pip, 1538 error = xfs_qm_qino_alloc(mp, &pip,
1551 sbflags | XFS_SB_PQUOTINO,
1552 flags | XFS_QMOPT_PQUOTA); 1539 flags | XFS_QMOPT_PQUOTA);
1553 if (error) 1540 if (error)
1554 goto error_rele; 1541 goto error_rele;
@@ -1587,32 +1574,6 @@ xfs_qm_dqfree_one(
1587 xfs_qm_dqdestroy(dqp); 1574 xfs_qm_dqdestroy(dqp);
1588} 1575}
1589 1576
1590/*
1591 * Start a transaction and write the incore superblock changes to
1592 * disk. flags parameter indicates which fields have changed.
1593 */
1594int
1595xfs_qm_write_sb_changes(
1596 xfs_mount_t *mp,
1597 __int64_t flags)
1598{
1599 xfs_trans_t *tp;
1600 int error;
1601
1602 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1603 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
1604 if (error) {
1605 xfs_trans_cancel(tp, 0);
1606 return error;
1607 }
1608
1609 xfs_mod_sb(tp, flags);
1610 error = xfs_trans_commit(tp, 0);
1611
1612 return error;
1613}
1614
1615
1616/* --------------- utility functions for vnodeops ---------------- */ 1577/* --------------- utility functions for vnodeops ---------------- */
1617 1578
1618 1579
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 41f6c0b9d51c..0d4d3590cf85 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -157,7 +157,6 @@ struct xfs_dquot_acct {
157#define XFS_QM_RTBWARNLIMIT 5 157#define XFS_QM_RTBWARNLIMIT 5
158 158
159extern void xfs_qm_destroy_quotainfo(struct xfs_mount *); 159extern void xfs_qm_destroy_quotainfo(struct xfs_mount *);
160extern int xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t);
161 160
162/* dquot stuff */ 161/* dquot stuff */
163extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint); 162extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index cb6168ec92c9..9b965db45800 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -91,8 +91,7 @@ xfs_qm_scall_quotaoff(
91 mutex_unlock(&q->qi_quotaofflock); 91 mutex_unlock(&q->qi_quotaofflock);
92 92
93 /* XXX what to do if error ? Revert back to old vals incore ? */ 93 /* XXX what to do if error ? Revert back to old vals incore ? */
94 error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); 94 return xfs_sync_sb(mp, false);
95 return error;
96 } 95 }
97 96
98 dqtype = 0; 97 dqtype = 0;
@@ -313,7 +312,6 @@ xfs_qm_scall_quotaon(
313{ 312{
314 int error; 313 int error;
315 uint qf; 314 uint qf;
316 __int64_t sbflags;
317 315
318 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); 316 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
319 /* 317 /*
@@ -321,30 +319,22 @@ xfs_qm_scall_quotaon(
321 */ 319 */
322 flags &= ~(XFS_ALL_QUOTA_ACCT); 320 flags &= ~(XFS_ALL_QUOTA_ACCT);
323 321
324 sbflags = 0;
325
326 if (flags == 0) { 322 if (flags == 0) {
327 xfs_debug(mp, "%s: zero flags, m_qflags=%x", 323 xfs_debug(mp, "%s: zero flags, m_qflags=%x",
328 __func__, mp->m_qflags); 324 __func__, mp->m_qflags);
329 return -EINVAL; 325 return -EINVAL;
330 } 326 }
331 327
332 /* No fs can turn on quotas with a delayed effect */
333 ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
334
335 /* 328 /*
336 * Can't enforce without accounting. We check the superblock 329 * Can't enforce without accounting. We check the superblock
337 * qflags here instead of m_qflags because rootfs can have 330 * qflags here instead of m_qflags because rootfs can have
338 * quota acct on ondisk without m_qflags' knowing. 331 * quota acct on ondisk without m_qflags' knowing.
339 */ 332 */
340 if (((flags & XFS_UQUOTA_ACCT) == 0 && 333 if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
341 (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
342 (flags & XFS_UQUOTA_ENFD)) || 334 (flags & XFS_UQUOTA_ENFD)) ||
343 ((flags & XFS_GQUOTA_ACCT) == 0 && 335 ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
344 (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
345 (flags & XFS_GQUOTA_ENFD)) || 336 (flags & XFS_GQUOTA_ENFD)) ||
346 ((flags & XFS_PQUOTA_ACCT) == 0 && 337 ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
347 (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
348 (flags & XFS_PQUOTA_ENFD))) { 338 (flags & XFS_PQUOTA_ENFD))) {
349 xfs_debug(mp, 339 xfs_debug(mp,
350 "%s: Can't enforce without acct, flags=%x sbflags=%x", 340 "%s: Can't enforce without acct, flags=%x sbflags=%x",
@@ -369,11 +359,11 @@ xfs_qm_scall_quotaon(
369 /* 359 /*
370 * There's nothing to change if it's the same. 360 * There's nothing to change if it's the same.
371 */ 361 */
372 if ((qf & flags) == flags && sbflags == 0) 362 if ((qf & flags) == flags)
373 return -EEXIST; 363 return -EEXIST;
374 sbflags |= XFS_SB_QFLAGS;
375 364
376 if ((error = xfs_qm_write_sb_changes(mp, sbflags))) 365 error = xfs_sync_sb(mp, false);
366 if (error)
377 return error; 367 return error;
378 /* 368 /*
379 * If we aren't trying to switch on quota enforcement, we are done. 369 * If we aren't trying to switch on quota enforcement, we are done.
@@ -383,8 +373,7 @@ xfs_qm_scall_quotaon(
383 ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) != 373 ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
384 (mp->m_qflags & XFS_PQUOTA_ACCT)) || 374 (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
385 ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != 375 ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
386 (mp->m_qflags & XFS_GQUOTA_ACCT)) || 376 (mp->m_qflags & XFS_GQUOTA_ACCT)))
387 (flags & XFS_ALL_QUOTA_ENFD) == 0)
388 return 0; 377 return 0;
389 378
390 if (! XFS_IS_QUOTA_RUNNING(mp)) 379 if (! XFS_IS_QUOTA_RUNNING(mp))
@@ -421,20 +410,12 @@ xfs_qm_scall_getqstat(
421 memset(out, 0, sizeof(fs_quota_stat_t)); 410 memset(out, 0, sizeof(fs_quota_stat_t));
422 411
423 out->qs_version = FS_QSTAT_VERSION; 412 out->qs_version = FS_QSTAT_VERSION;
424 if (!xfs_sb_version_hasquota(&mp->m_sb)) {
425 out->qs_uquota.qfs_ino = NULLFSINO;
426 out->qs_gquota.qfs_ino = NULLFSINO;
427 return 0;
428 }
429
430 out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & 413 out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
431 (XFS_ALL_QUOTA_ACCT| 414 (XFS_ALL_QUOTA_ACCT|
432 XFS_ALL_QUOTA_ENFD)); 415 XFS_ALL_QUOTA_ENFD));
433 if (q) { 416 uip = q->qi_uquotaip;
434 uip = q->qi_uquotaip; 417 gip = q->qi_gquotaip;
435 gip = q->qi_gquotaip; 418 pip = q->qi_pquotaip;
436 pip = q->qi_pquotaip;
437 }
438 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { 419 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
439 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 420 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
440 0, 0, &uip) == 0) 421 0, 0, &uip) == 0)
@@ -480,14 +461,13 @@ xfs_qm_scall_getqstat(
480 if (temppqip) 461 if (temppqip)
481 IRELE(pip); 462 IRELE(pip);
482 } 463 }
483 if (q) { 464 out->qs_incoredqs = q->qi_dquots;
484 out->qs_incoredqs = q->qi_dquots; 465 out->qs_btimelimit = q->qi_btimelimit;
485 out->qs_btimelimit = q->qi_btimelimit; 466 out->qs_itimelimit = q->qi_itimelimit;
486 out->qs_itimelimit = q->qi_itimelimit; 467 out->qs_rtbtimelimit = q->qi_rtbtimelimit;
487 out->qs_rtbtimelimit = q->qi_rtbtimelimit; 468 out->qs_bwarnlimit = q->qi_bwarnlimit;
488 out->qs_bwarnlimit = q->qi_bwarnlimit; 469 out->qs_iwarnlimit = q->qi_iwarnlimit;
489 out->qs_iwarnlimit = q->qi_iwarnlimit; 470
490 }
491 return 0; 471 return 0;
492} 472}
493 473
@@ -508,13 +488,6 @@ xfs_qm_scall_getqstatv(
508 bool tempgqip = false; 488 bool tempgqip = false;
509 bool temppqip = false; 489 bool temppqip = false;
510 490
511 if (!xfs_sb_version_hasquota(&mp->m_sb)) {
512 out->qs_uquota.qfs_ino = NULLFSINO;
513 out->qs_gquota.qfs_ino = NULLFSINO;
514 out->qs_pquota.qfs_ino = NULLFSINO;
515 return 0;
516 }
517
518 out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & 491 out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
519 (XFS_ALL_QUOTA_ACCT| 492 (XFS_ALL_QUOTA_ACCT|
520 XFS_ALL_QUOTA_ENFD)); 493 XFS_ALL_QUOTA_ENFD));
@@ -522,11 +495,9 @@ xfs_qm_scall_getqstatv(
522 out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; 495 out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
523 out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino; 496 out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;
524 497
525 if (q) { 498 uip = q->qi_uquotaip;
526 uip = q->qi_uquotaip; 499 gip = q->qi_gquotaip;
527 gip = q->qi_gquotaip; 500 pip = q->qi_pquotaip;
528 pip = q->qi_pquotaip;
529 }
530 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { 501 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
531 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 502 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
532 0, 0, &uip) == 0) 503 0, 0, &uip) == 0)
@@ -561,14 +532,13 @@ xfs_qm_scall_getqstatv(
561 if (temppqip) 532 if (temppqip)
562 IRELE(pip); 533 IRELE(pip);
563 } 534 }
564 if (q) { 535 out->qs_incoredqs = q->qi_dquots;
565 out->qs_incoredqs = q->qi_dquots; 536 out->qs_btimelimit = q->qi_btimelimit;
566 out->qs_btimelimit = q->qi_btimelimit; 537 out->qs_itimelimit = q->qi_itimelimit;
567 out->qs_itimelimit = q->qi_itimelimit; 538 out->qs_rtbtimelimit = q->qi_rtbtimelimit;
568 out->qs_rtbtimelimit = q->qi_rtbtimelimit; 539 out->qs_bwarnlimit = q->qi_bwarnlimit;
569 out->qs_bwarnlimit = q->qi_bwarnlimit; 540 out->qs_iwarnlimit = q->qi_iwarnlimit;
570 out->qs_iwarnlimit = q->qi_iwarnlimit; 541
571 }
572 return 0; 542 return 0;
573} 543}
574 544
@@ -800,7 +770,7 @@ xfs_qm_log_quotaoff(
800 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; 770 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
801 spin_unlock(&mp->m_sb_lock); 771 spin_unlock(&mp->m_sb_lock);
802 772
803 xfs_mod_sb(tp, XFS_SB_QFLAGS); 773 xfs_log_sb(tp);
804 774
805 /* 775 /*
806 * We have to make sure that the transaction is secure on disk before we 776 * We have to make sure that the transaction is secure on disk before we
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 801a84c1cdc3..6923905ab33d 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -64,19 +64,10 @@ xfs_fs_get_xstatev(
64 return xfs_qm_scall_getqstatv(mp, fqs); 64 return xfs_qm_scall_getqstatv(mp, fqs);
65} 65}
66 66
67STATIC int 67static unsigned int
68xfs_fs_set_xstate( 68xfs_quota_flags(unsigned int uflags)
69 struct super_block *sb,
70 unsigned int uflags,
71 int op)
72{ 69{
73 struct xfs_mount *mp = XFS_M(sb); 70 unsigned int flags = 0;
74 unsigned int flags = 0;
75
76 if (sb->s_flags & MS_RDONLY)
77 return -EROFS;
78 if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
79 return -ENOSYS;
80 71
81 if (uflags & FS_QUOTA_UDQ_ACCT) 72 if (uflags & FS_QUOTA_UDQ_ACCT)
82 flags |= XFS_UQUOTA_ACCT; 73 flags |= XFS_UQUOTA_ACCT;
@@ -91,16 +82,39 @@ xfs_fs_set_xstate(
91 if (uflags & FS_QUOTA_PDQ_ENFD) 82 if (uflags & FS_QUOTA_PDQ_ENFD)
92 flags |= XFS_PQUOTA_ENFD; 83 flags |= XFS_PQUOTA_ENFD;
93 84
94 switch (op) { 85 return flags;
95 case Q_XQUOTAON: 86}
96 return xfs_qm_scall_quotaon(mp, flags); 87
97 case Q_XQUOTAOFF: 88STATIC int
98 if (!XFS_IS_QUOTA_ON(mp)) 89xfs_quota_enable(
99 return -EINVAL; 90 struct super_block *sb,
100 return xfs_qm_scall_quotaoff(mp, flags); 91 unsigned int uflags)
101 } 92{
93 struct xfs_mount *mp = XFS_M(sb);
94
95 if (sb->s_flags & MS_RDONLY)
96 return -EROFS;
97 if (!XFS_IS_QUOTA_RUNNING(mp))
98 return -ENOSYS;
99
100 return xfs_qm_scall_quotaon(mp, xfs_quota_flags(uflags));
101}
102
103STATIC int
104xfs_quota_disable(
105 struct super_block *sb,
106 unsigned int uflags)
107{
108 struct xfs_mount *mp = XFS_M(sb);
109
110 if (sb->s_flags & MS_RDONLY)
111 return -EROFS;
112 if (!XFS_IS_QUOTA_RUNNING(mp))
113 return -ENOSYS;
114 if (!XFS_IS_QUOTA_ON(mp))
115 return -EINVAL;
102 116
103 return -EINVAL; 117 return xfs_qm_scall_quotaoff(mp, xfs_quota_flags(uflags));
104} 118}
105 119
106STATIC int 120STATIC int
@@ -166,7 +180,8 @@ xfs_fs_set_dqblk(
166const struct quotactl_ops xfs_quotactl_operations = { 180const struct quotactl_ops xfs_quotactl_operations = {
167 .get_xstatev = xfs_fs_get_xstatev, 181 .get_xstatev = xfs_fs_get_xstatev,
168 .get_xstate = xfs_fs_get_xstate, 182 .get_xstate = xfs_fs_get_xstate,
169 .set_xstate = xfs_fs_set_xstate, 183 .quota_enable = xfs_quota_enable,
184 .quota_disable = xfs_quota_disable,
170 .rm_xquota = xfs_fs_rm_xquota, 185 .rm_xquota = xfs_fs_rm_xquota,
171 .get_dqblk = xfs_fs_get_dqblk, 186 .get_dqblk = xfs_fs_get_dqblk,
172 .set_dqblk = xfs_fs_set_dqblk, 187 .set_dqblk = xfs_fs_set_dqblk,
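Note: the monolithic ->set_xstate entry point is split into ->quota_enable and ->quota_disable, with the common uflags translation factored into xfs_quota_flags(). Userspace still reaches both through quotactl(2) with Q_XQUOTAON/Q_XQUOTAOFF; a hedged sketch, assuming the UAPI header <linux/dqblk_xfs.h> provides those commands and the FS_QUOTA_*_ENFD bits:

#include <stdio.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>    /* assumed home of Q_XQUOTAON, FS_QUOTA_*_ENFD */

int main(int argc, char **argv)
{
    unsigned int flags = FS_QUOTA_UDQ_ENFD;   /* enforce user quotas */

    if (argc < 2)
        return 1;

    /* Ends up in xfs_quota_enable() via the new .quota_enable op. */
    if (quotactl(QCMD(Q_XQUOTAON, USRQUOTA), argv[1], 0,
                 (void *)&flags) < 0) {
        perror("Q_XQUOTAON");
        return 1;
    }
    return 0;
}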
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 19cbda196369..f2449fd86926 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -685,7 +685,7 @@ xfs_blkdev_get(
685 mp); 685 mp);
686 if (IS_ERR(*bdevp)) { 686 if (IS_ERR(*bdevp)) {
687 error = PTR_ERR(*bdevp); 687 error = PTR_ERR(*bdevp);
688 xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error); 688 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
689 } 689 }
690 690
691 return error; 691 return error;
@@ -1111,6 +1111,11 @@ xfs_fs_statfs(
1111 statp->f_files, 1111 statp->f_files,
1112 mp->m_maxicount); 1112 mp->m_maxicount);
1113 1113
1114 /* If sb_icount overshot maxicount, report actual allocation */
1115 statp->f_files = max_t(typeof(statp->f_files),
1116 statp->f_files,
1117 sbp->sb_icount);
1118
1114 /* make sure statp->f_ffree does not underflow */ 1119 /* make sure statp->f_ffree does not underflow */
1115 ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 1120 ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1116 statp->f_ffree = max_t(__int64_t, ffree, 0); 1121 statp->f_ffree = max_t(__int64_t, ffree, 0);
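Note: the added clamp raises f_files to the real inode allocation when sb_icount has already overshot the configured maxicount (for instance after imaxpct was lowered), so the f_ffree computation that follows cannot go negative. A small standalone arithmetic sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

int main(void)
{
    uint64_t maxicount = 1000;   /* current cap derived from imaxpct */
    uint64_t sb_icount = 1200;   /* inodes already allocated */
    uint64_t sb_ifree  = 50;

    uint64_t f_files = maxicount;

    /* The added clamp: report at least what is actually allocated. */
    f_files = max_u64(f_files, sb_icount);

    /* Without the clamp this would underflow (1000 - 1150). */
    uint64_t f_ffree = f_files - (sb_icount - sb_ifree);

    printf("f_files=%llu f_ffree=%llu\n",
           (unsigned long long)f_files, (unsigned long long)f_ffree);
    return 0;
}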
@@ -1257,13 +1262,13 @@ xfs_fs_remount(
1257 * If this is the first remount to writeable state we 1262 * If this is the first remount to writeable state we
1258 * might have some superblock changes to update. 1263 * might have some superblock changes to update.
1259 */ 1264 */
1260 if (mp->m_update_flags) { 1265 if (mp->m_update_sb) {
1261 error = xfs_mount_log_sb(mp, mp->m_update_flags); 1266 error = xfs_sync_sb(mp, false);
1262 if (error) { 1267 if (error) {
1263 xfs_warn(mp, "failed to write sb changes"); 1268 xfs_warn(mp, "failed to write sb changes");
1264 return error; 1269 return error;
1265 } 1270 }
1266 mp->m_update_flags = 0; 1271 mp->m_update_sb = false;
1267 } 1272 }
1268 1273
1269 /* 1274 /*
@@ -1293,8 +1298,9 @@ xfs_fs_remount(
1293 1298
1294/* 1299/*
1295 * Second stage of a freeze. The data is already frozen so we only 1300 * Second stage of a freeze. The data is already frozen so we only
1296 * need to take care of the metadata. Once that's done write a dummy 1301 * need to take care of the metadata. Once that's done sync the superblock
1297 * record to dirty the log in case of a crash while frozen. 1302 * to the log to dirty it in case of a crash while frozen. This ensures that we
1303 * will recover the unlinked inode lists on the next mount.
1298 */ 1304 */
1299STATIC int 1305STATIC int
1300xfs_fs_freeze( 1306xfs_fs_freeze(
@@ -1304,7 +1310,7 @@ xfs_fs_freeze(
1304 1310
1305 xfs_save_resvblks(mp); 1311 xfs_save_resvblks(mp);
1306 xfs_quiesce_attr(mp); 1312 xfs_quiesce_attr(mp);
1307 return xfs_fs_log_dummy(mp); 1313 return xfs_sync_sb(mp, true);
1308} 1314}
1309 1315
1310STATIC int 1316STATIC int
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index 1743b9f8e23d..a0c8067cea6f 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -149,24 +149,6 @@ static struct ctl_table xfs_table[] = {
149 .extra2 = &xfs_params.inherit_noatim.max 149 .extra2 = &xfs_params.inherit_noatim.max
150 }, 150 },
151 { 151 {
152 .procname = "xfsbufd_centisecs",
153 .data = &xfs_params.xfs_buf_timer.val,
154 .maxlen = sizeof(int),
155 .mode = 0644,
156 .proc_handler = proc_dointvec_minmax,
157 .extra1 = &xfs_params.xfs_buf_timer.min,
158 .extra2 = &xfs_params.xfs_buf_timer.max
159 },
160 {
161 .procname = "age_buffer_centisecs",
162 .data = &xfs_params.xfs_buf_age.val,
163 .maxlen = sizeof(int),
164 .mode = 0644,
165 .proc_handler = proc_dointvec_minmax,
166 .extra1 = &xfs_params.xfs_buf_age.min,
167 .extra2 = &xfs_params.xfs_buf_age.max
168 },
169 {
170 .procname = "inherit_nosymlinks", 152 .procname = "inherit_nosymlinks",
171 .data = &xfs_params.inherit_nosym.val, 153 .data = &xfs_params.inherit_nosym.val,
172 .maxlen = sizeof(int), 154 .maxlen = sizeof(int),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index fa3135b9bf04..eb90cd59a0ec 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -472,6 +472,7 @@ xfs_trans_apply_sb_deltas(
472 whole = 1; 472 whole = 1;
473 } 473 }
474 474
475 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
475 if (whole) 476 if (whole)
476 /* 477 /*
477 * Log the whole thing, the fields are noncontiguous. 478 * Log the whole thing, the fields are noncontiguous.
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 0a4d4ab6d9a9..75798412859a 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -327,9 +327,10 @@ xfs_trans_read_buf_map(
327 return -EIO; 327 return -EIO;
328 } 328 }
329 329
330 if (tp) 330 if (tp) {
331 _xfs_trans_bjoin(tp, bp, 1); 331 _xfs_trans_bjoin(tp, bp, 1);
332 trace_xfs_trans_read_buf(bp->b_fspriv); 332 trace_xfs_trans_read_buf(bp->b_fspriv);
333 }
333 *bpp = bp; 334 *bpp = bp;
334 return 0; 335 return 0;
335 336
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index d5ec6c87810f..6b040f4ddfab 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 5a0a3e5daf85..03aacfb3e98b 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 8b06e4c1dd5d..11c3a011dcbf 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 7461327e14e4..273de709495c 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 1baae6edda89..9318a87ee39a 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index a08e55a263c9..b0bb30ebb807 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 03b3e6d405ff..0bc78df66d4b 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2014, Intel Corp. 10 * Copyright (C) 2000 - 2015, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 5ba78464c1b1..d56f5d722138 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20141107 49#define ACPI_CA_VERSION 0x20150204
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -569,6 +569,14 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
569 address, 569 address,
570 void *context)) 570 void *context))
571ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status 571ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
572 acpi_install_gpe_raw_handler(acpi_handle
573 gpe_device,
574 u32 gpe_number,
575 u32 type,
576 acpi_gpe_handler
577 address,
578 void *context))
579ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
572 acpi_remove_gpe_handler(acpi_handle gpe_device, 580 acpi_remove_gpe_handler(acpi_handle gpe_device,
573 u32 gpe_number, 581 u32 gpe_number,
574 acpi_gpe_handler 582 acpi_gpe_handler
@@ -891,12 +899,6 @@ ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
891ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap); 899ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
892 900
893ACPI_EXTERNAL_RETURN_STATUS(acpi_status 901ACPI_EXTERNAL_RETURN_STATUS(acpi_status
894 acpi_get_id(acpi_handle object,
895 acpi_owner_id * out_type))
896
897ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_table_id(acpi_owner_id id))
898
899ACPI_EXTERNAL_RETURN_STATUS(acpi_status
900 acpi_get_table_with_size(acpi_string signature, 902 acpi_get_table_with_size(acpi_string signature,
901 u32 instance, 903 u32 instance,
902 struct acpi_table_header 904 struct acpi_table_header
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index eb760ca0b2e0..ebe242638591 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -305,43 +305,51 @@ struct acpi_resource_source {
305 u8 max_address_fixed; \ 305 u8 max_address_fixed; \
306 union acpi_resource_attribute info; 306 union acpi_resource_attribute info;
307 307
308struct acpi_resource_address { 308struct acpi_address16_attribute {
309ACPI_RESOURCE_ADDRESS_COMMON}; 309 u16 granularity;
310
311struct acpi_resource_address16 {
312 ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
313 u16 minimum; 310 u16 minimum;
314 u16 maximum; 311 u16 maximum;
315 u16 translation_offset; 312 u16 translation_offset;
316 u16 address_length; 313 u16 address_length;
317 struct acpi_resource_source resource_source;
318}; 314};
319 315
320struct acpi_resource_address32 { 316struct acpi_address32_attribute {
321 ACPI_RESOURCE_ADDRESS_COMMON u32 granularity; 317 u32 granularity;
322 u32 minimum; 318 u32 minimum;
323 u32 maximum; 319 u32 maximum;
324 u32 translation_offset; 320 u32 translation_offset;
325 u32 address_length; 321 u32 address_length;
326 struct acpi_resource_source resource_source;
327}; 322};
328 323
329struct acpi_resource_address64 { 324struct acpi_address64_attribute {
330 ACPI_RESOURCE_ADDRESS_COMMON u64 granularity; 325 u64 granularity;
331 u64 minimum; 326 u64 minimum;
332 u64 maximum; 327 u64 maximum;
333 u64 translation_offset; 328 u64 translation_offset;
334 u64 address_length; 329 u64 address_length;
330};
331
332struct acpi_resource_address {
333ACPI_RESOURCE_ADDRESS_COMMON};
334
335struct acpi_resource_address16 {
336 ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address16_attribute address;
337 struct acpi_resource_source resource_source;
338};
339
340struct acpi_resource_address32 {
341 ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address32_attribute address;
342 struct acpi_resource_source resource_source;
343};
344
345struct acpi_resource_address64 {
346 ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address64_attribute address;
335 struct acpi_resource_source resource_source; 347 struct acpi_resource_source resource_source;
336}; 348};
337 349
338struct acpi_resource_extended_address64 { 350struct acpi_resource_extended_address64 {
339 ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID; 351 ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID;
340 u64 granularity; 352 struct acpi_address64_attribute address;
341 u64 minimum;
342 u64 maximum;
343 u64 translation_offset;
344 u64 address_length;
345 u64 type_specific; 353 u64 type_specific;
346}; 354};
347 355
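Note: the hunk above factors the shared address fields into acpi_address16/32/64_attribute structs embedded as .address, so consumers move from e.g. res->data.address64.minimum to res->data.address64.address.minimum. A standalone mock of the new 64-bit layout to illustrate the access change; the field names mirror the diff, everything else is simplified:

/* Standalone mock of the restructured layout; not the real ACPICA types. */
#include <stdio.h>
#include <stdint.h>

struct acpi_address64_attribute_mock {
    uint64_t granularity;
    uint64_t minimum;
    uint64_t maximum;
    uint64_t translation_offset;
    uint64_t address_length;
};

struct acpi_resource_address64_mock {
    /* ACPI_RESOURCE_ADDRESS_COMMON fields omitted for brevity */
    struct acpi_address64_attribute_mock address;
};

int main(void)
{
    struct acpi_resource_address64_mock r = {
        .address = { .minimum = 0x80000000ULL,
                     .address_length = 0x10000000ULL },
    };

    /* Old consumers read r.minimum directly; after this change the same
     * values live one level down, under the shared .address member. */
    printf("window: [%#llx, +%#llx)\n",
           (unsigned long long)r.address.minimum,
           (unsigned long long)r.address.address_length);
    return 0;
}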
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index bee19d8170c5..d4081fef1095 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 29e79370641d..b80b0e6dabc5 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ecff62405f17..f06d75e5fa54 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 5480cb2236bf..440ca8104b43 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index bbef17368e49..b034f1068dfe 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -744,7 +744,7 @@ typedef u32 acpi_event_status;
744/* 744/*
745 * GPE info flags - Per GPE 745 * GPE info flags - Per GPE
746 * +-------+-+-+---+ 746 * +-------+-+-+---+
747 * | 7:4 |3|2|1:0| 747 * | 7:5 |4|3|2:0|
748 * +-------+-+-+---+ 748 * +-------+-+-+---+
749 * | | | | 749 * | | | |
750 * | | | +-- Type of dispatch:to method, handler, notify, or none 750 * | | | +-- Type of dispatch:to method, handler, notify, or none
@@ -756,13 +756,15 @@ typedef u32 acpi_event_status;
756#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 756#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01
757#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02 757#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02
758#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03 758#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03
759#define ACPI_GPE_DISPATCH_MASK (u8) 0x03 759#define ACPI_GPE_DISPATCH_RAW_HANDLER (u8) 0x04
760#define ACPI_GPE_DISPATCH_MASK (u8) 0x07
761#define ACPI_GPE_DISPATCH_TYPE(flags) ((u8) ((flags) & ACPI_GPE_DISPATCH_MASK))
760 762
761#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x04 763#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x08
762#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 764#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00
763#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x04 765#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08
764 766
765#define ACPI_GPE_CAN_WAKE (u8) 0x08 767#define ACPI_GPE_CAN_WAKE (u8) 0x10
766 768
767/* 769/*
768 * Flags for GPE and Lock interfaces 770 * Flags for GPE and Lock interfaces
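
Editor's illustration, not part of the patch above: the actypes.h hunk widens the GPE dispatch field from two bits to three, adds ACPI_GPE_DISPATCH_RAW_HANDLER, and shifts the trigger and wake bits up. The standalone plain-C snippet below (reusing only the macros visible in the hunk, with a simplified u8 typedef) shows why callers must now go through ACPI_GPE_DISPATCH_TYPE() and the 0x07 mask rather than the old 0x03 mask:

/*
 * With the old two-bit mask, a RAW_HANDLER value (0x04) would be misread
 * as "no dispatch"; the widened mask keeps the trigger bit (now 0x08)
 * out of the dispatch type.
 */
#include <stdio.h>

typedef unsigned char u8;

#define ACPI_GPE_DISPATCH_RAW_HANDLER  ((u8) 0x04)
#define ACPI_GPE_DISPATCH_MASK         ((u8) 0x07)
#define ACPI_GPE_DISPATCH_TYPE(flags)  ((u8) ((flags) & ACPI_GPE_DISPATCH_MASK))
#define ACPI_GPE_LEVEL_TRIGGERED       ((u8) 0x08)

int main(void)
{
	u8 flags = ACPI_GPE_DISPATCH_RAW_HANDLER | ACPI_GPE_LEVEL_TRIGGERED;

	/* prints 0x04: the trigger bit no longer leaks into the dispatch type */
	printf("dispatch type: 0x%02x\n", ACPI_GPE_DISPATCH_TYPE(flags));
	return 0;
}
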
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 5f8cc1fa3278..ad74dc51d5b7 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 2b612384c994..71e5ec5b07a3 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 384875da3713..f54de0a63558 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 1ba7c190c2cc..74ba46c8157a 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 568d4b886712..acedc3f026de 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d459cd17b477..24c7aa8b1d20 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -27,6 +27,7 @@
27 27
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/ioport.h> /* for struct resource */ 29#include <linux/ioport.h> /* for struct resource */
30#include <linux/resource_ext.h>
30#include <linux/device.h> 31#include <linux/device.h>
31#include <linux/property.h> 32#include <linux/property.h>
32 33
@@ -151,6 +152,10 @@ int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
151int acpi_unmap_cpu(int cpu); 152int acpi_unmap_cpu(int cpu);
152#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 153#endif /* CONFIG_ACPI_HOTPLUG_CPU */
153 154
155#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
156int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
157#endif
158
154int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); 159int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
155int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); 160int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
156int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); 161int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
@@ -288,22 +293,25 @@ extern int pnpacpi_disabled;
288bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); 293bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res);
289bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); 294bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res);
290bool acpi_dev_resource_address_space(struct acpi_resource *ares, 295bool acpi_dev_resource_address_space(struct acpi_resource *ares,
291 struct resource *res); 296 struct resource_win *win);
292bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, 297bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
293 struct resource *res); 298 struct resource_win *win);
294unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); 299unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
295bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, 300bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
296 struct resource *res); 301 struct resource *res);
297 302
298struct resource_list_entry {
299 struct list_head node;
300 struct resource res;
301};
302
303void acpi_dev_free_resource_list(struct list_head *list); 303void acpi_dev_free_resource_list(struct list_head *list);
304int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, 304int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
305 int (*preproc)(struct acpi_resource *, void *), 305 int (*preproc)(struct acpi_resource *, void *),
306 void *preproc_data); 306 void *preproc_data);
307int acpi_dev_filter_resource_type(struct acpi_resource *ares,
308 unsigned long types);
309
310static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
311 void *arg)
312{
313 return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
314}
307 315
308int acpi_check_resource_conflict(const struct resource *res); 316int acpi_check_resource_conflict(const struct resource *res);
309 317
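
Editor's sketch, not part of the patch: the acpi.h hunk above drops struct resource_list_entry in favour of the shared resource_entry machinery and adds acpi_dev_filter_resource_type() plus a ready-made preproc callback. A minimal pairing with acpi_dev_get_resources(), assuming that function keeps its usual convention of returning the number of collected resources (that convention is not shown in this hunk), might look like:

#include <linux/acpi.h>
#include <linux/ioport.h>
#include <linux/list.h>

static int demo_count_mem_resources(struct acpi_device *adev)
{
	struct list_head res_list;
	int ret;

	INIT_LIST_HEAD(&res_list);

	/* keep only IORESOURCE_MEM entries via the new filter callback */
	ret = acpi_dev_get_resources(adev, &res_list,
				     acpi_dev_filter_resource_type_cb,
				     (void *)IORESOURCE_MEM);

	acpi_dev_free_resource_list(&res_list);
	return ret;	/* assumed: number of matching resources, or -errno */
}
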
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4d078cebafd2..2ee4888c1f47 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -66,8 +66,6 @@ struct cpufreq_policy {
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 67 should set cpufreq */
68 unsigned int cpu; /* cpu nr of CPU managing this policy */ 68 unsigned int cpu; /* cpu nr of CPU managing this policy */
69 unsigned int last_cpu; /* cpu nr of previous CPU that managed
70 * this policy */
71 struct clk *clk; 69 struct clk *clk;
72 struct cpufreq_cpuinfo cpuinfo;/* see above */ 70 struct cpufreq_cpuinfo cpuinfo;/* see above */
73 71
@@ -113,6 +111,9 @@ struct cpufreq_policy {
113 wait_queue_head_t transition_wait; 111 wait_queue_head_t transition_wait;
114 struct task_struct *transition_task; /* Task which is doing the transition */ 112 struct task_struct *transition_task; /* Task which is doing the transition */
115 113
114 /* cpufreq-stats */
115 struct cpufreq_stats *stats;
116
116 /* For cpufreq driver's internal use */ 117 /* For cpufreq driver's internal use */
117 void *driver_data; 118 void *driver_data;
118}; 119};
@@ -367,9 +368,8 @@ static inline void cpufreq_resume(void) {}
367#define CPUFREQ_INCOMPATIBLE (1) 368#define CPUFREQ_INCOMPATIBLE (1)
368#define CPUFREQ_NOTIFY (2) 369#define CPUFREQ_NOTIFY (2)
369#define CPUFREQ_START (3) 370#define CPUFREQ_START (3)
370#define CPUFREQ_UPDATE_POLICY_CPU (4) 371#define CPUFREQ_CREATE_POLICY (4)
371#define CPUFREQ_CREATE_POLICY (5) 372#define CPUFREQ_REMOVE_POLICY (5)
372#define CPUFREQ_REMOVE_POLICY (6)
373 373
374#ifdef CONFIG_CPU_FREQ 374#ifdef CONFIG_CPU_FREQ
375int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 375int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
new file mode 100644
index 000000000000..602fbbfcfeed
--- /dev/null
+++ b/include/linux/devfreq-event.h
@@ -0,0 +1,196 @@
1/*
2 * devfreq-event: a framework to provide raw data and events of devfreq devices
3 *
4 * Copyright (C) 2014 Samsung Electronics
5 * Author: Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __LINUX_DEVFREQ_EVENT_H__
13#define __LINUX_DEVFREQ_EVENT_H__
14
15#include <linux/device.h>
16
17/**
18 * struct devfreq_event_dev - the devfreq-event device
19 *
 20 * @node : Contains the devfreq-event device that has been registered.
21 * @dev : the device registered by devfreq-event class. dev.parent is
22 * the device using devfreq-event.
23 * @lock : a mutex to protect accessing devfreq-event.
 24 * @enable_count: the number of times the enable function has been called.
25 * @desc : the description for devfreq-event device.
26 *
27 * This structure contains devfreq-event device information.
28 */
29struct devfreq_event_dev {
30 struct list_head node;
31
32 struct device dev;
33 struct mutex lock;
34 u32 enable_count;
35
36 const struct devfreq_event_desc *desc;
37};
38
39/**
40 * struct devfreq_event_data - the devfreq-event data
41 *
42 * @load_count : load count of devfreq-event device for the given period.
43 * @total_count : total count of devfreq-event device for the given period.
44 * each count may represent a clock cycle, a time unit
45 * (ns/us/...), or anything the device driver wants.
46 * Generally, utilization is load_count / total_count.
47 *
48 * This structure contains the data of devfreq-event device for polling period.
49 */
50struct devfreq_event_data {
51 unsigned long load_count;
52 unsigned long total_count;
53};
54
55/**
56 * struct devfreq_event_ops - the operations of devfreq-event device
57 *
58 * @enable : Enable the devfreq-event device.
59 * @disable : Disable the devfreq-event device.
60 * @reset : Reset all setting of the devfreq-event device.
61 * @set_event : Set the specific event type for the devfreq-event device.
 62 * @get_event : Get the result of the devfreq-event device with a specific
63 * event type.
64 *
65 * This structure contains devfreq-event device operations which can be
66 * implemented by devfreq-event device drivers.
67 */
68struct devfreq_event_ops {
69 /* Optional functions */
70 int (*enable)(struct devfreq_event_dev *edev);
71 int (*disable)(struct devfreq_event_dev *edev);
72 int (*reset)(struct devfreq_event_dev *edev);
73
74 /* Mandatory functions */
75 int (*set_event)(struct devfreq_event_dev *edev);
76 int (*get_event)(struct devfreq_event_dev *edev,
77 struct devfreq_event_data *edata);
78};
79
80/**
81 * struct devfreq_event_desc - the descriptor of devfreq-event device
82 *
83 * @name : the name of devfreq-event device.
84 * @driver_data : the private data for devfreq-event driver.
85 * @ops : the operation to control devfreq-event device.
86 *
 87 * Each devfreq-event device is described with this structure.
 88 * This structure contains the various data for a devfreq-event device.
89 */
90struct devfreq_event_desc {
91 const char *name;
92 void *driver_data;
93
94 struct devfreq_event_ops *ops;
95};
96
97#if defined(CONFIG_PM_DEVFREQ_EVENT)
98extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev);
99extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev);
100extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev);
101extern int devfreq_event_set_event(struct devfreq_event_dev *edev);
102extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
103 struct devfreq_event_data *edata);
104extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
105extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
106 struct device *dev, int index);
107extern int devfreq_event_get_edev_count(struct device *dev);
108extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
109 struct devfreq_event_desc *desc);
110extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
111extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
112 struct devfreq_event_desc *desc);
113extern void devm_devfreq_event_remove_edev(struct device *dev,
114 struct devfreq_event_dev *edev);
115static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
116{
117 return edev->desc->driver_data;
118}
119#else
120static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
121{
122 return -EINVAL;
123}
124
125static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
126{
127 return -EINVAL;
128}
129
130static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
131{
132 return false;
133}
134
135static inline int devfreq_event_set_event(struct devfreq_event_dev *edev)
136{
137 return -EINVAL;
138}
139
140static inline int devfreq_event_get_event(struct devfreq_event_dev *edev,
141 struct devfreq_event_data *edata)
142{
143 return -EINVAL;
144}
145
146static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
147{
148 return -EINVAL;
149}
150
151static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
152{
153 return ERR_PTR(-EINVAL);
154}
155
156static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
157 struct device *dev, int index)
158{
159 return ERR_PTR(-EINVAL);
160}
161
162static inline int devfreq_event_get_edev_count(struct device *dev)
163{
164 return -EINVAL;
165}
166
167static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
168 struct devfreq_event_desc *desc)
169{
170 return ERR_PTR(-EINVAL);
171}
172
173static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
174{
175 return -EINVAL;
176}
177
178static inline struct devfreq_event_dev *devm_devfreq_event_add_edev(
179 struct device *dev,
180 struct devfreq_event_desc *desc)
181{
182 return ERR_PTR(-EINVAL);
183}
184
185static inline void devm_devfreq_event_remove_edev(struct device *dev,
186 struct devfreq_event_dev *edev)
187{
188}
189
190static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
191{
192 return NULL;
193}
194#endif /* CONFIG_PM_DEVFREQ_EVENT */
195
196#endif /* __LINUX_DEVFREQ_EVENT_H__ */
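
Editor's sketch, not part of the patch: the new devfreq-event.h above defines the whole provider API. A minimal, hypothetical driver (all demo_* names are invented; only the mandatory set_event/get_event ops are implemented) registering through devm_devfreq_event_add_edev() could look like:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/devfreq-event.h>

struct demo_ppmu {
	unsigned long load;
	unsigned long total;
};

static int demo_set_event(struct devfreq_event_dev *edev)
{
	/* program/clear the hardware counters for the next period */
	return 0;
}

static int demo_get_event(struct devfreq_event_dev *edev,
			  struct devfreq_event_data *edata)
{
	struct demo_ppmu *ppmu = devfreq_event_get_drvdata(edev);

	/* report the raw counts; the consumer derives load/total utilization */
	edata->load_count = ppmu->load;
	edata->total_count = ppmu->total;
	return 0;
}

static struct devfreq_event_ops demo_event_ops = {
	/* enable/disable/reset are optional and omitted here */
	.set_event = demo_set_event,
	.get_event = demo_get_event,
};

static struct demo_ppmu demo_data;

static struct devfreq_event_desc demo_event_desc = {
	.name		= "demo-ppmu",
	.driver_data	= &demo_data,
	.ops		= &demo_event_ops,
};

static int demo_probe(struct platform_device *pdev)
{
	struct devfreq_event_dev *edev;

	edev = devm_devfreq_event_add_edev(&pdev->dev, &demo_event_desc);
	return PTR_ERR_OR_ZERO(edev);
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= { .name = "demo-ppmu" },
};
module_platform_driver(demo_driver);
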
diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h
index 3713a7232dd8..c0d4d1e2a45c 100644
--- a/include/linux/dqblk_v1.h
+++ b/include/linux/dqblk_v1.h
@@ -5,9 +5,6 @@
5#ifndef _LINUX_DQBLK_V1_H 5#ifndef _LINUX_DQBLK_V1_H
6#define _LINUX_DQBLK_V1_H 6#define _LINUX_DQBLK_V1_H
7 7
8/* Root squash turned on */
9#define V1_DQF_RSQUASH 1
10
11/* Numbers of blocks needed for updates */ 8/* Numbers of blocks needed for updates */
12#define V1_INIT_ALLOC 1 9#define V1_INIT_ALLOC 1
13#define V1_INIT_REWRITE 1 10#define V1_INIT_REWRITE 1
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 60acab209701..f125b88443bd 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -623,7 +623,7 @@ struct inode {
623 atomic_t i_readcount; /* struct files open RO */ 623 atomic_t i_readcount; /* struct files open RO */
624#endif 624#endif
625 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 625 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
626 struct file_lock *i_flock; 626 struct file_lock_context *i_flctx;
627 struct address_space i_data; 627 struct address_space i_data;
628 struct list_head i_devices; 628 struct list_head i_devices;
629 union { 629 union {
@@ -883,6 +883,8 @@ static inline struct file *get_file(struct file *f)
883/* legacy typedef, should eventually be removed */ 883/* legacy typedef, should eventually be removed */
884typedef void *fl_owner_t; 884typedef void *fl_owner_t;
885 885
886struct file_lock;
887
886struct file_lock_operations { 888struct file_lock_operations {
887 void (*fl_copy_lock)(struct file_lock *, struct file_lock *); 889 void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
888 void (*fl_release_private)(struct file_lock *); 890 void (*fl_release_private)(struct file_lock *);
@@ -896,7 +898,7 @@ struct lock_manager_operations {
896 void (*lm_notify)(struct file_lock *); /* unblock callback */ 898 void (*lm_notify)(struct file_lock *); /* unblock callback */
897 int (*lm_grant)(struct file_lock *, int); 899 int (*lm_grant)(struct file_lock *, int);
898 bool (*lm_break)(struct file_lock *); 900 bool (*lm_break)(struct file_lock *);
899 int (*lm_change)(struct file_lock **, int, struct list_head *); 901 int (*lm_change)(struct file_lock *, int, struct list_head *);
900 void (*lm_setup)(struct file_lock *, void **); 902 void (*lm_setup)(struct file_lock *, void **);
901}; 903};
902 904
@@ -921,17 +923,17 @@ int locks_in_grace(struct net *);
921 * FIXME: should we create a separate "struct lock_request" to help distinguish 923 * FIXME: should we create a separate "struct lock_request" to help distinguish
922 * these two uses? 924 * these two uses?
923 * 925 *
 924 * The i_flock list is ordered by: 926 * The various i_flctx lists are ordered by:
925 * 927 *
926 * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX 928 * 1) lock owner
927 * 2) lock owner 929 * 2) lock range start
928 * 3) lock range start 930 * 3) lock range end
929 * 4) lock range end
930 * 931 *
931 * Obviously, the last two criteria only matter for POSIX locks. 932 * Obviously, the last two criteria only matter for POSIX locks.
932 */ 933 */
933struct file_lock { 934struct file_lock {
934 struct file_lock *fl_next; /* singly linked list for this inode */ 935 struct file_lock *fl_next; /* singly linked list for this inode */
936 struct list_head fl_list; /* link into file_lock_context */
935 struct hlist_node fl_link; /* node in global lists */ 937 struct hlist_node fl_link; /* node in global lists */
936 struct list_head fl_block; /* circular list of blocked processes */ 938 struct list_head fl_block; /* circular list of blocked processes */
937 fl_owner_t fl_owner; 939 fl_owner_t fl_owner;
@@ -962,6 +964,16 @@ struct file_lock {
962 } fl_u; 964 } fl_u;
963}; 965};
964 966
967struct file_lock_context {
968 spinlock_t flc_lock;
969 struct list_head flc_flock;
970 struct list_head flc_posix;
971 struct list_head flc_lease;
972 int flc_flock_cnt;
973 int flc_posix_cnt;
974 int flc_lease_cnt;
975};
976
965/* The following constant reflects the upper bound of the file/locking space */ 977/* The following constant reflects the upper bound of the file/locking space */
966#ifndef OFFSET_MAX 978#ifndef OFFSET_MAX
967#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) 979#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
@@ -988,6 +1000,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
988extern int fcntl_getlease(struct file *filp); 1000extern int fcntl_getlease(struct file *filp);
989 1001
990/* fs/locks.c */ 1002/* fs/locks.c */
1003void locks_free_lock_context(struct file_lock_context *ctx);
991void locks_free_lock(struct file_lock *fl); 1004void locks_free_lock(struct file_lock *fl);
992extern void locks_init_lock(struct file_lock *); 1005extern void locks_init_lock(struct file_lock *);
993extern struct file_lock * locks_alloc_lock(void); 1006extern struct file_lock * locks_alloc_lock(void);
@@ -1008,7 +1021,7 @@ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int t
1008extern void lease_get_mtime(struct inode *, struct timespec *time); 1021extern void lease_get_mtime(struct inode *, struct timespec *time);
1009extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); 1022extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
1010extern int vfs_setlease(struct file *, long, struct file_lock **, void **); 1023extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
1011extern int lease_modify(struct file_lock **, int, struct list_head *); 1024extern int lease_modify(struct file_lock *, int, struct list_head *);
1012#else /* !CONFIG_FILE_LOCKING */ 1025#else /* !CONFIG_FILE_LOCKING */
1013static inline int fcntl_getlk(struct file *file, unsigned int cmd, 1026static inline int fcntl_getlk(struct file *file, unsigned int cmd,
1014 struct flock __user *user) 1027 struct flock __user *user)
@@ -1045,6 +1058,11 @@ static inline int fcntl_getlease(struct file *filp)
1045 return F_UNLCK; 1058 return F_UNLCK;
1046} 1059}
1047 1060
1061static inline void
1062locks_free_lock_context(struct file_lock_context *ctx)
1063{
1064}
1065
1048static inline void locks_init_lock(struct file_lock *fl) 1066static inline void locks_init_lock(struct file_lock *fl)
1049{ 1067{
1050 return; 1068 return;
@@ -1135,7 +1153,7 @@ static inline int vfs_setlease(struct file *filp, long arg,
1135 return -EINVAL; 1153 return -EINVAL;
1136} 1154}
1137 1155
1138static inline int lease_modify(struct file_lock **before, int arg, 1156static inline int lease_modify(struct file_lock *fl, int arg,
1139 struct list_head *dispose) 1157 struct list_head *dispose)
1140{ 1158{
1141 return -EINVAL; 1159 return -EINVAL;
@@ -1957,7 +1975,7 @@ static inline int locks_verify_truncate(struct inode *inode,
1957 struct file *filp, 1975 struct file *filp,
1958 loff_t size) 1976 loff_t size)
1959{ 1977{
1960 if (inode->i_flock && mandatory_lock(inode)) 1978 if (inode->i_flctx && mandatory_lock(inode))
1961 return locks_mandatory_area( 1979 return locks_mandatory_area(
1962 FLOCK_VERIFY_WRITE, inode, filp, 1980 FLOCK_VERIFY_WRITE, inode, filp,
1963 size < inode->i_size ? size : inode->i_size, 1981 size < inode->i_size ? size : inode->i_size,
@@ -1971,11 +1989,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1971{ 1989{
1972 /* 1990 /*
1973 * Since this check is lockless, we must ensure that any refcounts 1991 * Since this check is lockless, we must ensure that any refcounts
1974 * taken are done before checking inode->i_flock. Otherwise, we could 1992 * taken are done before checking i_flctx->flc_lease. Otherwise, we
1975 * end up racing with tasks trying to set a new lease on this file. 1993 * could end up racing with tasks trying to set a new lease on this
1994 * file.
1976 */ 1995 */
1977 smp_mb(); 1996 smp_mb();
1978 if (inode->i_flock) 1997 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
1979 return __break_lease(inode, mode, FL_LEASE); 1998 return __break_lease(inode, mode, FL_LEASE);
1980 return 0; 1999 return 0;
1981} 2000}
@@ -1984,11 +2003,12 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
1984{ 2003{
1985 /* 2004 /*
1986 * Since this check is lockless, we must ensure that any refcounts 2005 * Since this check is lockless, we must ensure that any refcounts
1987 * taken are done before checking inode->i_flock. Otherwise, we could 2006 * taken are done before checking i_flctx->flc_lease. Otherwise, we
1988 * end up racing with tasks trying to set a new lease on this file. 2007 * could end up racing with tasks trying to set a new lease on this
2008 * file.
1989 */ 2009 */
1990 smp_mb(); 2010 smp_mb();
1991 if (inode->i_flock) 2011 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
1992 return __break_lease(inode, mode, FL_DELEG); 2012 return __break_lease(inode, mode, FL_DELEG);
1993 return 0; 2013 return 0;
1994} 2014}
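
Editor's sketch, not part of the patch: with i_flock replaced by the per-type lists in struct file_lock_context, code that used to test inode->i_flock now tests the relevant list under the same lockless idiom that break_lease()/break_deleg() use in the hunk above. A hedged sketch of that idiom for POSIX locks (the demo_ name is invented):

#include <linux/fs.h>
#include <linux/list.h>

static inline bool demo_inode_has_posix_locks(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	/* lockless fast path, mirroring break_lease()/break_deleg() */
	return ctx && !list_empty_careful(&ctx->flc_posix);
}
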
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 31229e0be90b..d32615280be9 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -956,15 +956,6 @@ void __log_wait_for_space(journal_t *journal);
956extern void __journal_drop_transaction(journal_t *, transaction_t *); 956extern void __journal_drop_transaction(journal_t *, transaction_t *);
957extern int cleanup_journal_tail(journal_t *); 957extern int cleanup_journal_tail(journal_t *);
958 958
959/* Debugging code only: */
960
961#define jbd_ENOSYS() \
962do { \
963 printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
964 current->state = TASK_UNINTERRUPTIBLE; \
965 schedule(); \
966} while (1)
967
968/* 959/*
969 * is_journal_abort 960 * is_journal_abort
970 * 961 *
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 704b9a599b26..20e7f78041c8 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1251,15 +1251,6 @@ void __jbd2_log_wait_for_space(journal_t *journal);
1251extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); 1251extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
1252extern int jbd2_cleanup_journal_tail(journal_t *); 1252extern int jbd2_cleanup_journal_tail(journal_t *);
1253 1253
1254/* Debugging code only: */
1255
1256#define jbd_ENOSYS() \
1257do { \
1258 printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
1259 current->state = TASK_UNINTERRUPTIBLE; \
1260 schedule(); \
1261} while (1)
1262
1263/* 1254/*
1264 * is_journal_abort 1255 * is_journal_abort
1265 * 1256 *
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a0da685bdb82..65db4aee738a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -284,6 +284,13 @@ struct vm_operations_struct {
284 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 284 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
285 unsigned long addr); 285 unsigned long addr);
286#endif 286#endif
287 /*
288 * Called by vm_normal_page() for special PTEs to find the
289 * page for @addr. This is useful if the default behavior
290 * (using pte_page()) would not find the correct page.
291 */
292 struct page *(*find_special_page)(struct vm_area_struct *vma,
293 unsigned long addr);
287}; 294};
288 295
289struct mmu_gather; 296struct mmu_gather;
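
Editor's sketch, not part of the patch: the mm.h hunk above adds the ->find_special_page() hook so vm_normal_page() can resolve the page behind a special PTE. A hypothetical user (struct demo_ctx and its pages[] lookup are invented for illustration) would wire it up roughly as follows:

#include <linux/mm.h>

struct demo_ctx {
	struct page **pages;	/* one backing page per VMA page offset */
};

static struct page *demo_find_special_page(struct vm_area_struct *vma,
					    unsigned long addr)
{
	struct demo_ctx *ctx = vma->vm_private_data;
	pgoff_t idx = (addr - vma->vm_start) >> PAGE_SHIFT;

	return ctx->pages[idx];	/* page backing this special PTE */
}

static const struct vm_operations_struct demo_vm_ops = {
	.find_special_page = demo_find_special_page,
};
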
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e1f5fcd79792..5ed7bdaf22d5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -121,8 +121,12 @@ enum pageflags {
121 PG_fscache = PG_private_2, /* page backed by cache */ 121 PG_fscache = PG_private_2, /* page backed by cache */
122 122
123 /* XEN */ 123 /* XEN */
124 /* Pinned in Xen as a read-only pagetable page. */
124 PG_pinned = PG_owner_priv_1, 125 PG_pinned = PG_owner_priv_1,
126 /* Pinned as part of domain save (see xen_mm_pin_all()). */
125 PG_savepinned = PG_dirty, 127 PG_savepinned = PG_dirty,
128 /* Has a grant mapping of another (foreign) domain's page. */
129 PG_foreign = PG_owner_priv_1,
126 130
127 /* SLOB */ 131 /* SLOB */
128 PG_slob_free = PG_private, 132 PG_slob_free = PG_private,
@@ -215,6 +219,7 @@ __PAGEFLAG(Slab, slab)
215PAGEFLAG(Checked, checked) /* Used by some filesystems */ 219PAGEFLAG(Checked, checked) /* Used by some filesystems */
216PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ 220PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
217PAGEFLAG(SavePinned, savepinned); /* Xen */ 221PAGEFLAG(SavePinned, savepinned); /* Xen */
222PAGEFLAG(Foreign, foreign); /* Xen */
218PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 223PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
219PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) 224PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
220 __SETPAGEFLAG(SwapBacked, swapbacked) 225 __SETPAGEFLAG(SwapBacked, swapbacked)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9603094ed59b..211e9da8a7d7 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -29,6 +29,7 @@
29#include <linux/atomic.h> 29#include <linux/atomic.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/resource_ext.h>
32#include <uapi/linux/pci.h> 33#include <uapi/linux/pci.h>
33 34
34#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
@@ -177,6 +178,8 @@ enum pci_dev_flags {
177 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), 178 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
178 /* Do not use bus resets for device */ 179 /* Do not use bus resets for device */
179 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6), 180 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
181 /* Do not use PM reset even if device advertises NoSoftRst- */
182 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
180}; 183};
181 184
182enum pci_irq_reroute_variant { 185enum pci_irq_reroute_variant {
@@ -397,16 +400,10 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
397 return (pdev->error_state != pci_channel_io_normal); 400 return (pdev->error_state != pci_channel_io_normal);
398} 401}
399 402
400struct pci_host_bridge_window {
401 struct list_head list;
402 struct resource *res; /* host bridge aperture (CPU address) */
403 resource_size_t offset; /* bus address + offset = CPU address */
404};
405
406struct pci_host_bridge { 403struct pci_host_bridge {
407 struct device dev; 404 struct device dev;
408 struct pci_bus *bus; /* root bus */ 405 struct pci_bus *bus; /* root bus */
409 struct list_head windows; /* pci_host_bridge_windows */ 406 struct list_head windows; /* resource_entry */
410 void (*release_fn)(struct pci_host_bridge *); 407 void (*release_fn)(struct pci_host_bridge *);
411 void *release_data; 408 void *release_data;
412}; 409};
@@ -562,6 +559,7 @@ static inline int pcibios_err_to_errno(int err)
562/* Low-level architecture-dependent routines */ 559/* Low-level architecture-dependent routines */
563 560
564struct pci_ops { 561struct pci_ops {
562 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
565 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); 563 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
566 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); 564 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
567}; 565};
@@ -859,6 +857,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
859 int where, u16 val); 857 int where, u16 val);
860int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, 858int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
861 int where, u32 val); 859 int where, u32 val);
860
861int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
862 int where, int size, u32 *val);
863int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
864 int where, int size, u32 val);
865int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
866 int where, int size, u32 *val);
867int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
868 int where, int size, u32 val);
869
862struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); 870struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
863 871
864static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) 872static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
@@ -1850,6 +1858,8 @@ static inline void pci_set_of_node(struct pci_dev *dev) { }
1850static inline void pci_release_of_node(struct pci_dev *dev) { } 1858static inline void pci_release_of_node(struct pci_dev *dev) { }
1851static inline void pci_set_bus_of_node(struct pci_bus *bus) { } 1859static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
1852static inline void pci_release_bus_of_node(struct pci_bus *bus) { } 1860static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
1861static inline struct device_node *
1862pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
1853#endif /* CONFIG_OF */ 1863#endif /* CONFIG_OF */
1854 1864
1855#ifdef CONFIG_EEH 1865#ifdef CONFIG_EEH
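
Editor's sketch, not part of the patch: the pci.h hunk above adds the ->map_bus() hook and the pci_generic_config_* accessors. A host-bridge driver would map (bus, devfn, where) to a config-space address and let the generic helpers do the actual accesses; the demo_* names and the ECAM-style offset math below are assumptions, not part of the patch:

#include <linux/pci.h>

static void __iomem *demo_cfg_base;	/* ioremapped config window */

static void __iomem *demo_map_bus(struct pci_bus *bus, unsigned int devfn,
				  int where)
{
	/* assumed ECAM-style layout: 1 MiB per bus, 4 KiB per function */
	return demo_cfg_base + (bus->number << 20) + (devfn << 12) + where;
}

static struct pci_ops demo_pci_ops = {
	.map_bus = demo_map_bus,
	.read	 = pci_generic_config_read,
	.write	 = pci_generic_config_write,
};
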
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 8b5976364619..e2f1be6dd9dd 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -597,7 +597,7 @@ struct dev_pm_info {
597 597
598extern void update_pm_runtime_accounting(struct device *dev); 598extern void update_pm_runtime_accounting(struct device *dev);
599extern int dev_pm_get_subsys_data(struct device *dev); 599extern int dev_pm_get_subsys_data(struct device *dev);
600extern int dev_pm_put_subsys_data(struct device *dev); 600extern void dev_pm_put_subsys_data(struct device *dev);
601 601
602/* 602/*
603 * Power domains provide callbacks that are executed during system suspend, 603 * Power domains provide callbacks that are executed during system suspend,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a9edab2c787a..080e778118ba 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -113,8 +113,6 @@ struct generic_pm_domain_data {
113 struct pm_domain_data base; 113 struct pm_domain_data base;
114 struct gpd_timing_data td; 114 struct gpd_timing_data td;
115 struct notifier_block nb; 115 struct notifier_block nb;
116 struct mutex lock;
117 unsigned int refcount;
118 int need_restore; 116 int need_restore;
119}; 117};
120 118
@@ -140,7 +138,6 @@ extern int __pm_genpd_name_add_device(const char *domain_name,
140 138
141extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, 139extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
142 struct device *dev); 140 struct device *dev);
143extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
144extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 141extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
145 struct generic_pm_domain *new_subdomain); 142 struct generic_pm_domain *new_subdomain);
146extern int pm_genpd_add_subdomain_names(const char *master_name, 143extern int pm_genpd_add_subdomain_names(const char *master_name,
@@ -187,7 +184,6 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
187{ 184{
188 return -ENOSYS; 185 return -ENOSYS;
189} 186}
190static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
191static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 187static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
192 struct generic_pm_domain *new_sd) 188 struct generic_pm_domain *new_sd)
193{ 189{
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 097d7eb2441e..d534e8ed308a 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -216,19 +216,21 @@ struct mem_dqinfo {
216 unsigned long dqi_flags; 216 unsigned long dqi_flags;
217 unsigned int dqi_bgrace; 217 unsigned int dqi_bgrace;
218 unsigned int dqi_igrace; 218 unsigned int dqi_igrace;
219 qsize_t dqi_maxblimit; 219 qsize_t dqi_max_spc_limit;
220 qsize_t dqi_maxilimit; 220 qsize_t dqi_max_ino_limit;
221 void *dqi_priv; 221 void *dqi_priv;
222}; 222};
223 223
224struct super_block; 224struct super_block;
225 225
226#define DQF_MASK 0xffff /* Mask for format specific flags */ 226/* Mask for flags passed to userspace */
227#define DQF_GETINFO_MASK 0x1ffff /* Mask for flags passed to userspace */ 227#define DQF_GETINFO_MASK (DQF_ROOT_SQUASH | DQF_SYS_FILE)
228#define DQF_SETINFO_MASK 0xffff /* Mask for flags modifiable from userspace */ 228/* Mask for flags modifiable from userspace */
229#define DQF_SYS_FILE_B 16 229#define DQF_SETINFO_MASK DQF_ROOT_SQUASH
230#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B) /* Quota file stored as system file */ 230
231#define DQF_INFO_DIRTY_B 31 231enum {
232 DQF_INFO_DIRTY_B = DQF_PRIVATE,
233};
232#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */ 234#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
233 235
234extern void mark_info_dirty(struct super_block *sb, int type); 236extern void mark_info_dirty(struct super_block *sb, int type);
@@ -367,15 +369,15 @@ struct qc_dqblk {
367/* Operations handling requests from userspace */ 369/* Operations handling requests from userspace */
368struct quotactl_ops { 370struct quotactl_ops {
369 int (*quota_on)(struct super_block *, int, int, struct path *); 371 int (*quota_on)(struct super_block *, int, int, struct path *);
370 int (*quota_on_meta)(struct super_block *, int, int);
371 int (*quota_off)(struct super_block *, int); 372 int (*quota_off)(struct super_block *, int);
373 int (*quota_enable)(struct super_block *, unsigned int);
374 int (*quota_disable)(struct super_block *, unsigned int);
372 int (*quota_sync)(struct super_block *, int); 375 int (*quota_sync)(struct super_block *, int);
373 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 376 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
374 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 377 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
375 int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); 378 int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
376 int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); 379 int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
377 int (*get_xstate)(struct super_block *, struct fs_quota_stat *); 380 int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
378 int (*set_xstate)(struct super_block *, unsigned int, int);
379 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); 381 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
380 int (*rm_xquota)(struct super_block *, unsigned int); 382 int (*rm_xquota)(struct super_block *, unsigned int);
381}; 383};
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 29e3455f7d41..df73258cca47 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -166,6 +166,7 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type)
166 */ 166 */
167extern const struct dquot_operations dquot_operations; 167extern const struct dquot_operations dquot_operations;
168extern const struct quotactl_ops dquot_quotactl_ops; 168extern const struct quotactl_ops dquot_quotactl_ops;
169extern const struct quotactl_ops dquot_quotactl_sysfile_ops;
169 170
170#else 171#else
171 172
@@ -386,4 +387,6 @@ static inline void dquot_release_reservation_block(struct inode *inode,
386 __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE); 387 __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE);
387} 388}
388 389
390unsigned int qtype_enforce_flag(int type);
391
389#endif /* _LINUX_QUOTAOPS_ */ 392#endif /* _LINUX_QUOTAOPS_ */
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
new file mode 100644
index 000000000000..e2bf63d881d4
--- /dev/null
+++ b/include/linux/resource_ext.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2015, Intel Corporation
3 * Author: Jiang Liu <jiang.liu@linux.intel.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#ifndef _LINUX_RESOURCE_EXT_H
15#define _LINUX_RESOURCE_EXT_H
16#include <linux/types.h>
17#include <linux/list.h>
18#include <linux/ioport.h>
19#include <linux/slab.h>
20
21/* Represent resource window for bridge devices */
22struct resource_win {
23 struct resource res; /* In master (CPU) address space */
24 resource_size_t offset; /* Translation offset for bridge */
25};
26
27/*
28 * Common resource list management data structure and interfaces to support
29 * ACPI, PNP and PCI host bridge etc.
30 */
31struct resource_entry {
32 struct list_head node;
33 struct resource *res; /* In master (CPU) address space */
34 resource_size_t offset; /* Translation offset for bridge */
35 struct resource __res; /* Default storage for res */
36};
37
38extern struct resource_entry *
39resource_list_create_entry(struct resource *res, size_t extra_size);
40extern void resource_list_free(struct list_head *head);
41
42static inline void resource_list_add(struct resource_entry *entry,
43 struct list_head *head)
44{
45 list_add(&entry->node, head);
46}
47
48static inline void resource_list_add_tail(struct resource_entry *entry,
49 struct list_head *head)
50{
51 list_add_tail(&entry->node, head);
52}
53
54static inline void resource_list_del(struct resource_entry *entry)
55{
56 list_del(&entry->node);
57}
58
59static inline void resource_list_free_entry(struct resource_entry *entry)
60{
61 kfree(entry);
62}
63
64static inline void
65resource_list_destroy_entry(struct resource_entry *entry)
66{
67 resource_list_del(entry);
68 resource_list_free_entry(entry);
69}
70
71#define resource_list_for_each_entry(entry, list) \
72 list_for_each_entry((entry), (list), node)
73
74#define resource_list_for_each_entry_safe(entry, tmp, list) \
75 list_for_each_entry_safe((entry), (tmp), (list), node)
76
77#endif /* _LINUX_RESOURCE_EXT_H */
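
Editor's sketch, not part of the patch: the new resource_ext.h above, together with the kernel/resource.c hunk later in this diff, provides the shared resource-list helpers used by ACPI, PNP and the PCI host bridges. A hedged usage sketch of the allocate/add/iterate/free cycle (the window values are invented):

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/resource_ext.h>

static struct resource demo_window = {
	.name	= "demo bridge window",
	.start	= 0x80000000,
	.end	= 0x8fffffff,
	.flags	= IORESOURCE_MEM,
};

static int demo_collect_windows(struct list_head *resources)
{
	struct resource_entry *entry;

	/* passing NULL instead of &demo_window would use the embedded __res */
	entry = resource_list_create_entry(&demo_window, 0);
	if (!entry)
		return -ENOMEM;

	entry->offset = 0;	/* CPU address == bus address here */
	resource_list_add_tail(entry, resources);

	resource_list_for_each_entry(entry, resources)
		pr_info("window %pR, offset %#llx\n", entry->res,
			(unsigned long long)entry->offset);

	resource_list_free(resources);	/* destroys every entry on the list */
	return 0;
}
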
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4a1d0cc38ff2..efe3443572ba 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -451,6 +451,10 @@
451#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ 451#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
452#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */ 452#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
453#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ 453#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
454#define PCI_EXP_DEVCTL_READRQ_128B 0x0000 /* 128 Bytes */
455#define PCI_EXP_DEVCTL_READRQ_256B 0x1000 /* 256 Bytes */
456#define PCI_EXP_DEVCTL_READRQ_512B 0x2000 /* 512 Bytes */
457#define PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */
454#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ 458#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
455#define PCI_EXP_DEVSTA 10 /* Device Status */ 459#define PCI_EXP_DEVSTA 10 /* Device Status */
456#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */ 460#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
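
Editor's sketch, not part of the patch: the pci_regs.h hunk above only names the existing Max_Read_Request_Size encodings. One way a driver might use them is a read-modify-write of PCI_EXP_DEVCTL; pcie_capability_clear_and_set_word() below is assumed from the existing PCI core and is not added by this diff:

#include <linux/pci.h>

static int demo_set_readrq_512(struct pci_dev *dev)
{
	/* clear the READRQ field, then set the 512-byte encoding */
	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_READRQ,
						  PCI_EXP_DEVCTL_READRQ_512B);
}
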
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 3b6cfbeb086d..1f49b8341c99 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -126,10 +126,22 @@ struct if_dqblk {
126#define IIF_FLAGS 4 126#define IIF_FLAGS 4
127#define IIF_ALL (IIF_BGRACE | IIF_IGRACE | IIF_FLAGS) 127#define IIF_ALL (IIF_BGRACE | IIF_IGRACE | IIF_FLAGS)
128 128
129enum {
130 DQF_ROOT_SQUASH_B = 0,
131 DQF_SYS_FILE_B = 16,
132 /* Kernel internal flags invisible to userspace */
133 DQF_PRIVATE
134};
135
136/* Root squash enabled (for v1 quota format) */
137#define DQF_ROOT_SQUASH (1 << DQF_ROOT_SQUASH_B)
138/* Quota stored in a system file */
139#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B)
140
129struct if_dqinfo { 141struct if_dqinfo {
130 __u64 dqi_bgrace; 142 __u64 dqi_bgrace;
131 __u64 dqi_igrace; 143 __u64 dqi_igrace;
 132 __u32 dqi_flags; 144 __u32 dqi_flags; /* DQF_* */
133 __u32 dqi_valid; 145 __u32 dqi_valid;
134}; 146};
135 147
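
Editor's check, not part of the patch: the uapi quota.h hunk above moves the DQF_* bit numbers into an enum, and the earlier linux/quota.h hunk derives the userspace-visible masks from them instead of hard-coding 0x1ffff/0xffff. A standalone plain-C check of the resulting values:

#include <stdio.h>

enum {
	DQF_ROOT_SQUASH_B = 0,
	DQF_SYS_FILE_B = 16,
	DQF_PRIVATE		/* kernel-internal flags start here */
};

#define DQF_ROOT_SQUASH		(1 << DQF_ROOT_SQUASH_B)
#define DQF_SYS_FILE		(1 << DQF_SYS_FILE_B)
#define DQF_GETINFO_MASK	(DQF_ROOT_SQUASH | DQF_SYS_FILE)
#define DQF_SETINFO_MASK	DQF_ROOT_SQUASH

int main(void)
{
	/* prints 0x10001 and 0x1, both derived from the bit numbers */
	printf("GETINFO mask 0x%x, SETINFO mask 0x%x\n",
	       DQF_GETINFO_MASK, DQF_SETINFO_MASK);
	return 0;
}
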
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 3387465b9caa..143ca5ffab7a 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -45,6 +45,8 @@
45#include <asm/xen/hypervisor.h> 45#include <asm/xen/hypervisor.h>
46 46
47#include <xen/features.h> 47#include <xen/features.h>
48#include <linux/mm_types.h>
49#include <linux/page-flags.h>
48 50
49#define GNTTAB_RESERVED_XENSTORE 1 51#define GNTTAB_RESERVED_XENSTORE 1
50 52
@@ -58,6 +60,22 @@ struct gnttab_free_callback {
58 u16 count; 60 u16 count;
59}; 61};
60 62
63struct gntab_unmap_queue_data;
64
65typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);
66
67struct gntab_unmap_queue_data
68{
69 struct delayed_work gnttab_work;
70 void *data;
71 gnttab_unmap_refs_done done;
72 struct gnttab_unmap_grant_ref *unmap_ops;
73 struct gnttab_unmap_grant_ref *kunmap_ops;
74 struct page **pages;
75 unsigned int count;
76 unsigned int age;
77};
78
61int gnttab_init(void); 79int gnttab_init(void);
62int gnttab_suspend(void); 80int gnttab_suspend(void);
63int gnttab_resume(void); 81int gnttab_resume(void);
@@ -163,12 +181,17 @@ void gnttab_free_auto_xlat_frames(void);
163 181
164#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) 182#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
165 183
184int gnttab_alloc_pages(int nr_pages, struct page **pages);
185void gnttab_free_pages(int nr_pages, struct page **pages);
186
166int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 187int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
167 struct gnttab_map_grant_ref *kmap_ops, 188 struct gnttab_map_grant_ref *kmap_ops,
168 struct page **pages, unsigned int count); 189 struct page **pages, unsigned int count);
169int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 190int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
170 struct gnttab_map_grant_ref *kunmap_ops, 191 struct gnttab_unmap_grant_ref *kunmap_ops,
171 struct page **pages, unsigned int count); 192 struct page **pages, unsigned int count);
193void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
194
172 195
173/* Perform a batch of grant map/copy operations. Retry every batch slot 196/* Perform a batch of grant map/copy operations. Retry every batch slot
174 * for which the hypervisor returns GNTST_eagain. This is typically due 197 * for which the hypervisor returns GNTST_eagain. This is typically due
@@ -182,4 +205,22 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
182void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count); 205void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
183void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count); 206void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
184 207
208
209struct xen_page_foreign {
210 domid_t domid;
211 grant_ref_t gref;
212};
213
214static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
215{
216 if (!PageForeign(page))
217 return NULL;
218#if BITS_PER_LONG < 64
219 return (struct xen_page_foreign *)page->private;
220#else
221 BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
222 return (struct xen_page_foreign *)&page->private;
223#endif
224}
225
185#endif /* __ASM_GNTTAB_H__ */ 226#endif /* __ASM_GNTTAB_H__ */
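
Editor's sketch, not part of the patch: the grant_table.h hunk above introduces gnttab_alloc_pages()/gnttab_free_pages(), the asynchronous unmap queue, and the xen_page_foreign() helper. A hedged backend-style sketch of the async unmap path (all demo_* names are invented):

#include <linux/printk.h>
#include <xen/grant_table.h>

static void demo_unmap_done(int result, struct gntab_unmap_queue_data *data)
{
	if (result)
		pr_err("demo: grant unmap failed: %d\n", result);

	/* pages were obtained with gnttab_alloc_pages() elsewhere */
	gnttab_free_pages(data->count, data->pages);
}

static void demo_unmap_async(struct gnttab_unmap_grant_ref *unmap_ops,
			     struct page **pages, unsigned int count)
{
	static struct gntab_unmap_queue_data unmap_data;

	unmap_data.unmap_ops  = unmap_ops;
	unmap_data.kunmap_ops = NULL;
	unmap_data.pages      = pages;
	unmap_data.count      = count;
	unmap_data.done       = demo_unmap_done;

	/* busy grants are retried from a workqueue; done() runs on completion */
	gnttab_unmap_refs_async(&unmap_data);
}
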
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6ccdba25..6ad3d110bb81 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -41,6 +41,12 @@
41/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ 41/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
42#define XENFEAT_mmu_pt_update_preserve_ad 5 42#define XENFEAT_mmu_pt_update_preserve_ad 5
43 43
44/*
45 * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
46 * available pte bits.
47 */
48#define XENFEAT_gnttab_map_avail_bits 7
49
44/* x86: Does this Xen host support the HVM callback vector type? */ 50/* x86: Does this Xen host support the HVM callback vector type? */
45#define XENFEAT_hvm_callback_vector 8 51#define XENFEAT_hvm_callback_vector 8
46 52
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index bcce56439d64..56806bc90c2f 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -526,6 +526,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
526#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) 526#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
527 527
528/* 528/*
529 * Bits to be placed in guest kernel available PTE bits (architecture
530 * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
531 */
532#define _GNTMAP_guest_avail0 (16)
533#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
534
535/*
529 * Values for error status returns. All errors are -ve. 536 * Values for error status returns. All errors are -ve.
530 */ 537 */
531#define GNTST_okay (0) /* Normal return. */ 538#define GNTST_okay (0) /* Normal return. */
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 5f4c006c4b1e..97b0df71303e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -41,6 +41,8 @@
41#include <linux/platform_device.h> 41#include <linux/platform_device.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/kernel.h> 43#include <linux/kernel.h>
44#include <linux/debugfs.h>
45#include <linux/seq_file.h>
44 46
45#include <linux/uaccess.h> 47#include <linux/uaccess.h>
46#include <linux/export.h> 48#include <linux/export.h>
@@ -182,6 +184,81 @@ static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
182 c->target_value = value; 184 c->target_value = value;
183} 185}
184 186
187static inline int pm_qos_get_value(struct pm_qos_constraints *c);
188static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
189{
190 struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
191 struct pm_qos_constraints *c;
192 struct pm_qos_request *req;
193 char *type;
194 unsigned long flags;
195 int tot_reqs = 0;
196 int active_reqs = 0;
197
198 if (IS_ERR_OR_NULL(qos)) {
199 pr_err("%s: bad qos param!\n", __func__);
200 return -EINVAL;
201 }
202 c = qos->constraints;
203 if (IS_ERR_OR_NULL(c)) {
204 pr_err("%s: Bad constraints on qos?\n", __func__);
205 return -EINVAL;
206 }
207
208 /* Lock to ensure we have a snapshot */
209 spin_lock_irqsave(&pm_qos_lock, flags);
210 if (plist_head_empty(&c->list)) {
211 seq_puts(s, "Empty!\n");
212 goto out;
213 }
214
215 switch (c->type) {
216 case PM_QOS_MIN:
217 type = "Minimum";
218 break;
219 case PM_QOS_MAX:
220 type = "Maximum";
221 break;
222 case PM_QOS_SUM:
223 type = "Sum";
224 break;
225 default:
226 type = "Unknown";
227 }
228
229 plist_for_each_entry(req, &c->list, node) {
230 char *state = "Default";
231
232 if ((req->node).prio != c->default_value) {
233 active_reqs++;
234 state = "Active";
235 }
236 tot_reqs++;
237 seq_printf(s, "%d: %d: %s\n", tot_reqs,
238 (req->node).prio, state);
239 }
240
241 seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
242 type, pm_qos_get_value(c), active_reqs, tot_reqs);
243
244out:
245 spin_unlock_irqrestore(&pm_qos_lock, flags);
246 return 0;
247}
248
249static int pm_qos_dbg_open(struct inode *inode, struct file *file)
250{
251 return single_open(file, pm_qos_dbg_show_requests,
252 inode->i_private);
253}
254
255static const struct file_operations pm_qos_debug_fops = {
256 .open = pm_qos_dbg_open,
257 .read = seq_read,
258 .llseek = seq_lseek,
259 .release = single_release,
260};
261
185/** 262/**
186 * pm_qos_update_target - manages the constraints list and calls the notifiers 263 * pm_qos_update_target - manages the constraints list and calls the notifiers
187 * if needed 264 * if needed
@@ -509,12 +586,17 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
509EXPORT_SYMBOL_GPL(pm_qos_remove_notifier); 586EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
510 587
511/* User space interface to PM QoS classes via misc devices */ 588/* User space interface to PM QoS classes via misc devices */
512static int register_pm_qos_misc(struct pm_qos_object *qos) 589static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
513{ 590{
514 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR; 591 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
515 qos->pm_qos_power_miscdev.name = qos->name; 592 qos->pm_qos_power_miscdev.name = qos->name;
516 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops; 593 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
517 594
595 if (d) {
596 (void)debugfs_create_file(qos->name, S_IRUGO, d,
597 (void *)qos, &pm_qos_debug_fops);
598 }
599
518 return misc_register(&qos->pm_qos_power_miscdev); 600 return misc_register(&qos->pm_qos_power_miscdev);
519} 601}
520 602
@@ -608,11 +690,16 @@ static int __init pm_qos_power_init(void)
608{ 690{
609 int ret = 0; 691 int ret = 0;
610 int i; 692 int i;
693 struct dentry *d;
611 694
612 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES); 695 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
613 696
697 d = debugfs_create_dir("pm_qos", NULL);
698 if (IS_ERR_OR_NULL(d))
699 d = NULL;
700
614 for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) { 701 for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
615 ret = register_pm_qos_misc(pm_qos_array[i]); 702 ret = register_pm_qos_misc(pm_qos_array[i], d);
616 if (ret < 0) { 703 if (ret < 0) {
617 printk(KERN_ERR "pm_qos_param: %s setup failed\n", 704 printk(KERN_ERR "pm_qos_param: %s setup failed\n",
618 pm_qos_array[i]->name); 705 pm_qos_array[i]->name);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0c40c16174b4..c24d5a23bf93 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1472,9 +1472,9 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1472/** 1472/**
1473 * free_unnecessary_pages - Release preallocated pages not needed for the image 1473 * free_unnecessary_pages - Release preallocated pages not needed for the image
1474 */ 1474 */
1475static void free_unnecessary_pages(void) 1475static unsigned long free_unnecessary_pages(void)
1476{ 1476{
1477 unsigned long save, to_free_normal, to_free_highmem; 1477 unsigned long save, to_free_normal, to_free_highmem, free;
1478 1478
1479 save = count_data_pages(); 1479 save = count_data_pages();
1480 if (alloc_normal >= save) { 1480 if (alloc_normal >= save) {
@@ -1495,6 +1495,7 @@ static void free_unnecessary_pages(void)
1495 else 1495 else
1496 to_free_normal = 0; 1496 to_free_normal = 0;
1497 } 1497 }
1498 free = to_free_normal + to_free_highmem;
1498 1499
1499 memory_bm_position_reset(&copy_bm); 1500 memory_bm_position_reset(&copy_bm);
1500 1501
@@ -1518,6 +1519,8 @@ static void free_unnecessary_pages(void)
1518 swsusp_unset_page_free(page); 1519 swsusp_unset_page_free(page);
1519 __free_page(page); 1520 __free_page(page);
1520 } 1521 }
1522
1523 return free;
1521} 1524}
1522 1525
1523/** 1526/**
@@ -1707,7 +1710,7 @@ int hibernate_preallocate_memory(void)
1707 * pages in memory, but we have allocated more. Release the excessive 1710 * pages in memory, but we have allocated more. Release the excessive
1708 * ones now. 1711 * ones now.
1709 */ 1712 */
1710 free_unnecessary_pages(); 1713 pages -= free_unnecessary_pages();
1711 1714
1712 out: 1715 out:
1713 stop = ktime_get(); 1716 stop = ktime_get();
@@ -2310,8 +2313,6 @@ static inline void free_highmem_data(void)
2310 free_image_page(buffer, PG_UNSAFE_CLEAR); 2313 free_image_page(buffer, PG_UNSAFE_CLEAR);
2311} 2314}
2312#else 2315#else
2313static inline int get_safe_write_buffer(void) { return 0; }
2314
2315static unsigned int 2316static unsigned int
2316count_highmem_image_pages(struct memory_bitmap *bm) { return 0; } 2317count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2317 2318
diff --git a/kernel/resource.c b/kernel/resource.c
index 0bcebffc4e77..19f2357dfda3 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -22,6 +22,7 @@
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/pfn.h> 23#include <linux/pfn.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/resource_ext.h>
25#include <asm/io.h> 26#include <asm/io.h>
26 27
27 28
@@ -1529,6 +1530,30 @@ int iomem_is_exclusive(u64 addr)
1529 return err; 1530 return err;
1530} 1531}
1531 1532
1533struct resource_entry *resource_list_create_entry(struct resource *res,
1534 size_t extra_size)
1535{
1536 struct resource_entry *entry;
1537
1538 entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1539 if (entry) {
1540 INIT_LIST_HEAD(&entry->node);
1541 entry->res = res ? res : &entry->__res;
1542 }
1543
1544 return entry;
1545}
1546EXPORT_SYMBOL(resource_list_create_entry);
1547
1548void resource_list_free(struct list_head *head)
1549{
1550 struct resource_entry *entry, *tmp;
1551
1552 list_for_each_entry_safe(entry, tmp, head, node)
1553 resource_list_destroy_entry(entry);
1554}
1555EXPORT_SYMBOL(resource_list_free);
1556
1532static int __init strict_iomem(char *str) 1557static int __init strict_iomem(char *str)
1533{ 1558{
1534 if (strstr(str, "relaxed")) 1559 if (strstr(str, "relaxed"))
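The two helpers added above give callers a uniform way to build and destroy lists of struct resource_entry. A hedged kernel-style sketch of typical usage, assuming a hypothetical caller with an array of windows to publish; only resource_list_create_entry() and resource_list_free() come from the hunk, the rest is illustrative:

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/resource_ext.h>

/* Caller initializes *resources with INIT_LIST_HEAD() beforehand. */
static int example_collect_windows(struct resource *windows, int count,
				   struct list_head *resources)
{
	int i;

	for (i = 0; i < count; i++) {
		/* Passing NULL makes the entry use its embedded __res. */
		struct resource_entry *entry =
			resource_list_create_entry(NULL, 0);

		if (!entry) {
			resource_list_free(resources);	/* drop what was built */
			return -ENOMEM;
		}
		*entry->res = windows[i];
		list_add_tail(&entry->node, resources);
	}
	return 0;
}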
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 1c71382b283d..eb4220a132ec 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -13,5 +13,6 @@
13#define CREATE_TRACE_POINTS 13#define CREATE_TRACE_POINTS
14#include <trace/events/power.h> 14#include <trace/events/power.h>
15 15
16EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
16EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle); 17EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
17 18
diff --git a/mm/memory.c b/mm/memory.c
index 988d3099a25d..d63849b5188f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -754,6 +754,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
754 if (HAVE_PTE_SPECIAL) { 754 if (HAVE_PTE_SPECIAL) {
755 if (likely(!pte_special(pte))) 755 if (likely(!pte_special(pte)))
756 goto check_pfn; 756 goto check_pfn;
757 if (vma->vm_ops && vma->vm_ops->find_special_page)
758 return vma->vm_ops->find_special_page(vma, addr);
757 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) 759 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
758 return NULL; 760 return NULL;
759 if (!is_zero_pfn(pfn)) 761 if (!is_zero_pfn(pfn))
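The new find_special_page() callback lets a driver hand vm_normal_page() the struct page behind a pte_special() mapping. A hypothetical driver-side sketch; only the callback signature is taken from the call site above, while the private-data layout and lookup are assumptions:

#include <linux/mm.h>

struct example_priv {
	struct page **pages;		/* pages backing this special mapping */
	unsigned long nr_pages;
};

static struct page *example_find_special_page(struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct example_priv *priv = vma->vm_private_data;
	unsigned long idx = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (idx >= priv->nr_pages)
		return NULL;
	return priv->pages[idx];	/* page behind the special pte */
}

static const struct vm_operations_struct example_vm_ops = {
	.find_special_page = example_find_special_page,
};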
diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c
index 13c214466d3b..1c56bf58eff9 100644
--- a/sound/oss/dmasound/dmasound_atari.c
+++ b/sound/oss/dmasound/dmasound_atari.c
@@ -851,7 +851,7 @@ static int __init AtaIrqInit(void)
851 st_mfp.tim_dt_a = 1; /* Cause interrupt after first event. */ 851 st_mfp.tim_dt_a = 1; /* Cause interrupt after first event. */
852 st_mfp.tim_ct_a = 8; /* Turn on event counting. */ 852 st_mfp.tim_ct_a = 8; /* Turn on event counting. */
853 /* Register interrupt handler. */ 853 /* Register interrupt handler. */
854 if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, IRQ_TYPE_SLOW, "DMA sound", 854 if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, 0, "DMA sound",
855 AtaInterrupt)) 855 AtaInterrupt))
856 return 0; 856 return 0;
857 st_mfp.int_en_a |= 0x20; /* Turn interrupt on. */ 857 st_mfp.int_en_a |= 0x20; /* Turn interrupt on. */
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index f4b953354ff7..eec688041500 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 2f0f34a36db4..5da129e10aa2 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/oslibcfs.c b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
index c13ff9c51d74..b51e40a9a120 100644
--- a/tools/power/acpi/os_specific/service_layers/oslibcfs.c
+++ b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 0dc2485dedf5..92f1fd700344 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index 733f9e490fc4..e153fcb12b1a 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 99b47b6194a3..3853a7350440 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 7ccb073f8316..6858c0893c91 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index a2d37d610639..84bdef0136cb 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 24d32968802d..c736adf5fb55 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index d470046a6d81..8f2fe168228e 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 853b4da22c3e..d0ba6535f5af 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 2e2ba2efa0d9..3ed7c0476d48 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c
209 209
210$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) 210$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
211 $(ECHO) " CC " $@ 211 $(ECHO) " CC " $@
212 $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@ 212 $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@
213 $(QUIET) $(STRIPCMD) $@ 213 $(QUIET) $(STRIPCMD) $@
214 214
215$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) 215$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 56bfb523c5bb..9b950699e63d 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -12,16 +12,16 @@ turbostat \- Report processor frequency and idle statistics
12.RB [ "\-i interval_sec" ] 12.RB [ "\-i interval_sec" ]
13.SH DESCRIPTION 13.SH DESCRIPTION
14\fBturbostat \fP reports processor topology, frequency, 14\fBturbostat \fP reports processor topology, frequency,
15idle power-state statistics, temperature and power on modern X86 processors. 15idle power-state statistics, temperature and power on X86 processors.
16Either \fBcommand\fP is forked and statistics are printed 16There are two ways to invoke turbostat.
17upon its completion, or statistics are printed periodically. 17The first method is to supply a
18 18\fBcommand\fP, which is forked and statistics are printed
19\fBturbostat \fP 19upon its completion.
20must be run on root, and 20The second method is to omit the command,
21minimally requires that the processor 21and turbostat will print statistics every 5 seconds.
22supports an "invariant" TSC, plus the APERF and MPERF MSRs. 22The 5-second interval can be changed using the -i option.
23Additional information is reported depending on hardware counter support. 23
24 24Some information is not available on older processors.
25.SS Options 25.SS Options
26The \fB-p\fP option limits output to the 1st thread in 1st core of each package. 26The \fB-p\fP option limits output to the 1st thread in 1st core of each package.
27.PP 27.PP
@@ -130,12 +130,13 @@ cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1)
130 ... 130 ...
131.fi 131.fi
132The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency 132The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
133available at the minimum package voltage. The \fBTSC frequency\fP is the nominal 133available at the minimum package voltage. The \fBTSC frequency\fP is the base
134maximum frequency of the processor if turbo-mode were not available. This frequency 134frequency of the processor -- this should match the brand string
135in /proc/cpuinfo. This base frequency
135should be sustainable on all CPUs indefinitely, given nominal power and cooling. 136should be sustainable on all CPUs indefinitely, given nominal power and cooling.
136The remaining rows show what maximum turbo frequency is possible 137The remaining rows show what maximum turbo frequency is possible
137depending on the number of idle cores. Note that this information is 138depending on the number of idle cores. Note that not all information is
138not available on all processors. 139available on all processors.
139.SH FORK EXAMPLE 140.SH FORK EXAMPLE
140If turbostat is invoked with a command, it will fork that command 141If turbostat is invoked with a command, it will fork that command
141and output the statistics gathered when the command exits. 142and output the statistics gathered when the command exits.
@@ -176,6 +177,11 @@ not including any non-busy idle time.
176 177
177.B "turbostat " 178.B "turbostat "
178must be run as root. 179must be run as root.
180Alternatively, non-root users can be granted access to run turbostat this way:
181
182# setcap cap_sys_rawio=ep ./turbostat
183
184# chmod +r /dev/cpu/*/msr
179 185
180.B "turbostat " 186.B "turbostat "
181reads hardware counters, but doesn't write them. 187reads hardware counters, but doesn't write them.
@@ -184,15 +190,33 @@ multiple invocations of itself.
184 190
185\fBturbostat \fP 191\fBturbostat \fP
186may work poorly on Linux-2.6.20 through 2.6.29, 192may work poorly on Linux-2.6.20 through 2.6.29,
187as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF 193as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF MSRs
188in those kernels. 194in those kernels.
189 195
190If the TSC column does not make sense, then 196AVG_MHz = APERF_delta/measurement_interval. This is the actual
191the other numbers will also make no sense. 197number of elapsed cycles divided by the entire sample interval --
192Turbostat is lightweight, and its data collection is not atomic. 198including idle time. Note that this calculation is resilient
193These issues are usually caused by an extremely short measurement 199to systems lacking a non-stop TSC.
194interval (much less than 1 second), or system activity that prevents 200
195turbostat from being able to run on all CPUS to quickly collect data. 201TSC_MHz = TSC_delta/measurement_interval.
202On a system with an invariant TSC, this value will be constant
203and will closely match the base frequency value shown
204in the brand string in /proc/cpuinfo. On a system where
205the TSC stops in idle, TSC_MHz will drop
206below the processor's base frequency.
207
208%Busy = MPERF_delta/TSC_delta
209
210Bzy_MHz = TSC_delta*APERF_delta/MPERF_delta/measurement_interval
211
212Note that these calculations depend on TSC_delta, so they
213are not reliable during intervals when the TSC is not running at the base frequency.
214
215Turbostat data collection is not atomic.
216Extremely short measurement intervals (much less than 1 second),
217or system activity that prevents turbostat from being able
218to run on all CPUS to quickly collect data, will result in
219inconsistent results.
196 220
197The APERF, MPERF MSRs are defined to count non-halted cycles. 221The APERF, MPERF MSRs are defined to count non-halted cycles.
198Although it is not guaranteed by the architecture, turbostat assumes 222Although it is not guaranteed by the architecture, turbostat assumes
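A compact sketch of the arithmetic behind the AVG_MHz, %Busy, Bzy_MHz and TSC_MHz definitions above, using made-up delta values; turbostat itself performs the equivalent computation in format_counters() later in this patch:

#include <stdio.h>

struct deltas {
	unsigned long long aperf;	/* APERF_delta */
	unsigned long long mperf;	/* MPERF_delta */
	unsigned long long tsc;		/* TSC_delta */
	double interval;		/* measurement_interval, in seconds */
};

static void print_metrics(const struct deltas *d)
{
	double units = 1000000.0;	/* counts per MHz */

	printf("Avg_MHz %8.0f\n", d->aperf / units / d->interval);
	printf("%%Busy   %8.2f\n", 100.0 * d->mperf / d->tsc);
	printf("Bzy_MHz %8.0f\n",
	       (double)d->tsc / units * d->aperf / d->mperf / d->interval);
	printf("TSC_MHz %8.0f\n", d->tsc / units / d->interval);
}

int main(void)
{
	/* 2.0e9 busy cycles, 2.5e9 reference cycles, 3.0e9 TSC ticks, 1 s */
	struct deltas d = { 2000000000ULL, 2500000000ULL, 3000000000ULL, 1.0 };

	print_metrics(&d);	/* 2000 Avg_MHz, 83.33 %Busy, 2400 Bzy_MHz, 3000 TSC_MHz */
	return 0;
}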
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 5b1b807265a1..a02c02f25e88 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -38,6 +38,8 @@
38#include <ctype.h> 38#include <ctype.h>
39#include <sched.h> 39#include <sched.h>
40#include <cpuid.h> 40#include <cpuid.h>
41#include <linux/capability.h>
42#include <errno.h>
41 43
42char *proc_stat = "/proc/stat"; 44char *proc_stat = "/proc/stat";
43unsigned int interval_sec = 5; /* set with -i interval_sec */ 45unsigned int interval_sec = 5; /* set with -i interval_sec */
@@ -59,8 +61,8 @@ unsigned int has_epb;
59unsigned int units = 1000000; /* MHz etc */ 61unsigned int units = 1000000; /* MHz etc */
60unsigned int genuine_intel; 62unsigned int genuine_intel;
61unsigned int has_invariant_tsc; 63unsigned int has_invariant_tsc;
62unsigned int do_nehalem_platform_info; 64unsigned int do_nhm_platform_info;
63unsigned int do_nehalem_turbo_ratio_limit; 65unsigned int do_nhm_turbo_ratio_limit;
64unsigned int do_ivt_turbo_ratio_limit; 66unsigned int do_ivt_turbo_ratio_limit;
65unsigned int extra_msr_offset32; 67unsigned int extra_msr_offset32;
66unsigned int extra_msr_offset64; 68unsigned int extra_msr_offset64;
@@ -81,6 +83,9 @@ unsigned int tcc_activation_temp;
81unsigned int tcc_activation_temp_override; 83unsigned int tcc_activation_temp_override;
82double rapl_power_units, rapl_energy_units, rapl_time_units; 84double rapl_power_units, rapl_energy_units, rapl_time_units;
83double rapl_joule_counter_range; 85double rapl_joule_counter_range;
86unsigned int do_core_perf_limit_reasons;
87unsigned int do_gfx_perf_limit_reasons;
88unsigned int do_ring_perf_limit_reasons;
84 89
85#define RAPL_PKG (1 << 0) 90#define RAPL_PKG (1 << 0)
86 /* 0x610 MSR_PKG_POWER_LIMIT */ 91 /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -251,15 +256,13 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
251 sprintf(pathname, "/dev/cpu/%d/msr", cpu); 256 sprintf(pathname, "/dev/cpu/%d/msr", cpu);
252 fd = open(pathname, O_RDONLY); 257 fd = open(pathname, O_RDONLY);
253 if (fd < 0) 258 if (fd < 0)
254 return -1; 259 err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
255 260
256 retval = pread(fd, msr, sizeof *msr, offset); 261 retval = pread(fd, msr, sizeof *msr, offset);
257 close(fd); 262 close(fd);
258 263
259 if (retval != sizeof *msr) { 264 if (retval != sizeof *msr)
260 fprintf(stderr, "%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset); 265 err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
261 return -1;
262 }
263 266
264 return 0; 267 return 0;
265} 268}
@@ -281,7 +284,7 @@ void print_header(void)
281 outp += sprintf(outp, " CPU"); 284 outp += sprintf(outp, " CPU");
282 if (has_aperf) 285 if (has_aperf)
283 outp += sprintf(outp, " Avg_MHz"); 286 outp += sprintf(outp, " Avg_MHz");
284 if (do_nhm_cstates) 287 if (has_aperf)
285 outp += sprintf(outp, " %%Busy"); 288 outp += sprintf(outp, " %%Busy");
286 if (has_aperf) 289 if (has_aperf)
287 outp += sprintf(outp, " Bzy_MHz"); 290 outp += sprintf(outp, " Bzy_MHz");
@@ -337,7 +340,7 @@ void print_header(void)
337 outp += sprintf(outp, " PKG_%%"); 340 outp += sprintf(outp, " PKG_%%");
338 if (do_rapl & RAPL_DRAM_PERF_STATUS) 341 if (do_rapl & RAPL_DRAM_PERF_STATUS)
339 outp += sprintf(outp, " RAM_%%"); 342 outp += sprintf(outp, " RAM_%%");
340 } else { 343 } else if (do_rapl && rapl_joules) {
341 if (do_rapl & RAPL_PKG) 344 if (do_rapl & RAPL_PKG)
342 outp += sprintf(outp, " Pkg_J"); 345 outp += sprintf(outp, " Pkg_J");
343 if (do_rapl & RAPL_CORES) 346 if (do_rapl & RAPL_CORES)
@@ -457,25 +460,25 @@ int format_counters(struct thread_data *t, struct core_data *c,
457 outp += sprintf(outp, "%8d", t->cpu_id); 460 outp += sprintf(outp, "%8d", t->cpu_id);
458 } 461 }
459 462
460 /* AvgMHz */ 463 /* Avg_MHz */
461 if (has_aperf) 464 if (has_aperf)
462 outp += sprintf(outp, "%8.0f", 465 outp += sprintf(outp, "%8.0f",
463 1.0 / units * t->aperf / interval_float); 466 1.0 / units * t->aperf / interval_float);
464 467
465 /* %c0 */ 468 /* %Busy */
466 if (do_nhm_cstates) { 469 if (has_aperf) {
467 if (!skip_c0) 470 if (!skip_c0)
468 outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); 471 outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
469 else 472 else
470 outp += sprintf(outp, "********"); 473 outp += sprintf(outp, "********");
471 } 474 }
472 475
473 /* BzyMHz */ 476 /* Bzy_MHz */
474 if (has_aperf) 477 if (has_aperf)
475 outp += sprintf(outp, "%8.0f", 478 outp += sprintf(outp, "%8.0f",
476 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); 479 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
477 480
478 /* TSC */ 481 /* TSC_MHz */
479 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); 482 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
480 483
481 /* SMI */ 484 /* SMI */
@@ -561,7 +564,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
561 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); 564 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
562 if (do_rapl & RAPL_DRAM_PERF_STATUS) 565 if (do_rapl & RAPL_DRAM_PERF_STATUS)
563 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); 566 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
564 } else { 567 } else if (do_rapl && rapl_joules) {
565 if (do_rapl & RAPL_PKG) 568 if (do_rapl & RAPL_PKG)
566 outp += sprintf(outp, fmt8, 569 outp += sprintf(outp, fmt8,
567 p->energy_pkg * rapl_energy_units); 570 p->energy_pkg * rapl_energy_units);
@@ -578,8 +581,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
578 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); 581 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
579 if (do_rapl & RAPL_DRAM_PERF_STATUS) 582 if (do_rapl & RAPL_DRAM_PERF_STATUS)
580 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); 583 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
581 outp += sprintf(outp, fmt8, interval_float);
582 584
585 outp += sprintf(outp, fmt8, interval_float);
583 } 586 }
584done: 587done:
585 outp += sprintf(outp, "\n"); 588 outp += sprintf(outp, "\n");
@@ -670,24 +673,26 @@ delta_thread(struct thread_data *new, struct thread_data *old,
670 673
671 old->c1 = new->c1 - old->c1; 674 old->c1 = new->c1 - old->c1;
672 675
673 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { 676 if (has_aperf) {
674 old->aperf = new->aperf - old->aperf; 677 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
675 old->mperf = new->mperf - old->mperf; 678 old->aperf = new->aperf - old->aperf;
676 } else { 679 old->mperf = new->mperf - old->mperf;
680 } else {
677 681
678 if (!aperf_mperf_unstable) { 682 if (!aperf_mperf_unstable) {
679 fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname); 683 fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
680 fprintf(stderr, "* Frequency results do not cover entire interval *\n"); 684 fprintf(stderr, "* Frequency results do not cover entire interval *\n");
681 fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n"); 685 fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
682 686
683 aperf_mperf_unstable = 1; 687 aperf_mperf_unstable = 1;
688 }
689 /*
690 * mperf delta is likely a huge "positive" number
691 * can not use it for calculating c0 time
692 */
693 skip_c0 = 1;
694 skip_c1 = 1;
684 } 695 }
685 /*
686 * mperf delta is likely a huge "positive" number
687 * can not use it for calculating c0 time
688 */
689 skip_c0 = 1;
690 skip_c1 = 1;
691 } 696 }
692 697
693 698
@@ -1019,7 +1024,7 @@ void print_verbose_header(void)
1019 unsigned long long msr; 1024 unsigned long long msr;
1020 unsigned int ratio; 1025 unsigned int ratio;
1021 1026
1022 if (!do_nehalem_platform_info) 1027 if (!do_nhm_platform_info)
1023 return; 1028 return;
1024 1029
1025 get_msr(0, MSR_NHM_PLATFORM_INFO, &msr); 1030 get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
@@ -1132,7 +1137,7 @@ print_nhm_turbo_ratio_limits:
1132 } 1137 }
1133 fprintf(stderr, ")\n"); 1138 fprintf(stderr, ")\n");
1134 1139
1135 if (!do_nehalem_turbo_ratio_limit) 1140 if (!do_nhm_turbo_ratio_limit)
1136 return; 1141 return;
1137 1142
1138 get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr); 1143 get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
@@ -1178,6 +1183,7 @@ print_nhm_turbo_ratio_limits:
1178 if (ratio) 1183 if (ratio)
1179 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n", 1184 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
1180 ratio, bclk, ratio * bclk); 1185 ratio, bclk, ratio * bclk);
1186
1181} 1187}
1182 1188
1183void free_all_buffers(void) 1189void free_all_buffers(void)
@@ -1458,17 +1464,60 @@ void check_dev_msr()
1458 struct stat sb; 1464 struct stat sb;
1459 1465
1460 if (stat("/dev/cpu/0/msr", &sb)) 1466 if (stat("/dev/cpu/0/msr", &sb))
1461 err(-5, "no /dev/cpu/0/msr\n" 1467 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1462 "Try \"# modprobe msr\"");
1463} 1468}
1464 1469
1465void check_super_user() 1470void check_permissions()
1466{ 1471{
1467 if (getuid() != 0) 1472 struct __user_cap_header_struct cap_header_data;
1468 errx(-6, "must be root"); 1473 cap_user_header_t cap_header = &cap_header_data;
1474 struct __user_cap_data_struct cap_data_data;
1475 cap_user_data_t cap_data = &cap_data_data;
1476 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
1477 int do_exit = 0;
1478
1479 /* check for CAP_SYS_RAWIO */
1480 cap_header->pid = getpid();
1481 cap_header->version = _LINUX_CAPABILITY_VERSION;
1482 if (capget(cap_header, cap_data) < 0)
1483 err(-6, "capget(2) failed");
1484
1485 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
1486 do_exit++;
1487 warnx("capget(CAP_SYS_RAWIO) failed,"
1488 " try \"# setcap cap_sys_rawio=ep %s\"", progname);
1489 }
1490
1491 /* test file permissions */
1492 if (euidaccess("/dev/cpu/0/msr", R_OK)) {
1493 do_exit++;
1494 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
1495 }
1496
1497 /* if all else fails, tell them to be root */
1498 if (do_exit)
1499 if (getuid() != 0)
1500 warnx("... or simply run as root");
1501
1502 if (do_exit)
1503 exit(-6);
1469} 1504}
1470 1505
1471int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model) 1506/*
1507 * NHM adds support for additional MSRs:
1508 *
1509 * MSR_SMI_COUNT 0x00000034
1510 *
1511 * MSR_NHM_PLATFORM_INFO 0x000000ce
1512 * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
1513 *
1514 * MSR_PKG_C3_RESIDENCY 0x000003f8
1515 * MSR_PKG_C6_RESIDENCY 0x000003f9
1516 * MSR_CORE_C3_RESIDENCY 0x000003fc
1517 * MSR_CORE_C6_RESIDENCY 0x000003fd
1518 *
1519 */
1520int has_nhm_msrs(unsigned int family, unsigned int model)
1472{ 1521{
1473 if (!genuine_intel) 1522 if (!genuine_intel)
1474 return 0; 1523 return 0;
@@ -1495,13 +1544,27 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
1495 case 0x3D: /* BDW */ 1544 case 0x3D: /* BDW */
1496 case 0x4F: /* BDX */ 1545 case 0x4F: /* BDX */
1497 case 0x56: /* BDX-DE */ 1546 case 0x56: /* BDX-DE */
1498 return 1;
1499 case 0x2E: /* Nehalem-EX Xeon - Beckton */ 1547 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1500 case 0x2F: /* Westmere-EX Xeon - Eagleton */ 1548 case 0x2F: /* Westmere-EX Xeon - Eagleton */
1549 return 1;
1501 default: 1550 default:
1502 return 0; 1551 return 0;
1503 } 1552 }
1504} 1553}
1554int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
1555{
1556 if (!has_nhm_msrs(family, model))
1557 return 0;
1558
1559 switch (model) {
1560 /* Nehalem compatible, but do not include turbo-ratio limit support */
1561 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1562 case 0x2F: /* Westmere-EX Xeon - Eagleton */
1563 return 0;
1564 default:
1565 return 1;
1566 }
1567}
1505int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) 1568int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
1506{ 1569{
1507 if (!genuine_intel) 1570 if (!genuine_intel)
@@ -1564,6 +1627,103 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1564 return 0; 1627 return 0;
1565} 1628}
1566 1629
1630/*
1631 * print_perf_limit()
1632 */
1633int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1634{
1635 unsigned long long msr;
1636 int cpu;
1637
1638 cpu = t->cpu_id;
1639
1640 /* per-package */
1641 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1642 return 0;
1643
1644 if (cpu_migrate(cpu)) {
1645 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
1646 return -1;
1647 }
1648
1649 if (do_core_perf_limit_reasons) {
1650 get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
1651 fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
1652 fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
1653 (msr & 1 << 0) ? "PROCHOT, " : "",
1654 (msr & 1 << 1) ? "ThermStatus, " : "",
1655 (msr & 1 << 2) ? "bit2, " : "",
1656 (msr & 1 << 4) ? "Graphics, " : "",
1657 (msr & 1 << 5) ? "Auto-HWP, " : "",
1658 (msr & 1 << 6) ? "VR-Therm, " : "",
1659 (msr & 1 << 8) ? "Amps, " : "",
1660 (msr & 1 << 9) ? "CorePwr, " : "",
1661 (msr & 1 << 10) ? "PkgPwrL1, " : "",
1662 (msr & 1 << 11) ? "PkgPwrL2, " : "",
1663 (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
1664 (msr & 1 << 13) ? "Transitions, " : "",
1665 (msr & 1 << 14) ? "bit14, " : "",
1666 (msr & 1 << 15) ? "bit15, " : "");
1667 fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
1668 (msr & 1 << 16) ? "PROCHOT, " : "",
1669 (msr & 1 << 17) ? "ThermStatus, " : "",
1670 (msr & 1 << 18) ? "bit18, " : "",
1671 (msr & 1 << 20) ? "Graphics, " : "",
1672 (msr & 1 << 21) ? "Auto-HWP, " : "",
1673 (msr & 1 << 22) ? "VR-Therm, " : "",
1674 (msr & 1 << 24) ? "Amps, " : "",
1675 (msr & 1 << 25) ? "CorePwr, " : "",
1676 (msr & 1 << 26) ? "PkgPwrL1, " : "",
1677 (msr & 1 << 27) ? "PkgPwrL2, " : "",
1678 (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
1679 (msr & 1 << 29) ? "Transitions, " : "",
1680 (msr & 1 << 30) ? "bit30, " : "",
1681 (msr & 1 << 31) ? "bit31, " : "");
1682
1683 }
1684 if (do_gfx_perf_limit_reasons) {
1685 get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
1686 fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
1687 fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
1688 (msr & 1 << 0) ? "PROCHOT, " : "",
1689 (msr & 1 << 1) ? "ThermStatus, " : "",
1690 (msr & 1 << 4) ? "Graphics, " : "",
1691 (msr & 1 << 6) ? "VR-Therm, " : "",
1692 (msr & 1 << 8) ? "Amps, " : "",
1693 (msr & 1 << 9) ? "GFXPwr, " : "",
1694 (msr & 1 << 10) ? "PkgPwrL1, " : "",
1695 (msr & 1 << 11) ? "PkgPwrL2, " : "");
1696 fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
1697 (msr & 1 << 16) ? "PROCHOT, " : "",
1698 (msr & 1 << 17) ? "ThermStatus, " : "",
1699 (msr & 1 << 20) ? "Graphics, " : "",
1700 (msr & 1 << 22) ? "VR-Therm, " : "",
1701 (msr & 1 << 24) ? "Amps, " : "",
1702 (msr & 1 << 25) ? "GFXPwr, " : "",
1703 (msr & 1 << 26) ? "PkgPwrL1, " : "",
1704 (msr & 1 << 27) ? "PkgPwrL2, " : "");
1705 }
1706 if (do_ring_perf_limit_reasons) {
1707 get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
1708 fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
1709 fprintf(stderr, " (Active: %s%s%s%s%s%s)",
1710 (msr & 1 << 0) ? "PROCHOT, " : "",
1711 (msr & 1 << 1) ? "ThermStatus, " : "",
1712 (msr & 1 << 6) ? "VR-Therm, " : "",
1713 (msr & 1 << 8) ? "Amps, " : "",
1714 (msr & 1 << 10) ? "PkgPwrL1, " : "",
1715 (msr & 1 << 11) ? "PkgPwrL2, " : "");
1716 fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
1717 (msr & 1 << 16) ? "PROCHOT, " : "",
1718 (msr & 1 << 17) ? "ThermStatus, " : "",
1719 (msr & 1 << 22) ? "VR-Therm, " : "",
1720 (msr & 1 << 24) ? "Amps, " : "",
1721 (msr & 1 << 26) ? "PkgPwrL1, " : "",
1722 (msr & 1 << 27) ? "PkgPwrL2, " : "");
1723 }
1724 return 0;
1725}
1726
1567#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ 1727#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
1568#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ 1728#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
1569 1729
@@ -1653,6 +1813,27 @@ void rapl_probe(unsigned int family, unsigned int model)
1653 return; 1813 return;
1654} 1814}
1655 1815
1816void perf_limit_reasons_probe(unsigned int family, unsigned int model)
1817{
1818 if (!genuine_intel)
1819 return;
1820
1821 if (family != 6)
1822 return;
1823
1824 switch (model) {
1825 case 0x3C: /* HSW */
1826 case 0x45: /* HSW */
1827 case 0x46: /* HSW */
1828 do_gfx_perf_limit_reasons = 1;
1829 case 0x3F: /* HSX */
1830 do_core_perf_limit_reasons = 1;
1831 do_ring_perf_limit_reasons = 1;
1832 default:
1833 return;
1834 }
1835}
1836
1656int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) 1837int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1657{ 1838{
1658 unsigned long long msr; 1839 unsigned long long msr;
@@ -1842,8 +2023,15 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1842 return 0; 2023 return 0;
1843} 2024}
1844 2025
2026/*
2027 * SNB adds support for additional MSRs:
2028 *
2029 * MSR_PKG_C7_RESIDENCY 0x000003fa
2030 * MSR_CORE_C7_RESIDENCY 0x000003fe
2031 * MSR_PKG_C2_RESIDENCY 0x0000060d
2032 */
1845 2033
1846int is_snb(unsigned int family, unsigned int model) 2034int has_snb_msrs(unsigned int family, unsigned int model)
1847{ 2035{
1848 if (!genuine_intel) 2036 if (!genuine_intel)
1849 return 0; 2037 return 0;
@@ -1865,7 +2053,14 @@ int is_snb(unsigned int family, unsigned int model)
1865 return 0; 2053 return 0;
1866} 2054}
1867 2055
1868int has_c8_c9_c10(unsigned int family, unsigned int model) 2056/*
2057 * HSW adds support for additional MSRs:
2058 *
2059 * MSR_PKG_C8_RESIDENCY 0x00000630
2060 * MSR_PKG_C9_RESIDENCY 0x00000631
2061 * MSR_PKG_C10_RESIDENCY 0x00000632
2062 */
2063int has_hsw_msrs(unsigned int family, unsigned int model)
1869{ 2064{
1870 if (!genuine_intel) 2065 if (!genuine_intel)
1871 return 0; 2066 return 0;
@@ -1917,7 +2112,7 @@ double slm_bclk(void)
1917 2112
1918double discover_bclk(unsigned int family, unsigned int model) 2113double discover_bclk(unsigned int family, unsigned int model)
1919{ 2114{
1920 if (is_snb(family, model)) 2115 if (has_snb_msrs(family, model))
1921 return 100.00; 2116 return 100.00;
1922 else if (is_slm(family, model)) 2117 else if (is_slm(family, model))
1923 return slm_bclk(); 2118 return slm_bclk();
@@ -1965,7 +2160,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
1965 } 2160 }
1966 2161
1967 /* Temperature Target MSR is Nehalem and newer only */ 2162 /* Temperature Target MSR is Nehalem and newer only */
1968 if (!do_nehalem_platform_info) 2163 if (!do_nhm_platform_info)
1969 goto guess; 2164 goto guess;
1970 2165
1971 if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr)) 2166 if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
@@ -2029,18 +2224,15 @@ void check_cpuid()
2029 ebx = ecx = edx = 0; 2224 ebx = ecx = edx = 0;
2030 __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx); 2225 __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
2031 2226
2032 if (max_level < 0x80000007) 2227 if (max_level >= 0x80000007) {
2033 errx(1, "CPUID: no invariant TSC (max_level 0x%x)", max_level);
2034 2228
2035 /* 2229 /*
2036 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 2230 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
2037 * this check is valid for both Intel and AMD 2231 * this check is valid for both Intel and AMD
2038 */ 2232 */
2039 __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx); 2233 __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
2040 has_invariant_tsc = edx & (1 << 8); 2234 has_invariant_tsc = edx & (1 << 8);
2041 2235 }
2042 if (!has_invariant_tsc)
2043 errx(1, "No invariant TSC");
2044 2236
2045 /* 2237 /*
2046 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 2238 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
@@ -2054,26 +2246,22 @@ void check_cpuid()
2054 has_epb = ecx & (1 << 3); 2246 has_epb = ecx & (1 << 3);
2055 2247
2056 if (verbose) 2248 if (verbose)
2057 fprintf(stderr, "CPUID(6): %s%s%s%s\n", 2249 fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
2058 has_aperf ? "APERF" : "No APERF!", 2250 has_aperf ? "" : "No ",
2059 do_dts ? ", DTS" : "", 2251 do_dts ? "" : "No ",
2060 do_ptm ? ", PTM": "", 2252 do_ptm ? "" : "No ",
2061 has_epb ? ", EPB": ""); 2253 has_epb ? "" : "No ");
2062 2254
2063 if (!has_aperf) 2255 do_nhm_platform_info = do_nhm_cstates = do_smi = has_nhm_msrs(family, model);
2064 errx(-1, "No APERF"); 2256 do_snb_cstates = has_snb_msrs(family, model);
2065 2257 do_c8_c9_c10 = has_hsw_msrs(family, model);
2066 do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
2067 do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
2068 do_smi = do_nhm_cstates;
2069 do_snb_cstates = is_snb(family, model);
2070 do_c8_c9_c10 = has_c8_c9_c10(family, model);
2071 do_slm_cstates = is_slm(family, model); 2258 do_slm_cstates = is_slm(family, model);
2072 bclk = discover_bclk(family, model); 2259 bclk = discover_bclk(family, model);
2073 2260
2074 do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model); 2261 do_nhm_turbo_ratio_limit = has_nhm_turbo_ratio_limit(family, model);
2075 do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model); 2262 do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model);
2076 rapl_probe(family, model); 2263 rapl_probe(family, model);
2264 perf_limit_reasons_probe(family, model);
2077 2265
2078 return; 2266 return;
2079} 2267}
@@ -2299,10 +2487,9 @@ void setup_all_buffers(void)
2299 2487
2300void turbostat_init() 2488void turbostat_init()
2301{ 2489{
2302 check_cpuid();
2303
2304 check_dev_msr(); 2490 check_dev_msr();
2305 check_super_user(); 2491 check_permissions();
2492 check_cpuid();
2306 2493
2307 setup_all_buffers(); 2494 setup_all_buffers();
2308 2495
@@ -2313,6 +2500,9 @@ void turbostat_init()
2313 for_all_cpus(print_epb, ODD_COUNTERS); 2500 for_all_cpus(print_epb, ODD_COUNTERS);
2314 2501
2315 if (verbose) 2502 if (verbose)
2503 for_all_cpus(print_perf_limit, ODD_COUNTERS);
2504
2505 if (verbose)
2316 for_all_cpus(print_rapl, ODD_COUNTERS); 2506 for_all_cpus(print_rapl, ODD_COUNTERS);
2317 2507
2318 for_all_cpus(set_temperature_target, ODD_COUNTERS); 2508 for_all_cpus(set_temperature_target, ODD_COUNTERS);
@@ -2441,7 +2631,7 @@ int main(int argc, char **argv)
2441 cmdline(argc, argv); 2631 cmdline(argc, argv);
2442 2632
2443 if (verbose) 2633 if (verbose)
2444 fprintf(stderr, "turbostat v3.7 Feb 6, 2014" 2634 fprintf(stderr, "turbostat v3.9 23-Jan, 2015"
2445 " - Len Brown <lenb@kernel.org>\n"); 2635 " - Len Brown <lenb@kernel.org>\n");
2446 2636
2447 turbostat_init(); 2637 turbostat_init();