-rw-r--r--CREDITS8
-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/ABI/testing/sysfs-devices-memory24
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm6
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-hugepages15
-rw-r--r--Documentation/CodingStyle42
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl57
-rw-r--r--Documentation/DocBook/procfs-guide.tmpl4
-rw-r--r--Documentation/accounting/delay-accounting.txt11
-rw-r--r--Documentation/accounting/getdelays.c8
-rw-r--r--Documentation/accounting/taskstats-struct.txt7
-rw-r--r--Documentation/bt8xxgpio.txt67
-rw-r--r--Documentation/controllers/memory.txt3
-rw-r--r--Documentation/edac.txt151
-rw-r--r--Documentation/fb/sh7760fb.txt131
-rw-r--r--Documentation/fb/tridentfb.txt46
-rw-r--r--Documentation/feature-removal-schedule.txt26
-rw-r--r--Documentation/filesystems/Locking7
-rw-r--r--Documentation/filesystems/proc.txt44
-rw-r--r--Documentation/filesystems/vfat.txt8
-rw-r--r--Documentation/gpio.txt135
-rw-r--r--Documentation/kernel-parameters.txt49
-rw-r--r--Documentation/laptops/thinkpad-acpi.txt26
-rw-r--r--Documentation/moxa-smartio392
-rw-r--r--Documentation/power/00-INDEX4
-rw-r--r--Documentation/power/apm-acpi.txt32
-rw-r--r--Documentation/power/pm.txt257
-rw-r--r--Documentation/powerpc/booting-without-of.txt57
-rw-r--r--Documentation/unaligned-memory-access.txt32
-rw-r--r--Documentation/vm/hugetlbpage.txt23
-rw-r--r--MAINTAINERS49
-rw-r--r--Makefile1
-rw-r--r--arch/Kconfig29
-rw-r--r--arch/alpha/Kconfig5
-rw-r--r--arch/alpha/boot/misc.c39
-rw-r--r--arch/alpha/mm/numa.c10
-rw-r--r--arch/arm/Kconfig22
-rw-r--r--arch/arm/boot/compressed/misc.c59
-rw-r--r--arch/arm/common/sa1111.c2
-rw-r--r--arch/arm/configs/ezx_defconfig1614
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/kgdb.c201
-rw-r--r--arch/arm/kernel/kprobes.c6
-rw-r--r--arch/arm/kernel/module.c1
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/mach-iop32x/n2100.c52
-rw-r--r--arch/arm/mach-ns9xxx/clock.c2
-rw-r--r--arch/arm/mach-pxa/Kconfig219
-rw-r--r--arch/arm/mach-pxa/Makefile15
-rw-r--r--arch/arm/mach-pxa/clock.c30
-rw-r--r--arch/arm/mach-pxa/clock.h33
-rw-r--r--arch/arm/mach-pxa/cm-x270-pci.c27
-rw-r--r--arch/arm/mach-pxa/cm-x270-pci.h14
-rw-r--r--arch/arm/mach-pxa/cm-x270.c403
-rw-r--r--arch/arm/mach-pxa/corgi.c1
-rw-r--r--arch/arm/mach-pxa/devices.c61
-rw-r--r--arch/arm/mach-pxa/devices.h2
-rw-r--r--arch/arm/mach-pxa/e400_lcd.c56
-rw-r--r--arch/arm/mach-pxa/e740_lcd.c123
-rw-r--r--arch/arm/mach-pxa/e750_lcd.c109
-rw-r--r--arch/arm/mach-pxa/e800_lcd.c159
-rw-r--r--arch/arm/mach-pxa/em-x270.c371
-rw-r--r--arch/arm/mach-pxa/eseries.c15
-rw-r--r--arch/arm/mach-pxa/eseries_udc.c57
-rw-r--r--arch/arm/mach-pxa/ezx.c220
-rw-r--r--arch/arm/mach-pxa/littleton.c70
-rw-r--r--arch/arm/mach-pxa/lubbock.c19
-rw-r--r--arch/arm/mach-pxa/magician.c49
-rw-r--r--arch/arm/mach-pxa/mainstone.c18
-rw-r--r--arch/arm/mach-pxa/mfp-pxa2xx.c71
-rw-r--r--arch/arm/mach-pxa/palmtx.c416
-rw-r--r--arch/arm/mach-pxa/pcm027.c31
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c74
-rw-r--r--arch/arm/mach-pxa/poodle.c1
-rw-r--r--arch/arm/mach-pxa/pxa25x.c59
-rw-r--r--arch/arm/mach-pxa/pxa300.c19
-rw-r--r--arch/arm/mach-pxa/pxa320.c21
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c25
-rw-r--r--arch/arm/mach-pxa/pxa930.c190
-rw-r--r--arch/arm/mach-pxa/reset.c96
-rw-r--r--arch/arm/mach-pxa/saar.c84
-rw-r--r--arch/arm/mach-pxa/spitz.c10
-rw-r--r--arch/arm/mach-pxa/ssp.c13
-rw-r--r--arch/arm/mach-pxa/tavorevb.c84
-rw-r--r--arch/arm/mach-pxa/tosa-bt.c150
-rw-r--r--arch/arm/mach-pxa/tosa.c382
-rw-r--r--arch/arm/mach-pxa/trizeps4.c1
-rw-r--r--arch/arm/mach-pxa/zylonite.c103
-rw-r--r--arch/arm/mach-pxa/zylonite_pxa300.c46
-rw-r--r--arch/arm/mach-pxa/zylonite_pxa320.c6
-rw-r--r--arch/arm/mach-sa1100/clock.c2
-rw-r--r--arch/arm/mm/Makefile2
-rw-r--r--arch/arm/mm/discontig.c34
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/plat-omap/fb.c5
-rw-r--r--arch/arm/plat-omap/gpio.c3
-rw-r--r--arch/arm/tools/mach-types10
-rw-r--r--arch/avr32/Kconfig3
-rw-r--r--arch/avr32/kernel/process.c2
-rw-r--r--arch/avr32/kernel/stacktrace.c1
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c27
-rw-r--r--arch/avr32/mach-at32ap/pio.c2
-rw-r--r--arch/avr32/mm/init.c5
-rw-r--r--arch/avr32/mm/ioremap.c1
-rw-r--r--arch/blackfin/kernel/process.c2
-rw-r--r--arch/cris/arch-v10/boot/compressed/misc.c36
-rw-r--r--arch/cris/arch-v10/mm/init.c2
-rw-r--r--arch/cris/arch-v32/boot/compressed/misc.c39
-rw-r--r--arch/cris/arch-v32/mm/init.c2
-rw-r--r--arch/cris/kernel/profile.c17
-rw-r--r--arch/frv/kernel/pm.c1
-rw-r--r--arch/h8300/Kconfig14
-rw-r--r--arch/h8300/boot/compressed/misc.c38
-rw-r--r--arch/h8300/kernel/setup.c1
-rw-r--r--arch/ia64/ia32/sys_ia32.c2
-rw-r--r--arch/ia64/kernel/entry.S6
-rw-r--r--arch/ia64/kernel/kprobes.c6
-rw-r--r--arch/ia64/kernel/sys_ia64.c2
-rw-r--r--arch/ia64/mm/discontig.c30
-rw-r--r--arch/ia64/mm/hugetlbpage.c15
-rw-r--r--arch/m32r/boot/compressed/misc.c37
-rw-r--r--arch/m32r/mm/discontig.c10
-rw-r--r--arch/m32r/mm/init.c6
-rw-r--r--arch/m68k/amiga/chipram.c1
-rw-r--r--arch/m68k/mm/init.c4
-rw-r--r--arch/m68k/mm/motorola.c2
-rw-r--r--arch/m68k/mm/sun3mmu.c2
-rw-r--r--arch/m68knommu/Kconfig15
-rw-r--r--arch/m68knommu/Makefile11
-rw-r--r--arch/m68knommu/configs/m5208evb_defconfig610
-rw-r--r--arch/m68knommu/configs/m5249evb_defconfig497
-rw-r--r--arch/m68knommu/configs/m5275evb_defconfig627
-rw-r--r--arch/m68knommu/configs/m5307c3_defconfig580
-rw-r--r--arch/m68knommu/configs/m5407c3_defconfig641
-rw-r--r--arch/m68knommu/kernel/setup.c1
-rw-r--r--arch/m68knommu/kernel/time.c40
-rw-r--r--arch/m68knommu/kernel/traps.c38
-rw-r--r--arch/m68knommu/kernel/vmlinux.lds.S1
-rw-r--r--arch/m68knommu/platform/coldfire/Makefile2
-rw-r--r--arch/m68knommu/platform/coldfire/dma_timer.c84
-rw-r--r--arch/m68knommu/platform/coldfire/head.S3
-rw-r--r--arch/m68knommu/platform/coldfire/pit.c91
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/au1000/common/power.c1
-rw-r--r--arch/mips/kernel/linux32.c1
-rw-r--r--arch/mips/kernel/module.c1
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/stacktrace.c1
-rw-r--r--arch/mips/kernel/syscall.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-klnuma.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c4
-rw-r--r--arch/mn10300/boot/compressed/misc.c37
-rw-r--r--arch/mn10300/mm/init.c6
-rw-r--r--arch/parisc/hpux/sys_hpux.c2
-rw-r--r--arch/parisc/mm/init.c5
-rw-r--r--arch/powerpc/Kconfig5
-rw-r--r--arch/powerpc/Kconfig.debug50
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/cputable.c11
-rw-r--r--arch/powerpc/kernel/entry_32.S6
-rw-r--r--arch/powerpc/kernel/idle.c2
-rw-r--r--arch/powerpc/kernel/iommu.c28
-rw-r--r--arch/powerpc/kernel/kgdb.c410
-rw-r--r--arch/powerpc/kernel/kprobes.c6
-rw-r--r--arch/powerpc/kernel/lparcfg.c386
-rw-r--r--arch/powerpc/kernel/process.c46
-rw-r--r--arch/powerpc/kernel/prom_init.c9
-rw-r--r--arch/powerpc/kernel/ptrace.c72
-rw-r--r--arch/powerpc/kernel/setup_32.c16
-rw-r--r--arch/powerpc/kernel/signal.c6
-rw-r--r--arch/powerpc/kernel/suspend.c1
-rw-r--r--arch/powerpc/kernel/sysfs.c3
-rw-r--r--arch/powerpc/kernel/traps.c16
-rw-r--r--arch/powerpc/kernel/vio.c1033
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/lib/code-patching.c1
-rw-r--r--arch/powerpc/mm/fault.c25
-rw-r--r--arch/powerpc/mm/hash_utils_64.c51
-rw-r--r--arch/powerpc/mm/hugetlbpage.c341
-rw-r--r--arch/powerpc/mm/init_64.c8
-rw-r--r--arch/powerpc/mm/numa.c3
-rw-r--r--arch/powerpc/mm/pgtable_32.c22
-rw-r--r--arch/powerpc/mm/pgtable_64.c16
-rw-r--r--arch/powerpc/mm/tlb_64.c2
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig4
-rw-r--r--arch/powerpc/platforms/cell/iommu.c16
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c23
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c35
-rw-r--r--arch/powerpc/platforms/cell/spufs/sputrace.c3
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c3
-rw-r--r--arch/powerpc/platforms/iseries/setup.c4
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c3
-rw-r--r--arch/powerpc/platforms/powermac/setup.c6
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig23
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c468
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c42
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h10
-rw-r--r--arch/powerpc/platforms/pseries/setup.c71
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c3
-rw-r--r--arch/powerpc/sysdev/qe_lib/Kconfig2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/kernel/kprobes.c6
-rw-r--r--arch/s390/kernel/process.c2
-rw-r--r--arch/s390/kernel/setup.c4
-rw-r--r--arch/s390/kernel/topology.c14
-rw-r--r--arch/s390/mm/hugetlbpage.c8
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/renesas/migor/setup.c3
-rw-r--r--arch/sh/boot/compressed/misc_32.c38
-rw-r--r--arch/sh/boot/compressed/misc_64.c40
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/kernel/stacktrace.c1
-rw-r--r--arch/sh/kernel/sys_sh32.c2
-rw-r--r--arch/sh/mm/hugetlbpage.c8
-rw-r--r--arch/sh/mm/init.c2
-rw-r--r--arch/sh/mm/numa.c5
-rw-r--r--arch/sparc/Kconfig14
-rw-r--r--arch/sparc/kernel/sys_sparc.c2
-rw-r--r--arch/sparc/kernel/systbls.S3
-rw-r--r--arch/sparc/mm/srmmu.c3
-rw-r--r--arch/sparc/mm/sun4c.c3
-rw-r--r--arch/sparc64/kernel/iommu_common.h2
-rw-r--r--arch/sparc64/kernel/irq.c10
-rw-r--r--arch/sparc64/kernel/kprobes.c11
-rw-r--r--arch/sparc64/kernel/ldc.c38
-rw-r--r--arch/sparc64/kernel/process.c2
-rw-r--r--arch/sparc64/kernel/sys_sparc.c2
-rw-r--r--arch/sparc64/kernel/systbls.S6
-rw-r--r--arch/sparc64/kernel/time.c15
-rw-r--r--arch/sparc64/mm/hugetlbpage.c10
-rw-r--r--arch/sparc64/mm/init.c3
-rw-r--r--arch/um/include/init.h8
-rw-r--r--arch/um/include/irq_kern.h2
-rw-r--r--arch/um/include/irq_user.h2
-rw-r--r--arch/um/include/skas/skas.h1
-rw-r--r--arch/um/include/um_uaccess.h1
-rw-r--r--arch/um/kernel/irq.c35
-rw-r--r--arch/um/kernel/ksyms.c1
-rw-r--r--arch/um/kernel/mem.c33
-rw-r--r--arch/um/kernel/physmem.c2
-rw-r--r--arch/um/kernel/process.c2
-rw-r--r--arch/um/kernel/ptrace.c2
-rw-r--r--arch/um/kernel/time.c8
-rw-r--r--arch/um/kernel/uaccess.c2
-rw-r--r--arch/um/os-Linux/sigio.c2
-rw-r--r--arch/um/os-Linux/signal.c2
-rw-r--r--arch/um/os-Linux/skas/process.c2
-rw-r--r--arch/um/os-Linux/umid.c2
-rw-r--r--arch/um/sys-i386/bugs.c2
-rw-r--r--arch/um/sys-i386/checksum.S5
-rw-r--r--arch/um/sys-i386/ldt.c4
-rw-r--r--arch/v850/Kconfig353
-rw-r--r--arch/v850/Kconfig.debug10
-rw-r--r--arch/v850/Makefile54
-rw-r--r--arch/v850/README44
-rw-r--r--arch/v850/configs/rte-ma1-cb_defconfig617
-rw-r--r--arch/v850/configs/rte-me2-cb_defconfig462
-rw-r--r--arch/v850/configs/sim_defconfig451
-rw-r--r--arch/v850/kernel/Makefile40
-rw-r--r--arch/v850/kernel/anna-rom.ld16
-rw-r--r--arch/v850/kernel/anna.c202
-rw-r--r--arch/v850/kernel/anna.ld20
-rw-r--r--arch/v850/kernel/as85ep1-rom.ld21
-rw-r--r--arch/v850/kernel/as85ep1.c234
-rw-r--r--arch/v850/kernel/as85ep1.ld49
-rw-r--r--arch/v850/kernel/asm-offsets.c58
-rw-r--r--arch/v850/kernel/bug.c142
-rw-r--r--arch/v850/kernel/entry.S1121
-rw-r--r--arch/v850/kernel/fpga85e2c.c167
-rw-r--r--arch/v850/kernel/fpga85e2c.ld62
-rw-r--r--arch/v850/kernel/gbus_int.c271
-rw-r--r--arch/v850/kernel/head.S128
-rw-r--r--arch/v850/kernel/highres_timer.c132
-rw-r--r--arch/v850/kernel/init_task.c48
-rw-r--r--arch/v850/kernel/intv.S87
-rw-r--r--arch/v850/kernel/irq.c123
-rw-r--r--arch/v850/kernel/ma.c69
-rw-r--r--arch/v850/kernel/mach.c17
-rw-r--r--arch/v850/kernel/mach.h56
-rw-r--r--arch/v850/kernel/me2.c73
-rw-r--r--arch/v850/kernel/memcons.c135
-rw-r--r--arch/v850/kernel/module.c237
-rw-r--r--arch/v850/kernel/process.c217
-rw-r--r--arch/v850/kernel/procfs.c67
-rw-r--r--arch/v850/kernel/ptrace.c235
-rw-r--r--arch/v850/kernel/rte_cb.c193
-rw-r--r--arch/v850/kernel/rte_cb_leds.c137
-rw-r--r--arch/v850/kernel/rte_cb_multi.c121
-rw-r--r--arch/v850/kernel/rte_ma1_cb-rom.ld14
-rw-r--r--arch/v850/kernel/rte_ma1_cb.c107
-rw-r--r--arch/v850/kernel/rte_ma1_cb.ld57
-rw-r--r--arch/v850/kernel/rte_mb_a_pci.c819
-rw-r--r--arch/v850/kernel/rte_me2_cb.c298
-rw-r--r--arch/v850/kernel/rte_me2_cb.ld30
-rw-r--r--arch/v850/kernel/rte_nb85e_cb-multi.ld57
-rw-r--r--arch/v850/kernel/rte_nb85e_cb.c81
-rw-r--r--arch/v850/kernel/rte_nb85e_cb.ld22
-rw-r--r--arch/v850/kernel/setup.c330
-rw-r--r--arch/v850/kernel/signal.c523
-rw-r--r--arch/v850/kernel/sim.c172
-rw-r--r--arch/v850/kernel/sim.ld13
-rw-r--r--arch/v850/kernel/sim85e2.c195
-rw-r--r--arch/v850/kernel/sim85e2.ld36
-rw-r--r--arch/v850/kernel/simcons.c161
-rw-r--r--arch/v850/kernel/syscalls.c196
-rw-r--r--arch/v850/kernel/teg.c62
-rw-r--r--arch/v850/kernel/time.c106
-rw-r--r--arch/v850/kernel/v850_ksyms.c51
-rw-r--r--arch/v850/kernel/v850e2_cache.c127
-rw-r--r--arch/v850/kernel/v850e_cache.c174
-rw-r--r--arch/v850/kernel/v850e_intc.c104
-rw-r--r--arch/v850/kernel/v850e_timer_d.c54
-rw-r--r--arch/v850/kernel/v850e_utils.c62
-rw-r--r--arch/v850/kernel/vmlinux.lds.S306
-rw-r--r--arch/v850/lib/Makefile6
-rw-r--r--arch/v850/lib/ashldi3.c62
-rw-r--r--arch/v850/lib/ashrdi3.c63
-rw-r--r--arch/v850/lib/checksum.c155
-rw-r--r--arch/v850/lib/lshrdi3.c62
-rw-r--r--arch/v850/lib/memcpy.c92
-rw-r--r--arch/v850/lib/memset.c68
-rw-r--r--arch/v850/lib/muldi3.c61
-rw-r--r--arch/v850/lib/negdi2.c25
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/boot/compressed/misc.c39
-rw-r--r--arch/x86/ia32/ia32entry.S97
-rw-r--r--arch/x86/ia32/sys_ia32.c2
-rw-r--r--arch/x86/kernel/acpi/cstate.c3
-rw-r--r--arch/x86/kernel/acpi/sleep.c4
-rw-r--r--arch/x86/kernel/apm_32.c1
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c16
-rw-r--r--arch/x86/kernel/cpu/cpufreq/p4-clockmod.c6
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c23
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c157
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-ich.c7
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c5
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_64.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd_64.c4
-rw-r--r--arch/x86/kernel/cpu/proc.c2
-rw-r--r--arch/x86/kernel/entry_32.S55
-rw-r--r--arch/x86/kernel/entry_64.S55
-rw-r--r--arch/x86/kernel/genapic_flat_64.c2
-rw-r--r--arch/x86/kernel/genx2apic_uv_x.c4
-rw-r--r--arch/x86/kernel/hpet.c10
-rw-r--r--arch/x86/kernel/io_apic_64.c12
-rw-r--r--arch/x86/kernel/irqinit_64.c5
-rw-r--r--arch/x86/kernel/kprobes.c6
-rw-r--r--arch/x86/kernel/ldt.c6
-rw-r--r--arch/x86/kernel/microcode.c13
-rw-r--r--arch/x86/kernel/module_64.c1
-rw-r--r--arch/x86/kernel/pci-calgary_64.c85
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/reboot.c14
-rw-r--r--arch/x86/kernel/setup.c5
-rw-r--r--arch/x86/kernel/signal_32.c3
-rw-r--r--arch/x86/kernel/signal_64.c56
-rw-r--r--arch/x86/kernel/smpboot.c8
-rw-r--r--arch/x86/kernel/syscall_table_32.S6
-rw-r--r--arch/x86/mm/discontig_32.c3
-rw-r--r--arch/x86/mm/hugetlbpage.c78
-rw-r--r--arch/x86/mm/ioremap.c8
-rw-r--r--arch/x86/mm/numa_64.c4
-rw-r--r--arch/x86/oprofile/nmi_int.c36
-rw-r--r--arch/x86/pci/i386.c1
-rw-r--r--arch/x86/xen/smp.c4
-rw-r--r--arch/x86/xen/xen-asm_64.S2
-rw-r--r--arch/xtensa/kernel/setup.c1
-rw-r--r--arch/xtensa/kernel/syscall.c2
-rw-r--r--block/ioctl.c5
-rw-r--r--crypto/async_tx/async_memcpy.c12
-rw-r--r--crypto/async_tx/async_memset.c12
-rw-r--r--crypto/async_tx/async_tx.c33
-rw-r--r--crypto/async_tx/async_xor.c262
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/namespace/nsnames.c8
-rw-r--r--drivers/acpi/pci_link.c31
-rw-r--r--drivers/acpi/pci_slot.c5
-rw-r--r--drivers/acpi/processor_throttling.c17
-rw-r--r--drivers/acpi/sleep/main.c42
-rw-r--r--drivers/acpi/system.c1
-rw-r--r--drivers/acpi/tables/tbfadt.c17
-rw-r--r--drivers/acpi/thermal.c43
-rw-r--r--drivers/acpi/utilities/utalloc.c4
-rw-r--r--drivers/acpi/video.c14
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/auxdisplay/cfag12864b.c13
-rw-r--r--drivers/base/cpu.c4
-rw-r--r--drivers/base/firmware_class.c12
-rw-r--r--drivers/base/memory.c19
-rw-r--r--drivers/block/aoe/aoechr.c9
-rw-r--r--drivers/block/virtio_blk.c10
-rw-r--r--drivers/char/Kconfig26
-rw-r--r--drivers/char/Makefile7
-rw-r--r--drivers/char/ds1302.c17
-rw-r--r--drivers/char/dsp56k.c20
-rw-r--r--drivers/char/efirtc.c35
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hvc_console.c85
-rw-r--r--drivers/char/hvc_console.h35
-rw-r--r--drivers/char/hvc_irq.c44
-rw-r--r--drivers/char/hvc_iseries.c2
-rw-r--r--drivers/char/hvc_vio.c2
-rw-r--r--drivers/char/hvc_xen.c2
-rw-r--r--drivers/char/ip2/ip2main.c13
-rw-r--r--drivers/char/lcd.c516
-rw-r--r--drivers/char/lcd.h154
-rw-r--r--drivers/char/mem.c5
-rw-r--r--drivers/char/mspec.c23
-rw-r--r--drivers/char/mwave/mwavedd.c39
-rw-r--r--drivers/char/mwave/mwavedd.h2
-rw-r--r--drivers/char/mwave/tp3780i.c2
-rw-r--r--drivers/char/mxser.c337
-rw-r--r--drivers/char/nvram.c1
-rw-r--r--drivers/char/nwflash.c31
-rw-r--r--drivers/char/ppdev.c18
-rw-r--r--drivers/char/random.c1
-rw-r--r--drivers/char/rio/rio_linux.c8
-rw-r--r--drivers/char/rtc.c19
-rw-r--r--drivers/char/stallion.c1
-rw-r--r--drivers/char/sx.c73
-rw-r--r--drivers/char/tty_io.c16
-rw-r--r--drivers/char/virtio_console.c40
-rw-r--r--drivers/char/vt.c8
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c1
-rw-r--r--drivers/cpufreq/cpufreq.c14
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c2
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c4
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c79
-rw-r--r--drivers/dca/dca-core.c131
-rw-r--r--drivers/dca/dca-sysfs.c3
-rw-r--r--drivers/dma/Kconfig37
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/dmaengine.c35
-rw-r--r--drivers/dma/dmatest.c444
-rw-r--r--drivers/dma/dw_dmac.c1122
-rw-r--r--drivers/dma/dw_dmac_regs.h225
-rw-r--r--drivers/dma/fsldma.c38
-rw-r--r--drivers/dma/ioat.c15
-rw-r--r--drivers/dma/ioat_dca.c244
-rw-r--r--drivers/dma/ioat_dma.c402
-rw-r--r--drivers/dma/ioatdma.h28
-rw-r--r--drivers/dma/ioatdma_hw.h1
-rw-r--r--drivers/dma/ioatdma_registers.h20
-rw-r--r--drivers/dma/iop-adma.c53
-rw-r--r--drivers/dma/mv_xor.c1375
-rw-r--r--drivers/dma/mv_xor.h183
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/e752x_edac.c59
-rw-r--r--drivers/edac/edac_mc_sysfs.c158
-rw-r--r--drivers/edac/edac_pci_sysfs.c30
-rw-r--r--drivers/edac/i5100_edac.c981
-rw-r--r--drivers/edac/mpc85xx_edac.c67
-rw-r--r--drivers/edac/mv64x60_edac.c37
-rw-r--r--drivers/firmware/dcdbas.c16
-rw-r--r--drivers/firmware/dell_rbu.c28
-rw-r--r--drivers/gpio/Kconfig85
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/bt8xxgpio.c348
-rw-r--r--drivers/gpio/gpiolib.c536
-rw-r--r--drivers/gpio/max732x.c385
-rw-r--r--drivers/gpio/mcp23s08.c134
-rw-r--r--drivers/gpio/pca953x.c1
-rw-r--r--drivers/gpio/pcf857x.c1
-rw-r--r--drivers/hid/hid-core.c10
-rw-r--r--drivers/hid/hid-input-quirks.c40
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hidraw.c48
-rw-r--r--drivers/hid/usbhid/hid-quirks.c22
-rw-r--r--drivers/hid/usbhid/hiddev.c14
-rw-r--r--drivers/hid/usbhid/usbkbd.c10
-rw-r--r--drivers/hid/usbhid/usbmouse.c8
-rw-r--r--drivers/i2c/chips/Kconfig2
-rw-r--r--drivers/i2c/chips/tps65010.c2
-rw-r--r--drivers/ide/Kconfig4
-rw-r--r--drivers/ide/Makefile9
-rw-r--r--drivers/ide/arm/icside.c77
-rw-r--r--drivers/ide/arm/ide_arm.c14
-rw-r--r--drivers/ide/arm/palm_bk3710.c39
-rw-r--r--drivers/ide/arm/rapide.c30
-rw-r--r--drivers/ide/h8300/ide-h8300.c48
-rw-r--r--drivers/ide/ide-atapi.c58
-rw-r--r--drivers/ide/ide-cd.c196
-rw-r--r--drivers/ide/ide-cd.h38
-rw-r--r--drivers/ide/ide-cd_ioctl.c35
-rw-r--r--drivers/ide/ide-disk.c14
-rw-r--r--drivers/ide/ide-dma.c105
-rw-r--r--drivers/ide/ide-floppy.c102
-rw-r--r--drivers/ide/ide-generic.c107
-rw-r--r--drivers/ide/ide-io.c42
-rw-r--r--drivers/ide/ide-iops.c236
-rw-r--r--drivers/ide/ide-lib.c17
-rw-r--r--drivers/ide/ide-pnp.c29
-rw-r--r--drivers/ide/ide-probe.c437
-rw-r--r--drivers/ide/ide-proc.c8
-rw-r--r--drivers/ide/ide-tape.c159
-rw-r--r--drivers/ide/ide-taskfile.c46
-rw-r--r--drivers/ide/ide.c96
-rw-r--r--drivers/ide/legacy/buddha.c24
-rw-r--r--drivers/ide/legacy/falconide.c56
-rw-r--r--drivers/ide/legacy/gayle.c43
-rw-r--r--drivers/ide/legacy/ht6560b.c24
-rw-r--r--drivers/ide/legacy/ide-4drives.c20
-rw-r--r--drivers/ide/legacy/ide-cs.c54
-rw-r--r--drivers/ide/legacy/ide_platform.c32
-rw-r--r--drivers/ide/legacy/macide.c15
-rw-r--r--drivers/ide/legacy/q40ide.c47
-rw-r--r--drivers/ide/mips/au1xxx-ide.c56
-rw-r--r--drivers/ide/mips/swarm.c24
-rw-r--r--drivers/ide/pci/aec62xx.c81
-rw-r--r--drivers/ide/pci/alim15x3.c30
-rw-r--r--drivers/ide/pci/amd74xx.c156
-rw-r--r--drivers/ide/pci/atiixp.c20
-rw-r--r--drivers/ide/pci/cmd640.c29
-rw-r--r--drivers/ide/pci/cmd64x.c62
-rw-r--r--drivers/ide/pci/cs5520.c63
-rw-r--r--drivers/ide/pci/cs5530.c20
-rw-r--r--drivers/ide/pci/cs5535.c16
-rw-r--r--drivers/ide/pci/cy82c693.c35
-rw-r--r--drivers/ide/pci/delkin_cb.c25
-rw-r--r--drivers/ide/pci/generic.c78
-rw-r--r--drivers/ide/pci/hpt34x.c22
-rw-r--r--drivers/ide/pci/hpt366.c206
-rw-r--r--drivers/ide/pci/it8213.c35
-rw-r--r--drivers/ide/pci/it821x.c74
-rw-r--r--drivers/ide/pci/jmicron.c13
-rw-r--r--drivers/ide/pci/ns87415.c126
-rw-r--r--drivers/ide/pci/opti621.c13
-rw-r--r--drivers/ide/pci/pdc202xx_new.c78
-rw-r--r--drivers/ide/pci/pdc202xx_old.c52
-rw-r--r--drivers/ide/pci/piix.c122
-rw-r--r--drivers/ide/pci/rz1000.c13
-rw-r--r--drivers/ide/pci/sc1200.c50
-rw-r--r--drivers/ide/pci/scc_pata.c139
-rw-r--r--drivers/ide/pci/serverworks.c44
-rw-r--r--drivers/ide/pci/sgiioc4.c65
-rw-r--r--drivers/ide/pci/siimage.c167
-rw-r--r--drivers/ide/pci/sis5513.c39
-rw-r--r--drivers/ide/pci/sl82c105.c21
-rw-r--r--drivers/ide/pci/slc90e66.c13
-rw-r--r--drivers/ide/pci/tc86c001.c73
-rw-r--r--drivers/ide/pci/triflex.c13
-rw-r--r--drivers/ide/pci/trm290.c17
-rw-r--r--drivers/ide/pci/via82cxxx.c140
-rw-r--r--drivers/ide/ppc/pmac.c222
-rw-r--r--drivers/ide/setup-pci.c331
-rw-r--r--drivers/ieee1394/iso.c1
-rw-r--r--drivers/infiniband/core/cma.c99
-rw-r--r--drivers/infiniband/core/iwcm.c3
-rw-r--r--drivers/infiniband/core/sa_query.c3
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_hca.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c8
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c2
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c1
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c12
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h15
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c70
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c74
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c26
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c1
-rw-r--r--drivers/input/keyboard/hil_kbd.c1
-rw-r--r--drivers/input/keyboard/tosakbd.c2
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c1
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/isdn/gigaset/asyncdata.c3
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c12
-rw-r--r--drivers/isdn/gigaset/common.c2
-rw-r--r--drivers/isdn/gigaset/gigaset.h3
-rw-r--r--drivers/isdn/gigaset/i4l.c56
-rw-r--r--drivers/isdn/gigaset/interface.c25
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c7
-rw-r--r--drivers/isdn/hisax/st5481.h4
-rw-r--r--drivers/isdn/hisax/st5481_b.c4
-rw-r--r--drivers/isdn/hisax/st5481_d.c6
-rw-r--r--drivers/isdn/hisax/st5481_usb.c18
-rw-r--r--drivers/leds/Kconfig16
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-triggers.c3
-rw-r--r--drivers/leds/leds-atmel-pwm.c2
-rw-r--r--drivers/leds/leds-h1940.c9
-rw-r--r--drivers/leds/leds-pca9532.c337
-rw-r--r--drivers/leds/leds-pca955x.c384
-rw-r--r--drivers/lguest/lguest_device.c14
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-dvb.c1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ioread.c1
-rw-r--r--drivers/media/video/uvc/uvc_queue.c1
-rw-r--r--drivers/media/video/videobuf-core.c1
-rw-r--r--drivers/message/i2o/device.c54
-rw-r--r--drivers/mfd/Kconfig21
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/htc-egpio.c2
-rw-r--r--drivers/mfd/htc-pasic3.c2
-rw-r--r--drivers/mfd/mcp-sa11x0.c2
-rw-r--r--drivers/mfd/mfd-core.c114
-rw-r--r--drivers/mfd/sm501.c439
-rw-r--r--drivers/mfd/tc6393xb.c600
-rw-r--r--drivers/misc/Kconfig19
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/atmel_pwm.c3
-rw-r--r--drivers/misc/hp-wmi.c494
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c3
-rw-r--r--drivers/misc/thinkpad_acpi.c475
-rw-r--r--drivers/mmc/card/mmc_test.c225
-rw-r--r--drivers/mmc/card/queue.c97
-rw-r--r--drivers/mmc/host/au1xmmc.c54
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/s3cmci.c50
-rw-r--r--drivers/mmc/host/sdhci.c167
-rw-r--r--drivers/mmc/host/sdhci.h7
-rw-r--r--drivers/mtd/maps/uclinux.c1
-rw-r--r--drivers/mtd/nand/cmx270_nand.c79
-rw-r--r--drivers/mtd/ubi/build.c99
-rw-r--r--drivers/mtd/ubi/cdev.c234
-rw-r--r--drivers/mtd/ubi/debug.c158
-rw-r--r--drivers/mtd/ubi/debug.h74
-rw-r--r--drivers/mtd/ubi/eba.c77
-rw-r--r--drivers/mtd/ubi/gluebi.c16
-rw-r--r--drivers/mtd/ubi/io.c48
-rw-r--r--drivers/mtd/ubi/kapi.c50
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/scan.c136
-rw-r--r--drivers/mtd/ubi/scan.h21
-rw-r--r--drivers/mtd/ubi/ubi-media.h38
-rw-r--r--drivers/mtd/ubi/ubi.h75
-rw-r--r--drivers/mtd/ubi/upd.c32
-rw-r--r--drivers/mtd/ubi/vmt.c148
-rw-r--r--drivers/mtd/ubi/vtbl.c127
-rw-r--r--drivers/mtd/ubi/wl.c208
-rw-r--r--drivers/net/fec.c54
-rw-r--r--drivers/net/ibmveth.c189
-rw-r--r--drivers/net/ibmveth.h5
-rw-r--r--drivers/net/mlx4/cmd.c3
-rw-r--r--drivers/net/mlx4/eq.c1
-rw-r--r--drivers/net/mlx4/fw.c18
-rw-r--r--drivers/net/mlx4/fw.h2
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mlx4/mlx4.h1
-rw-r--r--drivers/net/mlx4/mr.c49
-rw-r--r--drivers/net/mlx4/pd.c7
-rw-r--r--drivers/net/smc91x.c94
-rw-r--r--drivers/net/smc91x.h76
-rw-r--r--drivers/net/virtio_net.c114
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/of_i2c.c2
-rw-r--r--drivers/parport/parport_ax88796.c2
-rw-r--r--drivers/pci/dmar.c4
-rw-r--r--drivers/pci/pci.c34
-rw-r--r--drivers/pci/proc.c18
-rw-r--r--drivers/pcmcia/Kconfig3
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/electra_cf.c1
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c93
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c118
-rw-r--r--drivers/power/Kconfig6
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ds2760_battery.c2
-rw-r--r--drivers/power/palmtx_battery.c198
-rw-r--r--drivers/power/pda_power.c2
-rw-r--r--drivers/rtc/Kconfig19
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c16
-rw-r--r--drivers/rtc/rtc-cmos.c294
-rw-r--r--drivers/rtc/rtc-dev.c58
-rw-r--r--drivers/rtc/rtc-ds1305.c847
-rw-r--r--drivers/rtc/rtc-m41t80.c20
-rw-r--r--drivers/rtc/rtc-m41t94.c173
-rw-r--r--drivers/rtc/rtc-omap.c21
-rw-r--r--drivers/rtc/rtc-pcf8583.c129
-rw-r--r--drivers/rtc/rtc-s3c.c89
-rw-r--r--drivers/rtc/rtc-vr41xx.c65
-rw-r--r--drivers/s390/kvm/kvm_virtio.c34
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c45
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h2
-rw-r--r--drivers/scsi/ide-scsi.c65
-rw-r--r--drivers/scsi/sun_esp.c1
-rw-r--r--drivers/serial/8250.c17
-rw-r--r--drivers/serial/8250_gsc.c2
-rw-r--r--drivers/serial/8250_pci.c17
-rw-r--r--drivers/serial/Kconfig16
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c116
-rw-r--r--drivers/serial/dz.c24
-rw-r--r--drivers/serial/mpsc.c148
-rw-r--r--drivers/serial/zs.c21
-rw-r--r--drivers/spi/Kconfig45
-rw-r--r--drivers/spi/au1550_spi.c207
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/spi/spi_mpc83xx.c29
-rw-r--r--drivers/spi/spidev.c19
-rw-r--r--drivers/spi/xilinx_spi.c5
-rw-r--r--drivers/telephony/ixj.c17
-rw-r--r--drivers/usb/gadget/at91_udc.h2
-rw-r--r--drivers/usb/gadget/cdc2.c2
-rw-r--r--drivers/usb/gadget/ether.c2
-rw-r--r--drivers/usb/gadget/file_storage.c14
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h2
-rw-r--r--drivers/usb/gadget/gmidi.c2
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/goku_udc.h2
-rw-r--r--drivers/usb/gadget/inode.c2
-rw-r--r--drivers/usb/gadget/net2280.c2
-rw-r--r--drivers/usb/gadget/net2280.h2
-rw-r--r--drivers/usb/gadget/omap_udc.c6
-rw-r--r--drivers/usb/gadget/omap_udc.h2
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c12
-rw-r--r--drivers/usb/gadget/pxa25x_udc.h2
-rw-r--r--drivers/usb/gadget/u_ether.c3
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp116x.h2
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/sl811.h2
-rw-r--r--drivers/usb/misc/usbtest.c4
-rw-r--r--drivers/video/Kconfig53
-rw-r--r--drivers/video/Makefile5
-rw-r--r--drivers/video/acornfb.c1
-rw-r--r--drivers/video/amifb.c24
-rw-r--r--drivers/video/atafb.c7
-rw-r--r--drivers/video/atmel_lcdfb.c92
-rw-r--r--drivers/video/aty/aty128fb.c6
-rw-r--r--drivers/video/aty/atyfb_base.c100
-rw-r--r--drivers/video/aty/radeon_base.c20
-rw-r--r--drivers/video/backlight/Kconfig45
-rw-r--r--drivers/video/backlight/Makefile8
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c244
-rw-r--r--drivers/video/backlight/backlight.c1
-rw-r--r--drivers/video/backlight/ili9320.c330
-rw-r--r--drivers/video/backlight/ili9320.h80
-rw-r--r--drivers/video/backlight/lcd.c2
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c116
-rw-r--r--drivers/video/backlight/platform_lcd.c172
-rw-r--r--drivers/video/backlight/vgg2432a4.c284
-rw-r--r--drivers/video/bf54x-lq043fb.c2
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c2
-rw-r--r--drivers/video/carminefb.c790
-rw-r--r--drivers/video/carminefb.h64
-rw-r--r--drivers/video/carminefb_regs.h159
-rw-r--r--drivers/video/cobalt_lcdfb.c371
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/console/fbcon.h8
-rw-r--r--drivers/video/console/mdacon.c4
-rw-r--r--drivers/video/fbmem.c10
-rw-r--r--drivers/video/fbmon.c2
-rw-r--r--drivers/video/fsl-diu-fb.c60
-rw-r--r--drivers/video/geode/lxfb.h2
-rw-r--r--drivers/video/geode/lxfb_ops.c28
-rw-r--r--drivers/video/hgafb.c36
-rw-r--r--drivers/video/imxfb.c1
-rw-r--r--drivers/video/neofb.c215
-rw-r--r--drivers/video/omap/dispc.c1
-rw-r--r--drivers/video/omap/omapfb_main.c1
-rw-r--r--drivers/video/pxafb.c72
-rw-r--r--drivers/video/pxafb.h2
-rw-r--r--drivers/video/sa1100fb.c8
-rw-r--r--drivers/video/sa1100fb.h2
-rw-r--r--drivers/video/sh7760fb.c658
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c725
-rw-r--r--drivers/video/sis/init.h1
-rw-r--r--drivers/video/sis/init301.h1
-rw-r--r--drivers/video/sis/initextlfb.c1
-rw-r--r--drivers/video/sis/osdef.h1
-rw-r--r--drivers/video/sis/sis.h22
-rw-r--r--drivers/video/sis/sis_accel.c1
-rw-r--r--drivers/video/sis/sis_main.c44
-rw-r--r--drivers/video/sis/sis_main.h4
-rw-r--r--drivers/video/sis/vgatypes.h4
-rw-r--r--drivers/video/skeletonfb.c37
-rw-r--r--drivers/video/sm501fb.c329
-rw-r--r--drivers/video/tdfxfb.c8
-rw-r--r--drivers/video/tridentfb.c1350
-rw-r--r--drivers/video/uvesafb.c4
-rw-r--r--drivers/video/vfb.c14
-rw-r--r--drivers/video/vga16fb.c122
-rw-r--r--drivers/virtio/virtio.c26
-rw-r--r--drivers/virtio/virtio_pci.c13
-rw-r--r--drivers/virtio/virtio_ring.c23
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/zorro/zorro-sysfs.c1
-rw-r--r--fs/Kconfig74
-rw-r--r--fs/aio.c2
-rw-r--r--fs/anon_inodes.c11
-rw-r--r--fs/autofs4/autofs_i.h28
-rw-r--r--fs/autofs4/expire.c91
-rw-r--r--fs/autofs4/inode.c33
-rw-r--r--fs/autofs4/root.c589
-rw-r--r--fs/autofs4/waitq.c267
-rw-r--r--fs/binfmt_elf.c99
-rw-r--r--fs/binfmt_elf_fdpic.c26
-rw-r--r--fs/binfmt_misc.c20
-rw-r--r--fs/coda/coda_linux.c6
-rw-r--r--fs/coda/psdev.c4
-rw-r--r--fs/coda/upcall.c15
-rw-r--r--fs/compat.c22
-rw-r--r--fs/compat_ioctl.c3
-rw-r--r--fs/dcache.c335
-rw-r--r--fs/dlm/plock.c2
-rw-r--r--fs/dquot.c129
-rw-r--r--fs/ecryptfs/Makefile2
-rw-r--r--fs/ecryptfs/crypto.c37
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h23
-rw-r--r--fs/ecryptfs/file.c17
-rw-r--r--fs/ecryptfs/inode.c31
-rw-r--r--fs/ecryptfs/keystore.c9
-rw-r--r--fs/ecryptfs/kthread.c203
-rw-r--r--fs/ecryptfs/main.c79
-rw-r--r--fs/ecryptfs/miscdev.c59
-rw-r--r--fs/ecryptfs/mmap.c11
-rw-r--r--fs/eventfd.c17
-rw-r--r--fs/eventpoll.c30
-rw-r--r--fs/exec.c143
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext2/xattr_security.c2
-rw-r--r--fs/ext2/xattr_trusted.c4
-rw-r--r--fs/ext2/xattr_user.c4
-rw-r--r--fs/ext3/dir.c14
-rw-r--r--fs/ext3/ialloc.c9
-rw-r--r--fs/ext3/inode.c46
-rw-r--r--fs/ext3/namei.c26
-rw-r--r--fs/ext3/super.c78
-rw-r--r--fs/ext3/xattr_security.c2
-rw-r--r--fs/ext3/xattr_trusted.c4
-rw-r--r--fs/ext3/xattr_user.c4
-rw-r--r--fs/fat/dir.c229
-rw-r--r--fs/fat/inode.c34
-rw-r--r--fs/fat/misc.c10
-rw-r--r--fs/fcntl.c15
-rw-r--r--fs/fuse/dir.c139
-rw-r--r--fs/fuse/file.c11
-rw-r--r--fs/fuse/fuse_i.h10
-rw-r--r--fs/fuse/inode.c177
-rw-r--r--fs/hfs/bitmap.c8
-rw-r--r--fs/hfs/btree.c2
-rw-r--r--fs/hfs/extent.c14
-rw-r--r--fs/hfs/hfs_fs.h5
-rw-r--r--fs/hfs/inode.c4
-rw-r--r--fs/hfs/super.c2
-rw-r--r--fs/hfsplus/extents.c14
-rw-r--r--fs/hfsplus/hfsplus_fs.h3
-rw-r--r--fs/hfsplus/inode.c4
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hugetlbfs/inode.c101
-rw-r--r--fs/inotify_user.c18
-rw-r--r--fs/isofs/rock.c22
-rw-r--r--fs/jbd/commit.c64
-rw-r--r--fs/jbd/journal.c8
-rw-r--r--fs/jbd/revoke.c163
-rw-r--r--fs/jbd/transaction.c57
-rw-r--r--fs/jfs/super.c1
-rw-r--r--fs/lockd/clntproc.c10
-rw-r--r--fs/lockd/svclock.c13
-rw-r--r--fs/locks.c90
-rw-r--r--fs/minix/inode.c3
-rw-r--r--fs/minix/minix.h6
-rw-r--r--fs/minix/namei.c24
-rw-r--r--fs/msdos/namei.c21
-rw-r--r--fs/nfs/nfsroot.c2
-rw-r--r--fs/nfsd/lockd.c13
-rw-r--r--fs/open.c3
-rw-r--r--fs/partitions/check.c38
-rw-r--r--fs/partitions/efi.c42
-rw-r--r--fs/partitions/ldm.c70
-rw-r--r--fs/partitions/ldm.h5
-rw-r--r--fs/pipe.c35
-rw-r--r--fs/proc/Kconfig59
-rw-r--r--fs/proc/base.c86
-rw-r--r--fs/proc/generic.c14
-rw-r--r--fs/proc/inode.c81
-rw-r--r--fs/proc/internal.h8
-rw-r--r--fs/proc/kcore.c10
-rw-r--r--fs/proc/kmsg.c2
-rw-r--r--fs/proc/proc_misc.c19
-rw-r--r--fs/quota.c18
-rw-r--r--fs/quota_v1.c1
-rw-r--r--fs/quota_v2.c1
-rw-r--r--fs/reiserfs/journal.c42
-rw-r--r--fs/reiserfs/super.c124
-rw-r--r--fs/reiserfs/xattr_security.c2
-rw-r--r--fs/reiserfs/xattr_trusted.c2
-rw-r--r--fs/reiserfs/xattr_user.c2
-rw-r--r--fs/signalfd.c19
-rw-r--r--fs/smbfs/cache.c1
-rw-r--r--fs/smbfs/proc.c1
-rw-r--r--fs/super.c1
-rw-r--r--fs/sync.c3
-rw-r--r--fs/timerfd.c9
-rw-r--r--fs/ufs/super.c3
-rw-r--r--fs/vfat/namei.c2
-rw-r--r--include/asm-alpha/ide.h44
-rw-r--r--include/asm-alpha/kvm.h6
-rw-r--r--include/asm-alpha/page.h3
-rw-r--r--include/asm-alpha/semaphore.h1
-rw-r--r--include/asm-alpha/socket.h5
-rw-r--r--include/asm-alpha/thread_info.h4
-rw-r--r--include/asm-arm/arch-iop13xx/adma.h18
-rw-r--r--include/asm-arm/arch-pxa/cm-x270.h50
-rw-r--r--include/asm-arm/arch-pxa/eseries-gpio.h50
-rw-r--r--include/asm-arm/arch-pxa/eseries-irq.h27
-rw-r--r--include/asm-arm/arch-pxa/hardware.h33
-rw-r--r--include/asm-arm/arch-pxa/irqs.h3
-rw-r--r--include/asm-arm/arch-pxa/mfp-pxa2xx.h1
-rw-r--r--include/asm-arm/arch-pxa/mfp-pxa930.h491
-rw-r--r--include/asm-arm/arch-pxa/mfp.h8
-rw-r--r--include/asm-arm/arch-pxa/palmtx.h106
-rw-r--r--include/asm-arm/arch-pxa/pxa27x-udc.h2
-rw-r--r--include/asm-arm/arch-pxa/pxa2xx_spi.h2
-rw-r--r--include/asm-arm/arch-pxa/pxa3xx_nand.h2
-rw-r--r--include/asm-arm/arch-pxa/pxafb.h3
-rw-r--r--include/asm-arm/arch-pxa/regs-lcd.h6
-rw-r--r--include/asm-arm/arch-pxa/regs-ssp.h16
-rw-r--r--include/asm-arm/arch-pxa/system.h17
-rw-r--r--include/asm-arm/arch-pxa/tosa.h50
-rw-r--r--include/asm-arm/arch-pxa/tosa_bt.h22
-rw-r--r--include/asm-arm/arch-pxa/uncompress.h13
-rw-r--r--include/asm-arm/arch-pxa/zylonite.h2
-rw-r--r--include/asm-arm/arch-sa1100/h3600.h5
-rw-r--r--include/asm-arm/hardware/iop3xx-adma.h4
-rw-r--r--include/asm-arm/ide.h4
-rw-r--r--include/asm-arm/kgdb.h104
-rw-r--r--include/asm-arm/kvm.h6
-rw-r--r--include/asm-arm/mach/udc_pxa2xx.h1
-rw-r--r--include/asm-arm/page-nommu.h4
-rw-r--r--include/asm-arm/page.h3
-rw-r--r--include/asm-arm/plat-orion/mv_xor.h28
-rw-r--r--include/asm-arm/ptrace.h6
-rw-r--r--include/asm-arm/semaphore.h1
-rw-r--r--include/asm-arm/thread_info.h13
-rw-r--r--include/asm-arm/traps.h2
-rw-r--r--include/asm-avr32/arch-at32ap/at32ap700x.h16
-rw-r--r--include/asm-avr32/kvm.h6
-rw-r--r--include/asm-avr32/page.h3
-rw-r--r--include/asm-avr32/semaphore.h1
-rw-r--r--include/asm-avr32/thread_info.h4
-rw-r--r--include/asm-blackfin/ide.h27
-rw-r--r--include/asm-blackfin/kvm.h6
-rw-r--r--include/asm-blackfin/page.h3
-rw-r--r--include/asm-blackfin/ptrace.h6
-rw-r--r--include/asm-blackfin/semaphore.h1
-rw-r--r--include/asm-blackfin/thread_info.h5
-rw-r--r--include/asm-cris/arch-v10/Kbuild1
-rw-r--r--include/asm-cris/arch-v10/ide.h91
-rw-r--r--include/asm-cris/arch-v10/ptrace.h4
-rw-r--r--include/asm-cris/arch-v32/Kbuild1
-rw-r--r--include/asm-cris/arch-v32/ide.h56
-rw-r--r--include/asm-cris/arch-v32/ptrace.h4
-rw-r--r--include/asm-cris/cacheflush.h1
-rw-r--r--include/asm-cris/ide.h1
-rw-r--r--include/asm-cris/kvm.h6
-rw-r--r--include/asm-cris/page.h3
-rw-r--r--include/asm-cris/ptrace.h4
-rw-r--r--include/asm-cris/semaphore.h1
-rw-r--r--include/asm-cris/thread_info.h2
-rw-r--r--include/asm-frv/Kbuild1
-rw-r--r--include/asm-frv/ide.h4
-rw-r--r--include/asm-frv/kvm.h6
-rw-r--r--include/asm-frv/page.h3
-rw-r--r--include/asm-frv/semaphore.h1
-rw-r--r--include/asm-frv/thread_info.h2
-rw-r--r--include/asm-generic/Kbuild.asm2
-rw-r--r--include/asm-generic/bug.h25
-rw-r--r--include/asm-generic/gpio.h35
-rw-r--r--include/asm-generic/int-ll64.h2
-rw-r--r--include/asm-h8300/elf.h4
-rw-r--r--include/asm-h8300/ide.h26
-rw-r--r--include/asm-h8300/keyboard.h24
-rw-r--r--include/asm-h8300/kvm.h6
-rw-r--r--include/asm-h8300/page.h3
-rw-r--r--include/asm-h8300/semaphore.h1
-rw-r--r--include/asm-h8300/thread_info.h5
-rw-r--r--include/asm-ia64/hugetlb.h5
-rw-r--r--include/asm-ia64/ide.h51
-rw-r--r--include/asm-ia64/page.h1
-rw-r--r--include/asm-ia64/semaphore.h1
-rw-r--r--include/asm-ia64/thread_info.h2
-rw-r--r--include/asm-ia64/unistd.h8
-rw-r--r--include/asm-m32r/ide.h70
-rw-r--r--include/asm-m32r/kvm.h6
-rw-r--r--include/asm-m32r/page.h3
-rw-r--r--include/asm-m32r/semaphore.h1
-rw-r--r--include/asm-m32r/thread_info.h2
-rw-r--r--include/asm-m68k/dvma.h2
-rw-r--r--include/asm-m68k/ide.h4
-rw-r--r--include/asm-m68k/kvm.h6
-rw-r--r--include/asm-m68k/page.h3
-rw-r--r--include/asm-m68k/semaphore.h1
-rw-r--r--include/asm-m68k/thread_info.h8
-rw-r--r--include/asm-m68knommu/bitops.h30
-rw-r--r--include/asm-m68knommu/byteorder.h16
-rw-r--r--include/asm-m68knommu/commproc.h19
-rw-r--r--include/asm-m68knommu/kvm.h6
-rw-r--r--include/asm-m68knommu/page.h3
-rw-r--r--include/asm-m68knommu/ptrace.h2
-rw-r--r--include/asm-m68knommu/semaphore.h1
-rw-r--r--include/asm-m68knommu/system.h11
-rw-r--r--include/asm-m68knommu/thread_info.h4
-rw-r--r--include/asm-mips/kvm.h6
-rw-r--r--include/asm-mips/mach-au1x00/au1550_spi.h1
-rw-r--r--include/asm-mips/mach-generic/gpio.h2
-rw-r--r--include/asm-mips/mach-generic/ide.h48
-rw-r--r--include/asm-mips/page.h3
-rw-r--r--include/asm-mips/processor.h2
-rw-r--r--include/asm-mips/semaphore.h1
-rw-r--r--include/asm-mips/socket.h7
-rw-r--r--include/asm-mips/thread_info.h2
-rw-r--r--include/asm-mn10300/ide.h4
-rw-r--r--include/asm-mn10300/kvm.h6
-rw-r--r--include/asm-mn10300/page.h3
-rw-r--r--include/asm-mn10300/pci.h9
-rw-r--r--include/asm-mn10300/ptrace.h8
-rw-r--r--include/asm-mn10300/scatterlist.h9
-rw-r--r--include/asm-mn10300/semaphore.h1
-rw-r--r--include/asm-mn10300/thread_info.h2
-rw-r--r--include/asm-parisc/ide.h4
-rw-r--r--include/asm-parisc/kvm.h6
-rw-r--r--include/asm-parisc/page.h4
-rw-r--r--include/asm-parisc/ptrace.h4
-rw-r--r--include/asm-parisc/semaphore.h1
-rw-r--r--include/asm-parisc/socket.h5
-rw-r--r--include/asm-parisc/thread_info.h10
-rw-r--r--include/asm-powerpc/Kbuild1
-rw-r--r--include/asm-powerpc/cputable.h2
-rw-r--r--include/asm-powerpc/elf.h8
-rw-r--r--include/asm-powerpc/firmware.h3
-rw-r--r--include/asm-powerpc/gpio.h4
-rw-r--r--include/asm-powerpc/hugetlb.h10
-rw-r--r--include/asm-powerpc/hvcall.h23
-rw-r--r--include/asm-powerpc/ide.h26
-rw-r--r--include/asm-powerpc/io.h5
-rw-r--r--include/asm-powerpc/kgdb.h92
-rw-r--r--include/asm-powerpc/lppaca.h5
-rw-r--r--include/asm-powerpc/machdep.h2
-rw-r--r--include/asm-powerpc/mmu-hash64.h6
-rw-r--r--include/asm-powerpc/mpc52xx_psc.h40
-rw-r--r--include/asm-powerpc/page.h3
-rw-r--r--include/asm-powerpc/page_64.h1
-rw-r--r--include/asm-powerpc/pgalloc-64.h4
-rw-r--r--include/asm-powerpc/pgtable-4k.h3
-rw-r--r--include/asm-powerpc/pgtable-64k.h2
-rw-r--r--include/asm-powerpc/pgtable-ppc32.h16
-rw-r--r--include/asm-powerpc/pgtable-ppc64.h8
-rw-r--r--include/asm-powerpc/pgtable.h13
-rw-r--r--include/asm-powerpc/semaphore.h1
-rw-r--r--include/asm-powerpc/syscalls.h1
-rw-r--r--include/asm-powerpc/systbl.h6
-rw-r--r--include/asm-powerpc/system.h2
-rw-r--r--include/asm-powerpc/thread_info.h14
-rw-r--r--include/asm-powerpc/tlbflush.h11
-rw-r--r--include/asm-powerpc/unistd.h8
-rw-r--r--include/asm-powerpc/vio.h27
-rw-r--r--include/asm-s390/Kbuild1
-rw-r--r--include/asm-s390/hugetlb.h3
-rw-r--r--include/asm-s390/kvm_virtio.h10
-rw-r--r--include/asm-s390/page.h3
-rw-r--r--include/asm-s390/semaphore.h1
-rw-r--r--include/asm-s390/thread_info.h5
-rw-r--r--include/asm-sh/hugetlb.h5
-rw-r--r--include/asm-sh/ide.h21
-rw-r--r--include/asm-sh/kvm.h6
-rw-r--r--include/asm-sh/page.h3
-rw-r--r--include/asm-sh/ptrace.h2
-rw-r--r--include/asm-sh/semaphore.h1
-rw-r--r--include/asm-sh/sh7760fb.h197
-rw-r--r--include/asm-sh/sh_mobile_lcdc.h66
-rw-r--r--include/asm-sh/thread_info.h2
-rw-r--r--include/asm-sparc/hugetlb.h5
-rw-r--r--include/asm-sparc/ide.h3
-rw-r--r--include/asm-sparc/kvm.h6
-rw-r--r--include/asm-sparc/page_32.h3
-rw-r--r--include/asm-sparc/page_64.h3
-rw-r--r--include/asm-sparc/semaphore.h1
-rw-r--r--include/asm-sparc/thread_info_32.h2
-rw-r--r--include/asm-sparc/thread_info_64.h2
-rw-r--r--include/asm-sparc/unistd_32.h8
-rw-r--r--include/asm-sparc/unistd_64.h8
-rw-r--r--include/asm-sparc64/kvm.h1
-rw-r--r--include/asm-sparc64/semaphore.h1
-rw-r--r--include/asm-um/kvm.h6
-rw-r--r--include/asm-um/page.h6
-rw-r--r--include/asm-um/ptrace-generic.h3
-rw-r--r--include/asm-um/semaphore.h1
-rw-r--r--include/asm-um/thread_info.h16
-rw-r--r--include/asm-v850/Kbuild1
-rw-r--r--include/asm-v850/a.out.h21
-rw-r--r--include/asm-v850/anna.h137
-rw-r--r--include/asm-v850/as85ep1.h152
-rw-r--r--include/asm-v850/asm.h32
-rw-r--r--include/asm-v850/atomic.h131
-rw-r--r--include/asm-v850/auxvec.h4
-rw-r--r--include/asm-v850/bitops.h161
-rw-r--r--include/asm-v850/bug.h25
-rw-r--r--include/asm-v850/bugs.h16
-rw-r--r--include/asm-v850/byteorder.h48
-rw-r--r--include/asm-v850/cache.h26
-rw-r--r--include/asm-v850/cacheflush.h70
-rw-r--r--include/asm-v850/checksum.h112
-rw-r--r--include/asm-v850/clinkage.h26
-rw-r--r--include/asm-v850/cputime.h6
-rw-r--r--include/asm-v850/current.h47
-rw-r--r--include/asm-v850/delay.h47
-rw-r--r--include/asm-v850/device.h7
-rw-r--r--include/asm-v850/div64.h1
-rw-r--r--include/asm-v850/dma-mapping.h11
-rw-r--r--include/asm-v850/dma.h18
-rw-r--r--include/asm-v850/elf.h99
-rw-r--r--include/asm-v850/emergency-restart.h6
-rw-r--r--include/asm-v850/entry.h113
-rw-r--r--include/asm-v850/errno.h6
-rw-r--r--include/asm-v850/fb.h12
-rw-r--r--include/asm-v850/fcntl.h11
-rw-r--r--include/asm-v850/flat.h133
-rw-r--r--include/asm-v850/fpga85e2c.h82
-rw-r--r--include/asm-v850/futex.h6
-rw-r--r--include/asm-v850/gbus_int.h97
-rw-r--r--include/asm-v850/hardirq.h28
-rw-r--r--include/asm-v850/highres_timer.h44
-rw-r--r--include/asm-v850/hw_irq.h4
-rw-r--r--include/asm-v850/io.h142
-rw-r--r--include/asm-v850/ioctl.h1
-rw-r--r--include/asm-v850/ioctls.h84
-rw-r--r--include/asm-v850/ipcbuf.h29
-rw-r--r--include/asm-v850/irq.h55
-rw-r--r--include/asm-v850/irq_regs.h1
-rw-r--r--include/asm-v850/kdebug.h1
-rw-r--r--include/asm-v850/kmap_types.h19
-rw-r--r--include/asm-v850/kvm.h6
-rw-r--r--include/asm-v850/linkage.h8
-rw-r--r--include/asm-v850/local.h6
-rw-r--r--include/asm-v850/ma.h101
-rw-r--r--include/asm-v850/ma1.h50
-rw-r--r--include/asm-v850/machdep.h60
-rw-r--r--include/asm-v850/macrology.h17
-rw-r--r--include/asm-v850/me2.h182
-rw-r--r--include/asm-v850/mman.h15
-rw-r--r--include/asm-v850/mmu.h11
-rw-r--r--include/asm-v850/mmu_context.h13
-rw-r--r--include/asm-v850/module.h62
-rw-r--r--include/asm-v850/msgbuf.h31
-rw-r--r--include/asm-v850/mutex.h9
-rw-r--r--include/asm-v850/page.h128
-rw-r--r--include/asm-v850/param.h33
-rw-r--r--include/asm-v850/pci.h119
-rw-r--r--include/asm-v850/percpu.h14
-rw-r--r--include/asm-v850/pgalloc.h22
-rw-r--r--include/asm-v850/pgtable.h59
-rw-r--r--include/asm-v850/poll.h9
-rw-r--r--include/asm-v850/posix_types.h72
-rw-r--r--include/asm-v850/processor.h120
-rw-r--r--include/asm-v850/ptrace.h121
-rw-r--r--include/asm-v850/resource.h6
-rw-r--r--include/asm-v850/rte_cb.h78
-rw-r--r--include/asm-v850/rte_ma1_cb.h128
-rw-r--r--include/asm-v850/rte_mb_a_pci.h56
-rw-r--r--include/asm-v850/rte_me2_cb.h202
-rw-r--r--include/asm-v850/rte_nb85e_cb.h111
-rw-r--r--include/asm-v850/scatterlist.h31
-rw-r--r--include/asm-v850/sections.h6
-rw-r--r--include/asm-v850/segment.h36
-rw-r--r--include/asm-v850/semaphore.h1
-rw-r--r--include/asm-v850/sembuf.h25
-rw-r--r--include/asm-v850/serial.h56
-rw-r--r--include/asm-v850/setup.h6
-rw-r--r--include/asm-v850/shmbuf.h42
-rw-r--r--include/asm-v850/shmparam.h6
-rw-r--r--include/asm-v850/sigcontext.h25
-rw-r--r--include/asm-v850/siginfo.h6
-rw-r--r--include/asm-v850/signal.h168
-rw-r--r--include/asm-v850/sim.h47
-rw-r--r--include/asm-v850/sim85e2.h69
-rw-r--r--include/asm-v850/sim85e2c.h26
-rw-r--r--include/asm-v850/sim85e2s.h28
-rw-r--r--include/asm-v850/simsyscall.h99
-rw-r--r--include/asm-v850/socket.h57
-rw-r--r--include/asm-v850/sockios.h13
-rw-r--r--include/asm-v850/stat.h73
-rw-r--r--include/asm-v850/statfs.h6
-rw-r--r--include/asm-v850/string.h25
-rw-r--r--include/asm-v850/system.h123
-rw-r--r--include/asm-v850/teg.h101
-rw-r--r--include/asm-v850/termbits.h200
-rw-r--r--include/asm-v850/termios.h90
-rw-r--r--include/asm-v850/thread_info.h129
-rw-r--r--include/asm-v850/timex.h18
-rw-r--r--include/asm-v850/tlb.h21
-rw-r--r--include/asm-v850/tlbflush.h64
-rw-r--r--include/asm-v850/topology.h6
-rw-r--r--include/asm-v850/types.h36
-rw-r--r--include/asm-v850/uaccess.h159
-rw-r--r--include/asm-v850/ucontext.h14
-rw-r--r--include/asm-v850/unaligned.h22
-rw-r--r--include/asm-v850/unistd.h244
-rw-r--r--include/asm-v850/user.h52
-rw-r--r--include/asm-v850/v850e.h21
-rw-r--r--include/asm-v850/v850e2.h69
-rw-r--r--include/asm-v850/v850e2_cache.h75
-rw-r--r--include/asm-v850/v850e_cache.h48
-rw-r--r--include/asm-v850/v850e_intc.h133
-rw-r--r--include/asm-v850/v850e_timer_c.h48
-rw-r--r--include/asm-v850/v850e_timer_d.h62
-rw-r--r--include/asm-v850/v850e_uart.h76
-rw-r--r--include/asm-v850/v850e_uarta.h278
-rw-r--r--include/asm-v850/v850e_uartb.h262
-rw-r--r--include/asm-v850/v850e_utils.h35
-rw-r--r--include/asm-x86/Kbuild2
-rw-r--r--include/asm-x86/gpio.h56
-rw-r--r--include/asm-x86/hugetlb.h10
-rw-r--r--include/asm-x86/i387.h54
-rw-r--r--include/asm-x86/ide.h65
-rw-r--r--include/asm-x86/io_32.h2
-rw-r--r--include/asm-x86/io_64.h2
-rw-r--r--include/asm-x86/ipi.h2
-rw-r--r--include/asm-x86/page.h3
-rw-r--r--include/asm-x86/processor-flags.h2
-rw-r--r--include/asm-x86/processor.h2
-rw-r--r--include/asm-x86/semaphore.h1
-rw-r--r--include/asm-x86/thread_info.h6
-rw-r--r--include/asm-x86/unistd_32.h6
-rw-r--r--include/asm-x86/unistd_64.h14
-rw-r--r--include/asm-xtensa/ide.h35
-rw-r--r--include/asm-xtensa/kvm.h6
-rw-r--r--include/asm-xtensa/page.h2
-rw-r--r--include/asm-xtensa/ptrace.h10
-rw-r--r--include/asm-xtensa/semaphore.h1
-rw-r--r--include/asm-xtensa/thread_info.h5
-rw-r--r--include/linux/Kbuild3
-rw-r--r--include/linux/acct.h3
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/anon_inodes.h2
-rw-r--r--include/linux/async_tx.h11
-rw-r--r--include/linux/atmel-pwm-bl.h43
-rw-r--r--include/linux/audit.h1
-rw-r--r--include/linux/auto_fs4.h2
-rw-r--r--include/linux/auxvec.h6
-rw-r--r--include/linux/bcd.h9
-rw-r--r--include/linux/binfmts.h2
-rw-r--r--include/linux/bootmem.h104
-rw-r--r--include/linux/byteorder/big_endian.h12
-rw-r--r--include/linux/byteorder/little_endian.h12
-rw-r--r--include/linux/cgroup.h51
-rw-r--r--include/linux/coda.h43
-rw-r--r--include/linux/consolemap.h14
-rw-r--r--include/linux/cpu.h15
-rw-r--r--include/linux/cpumask.h172
-rw-r--r--include/linux/cpuset.h7
-rw-r--r--include/linux/crash_dump.h8
-rw-r--r--include/linux/dca.h7
-rw-r--r--include/linux/delayacct.h19
-rw-r--r--include/linux/dirent.h20
-rw-r--r--include/linux/dmaengine.h69
-rw-r--r--include/linux/dw_dmac.h62
-rw-r--r--include/linux/eventfd.h7
-rw-r--r--include/linux/eventpoll.h4
-rw-r--r--include/linux/ext2_fs.h4
-rw-r--r--include/linux/ext3_fs.h1
-rw-r--r--include/linux/fb.h5
-rw-r--r--include/linux/fd1772.h80
-rw-r--r--include/linux/fs.h17
-rw-r--r--include/linux/fuse.h3
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/gfp.h24
-rw-r--r--include/linux/gpio.h13
-rw-r--r--include/linux/hid.h2
-rw-r--r--include/linux/hugetlb.h143
-rw-r--r--include/linux/i2c/max732x.h19
-rw-r--r--include/linux/ide.h223
-rw-r--r--include/linux/idr.h24
-rw-r--r--include/linux/init.h8
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/inotify.h6
-rw-r--r--include/linux/ipc_namespace.h3
-rw-r--r--include/linux/irqflags.h54
-rw-r--r--include/linux/kallsyms.h19
-rw-r--r--include/linux/kernel.h29
-rw-r--r--include/linux/kmod.h11
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/kprobes.h7
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/lcd.h2
-rw-r--r--include/linux/leds-pca9532.h45
-rw-r--r--include/linux/leds.h16
-rw-r--r--include/linux/list.h4
-rw-r--r--include/linux/major.h2
-rw-r--r--include/linux/memcontrol.h24
-rw-r--r--include/linux/memory_hotplug.h20
-rw-r--r--include/linux/mempolicy.h19
-rw-r--r--include/linux/mfd/core.h55
-rw-r--r--include/linux/mfd/tc6393xb.h49
-rw-r--r--include/linux/mfd/tmio.h17
-rw-r--r--include/linux/migrate.h21
-rw-r--r--include/linux/mlx4/device.h10
-rw-r--r--include/linux/mlx4/qp.h18
-rw-r--r--include/linux/mm.h30
-rw-r--r--include/linux/mm_types.h15
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/msdos_fs.h56
-rw-r--r--include/linux/mtd/ubi.h5
-rw-r--r--include/linux/net.h18
-rw-r--r--include/linux/nfsd/nfsd.h1
-rw-r--r--include/linux/notifier.h2
-rw-r--r--include/linux/nsproxy.h7
-rw-r--r--include/linux/page-flags.h29
-rw-r--r--include/linux/pagemap.h2
-rw-r--r--include/linux/parser.h2
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h11
-rw-r--r--include/linux/pid.h8
-rw-r--r--include/linux/pid_namespace.h8
-rw-r--r--include/linux/pm.h109
-rw-r--r--include/linux/pm_legacy.h35
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/profile.h58
-rw-r--r--include/linux/quota.h33
-rw-r--r--include/linux/quotaops.h281
-rw-r--r--include/linux/ratelimit.h27
-rw-r--r--include/linux/rcupreempt.h9
-rw-r--r--include/linux/reiserfs_fs.h4
-rw-r--r--include/linux/reiserfs_fs_sb.h6
-rw-r--r--include/linux/res_counter.h33
-rw-r--r--include/linux/rtc.h17
-rw-r--r--include/linux/scatterlist.h38
-rw-r--r--include/linux/sched.h45
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/sem.h30
-rw-r--r--include/linux/semaphore.h6
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/signalfd.h6
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/sm501.h39
-rw-r--r--include/linux/smb_fs.h19
-rw-r--r--include/linux/smc91x.h12
-rw-r--r--include/linux/spi/ds1305.h35
-rw-r--r--include/linux/spi/mcp23s08.h25
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/spinlock.h72
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/syscalls.h9
-rw-r--r--include/linux/taskstats.h6
-rw-r--r--include/linux/tick.h5
-rw-r--r--include/linux/timerfd.h6
-rw-r--r--include/linux/typecheck.h24
-rw-r--r--include/linux/usb/composite.h2
-rw-r--r--include/linux/virtio_9p.h2
-rw-r--r--include/linux/virtio_balloon.h2
-rw-r--r--include/linux/virtio_blk.h5
-rw-r--r--include/linux/virtio_config.h16
-rw-r--r--include/linux/virtio_console.h2
-rw-r--r--include/linux/virtio_net.h2
-rw-r--r--include/linux/virtio_pci.h5
-rw-r--r--include/linux/virtio_ring.h2
-rw-r--r--include/linux/virtio_rng.h2
-rw-r--r--include/linux/vmstat.h6
-rw-r--r--include/linux/vt_kern.h19
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/mtd/ubi-user.h76
-rw-r--r--include/net/ieee80211_radiotap.h2
-rw-r--r--include/rdma/rdma_cm.h4
-rw-r--r--include/video/atmel_lcdc.h1
-rw-r--r--include/video/ili9320.h201
-rw-r--r--include/video/neomagic.h17
-rw-r--r--include/video/platform_lcd.h21
-rw-r--r--include/video/trident.h77
-rw-r--r--init/do_mounts.c1
-rw-r--r--init/do_mounts_rd.c37
-rw-r--r--init/initramfs.c22
-rw-r--r--init/main.c9
-rw-r--r--init/version.c3
-rw-r--r--ipc/ipc_sysctl.c72
-rw-r--r--ipc/ipcns_notifier.c20
-rw-r--r--ipc/mqueue.c25
-rw-r--r--ipc/sem.c316
-rw-r--r--ipc/shm.c24
-rw-r--r--ipc/util.c61
-rw-r--r--ipc/util.h6
-rw-r--r--kernel/Kconfig.hz2
-rw-r--r--kernel/Makefile5
-rw-r--r--kernel/acct.c222
-rw-r--r--kernel/auditsc.c3
-rw-r--r--kernel/capability.c338
-rw-r--r--kernel/cgroup.c309
-rw-r--r--kernel/cpu.c47
-rw-r--r--kernel/cpuset.c359
-rw-r--r--kernel/delayacct.c16
-rw-r--r--kernel/exit.c61
-rw-r--r--kernel/fork.c44
-rw-r--r--kernel/irq/manage.c107
-rw-r--r--kernel/kallsyms.c2
-rw-r--r--kernel/kmod.c13
-rw-r--r--kernel/kprobes.c132
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/marker.c25
-rw-r--r--kernel/ns_cgroup.c8
-rw-r--r--kernel/nsproxy.c8
-rw-r--r--kernel/panic.c22
-rw-r--r--kernel/pid.c10
-rw-r--r--kernel/pid_namespace.c10
-rw-r--r--kernel/posix-timers.c21
-rw-r--r--kernel/power/Kconfig11
-rw-r--r--kernel/power/main.c194
-rw-r--r--kernel/power/poweroff.c4
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/snapshot.c88
-rw-r--r--kernel/printk.c19
-rw-r--r--kernel/profile.c4
-rw-r--r--kernel/rcuclassic.c2
-rw-r--r--kernel/rcupreempt.c10
-rw-r--r--kernel/res_counter.c48
-rw-r--r--kernel/sched.c351
-rw-r--r--kernel/sched_fair.c10
-rw-r--r--kernel/sched_rt.c83
-rw-r--r--kernel/signal.c80
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/softlockup.c45
-rw-r--r--kernel/stop_machine.c3
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/sys_ni.c6
-rw-r--r--kernel/sysctl.c34
-rw-r--r--kernel/sysctl_check.c2
-rw-r--r--kernel/taskstats.c6
-rw-r--r--kernel/time/clocksource.c4
-rw-r--r--kernel/time/tick-broadcast.c3
-rw-r--r--kernel/time/tick-common.c14
-rw-r--r--kernel/time/tick-sched.c16
-rw-r--r--kernel/trace/trace_sysprof.c6
-rw-r--r--kernel/tsacct.c25
-rw-r--r--kernel/workqueue.c149
-rw-r--r--lib/Kconfig.debug38
-rw-r--r--lib/Kconfig.kgdb3
-rw-r--r--lib/Makefile2
-rw-r--r--lib/bcd.c14
-rw-r--r--lib/cmdline.c16
-rw-r--r--lib/cpumask.c9
-rw-r--r--lib/debugobjects.c4
-rw-r--r--lib/idr.c140
-rw-r--r--lib/inflate.c52
-rw-r--r--lib/kobject.c9
-rw-r--r--lib/list_debug.c50
-rw-r--r--lib/lzo/lzo1x_decompress.c6
-rw-r--r--lib/ratelimit.c55
-rw-r--r--lib/scatterlist.c176
-rw-r--r--lib/smp_processor_id.c6
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/Makefile2
-rw-r--r--mm/allocpercpu.c4
-rw-r--r--mm/bootmem.c935
-rw-r--r--mm/filemap.c168
-rw-r--r--mm/hugetlb.c1612
-rw-r--r--mm/internal.h61
-rw-r--r--mm/memcontrol.c364
-rw-r--r--mm/memory.c243
-rw-r--r--mm/memory_hotplug.c80
-rw-r--r--mm/mempolicy.c9
-rw-r--r--mm/migrate.c24
-rw-r--r--mm/mm_init.c152
-rw-r--r--mm/mmap.c12
-rw-r--r--mm/mprotect.c6
-rw-r--r--mm/page_alloc.c152
-rw-r--r--mm/pdflush.c4
-rw-r--r--mm/rmap.c14
-rw-r--r--mm/shmem.c91
-rw-r--r--mm/slob.c12
-rw-r--r--mm/slub.c65
-rw-r--r--mm/sparse.c115
-rw-r--r--mm/swap.c8
-rw-r--r--mm/swapfile.c49
-rw-r--r--mm/vmalloc.c20
-rw-r--r--mm/vmscan.c5
-rw-r--r--mm/vmstat.c3
-rw-r--r--net/802/psnap.c4
-rw-r--r--net/9p/trans_fd.c2
-rw-r--r--net/compat.c52
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/sysctl_net_core.c4
-rw-r--r--net/core/user_dma.c1
-rw-r--r--net/core/utils.c5
-rw-r--r--net/ipv4/af_inet.c9
-rw-r--r--net/ipv6/af_inet6.c9
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/socket.c142
-rw-r--r--net/sunrpc/svc.c3
-rw-r--r--net/sysctl_net.c14
-rw-r--r--scripts/Makefile.fwinst10
-rwxr-xr-xscripts/checkpatch.pl310
-rwxr-xr-xscripts/checkstack.pl27
-rw-r--r--scripts/genksyms/genksyms.c3
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--scripts/mod/mk_elfconfig.c2
-rw-r--r--security/Kconfig3
-rw-r--r--security/commoncap.c108
-rw-r--r--security/device_cgroup.c158
-rw-r--r--sound/core/info.c1
-rw-r--r--sound/oss/Kconfig41
-rw-r--r--sound/oss/Makefile3
-rw-r--r--sound/oss/trident.c4654
-rw-r--r--sound/oss/trident.h358
-rw-r--r--sound/soc/pxa/Kconfig1
-rw-r--r--sound/soc/pxa/tosa.c29
-rw-r--r--virt/kvm/kvm_main.c4
1507 files changed, 50091 insertions(+), 40592 deletions(-)
diff --git a/CREDITS b/CREDITS
index 077b147388bd..c62dcb3b7e26 100644
--- a/CREDITS
+++ b/CREDITS
@@ -317,6 +317,14 @@ S: 2322 37th Ave SW
 S: Seattle, Washington 98126-2010
 S: USA
 
+N: Muli Ben-Yehuda
+E: mulix@mulix.org
+E: muli@il.ibm.com
+W: http://www.mulix.org
+D: trident OSS sound driver, x86-64 dma-ops and Calgary IOMMU,
+D: KVM and Xen bits and other misc. hackery.
+S: Haifa, Israel
+
 N: Johannes Berg
 E: johannes@sipsolutions.net
 W: http://johannes.sipsolutions.net/
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1977fab38656..6de71308a906 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -361,8 +361,6 @@ telephony/
361 - directory with info on telephony (e.g. voice over IP) support. 361 - directory with info on telephony (e.g. voice over IP) support.
362time_interpolators.txt 362time_interpolators.txt
363 - info on time interpolators. 363 - info on time interpolators.
364tipar.txt
365 - information about Parallel link cable for Texas Instruments handhelds.
366tty.txt 364tty.txt
367 - guide to the locking policies of the tty layer. 365 - guide to the locking policies of the tty layer.
368uml/ 366uml/
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
new file mode 100644
index 000000000000..7a16fe1e2270
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -0,0 +1,24 @@
1What: /sys/devices/system/memory
2Date: June 2008
3Contact: Badari Pulavarty <pbadari@us.ibm.com>
4Description:
5	The /sys/devices/system/memory directory contains a snapshot of
6	the internal state of the kernel memory blocks. Files may be
7 added or removed dynamically to represent hot-add/remove
8 operations.
9
10Users: hotplug memory add/remove tools
11 https://w3.opensource.ibm.com/projects/powerpc-utils/
12
13What: /sys/devices/system/memory/memoryX/removable
14Date: June 2008
15Contact: Badari Pulavarty <pbadari@us.ibm.com>
16Description:
17 The file /sys/devices/system/memory/memoryX/removable
18 indicates whether this memory block is removable or not.
19	This is useful for a user-level agent to identify
20	removable sections of memory before attempting a
21	potentially expensive hot-remove memory operation.
22
23Users: hotplug memory remove tools
24 https://w3.opensource.ibm.com/projects/powerpc-utils/
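For illustration, a minimal userspace sketch that reads this attribute (the block name memory0 is only an example; real tools walk the memoryX directories):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/memory/memory0/removable", "r");
	int removable = 0;

	if (!f) {
		perror("removable");
		return 1;
	}
	if (fscanf(f, "%d", &removable) != 1)
		removable = 0;
	fclose(f);
	printf("memory0 is %sremovable\n", removable ? "" : "not ");
	return 0;
}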
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm b/Documentation/ABI/testing/sysfs-kernel-mm
new file mode 100644
index 000000000000..190d523ac159
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm
@@ -0,0 +1,6 @@
1What: /sys/kernel/mm
2Date: July 2008
3Contact: Nishanth Aravamudan <nacc@us.ibm.com>, VM maintainers
4Description:
5 /sys/kernel/mm/ should contain any and all VM
6 related information in /sys/kernel/.
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-hugepages b/Documentation/ABI/testing/sysfs-kernel-mm-hugepages
new file mode 100644
index 000000000000..e21c00571cf4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-hugepages
@@ -0,0 +1,15 @@
1What: /sys/kernel/mm/hugepages/
2Date: June 2008
3Contact: Nishanth Aravamudan <nacc@us.ibm.com>, hugetlb maintainers
4Description:
5 /sys/kernel/mm/hugepages/ contains a number of subdirectories
6 of the form hugepages-<size>kB, where <size> is the page size
7 of the hugepages supported by the kernel/CPU combination.
8
9 Under these directories are a number of files:
10 nr_hugepages
11 nr_overcommit_hugepages
12 free_hugepages
13 surplus_hugepages
14 resv_hugepages
15 See Documentation/vm/hugetlbpage.txt for details.
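For example, on an x86-64 machine supporting both 2 MB and 1 GB pages, the layout would look roughly like this (the sizes shown are illustrative):

	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
	/sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
	/sys/kernel/mm/hugepages/hugepages-1048576kB/free_hugepages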
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 6caa14615578..1875e502f872 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -474,25 +474,29 @@ make a good program).
474So, you can either get rid of GNU emacs, or change it to use saner 474So, you can either get rid of GNU emacs, or change it to use saner
475values. To do the latter, you can stick the following in your .emacs file: 475values. To do the latter, you can stick the following in your .emacs file:
476 476
477(defun linux-c-mode () 477(defun c-lineup-arglist-tabs-only (ignored)
478 "C mode with adjusted defaults for use with the Linux kernel." 478 "Line up argument lists by tabs, not spaces"
479 (interactive) 479 (let* ((anchor (c-langelem-pos c-syntactic-element))
480 (c-mode) 480 (column (c-langelem-2nd-pos c-syntactic-element))
481 (c-set-style "K&R") 481 (offset (- (1+ column) anchor))
482 (setq tab-width 8) 482 (steps (floor offset c-basic-offset)))
483 (setq indent-tabs-mode t) 483 (* (max steps 1)
484 (setq c-basic-offset 8)) 484 c-basic-offset)))
485 485
486This will define the M-x linux-c-mode command. When hacking on a 486(add-hook 'c-mode-hook
487module, if you put the string -*- linux-c -*- somewhere on the first 487 (lambda ()
488two lines, this mode will be automatically invoked. Also, you may want 488 (let ((filename (buffer-file-name)))
489to add 489 ;; Enable kernel mode for the appropriate files
490 490 (when (and filename
491(setq auto-mode-alist (cons '("/usr/src/linux.*/.*\\.[ch]$" . linux-c-mode) 491 (string-match "~/src/linux-trees" filename))
492 auto-mode-alist)) 492 (setq indent-tabs-mode t)
493 493 (c-set-style "linux")
494to your .emacs file if you want to have linux-c-mode switched on 494 (c-set-offset 'arglist-cont-nonempty
495automagically when you edit source files under /usr/src/linux. 495 '(c-lineup-gcc-asm-reg
496 c-lineup-arglist-tabs-only))))))
497
498This will make emacs go better with the kernel coding style for C
499files below ~/src/linux-trees.
496 500
497But even if you fail in getting emacs to do sane formatting, not 501But even if you fail in getting emacs to do sane formatting, not
498everything is lost: use "indent". 502everything is lost: use "indent".
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 2510763295d0..084f6ad7b7a0 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -219,10 +219,10 @@
219 </para> 219 </para>
220 220
221 <sect1 id="lock-intro"> 221 <sect1 id="lock-intro">
222 <title>Three Main Types of Kernel Locks: Spinlocks, Mutexes and Semaphores</title> 222 <title>Two Main Types of Kernel Locks: Spinlocks and Mutexes</title>
223 223
224 <para> 224 <para>
225 There are three main types of kernel locks. The fundamental type 225 There are two main types of kernel locks. The fundamental type
226 is the spinlock 226 is the spinlock
227 (<filename class="headerfile">include/asm/spinlock.h</filename>), 227 (<filename class="headerfile">include/asm/spinlock.h</filename>),
228 which is a very simple single-holder lock: if you can't get the 228 which is a very simple single-holder lock: if you can't get the
@@ -240,14 +240,6 @@
240 use a spinlock instead. 240 use a spinlock instead.
241 </para> 241 </para>
242 <para> 242 <para>
243 The third type is a semaphore
244 (<filename class="headerfile">include/linux/semaphore.h</filename>): it
245 can have more than one holder at any time (the number decided at
246 initialization time), although it is most commonly used as a
247 single-holder lock (a mutex). If you can't get a semaphore, your
248 task will be suspended and later on woken up - just like for mutexes.
249 </para>
250 <para>
251 Neither type of lock is recursive: see 243 Neither type of lock is recursive: see
252 <xref linkend="deadlock"/>. 244 <xref linkend="deadlock"/>.
253 </para> 245 </para>
@@ -278,7 +270,7 @@
278 </para> 270 </para>
279 271
280 <para> 272 <para>
281 Semaphores still exist, because they are required for 273 Mutexes still exist, because they are required for
282 synchronization between <firstterm linkend="gloss-usercontext">user 274 synchronization between <firstterm linkend="gloss-usercontext">user
283 contexts</firstterm>, as we will see below. 275 contexts</firstterm>, as we will see below.
284 </para> 276 </para>
@@ -289,18 +281,17 @@
289 281
290 <para> 282 <para>
291 If you have a data structure which is only ever accessed from 283 If you have a data structure which is only ever accessed from
292 user context, then you can use a simple semaphore 284 user context, then you can use a simple mutex
293 (<filename>linux/linux/semaphore.h</filename>) to protect it. This 285 (<filename>include/linux/mutex.h</filename>) to protect it. This
294 is the most trivial case: you initialize the semaphore to the number 286 is the most trivial case: you initialize the mutex. Then you can
295 of resources available (usually 1), and call 287 call <function>mutex_lock_interruptible()</function> to grab the mutex,
296 <function>down_interruptible()</function> to grab the semaphore, and 288 and <function>mutex_unlock()</function> to release it. There is also a
297 <function>up()</function> to release it. There is also a 289 <function>mutex_lock()</function>, which should be avoided, because it
298 <function>down()</function>, which should be avoided, because it
299 will not return if a signal is received. 290 will not return if a signal is received.
300 </para> 291 </para>
301 292
302 <para> 293 <para>
303 Example: <filename>linux/net/core/netfilter.c</filename> allows 294 Example: <filename>net/netfilter/nf_sockopt.c</filename> allows
304 registration of new <function>setsockopt()</function> and 295 registration of new <function>setsockopt()</function> and
305 <function>getsockopt()</function> calls, with 296 <function>getsockopt()</function> calls, with
306 <function>nf_register_sockopt()</function>. Registration and 297 <function>nf_register_sockopt()</function>. Registration and
@@ -515,7 +506,7 @@
515 <listitem> 506 <listitem>
516 <para> 507 <para>
517 If you are in a process context (any syscall) and want to 508 If you are in a process context (any syscall) and want to
518 lock other process out, use a semaphore. You can take a semaphore 509 lock other process out, use a mutex. You can take a mutex
519 and sleep (<function>copy_from_user*(</function> or 510 and sleep (<function>copy_from_user*(</function> or
520 <function>kmalloc(x,GFP_KERNEL)</function>). 511 <function>kmalloc(x,GFP_KERNEL)</function>).
521 </para> 512 </para>
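As a minimal sketch of the pattern described above (the cache structure and all names are invented for illustration, not taken from this patch):

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/errno.h>

static LIST_HEAD(my_cache);
static DEFINE_MUTEX(my_cache_mutex);

/* Called only from user context; may sleep while waiting for the mutex. */
static int my_cache_add(struct list_head *entry)
{
	if (mutex_lock_interruptible(&my_cache_mutex))
		return -EINTR;	/* a signal arrived while waiting */
	list_add(entry, &my_cache);
	mutex_unlock(&my_cache_mutex);
	return 0;
}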
@@ -662,7 +653,7 @@
662<entry>SLBH</entry> 653<entry>SLBH</entry>
663<entry>SLBH</entry> 654<entry>SLBH</entry>
664<entry>SLBH</entry> 655<entry>SLBH</entry>
665<entry>DI</entry> 656<entry>MLI</entry>
666<entry>None</entry> 657<entry>None</entry>
667</row> 658</row>
668 659
@@ -692,8 +683,8 @@
692<entry>spin_lock_bh</entry> 683<entry>spin_lock_bh</entry>
693</row> 684</row>
694<row> 685<row>
695<entry>DI</entry> 686<entry>MLI</entry>
696<entry>down_interruptible</entry> 687<entry>mutex_lock_interruptible</entry>
697</row> 688</row>
698 689
699</tbody> 690</tbody>
@@ -1310,7 +1301,7 @@ as Alan Cox says, <quote>Lock data, not code</quote>.
1310 <para> 1301 <para>
1311 There is a coding bug where a piece of code tries to grab a 1302 There is a coding bug where a piece of code tries to grab a
1312 spinlock twice: it will spin forever, waiting for the lock to 1303 spinlock twice: it will spin forever, waiting for the lock to
1313 be released (spinlocks, rwlocks and semaphores are not 1304 be released (spinlocks, rwlocks and mutexes are not
1314 recursive in Linux). This is trivial to diagnose: not a 1305 recursive in Linux). This is trivial to diagnose: not a
1315 stay-up-five-nights-talk-to-fluffy-code-bunnies kind of 1306 stay-up-five-nights-talk-to-fluffy-code-bunnies kind of
1316 problem. 1307 problem.
@@ -1335,7 +1326,7 @@ as Alan Cox says, <quote>Lock data, not code</quote>.
1335 1326
1336 <para> 1327 <para>
1337 This complete lockup is easy to diagnose: on SMP boxes the 1328 This complete lockup is easy to diagnose: on SMP boxes the
1338 watchdog timer or compiling with <symbol>DEBUG_SPINLOCKS</symbol> set 1329 watchdog timer or compiling with <symbol>DEBUG_SPINLOCK</symbol> set
1339 (<filename>include/linux/spinlock.h</filename>) will show this up 1330 (<filename>include/linux/spinlock.h</filename>) will show this up
1340 immediately when it happens. 1331 immediately when it happens.
1341 </para> 1332 </para>
@@ -1558,7 +1549,7 @@ the amount of locking which needs to be done.
1558 <title>Read/Write Lock Variants</title> 1549 <title>Read/Write Lock Variants</title>
1559 1550
1560 <para> 1551 <para>
1561 Both spinlocks and semaphores have read/write variants: 1552 Both spinlocks and mutexes have read/write variants:
1562 <type>rwlock_t</type> and <structname>struct rw_semaphore</structname>. 1553 <type>rwlock_t</type> and <structname>struct rw_semaphore</structname>.
1563 These divide users into two classes: the readers and the writers. If 1554 These divide users into two classes: the readers and the writers. If
1564 you are only reading the data, you can get a read lock, but to write to 1555 you are only reading the data, you can get a read lock, but to write to
@@ -1681,7 +1672,7 @@ the amount of locking which needs to be done.
1681 #include &lt;linux/slab.h&gt; 1672 #include &lt;linux/slab.h&gt;
1682 #include &lt;linux/string.h&gt; 1673 #include &lt;linux/string.h&gt;
1683+#include &lt;linux/rcupdate.h&gt; 1674+#include &lt;linux/rcupdate.h&gt;
1684 #include &lt;linux/semaphore.h&gt; 1675 #include &lt;linux/mutex.h&gt;
1685 #include &lt;asm/errno.h&gt; 1676 #include &lt;asm/errno.h&gt;
1686 1677
1687 struct object 1678 struct object
@@ -1913,7 +1904,7 @@ machines due to caching.
1913 </listitem> 1904 </listitem>
1914 <listitem> 1905 <listitem>
1915 <para> 1906 <para>
1916 <function> put_user()</function> 1907 <function>put_user()</function>
1917 </para> 1908 </para>
1918 </listitem> 1909 </listitem>
1919 </itemizedlist> 1910 </itemizedlist>
@@ -1927,13 +1918,13 @@ machines due to caching.
1927 1918
1928 <listitem> 1919 <listitem>
1929 <para> 1920 <para>
1930 <function>down_interruptible()</function> and 1921 <function>mutex_lock_interruptible()</function> and
1931 <function>down()</function> 1922 <function>mutex_lock()</function>
1932 </para> 1923 </para>
1933 <para> 1924 <para>
1934 There is a <function>down_trylock()</function> which can be 1925 There is a <function>mutex_trylock()</function> which can be
1935 used inside interrupt context, as it will not sleep. 1926 used inside interrupt context, as it will not sleep.
1936 <function>up()</function> will also never sleep. 1927 <function>mutex_unlock()</function> will also never sleep.
1937 </para> 1928 </para>
1938 </listitem> 1929 </listitem>
1939 </itemizedlist> 1930 </itemizedlist>
@@ -2023,7 +2014,7 @@ machines due to caching.
2023 <para> 2014 <para>
2024 Prior to 2.5, or when <symbol>CONFIG_PREEMPT</symbol> is 2015 Prior to 2.5, or when <symbol>CONFIG_PREEMPT</symbol> is
2025 unset, processes in user context inside the kernel would not 2016 unset, processes in user context inside the kernel would not
2026 preempt each other (ie. you had that CPU until you have it up, 2017 preempt each other (ie. you had that CPU until you gave it up,
2027 except for interrupts). With the addition of 2018 except for interrupts). With the addition of
2028 <symbol>CONFIG_PREEMPT</symbol> in 2.5.4, this changed: when 2019 <symbol>CONFIG_PREEMPT</symbol> in 2.5.4, this changed: when
2029 in user context, higher priority tasks can "cut in": spinlocks 2020 in user context, higher priority tasks can "cut in": spinlocks
diff --git a/Documentation/DocBook/procfs-guide.tmpl b/Documentation/DocBook/procfs-guide.tmpl
index 1fd6a1ec7591..8a5dc6e021ff 100644
--- a/Documentation/DocBook/procfs-guide.tmpl
+++ b/Documentation/DocBook/procfs-guide.tmpl
@@ -29,12 +29,12 @@
29 29
30 <revhistory> 30 <revhistory>
31 <revision> 31 <revision>
32 <revnumber>1.0&nbsp;</revnumber> 32 <revnumber>1.0</revnumber>
33 <date>May 30, 2001</date> 33 <date>May 30, 2001</date>
34 <revremark>Initial revision posted to linux-kernel</revremark> 34 <revremark>Initial revision posted to linux-kernel</revremark>
35 </revision> 35 </revision>
36 <revision> 36 <revision>
37 <revnumber>1.1&nbsp;</revnumber> 37 <revnumber>1.1</revnumber>
38 <date>June 3, 2001</date> 38 <date>June 3, 2001</date>
39 <revremark>Revised after comments from linux-kernel</revremark> 39 <revremark>Revised after comments from linux-kernel</revremark>
40 </revision> 40 </revision>
diff --git a/Documentation/accounting/delay-accounting.txt b/Documentation/accounting/delay-accounting.txt
index 1443cd71d263..8a12f0730c94 100644
--- a/Documentation/accounting/delay-accounting.txt
+++ b/Documentation/accounting/delay-accounting.txt
@@ -11,6 +11,7 @@ the delays experienced by a task while
11a) waiting for a CPU (while being runnable) 11a) waiting for a CPU (while being runnable)
12b) completion of synchronous block I/O initiated by the task 12b) completion of synchronous block I/O initiated by the task
13c) swapping in pages 13c) swapping in pages
14d) memory reclaim
14 15
15and makes these statistics available to userspace through 16and makes these statistics available to userspace through
16the taskstats interface. 17the taskstats interface.
@@ -41,7 +42,7 @@ this structure. See
41 include/linux/taskstats.h 42 include/linux/taskstats.h
42for a description of the fields pertaining to delay accounting. 43for a description of the fields pertaining to delay accounting.
43It will generally be in the form of counters returning the cumulative 44It will generally be in the form of counters returning the cumulative
44delay seen for cpu, sync block I/O, swapin etc. 45delay seen for cpu, sync block I/O, swapin, memory reclaim etc.
45 46
46Taking the difference of two successive readings of a given 47Taking the difference of two successive readings of a given
47counter (say cpu_delay_total) for a task will give the delay 48counter (say cpu_delay_total) for a task will give the delay
@@ -94,7 +95,9 @@ CPU count real total virtual total delay total
94 7876 92005750 100000000 24001500 95 7876 92005750 100000000 24001500
95IO count delay total 96IO count delay total
96 0 0 97 0 0
97MEM count delay total 98SWAP count delay total
99 0 0
100RECLAIM count delay total
98 0 0 101 0 0
99 102
100Get delays seen in executing a given simple command 103Get delays seen in executing a given simple command
@@ -108,5 +111,7 @@ CPU count real total virtual total delay total
108 6 4000250 4000000 0 111 6 4000250 4000000 0
109IO count delay total 112IO count delay total
110 0 0 113 0 0
111MEM count delay total 114SWAP count delay total
115 0 0
116RECLAIM count delay total
112 0 0 117 0 0
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index 40121b5cca14..3f7755f3963f 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -196,14 +196,18 @@ void print_delayacct(struct taskstats *t)
196 " %15llu%15llu%15llu%15llu\n" 196 " %15llu%15llu%15llu%15llu\n"
197 "IO %15s%15s\n" 197 "IO %15s%15s\n"
198 " %15llu%15llu\n" 198 " %15llu%15llu\n"
199 "MEM %15s%15s\n" 199 "SWAP %15s%15s\n"
200 " %15llu%15llu\n"
201 "RECLAIM %12s%15s\n"
200 " %15llu%15llu\n", 202 " %15llu%15llu\n",
201 "count", "real total", "virtual total", "delay total", 203 "count", "real total", "virtual total", "delay total",
202 t->cpu_count, t->cpu_run_real_total, t->cpu_run_virtual_total, 204 t->cpu_count, t->cpu_run_real_total, t->cpu_run_virtual_total,
203 t->cpu_delay_total, 205 t->cpu_delay_total,
204 "count", "delay total", 206 "count", "delay total",
205 t->blkio_count, t->blkio_delay_total, 207 t->blkio_count, t->blkio_delay_total,
206 "count", "delay total", t->swapin_count, t->swapin_delay_total); 208 "count", "delay total", t->swapin_count, t->swapin_delay_total,
209 "count", "delay total",
210 t->freepages_count, t->freepages_delay_total);
207} 211}
208 212
209void task_context_switch_counts(struct taskstats *t) 213void task_context_switch_counts(struct taskstats *t)
diff --git a/Documentation/accounting/taskstats-struct.txt b/Documentation/accounting/taskstats-struct.txt
index cd784f46bf8a..b988d110db59 100644
--- a/Documentation/accounting/taskstats-struct.txt
+++ b/Documentation/accounting/taskstats-struct.txt
@@ -26,6 +26,8 @@ There are three different groups of fields in the struct taskstats:
26 26
275) Time accounting for SMT machines 275) Time accounting for SMT machines
28 28
296) Extended delay accounting fields for memory reclaim
30
29Future extension should add fields to the end of the taskstats struct, and 31Future extension should add fields to the end of the taskstats struct, and
30should not change the relative position of each field within the struct. 32should not change the relative position of each field within the struct.
31 33
@@ -170,4 +172,9 @@ struct taskstats {
170 __u64 ac_utimescaled; /* utime scaled on frequency etc */ 172 __u64 ac_utimescaled; /* utime scaled on frequency etc */
171 __u64 ac_stimescaled; /* stime scaled on frequency etc */ 173 __u64 ac_stimescaled; /* stime scaled on frequency etc */
172 __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */ 174 __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */
175
1766) Extended delay accounting fields for memory reclaim
177 /* Delay waiting for memory reclaim */
178 __u64 freepages_count;
179 __u64 freepages_delay_total;
173} 180}
diff --git a/Documentation/bt8xxgpio.txt b/Documentation/bt8xxgpio.txt
new file mode 100644
index 000000000000..d8297e4ebd26
--- /dev/null
+++ b/Documentation/bt8xxgpio.txt
@@ -0,0 +1,67 @@
1===============================================================
2== BT8XXGPIO driver ==
3== ==
4== A driver for a selfmade cheap BT8xx based PCI GPIO-card ==
5== ==
6== For advanced documentation, see ==
7== http://www.bu3sch.de/btgpio.php ==
8===============================================================
9
10
11A generic digital 24-port PCI GPIO card can be built out of an ordinary
12Brooktree bt848, bt849, bt878 or bt879 based analog TV tuner card. The
13Brooktree chip is used in old analog Hauppauge WinTV PCI cards. You can easily
14find them used for low prices on the net.
15
16The bt8xx chip does have 24 digital GPIO ports.
17These ports are accessible via 24 pins on the SMD chip package.
18
19
20==============================================
21== How to physically access the GPIO pins ==
22==============================================
23
24There are several ways to access these pins. One might unsolder the whole chip
25and put it on a custom PCI board, or one might only unsolder each individual
26GPIO pin and solder that to some tiny wire. As the chip package really is tiny
27there are some advanced soldering skills needed in any case.
28
29The physical pinouts are drawn in the following ASCII art.
30The GPIO pins are marked with G00-G23
31
32 G G G G G G G G G G G G G G G G G G
33 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
34 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7
35 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
36 ---------------------------------------------------------------------------
37 --| ^ ^ |--
38 --| pin 86 pin 67 |--
39 --| |--
40 --| pin 61 > |-- G18
41 --| |-- G19
42 --| |-- G20
43 --| |-- G21
44 --| |-- G22
45 --| pin 56 > |-- G23
46 --| |--
47 --| Brooktree 878/879 |--
48 --| |--
49 --| |--
50 --| |--
51 --| |--
52 --| |--
53 --| |--
54 --| |--
55 --| |--
56 --| |--
57 --| |--
58 --| |--
59 --| |--
60 --| |--
61 --| O |--
62 --| |--
63 ---------------------------------------------------------------------------
64 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
65 ^
66 This is pin 1
67
diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt
index 866b9cd9a959..9b53d5827361 100644
--- a/Documentation/controllers/memory.txt
+++ b/Documentation/controllers/memory.txt
@@ -242,8 +242,7 @@ rmdir() if there are no tasks.
2421. Add support for accounting huge pages (as a separate controller) 2421. Add support for accounting huge pages (as a separate controller)
2432. Make per-cgroup scanner reclaim not-shared pages first 2432. Make per-cgroup scanner reclaim not-shared pages first
2443. Teach controller to account for shared-pages 2443. Teach controller to account for shared-pages
2454. Start reclamation when the limit is lowered 2454. Start reclamation in the background when the limit is
2465. Start reclamation in the background when the limit is
247 not yet hit but the usage is getting closer 246 not yet hit but the usage is getting closer
248 247
249Summary 248Summary
diff --git a/Documentation/edac.txt b/Documentation/edac.txt
index a5c36842ecef..ced527388001 100644
--- a/Documentation/edac.txt
+++ b/Documentation/edac.txt
@@ -222,74 +222,9 @@ both csrow2 and csrow3 are populated, this indicates a dual ranked
222set of DIMMs for channels 0 and 1. 222set of DIMMs for channels 0 and 1.
223 223
224 224
225Within each of the 'mc','mcX' and 'csrowX' directories are several 225Within each of the 'mcX' and 'csrowX' directories are several
226EDAC control and attribute files. 226EDAC control and attribute files.
227 227
228
229============================================================================
230DIRECTORY 'mc'
231
232In directory 'mc' are EDAC system overall control and attribute files:
233
234
235Panic on UE control file:
236
237 'edac_mc_panic_on_ue'
238
239 An uncorrectable error will cause a machine panic. This is usually
240 desirable. It is a bad idea to continue when an uncorrectable error
241 occurs - it is indeterminate what was uncorrected and the operating
242 system context might be so mangled that continuing will lead to further
243 corruption. If the kernel has MCE configured, then EDAC will never
244 notice the UE.
245
246 LOAD TIME: module/kernel parameter: panic_on_ue=[0|1]
247
248 RUN TIME: echo "1" >/sys/devices/system/edac/mc/edac_mc_panic_on_ue
249
250
251Log UE control file:
252
253 'edac_mc_log_ue'
254
255 Generate kernel messages describing uncorrectable errors. These errors
256 are reported through the system message log system. UE statistics
257 will be accumulated even when UE logging is disabled.
258
259 LOAD TIME: module/kernel parameter: log_ue=[0|1]
260
261 RUN TIME: echo "1" >/sys/devices/system/edac/mc/edac_mc_log_ue
262
263
264Log CE control file:
265
266 'edac_mc_log_ce'
267
268 Generate kernel messages describing correctable errors. These
269 errors are reported through the system message log system.
270 CE statistics will be accumulated even when CE logging is disabled.
271
272 LOAD TIME: module/kernel parameter: log_ce=[0|1]
273
274 RUN TIME: echo "1" >/sys/devices/system/edac/mc/edac_mc_log_ce
275
276
277Polling period control file:
278
279 'edac_mc_poll_msec'
280
281 The time period, in milliseconds, for polling for error information.
282 Too small a value wastes resources. Too large a value might delay
283 necessary handling of errors and might loose valuable information for
284 locating the error. 1000 milliseconds (once each second) is the current
285 default. Systems which require all the bandwidth they can get, may
286 increase this.
287
288 LOAD TIME: module/kernel parameter: poll_msec=[0|1]
289
290 RUN TIME: echo "1000" >/sys/devices/system/edac/mc/edac_mc_poll_msec
291
292
293============================================================================ 228============================================================================
294'mcX' DIRECTORIES 229'mcX' DIRECTORIES
295 230
@@ -537,7 +472,6 @@ Channel 1 DIMM Label control file:
537 motherboard specific and determination of this information 472 motherboard specific and determination of this information
538 must occur in userland at this time. 473 must occur in userland at this time.
539 474
540
541============================================================================ 475============================================================================
542SYSTEM LOGGING 476SYSTEM LOGGING
543 477
@@ -570,7 +504,6 @@ error type, a notice of "no info" and then an optional,
570driver-specific error message. 504driver-specific error message.
571 505
572 506
573
574============================================================================ 507============================================================================
575PCI Bus Parity Detection 508PCI Bus Parity Detection
576 509
@@ -604,6 +537,74 @@ Enable/Disable PCI Parity checking control file:
604 echo "0" >/sys/devices/system/edac/pci/check_pci_parity 537 echo "0" >/sys/devices/system/edac/pci/check_pci_parity
605 538
606 539
540Parity Count:
541
542 'pci_parity_count'
543
544 This attribute file will display the number of parity errors that
545 have been detected.
546
547
548============================================================================
549MODULE PARAMETERS
550
551Panic on UE control file:
552
553 'edac_mc_panic_on_ue'
554
555 An uncorrectable error will cause a machine panic. This is usually
556 desirable. It is a bad idea to continue when an uncorrectable error
557 occurs - it is indeterminate what was uncorrected and the operating
558 system context might be so mangled that continuing will lead to further
559 corruption. If the kernel has MCE configured, then EDAC will never
560 notice the UE.
561
562 LOAD TIME: module/kernel parameter: edac_mc_panic_on_ue=[0|1]
563
564 RUN TIME: echo "1" > /sys/module/edac_core/parameters/edac_mc_panic_on_ue
565
566
567Log UE control file:
568
569 'edac_mc_log_ue'
570
571 Generate kernel messages describing uncorrectable errors. These errors
572 are reported through the system message log system. UE statistics
573 will be accumulated even when UE logging is disabled.
574
575 LOAD TIME: module/kernel parameter: edac_mc_log_ue=[0|1]
576
577 RUN TIME: echo "1" > /sys/module/edac_core/parameters/edac_mc_log_ue
578
579
580Log CE control file:
581
582 'edac_mc_log_ce'
583
584 Generate kernel messages describing correctable errors. These
585 errors are reported through the system message log system.
586 CE statistics will be accumulated even when CE logging is disabled.
587
588 LOAD TIME: module/kernel parameter: edac_mc_log_ce=[0|1]
589
590 RUN TIME: echo "1" > /sys/module/edac_core/parameters/edac_mc_log_ce
591
592
593Polling period control file:
594
595 'edac_mc_poll_msec'
596
597 The time period, in milliseconds, for polling for error information.
598 Too small a value wastes resources. Too large a value might delay
599	necessary handling of errors and might lose valuable information for
600	locating the error. 1000 milliseconds (once each second) is the current
601	default. Systems which require all the bandwidth they can get may
602 increase this.
603
604 LOAD TIME: module/kernel parameter: edac_mc_poll_msec=[0|1]
605
606 RUN TIME: echo "1000" > /sys/module/edac_core/parameters/edac_mc_poll_msec
607
607 608
608Panic on PCI PARITY Error: 609Panic on PCI PARITY Error:
609 610
@@ -614,21 +615,13 @@ Panic on PCI PARITY Error:
614 error has been detected. 615 error has been detected.
615 616
616 617
617 module/kernel parameter: panic_on_pci_parity=[0|1] 618 module/kernel parameter: edac_panic_on_pci_pe=[0|1]
618 619
619 Enable: 620 Enable:
620 echo "1" >/sys/devices/system/edac/pci/panic_on_pci_parity 621 echo "1" > /sys/module/edac_core/parameters/edac_panic_on_pci_pe
621 622
622 Disable: 623 Disable:
623 echo "0" >/sys/devices/system/edac/pci/panic_on_pci_parity 624 echo "0" > /sys/module/edac_core/parameters/edac_panic_on_pci_pe
624
625
626Parity Count:
627
628 'pci_parity_count'
629
630 This attribute file will display the number of parity errors that
631 have been detected.
632 625
633 626
634 627
diff --git a/Documentation/fb/sh7760fb.txt b/Documentation/fb/sh7760fb.txt
new file mode 100644
index 000000000000..c87bfe5c630a
--- /dev/null
+++ b/Documentation/fb/sh7760fb.txt
@@ -0,0 +1,131 @@
1SH7760/SH7763 integrated LCDC Framebuffer driver
2================================================
3
40. Overview
5-----------
6The SH7760/SH7763 have an integrated LCD Display controller (LCDC) which
7supports (in theory) resolutions ranging from 1x1 to 1024x1024,
8with color depths ranging from 1 to 16 bits, on STN, DSTN and TFT Panels.
9
10Caveats:
11* Framebuffer memory must be a large chunk allocated at the top
12 of Area3 (HW requirement). Because of this requirement you should NOT
13 make the driver a module since at runtime it may become impossible to
14 get a large enough contiguous chunk of memory.
15
16* The driver does not support changing resolution while loaded
17 (displays aren't hotpluggable anyway)
18
19* Heavy flickering may be observed
20 a) if you're using 15/16bit color modes at >= 640x480 px resolutions,
21 b) during PCMCIA (or any other slow bus) activity.
22
23* Rotation works only 90 degrees clockwise, and only if the horizontal
24 resolution is <= 320 pixels.
25
26files: drivers/video/sh7760fb.c
27 include/asm-sh/sh7760fb.h
28 Documentation/fb/sh7760fb.txt
29
301. Platform setup
31-----------------
32SH7760:
33 Video data is fetched via the DMABRG DMA engine, so you have to
34 configure the SH DMAC for DMABRG mode (write 0x94808080 to the
35 DMARSRA register somewhere at boot).
36
37 PFC registers PCCR and PCDR must be set to peripheral mode.
38 (write zeros to both).
39
40The driver does NOT do the above for you since board setup is, well, the job
41of the board setup code.
42
432. Panel definitions
44--------------------
45The LCDC must explicitly be told about the type of LCD panel
46attached. Data must be wrapped in a "struct sh7760fb_platdata" and
47passed to the driver as platform_data.
48
49Suggest you take a closer look at the SH7760 Manual, Section 30.
50(http://documentation.renesas.com/eng/products/mpumcu/e602291_sh7760.pdf)
51
52The following code illustrates what needs to be done to
53get the framebuffer working on a 640x480 TFT:
54
55====================== cut here ======================================
56
57#include <linux/fb.h>
58#include <asm/sh7760fb.h>
59
60/*
61 * NEC NL6448BC26-01 640x480 TFT
62 * dotclock 25175 kHz
63 * Xres 640 Yres 480
64 * Htotal 800 Vtotal 525
65 * HsynStart 656 VsynStart 490
66 * HsynLenn 30 VsynLenn 2
67 *
68 * The linux framebuffer layer does not use the syncstart/synclen
69 * values but right/left/upper/lower margin values. The comments
70 * for the x_margin explain how to calculate those from given
71 * panel sync timings.
72 */
73static struct fb_videomode nl6448bc26 = {
74 .name = "NL6448BC26",
75 .refresh = 60,
76 .xres = 640,
77 .yres = 480,
78 .pixclock = 39683, /* in picoseconds! */
79 .hsync_len = 30,
80 .vsync_len = 2,
81 .left_margin = 114, /* HTOT - (HSYNSLEN + HSYNSTART) */
82 .right_margin = 16, /* HSYNSTART - XRES */
83 .upper_margin = 33, /* VTOT - (VSYNLEN + VSYNSTART) */
84 .lower_margin = 10, /* VSYNSTART - YRES */
85 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
86 .vmode = FB_VMODE_NONINTERLACED,
87 .flag = 0,
88};
89
90static struct sh7760fb_platdata sh7760fb_nl6448 = {
91 .def_mode = &nl6448bc26,
92 .ldmtr = LDMTR_TFT_COLOR_16, /* 16bit TFT panel */
93 .lddfr = LDDFR_8BPP, /* we want 8bit output */
94 .ldpmmr = 0x0070,
95 .ldpspr = 0x0500,
96 .ldaclnr = 0,
97 .ldickr = LDICKR_CLKSRC(LCDC_CLKSRC_EXTERNAL) |
98 LDICKR_CLKDIV(1),
99 .rotate = 0,
100 .novsync = 1,
101 .blank = NULL,
102};
103
104/* SH7760:
105 * 0xFE300800: 256 * 4byte xRGB palette ram
106 * 0xFE300C00: 42 bytes ctrl registers
107 */
108static struct resource sh7760_lcdc_res[] = {
109 [0] = {
110 .start = 0xFE300800,
111 .end = 0xFE300CFF,
112 .flags = IORESOURCE_MEM,
113 },
114 [1] = {
115 .start = 65,
116 .end = 65,
117 .flags = IORESOURCE_IRQ,
118 },
119};
120
121static struct platform_device sh7760_lcdc_dev = {
122 .dev = {
123 .platform_data = &sh7760fb_nl6448,
124 },
125 .name = "sh7760-lcdc",
126 .id = -1,
127 .resource = sh7760_lcdc_res,
128 .num_resources = ARRAY_SIZE(sh7760_lcdc_res),
129};
130
131====================== cut here ======================================
diff --git a/Documentation/fb/tridentfb.txt b/Documentation/fb/tridentfb.txt
index 8a6c8a43e6a3..45d9de5b13a3 100644
--- a/Documentation/fb/tridentfb.txt
+++ b/Documentation/fb/tridentfb.txt
@@ -3,11 +3,25 @@ Tridentfb is a framebuffer driver for some Trident chip based cards.
3The following list of chips is thought to be supported although not all are 3The following list of chips is thought to be supported although not all are
4tested: 4tested:
5 5
6those from the Image series with Cyber in their names - accelerated 6those from the TGUI series 9440/96XX and with Cyber in their names
7those with Blade in their names (Blade3D,CyberBlade...) - accelerated 7those from the Image series and with Cyber in their names
8the newer CyberBladeXP family - nonaccelerated 8those with Blade in their names (Blade3D,CyberBlade...)
9 9the newer CyberBladeXP family
10Only PCI/AGP based cards are supported, none of the older Tridents. 10
11All families are accelerated. Only PCI/AGP based cards are supported,
12none of the older Tridents.
13The driver supports 8, 16 and 32 bits per pixel depths.
14The TGUI family requires the line length to be a power of 2 if acceleration
15is enabled. This means that the range of possible resolutions and bpp is
16limited compared to the range when acceleration is disabled (see list
17of parameters below).
18
19Known bugs:
201. The driver randomly locks up on the 3DImage975 chip with acceleration
21 enabled. The same happens in X11 (Xorg).
222. The ramdac speeds require some more fine tuning. It is possible to
23   switch to a resolution which the chip does not support at some depths on
24 older chips.
11 25
12How to use it? 26How to use it?
13============== 27==============
@@ -17,12 +31,11 @@ video=tridentfb
17 31
18The parameters for tridentfb are concatenated with a ':' as in this example. 32The parameters for tridentfb are concatenated with a ':' as in this example.
19 33
20video=tridentfb:800x600,bpp=16,noaccel 34video=tridentfb:800x600-16@75,noaccel
21 35
22The second level parameters that tridentfb understands are: 36The second level parameters that tridentfb understands are:
23 37
24noaccel - turns off acceleration (when it doesn't work for your card) 38noaccel - turns off acceleration (when it doesn't work for your card)
25accel - force text acceleration (for boards which by default are noacceled)
26 39
27fp - use flat panel related stuff 40fp - use flat panel related stuff
28crt - assume monitor is present instead of fp 41crt - assume monitor is present instead of fp
@@ -31,21 +44,24 @@ center - for flat panels and resolutions smaller than native size center the
31 image, otherwise use 44 image, otherwise use
32stretch 45stretch
33 46
34memsize - integer value in Kb, use if your card's memory size is misdetected. 47memsize - integer value in KB, use if your card's memory size is misdetected.
35 look at the driver output to see what it says when initializing. 48 look at the driver output to see what it says when initializing.
36memdiff - integer value in Kb,should be nonzero if your card reports 49
37 more memory than it actually has.For instance mine is 192K less than 50memdiff - integer value in KB, should be nonzero if your card reports
51 more memory than it actually has. For instance mine is 192K less than
38 detection says in all three BIOS selectable situations 2M, 4M, 8M. 52 detection says in all three BIOS selectable situations 2M, 4M, 8M.
39 Only use if your video memory is taken from main memory hence of 53 Only use if your video memory is taken from main memory hence of
40 configurable size.Otherwise use memsize. 54 configurable size. Otherwise use memsize.
41 If in some modes which barely fit the memory you see garbage at the bottom 55 If in some modes which barely fit the memory you see garbage
42 this might help by not letting change to that mode anymore. 56 at the bottom this might help by not letting change to that mode
57 anymore.
43 58
44nativex - the width in pixels of the flat panel.If you know it (usually 1024 59nativex - the width in pixels of the flat panel.If you know it (usually 1024
45 800 or 1280) and it is not what the driver seems to detect use it. 60 800 or 1280) and it is not what the driver seems to detect use it.
46 61
47bpp - bits per pixel (8,16 or 32) 62bpp - bits per pixel (8,16 or 32)
48mode - a mode name like 800x600 (as described in Documentation/fb/modedb.txt) 63mode - a mode name like 800x600-8@75 as described in
64 Documentation/fb/modedb.txt
49 65
50Using insane values for the above parameters will probably result in driver 66Using insane values for the above parameters will probably result in driver
51misbehaviour so take care(for instance memsize=12345678 or memdiff=23784 or 67misbehaviour so take care(for instance memsize=12345678 or memdiff=23784 or
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 9f73587219e8..721c71b86e06 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -138,24 +138,6 @@ Who: Kay Sievers <kay.sievers@suse.de>
138 138
139--------------------------- 139---------------------------
140 140
141What: find_task_by_pid
142When: 2.6.26
143Why: With pid namespaces, calling this funciton will return the
144 wrong task when called from inside a namespace.
145
146 The best way to save a task pid and find a task by this
147 pid later, is to find this task's struct pid pointer (or get
148 it directly from the task) and call pid_task() later.
149
150 If someone really needs to get a task by its pid_t, then
151 he most likely needs the find_task_by_vpid() to get the
152 task from the same namespace as the current task is in, but
153 this may be not so in general.
154
155Who: Pavel Emelyanov <xemul@openvz.org>
156
157---------------------------
158
159What: ACPI procfs interface 141What: ACPI procfs interface
160When: July 2008 142When: July 2008
161Why: ACPI sysfs conversion should be finished by January 2008. 143Why: ACPI sysfs conversion should be finished by January 2008.
@@ -300,14 +282,6 @@ Who: ocfs2-devel@oss.oracle.com
300 282
301--------------------------- 283---------------------------
302 284
303What: asm/semaphore.h
304When: 2.6.26
305Why: Implementation became generic; users should now include
306 linux/semaphore.h instead.
307Who: Matthew Wilcox <willy@linux.intel.com>
308
309---------------------------
310
311What: SCTP_GET_PEER_ADDRS_NUM_OLD, SCTP_GET_PEER_ADDRS_OLD, 285What: SCTP_GET_PEER_ADDRS_NUM_OLD, SCTP_GET_PEER_ADDRS_OLD,
312 SCTP_GET_LOCAL_ADDRS_NUM_OLD, SCTP_GET_LOCAL_ADDRS_OLD 286 SCTP_GET_LOCAL_ADDRS_NUM_OLD, SCTP_GET_LOCAL_ADDRS_OLD
313When: June 2009 287When: June 2009
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 8b22d7d8b991..680fb566b928 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -510,6 +510,7 @@ prototypes:
510 void (*close)(struct vm_area_struct*); 510 void (*close)(struct vm_area_struct*);
511 int (*fault)(struct vm_area_struct*, struct vm_fault *); 511 int (*fault)(struct vm_area_struct*, struct vm_fault *);
512 int (*page_mkwrite)(struct vm_area_struct *, struct page *); 512 int (*page_mkwrite)(struct vm_area_struct *, struct page *);
513 int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
513 514
514locking rules: 515locking rules:
515 BKL mmap_sem PageLocked(page) 516 BKL mmap_sem PageLocked(page)
@@ -517,6 +518,7 @@ open: no yes
517close: no yes 518close: no yes
518fault: no yes 519fault: no yes
519page_mkwrite: no yes no 520page_mkwrite: no yes no
521access: no yes
520 522
521 ->page_mkwrite() is called when a previously read-only page is 523 ->page_mkwrite() is called when a previously read-only page is
522about to become writeable. The file system is responsible for 524about to become writeable. The file system is responsible for
@@ -525,6 +527,11 @@ taking to lock out truncate, the page range should be verified to be
525within i_size. The page mapping should also be checked that it is not 527within i_size. The page mapping should also be checked that it is not
526NULL. 528NULL.
527 529
530 ->access() is called when get_user_pages() fails in
531access_process_vm(), typically used to debug a process through
532/proc/pid/mem or ptrace. This function is needed only for
533VM_IO | VM_PFNMAP VMAs.
534
528================================================================================ 535================================================================================
529 Dubious stuff 536 Dubious stuff
530 537
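A hedged sketch of such an ->access() handler for a VM_IO | VM_PFNMAP mapping of device registers (the device structure, its regs/regs_len fields and all names are invented for illustration):

#include <linux/mm.h>
#include <linux/io.h>

struct mydev {
	void __iomem *regs;	/* ioremap()ed register window */
	size_t regs_len;
};

static int mydev_vma_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	struct mydev *dev = vma->vm_private_data;
	unsigned long offset = addr - vma->vm_start;

	if (offset + len > dev->regs_len)
		return -EINVAL;
	if (write)
		memcpy_toio(dev->regs + offset, buf, len);
	else
		memcpy_fromio(buf, dev->regs + offset, len);
	return len;
}

static const struct vm_operations_struct mydev_vm_ops = {
	.access	= mydev_vma_access,
};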
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 7f268f327d75..8c6384bdfed4 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -296,6 +296,7 @@ Table 1-4: Kernel info in /proc
296 uptime System uptime 296 uptime System uptime
297 version Kernel version 297 version Kernel version
298 video bttv info of video resources (2.4) 298 video bttv info of video resources (2.4)
299 vmallocinfo Show vmalloced areas
299.............................................................................. 300..............................................................................
300 301
301You can, for example, check which interrupts are currently in use and what 302You can, for example, check which interrupts are currently in use and what
@@ -557,6 +558,49 @@ VmallocTotal: total size of vmalloc memory area
557 VmallocUsed: amount of vmalloc area which is used 558 VmallocUsed: amount of vmalloc area which is used
558VmallocChunk: largest contigious block of vmalloc area which is free 559VmallocChunk: largest contigious block of vmalloc area which is free
559 560
561..............................................................................
562
563vmallocinfo:
564
565Provides information about vmalloced/vmapped areas. One line per area,
566containing the virtual address range of the area, size in bytes,
567caller information of the creator, and optional information depending
568on the kind of area:
569
570 pages=nr number of pages
571 phys=addr if a physical address was specified
572 ioremap I/O mapping (ioremap() and friends)
573 vmalloc vmalloc() area
574 vmap vmap()ed pages
575 user VM_USERMAP area
576  vpages		buffer for page pointers was vmalloced (huge area)
577 N<node>=nr (Only on NUMA kernels)
578 Number of pages allocated on memory node <node>
579
580> cat /proc/vmallocinfo
5810xffffc20000000000-0xffffc20000201000 2101248 alloc_large_system_hash+0x204 ...
582 /0x2c0 pages=512 vmalloc N0=128 N1=128 N2=128 N3=128
5830xffffc20000201000-0xffffc20000302000 1052672 alloc_large_system_hash+0x204 ...
584 /0x2c0 pages=256 vmalloc N0=64 N1=64 N2=64 N3=64
5850xffffc20000302000-0xffffc20000304000 8192 acpi_tb_verify_table+0x21/0x4f...
586 phys=7fee8000 ioremap
5870xffffc20000304000-0xffffc20000307000 12288 acpi_tb_verify_table+0x21/0x4f...
588 phys=7fee7000 ioremap
5890xffffc2000031d000-0xffffc2000031f000 8192 init_vdso_vars+0x112/0x210
5900xffffc2000031f000-0xffffc2000032b000 49152 cramfs_uncompress_init+0x2e ...
591 /0x80 pages=11 vmalloc N0=3 N1=3 N2=2 N3=3
5920xffffc2000033a000-0xffffc2000033d000 12288 sys_swapon+0x640/0xac0 ...
593 pages=2 vmalloc N1=2
5940xffffc20000347000-0xffffc2000034c000 20480 xt_alloc_table_info+0xfe ...
595 /0x130 [x_tables] pages=4 vmalloc N0=4
5960xffffffffa0000000-0xffffffffa000f000 61440 sys_init_module+0xc27/0x1d00 ...
597 pages=14 vmalloc N2=14
5980xffffffffa000f000-0xffffffffa0014000 20480 sys_init_module+0xc27/0x1d00 ...
599 pages=4 vmalloc N1=4
6000xffffffffa0014000-0xffffffffa0017000 12288 sys_init_module+0xc27/0x1d00 ...
601 pages=2 vmalloc N1=2
6020xffffffffa0017000-0xffffffffa0022000 45056 sys_init_module+0xc27/0x1d00 ...
603 pages=10 vmalloc N0=10
560 604
5611.3 IDE devices in /proc/ide 6051.3 IDE devices in /proc/ide
562---------------------------- 606----------------------------
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index 2d5e1e582e13..bbac4f1d9056 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -96,6 +96,14 @@ shortname=lower|win95|winnt|mixed
96 emulate the Windows 95 rule for create. 96 emulate the Windows 95 rule for create.
97 Default setting is `lower'. 97 Default setting is `lower'.
98 98
99tz=UTC -- Interpret timestamps as UTC rather than local time.
100 This option disables the conversion of timestamps
101 between local time (as used by Windows on FAT) and UTC
102		 (which Linux uses internally). This is particularly
103 useful when mounting devices (like digital cameras)
104 that are set to UTC in order to avoid the pitfalls of
105 local time.
106
99<bool>: 0,1,yes,no,true,false 107<bool>: 0,1,yes,no,true,false
100 108
101TODO 109TODO
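A hedged userspace sketch of the same thing via mount(2) (the device node and mount point are placeholders):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Mount a camera's card with its FAT timestamps interpreted as UTC. */
	if (mount("/dev/sdb1", "/mnt/camera", "vfat", MS_NOATIME, "tz=UTC")) {
		perror("mount");
		return 1;
	}
	return 0;
}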
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index c35ca9e40d4c..18022e249c53 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -347,15 +347,12 @@ necessarily be nonportable.
347Dynamic definition of GPIOs is not currently standard; for example, as 347Dynamic definition of GPIOs is not currently standard; for example, as
348a side effect of configuring an add-on board with some GPIO expanders. 348a side effect of configuring an add-on board with some GPIO expanders.
349 349
350These calls are purely for kernel space, but a userspace API could be built
351on top of them.
352
353 350
354GPIO implementor's framework (OPTIONAL) 351GPIO implementor's framework (OPTIONAL)
355======================================= 352=======================================
356As noted earlier, there is an optional implementation framework making it 353As noted earlier, there is an optional implementation framework making it
357easier for platforms to support different kinds of GPIO controller using 354easier for platforms to support different kinds of GPIO controller using
358the same programming interface. 355the same programming interface. This framework is called "gpiolib".
359 356
360As a debugging aid, if debugfs is available a /sys/kernel/debug/gpio file 357As a debugging aid, if debugfs is available a /sys/kernel/debug/gpio file
361will be found there. That will list all the controllers registered through 358will be found there. That will list all the controllers registered through
@@ -392,11 +389,21 @@ either NULL or the label associated with that GPIO when it was requested.
392 389
393Platform Support 390Platform Support
394---------------- 391----------------
395To support this framework, a platform's Kconfig will "select HAVE_GPIO_LIB" 392To support this framework, a platform's Kconfig will "select" either
393ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
396and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines 394and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
397three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep(). 395three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
398They may also want to provide a custom value for ARCH_NR_GPIOS. 396They may also want to provide a custom value for ARCH_NR_GPIOS.
399 397
398ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
399into the kernel on that architecture.
400
401ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
402can enable it and build it into the kernel optionally.
403
404If neither of these options are selected, the platform does not support
405GPIOs through GPIO-lib and the code cannot be enabled by the user.
406
400Trivial implementations of those functions can directly use framework 407Trivial implementations of those functions can directly use framework
401code, which always dispatches through the gpio_chip: 408code, which always dispatches through the gpio_chip:
402 409
@@ -439,4 +446,120 @@ becomes available. That may mean the device should not be registered until
439calls for that GPIO can work. One way to address such dependencies is for 446calls for that GPIO can work. One way to address such dependencies is for
440such gpio_chip controllers to provide setup() and teardown() callbacks to 447such gpio_chip controllers to provide setup() and teardown() callbacks to
441board specific code; those board specific callbacks would register devices 448board specific code; those board specific callbacks would register devices
442once all the necessary resources are available. 449once all the necessary resources are available, and remove them later when
450the GPIO controller device becomes unavailable.
451
452
453Sysfs Interface for Userspace (OPTIONAL)
454========================================
455Platforms which use the "gpiolib" implementors framework may choose to
456configure a sysfs user interface to GPIOs. This is different from the
457debugfs interface, since it provides control over GPIO direction and
458value instead of just showing a gpio state summary. Plus, it could be
459present on production systems without debugging support.
460
461Given appropriate hardware documentation for the system, userspace could
462know for example that GPIO #23 controls the write protect line used to
463protect boot loader segments in flash memory. System upgrade procedures
464may need to temporarily remove that protection, first importing a GPIO,
465then changing its output state, then updating the code before re-enabling
466the write protection. In normal use, GPIO #23 would never be touched,
467and the kernel would have no need to know about it.
468
469Again depending on appropriate hardware documentation, on some systems
470userspace GPIO can be used to determine system configuration data that
471standard kernels won't know about. And for some tasks, simple userspace
472GPIO drivers could be all that the system really needs.
473
474Note that standard kernel drivers exist for common "LEDs and Buttons"
475GPIO tasks: "leds-gpio" and "gpio_keys", respectively. Use those
476instead of talking directly to the GPIOs; they integrate with kernel
477frameworks better than your userspace code could.
478
479
480Paths in Sysfs
481--------------
482There are three kinds of entry in /sys/class/gpio:
483
484 - Control interfaces used to get userspace control over GPIOs;
485
486 - GPIOs themselves; and
487
488 - GPIO controllers ("gpio_chip" instances).
489
490That's in addition to standard files including the "device" symlink.
491
492The control interfaces are write-only:
493
494 /sys/class/gpio/
495
496 "export" ... Userspace may ask the kernel to export control of
497 a GPIO to userspace by writing its number to this file.
498
499 Example: "echo 19 > export" will create a "gpio19" node
500 for GPIO #19, if that's not requested by kernel code.
501
502 "unexport" ... Reverses the effect of exporting to userspace.
503
504 Example: "echo 19 > unexport" will remove a "gpio19"
505 node exported using the "export" file.
506
507GPIO signals have paths like /sys/class/gpio/gpio42/ (for GPIO #42)
508and have the following read/write attributes:
509
510 /sys/class/gpio/gpioN/
511
512 "direction" ... reads as either "in" or "out". This value may
513 normally be written. Writing as "out" defaults to
514 initializing the value as low. To ensure glitch free
515 operation, values "low" and "high" may be written to
516 configure the GPIO as an output with that initial value.
517
518 Note that this attribute *will not exist* if the kernel
519 doesn't support changing the direction of a GPIO, or
520 it was exported by kernel code that didn't explicitly
521 allow userspace to reconfigure this GPIO's direction.
522
523 "value" ... reads as either 0 (low) or 1 (high). If the GPIO
524 is configured as an output, this value may be written;
525 any nonzero value is treated as high.
526
527GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
528controller implementing GPIOs starting at #42) and have the following
529read-only attributes:
530
531 /sys/class/gpio/gpiochipN/
532
533 "base" ... same as N, the first GPIO managed by this chip
534
535 "label" ... provided for diagnostics (not always unique)
536
537	"ngpio" ... how many GPIOs this manages (N to N + ngpio - 1)
538
539Board documentation should in most cases cover what GPIOs are used for
540what purposes. However, those numbers are not always stable; GPIOs on
541a daughtercard might be different depending on the base board being used,
542or other cards in the stack. In such cases, you may need to use the
543gpiochip nodes (possibly in conjunction with schematics) to determine
544the correct GPIO number to use for a given signal.
545
546
547Exporting from Kernel code
548--------------------------
549Kernel code can explicitly manage exports of GPIOs which have already been
550requested using gpio_request():
551
552 /* export the GPIO to userspace */
553 int gpio_export(unsigned gpio, bool direction_may_change);
554
555 /* reverse gpio_export() */
556	void gpio_unexport(unsigned gpio);
557
558After a kernel driver requests a GPIO, it may only be made available in
559the sysfs interface by gpio_export(). The driver can control whether the
560signal direction may change. This helps drivers prevent userspace code
561from accidentally clobbering important system state.
562
563This explicit exporting can help with debugging (by making some kinds
564of experiments easier), or can provide an always-there interface that's
565suitable for documenting as part of a board support package.
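
As a sketch only (the GPIO number, label and function name below are made
up for illustration; real code would take the number from board data), a
board support file might combine these calls roughly as follows:

	#include <linux/gpio.h>

	#define EXAMPLE_GPIO	42	/* hypothetical board signal */

	static int example_export_gpio(void)
	{
		int err;

		err = gpio_request(EXAMPLE_GPIO, "example-signal");
		if (err)
			return err;

		/* expose it in sysfs, but don't let userspace change
		 * the signal direction */
		err = gpio_export(EXAMPLE_GPIO, false);
		if (err)
			gpio_free(EXAMPLE_GPIO);
		return err;
	}
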
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 30d44b78171a..e7bea3e85304 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -87,7 +87,8 @@ parameter is applicable:
87 SH SuperH architecture is enabled. 87 SH SuperH architecture is enabled.
88 SMP The kernel is an SMP kernel. 88 SMP The kernel is an SMP kernel.
89 SPARC Sparc architecture is enabled. 89 SPARC Sparc architecture is enabled.
90 SWSUSP Software suspend is enabled. 90 SWSUSP Software suspend (hibernation) is enabled.
91 SUSPEND System suspend states are enabled.
91 TS Appropriate touchscreen support is enabled. 92 TS Appropriate touchscreen support is enabled.
92 USB USB support is enabled. 93 USB USB support is enabled.
93 USBHID USB Human Interface Device support is enabled. 94 USBHID USB Human Interface Device support is enabled.
@@ -147,10 +148,12 @@ and is between 256 and 4096 characters. It is defined in the file
147 default: 0 148 default: 0
148 149
149 acpi_sleep= [HW,ACPI] Sleep options 150 acpi_sleep= [HW,ACPI] Sleep options
150 Format: { s3_bios, s3_mode, s3_beep, old_ordering } 151 Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, old_ordering }
151 See Documentation/power/video.txt for s3_bios and s3_mode. 152 See Documentation/power/video.txt for s3_bios and s3_mode.
152 s3_beep is for debugging; it makes the PC's speaker beep 153 s3_beep is for debugging; it makes the PC's speaker beep
153 as soon as the kernel's real-mode entry point is called. 154 as soon as the kernel's real-mode entry point is called.
155 s4_nohwsig prevents ACPI hardware signature from being
156 used during resume from hibernation.
154 old_ordering causes the ACPI 1.0 ordering of the _PTS 157 old_ordering causes the ACPI 1.0 ordering of the _PTS
155 control method, wrt putting devices into low power 158 control method, wrt putting devices into low power
156 states, to be enforced (the ACPI 2.0 ordering of _PTS is 159 states, to be enforced (the ACPI 2.0 ordering of _PTS is
@@ -774,8 +777,22 @@ and is between 256 and 4096 characters. It is defined in the file
774 hisax= [HW,ISDN] 777 hisax= [HW,ISDN]
775 See Documentation/isdn/README.HiSax. 778 See Documentation/isdn/README.HiSax.
776 779
777 hugepages= [HW,X86-32,IA-64] Maximal number of HugeTLB pages. 780 hugepages= [HW,X86-32,IA-64] HugeTLB pages to allocate at boot.
778 hugepagesz= [HW,IA-64,PPC] The size of the HugeTLB pages. 781 hugepagesz= [HW,IA-64,PPC,X86-64] The size of the HugeTLB pages.
782 On x86-64 and powerpc, this option can be specified
783 multiple times interleaved with hugepages= to reserve
784		huge pages of different sizes. Valid page sizes on
785 x86-64 are 2M (when the CPU supports "pse") and 1G
786		(when the CPU supports the "pdpe1gb" cpuinfo flag).
787 Note that 1GB pages can only be allocated at boot time
788 using hugepages= and not freed afterwards.
789 default_hugepagesz=
790 [same as hugepagesz=] The size of the default
791		HugeTLB page. This is the size represented by
792 the legacy /proc/ hugepages APIs, used for SHM, and
793 default size when mounting hugetlbfs filesystems.
794		Defaults to the architecture's default huge page size
795 if not specified.
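		For instance (illustrative values only; the sizes must be
		supported by the CPU), the following reserves both 1G and
		2M pages at boot and makes 1G the default:
		default_hugepagesz=1G hugepagesz=1G hugepages=2
		hugepagesz=2M hugepages=512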
779 796
780 i8042.direct [HW] Put keyboard port into non-translated mode 797 i8042.direct [HW] Put keyboard port into non-translated mode
781 i8042.dumbkbd [HW] Pretend that controller can only read data from 798 i8042.dumbkbd [HW] Pretend that controller can only read data from
@@ -1225,6 +1242,14 @@ and is between 256 and 4096 characters. It is defined in the file
1225 1242
1226 mga= [HW,DRM] 1243 mga= [HW,DRM]
1227 1244
1245 mminit_loglevel=
1246 [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
1247 parameter allows control of the logging verbosity for
1248 the additional memory initialisation checks. A value
1249 of 0 disables mminit logging and a level of 4 will
1250 log everything. Information is printed at KERN_DEBUG
1251 so loglevel=8 may also need to be specified.
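		For example, to get the most verbose memory-init output
		and make sure it reaches the console, one might boot with:
		mminit_loglevel=4 loglevel=8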
1252
1228 mousedev.tap_time= 1253 mousedev.tap_time=
1229 [MOUSE] Maximum time between finger touching and 1254 [MOUSE] Maximum time between finger touching and
1230 leaving touchpad surface for touch to be considered 1255 leaving touchpad surface for touch to be considered
@@ -2034,6 +2059,9 @@ and is between 256 and 4096 characters. It is defined in the file
2034 2059
2035 snd-ymfpci= [HW,ALSA] 2060 snd-ymfpci= [HW,ALSA]
2036 2061
2062 softlockup_panic=
2063 [KNL] Should the soft-lockup detector generate panics.
2064
2037 sonypi.*= [HW] Sony Programmable I/O Control Device driver 2065 sonypi.*= [HW] Sony Programmable I/O Control Device driver
2038 See Documentation/sonypi.txt 2066 See Documentation/sonypi.txt
2039 2067
@@ -2098,6 +2126,12 @@ and is between 256 and 4096 characters. It is defined in the file
2098 2126
2099 tdfx= [HW,DRM] 2127 tdfx= [HW,DRM]
2100 2128
2129 test_suspend= [SUSPEND]
2130 Specify "mem" (for Suspend-to-RAM) or "standby" (for
2131 standby suspend) as the system sleep state to briefly
2132 enter during system startup. The system is woken from
2133 this state using a wakeup-capable RTC alarm.
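		For example, booting with "test_suspend=mem" makes the
		kernel briefly enter suspend-to-RAM once during startup.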
2134
2101 thash_entries= [KNL,NET] 2135 thash_entries= [KNL,NET]
2102 Set number of hash buckets for TCP connection 2136 Set number of hash buckets for TCP connection
2103 2137
@@ -2125,13 +2159,6 @@ and is between 256 and 4096 characters. It is defined in the file
2125 <deci-seconds>: poll all this frequency 2159 <deci-seconds>: poll all this frequency
2126 0: no polling (default) 2160 0: no polling (default)
2127 2161
2128 tipar.timeout= [HW,PPT]
2129 Set communications timeout in tenths of a second
2130 (default 15).
2131
2132 tipar.delay= [HW,PPT]
2133 Set inter-bit delay in microseconds (default 10).
2134
2135 tmscsim= [HW,SCSI] 2162 tmscsim= [HW,SCSI]
2136 See comment before function dc390_setup() in 2163 See comment before function dc390_setup() in
2137 drivers/scsi/tmscsim.c. 2164 drivers/scsi/tmscsim.c.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 64b3f146e4b0..02dc748b76c4 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -1,7 +1,7 @@
1 ThinkPad ACPI Extras Driver 1 ThinkPad ACPI Extras Driver
2 2
3 Version 0.20 3 Version 0.21
4 April 09th, 2008 4 May 29th, 2008
5 5
6 Borislav Deianov <borislav@users.sf.net> 6 Borislav Deianov <borislav@users.sf.net>
7 Henrique de Moraes Holschuh <hmh@hmh.eng.br> 7 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
@@ -621,7 +621,8 @@ Bluetooth
621--------- 621---------
622 622
623procfs: /proc/acpi/ibm/bluetooth 623procfs: /proc/acpi/ibm/bluetooth
624sysfs device attribute: bluetooth_enable 624sysfs device attribute: bluetooth_enable (deprecated)
625sysfs rfkill class: switch "tpacpi_bluetooth_sw"
625 626
626This feature shows the presence and current state of a ThinkPad 627This feature shows the presence and current state of a ThinkPad
627Bluetooth device in the internal ThinkPad CDC slot. 628Bluetooth device in the internal ThinkPad CDC slot.
@@ -643,8 +644,12 @@ Sysfs notes:
643 0: disables Bluetooth / Bluetooth is disabled 644 0: disables Bluetooth / Bluetooth is disabled
644 1: enables Bluetooth / Bluetooth is enabled. 645 1: enables Bluetooth / Bluetooth is enabled.
645 646
646 Note: this interface will be probably be superseded by the 647 Note: this interface has been superseded by the generic rfkill
647 generic rfkill class, so it is NOT to be considered stable yet. 648 class. It has been deprecated, and it will be removed in
649 2010.
650
651 rfkill controller switch "tpacpi_bluetooth_sw": refer to
652 Documentation/rfkill.txt for details.
648 653
649Video output control -- /proc/acpi/ibm/video 654Video output control -- /proc/acpi/ibm/video
650-------------------------------------------- 655--------------------------------------------
@@ -1374,7 +1379,8 @@ EXPERIMENTAL: WAN
1374----------------- 1379-----------------
1375 1380
1376procfs: /proc/acpi/ibm/wan 1381procfs: /proc/acpi/ibm/wan
1377sysfs device attribute: wwan_enable 1382sysfs device attribute: wwan_enable (deprecated)
1383sysfs rfkill class: switch "tpacpi_wwan_sw"
1378 1384
1379This feature is marked EXPERIMENTAL because the implementation 1385This feature is marked EXPERIMENTAL because the implementation
1380directly accesses hardware registers and may not work as expected. USE 1386directly accesses hardware registers and may not work as expected. USE
@@ -1404,8 +1410,12 @@ Sysfs notes:
1404 0: disables WWAN card / WWAN card is disabled 1410 0: disables WWAN card / WWAN card is disabled
1405 1: enables WWAN card / WWAN card is enabled. 1411 1: enables WWAN card / WWAN card is enabled.
1406 1412
1407 Note: this interface will be probably be superseded by the 1413 Note: this interface has been superseded by the generic rfkill
1408 generic rfkill class, so it is NOT to be considered stable yet. 1414 class. It has been deprecated, and it will be removed in
1415 2010.
1416
1417 rfkill controller switch "tpacpi_wwan_sw": refer to
1418 Documentation/rfkill.txt for details.
1409 1419
1410Multiple Commands, Module Parameters 1420Multiple Commands, Module Parameters
1411------------------------------------ 1421------------------------------------
diff --git a/Documentation/moxa-smartio b/Documentation/moxa-smartio
index fe24ecc6372e..5337e80a5b96 100644
--- a/Documentation/moxa-smartio
+++ b/Documentation/moxa-smartio
@@ -1,14 +1,22 @@
1============================================================================= 1=============================================================================
2 2 MOXA Smartio/Industio Family Device Driver Installation Guide
3 MOXA Smartio Family Device Driver Ver 1.1 Installation Guide 3 for Linux Kernel 2.4.x, 2.6.x
4 for Linux Kernel 2.2.x and 2.0.3x 4 Copyright (C) 2008, Moxa Inc.
5 Copyright (C) 1999, Moxa Technologies Co, Ltd.
6============================================================================= 5=============================================================================
6Date: 01/21/2008
7
7Content 8Content
8 9
91. Introduction 101. Introduction
102. System Requirement 112. System Requirement
113. Installation 123. Installation
13 3.1 Hardware installation
14 3.2 Driver files
15 3.3 Device naming convention
16 3.4 Module driver configuration
17 3.5 Static driver configuration for Linux kernel 2.4.x and 2.6.x.
18 3.6 Custom configuration
19 3.7 Verify driver installation
124. Utilities 204. Utilities
135. Setserial 215. Setserial
146. Troubleshooting 226. Troubleshooting
@@ -16,27 +24,48 @@ Content
16----------------------------------------------------------------------------- 24-----------------------------------------------------------------------------
171. Introduction 251. Introduction
18 26
19 The Smartio family Linux driver, Ver. 1.1, supports following multiport 27 The Smartio/Industio/UPCI family Linux driver supports following multiport
20 boards. 28 boards.
21 29
22 -C104P/H/HS, C104H/PCI, C104HS/PCI, CI-104J 4 port multiport board. 30 - 2 ports multiport board
23 -C168P/H/HS, C168H/PCI 8 port multiport board. 31 CP-102U, CP-102UL, CP-102UF
24 32 CP-132U-I, CP-132UL,
25 This driver has been modified a little and cleaned up from the Moxa 33 CP-132, CP-132I, CP132S, CP-132IS,
26 contributed driver code and merged into Linux 2.2.14pre. In particular 34 CI-132, CI-132I, CI-132IS,
27 official major/minor numbers have been assigned which are different to 35 (C102H, C102HI, C102HIS, C102P, CP-102, CP-102S)
28 those the original Moxa supplied driver used. 36
37 - 4 ports multiport board
38 CP-104EL,
39 CP-104UL, CP-104JU,
40 CP-134U, CP-134U-I,
41 C104H/PCI, C104HS/PCI,
42 CP-114, CP-114I, CP-114S, CP-114IS, CP-114UL,
43 C104H, C104HS,
44 CI-104J, CI-104JS,
45 CI-134, CI-134I, CI-134IS,
46 (C114HI, CT-114I, C104P)
47 POS-104UL,
48 CB-114,
49 CB-134I
50
51 - 8 ports multiport board
52 CP-118EL, CP-168EL,
53 CP-118U, CP-168U,
54 C168H/PCI,
55 C168H, C168HS,
56 (C168P),
57 CB-108
29 58
30 This driver and installation procedure have been developed upon Linux Kernel 59 This driver and installation procedure have been developed upon Linux Kernel
31 2.2.5 and backward compatible to 2.0.3x. This driver supports Intel x86 and 60 2.4.x and 2.6.x. This driver supports Intel x86 hardware platform. In order
32 Alpha hardware platform. In order to maintain compatibility, this version 61 to maintain compatibility, this version has also been properly tested with
33 has also been properly tested with RedHat, OpenLinux, TurboLinux and 62 RedHat, Mandrake, Fedora and S.u.S.E Linux. However, if compatibility problem
34 S.u.S.E Linux. However, if compatibility problem occurs, please contact 63 occurs, please contact Moxa at support@moxa.com.tw.
35 Moxa at support@moxa.com.tw.
36 64
37 In addition to device driver, useful utilities are also provided in this 65 In addition to device driver, useful utilities are also provided in this
38 version. They are 66 version. They are
39 - msdiag Diagnostic program for detecting installed Moxa Smartio boards. 67 - msdiag Diagnostic program for displaying installed Moxa
68 Smartio/Industio boards.
40 - msmon Monitor program to observe data count and line status signals. 69 - msmon Monitor program to observe data count and line status signals.
41 - msterm A simple terminal program which is useful in testing serial 70 - msterm A simple terminal program which is useful in testing serial
42 ports. 71 ports.
@@ -47,8 +76,7 @@ Content
47 GNU General Public License in this version. Please refer to GNU General 76 GNU General Public License in this version. Please refer to GNU General
48 Public License announcement in each source code file for more detail. 77 Public License announcement in each source code file for more detail.
49 78
50 In Moxa's ftp sites, you may always find latest driver at 79 In Moxa's Web sites, you may always find latest driver at http://web.moxa.com.
51 ftp://ftp.moxa.com or ftp://ftp.moxa.com.tw.
52 80
53 This version of driver can be installed as Loadable Module (Module driver) 81 This version of driver can be installed as Loadable Module (Module driver)
54 or built-in into kernel (Static driver). You may refer to following 82 or built-in into kernel (Static driver). You may refer to following
@@ -61,8 +89,8 @@ Content
61 89
62----------------------------------------------------------------------------- 90-----------------------------------------------------------------------------
632. System Requirement 912. System Requirement
64 - Hardware platform: Intel x86 or Alpha machine 92 - Hardware platform: Intel x86 machine
65 - Kernel version: 2.0.3x or 2.2.x 93 - Kernel version: 2.4.x or 2.6.x
66 - gcc version 2.72 or later 94 - gcc version 2.72 or later
67 - Maximum 4 boards can be installed in combination 95 - Maximum 4 boards can be installed in combination
68 96
@@ -70,9 +98,18 @@ Content
703. Installation 983. Installation
71 99
72 3.1 Hardware installation 100 3.1 Hardware installation
101 3.2 Driver files
102 3.3 Device naming convention
103 3.4 Module driver configuration
104 3.5 Static driver configuration for Linux kernel 2.4.x, 2.6.x.
105 3.6 Custom configuration
106 3.7 Verify driver installation
107
108
109 3.1 Hardware installation
73 110
74 There are two types of buses, ISA and PCI, for Smartio family multiport 111 There are two types of buses, ISA and PCI, for Smartio/Industio
75 board. 112 family multiport board.
76 113
77 ISA board 114 ISA board
78 --------- 115 ---------
@@ -81,47 +118,57 @@ Content
81 installation procedure in User's Manual before proceed any further. 118 installation procedure in User's Manual before proceed any further.
82 Please make sure the JP1 is open after the ISA board is set properly. 119 Please make sure the JP1 is open after the ISA board is set properly.
83 120
84 PCI board 121 PCI/UPCI board
85 --------- 122 --------------
86 You may need to adjust IRQ usage in BIOS to avoid from IRQ conflict 123 You may need to adjust IRQ usage in BIOS to avoid from IRQ conflict
87 with other ISA devices. Please refer to hardware installation 124 with other ISA devices. Please refer to hardware installation
88 procedure in User's Manual in advance. 125 procedure in User's Manual in advance.
89 126
90 IRQ Sharing 127 PCI IRQ Sharing
91 ----------- 128 -----------
92 Each port within the same multiport board shares the same IRQ. Up to 129 Each port within the same multiport board shares the same IRQ. Up to
93 4 Moxa Smartio Family multiport boards can be installed together on 130 4 Moxa Smartio/Industio PCI Family multiport boards can be installed
94 one system and they can share the same IRQ. 131 together on one system and they can share the same IRQ.
132
95 133
96 3.2 Driver files and device naming convention 134 3.2 Driver files
97 135
98 The driver file may be obtained from ftp, CD-ROM or floppy disk. The 136 The driver file may be obtained from ftp, CD-ROM or floppy disk. The
99 first step, anyway, is to copy driver file "mxser.tgz" into specified 137 first step, anyway, is to copy driver file "mxser.tgz" into specified
100 directory. e.g. /moxa. The execute commands as below. 138 directory. e.g. /moxa. The execute commands as below.
101 139
140 # cd /
141 # mkdir moxa
102 # cd /moxa 142 # cd /moxa
103 # tar xvf /dev/fd0 143 # tar xvf /dev/fd0
144
104 or 145 or
146
147 # cd /
148 # mkdir moxa
105 # cd /moxa 149 # cd /moxa
106 # cp /mnt/cdrom/<driver directory>/mxser.tgz . 150 # cp /mnt/cdrom/<driver directory>/mxser.tgz .
107 # tar xvfz mxser.tgz 151 # tar xvfz mxser.tgz
108 152
153
154 3.3 Device naming convention
155
109 You may find all the driver and utilities files in /moxa/mxser. 156 You may find all the driver and utilities files in /moxa/mxser.
110 Following installation procedure depends on the model you'd like to 157 Following installation procedure depends on the model you'd like to
111 run the driver. If you prefer module driver, please refer to 3.3. 158 run the driver. If you prefer module driver, please refer to 3.4.
112 If static driver is required, please refer to 3.4. 159 If static driver is required, please refer to 3.5.
113 160
114 Dialin and callout port 161 Dialin and callout port
115 ----------------------- 162 -----------------------
116 This driver remains traditional serial device properties. There're 163 This driver remains traditional serial device properties. There are
117 two special file name for each serial port. One is dial-in port 164 two special file name for each serial port. One is dial-in port
118 which is named "ttyMxx". For callout port, the naming convention 165 which is named "ttyMxx". For callout port, the naming convention
119 is "cumxx". 166 is "cumxx".
120 167
121 Device naming when more than 2 boards installed 168 Device naming when more than 2 boards installed
122 ----------------------------------------------- 169 -----------------------------------------------
123 Naming convention for each Smartio multiport board is pre-defined 170 Naming convention for each Smartio/Industio multiport board is
124 as below. 171 pre-defined as below.
125 172
126 Board Num. Dial-in Port Callout port 173 Board Num. Dial-in Port Callout port
127 1st board ttyM0 - ttyM7 cum0 - cum7 174 1st board ttyM0 - ttyM7 cum0 - cum7
@@ -129,6 +176,12 @@ Content
129 3rd board ttyM16 - ttyM23 cum16 - cum23 176 3rd board ttyM16 - ttyM23 cum16 - cum23
130 4th board ttyM24 - ttym31 cum24 - cum31 177 4th board ttyM24 - ttym31 cum24 - cum31
131 178
179
180 !!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
181 Under Kernel 2.6 the cum Device is Obsolete. So use ttyM*
182 device instead.
183 !!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
184
132 Board sequence 185 Board sequence
133 -------------- 186 --------------
134 This driver will activate ISA boards according to the parameter set 187 This driver will activate ISA boards according to the parameter set
@@ -138,69 +191,131 @@ Content
138 For PCI boards, their sequence will be after ISA boards and C168H/PCI 191 For PCI boards, their sequence will be after ISA boards and C168H/PCI
139 has higher priority than C104H/PCI boards. 192 has higher priority than C104H/PCI boards.
140 193
141 3.3 Module driver configuration 194 3.4 Module driver configuration
142 Module driver is easiest way to install. If you prefer static driver 195 Module driver is easiest way to install. If you prefer static driver
143 installation, please skip this paragraph. 196 installation, please skip this paragraph.
144 1. Find "Makefile" in /moxa/mxser, then run
145 197
146 # make install 198
199 ------------- Prepare to use the MOXA driver--------------------
200 3.4.1 Create tty device with correct major number
201	   Before using the MOXA driver, your system must have the tty devices
202	   which are created with the driver's major number. We offer a shell
203	   script, "msmknod", to simplify the procedure.
204	   This step only needs to be executed once, but you still
205	   need to repeat it when:
206	   a. You change the driver's major number. Please refer to the "3.7"
207	      section.
208	   b. The total number of installed MOXA boards changes, e.g. you
209	      add or remove a MOXA board.
210	   c. You want to change the tty name. This requires modifying the
211	      shell script "msmknod".
212
213 The procedure is:
214 # cd /moxa/mxser/driver
215 # ./msmknod
216
217	   This shell script will require the major numbers for the dial-in
218	   and callout devices in order to create the tty devices. You also need
219	   to specify the total number of installed MOXA boards. Default major
220	   numbers for the dial-in and callout devices are 30 and 35. If
221	   you need to change to other numbers, please refer to section "3.7"
222 for more detailed procedure.
223 Msmknod will delete any special files occupying the same device
224 naming.
225
226 3.4.2 Build the MOXA driver and utilities
227	   Before using the MOXA driver and utilities, you need to compile
228	   all of the source code. This step only needs to be executed once,
229	   but you must re-compile the source code if you modify it.
230	   For example, if you change the driver's major number (see
231 "3.7" section), then you need to do this step again.
232
233 Find "Makefile" in /moxa/mxser, then run
234
235 # make clean; make install
236
237 !!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!
238 For Red Hat 9, Red Hat Enterprise Linux AS3/ES3/WS3 & Fedora Core1:
239 # make clean; make installsp1
240
241 For Red Hat Enterprise Linux AS4/ES4/WS4:
242 # make clean; make installsp2
243 !!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!
147 244
148 The driver files "mxser.o" and utilities will be properly compiled 245 The driver files "mxser.o" and utilities will be properly compiled
149 and copied to system directories respectively.Then run 246 and copied to system directories respectively.
150 247
151 # insmod mxser 248 ------------- Load MOXA driver--------------------
249 3.4.3 Load the MOXA driver
152 250
153 to activate the modular driver. You may run "lsmod" to check 251 # modprobe mxser <argument>
154 if "mxser.o" is activated.
155 252
156 2. Create special files by executing "msmknod". 253 will activate the module driver. You may run "lsmod" to check
157 # cd /moxa/mxser/driver 254 if "mxser" is activated. If the MOXA board is ISA board, the
158 # ./msmknod 255 <argument> is needed. Please refer to section "3.4.5" for more
256 information.
257
258
259 ------------- Load MOXA driver on boot --------------------
260	 3.4.4 As described above, you may manually execute
261	       "modprobe mxser" to activate this driver and run
262	       "rmmod mxser" to remove it.
263	       However, it's better to have a boot-time configuration to
264	       eliminate manual operation. Boot-time configuration can be
265	       achieved with an rc file. We offer an "rc.mxser" file to simplify
266	       the procedure, under "moxa/mxser/driver".
159 267
160 Default major numbers for dial-in device and callout device are 268 But if you use ISA board, please modify the "modprobe ..." command
161 174, 175. Msmknod will delete any special files occupying the same 269 to add the argument (see "3.4.5" section). After modifying the
162 device naming. 270 rc.mxser, please try to execute "/moxa/mxser/driver/rc.mxser"
271	       manually to make sure the modification is OK. If any error is
272	       encountered, please modify it again. Once the modification is
273	       complete, follow the step below.
163 274
164 3. Up to now, you may manually execute "insmod mxser" to activate 275 Run following command for setting rc files.
165 this driver and run "rmmod mxser" to remove it. However, it's
166 better to have a boot time configuration to eliminate manual
167 operation.
168 Boot time configuration can be achieved by rc file. Run following
169 command for setting rc files.
170 276
171 # cd /moxa/mxser/driver 277 # cd /moxa/mxser/driver
172 # cp ./rc.mxser /etc/rc.d 278 # cp ./rc.mxser /etc/rc.d
173 # cd /etc/rc.d 279 # cd /etc/rc.d
174 280
175 You may have to modify part of the content in rc.mxser to specify 281 Check "rc.serial" is existed or not. If "rc.serial" doesn't exist,
176 parameters for ISA board. Please refer to rc.mxser for more detail. 282 create it by vi, run "chmod 755 rc.serial" to change the permission.
177 Find "rc.serial". If "rc.serial" doesn't exist, create it by vi. 283 Add "/etc/rc.d/rc.mxser" in last line,
178 Add "rc.mxser" in last line. Next, open rc.local by vi
179 and append following content.
180 284
181 if [ -f /etc/rc.d/rc.serial ]; then 285 Reboot and check if moxa.o activated by "lsmod" command.
182 sh /etc/rc.d/rc.serial
183 fi
184 286
185 4. Reboot and check if mxser.o activated by "lsmod" command. 287 3.4.5. If you'd like to drive Smartio/Industio ISA boards in the system,
186 5. If you'd like to drive Smartio ISA boards in the system, you'll 288 you'll have to add parameter to specify CAP address of given
187 have to add parameter to specify CAP address of given board while 289 board while activating "mxser.o". The format for parameters are
188 activating "mxser.o". The format for parameters are as follows. 290 as follows.
189 291
190 insmod mxser ioaddr=0x???,0x???,0x???,0x??? 292 modprobe mxser ioaddr=0x???,0x???,0x???,0x???
191 | | | | 293 | | | |
192 | | | +- 4th ISA board 294 | | | +- 4th ISA board
193 | | +------ 3rd ISA board 295 | | +------ 3rd ISA board
194 | +------------ 2nd ISA board 296 | +------------ 2nd ISA board
195 +------------------- 1st ISA board 297 +------------------- 1st ISA board
196 298
197 3.4 Static driver configuration 299 3.5 Static driver configuration for Linux kernel 2.4.x and 2.6.x
300
301 Note: To use static driver, you must install the linux kernel
302 source package.
303
304 3.5.1 Backup the built-in driver in the kernel.
305 # cd /usr/src/linux/drivers/char
306 # mv mxser.c mxser.c.old
307
308 For Red Hat 7.x user, you need to create link:
309 # cd /usr/src
310 # ln -s linux-2.4 linux
198 311
199 1. Create link 312 3.5.2 Create link
200 # cd /usr/src/linux/drivers/char 313 # cd /usr/src/linux/drivers/char
201 # ln -s /moxa/mxser/driver/mxser.c mxser.c 314 # ln -s /moxa/mxser/driver/mxser.c mxser.c
202 315
203 2. Add CAP address list for ISA boards 316 3.5.3 Add CAP address list for ISA boards. For PCI boards user,
317 please skip this step.
318
204 In module mode, the CAP address for ISA board is given by 319 In module mode, the CAP address for ISA board is given by
205 parameter. In static driver configuration, you'll have to 320 parameter. In static driver configuration, you'll have to
206 assign it within driver's source code. If you will not 321 assign it within driver's source code. If you will not
@@ -222,73 +337,55 @@ Content
222 static int mxserBoardCAP[] 337 static int mxserBoardCAP[]
223 = {0x280, 0x180, 0x00, 0x00}; 338 = {0x280, 0x180, 0x00, 0x00};
224 339
225 3. Modify tty_io.c 340 3.5.4 Setup kernel configuration
226 # cd /usr/src/linux/drivers/char/
227 # vi tty_io.c
228 Find pty_init(), insert "mxser_init()" as
229 341
230 pty_init(); 342 Configure the kernel:
231 mxser_init();
232 343
233 4. Modify tty.h 344 # cd /usr/src/linux
234 # cd /usr/src/linux/include/linux 345 # make menuconfig
235 # vi tty.h
236 Find extern int tty_init(void), insert "mxser_init()" as
237 346
238 extern int tty_init(void); 347 You will go into a menu-driven system. Please select [Character
239 extern int mxser_init(void); 348 devices][Non-standard serial port support], enable the [Moxa
240 349 SmartIO support] driver with "[*]" for built-in (not "[M]"), then
241 5. Modify Makefile 350 select [Exit] to exit this program.
242 # cd /usr/src/linux/drivers/char
243 # vi Makefile
244 Find L_OBJS := tty_io.o ...... random.o, add
245 "mxser.o" at last of this line as
246 L_OBJS := tty_io.o ....... mxser.o
247 351
248 6. Rebuild kernel 352 3.5.5 Rebuild kernel
249 The following are for Linux kernel rebuilding,for your reference only. 353 The following are for Linux kernel rebuilding, for your
354 reference only.
250 For appropriate details, please refer to the Linux document. 355 For appropriate details, please refer to the Linux document.
251 356
252 If 'lilo' utility is installed, please use 'make zlilo' to rebuild
253 kernel. If 'lilo' is not installed, please follow the following steps.
254
255 a. cd /usr/src/linux 357 a. cd /usr/src/linux
256 b. make clean /* take a few minutes */ 358 b. make clean /* take a few minutes */
257 c. make bzImage /* take probably 10-20 minutes */ 359 c. make dep /* take a few minutes */
258 d. Backup original boot kernel. /* optional step */ 360 d. make bzImage /* take probably 10-20 minutes */
259 e. cp /usr/src/linux/arch/i386/boot/bzImage /boot/vmlinuz 361 e. make install /* copy boot image to correct position */
260 f. Please make sure the boot kernel (vmlinuz) is in the 362 f. Please make sure the boot kernel (vmlinuz) is in the
261 correct position. If you use 'lilo' utility, you should 363 correct position.
262 check /etc/lilo.conf 'image' item specified the path 364 g. If you use 'lilo' utility, you should check /etc/lilo.conf
263 which is the 'vmlinuz' path, or you will load wrong 365 'image' item specified the path which is the 'vmlinuz' path,
264 (or old) boot kernel image (vmlinuz). 366 or you will load wrong (or old) boot kernel image (vmlinuz).
265 g. chmod 400 /vmlinuz 367 After checking /etc/lilo.conf, please run "lilo".
266 h. lilo 368
267 i. rdev -R /vmlinuz 1 369 Note that if the result of "make bzImage" is ERROR, then you have to
268 j. sync 370 go back to Linux configuration Setup. Type "make menuconfig" in
269 371 directory /usr/src/linux.
270 Note that if the result of "make zImage" is ERROR, then you have to 372
271 go back to Linux configuration Setup. Type "make config" in directory 373
272 /usr/src/linux or "setup". 374 3.5.6 Make tty device and special file
273
274 Since system include file, /usr/src/linux/include/linux/interrupt.h,
275 is modified each time the MOXA driver is installed, kernel rebuilding
276 is inevitable. And it takes about 10 to 20 minutes depends on the
277 machine.
278
279 7. Make utility
280 # cd /moxa/mxser/utility
281 # make install
282
283 8. Make special file
284 # cd /moxa/mxser/driver 375 # cd /moxa/mxser/driver
285 # ./msmknod 376 # ./msmknod
286 377
287 9. Reboot 378 3.5.7 Make utility
379 # cd /moxa/mxser/utility
380 # make clean; make install
381
382 3.5.8 Reboot
288 383
289 3.5 Custom configuration 384
385
386 3.6 Custom configuration
290 Although this driver already provides you default configuration, you 387 Although this driver already provides you default configuration, you
291 still can change the device name and major number.The instruction to 388 still can change the device name and major number. The instruction to
292 change these parameters are shown as below. 389 change these parameters are shown as below.
293 390
294 Change Device name 391 Change Device name
@@ -306,33 +403,37 @@ Content
306 2 free major numbers for this driver. There are 3 steps to change 403 2 free major numbers for this driver. There are 3 steps to change
307 major numbers. 404 major numbers.
308 405
309 1. Find free major numbers 406 3.6.1 Find free major numbers
310 In /proc/devices, you may find all the major numbers occupied 407 In /proc/devices, you may find all the major numbers occupied
311 in the system. Please select 2 major numbers that are available. 408 in the system. Please select 2 major numbers that are available.
312 e.g. 40, 45. 409 e.g. 40, 45.
313 2. Create special files 410 3.6.2 Create special files
314 Run /moxa/mxser/driver/msmknod to create special files with 411 Run /moxa/mxser/driver/msmknod to create special files with
315 specified major numbers. 412 specified major numbers.
316 3. Modify driver with new major number 413 3.6.3 Modify driver with new major number
317 Run vi to open /moxa/mxser/driver/mxser.c. Locate the line 414 Run vi to open /moxa/mxser/driver/mxser.c. Locate the line
318 contains "MXSERMAJOR". Change the content as below. 415 contains "MXSERMAJOR". Change the content as below.
319 #define MXSERMAJOR 40 416 #define MXSERMAJOR 40
320 #define MXSERCUMAJOR 45 417 #define MXSERCUMAJOR 45
321 4. Run # make install in /moxa/mxser/driver. 418 3.6.4 Run "make clean; make install" in /moxa/mxser/driver.
322 419
323 3.6 Verify driver installation 420 3.7 Verify driver installation
324 You may refer to /var/log/messages to check the latest status 421 You may refer to /var/log/messages to check the latest status
325 log reported by this driver whenever it's activated. 422 log reported by this driver whenever it's activated.
423
326----------------------------------------------------------------------------- 424-----------------------------------------------------------------------------
3274. Utilities 4254. Utilities
328 There are 3 utilities contained in this driver. They are msdiag, msmon and 426 There are 3 utilities contained in this driver. They are msdiag, msmon and
329 msterm. These 3 utilities are released in form of source code. They should 427 msterm. These 3 utilities are released in form of source code. They should
330 be compiled into executable file and copied into /usr/bin. 428 be compiled into executable file and copied into /usr/bin.
331 429
430	 Before using these utilities, please load the driver (refer to 3.4 & 3.5)
431	 and make sure you have run the "msmknod" utility.
432
332 msdiag - Diagnostic 433 msdiag - Diagnostic
333 -------------------- 434 --------------------
334 This utility provides the function to detect what Moxa Smartio multiport 435 This utility provides the function to display what Moxa Smartio/Industio
335 board exists in the system. 436 board found by driver in the system.
336 437
337 msmon - Port Monitoring 438 msmon - Port Monitoring
338 ----------------------- 439 -----------------------
@@ -353,12 +454,13 @@ Content
353 application, for example, sending AT command to a modem connected to the 454 application, for example, sending AT command to a modem connected to the
354 port or used as a terminal for login purpose. Note that this is only a 455 port or used as a terminal for login purpose. Note that this is only a
355 dumb terminal emulation without handling full screen operation. 456 dumb terminal emulation without handling full screen operation.
457
356----------------------------------------------------------------------------- 458-----------------------------------------------------------------------------
3575. Setserial 4595. Setserial
358 460
359 Supported Setserial parameters are listed as below. 461 Supported Setserial parameters are listed as below.
360 462
361 uart set UART type(16450-->disable FIFO, 16550A-->enable FIFO) 463 uart set UART type(16450-->disable FIFO, 16550A-->enable FIFO)
362 close_delay set the amount of time(in 1/100 of a second) that DTR 464 close_delay set the amount of time(in 1/100 of a second) that DTR
363 should be kept low while being closed. 465 should be kept low while being closed.
364 closing_wait set the amount of time(in 1/100 of a second) that the 466 closing_wait set the amount of time(in 1/100 of a second) that the
@@ -366,7 +468,13 @@ Content
366 being closed, before the receiver is disable. 468 being closed, before the receiver is disable.
367 spd_hi Use 57.6kb when the application requests 38.4kb. 469 spd_hi Use 57.6kb when the application requests 38.4kb.
368 spd_vhi Use 115.2kb when the application requests 38.4kb. 470 spd_vhi Use 115.2kb when the application requests 38.4kb.
471 spd_shi Use 230.4kb when the application requests 38.4kb.
472 spd_warp Use 460.8kb when the application requests 38.4kb.
369 spd_normal Use 38.4kb when the application requests 38.4kb. 473 spd_normal Use 38.4kb when the application requests 38.4kb.
474 spd_cust Use the custom divisor to set the speed when the
475 application requests 38.4kb.
476	 divisor	This option sets the custom divisor.
477	 baud_base	This option sets the base baud rate.
370 478
371----------------------------------------------------------------------------- 479-----------------------------------------------------------------------------
3726. Troubleshooting 4806. Troubleshooting
@@ -375,8 +483,9 @@ Content
375 possible. If all the possible solutions fail, please contact our technical 483 possible. If all the possible solutions fail, please contact our technical
376 support team to get more help. 484 support team to get more help.
377 485
378 Error msg: More than 4 Moxa Smartio family boards found. Fifth board and 486
379 after are ignored. 487 Error msg: More than 4 Moxa Smartio/Industio family boards found. Fifth board
488 and after are ignored.
380 Solution: 489 Solution:
381 To avoid this problem, please unplug fifth and after board, because Moxa 490 To avoid this problem, please unplug fifth and after board, because Moxa
382 driver supports up to 4 boards. 491 driver supports up to 4 boards.
@@ -384,7 +493,7 @@ Content
384 Error msg: Request_irq fail, IRQ(?) may be conflict with another device. 493 Error msg: Request_irq fail, IRQ(?) may be conflict with another device.
385 Solution: 494 Solution:
386 Other PCI or ISA devices occupy the assigned IRQ. If you are not sure 495 Other PCI or ISA devices occupy the assigned IRQ. If you are not sure
387 which device causes the situation,please check /proc/interrupts to find 496 which device causes the situation, please check /proc/interrupts to find
388 free IRQ and simply change another free IRQ for Moxa board. 497 free IRQ and simply change another free IRQ for Moxa board.
389 498
390 Error msg: Board #: C1xx Series(CAP=xxx) interrupt number invalid. 499 Error msg: Board #: C1xx Series(CAP=xxx) interrupt number invalid.
@@ -397,15 +506,18 @@ Content
397 Moxa ISA board needs an interrupt vector.Please refer to user's manual 506 Moxa ISA board needs an interrupt vector.Please refer to user's manual
398 "Hardware Installation" chapter to set interrupt vector. 507 "Hardware Installation" chapter to set interrupt vector.
399 508
400 Error msg: Couldn't install MOXA Smartio family driver! 509 Error msg: Couldn't install MOXA Smartio/Industio family driver!
401 Solution: 510 Solution:
402 Load Moxa driver fail, the major number may conflict with other devices. 511 Load Moxa driver fail, the major number may conflict with other devices.
403 Please refer to previous section 3.5 to change a free major number for 512 Please refer to previous section 3.7 to change a free major number for
404 Moxa driver. 513 Moxa driver.
405 514
406 Error msg: Couldn't install MOXA Smartio family callout driver! 515 Error msg: Couldn't install MOXA Smartio/Industio family callout driver!
407 Solution: 516 Solution:
408 Load Moxa callout driver fail, the callout device major number may 517 Load Moxa callout driver fail, the callout device major number may
409 conflict with other devices. Please refer to previous section 3.5 to 518 conflict with other devices. Please refer to previous section 3.7 to
410 change a free callout device major number for Moxa driver. 519 change a free callout device major number for Moxa driver.
520
521
411----------------------------------------------------------------------------- 522-----------------------------------------------------------------------------
523
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index a55d7f1c836d..fb742c213c9e 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -1,5 +1,7 @@
100-INDEX 100-INDEX
2 - This file 2 - This file
3apm-acpi.txt
4 - basic info about the APM and ACPI support.
3basic-pm-debugging.txt 5basic-pm-debugging.txt
4 - Debugging suspend and resume 6 - Debugging suspend and resume
5devices.txt 7devices.txt
@@ -14,8 +16,6 @@ notifiers.txt
14 - Registering suspend notifiers in device drivers 16 - Registering suspend notifiers in device drivers
15pci.txt 17pci.txt
16 - How the PCI Subsystem Does Power Management 18 - How the PCI Subsystem Does Power Management
17pm.txt
18 - info on Linux power management support.
19pm_qos_interface.txt 19pm_qos_interface.txt
20 - info on Linux PM Quality of Service interface 20 - info on Linux PM Quality of Service interface
21power_supply_class.txt 21power_supply_class.txt
diff --git a/Documentation/power/apm-acpi.txt b/Documentation/power/apm-acpi.txt
new file mode 100644
index 000000000000..1bd799dc17e8
--- /dev/null
+++ b/Documentation/power/apm-acpi.txt
@@ -0,0 +1,32 @@
1APM or ACPI?
2------------
3If you have a relatively recent x86 mobile, desktop, or server system,
4odds are it supports either Advanced Power Management (APM) or
5Advanced Configuration and Power Interface (ACPI). ACPI is the newer
6of the two technologies and puts power management in the hands of the
7operating system, allowing for more intelligent power management than
8is possible with BIOS controlled APM.
9
10The best way to determine which, if either, your system supports is to
11build a kernel with both ACPI and APM enabled (as of 2.3.x ACPI is
12enabled by default). If a working ACPI implementation is found, the
13ACPI driver will override and disable APM, otherwise the APM driver
14will be used.
15
16No, sorry, you cannot have both ACPI and APM enabled and running at
17once. Some people with broken ACPI or broken APM implementations
18would like to use both to get a full set of working features, but you
19simply cannot mix and match the two. Only one power management
20interface can be in control of the machine at once. Think about it.
21
22User-space Daemons
23------------------
24Both APM and ACPI rely on user-space daemons, apmd and acpid
25respectively, to be completely functional. Obtain both of these
26daemons from your Linux distribution or from the Internet (see below)
27and be sure that they are started sometime in the system boot process.
28Go ahead and start both. If ACPI or APM is not available on your
29system the associated daemon will exit gracefully.
30
31 apmd: http://worldvisions.ca/~apenwarr/apmd/
32 acpid: http://acpid.sf.net/
diff --git a/Documentation/power/pm.txt b/Documentation/power/pm.txt
deleted file mode 100644
index be841507e43f..000000000000
--- a/Documentation/power/pm.txt
+++ /dev/null
@@ -1,257 +0,0 @@
1 Linux Power Management Support
2
3This document briefly describes how to use power management with your
4Linux system and how to add power management support to Linux drivers.
5
6APM or ACPI?
7------------
8If you have a relatively recent x86 mobile, desktop, or server system,
9odds are it supports either Advanced Power Management (APM) or
10Advanced Configuration and Power Interface (ACPI). ACPI is the newer
11of the two technologies and puts power management in the hands of the
12operating system, allowing for more intelligent power management than
13is possible with BIOS controlled APM.
14
15The best way to determine which, if either, your system supports is to
16build a kernel with both ACPI and APM enabled (as of 2.3.x ACPI is
17enabled by default). If a working ACPI implementation is found, the
18ACPI driver will override and disable APM, otherwise the APM driver
19will be used.
20
21No, sorry, you cannot have both ACPI and APM enabled and running at
22once. Some people with broken ACPI or broken APM implementations
23would like to use both to get a full set of working features, but you
24simply cannot mix and match the two. Only one power management
25interface can be in control of the machine at once. Think about it..
26
27User-space Daemons
28------------------
29Both APM and ACPI rely on user-space daemons, apmd and acpid
30respectively, to be completely functional. Obtain both of these
31daemons from your Linux distribution or from the Internet (see below)
32and be sure that they are started sometime in the system boot process.
33Go ahead and start both. If ACPI or APM is not available on your
34system the associated daemon will exit gracefully.
35
36 apmd: http://worldvisions.ca/~apenwarr/apmd/
37 acpid: http://acpid.sf.net/
38
39Driver Interface -- OBSOLETE, DO NOT USE!
40----------------*************************
41
42Note: pm_register(), pm_access(), pm_dev_idle() and friends are
43obsolete. Please do not use them. Instead you should properly hook
44your driver into the driver model, and use its suspend()/resume()
45callbacks to do this kind of stuff.
46
47If you are writing a new driver or maintaining an old driver, it
48should include power management support. Without power management
49support, a single driver may prevent a system with power management
50capabilities from ever being able to suspend (safely).
51
52Overview:
531) Register each instance of a device with "pm_register"
542) Call "pm_access" before accessing the hardware.
55 (this will ensure that the hardware is awake and ready)
563) Your "pm_callback" is called before going into a
57 suspend state (ACPI D1-D3) or after resuming (ACPI D0)
58 from a suspend.
594) Call "pm_dev_idle" when the device is not being used
60 (optional but will improve device idle detection)
615) When unloaded, unregister the device with "pm_unregister"
62
63/*
64 * Description: Register a device with the power-management subsystem
65 *
66 * Parameters:
67 * type - device type (PCI device, system device, ...)
68 * id - instance number or unique identifier
69 * cback - request handler callback (suspend, resume, ...)
70 *
71 * Returns: Registered PM device or NULL on error
72 *
73 * Examples:
74 * dev = pm_register(PM_SYS_DEV, PM_SYS_VGA, vga_callback);
75 *
76 * struct pci_dev *pci_dev = pci_find_dev(...);
77 * dev = pm_register(PM_PCI_DEV, PM_PCI_ID(pci_dev), callback);
78 */
79struct pm_dev *pm_register(pm_dev_t type, unsigned long id, pm_callback cback);
80
81/*
82 * Description: Unregister a device with the power management subsystem
83 *
84 * Parameters:
85 * dev - PM device previously returned from pm_register
86 */
87void pm_unregister(struct pm_dev *dev);
88
89/*
90 * Description: Unregister all devices with a matching callback function
91 *
92 * Parameters:
93 * cback - previously registered request callback
94 *
95 * Notes: Provided for easier porting from old APM interface
96 */
97void pm_unregister_all(pm_callback cback);
98
99/*
100 * Power management request callback
101 *
102 * Parameters:
103 * dev - PM device previously returned from pm_register
104 * rqst - request type
105 * data - data, if any, associated with the request
106 *
107 * Returns: 0 if the request is successful
108 * EINVAL if the request is not supported
109 * EBUSY if the device is now busy and cannot handle the request
110 * ENOMEM if the device was unable to handle the request due to memory
111 *
112 * Details: The device request callback will be called before the
113 * device/system enters a suspend state (ACPI D1-D3) or
114 * or after the device/system resumes from suspend (ACPI D0).
115 * For PM_SUSPEND, the ACPI D-state being entered is passed
116 * as the "data" argument to the callback. The device
117 * driver should save (PM_SUSPEND) or restore (PM_RESUME)
118 * device context when the request callback is called.
119 *
120 * Once a driver returns 0 (success) from a suspend
121 * request, it should not process any further requests or
122 * access the device hardware until a call to "pm_access" is made.
123 */
124typedef int (*pm_callback)(struct pm_dev *dev, pm_request_t rqst, void *data);
125
126Driver Details
127--------------
128This is just a quick Q&A as a stopgap until a real driver writers'
129power management guide is available.
130
131Q: When is a device suspended?
132
133Devices can be suspended based on direct user request (eg. laptop lid
134closes), system power policy (eg. sleep after 30 minutes of console
135inactivity), or device power policy (eg. power down device after 5
136minutes of inactivity)
137
138Q: Must a driver honor a suspend request?
139
140No, a driver can return -EBUSY from a suspend request and this
141will stop the system from suspending. When a suspend request
142fails, all suspended devices are resumed and the system continues
143to run. Suspend can be retried at a later time.
144
145Q: Can the driver block suspend/resume requests?
146
147Yes, a driver can delay its return from a suspend or resume
148request until the device is ready to handle requests. It
149is advantageous to return as quickly as possible from a
150request as suspend/resume are done serially.
151
152Q: What context is a suspend/resume initiated from?
153
154A suspend or resume is initiated from a kernel thread context.
155It is safe to block, allocate memory, initiate requests
156or anything else you can do within the kernel.
157
158Q: Will requests continue to arrive after a suspend?
159
160Possibly. It is the driver's responsibility to queue(*),
161fail, or drop any requests that arrive after returning
162success to a suspend request. It is important that the
163driver not access its device until after it receives
164a resume request as the device's bus may no longer
165be active.
166
167(*) If a driver queues requests for processing after
168 resume be aware that the device, network, etc.
169 might be in a different state than at suspend time.
170 It's probably better to drop requests unless
171 the driver is a storage device.
172
173Q: Do I have to manage bus-specific power management registers
174
175No. It is the responsibility of the bus driver to manage
176PCI, USB, etc. power management registers. The bus driver
177or the power management subsystem will also enable any
178wake-on functionality that the device has.
179
180Q: So, really, what do I need to do to support suspend/resume?
181
182You need to save any device context that would
183be lost if the device was powered off and then restore
184it at resume time. When ACPI is active, there are
185three levels of device suspend states; D1, D2, and D3.
186(The suspend state is passed as the "data" argument
187to the device callback.) With D3, the device is powered
188off and loses all context, D1 and D2 are shallower power
189states and require less device context to be saved. To
190play it safe, just save everything at suspend and restore
191everything at resume.
192
193Q: Where do I store device context for suspend?
194
195Anywhere in memory, kmalloc a buffer or store it
196in the device descriptor. You are guaranteed that the
197contents of memory will be restored and accessible
198before resume, even when the system suspends to disk.
199
200Q: What do I need to do for ACPI vs. APM vs. etc?
201
202Drivers need not be aware of the specific power management
203technology that is active. They just need to be aware
204of when the overlying power management system requests
205that they suspend or resume.
206
207Q: What about device dependencies?
208
209When a driver registers a device, the power management
210subsystem uses the information provided to build a
211tree of device dependencies (eg. USB device X is on
212USB controller Y which is on PCI bus Z) When power
213management wants to suspend a device, it first sends
214a suspend request to its driver, then the bus driver,
215and so on up to the system bus. Device resumes
216proceed in the opposite direction.
217
218Q: Who do I contact for additional information about
219 enabling power management for my specific driver/device?
220
221ACPI Development mailing list: linux-acpi@vger.kernel.org
222
223System Interface -- OBSOLETE, DO NOT USE!
224----------------*************************
225If you are providing new power management support to Linux (ie.
226adding support for something like APM or ACPI), you should
227communicate with drivers through the existing generic power
228management interface.
229
230/*
231 * Send a request to all devices
232 *
233 * Parameters:
234 * rqst - request type
235 * data - data, if any, associated with the request
236 *
237 * Returns: 0 if the request is successful
238 * See "pm_callback" return for errors
239 *
240 * Details: Walk list of registered devices and call pm_send
241 * for each until complete or an error is encountered.
242 * If an error is encountered for a suspend request,
243 * return all devices to the state they were in before
244 * the suspend request.
245 */
246int pm_send_all(pm_request_t rqst, void *data);
247
248/*
249 * Find a matching device
250 *
251 * Parameters:
252 * type - device type (PCI device, system device, or 0 to match all devices)
253 * from - previous match or NULL to start from the beginning
254 *
255 * Returns: Matching device or NULL if none found
256 */
257struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from);
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index ea1b70b35793..99514ced82c5 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -59,6 +59,7 @@ Table of Contents
59 p) Freescale Synchronous Serial Interface 59 p) Freescale Synchronous Serial Interface
60 q) USB EHCI controllers 60 q) USB EHCI controllers
61 r) MDIO on GPIOs 61 r) MDIO on GPIOs
62 s) SPI busses
62 63
63 VII - Marvell Discovery mv64[345]6x System Controller chips 64 VII - Marvell Discovery mv64[345]6x System Controller chips
64 1) The /system-controller node 65 1) The /system-controller node
@@ -1883,6 +1884,62 @@ platforms are moved over to use the flattened-device-tree model.
1883 &qe_pio_c 6>; 1884 &qe_pio_c 6>;
1884 }; 1885 };
1885 1886
1887 s) SPI (Serial Peripheral Interface) busses
1888
1889 SPI busses can be described with a node for the SPI master device
1890 and a set of child nodes for each SPI slave on the bus. For this
1891 discussion, it is assumed that the system's SPI controller is in
1892 SPI master mode. This binding does not describe SPI controllers
1893 in slave mode.
1894
1895 The SPI master node requires the following properties:
1896 - #address-cells - number of cells required to define a chip select
1897 address on the SPI bus.
1898 - #size-cells - should be zero.
1899 - compatible - name of SPI bus controller following generic names
1900 recommended practice.
1901 No other properties are required in the SPI bus node. It is assumed
1902 that a driver for an SPI bus device will understand that it is an SPI bus.
1903 However, the binding does not attempt to define the specific method for
1904 assigning chip select numbers. Since SPI chip select configuration is
1905 flexible and non-standardized, it is left out of this binding with the
1906 assumption that board specific platform code will be used to manage
1907 chip selects. Individual drivers can define additional properties to
1908 support describing the chip select layout.
1909
1910 SPI slave nodes must be children of the SPI master node and can
1911 contain the following properties.
1912 - reg - (required) chip select address of device.
1913 - compatible - (required) name of SPI device following generic names
1914 recommended practice
1915 - spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz
1916 - spi-cpol - (optional) Empty property indicating device requires
1917 inverse clock polarity (CPOL) mode
1918 - spi-cpha - (optional) Empty property indicating device requires
1919 shifted clock phase (CPHA) mode
1920
1921 SPI example for an MPC5200 SPI bus:
1922 spi@f00 {
1923 #address-cells = <1>;
1924 #size-cells = <0>;
1925 compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
1926 reg = <0xf00 0x20>;
1927 interrupts = <2 13 0 2 14 0>;
1928 interrupt-parent = <&mpc5200_pic>;
1929
1930 ethernet-switch@0 {
1931 compatible = "micrel,ks8995m";
1932 spi-max-frequency = <1000000>;
1933 reg = <0>;
1934 };
1935
1936 codec@1 {
1937 compatible = "ti,tlv320aic26";
1938 spi-max-frequency = <100000>;
1939 reg = <1>;
1940 };
1941 };
1942
1886VII - Marvell Discovery mv64[345]6x System Controller chips 1943VII - Marvell Discovery mv64[345]6x System Controller chips
1887=========================================================== 1944===========================================================
1888 1945
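As a rough illustration of how the binding above maps onto the Linux SPI core (a sketch under assumed names, not code from this patch): spi-max-frequency corresponds to spi_device.max_speed_hz, and the empty spi-cpol and spi-cpha properties correspond to the SPI_CPOL and SPI_CPHA bits in spi_device.mode. The helper below is hypothetical; device tree cells are big-endian, hence the be32_to_cpup().

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

/* Hypothetical helper: translate the slave-node properties described
 * above into the fields the SPI core understands. */
static void example_spi_node_to_dev(struct spi_device *spi,
				    struct device_node *np)
{
	const __be32 *freq = of_get_property(np, "spi-max-frequency", NULL);

	if (freq)
		spi->max_speed_hz = be32_to_cpup(freq);
	if (of_find_property(np, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;	/* inverse clock polarity */
	if (of_find_property(np, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;	/* shifted clock phase */
}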
diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt
index b0472ac5226a..f866c72291bf 100644
--- a/Documentation/unaligned-memory-access.txt
+++ b/Documentation/unaligned-memory-access.txt
@@ -218,9 +218,35 @@ If use of such macros is not convenient, another option is to use memcpy(),
218where the source or destination (or both) are of type u8* or unsigned char*. 218where the source or destination (or both) are of type u8* or unsigned char*.
219Due to the byte-wise nature of this operation, unaligned accesses are avoided. 219Due to the byte-wise nature of this operation, unaligned accesses are avoided.
220 220
221
222Alignment vs. Networking
223========================
224
225On architectures that require aligned loads, networking requires that the IP
226header is aligned on a four-byte boundary to optimise the IP stack. For
227regular ethernet hardware, the constant NET_IP_ALIGN is used. On most
228architectures this constant has the value 2 because the normal ethernet
229header is 14 bytes long, so in order to get proper alignment one needs to
230DMA to an address which can be expressed as 4*n + 2. One notable exception
231here is powerpc which defines NET_IP_ALIGN to 0 because DMA to unaligned
232addresses can be very expensive and dwarf the cost of unaligned loads.
233
234For some ethernet hardware that cannot DMA to unaligned addresses (such as
2354*n+2), or for non-ethernet hardware, this can be a problem, and it is then
236required to copy the incoming frame into an aligned buffer. Because this is
237unnecessary on architectures that can do unaligned accesses, the code can be
238made dependent on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS like so:
239
240#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
241 skb = original skb
242#else
243 skb = copy skb
244#endif
245
221-- 246--
222Author: Daniel Drake <dsd@gentoo.org> 247Authors: Daniel Drake <dsd@gentoo.org>,
248 Johannes Berg <johannes@sipsolutions.net>
223With help from: Alan Cox, Avuton Olrich, Heikki Orsila, Jan Engelhardt, 249With help from: Alan Cox, Avuton Olrich, Heikki Orsila, Jan Engelhardt,
224Johannes Berg, Kyle McMartin, Kyle Moffett, Randy Dunlap, Robert Hancock, 250Kyle McMartin, Kyle Moffett, Randy Dunlap, Robert Hancock, Uli Kunitz,
225Uli Kunitz, Vadim Lobanov 251Vadim Lobanov
226 252
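To make the pseudo-code in the new "Alignment vs. Networking" section above concrete, a receive path that only copies the frame when the architecture lacks efficient unaligned access could look roughly like this sketch (the function name and the choice to fall back to the unaligned skb on allocation failure are illustrative, not taken from the patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_align(struct net_device *dev,
					struct sk_buff *skb)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	return skb;			/* use the original skb */
#else
	struct sk_buff *nskb;

	/* Copy into a buffer whose payload starts at 4*n + 2 so the
	 * IP header lands on a four-byte boundary. */
	nskb = netdev_alloc_skb(dev, skb->len + NET_IP_ALIGN);
	if (!nskb)
		return skb;		/* better unaligned than dropped */

	skb_reserve(nskb, NET_IP_ALIGN);
	skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len);
	nskb->protocol = skb->protocol;
	dev_kfree_skb(skb);
	return nskb;
#endif
}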
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index 3102b81bef88..8a5b5763f0fe 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -95,6 +95,29 @@ this condition holds, however, no more surplus huge pages will be
95allowed on the system until one of the two sysctls are increased 95allowed on the system until one of the two sysctls are increased
96sufficiently, or the surplus huge pages go out of use and are freed. 96sufficiently, or the surplus huge pages go out of use and are freed.
97 97
98With support for multiple hugepage pools at run-time available, much of
99the hugepage userspace interface has been duplicated in sysfs. The above
100information applies to the default hugepage size (which will be
101controlled by the proc interfaces for backwards compatibility). The root
102hugepage control directory is
103
104 /sys/kernel/mm/hugepages
105
106For each hugepage size supported by the running kernel, a subdirectory
107will exist, of the form
108
109 hugepages-${size}kB
110
111Inside each of these directories, the same set of files will exist:
112
113 nr_hugepages
114 nr_overcommit_hugepages
115 free_hugepages
116 resv_hugepages
117 surplus_hugepages
118
119which function as described above for the default hugepage-sized case.
120
98If the user applications are going to request hugepages using mmap system 121If the user applications are going to request hugepages using mmap system
99call, then it is required that system administrator mount a file system of 122call, then it is required that system administrator mount a file system of
100type hugetlbfs: 123type hugetlbfs:
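For illustration only (not part of the patch), the per-size files can be driven from userspace like any other sysfs attribute. The hugepages-2048kB directory name below is an assumption (x86 2 MB huge pages); list /sys/kernel/mm/hugepages to see which sizes the running kernel actually exposes.

#include <stdio.h>

int main(void)
{
	/* Assumed path; adjust the size directory to match your system. */
	const char *path =
		"/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages";
	FILE *fp = fopen(path, "w");

	if (!fp) {
		perror(path);
		return 1;
	}
	fprintf(fp, "20\n");	/* request 20 huge pages of this size */
	fclose(fp);
	return 0;
}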
diff --git a/MAINTAINERS b/MAINTAINERS
index 0652ab384d51..4cbf6016a9b9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -441,10 +441,7 @@ M: spyro@f2s.com
441S: Maintained 441S: Maintained
442 442
443ARM PRIMECELL MMCI PL180/1 DRIVER 443ARM PRIMECELL MMCI PL180/1 DRIVER
444P: Russell King 444S: Orphan
445M: rmk@arm.linux.org.uk
446L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
447S: Maintained
448 445
449ARM/ADI ROADRUNNER MACHINE SUPPORT 446ARM/ADI ROADRUNNER MACHINE SUPPORT
450P: Lennert Buytenhek 447P: Lennert Buytenhek
@@ -483,11 +480,28 @@ M: kernel@wantstofly.org
483L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) 480L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
484S: Maintained 481S: Maintained
485 482
483ARM/COMPULAB CM-X270/EM-X270 MACHINE SUPPORT
484P: Mike Rapoport
485M: mike@compulab.co.il
486L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
487S: Maintained
488
486ARM/CORGI MACHINE SUPPORT 489ARM/CORGI MACHINE SUPPORT
487P: Richard Purdie 490P: Richard Purdie
488M: rpurdie@rpsys.net 491M: rpurdie@rpsys.net
489S: Maintained 492S: Maintained
490 493
494ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
495P: Daniel Ribeiro
496M: drwyrm@gmail.com
497P: Stefan Schmidt
498M: stefan@openezx.org
499P: Harald Welte
500M: laforge@openezx.org
501L: openezx-devel@lists.openezx.org (subscribers-only)
502W: http://www.openezx.org/
503S: Maintained
504
491ARM/GLOMATION GESBC9312SX MACHINE SUPPORT 505ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
492P: Lennert Buytenhek 506P: Lennert Buytenhek
493M: kernel@wantstofly.org 507M: kernel@wantstofly.org
@@ -575,10 +589,18 @@ L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
575S: Maintained 589S: Maintained
576 590
577ARM/TOSA MACHINE SUPPORT 591ARM/TOSA MACHINE SUPPORT
592P: Dmitry Baryshkov
593M: dbaryshkov@gmail.com
578P: Dirk Opfer 594P: Dirk Opfer
579M: dirk@opfer-online.de 595M: dirk@opfer-online.de
580S: Maintained 596S: Maintained
581 597
598ARM/PALMTX SUPPORT
599P: Marek Vasut
600M: marek.vasut@gmail.com
601W: http://hackndev.com
602S: Maintained
603
582ARM/PLEB SUPPORT 604ARM/PLEB SUPPORT
583P: Peter Chubb 605P: Peter Chubb
584M: pleb@gelato.unsw.edu.au 606M: pleb@gelato.unsw.edu.au
@@ -1021,6 +1043,12 @@ M: fujita.tomonori@lab.ntt.co.jp
1021L: linux-scsi@vger.kernel.org 1043L: linux-scsi@vger.kernel.org
1022S: Supported 1044S: Supported
1023 1045
1046BT8XXGPIO DRIVER
1047P: Michael Buesch
1048M: mb@bu3sch.de
1049W: http://bu3sch.de/btgpio.php
1050S: Maintained
1051
1024BTTV VIDEO4LINUX DRIVER 1052BTTV VIDEO4LINUX DRIVER
1025P: Mauro Carvalho Chehab 1053P: Mauro Carvalho Chehab
1026M: mchehab@infradead.org 1054M: mchehab@infradead.org
@@ -1962,7 +1990,7 @@ P: Carlos Corbacho
1962M: carlos@strangeworlds.co.uk 1990M: carlos@strangeworlds.co.uk
1963S: Odd Fixes 1991S: Odd Fixes
1964 1992
1965HPET: High Precision Event Timers driver (hpet.c) 1993HPET: High Precision Event Timers driver (drivers/char/hpet.c)
1966P: Clemens Ladisch 1994P: Clemens Ladisch
1967M: clemens@ladisch.de 1995M: clemens@ladisch.de
1968S: Maintained 1996S: Maintained
@@ -2915,8 +2943,6 @@ P: Faisal Latif
2915M: flatif@neteffect.com 2943M: flatif@neteffect.com
2916P: Chien Tung 2944P: Chien Tung
2917M: ctung@neteffect.com 2945M: ctung@neteffect.com
2918P: Glenn Streiff
2919M: gstreiff@neteffect.com
2920L: general@lists.openfabrics.org 2946L: general@lists.openfabrics.org
2921W: http://www.neteffect.com 2947W: http://www.neteffect.com
2922S: Supported 2948S: Supported
@@ -4058,12 +4084,6 @@ W: http://www.prosec.rub.de/tpm/
4058L: tpmdd-devel@lists.sourceforge.net 4084L: tpmdd-devel@lists.sourceforge.net
4059S: Maintained 4085S: Maintained
4060 4086
4061TRIDENT 4DWAVE/SIS 7018 PCI AUDIO CORE
4062P: Muli Ben-Yehuda
4063M: mulix@mulix.org
4064L: linux-kernel@vger.kernel.org
4065S: Maintained
4066
4067TRIVIAL PATCHES 4087TRIVIAL PATCHES
4068P: Jesper Juhl 4088P: Jesper Juhl
4069M: trivial@kernel.org 4089M: trivial@kernel.org
@@ -4109,9 +4129,6 @@ W: http://www.uclinux.org/
4109L: uclinux-dev@uclinux.org (subscribers-only) 4129L: uclinux-dev@uclinux.org (subscribers-only)
4110S: Maintained 4130S: Maintained
4111 4131
4112UCLINUX FOR NEC V850
4113P: Miles Bader
4114
4115UCLINUX FOR RENESAS H8/300 4132UCLINUX FOR RENESAS H8/300
4116P: Yoshinori Sato 4133P: Yoshinori Sato
4117M: ysato@users.sourceforge.jp 4134M: ysato@users.sourceforge.jp
diff --git a/Makefile b/Makefile
index 4bcd1cf90cb1..3cad7db5eba7 100644
--- a/Makefile
+++ b/Makefile
@@ -1061,6 +1061,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
1061 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order 1061 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
1062 @echo ' Building modules, stage 2.'; 1062 @echo ' Building modules, stage 2.';
1063 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost 1063 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
1064 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild
1064 1065
1065 1066
1066# Target to prepare building external modules 1067# Target to prepare building external modules
diff --git a/arch/Kconfig b/arch/Kconfig
index ad89a33d8c6e..b0fabfa864ff 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -27,10 +27,32 @@ config KPROBES
27 for kernel debugging, non-intrusive instrumentation and testing. 27 for kernel debugging, non-intrusive instrumentation and testing.
28 If in doubt, say "N". 28 If in doubt, say "N".
29 29
30config HAVE_EFFICIENT_UNALIGNED_ACCESS
31 def_bool n
32 help
33 Some architectures are unable to perform unaligned accesses
34 without the use of get_unaligned/put_unaligned. Others are
35 unable to perform such accesses efficiently (e.g. trap on
36 unaligned access and require fixing it up in the exception
37 handler.)
38
39 This symbol should be selected by an architecture if it can
40 perform unaligned accesses efficiently to allow different
41 code paths to be selected for these cases. Some network
42 drivers, for example, could opt to not fix up alignment
43 problems with received packets if doing so would not help
44 much.
45
46 See Documentation/unaligned-memory-access.txt for more
47 information on the topic of unaligned memory accesses.
48
30config KRETPROBES 49config KRETPROBES
31 def_bool y 50 def_bool y
32 depends on KPROBES && HAVE_KRETPROBES 51 depends on KPROBES && HAVE_KRETPROBES
33 52
53config HAVE_IOREMAP_PROT
54 def_bool n
55
34config HAVE_KPROBES 56config HAVE_KPROBES
35 def_bool n 57 def_bool n
36 58
@@ -42,3 +64,10 @@ config HAVE_DMA_ATTRS
42 64
43config USE_GENERIC_SMP_HELPERS 65config USE_GENERIC_SMP_HELPERS
44 def_bool n 66 def_bool n
67
68config HAVE_CLK
69 def_bool n
70 help
71 The <linux/clk.h> calls support software clock gating and
72 thus are a key power management tool on many systems.
73
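The HAVE_CLK help text refers to the standard <linux/clk.h> API. As a minimal sketch (the "uart1_clk" name and the surrounding driver are made up), a driver gates its clock around use like this:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_use_peripheral(struct device *dev)
{
	struct clk *clk = clk_get(dev, "uart1_clk");	/* assumed name */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);		/* ungate: clock starts running */
	if (ret == 0) {
		/* ... talk to the peripheral ... */
		clk_disable(clk);	/* gate it again to save power */
	}
	clk_put(clk);
	return ret;
}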
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index dbe8c280fea9..1bec55d63ef6 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -333,11 +333,6 @@ config PCI_SYSCALL
333config IOMMU_HELPER 333config IOMMU_HELPER
334 def_bool PCI 334 def_bool PCI
335 335
336config ALPHA_CORE_AGP
337 bool
338 depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL
339 default y
340
341config ALPHA_NONAME 336config ALPHA_NONAME
342 bool 337 bool
343 depends on ALPHA_BOOK1 || ALPHA_NONAME_CH 338 depends on ALPHA_BOOK1 || ALPHA_NONAME_CH
diff --git a/arch/alpha/boot/misc.c b/arch/alpha/boot/misc.c
index c00646b25f6e..3047a1b3a517 100644
--- a/arch/alpha/boot/misc.c
+++ b/arch/alpha/boot/misc.c
@@ -78,8 +78,6 @@ static unsigned outcnt; /* bytes in output buffer */
78static int fill_inbuf(void); 78static int fill_inbuf(void);
79static void flush_window(void); 79static void flush_window(void);
80static void error(char *m); 80static void error(char *m);
81static void gzip_mark(void **);
82static void gzip_release(void **);
83 81
84static char *input_data; 82static char *input_data;
85static int input_data_size; 83static int input_data_size;
@@ -88,51 +86,18 @@ static uch *output_data;
88static ulg output_ptr; 86static ulg output_ptr;
89static ulg bytes_out; 87static ulg bytes_out;
90 88
91static void *malloc(int size);
92static void free(void *where);
93static void error(char *m); 89static void error(char *m);
94static void gzip_mark(void **); 90static void gzip_mark(void **);
95static void gzip_release(void **); 91static void gzip_release(void **);
96 92
97extern int end; 93extern int end;
98static ulg free_mem_ptr; 94static ulg free_mem_ptr;
99static ulg free_mem_ptr_end; 95static ulg free_mem_end_ptr;
100 96
101#define HEAP_SIZE 0x3000 97#define HEAP_SIZE 0x3000
102 98
103#include "../../../lib/inflate.c" 99#include "../../../lib/inflate.c"
104 100
105static void *malloc(int size)
106{
107 void *p;
108
109 if (size <0) error("Malloc error");
110 if (free_mem_ptr <= 0) error("Memory error");
111
112 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
113
114 p = (void *)free_mem_ptr;
115 free_mem_ptr += size;
116
117 if (free_mem_ptr >= free_mem_ptr_end)
118 error("Out of memory");
119 return p;
120}
121
122static void free(void *where)
123{ /* gzip_mark & gzip_release do the free */
124}
125
126static void gzip_mark(void **ptr)
127{
128 *ptr = (void *) free_mem_ptr;
129}
130
131static void gzip_release(void **ptr)
132{
133 free_mem_ptr = (long) *ptr;
134}
135
136/* =========================================================================== 101/* ===========================================================================
137 * Fill the input buffer. This is called only when the buffer is empty 102 * Fill the input buffer. This is called only when the buffer is empty
138 * and at least one byte is really needed. 103 * and at least one byte is really needed.
@@ -193,7 +158,7 @@ decompress_kernel(void *output_start,
193 158
194 /* FIXME FIXME FIXME */ 159 /* FIXME FIXME FIXME */
195 free_mem_ptr = (ulg)output_start + ksize; 160 free_mem_ptr = (ulg)output_start + ksize;
196 free_mem_ptr_end = (ulg)output_start + ksize + 0x200000; 161 free_mem_end_ptr = (ulg)output_start + ksize + 0x200000;
197 /* FIXME FIXME FIXME */ 162 /* FIXME FIXME FIXME */
198 163
199 /* put in temp area to reduce initial footprint */ 164 /* put in temp area to reduce initial footprint */
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 10ab7833e83c..d8c4ceaf00b9 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -19,7 +19,6 @@
19#include <asm/pgalloc.h> 19#include <asm/pgalloc.h>
20 20
21pg_data_t node_data[MAX_NUMNODES]; 21pg_data_t node_data[MAX_NUMNODES];
22bootmem_data_t node_bdata[MAX_NUMNODES];
23EXPORT_SYMBOL(node_data); 22EXPORT_SYMBOL(node_data);
24 23
25#undef DEBUG_DISCONTIG 24#undef DEBUG_DISCONTIG
@@ -141,7 +140,7 @@ setup_memory_node(int nid, void *kernel_end)
141 printk(" not enough mem to reserve NODE_DATA"); 140 printk(" not enough mem to reserve NODE_DATA");
142 return; 141 return;
143 } 142 }
144 NODE_DATA(nid)->bdata = &node_bdata[nid]; 143 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
145 144
146 printk(" Detected node memory: start %8lu, end %8lu\n", 145 printk(" Detected node memory: start %8lu, end %8lu\n",
147 node_min_pfn, node_max_pfn); 146 node_min_pfn, node_max_pfn);
@@ -304,8 +303,9 @@ void __init paging_init(void)
304 dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; 303 dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
305 304
306 for_each_online_node(nid) { 305 for_each_online_node(nid) {
307 unsigned long start_pfn = node_bdata[nid].node_boot_start >> PAGE_SHIFT; 306 bootmem_data_t *bdata = &bootmem_node_data[nid];
308 unsigned long end_pfn = node_bdata[nid].node_low_pfn; 307 unsigned long start_pfn = bdata->node_min_pfn;
308 unsigned long end_pfn = bdata->node_low_pfn;
309 309
310 if (dma_local_pfn >= end_pfn - start_pfn) 310 if (dma_local_pfn >= end_pfn - start_pfn)
311 zones_size[ZONE_DMA] = end_pfn - start_pfn; 311 zones_size[ZONE_DMA] = end_pfn - start_pfn;
@@ -313,7 +313,7 @@ void __init paging_init(void)
313 zones_size[ZONE_DMA] = dma_local_pfn; 313 zones_size[ZONE_DMA] = dma_local_pfn;
314 zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn; 314 zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
315 } 315 }
316 free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn, NULL); 316 free_area_init_node(nid, zones_size, start_pfn, NULL);
317 } 317 }
318 318
319 /* Initialize the kernel's ZERO_PGE. */ 319 /* Initialize the kernel's ZERO_PGE. */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c7ad324ddf2c..dabb015aa40b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,6 +12,7 @@ config ARM
12 select RTC_LIB 12 select RTC_LIB
13 select SYS_SUPPORTS_APM_EMULATION 13 select SYS_SUPPORTS_APM_EMULATION
14 select HAVE_OPROFILE 14 select HAVE_OPROFILE
15 select HAVE_ARCH_KGDB
15 select HAVE_KPROBES if (!XIP_KERNEL) 16 select HAVE_KPROBES if (!XIP_KERNEL)
16 select HAVE_KRETPROBES if (HAVE_KPROBES) 17 select HAVE_KRETPROBES if (HAVE_KPROBES)
17 select HAVE_FTRACE if (!XIP_KERNEL) 18 select HAVE_FTRACE if (!XIP_KERNEL)
@@ -197,12 +198,14 @@ choice
197config ARCH_AAEC2000 198config ARCH_AAEC2000
198 bool "Agilent AAEC-2000 based" 199 bool "Agilent AAEC-2000 based"
199 select ARM_AMBA 200 select ARM_AMBA
201 select HAVE_CLK
200 help 202 help
201 This enables support for systems based on the Agilent AAEC-2000 203 This enables support for systems based on the Agilent AAEC-2000
202 204
203config ARCH_INTEGRATOR 205config ARCH_INTEGRATOR
204 bool "ARM Ltd. Integrator family" 206 bool "ARM Ltd. Integrator family"
205 select ARM_AMBA 207 select ARM_AMBA
208 select HAVE_CLK
206 select ICST525 209 select ICST525
207 help 210 help
208 Support for ARM's Integrator platform. 211 Support for ARM's Integrator platform.
@@ -210,6 +213,7 @@ config ARCH_INTEGRATOR
210config ARCH_REALVIEW 213config ARCH_REALVIEW
211 bool "ARM Ltd. RealView family" 214 bool "ARM Ltd. RealView family"
212 select ARM_AMBA 215 select ARM_AMBA
216 select HAVE_CLK
213 select ICST307 217 select ICST307
214 select GENERIC_TIME 218 select GENERIC_TIME
215 select GENERIC_CLOCKEVENTS 219 select GENERIC_CLOCKEVENTS
@@ -220,6 +224,7 @@ config ARCH_VERSATILE
220 bool "ARM Ltd. Versatile family" 224 bool "ARM Ltd. Versatile family"
221 select ARM_AMBA 225 select ARM_AMBA
222 select ARM_VIC 226 select ARM_VIC
227 select HAVE_CLK
223 select ICST307 228 select ICST307
224 select GENERIC_TIME 229 select GENERIC_TIME
225 select GENERIC_CLOCKEVENTS 230 select GENERIC_CLOCKEVENTS
@@ -261,7 +266,9 @@ config ARCH_EP93XX
261 select ARM_AMBA 266 select ARM_AMBA
262 select ARM_VIC 267 select ARM_VIC
263 select GENERIC_GPIO 268 select GENERIC_GPIO
264 select HAVE_GPIO_LIB 269 select HAVE_CLK
270 select HAVE_CLK
271 select ARCH_REQUIRE_GPIOLIB
265 help 272 help
266 This enables support for the Cirrus EP93xx series of CPUs. 273 This enables support for the Cirrus EP93xx series of CPUs.
267 274
@@ -380,6 +387,7 @@ config ARCH_NS9XXX
380 select GENERIC_GPIO 387 select GENERIC_GPIO
381 select GENERIC_TIME 388 select GENERIC_TIME
382 select GENERIC_CLOCKEVENTS 389 select GENERIC_CLOCKEVENTS
390 select HAVE_CLK
383 help 391 help
384 Say Y here if you intend to run this kernel on a NetSilicon NS9xxx 392 Say Y here if you intend to run this kernel on a NetSilicon NS9xxx
385 System. 393 System.
@@ -429,6 +437,7 @@ config ARCH_ORION5X
429 437
430config ARCH_PNX4008 438config ARCH_PNX4008
431 bool "Philips Nexperia PNX4008 Mobile" 439 bool "Philips Nexperia PNX4008 Mobile"
440 select HAVE_CLK
432 help 441 help
433 This enables support for Philips PNX4008 mobile platform. 442 This enables support for Philips PNX4008 mobile platform.
434 443
@@ -437,7 +446,8 @@ config ARCH_PXA
437 depends on MMU 446 depends on MMU
438 select ARCH_MTD_XIP 447 select ARCH_MTD_XIP
439 select GENERIC_GPIO 448 select GENERIC_GPIO
440 select HAVE_GPIO_LIB 449 select HAVE_CLK
450 select ARCH_REQUIRE_GPIOLIB
441 select GENERIC_TIME 451 select GENERIC_TIME
442 select GENERIC_CLOCKEVENTS 452 select GENERIC_CLOCKEVENTS
443 select TICK_ONESHOT 453 select TICK_ONESHOT
@@ -467,14 +477,16 @@ config ARCH_SA1100
467 select GENERIC_GPIO 477 select GENERIC_GPIO
468 select GENERIC_TIME 478 select GENERIC_TIME
469 select GENERIC_CLOCKEVENTS 479 select GENERIC_CLOCKEVENTS
480 select HAVE_CLK
470 select TICK_ONESHOT 481 select TICK_ONESHOT
471 select HAVE_GPIO_LIB 482 select ARCH_REQUIRE_GPIOLIB
472 help 483 help
473 Support for StrongARM 11x0 based boards. 484 Support for StrongARM 11x0 based boards.
474 485
475config ARCH_S3C2410 486config ARCH_S3C2410
476 bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443" 487 bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
477 select GENERIC_GPIO 488 select GENERIC_GPIO
489 select HAVE_CLK
478 help 490 help
479 Samsung S3C2410X CPU based systems, such as the Simtec Electronics 491 Samsung S3C2410X CPU based systems, such as the Simtec Electronics
480 BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or 492 BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -502,13 +514,15 @@ config ARCH_DAVINCI
502 select GENERIC_TIME 514 select GENERIC_TIME
503 select GENERIC_CLOCKEVENTS 515 select GENERIC_CLOCKEVENTS
504 select GENERIC_GPIO 516 select GENERIC_GPIO
517 select HAVE_CLK
505 help 518 help
506 Support for TI's DaVinci platform. 519 Support for TI's DaVinci platform.
507 520
508config ARCH_OMAP 521config ARCH_OMAP
509 bool "TI OMAP" 522 bool "TI OMAP"
510 select GENERIC_GPIO 523 select GENERIC_GPIO
511 select HAVE_GPIO_LIB 524 select HAVE_CLK
525 select ARCH_REQUIRE_GPIOLIB
512 select GENERIC_TIME 526 select GENERIC_TIME
513 select GENERIC_CLOCKEVENTS 527 select GENERIC_CLOCKEVENTS
514 help 528 help
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index 9b444022cb9b..7145cc7c04f0 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -217,8 +217,6 @@ static unsigned outcnt; /* bytes in output buffer */
217static int fill_inbuf(void); 217static int fill_inbuf(void);
218static void flush_window(void); 218static void flush_window(void);
219static void error(char *m); 219static void error(char *m);
220static void gzip_mark(void **);
221static void gzip_release(void **);
222 220
223extern char input_data[]; 221extern char input_data[];
224extern char input_data_end[]; 222extern char input_data_end[];
@@ -227,64 +225,21 @@ static uch *output_data;
227static ulg output_ptr; 225static ulg output_ptr;
228static ulg bytes_out; 226static ulg bytes_out;
229 227
230static void *malloc(int size);
231static void free(void *where);
232static void error(char *m); 228static void error(char *m);
233static void gzip_mark(void **);
234static void gzip_release(void **);
235 229
236static void putstr(const char *); 230static void putstr(const char *);
237 231
238extern int end; 232extern int end;
239static ulg free_mem_ptr; 233static ulg free_mem_ptr;
240static ulg free_mem_ptr_end; 234static ulg free_mem_end_ptr;
241 235
242#define HEAP_SIZE 0x3000 236#ifdef STANDALONE_DEBUG
243 237#define NO_INFLATE_MALLOC
244#include "../../../../lib/inflate.c" 238#endif
245
246#ifndef STANDALONE_DEBUG
247static void *malloc(int size)
248{
249 void *p;
250
251 if (size <0) error("Malloc error");
252 if (free_mem_ptr <= 0) error("Memory error");
253
254 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
255
256 p = (void *)free_mem_ptr;
257 free_mem_ptr += size;
258
259 if (free_mem_ptr >= free_mem_ptr_end)
260 error("Out of memory");
261 return p;
262}
263
264static void free(void *where)
265{ /* gzip_mark & gzip_release do the free */
266}
267
268static void gzip_mark(void **ptr)
269{
270 arch_decomp_wdog();
271 *ptr = (void *) free_mem_ptr;
272}
273 239
274static void gzip_release(void **ptr) 240#define ARCH_HAS_DECOMP_WDOG
275{
276 arch_decomp_wdog();
277 free_mem_ptr = (long) *ptr;
278}
279#else
280static void gzip_mark(void **ptr)
281{
282}
283 241
284static void gzip_release(void **ptr) 242#include "../../../../lib/inflate.c"
285{
286}
287#endif
288 243
289/* =========================================================================== 244/* ===========================================================================
290 * Fill the input buffer. This is called only when the buffer is empty 245 * Fill the input buffer. This is called only when the buffer is empty
@@ -348,7 +303,7 @@ decompress_kernel(ulg output_start, ulg free_mem_ptr_p, ulg free_mem_ptr_end_p,
348{ 303{
349 output_data = (uch *)output_start; /* Points to kernel start */ 304 output_data = (uch *)output_start; /* Points to kernel start */
350 free_mem_ptr = free_mem_ptr_p; 305 free_mem_ptr = free_mem_ptr_p;
351 free_mem_ptr_end = free_mem_ptr_end_p; 306 free_mem_end_ptr = free_mem_ptr_end_p;
352 __machine_arch_type = arch_id; 307 __machine_arch_type = arch_id;
353 308
354 arch_decomp_setup(); 309 arch_decomp_setup();
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index c8e8f0ea59e1..0a8e1ff2af8a 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -627,7 +627,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq)
627 if (!sachip) 627 if (!sachip)
628 return -ENOMEM; 628 return -ENOMEM;
629 629
630 sachip->clk = clk_get(me, "GPIO27_CLK"); 630 sachip->clk = clk_get(me, "SA1111_CLK");
631 if (!sachip->clk) { 631 if (!sachip->clk) {
632 ret = PTR_ERR(sachip->clk); 632 ret = PTR_ERR(sachip->clk);
633 goto err_free; 633 goto err_free;
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
new file mode 100644
index 000000000000..2a84d557adc2
--- /dev/null
+++ b/arch/arm/configs/ezx_defconfig
@@ -0,0 +1,1614 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc3
4# Mon Jul 7 17:52:21 2008
5#
6CONFIG_ARM=y
7CONFIG_HAVE_PWM=y
8CONFIG_SYS_SUPPORTS_APM_EMULATION=y
9CONFIG_GENERIC_GPIO=y
10CONFIG_GENERIC_TIME=y
11CONFIG_GENERIC_CLOCKEVENTS=y
12CONFIG_MMU=y
13# CONFIG_NO_IOPORT is not set
14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_STACKTRACE_SUPPORT=y
16CONFIG_LOCKDEP_SUPPORT=y
17CONFIG_TRACE_IRQFLAGS_SUPPORT=y
18CONFIG_HARDIRQS_SW_RESEND=y
19CONFIG_GENERIC_IRQ_PROBE=y
20CONFIG_RWSEM_GENERIC_SPINLOCK=y
21# CONFIG_ARCH_HAS_ILOG2_U32 is not set
22# CONFIG_ARCH_HAS_ILOG2_U64 is not set
23CONFIG_GENERIC_HWEIGHT=y
24CONFIG_GENERIC_CALIBRATE_DELAY=y
25CONFIG_ARCH_SUPPORTS_AOUT=y
26CONFIG_ZONE_DMA=y
27CONFIG_ARCH_MTD_XIP=y
28CONFIG_VECTORS_BASE=0xffff0000
29CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
30
31#
32# General setup
33#
34CONFIG_EXPERIMENTAL=y
35CONFIG_BROKEN_ON_SMP=y
36CONFIG_LOCK_KERNEL=y
37CONFIG_INIT_ENV_ARG_LIMIT=32
38CONFIG_LOCALVERSION="-ezxdev"
39# CONFIG_LOCALVERSION_AUTO is not set
40CONFIG_SWAP=y
41CONFIG_SYSVIPC=y
42CONFIG_SYSVIPC_SYSCTL=y
43# CONFIG_POSIX_MQUEUE is not set
44# CONFIG_BSD_PROCESS_ACCT is not set
45# CONFIG_TASKSTATS is not set
46# CONFIG_AUDIT is not set
47CONFIG_IKCONFIG=y
48CONFIG_IKCONFIG_PROC=y
49CONFIG_LOG_BUF_SHIFT=14
50# CONFIG_CGROUPS is not set
51CONFIG_GROUP_SCHED=y
52CONFIG_FAIR_GROUP_SCHED=y
53# CONFIG_RT_GROUP_SCHED is not set
54CONFIG_USER_SCHED=y
55# CONFIG_CGROUP_SCHED is not set
56CONFIG_SYSFS_DEPRECATED=y
57CONFIG_SYSFS_DEPRECATED_V2=y
58# CONFIG_RELAY is not set
59# CONFIG_NAMESPACES is not set
60# CONFIG_BLK_DEV_INITRD is not set
61CONFIG_CC_OPTIMIZE_FOR_SIZE=y
62CONFIG_SYSCTL=y
63CONFIG_EMBEDDED=y
64CONFIG_UID16=y
65CONFIG_SYSCTL_SYSCALL=y
66CONFIG_SYSCTL_SYSCALL_CHECK=y
67CONFIG_KALLSYMS=y
68# CONFIG_KALLSYMS_EXTRA_PASS is not set
69CONFIG_HOTPLUG=y
70CONFIG_PRINTK=y
71CONFIG_BUG=y
72CONFIG_ELF_CORE=y
73# CONFIG_COMPAT_BRK is not set
74CONFIG_BASE_FULL=y
75CONFIG_FUTEX=y
76CONFIG_ANON_INODES=y
77CONFIG_EPOLL=y
78CONFIG_SIGNALFD=y
79CONFIG_TIMERFD=y
80CONFIG_EVENTFD=y
81CONFIG_SHMEM=y
82CONFIG_VM_EVENT_COUNTERS=y
83CONFIG_SLAB=y
84# CONFIG_SLUB is not set
85# CONFIG_SLOB is not set
86# CONFIG_PROFILING is not set
87# CONFIG_MARKERS is not set
88CONFIG_HAVE_OPROFILE=y
89# CONFIG_KPROBES is not set
90CONFIG_HAVE_KPROBES=y
91CONFIG_HAVE_KRETPROBES=y
92# CONFIG_HAVE_DMA_ATTRS is not set
93CONFIG_PROC_PAGE_MONITOR=y
94CONFIG_SLABINFO=y
95CONFIG_RT_MUTEXES=y
96# CONFIG_TINY_SHMEM is not set
97CONFIG_BASE_SMALL=0
98CONFIG_MODULES=y
99# CONFIG_MODULE_FORCE_LOAD is not set
100CONFIG_MODULE_UNLOAD=y
101CONFIG_MODULE_FORCE_UNLOAD=y
102CONFIG_MODVERSIONS=y
103# CONFIG_MODULE_SRCVERSION_ALL is not set
104CONFIG_KMOD=y
105CONFIG_BLOCK=y
106# CONFIG_LBD is not set
107# CONFIG_BLK_DEV_IO_TRACE is not set
108# CONFIG_LSF is not set
109# CONFIG_BLK_DEV_BSG is not set
110
111#
112# IO Schedulers
113#
114CONFIG_IOSCHED_NOOP=y
115# CONFIG_IOSCHED_AS is not set
116CONFIG_IOSCHED_DEADLINE=y
117# CONFIG_IOSCHED_CFQ is not set
118# CONFIG_DEFAULT_AS is not set
119CONFIG_DEFAULT_DEADLINE=y
120# CONFIG_DEFAULT_CFQ is not set
121# CONFIG_DEFAULT_NOOP is not set
122CONFIG_DEFAULT_IOSCHED="deadline"
123CONFIG_CLASSIC_RCU=y
124
125#
126# System Type
127#
128# CONFIG_ARCH_AAEC2000 is not set
129# CONFIG_ARCH_INTEGRATOR is not set
130# CONFIG_ARCH_REALVIEW is not set
131# CONFIG_ARCH_VERSATILE is not set
132# CONFIG_ARCH_AT91 is not set
133# CONFIG_ARCH_CLPS7500 is not set
134# CONFIG_ARCH_CLPS711X is not set
135# CONFIG_ARCH_CO285 is not set
136# CONFIG_ARCH_EBSA110 is not set
137# CONFIG_ARCH_EP93XX is not set
138# CONFIG_ARCH_FOOTBRIDGE is not set
139# CONFIG_ARCH_NETX is not set
140# CONFIG_ARCH_H720X is not set
141# CONFIG_ARCH_IMX is not set
142# CONFIG_ARCH_IOP13XX is not set
143# CONFIG_ARCH_IOP32X is not set
144# CONFIG_ARCH_IOP33X is not set
145# CONFIG_ARCH_IXP23XX is not set
146# CONFIG_ARCH_IXP2000 is not set
147# CONFIG_ARCH_IXP4XX is not set
148# CONFIG_ARCH_L7200 is not set
149# CONFIG_ARCH_KS8695 is not set
150# CONFIG_ARCH_NS9XXX is not set
151# CONFIG_ARCH_MXC is not set
152# CONFIG_ARCH_ORION5X is not set
153# CONFIG_ARCH_PNX4008 is not set
154CONFIG_ARCH_PXA=y
155# CONFIG_ARCH_RPC is not set
156# CONFIG_ARCH_SA1100 is not set
157# CONFIG_ARCH_S3C2410 is not set
158# CONFIG_ARCH_SHARK is not set
159# CONFIG_ARCH_LH7A40X is not set
160# CONFIG_ARCH_DAVINCI is not set
161# CONFIG_ARCH_OMAP is not set
162# CONFIG_ARCH_MSM7X00A is not set
163
164#
165# Intel PXA2xx/PXA3xx Implementations
166#
167# CONFIG_ARCH_GUMSTIX is not set
168# CONFIG_ARCH_LUBBOCK is not set
169# CONFIG_MACH_LOGICPD_PXA270 is not set
170# CONFIG_MACH_MAINSTONE is not set
171# CONFIG_ARCH_PXA_IDP is not set
172# CONFIG_PXA_SHARPSL is not set
173# CONFIG_ARCH_PXA_ESERIES is not set
174# CONFIG_MACH_TRIZEPS4 is not set
175# CONFIG_MACH_EM_X270 is not set
176# CONFIG_MACH_COLIBRI is not set
177# CONFIG_MACH_ZYLONITE is not set
178# CONFIG_MACH_LITTLETON is not set
179# CONFIG_MACH_ARMCORE is not set
180# CONFIG_MACH_MAGICIAN is not set
181# CONFIG_MACH_PCM027 is not set
182CONFIG_PXA_EZX=y
183CONFIG_MACH_EZX_A780=y
184CONFIG_MACH_EZX_E680=y
185CONFIG_MACH_EZX_A1200=y
186CONFIG_MACH_EZX_A910=y
187CONFIG_MACH_EZX_E6=y
188CONFIG_MACH_EZX_E2=y
189CONFIG_PXA27x=y
190CONFIG_PXA_SSP=y
191CONFIG_PXA_PWM=y
192
193#
194# Boot options
195#
196
197#
198# Power management
199#
200
201#
202# Processor Type
203#
204CONFIG_CPU_32=y
205CONFIG_CPU_XSCALE=y
206CONFIG_CPU_32v5=y
207CONFIG_CPU_ABRT_EV5T=y
208CONFIG_CPU_PABRT_NOIFAR=y
209CONFIG_CPU_CACHE_VIVT=y
210CONFIG_CPU_TLB_V4WBI=y
211CONFIG_CPU_CP15=y
212CONFIG_CPU_CP15_MMU=y
213
214#
215# Processor Features
216#
217CONFIG_ARM_THUMB=y
218# CONFIG_CPU_DCACHE_DISABLE is not set
219# CONFIG_OUTER_CACHE is not set
220CONFIG_IWMMXT=y
221CONFIG_XSCALE_PMU=y
222
223#
224# Bus support
225#
226# CONFIG_PCI_SYSCALL is not set
227# CONFIG_ARCH_SUPPORTS_MSI is not set
228# CONFIG_PCCARD is not set
229
230#
231# Kernel Features
232#
233CONFIG_TICK_ONESHOT=y
234# CONFIG_NO_HZ is not set
235CONFIG_HIGH_RES_TIMERS=y
236CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
237CONFIG_PREEMPT=y
238CONFIG_HZ=100
239CONFIG_AEABI=y
240CONFIG_OABI_COMPAT=y
241# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
242CONFIG_SELECT_MEMORY_MODEL=y
243CONFIG_FLATMEM_MANUAL=y
244# CONFIG_DISCONTIGMEM_MANUAL is not set
245# CONFIG_SPARSEMEM_MANUAL is not set
246CONFIG_FLATMEM=y
247CONFIG_FLAT_NODE_MEM_MAP=y
248# CONFIG_SPARSEMEM_STATIC is not set
249# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
250CONFIG_PAGEFLAGS_EXTENDED=y
251CONFIG_SPLIT_PTLOCK_CPUS=4096
252# CONFIG_RESOURCES_64BIT is not set
253CONFIG_ZONE_DMA_FLAG=1
254CONFIG_BOUNCE=y
255CONFIG_VIRT_TO_BUS=y
256CONFIG_ALIGNMENT_TRAP=y
257
258#
259# Boot options
260#
261CONFIG_ZBOOT_ROM_TEXT=0x0
262CONFIG_ZBOOT_ROM_BSS=0x0
263CONFIG_CMDLINE="console=tty1 root=/dev/mmcblk0p2 rootfstype=ext2 rootdelay=1 ip=192.168.0.202:192.168.0.200:192.168.0.200:255.255.255.0 debug"
264# CONFIG_XIP_KERNEL is not set
265CONFIG_KEXEC=y
266CONFIG_ATAGS_PROC=y
267
268#
269# CPU Frequency scaling
270#
271# CONFIG_CPU_FREQ is not set
272
273#
274# Floating point emulation
275#
276
277#
278# At least one emulation must be selected
279#
280CONFIG_FPE_NWFPE=y
281# CONFIG_FPE_NWFPE_XP is not set
282# CONFIG_FPE_FASTFPE is not set
283
284#
285# Userspace binary formats
286#
287CONFIG_BINFMT_ELF=y
288CONFIG_BINFMT_AOUT=m
289CONFIG_BINFMT_MISC=m
290
291#
292# Power management options
293#
294CONFIG_PM=y
295# CONFIG_PM_DEBUG is not set
296CONFIG_PM_SLEEP=y
297CONFIG_SUSPEND=y
298CONFIG_SUSPEND_FREEZER=y
299CONFIG_APM_EMULATION=y
300CONFIG_ARCH_SUSPEND_POSSIBLE=y
301
302#
303# Networking
304#
305CONFIG_NET=y
306
307#
308# Networking options
309#
310CONFIG_PACKET=y
311CONFIG_PACKET_MMAP=y
312CONFIG_UNIX=y
313CONFIG_XFRM=y
314# CONFIG_XFRM_USER is not set
315# CONFIG_XFRM_SUB_POLICY is not set
316# CONFIG_XFRM_MIGRATE is not set
317# CONFIG_XFRM_STATISTICS is not set
318# CONFIG_NET_KEY is not set
319CONFIG_INET=y
320# CONFIG_IP_MULTICAST is not set
321# CONFIG_IP_ADVANCED_ROUTER is not set
322CONFIG_IP_FIB_HASH=y
323CONFIG_IP_PNP=y
324CONFIG_IP_PNP_DHCP=y
325CONFIG_IP_PNP_BOOTP=y
326CONFIG_IP_PNP_RARP=y
327# CONFIG_NET_IPIP is not set
328# CONFIG_NET_IPGRE is not set
329# CONFIG_ARPD is not set
330CONFIG_SYN_COOKIES=y
331# CONFIG_INET_AH is not set
332# CONFIG_INET_ESP is not set
333# CONFIG_INET_IPCOMP is not set
334# CONFIG_INET_XFRM_TUNNEL is not set
335CONFIG_INET_TUNNEL=m
336# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
337# CONFIG_INET_XFRM_MODE_TUNNEL is not set
338# CONFIG_INET_XFRM_MODE_BEET is not set
339# CONFIG_INET_LRO is not set
340# CONFIG_INET_DIAG is not set
341# CONFIG_TCP_CONG_ADVANCED is not set
342CONFIG_TCP_CONG_CUBIC=y
343CONFIG_DEFAULT_TCP_CONG="cubic"
344# CONFIG_TCP_MD5SIG is not set
345# CONFIG_IP_VS is not set
346CONFIG_IPV6=m
347# CONFIG_IPV6_PRIVACY is not set
348# CONFIG_IPV6_ROUTER_PREF is not set
349# CONFIG_IPV6_OPTIMISTIC_DAD is not set
350CONFIG_INET6_AH=m
351CONFIG_INET6_ESP=m
352CONFIG_INET6_IPCOMP=m
353CONFIG_IPV6_MIP6=m
354CONFIG_INET6_XFRM_TUNNEL=m
355CONFIG_INET6_TUNNEL=m
356CONFIG_INET6_XFRM_MODE_TRANSPORT=m
357CONFIG_INET6_XFRM_MODE_TUNNEL=m
358CONFIG_INET6_XFRM_MODE_BEET=m
359# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
360CONFIG_IPV6_SIT=m
361CONFIG_IPV6_NDISC_NODETYPE=y
362CONFIG_IPV6_TUNNEL=m
363CONFIG_IPV6_MULTIPLE_TABLES=y
364CONFIG_IPV6_SUBTREES=y
365# CONFIG_IPV6_MROUTE is not set
366# CONFIG_NETWORK_SECMARK is not set
367CONFIG_NETFILTER=y
368# CONFIG_NETFILTER_DEBUG is not set
369CONFIG_NETFILTER_ADVANCED=y
370CONFIG_BRIDGE_NETFILTER=y
371
372#
373# Core Netfilter Configuration
374#
375CONFIG_NETFILTER_NETLINK=m
376CONFIG_NETFILTER_NETLINK_QUEUE=m
377CONFIG_NETFILTER_NETLINK_LOG=m
378CONFIG_NF_CONNTRACK=m
379CONFIG_NF_CT_ACCT=y
380CONFIG_NF_CONNTRACK_MARK=y
381CONFIG_NF_CONNTRACK_EVENTS=y
382# CONFIG_NF_CT_PROTO_DCCP is not set
383CONFIG_NF_CT_PROTO_GRE=m
384CONFIG_NF_CT_PROTO_SCTP=m
385CONFIG_NF_CT_PROTO_UDPLITE=m
386CONFIG_NF_CONNTRACK_AMANDA=m
387CONFIG_NF_CONNTRACK_FTP=m
388CONFIG_NF_CONNTRACK_H323=m
389CONFIG_NF_CONNTRACK_IRC=m
390CONFIG_NF_CONNTRACK_NETBIOS_NS=m
391CONFIG_NF_CONNTRACK_PPTP=m
392CONFIG_NF_CONNTRACK_SANE=m
393CONFIG_NF_CONNTRACK_SIP=m
394CONFIG_NF_CONNTRACK_TFTP=m
395CONFIG_NF_CT_NETLINK=m
396CONFIG_NETFILTER_XTABLES=m
397CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
398# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
399# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
400CONFIG_NETFILTER_XT_TARGET_MARK=m
401CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
402CONFIG_NETFILTER_XT_TARGET_NFLOG=m
403# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
404# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
405# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
406CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
407# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
408CONFIG_NETFILTER_XT_MATCH_COMMENT=m
409CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
410CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
411CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
412CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
413CONFIG_NETFILTER_XT_MATCH_DCCP=m
414CONFIG_NETFILTER_XT_MATCH_DSCP=m
415CONFIG_NETFILTER_XT_MATCH_ESP=m
416CONFIG_NETFILTER_XT_MATCH_HELPER=m
417# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
418CONFIG_NETFILTER_XT_MATCH_LENGTH=m
419CONFIG_NETFILTER_XT_MATCH_LIMIT=m
420CONFIG_NETFILTER_XT_MATCH_MAC=m
421CONFIG_NETFILTER_XT_MATCH_MARK=m
422# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
423CONFIG_NETFILTER_XT_MATCH_POLICY=m
424CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
425# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
426CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
427CONFIG_NETFILTER_XT_MATCH_QUOTA=m
428# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
429CONFIG_NETFILTER_XT_MATCH_REALM=m
430CONFIG_NETFILTER_XT_MATCH_SCTP=m
431CONFIG_NETFILTER_XT_MATCH_STATE=m
432CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
433CONFIG_NETFILTER_XT_MATCH_STRING=m
434CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
435CONFIG_NETFILTER_XT_MATCH_TIME=m
436CONFIG_NETFILTER_XT_MATCH_U32=m
437CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
438
439#
440# IP: Netfilter Configuration
441#
442CONFIG_NF_CONNTRACK_IPV4=m
443CONFIG_NF_CONNTRACK_PROC_COMPAT=y
444CONFIG_IP_NF_QUEUE=m
445CONFIG_IP_NF_IPTABLES=m
446CONFIG_IP_NF_MATCH_RECENT=m
447CONFIG_IP_NF_MATCH_ECN=m
448CONFIG_IP_NF_MATCH_AH=m
449CONFIG_IP_NF_MATCH_TTL=m
450CONFIG_IP_NF_MATCH_ADDRTYPE=m
451CONFIG_IP_NF_FILTER=m
452CONFIG_IP_NF_TARGET_REJECT=m
453CONFIG_IP_NF_TARGET_LOG=m
454CONFIG_IP_NF_TARGET_ULOG=m
455CONFIG_NF_NAT=m
456CONFIG_NF_NAT_NEEDED=y
457CONFIG_IP_NF_TARGET_MASQUERADE=m
458CONFIG_IP_NF_TARGET_REDIRECT=m
459CONFIG_IP_NF_TARGET_NETMAP=m
460CONFIG_NF_NAT_SNMP_BASIC=m
461CONFIG_NF_NAT_PROTO_GRE=m
462CONFIG_NF_NAT_PROTO_UDPLITE=m
463CONFIG_NF_NAT_PROTO_SCTP=m
464CONFIG_NF_NAT_FTP=m
465CONFIG_NF_NAT_IRC=m
466CONFIG_NF_NAT_TFTP=m
467CONFIG_NF_NAT_AMANDA=m
468CONFIG_NF_NAT_PPTP=m
469CONFIG_NF_NAT_H323=m
470CONFIG_NF_NAT_SIP=m
471CONFIG_IP_NF_MANGLE=m
472CONFIG_IP_NF_TARGET_ECN=m
473CONFIG_IP_NF_TARGET_TTL=m
474CONFIG_IP_NF_TARGET_CLUSTERIP=m
475CONFIG_IP_NF_RAW=m
476CONFIG_IP_NF_ARPTABLES=m
477CONFIG_IP_NF_ARPFILTER=m
478CONFIG_IP_NF_ARP_MANGLE=m
479
480#
481# IPv6: Netfilter Configuration
482#
483CONFIG_NF_CONNTRACK_IPV6=m
484CONFIG_IP6_NF_QUEUE=m
485CONFIG_IP6_NF_IPTABLES=m
486CONFIG_IP6_NF_MATCH_RT=m
487CONFIG_IP6_NF_MATCH_OPTS=m
488CONFIG_IP6_NF_MATCH_FRAG=m
489CONFIG_IP6_NF_MATCH_HL=m
490CONFIG_IP6_NF_MATCH_IPV6HEADER=m
491CONFIG_IP6_NF_MATCH_AH=m
492CONFIG_IP6_NF_MATCH_MH=m
493CONFIG_IP6_NF_MATCH_EUI64=m
494CONFIG_IP6_NF_FILTER=m
495CONFIG_IP6_NF_TARGET_LOG=m
496CONFIG_IP6_NF_TARGET_REJECT=m
497CONFIG_IP6_NF_MANGLE=m
498CONFIG_IP6_NF_TARGET_HL=m
499CONFIG_IP6_NF_RAW=m
500
501#
502# Bridge: Netfilter Configuration
503#
504# CONFIG_BRIDGE_NF_EBTABLES is not set
505# CONFIG_IP_DCCP is not set
506# CONFIG_IP_SCTP is not set
507# CONFIG_TIPC is not set
508# CONFIG_ATM is not set
509CONFIG_BRIDGE=m
510# CONFIG_VLAN_8021Q is not set
511# CONFIG_DECNET is not set
512CONFIG_LLC=m
513# CONFIG_LLC2 is not set
514# CONFIG_IPX is not set
515# CONFIG_ATALK is not set
516# CONFIG_X25 is not set
517# CONFIG_LAPB is not set
518# CONFIG_ECONET is not set
519# CONFIG_WAN_ROUTER is not set
520# CONFIG_NET_SCHED is not set
521CONFIG_NET_CLS_ROUTE=y
522CONFIG_NET_SCH_FIFO=y
523
524#
525# Network testing
526#
527# CONFIG_NET_PKTGEN is not set
528# CONFIG_HAMRADIO is not set
529# CONFIG_CAN is not set
530# CONFIG_IRDA is not set
531CONFIG_BT=y
532CONFIG_BT_L2CAP=m
533CONFIG_BT_SCO=y
534CONFIG_BT_RFCOMM=m
535CONFIG_BT_RFCOMM_TTY=y
536CONFIG_BT_BNEP=m
537CONFIG_BT_BNEP_MC_FILTER=y
538CONFIG_BT_BNEP_PROTO_FILTER=y
539CONFIG_BT_HIDP=m
540
541#
542# Bluetooth device drivers
543#
544# CONFIG_BT_HCIUSB is not set
545# CONFIG_BT_HCIBTUSB is not set
546# CONFIG_BT_HCIBTSDIO is not set
547CONFIG_BT_HCIUART=y
548CONFIG_BT_HCIUART_H4=y
549# CONFIG_BT_HCIUART_BCSP is not set
550# CONFIG_BT_HCIUART_LL is not set
551# CONFIG_BT_HCIBCM203X is not set
552# CONFIG_BT_HCIBPA10X is not set
553# CONFIG_BT_HCIBFUSB is not set
554# CONFIG_BT_HCIVHCI is not set
555# CONFIG_AF_RXRPC is not set
556CONFIG_FIB_RULES=y
557
558#
559# Wireless
560#
561CONFIG_CFG80211=m
562CONFIG_NL80211=y
563CONFIG_WIRELESS_EXT=y
564CONFIG_MAC80211=m
565
566#
567# Rate control algorithm selection
568#
569CONFIG_MAC80211_RC_DEFAULT_PID=y
570# CONFIG_MAC80211_RC_DEFAULT_NONE is not set
571
572#
573# Selecting 'y' for an algorithm will
574#
575
576#
577# build the algorithm into mac80211.
578#
579CONFIG_MAC80211_RC_DEFAULT="pid"
580CONFIG_MAC80211_RC_PID=y
581# CONFIG_MAC80211_MESH is not set
582CONFIG_MAC80211_LEDS=y
583# CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set
584# CONFIG_MAC80211_DEBUG is not set
585CONFIG_IEEE80211=m
586# CONFIG_IEEE80211_DEBUG is not set
587CONFIG_IEEE80211_CRYPT_WEP=m
588CONFIG_IEEE80211_CRYPT_CCMP=m
589CONFIG_IEEE80211_CRYPT_TKIP=m
590# CONFIG_RFKILL is not set
591# CONFIG_NET_9P is not set
592
593#
594# Device Drivers
595#
596
597#
598# Generic Driver Options
599#
600CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
601CONFIG_STANDALONE=y
602CONFIG_PREVENT_FIRMWARE_BUILD=y
603CONFIG_FW_LOADER=m
604# CONFIG_SYS_HYPERVISOR is not set
605CONFIG_CONNECTOR=m
606CONFIG_MTD=y
607# CONFIG_MTD_DEBUG is not set
608# CONFIG_MTD_CONCAT is not set
609CONFIG_MTD_PARTITIONS=y
610# CONFIG_MTD_REDBOOT_PARTS is not set
611# CONFIG_MTD_CMDLINE_PARTS is not set
612# CONFIG_MTD_AFS_PARTS is not set
613# CONFIG_MTD_AR7_PARTS is not set
614
615#
616# User Modules And Translation Layers
617#
618CONFIG_MTD_CHAR=y
619# CONFIG_MTD_BLKDEVS is not set
620# CONFIG_MTD_BLOCK is not set
621# CONFIG_MTD_BLOCK_RO is not set
622# CONFIG_FTL is not set
623# CONFIG_NFTL is not set
624# CONFIG_INFTL is not set
625# CONFIG_RFD_FTL is not set
626# CONFIG_SSFDC is not set
627# CONFIG_MTD_OOPS is not set
628
629#
630# RAM/ROM/Flash chip drivers
631#
632CONFIG_MTD_CFI=y
633# CONFIG_MTD_JEDECPROBE is not set
634CONFIG_MTD_GEN_PROBE=y
635CONFIG_MTD_CFI_ADV_OPTIONS=y
636CONFIG_MTD_CFI_NOSWAP=y
637# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
638# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
639CONFIG_MTD_CFI_GEOMETRY=y
640# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
641CONFIG_MTD_MAP_BANK_WIDTH_2=y
642# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
643# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
644# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
645# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
646CONFIG_MTD_CFI_I1=y
647# CONFIG_MTD_CFI_I2 is not set
648# CONFIG_MTD_CFI_I4 is not set
649# CONFIG_MTD_CFI_I8 is not set
650# CONFIG_MTD_OTP is not set
651CONFIG_MTD_CFI_INTELEXT=y
652# CONFIG_MTD_CFI_AMDSTD is not set
653# CONFIG_MTD_CFI_STAA is not set
654CONFIG_MTD_CFI_UTIL=y
655# CONFIG_MTD_RAM is not set
656# CONFIG_MTD_ROM is not set
657# CONFIG_MTD_ABSENT is not set
658CONFIG_MTD_XIP=y
659
660#
661# Mapping drivers for chip access
662#
663# CONFIG_MTD_COMPLEX_MAPPINGS is not set
664CONFIG_MTD_PHYSMAP=y
665CONFIG_MTD_PHYSMAP_START=0x0
666CONFIG_MTD_PHYSMAP_LEN=0x0
667CONFIG_MTD_PHYSMAP_BANKWIDTH=2
668# CONFIG_MTD_PXA2XX is not set
669# CONFIG_MTD_ARM_INTEGRATOR is not set
670# CONFIG_MTD_SHARP_SL is not set
671# CONFIG_MTD_PLATRAM is not set
672
673#
674# Self-contained MTD device drivers
675#
676# CONFIG_MTD_DATAFLASH is not set
677# CONFIG_MTD_M25P80 is not set
678# CONFIG_MTD_SLRAM is not set
679# CONFIG_MTD_PHRAM is not set
680# CONFIG_MTD_MTDRAM is not set
681# CONFIG_MTD_BLOCK2MTD is not set
682
683#
684# Disk-On-Chip Device Drivers
685#
686# CONFIG_MTD_DOC2000 is not set
687# CONFIG_MTD_DOC2001 is not set
688# CONFIG_MTD_DOC2001PLUS is not set
689# CONFIG_MTD_NAND is not set
690# CONFIG_MTD_ONENAND is not set
691
692#
693# UBI - Unsorted block images
694#
695# CONFIG_MTD_UBI is not set
696# CONFIG_PARPORT is not set
697CONFIG_BLK_DEV=y
698# CONFIG_BLK_DEV_COW_COMMON is not set
699CONFIG_BLK_DEV_LOOP=m
700CONFIG_BLK_DEV_CRYPTOLOOP=m
701CONFIG_BLK_DEV_NBD=m
702# CONFIG_BLK_DEV_UB is not set
703CONFIG_BLK_DEV_RAM=m
704CONFIG_BLK_DEV_RAM_COUNT=16
705CONFIG_BLK_DEV_RAM_SIZE=4096
706# CONFIG_BLK_DEV_XIP is not set
707# CONFIG_CDROM_PKTCDVD is not set
708# CONFIG_ATA_OVER_ETH is not set
709CONFIG_MISC_DEVICES=y
710# CONFIG_EEPROM_93CX6 is not set
711# CONFIG_ENCLOSURE_SERVICES is not set
712CONFIG_HAVE_IDE=y
713# CONFIG_IDE is not set
714
715#
716# SCSI device support
717#
718# CONFIG_RAID_ATTRS is not set
719# CONFIG_SCSI is not set
720# CONFIG_SCSI_DMA is not set
721# CONFIG_SCSI_NETLINK is not set
722# CONFIG_ATA is not set
723# CONFIG_MD is not set
724CONFIG_NETDEVICES=y
725# CONFIG_NETDEVICES_MULTIQUEUE is not set
726CONFIG_DUMMY=y
727# CONFIG_BONDING is not set
728# CONFIG_MACVLAN is not set
729# CONFIG_EQUALIZER is not set
730# CONFIG_TUN is not set
731# CONFIG_VETH is not set
732# CONFIG_NET_ETHERNET is not set
733# CONFIG_NETDEV_1000 is not set
734# CONFIG_NETDEV_10000 is not set
735
736#
737# Wireless LAN
738#
739# CONFIG_WLAN_PRE80211 is not set
740# CONFIG_WLAN_80211 is not set
741# CONFIG_IWLWIFI_LEDS is not set
742
743#
744# USB Network Adapters
745#
746# CONFIG_USB_CATC is not set
747# CONFIG_USB_KAWETH is not set
748# CONFIG_USB_PEGASUS is not set
749# CONFIG_USB_RTL8150 is not set
750# CONFIG_USB_USBNET is not set
751# CONFIG_WAN is not set
752CONFIG_PPP=m
753CONFIG_PPP_MULTILINK=y
754CONFIG_PPP_FILTER=y
755CONFIG_PPP_ASYNC=m
756CONFIG_PPP_SYNC_TTY=m
757CONFIG_PPP_DEFLATE=m
758CONFIG_PPP_BSDCOMP=m
759# CONFIG_PPP_MPPE is not set
760# CONFIG_PPPOE is not set
761# CONFIG_PPPOL2TP is not set
762# CONFIG_SLIP is not set
763CONFIG_SLHC=m
764# CONFIG_NETCONSOLE is not set
765# CONFIG_NETPOLL is not set
766# CONFIG_NET_POLL_CONTROLLER is not set
767# CONFIG_ISDN is not set
768
769#
770# Input device support
771#
772CONFIG_INPUT=y
773# CONFIG_INPUT_FF_MEMLESS is not set
774# CONFIG_INPUT_POLLDEV is not set
775
776#
777# Userland interfaces
778#
779# CONFIG_INPUT_MOUSEDEV is not set
780# CONFIG_INPUT_JOYDEV is not set
781CONFIG_INPUT_EVDEV=y
782# CONFIG_INPUT_EVBUG is not set
783# CONFIG_INPUT_APMPOWER is not set
784
785#
786# Input Device Drivers
787#
788CONFIG_INPUT_KEYBOARD=y
789# CONFIG_KEYBOARD_ATKBD is not set
790# CONFIG_KEYBOARD_SUNKBD is not set
791# CONFIG_KEYBOARD_LKKBD is not set
792# CONFIG_KEYBOARD_XTKBD is not set
793# CONFIG_KEYBOARD_NEWTON is not set
794# CONFIG_KEYBOARD_STOWAWAY is not set
795CONFIG_KEYBOARD_PXA27x=y
796CONFIG_KEYBOARD_GPIO=y
797# CONFIG_INPUT_MOUSE is not set
798# CONFIG_INPUT_JOYSTICK is not set
799# CONFIG_INPUT_TABLET is not set
800CONFIG_INPUT_TOUCHSCREEN=y
801# CONFIG_TOUCHSCREEN_ADS7846 is not set
802# CONFIG_TOUCHSCREEN_FUJITSU is not set
803# CONFIG_TOUCHSCREEN_GUNZE is not set
804# CONFIG_TOUCHSCREEN_ELO is not set
805# CONFIG_TOUCHSCREEN_MTOUCH is not set
806# CONFIG_TOUCHSCREEN_MK712 is not set
807# CONFIG_TOUCHSCREEN_PENMOUNT is not set
808# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
809# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
810# CONFIG_TOUCHSCREEN_UCB1400 is not set
811# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
812CONFIG_TOUCHSCREEN_PCAP=y
813CONFIG_INPUT_MISC=y
814# CONFIG_INPUT_ATI_REMOTE is not set
815# CONFIG_INPUT_ATI_REMOTE2 is not set
816# CONFIG_INPUT_KEYSPAN_REMOTE is not set
817# CONFIG_INPUT_POWERMATE is not set
818# CONFIG_INPUT_YEALINK is not set
819CONFIG_INPUT_UINPUT=y
820
821#
822# Hardware I/O ports
823#
824# CONFIG_SERIO is not set
825# CONFIG_GAMEPORT is not set
826
827#
828# Character devices
829#
830CONFIG_VT=y
831CONFIG_VT_CONSOLE=y
832CONFIG_HW_CONSOLE=y
833# CONFIG_VT_HW_CONSOLE_BINDING is not set
834CONFIG_DEVKMEM=y
835# CONFIG_SERIAL_NONSTANDARD is not set
836
837#
838# Serial drivers
839#
840# CONFIG_SERIAL_8250 is not set
841
842#
843# Non-8250 serial port support
844#
845CONFIG_SERIAL_PXA=y
846CONFIG_SERIAL_PXA_CONSOLE=y
847CONFIG_SERIAL_CORE=y
848CONFIG_SERIAL_CORE_CONSOLE=y
849CONFIG_UNIX98_PTYS=y
850CONFIG_LEGACY_PTYS=y
851CONFIG_LEGACY_PTY_COUNT=8
852# CONFIG_IPMI_HANDLER is not set
853CONFIG_HW_RANDOM=y
854# CONFIG_NVRAM is not set
855# CONFIG_R3964 is not set
856# CONFIG_RAW_DRIVER is not set
857# CONFIG_TCG_TPM is not set
858CONFIG_I2C=y
859CONFIG_I2C_BOARDINFO=y
860CONFIG_I2C_CHARDEV=y
861
862#
863# I2C Hardware Bus support
864#
865# CONFIG_I2C_GPIO is not set
866CONFIG_I2C_PXA=y
867# CONFIG_I2C_PXA_SLAVE is not set
868# CONFIG_I2C_OCORES is not set
869# CONFIG_I2C_PARPORT_LIGHT is not set
870# CONFIG_I2C_SIMTEC is not set
871# CONFIG_I2C_TAOS_EVM is not set
872# CONFIG_I2C_STUB is not set
873# CONFIG_I2C_TINY_USB is not set
874# CONFIG_I2C_PCA_PLATFORM is not set
875
876#
877# Miscellaneous I2C Chip support
878#
879# CONFIG_DS1682 is not set
880# CONFIG_SENSORS_EEPROM is not set
881# CONFIG_SENSORS_PCF8574 is not set
882# CONFIG_PCF8575 is not set
883# CONFIG_SENSORS_PCF8591 is not set
884# CONFIG_TPS65010 is not set
885# CONFIG_SENSORS_MAX6875 is not set
886# CONFIG_SENSORS_TSL2550 is not set
887# CONFIG_I2C_DEBUG_CORE is not set
888# CONFIG_I2C_DEBUG_ALGO is not set
889# CONFIG_I2C_DEBUG_BUS is not set
890# CONFIG_I2C_DEBUG_CHIP is not set
891CONFIG_SPI=y
892CONFIG_SPI_MASTER=y
893
894#
895# SPI Master Controller Drivers
896#
897# CONFIG_SPI_BITBANG is not set
898CONFIG_SPI_PXA2XX=m
899
900#
901# SPI Protocol Masters
902#
903# CONFIG_SPI_AT25 is not set
904# CONFIG_SPI_SPIDEV is not set
905# CONFIG_SPI_TLE62X0 is not set
906CONFIG_HAVE_GPIO_LIB=y
907
908#
909# GPIO Support
910#
911
912#
913# I2C GPIO expanders:
914#
915# CONFIG_GPIO_PCA953X is not set
916# CONFIG_GPIO_PCF857X is not set
917
918#
919# SPI GPIO expanders:
920#
921# CONFIG_GPIO_MCP23S08 is not set
922# CONFIG_W1 is not set
923# CONFIG_POWER_SUPPLY is not set
924# CONFIG_HWMON is not set
925# CONFIG_WATCHDOG is not set
926
927#
928# Sonics Silicon Backplane
929#
930CONFIG_SSB_POSSIBLE=y
931# CONFIG_SSB is not set
932
933#
934# Multifunction device drivers
935#
936# CONFIG_MFD_CORE is not set
937# CONFIG_MFD_SM501 is not set
938# CONFIG_MFD_ASIC3 is not set
939# CONFIG_HTC_EGPIO is not set
940# CONFIG_HTC_PASIC3 is not set
941# CONFIG_MFD_TC6393XB is not set
942CONFIG_EZX_PCAP=y
943
944#
945# Multimedia devices
946#
947
948#
949# Multimedia core support
950#
951CONFIG_VIDEO_DEV=m
952CONFIG_VIDEO_V4L2_COMMON=m
953CONFIG_VIDEO_ALLOW_V4L1=y
954CONFIG_VIDEO_V4L1_COMPAT=y
955# CONFIG_DVB_CORE is not set
956CONFIG_VIDEO_MEDIA=m
957
958#
959# Multimedia drivers
960#
961# CONFIG_MEDIA_ATTACH is not set
962CONFIG_MEDIA_TUNER=m
963# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
964CONFIG_MEDIA_TUNER_SIMPLE=m
965CONFIG_MEDIA_TUNER_TDA8290=m
966CONFIG_MEDIA_TUNER_TDA9887=m
967CONFIG_MEDIA_TUNER_TEA5761=m
968CONFIG_MEDIA_TUNER_TEA5767=m
969CONFIG_MEDIA_TUNER_MT20XX=m
970CONFIG_MEDIA_TUNER_XC2028=m
971CONFIG_MEDIA_TUNER_XC5000=m
972CONFIG_VIDEO_V4L2=m
973CONFIG_VIDEO_V4L1=m
974CONFIG_VIDEO_CAPTURE_DRIVERS=y
975# CONFIG_VIDEO_ADV_DEBUG is not set
976CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
977# CONFIG_VIDEO_VIVI is not set
978# CONFIG_VIDEO_CPIA is not set
979# CONFIG_VIDEO_CPIA2 is not set
980# CONFIG_VIDEO_SAA5246A is not set
981# CONFIG_VIDEO_SAA5249 is not set
982# CONFIG_TUNER_3036 is not set
983# CONFIG_V4L_USB_DRIVERS is not set
984# CONFIG_SOC_CAMERA is not set
985# CONFIG_VIDEO_PXA27x is not set
986CONFIG_RADIO_ADAPTERS=y
987# CONFIG_USB_DSBR is not set
988# CONFIG_USB_SI470X is not set
989# CONFIG_DAB is not set
990
991#
992# Graphics support
993#
994# CONFIG_VGASTATE is not set
995# CONFIG_VIDEO_OUTPUT_CONTROL is not set
996CONFIG_FB=y
997# CONFIG_FIRMWARE_EDID is not set
998# CONFIG_FB_DDC is not set
999CONFIG_FB_CFB_FILLRECT=y
1000CONFIG_FB_CFB_COPYAREA=y
1001CONFIG_FB_CFB_IMAGEBLIT=y
1002# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
1003# CONFIG_FB_SYS_FILLRECT is not set
1004# CONFIG_FB_SYS_COPYAREA is not set
1005# CONFIG_FB_SYS_IMAGEBLIT is not set
1006# CONFIG_FB_FOREIGN_ENDIAN is not set
1007# CONFIG_FB_SYS_FOPS is not set
1008# CONFIG_FB_SVGALIB is not set
1009# CONFIG_FB_MACMODES is not set
1010# CONFIG_FB_BACKLIGHT is not set
1011# CONFIG_FB_MODE_HELPERS is not set
1012# CONFIG_FB_TILEBLITTING is not set
1013
1014#
1015# Frame buffer hardware drivers
1016#
1017# CONFIG_FB_UVESA is not set
1018# CONFIG_FB_S1D13XXX is not set
1019CONFIG_FB_PXA=y
1020# CONFIG_FB_PXA_SMARTPANEL is not set
1021CONFIG_FB_PXA_PARAMETERS=y
1022# CONFIG_FB_MBX is not set
1023# CONFIG_FB_AM200EPD is not set
1024# CONFIG_FB_VIRTUAL is not set
1025CONFIG_BACKLIGHT_LCD_SUPPORT=y
1026# CONFIG_LCD_CLASS_DEVICE is not set
1027CONFIG_BACKLIGHT_CLASS_DEVICE=y
1028# CONFIG_BACKLIGHT_CORGI is not set
1029CONFIG_BACKLIGHT_PWM=y
1030
1031#
1032# Display device support
1033#
1034# CONFIG_DISPLAY_SUPPORT is not set
1035
1036#
1037# Console display driver support
1038#
1039# CONFIG_VGA_CONSOLE is not set
1040CONFIG_DUMMY_CONSOLE=y
1041CONFIG_FRAMEBUFFER_CONSOLE=y
1042# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
1043# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
1044CONFIG_FONTS=y
1045# CONFIG_FONT_8x8 is not set
1046# CONFIG_FONT_8x16 is not set
1047# CONFIG_FONT_6x11 is not set
1048# CONFIG_FONT_7x14 is not set
1049# CONFIG_FONT_PEARL_8x8 is not set
1050# CONFIG_FONT_ACORN_8x8 is not set
1051CONFIG_FONT_MINI_4x6=y
1052# CONFIG_FONT_SUN8x16 is not set
1053# CONFIG_FONT_SUN12x22 is not set
1054# CONFIG_FONT_10x18 is not set
1055# CONFIG_LOGO is not set
1056
1057#
1058# Sound
1059#
1060CONFIG_SOUND=y
1061
1062#
1063# Advanced Linux Sound Architecture
1064#
1065CONFIG_SND=y
1066CONFIG_SND_TIMER=y
1067CONFIG_SND_PCM=y
1068# CONFIG_SND_SEQUENCER is not set
1069CONFIG_SND_OSSEMUL=y
1070CONFIG_SND_MIXER_OSS=y
1071CONFIG_SND_PCM_OSS=y
1072CONFIG_SND_PCM_OSS_PLUGINS=y
1073# CONFIG_SND_DYNAMIC_MINORS is not set
1074CONFIG_SND_SUPPORT_OLD_API=y
1075CONFIG_SND_VERBOSE_PROCFS=y
1076# CONFIG_SND_VERBOSE_PRINTK is not set
1077# CONFIG_SND_DEBUG is not set
1078
1079#
1080# Generic devices
1081#
1082# CONFIG_SND_DUMMY is not set
1083# CONFIG_SND_MTPAV is not set
1084# CONFIG_SND_SERIAL_U16550 is not set
1085# CONFIG_SND_MPU401 is not set
1086
1087#
1088# ALSA ARM devices
1089#
1090# CONFIG_SND_PXA2XX_AC97 is not set
1091
1092#
1093# SPI devices
1094#
1095
1096#
1097# USB devices
1098#
1099# CONFIG_SND_USB_AUDIO is not set
1100# CONFIG_SND_USB_CAIAQ is not set
1101
1102#
1103# System on Chip audio support
1104#
1105CONFIG_SND_SOC=y
1106CONFIG_SND_PXA2XX_SOC=y
1107
1108#
1109# ALSA SoC audio for Freescale SOCs
1110#
1111
1112#
1113# SoC Audio for the Texas Instruments OMAP
1114#
1115
1116#
1117# Open Sound System
1118#
1119# CONFIG_SOUND_PRIME is not set
1120CONFIG_HID_SUPPORT=y
1121CONFIG_HID=y
1122# CONFIG_HID_DEBUG is not set
1123# CONFIG_HIDRAW is not set
1124
1125#
1126# USB Input Devices
1127#
1128# CONFIG_USB_HID is not set
1129
1130#
1131# USB HID Boot Protocol drivers
1132#
1133# CONFIG_USB_KBD is not set
1134# CONFIG_USB_MOUSE is not set
1135CONFIG_USB_SUPPORT=y
1136CONFIG_USB_ARCH_HAS_HCD=y
1137CONFIG_USB_ARCH_HAS_OHCI=y
1138# CONFIG_USB_ARCH_HAS_EHCI is not set
1139CONFIG_USB=y
1140# CONFIG_USB_DEBUG is not set
1141# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
1142
1143#
1144# Miscellaneous USB options
1145#
1146# CONFIG_USB_DEVICEFS is not set
1147# CONFIG_USB_DEVICE_CLASS is not set
1148# CONFIG_USB_DYNAMIC_MINORS is not set
1149# CONFIG_USB_SUSPEND is not set
1150# CONFIG_USB_OTG is not set
1151# CONFIG_USB_OTG_WHITELIST is not set
1152# CONFIG_USB_OTG_BLACKLIST_HUB is not set
1153
1154#
1155# USB Host Controller Drivers
1156#
1157# CONFIG_USB_C67X00_HCD is not set
1158# CONFIG_USB_ISP116X_HCD is not set
1159# CONFIG_USB_ISP1760_HCD is not set
1160CONFIG_USB_OHCI_HCD=y
1161# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
1162# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
1163CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1164# CONFIG_USB_SL811_HCD is not set
1165# CONFIG_USB_R8A66597_HCD is not set
1166
1167#
1168# USB Device Class drivers
1169#
1170# CONFIG_USB_ACM is not set
1171# CONFIG_USB_PRINTER is not set
1172
1173#
1174# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
1175#
1176
1177#
1178# may also be needed; see USB_STORAGE Help for more information
1179#
1180# CONFIG_USB_LIBUSUAL is not set
1181
1182#
1183# USB Imaging devices
1184#
1185# CONFIG_USB_MDC800 is not set
1186# CONFIG_USB_MON is not set
1187
1188#
1189# USB port drivers
1190#
1191# CONFIG_USB_SERIAL is not set
1192
1193#
1194# USB Miscellaneous drivers
1195#
1196# CONFIG_USB_EMI62 is not set
1197# CONFIG_USB_EMI26 is not set
1198# CONFIG_USB_ADUTUX is not set
1199# CONFIG_USB_AUERSWALD is not set
1200# CONFIG_USB_RIO500 is not set
1201# CONFIG_USB_LEGOTOWER is not set
1202# CONFIG_USB_LCD is not set
1203# CONFIG_USB_BERRY_CHARGE is not set
1204# CONFIG_USB_LED is not set
1205# CONFIG_USB_CYPRESS_CY7C63 is not set
1206# CONFIG_USB_CYTHERM is not set
1207# CONFIG_USB_PHIDGET is not set
1208# CONFIG_USB_IDMOUSE is not set
1209# CONFIG_USB_FTDI_ELAN is not set
1210# CONFIG_USB_APPLEDISPLAY is not set
1211# CONFIG_USB_LD is not set
1212# CONFIG_USB_TRANCEVIBRATOR is not set
1213# CONFIG_USB_IOWARRIOR is not set
1214CONFIG_USB_GADGET=y
1215# CONFIG_USB_GADGET_DEBUG_FILES is not set
1216CONFIG_USB_GADGET_SELECTED=y
1217# CONFIG_USB_GADGET_AMD5536UDC is not set
1218# CONFIG_USB_GADGET_ATMEL_USBA is not set
1219# CONFIG_USB_GADGET_FSL_USB2 is not set
1220# CONFIG_USB_GADGET_NET2280 is not set
1221# CONFIG_USB_GADGET_PXA25X is not set
1222# CONFIG_USB_GADGET_M66592 is not set
1223CONFIG_USB_GADGET_PXA27X=y
1224CONFIG_USB_PXA27X=y
1225# CONFIG_USB_GADGET_GOKU is not set
1226# CONFIG_USB_GADGET_LH7A40X is not set
1227# CONFIG_USB_GADGET_OMAP is not set
1228# CONFIG_USB_GADGET_S3C2410 is not set
1229# CONFIG_USB_GADGET_AT91 is not set
1230# CONFIG_USB_GADGET_DUMMY_HCD is not set
1231# CONFIG_USB_GADGET_DUALSPEED is not set
1232# CONFIG_USB_ZERO is not set
1233CONFIG_USB_ETH=y
1234# CONFIG_USB_ETH_RNDIS is not set
1235# CONFIG_USB_GADGETFS is not set
1236# CONFIG_USB_FILE_STORAGE is not set
1237# CONFIG_USB_G_SERIAL is not set
1238# CONFIG_USB_MIDI_GADGET is not set
1239# CONFIG_USB_G_PRINTER is not set
1240CONFIG_MMC=y
1241# CONFIG_MMC_DEBUG is not set
1242CONFIG_MMC_UNSAFE_RESUME=y
1243
1244#
1245# MMC/SD Card Drivers
1246#
1247CONFIG_MMC_BLOCK=y
1248CONFIG_MMC_BLOCK_BOUNCE=y
1249CONFIG_SDIO_UART=y
1250
1251#
1252# MMC/SD Host Controller Drivers
1253#
1254CONFIG_MMC_PXA=y
1255# CONFIG_MMC_SPI is not set
1256CONFIG_NEW_LEDS=y
1257CONFIG_LEDS_CLASS=y
1258
1259#
1260# LED drivers
1261#
1262# CONFIG_LEDS_GPIO is not set
1263
1264#
1265# LED Triggers
1266#
1267CONFIG_LEDS_TRIGGERS=y
1268CONFIG_LEDS_TRIGGER_TIMER=y
1269CONFIG_LEDS_TRIGGER_HEARTBEAT=y
1270# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1271CONFIG_RTC_LIB=y
1272CONFIG_RTC_CLASS=y
1273CONFIG_RTC_HCTOSYS=y
1274CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1275# CONFIG_RTC_DEBUG is not set
1276
1277#
1278# RTC interfaces
1279#
1280CONFIG_RTC_INTF_SYSFS=y
1281CONFIG_RTC_INTF_PROC=y
1282CONFIG_RTC_INTF_DEV=y
1283# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1284# CONFIG_RTC_DRV_TEST is not set
1285
1286#
1287# I2C RTC drivers
1288#
1289# CONFIG_RTC_DRV_DS1307 is not set
1290# CONFIG_RTC_DRV_DS1374 is not set
1291# CONFIG_RTC_DRV_DS1672 is not set
1292# CONFIG_RTC_DRV_MAX6900 is not set
1293# CONFIG_RTC_DRV_RS5C372 is not set
1294# CONFIG_RTC_DRV_ISL1208 is not set
1295# CONFIG_RTC_DRV_X1205 is not set
1296# CONFIG_RTC_DRV_PCF8563 is not set
1297# CONFIG_RTC_DRV_PCF8583 is not set
1298# CONFIG_RTC_DRV_M41T80 is not set
1299# CONFIG_RTC_DRV_S35390A is not set
1300
1301#
1302# SPI RTC drivers
1303#
1304# CONFIG_RTC_DRV_MAX6902 is not set
1305# CONFIG_RTC_DRV_R9701 is not set
1306# CONFIG_RTC_DRV_RS5C348 is not set
1307
1308#
1309# Platform RTC drivers
1310#
1311# CONFIG_RTC_DRV_CMOS is not set
1312# CONFIG_RTC_DRV_DS1511 is not set
1313# CONFIG_RTC_DRV_DS1553 is not set
1314# CONFIG_RTC_DRV_DS1742 is not set
1315# CONFIG_RTC_DRV_STK17TA8 is not set
1316# CONFIG_RTC_DRV_M48T86 is not set
1317# CONFIG_RTC_DRV_M48T59 is not set
1318# CONFIG_RTC_DRV_V3020 is not set
1319
1320#
1321# on-CPU RTC drivers
1322#
1323CONFIG_RTC_DRV_SA1100=m
1324# CONFIG_UIO is not set
1325
1326#
1327# File systems
1328#
1329CONFIG_EXT2_FS=y
1330# CONFIG_EXT2_FS_XATTR is not set
1331# CONFIG_EXT2_FS_XIP is not set
1332CONFIG_EXT3_FS=m
1333CONFIG_EXT3_FS_XATTR=y
1334# CONFIG_EXT3_FS_POSIX_ACL is not set
1335# CONFIG_EXT3_FS_SECURITY is not set
1336# CONFIG_EXT4DEV_FS is not set
1337CONFIG_JBD=m
1338CONFIG_FS_MBCACHE=y
1339CONFIG_REISERFS_FS=m
1340# CONFIG_REISERFS_CHECK is not set
1341# CONFIG_REISERFS_PROC_INFO is not set
1342CONFIG_REISERFS_FS_XATTR=y
1343CONFIG_REISERFS_FS_POSIX_ACL=y
1344CONFIG_REISERFS_FS_SECURITY=y
1345# CONFIG_JFS_FS is not set
1346CONFIG_FS_POSIX_ACL=y
1347CONFIG_XFS_FS=m
1348# CONFIG_XFS_QUOTA is not set
1349# CONFIG_XFS_POSIX_ACL is not set
1350# CONFIG_XFS_RT is not set
1351# CONFIG_XFS_DEBUG is not set
1352# CONFIG_OCFS2_FS is not set
1353CONFIG_DNOTIFY=y
1354CONFIG_INOTIFY=y
1355CONFIG_INOTIFY_USER=y
1356# CONFIG_QUOTA is not set
1357CONFIG_AUTOFS_FS=y
1358CONFIG_AUTOFS4_FS=y
1359CONFIG_FUSE_FS=m
1360
1361#
1362# CD-ROM/DVD Filesystems
1363#
1364CONFIG_ISO9660_FS=m
1365CONFIG_JOLIET=y
1366CONFIG_ZISOFS=y
1367# CONFIG_UDF_FS is not set
1368
1369#
1370# DOS/FAT/NT Filesystems
1371#
1372CONFIG_FAT_FS=m
1373CONFIG_MSDOS_FS=m
1374CONFIG_VFAT_FS=m
1375CONFIG_FAT_DEFAULT_CODEPAGE=437
1376CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1377# CONFIG_NTFS_FS is not set
1378
1379#
1380# Pseudo filesystems
1381#
1382CONFIG_PROC_FS=y
1383CONFIG_PROC_SYSCTL=y
1384CONFIG_SYSFS=y
1385CONFIG_TMPFS=y
1386# CONFIG_TMPFS_POSIX_ACL is not set
1387# CONFIG_HUGETLB_PAGE is not set
1388# CONFIG_CONFIGFS_FS is not set
1389
1390#
1391# Miscellaneous filesystems
1392#
1393# CONFIG_ADFS_FS is not set
1394# CONFIG_AFFS_FS is not set
1395# CONFIG_HFS_FS is not set
1396# CONFIG_HFSPLUS_FS is not set
1397# CONFIG_BEFS_FS is not set
1398# CONFIG_BFS_FS is not set
1399# CONFIG_EFS_FS is not set
1400# CONFIG_JFFS2_FS is not set
1401CONFIG_CRAMFS=m
1402# CONFIG_VXFS_FS is not set
1403# CONFIG_MINIX_FS is not set
1404# CONFIG_HPFS_FS is not set
1405# CONFIG_QNX4FS_FS is not set
1406# CONFIG_ROMFS_FS is not set
1407# CONFIG_SYSV_FS is not set
1408# CONFIG_UFS_FS is not set
1409CONFIG_NETWORK_FILESYSTEMS=y
1410CONFIG_NFS_FS=y
1411CONFIG_NFS_V3=y
1412CONFIG_NFS_V3_ACL=y
1413# CONFIG_NFS_V4 is not set
1414CONFIG_NFSD=m
1415CONFIG_NFSD_V2_ACL=y
1416CONFIG_NFSD_V3=y
1417CONFIG_NFSD_V3_ACL=y
1418# CONFIG_NFSD_V4 is not set
1419# CONFIG_ROOT_NFS is not set
1420CONFIG_LOCKD=y
1421CONFIG_LOCKD_V4=y
1422CONFIG_EXPORTFS=m
1423CONFIG_NFS_ACL_SUPPORT=y
1424CONFIG_NFS_COMMON=y
1425CONFIG_SUNRPC=y
1426# CONFIG_SUNRPC_BIND34 is not set
1427# CONFIG_RPCSEC_GSS_KRB5 is not set
1428# CONFIG_RPCSEC_GSS_SPKM3 is not set
1429CONFIG_SMB_FS=m
1430# CONFIG_SMB_NLS_DEFAULT is not set
1431CONFIG_CIFS=m
1432CONFIG_CIFS_STATS=y
1433# CONFIG_CIFS_STATS2 is not set
1434CONFIG_CIFS_WEAK_PW_HASH=y
1435CONFIG_CIFS_XATTR=y
1436CONFIG_CIFS_POSIX=y
1437# CONFIG_CIFS_DEBUG2 is not set
1438# CONFIG_CIFS_EXPERIMENTAL is not set
1439# CONFIG_NCP_FS is not set
1440# CONFIG_CODA_FS is not set
1441# CONFIG_AFS_FS is not set
1442
1443#
1444# Partition Types
1445#
1446# CONFIG_PARTITION_ADVANCED is not set
1447CONFIG_MSDOS_PARTITION=y
1448CONFIG_NLS=y
1449CONFIG_NLS_DEFAULT="iso8859-1"
1450CONFIG_NLS_CODEPAGE_437=m
1451CONFIG_NLS_CODEPAGE_737=m
1452CONFIG_NLS_CODEPAGE_775=m
1453CONFIG_NLS_CODEPAGE_850=m
1454CONFIG_NLS_CODEPAGE_852=m
1455CONFIG_NLS_CODEPAGE_855=m
1456CONFIG_NLS_CODEPAGE_857=m
1457CONFIG_NLS_CODEPAGE_860=m
1458CONFIG_NLS_CODEPAGE_861=m
1459CONFIG_NLS_CODEPAGE_862=m
1460CONFIG_NLS_CODEPAGE_863=m
1461CONFIG_NLS_CODEPAGE_864=m
1462CONFIG_NLS_CODEPAGE_865=m
1463CONFIG_NLS_CODEPAGE_866=m
1464CONFIG_NLS_CODEPAGE_869=m
1465CONFIG_NLS_CODEPAGE_936=m
1466CONFIG_NLS_CODEPAGE_950=m
1467CONFIG_NLS_CODEPAGE_932=m
1468CONFIG_NLS_CODEPAGE_949=m
1469CONFIG_NLS_CODEPAGE_874=m
1470CONFIG_NLS_ISO8859_8=m
1471CONFIG_NLS_CODEPAGE_1250=m
1472CONFIG_NLS_CODEPAGE_1251=m
1473CONFIG_NLS_ASCII=m
1474CONFIG_NLS_ISO8859_1=m
1475CONFIG_NLS_ISO8859_2=m
1476CONFIG_NLS_ISO8859_3=m
1477CONFIG_NLS_ISO8859_4=m
1478CONFIG_NLS_ISO8859_5=m
1479CONFIG_NLS_ISO8859_6=m
1480CONFIG_NLS_ISO8859_7=m
1481CONFIG_NLS_ISO8859_9=m
1482CONFIG_NLS_ISO8859_13=m
1483CONFIG_NLS_ISO8859_14=m
1484CONFIG_NLS_ISO8859_15=m
1485CONFIG_NLS_KOI8_R=m
1486CONFIG_NLS_KOI8_U=m
1487CONFIG_NLS_UTF8=m
1488# CONFIG_DLM is not set
1489
1490#
1491# Kernel hacking
1492#
1493# CONFIG_PRINTK_TIME is not set
1494CONFIG_ENABLE_WARN_DEPRECATED=y
1495# CONFIG_ENABLE_MUST_CHECK is not set
1496CONFIG_FRAME_WARN=1024
1497# CONFIG_MAGIC_SYSRQ is not set
1498# CONFIG_UNUSED_SYMBOLS is not set
1499# CONFIG_DEBUG_FS is not set
1500# CONFIG_HEADERS_CHECK is not set
1501# CONFIG_DEBUG_KERNEL is not set
1502# CONFIG_DEBUG_BUGVERBOSE is not set
1503CONFIG_FRAME_POINTER=y
1504# CONFIG_SAMPLES is not set
1505# CONFIG_DEBUG_USER is not set
1506
1507#
1508# Security options
1509#
1510# CONFIG_KEYS is not set
1511# CONFIG_SECURITY is not set
1512# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1513CONFIG_CRYPTO=y
1514
1515#
1516# Crypto core or helper
1517#
1518CONFIG_CRYPTO_ALGAPI=m
1519CONFIG_CRYPTO_AEAD=m
1520CONFIG_CRYPTO_BLKCIPHER=m
1521CONFIG_CRYPTO_HASH=m
1522CONFIG_CRYPTO_MANAGER=m
1523CONFIG_CRYPTO_GF128MUL=m
1524CONFIG_CRYPTO_NULL=m
1525CONFIG_CRYPTO_CRYPTD=m
1526CONFIG_CRYPTO_AUTHENC=m
1527CONFIG_CRYPTO_TEST=m
1528
1529#
1530# Authenticated Encryption with Associated Data
1531#
1532# CONFIG_CRYPTO_CCM is not set
1533# CONFIG_CRYPTO_GCM is not set
1534# CONFIG_CRYPTO_SEQIV is not set
1535
1536#
1537# Block modes
1538#
1539CONFIG_CRYPTO_CBC=m
1540# CONFIG_CRYPTO_CTR is not set
1541# CONFIG_CRYPTO_CTS is not set
1542CONFIG_CRYPTO_ECB=m
1543CONFIG_CRYPTO_LRW=m
1544CONFIG_CRYPTO_PCBC=m
1545CONFIG_CRYPTO_XTS=m
1546
1547#
1548# Hash modes
1549#
1550CONFIG_CRYPTO_HMAC=m
1551CONFIG_CRYPTO_XCBC=m
1552
1553#
1554# Digest
1555#
1556CONFIG_CRYPTO_CRC32C=m
1557CONFIG_CRYPTO_MD4=m
1558CONFIG_CRYPTO_MD5=m
1559CONFIG_CRYPTO_MICHAEL_MIC=m
1560CONFIG_CRYPTO_SHA1=m
1561CONFIG_CRYPTO_SHA256=m
1562CONFIG_CRYPTO_SHA512=m
1563CONFIG_CRYPTO_TGR192=m
1564# CONFIG_CRYPTO_WP512 is not set
1565
1566#
1567# Ciphers
1568#
1569CONFIG_CRYPTO_AES=m
1570# CONFIG_CRYPTO_ANUBIS is not set
1571CONFIG_CRYPTO_ARC4=m
1572CONFIG_CRYPTO_BLOWFISH=m
1573# CONFIG_CRYPTO_CAMELLIA is not set
1574CONFIG_CRYPTO_CAST5=m
1575CONFIG_CRYPTO_CAST6=m
1576CONFIG_CRYPTO_DES=m
1577CONFIG_CRYPTO_FCRYPT=m
1578CONFIG_CRYPTO_KHAZAD=m
1579# CONFIG_CRYPTO_SALSA20 is not set
1580CONFIG_CRYPTO_SEED=m
1581CONFIG_CRYPTO_SERPENT=m
1582CONFIG_CRYPTO_TEA=m
1583CONFIG_CRYPTO_TWOFISH=m
1584CONFIG_CRYPTO_TWOFISH_COMMON=m
1585
1586#
1587# Compression
1588#
1589CONFIG_CRYPTO_DEFLATE=m
1590# CONFIG_CRYPTO_LZO is not set
1591CONFIG_CRYPTO_HW=y
1592
1593#
1594# Library routines
1595#
1596CONFIG_BITREVERSE=y
1597# CONFIG_GENERIC_FIND_FIRST_BIT is not set
1598# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1599CONFIG_CRC_CCITT=m
1600CONFIG_CRC16=m
1601# CONFIG_CRC_ITU_T is not set
1602CONFIG_CRC32=y
1603# CONFIG_CRC7 is not set
1604CONFIG_LIBCRC32C=m
1605CONFIG_ZLIB_INFLATE=m
1606CONFIG_ZLIB_DEFLATE=m
1607CONFIG_TEXTSEARCH=y
1608CONFIG_TEXTSEARCH_KMP=m
1609CONFIG_TEXTSEARCH_BM=m
1610CONFIG_TEXTSEARCH_FSM=m
1611CONFIG_PLIST=y
1612CONFIG_HAS_IOMEM=y
1613CONFIG_HAS_IOPORT=y
1614CONFIG_HAS_DMA=y
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index eb9092ca8008..1d296fc8494e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o
28obj-$(CONFIG_ATAGS_PROC) += atags.o 28obj-$(CONFIG_ATAGS_PROC) += atags.o
29obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o 29obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
30obj-$(CONFIG_ARM_THUMBEE) += thumbee.o 30obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
31obj-$(CONFIG_KGDB) += kgdb.o
31 32
32obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o 33obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
33AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 34AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
new file mode 100644
index 000000000000..aaffaecffcd1
--- /dev/null
+++ b/arch/arm/kernel/kgdb.c
@@ -0,0 +1,201 @@
1/*
2 * arch/arm/kernel/kgdb.c
3 *
4 * ARM KGDB support
5 *
6 * Copyright (c) 2002-2004 MontaVista Software, Inc
7 * Copyright (c) 2008 Wind River Systems, Inc.
8 *
9 * Authors: George Davis <davis_g@mvista.com>
10 * Deepak Saxena <dsaxena@plexity.net>
11 */
12#include <linux/kgdb.h>
13#include <asm/traps.h>
14
15/* Make a local copy of the registers passed into the handler (bletch) */
16void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
17{
18 int regno;
19
20 /* Initialize all to zero. */
21 for (regno = 0; regno < GDB_MAX_REGS; regno++)
22 gdb_regs[regno] = 0;
23
24 gdb_regs[_R0] = kernel_regs->ARM_r0;
25 gdb_regs[_R1] = kernel_regs->ARM_r1;
26 gdb_regs[_R2] = kernel_regs->ARM_r2;
27 gdb_regs[_R3] = kernel_regs->ARM_r3;
28 gdb_regs[_R4] = kernel_regs->ARM_r4;
29 gdb_regs[_R5] = kernel_regs->ARM_r5;
30 gdb_regs[_R6] = kernel_regs->ARM_r6;
31 gdb_regs[_R7] = kernel_regs->ARM_r7;
32 gdb_regs[_R8] = kernel_regs->ARM_r8;
33 gdb_regs[_R9] = kernel_regs->ARM_r9;
34 gdb_regs[_R10] = kernel_regs->ARM_r10;
35 gdb_regs[_FP] = kernel_regs->ARM_fp;
36 gdb_regs[_IP] = kernel_regs->ARM_ip;
37 gdb_regs[_SPT] = kernel_regs->ARM_sp;
38 gdb_regs[_LR] = kernel_regs->ARM_lr;
39 gdb_regs[_PC] = kernel_regs->ARM_pc;
40 gdb_regs[_CPSR] = kernel_regs->ARM_cpsr;
41}
42
43/* Copy local gdb registers back to kgdb regs, for later copy to kernel */
44void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
45{
46 kernel_regs->ARM_r0 = gdb_regs[_R0];
47 kernel_regs->ARM_r1 = gdb_regs[_R1];
48 kernel_regs->ARM_r2 = gdb_regs[_R2];
49 kernel_regs->ARM_r3 = gdb_regs[_R3];
50 kernel_regs->ARM_r4 = gdb_regs[_R4];
51 kernel_regs->ARM_r5 = gdb_regs[_R5];
52 kernel_regs->ARM_r6 = gdb_regs[_R6];
53 kernel_regs->ARM_r7 = gdb_regs[_R7];
54 kernel_regs->ARM_r8 = gdb_regs[_R8];
55 kernel_regs->ARM_r9 = gdb_regs[_R9];
56 kernel_regs->ARM_r10 = gdb_regs[_R10];
57 kernel_regs->ARM_fp = gdb_regs[_FP];
58 kernel_regs->ARM_ip = gdb_regs[_IP];
59 kernel_regs->ARM_sp = gdb_regs[_SPT];
60 kernel_regs->ARM_lr = gdb_regs[_LR];
61 kernel_regs->ARM_pc = gdb_regs[_PC];
62 kernel_regs->ARM_cpsr = gdb_regs[_CPSR];
63}
64
65void
66sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
67{
68 struct pt_regs *thread_regs;
69 int regno;
70
71 /* Just making sure... */
72 if (task == NULL)
73 return;
74
75 /* Initialize to zero */
76 for (regno = 0; regno < GDB_MAX_REGS; regno++)
77 gdb_regs[regno] = 0;
78
79 /* Otherwise, we have only some registers from switch_to() */
80 thread_regs = task_pt_regs(task);
81 gdb_regs[_R0] = thread_regs->ARM_r0;
82 gdb_regs[_R1] = thread_regs->ARM_r1;
83 gdb_regs[_R2] = thread_regs->ARM_r2;
84 gdb_regs[_R3] = thread_regs->ARM_r3;
85 gdb_regs[_R4] = thread_regs->ARM_r4;
86 gdb_regs[_R5] = thread_regs->ARM_r5;
87 gdb_regs[_R6] = thread_regs->ARM_r6;
88 gdb_regs[_R7] = thread_regs->ARM_r7;
89 gdb_regs[_R8] = thread_regs->ARM_r8;
90 gdb_regs[_R9] = thread_regs->ARM_r9;
91 gdb_regs[_R10] = thread_regs->ARM_r10;
92 gdb_regs[_FP] = thread_regs->ARM_fp;
93 gdb_regs[_IP] = thread_regs->ARM_ip;
94 gdb_regs[_SPT] = thread_regs->ARM_sp;
95 gdb_regs[_LR] = thread_regs->ARM_lr;
96 gdb_regs[_PC] = thread_regs->ARM_pc;
97 gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
98}
99
100static int compiled_break;
101
102int kgdb_arch_handle_exception(int exception_vector, int signo,
103 int err_code, char *remcom_in_buffer,
104 char *remcom_out_buffer,
105 struct pt_regs *linux_regs)
106{
107 unsigned long addr;
108 char *ptr;
109
110 switch (remcom_in_buffer[0]) {
111 case 'D':
112 case 'k':
113 case 'c':
114 kgdb_contthread = NULL;
115
116 /*
117 * Try to read optional parameter, pc unchanged if no parm.
118 * If this was a compiled breakpoint, we need to move
119 * to the next instruction or we will just breakpoint
120 * over and over again.
121 */
122 ptr = &remcom_in_buffer[1];
123 if (kgdb_hex2long(&ptr, &addr))
124 linux_regs->ARM_pc = addr;
125 else if (compiled_break == 1)
126 linux_regs->ARM_pc += 4;
127
128 compiled_break = 0;
129
130 return 0;
131 }
132
133 return -1;
134}
135
136static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
137{
138 kgdb_handle_exception(1, SIGTRAP, 0, regs);
139
140 return 0;
141}
142
143static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
144{
145 compiled_break = 1;
146 kgdb_handle_exception(1, SIGTRAP, 0, regs);
147
148 return 0;
149}
150
151static struct undef_hook kgdb_brkpt_hook = {
152 .instr_mask = 0xffffffff,
153 .instr_val = KGDB_BREAKINST,
154 .fn = kgdb_brk_fn
155};
156
157static struct undef_hook kgdb_compiled_brkpt_hook = {
158 .instr_mask = 0xffffffff,
159 .instr_val = KGDB_COMPILED_BREAK,
160 .fn = kgdb_compiled_brk_fn
161};
162
163/**
164 * kgdb_arch_init - Perform any architecture specific initialization.
165 *
166 * This function will handle the initialization of any architecture
167 * specific callbacks.
168 */
169int kgdb_arch_init(void)
170{
171 register_undef_hook(&kgdb_brkpt_hook);
172 register_undef_hook(&kgdb_compiled_brkpt_hook);
173
174 return 0;
175}
176
177/**
178 * kgdb_arch_exit - Perform any architecture specific uninitialization.
179 *
180 * This function will handle the uninitialization of any architecture
181 * specific callbacks, for dynamic registration and unregistration.
182 */
183void kgdb_arch_exit(void)
184{
185 unregister_undef_hook(&kgdb_brkpt_hook);
186 unregister_undef_hook(&kgdb_compiled_brkpt_hook);
187}
188
189/*
190 * Register our undef instruction hooks with ARM undef core.
191 * We register a hook specifically looking for the KGDB break instruction
192 * and we handle the normal undef case within the do_undefinstr
193 * handler.
194 */
195struct kgdb_arch arch_kgdb_ops = {
196#ifndef __ARMEB__
197 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
198#else /* ! __ARMEB__ */
199 .gdb_bpt_instr = {0xe7, 0xff, 0xde, 0xfe}
200#endif
201};
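
The two undef_hook entries above distinguish the dynamic breakpoint that the KGDB core plants through gdb_bpt_instr (KGDB_BREAKINST) from the compiled-in breakpoint (KGDB_COMPILED_BREAK); the compiled_break flag is what lets the 'c'/'D'/'k' handler advance ARM_pc past a breakpoint that is part of the kernel image. A minimal sketch of how kernel code would exercise that compiled-break path, assuming the generic KGDB core's kgdb_breakpoint() helper and an attached debugger (this sketch is not part of the patch):

#include <linux/kgdb.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init kgdb_break_demo_init(void)
{
	printk(KERN_INFO "dropping into kgdb\n");
	/* emits the compiled break instruction, handled by kgdb_compiled_brk_fn() */
	kgdb_breakpoint();
	return 0;
}
late_initcall(kgdb_break_demo_init);
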
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 5ee39e10c8d1..d28513f14d05 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -296,8 +296,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
296 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 296 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
297 297
298 INIT_HLIST_HEAD(&empty_rp); 298 INIT_HLIST_HEAD(&empty_rp);
299 spin_lock_irqsave(&kretprobe_lock, flags); 299 kretprobe_hash_lock(current, &head, &flags);
300 head = kretprobe_inst_table_head(current);
301 300
302 /* 301 /*
303 * It is possible to have multiple instances associated with a given 302 * It is possible to have multiple instances associated with a given
@@ -337,7 +336,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
337 } 336 }
338 337
339 kretprobe_assert(ri, orig_ret_address, trampoline_address); 338 kretprobe_assert(ri, orig_ret_address, trampoline_address);
340 spin_unlock_irqrestore(&kretprobe_lock, flags); 339 kretprobe_hash_unlock(current, &flags);
341 340
342 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 341 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
343 hlist_del(&ri->hlist); 342 hlist_del(&ri->hlist);
@@ -347,7 +346,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
347 return (void *)orig_ret_address; 346 return (void *)orig_ret_address;
348} 347}
349 348
350/* Called with kretprobe_lock held. */
351void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 349void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
352 struct pt_regs *regs) 350 struct pt_regs *regs)
353{ 351{
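
This hunk moves the ARM kretprobe trampoline off the old global kretprobe_lock and onto the per-task hashed locking that the generic kprobes code now provides. A short sketch of the pattern, assuming the kretprobe_hash_lock()/kretprobe_hash_unlock() signatures from kernel/kprobes.c (the instance walk itself is elided):

#include <linux/kprobes.h>
#include <linux/sched.h>

static void kretprobe_walk_sketch(void)
{
	struct hlist_head *head;
	unsigned long flags;

	kretprobe_hash_lock(current, &head, &flags);
	/* walk 'head' and collect the kretprobe_instances that belong to 'current' */
	kretprobe_hash_unlock(current, &flags);
}
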
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 79b7e5cf5416..a68259a0cccd 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/moduleloader.h> 14#include <linux/moduleloader.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/mm.h>
16#include <linux/elf.h> 17#include <linux/elf.h>
17#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 199b3680118b..89bfded70a1f 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -162,7 +162,7 @@ void cpu_idle(void)
162 if (!idle) 162 if (!idle)
163 idle = default_idle; 163 idle = default_idle;
164 leds_event(led_idle_start); 164 leds_event(led_idle_start);
165 tick_nohz_stop_sched_tick(); 165 tick_nohz_stop_sched_tick(1);
166 while (!need_resched()) 166 while (!need_resched())
167 idle(); 167 idle();
168 leds_event(led_idle_end); 168 leds_event(led_idle_end);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b7b0720bc1bb..38f0e7940a13 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -36,6 +36,7 @@
36#include <asm/mach/arch.h> 36#include <asm/mach/arch.h>
37#include <asm/mach/irq.h> 37#include <asm/mach/irq.h>
38#include <asm/mach/time.h> 38#include <asm/mach/time.h>
39#include <asm/traps.h>
39 40
40#include "compat.h" 41#include "compat.h"
41#include "atags.h" 42#include "atags.h"
@@ -853,6 +854,7 @@ void __init setup_arch(char **cmdline_p)
853 conswitchp = &dummy_con; 854 conswitchp = &dummy_con;
854#endif 855#endif
855#endif 856#endif
857 early_trap_init();
856} 858}
857 859
858 860
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5595fdd75e82..7277aef83098 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -708,6 +708,11 @@ EXPORT_SYMBOL(abort);
708 708
709void __init trap_init(void) 709void __init trap_init(void)
710{ 710{
711 return;
712}
713
714void __init early_trap_init(void)
715{
711 unsigned long vectors = CONFIG_VECTORS_BASE; 716 unsigned long vectors = CONFIG_VECTORS_BASE;
712 extern char __stubs_start[], __stubs_end[]; 717 extern char __stubs_start[], __stubs_end[];
713 extern char __vectors_start[], __vectors_end[]; 718 extern char __vectors_start[], __vectors_end[];
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 2741063bf361..28f164ea4726 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -17,6 +17,7 @@
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/f75375s.h> 19#include <linux/f75375s.h>
20#include <linux/leds-pca9532.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/pci.h> 23#include <linux/pci.h>
@@ -206,6 +207,53 @@ static struct f75375s_platform_data n2100_f75375s = {
206 .pwm_enable = { 0, 0 }, 207 .pwm_enable = { 0, 0 },
207}; 208};
208 209
210static struct pca9532_platform_data n2100_leds = {
211 .leds = {
212 { .name = "n2100:red:satafail0",
213 .state = PCA9532_OFF,
214 .type = PCA9532_TYPE_LED,
215 },
216 { .name = "n2100:red:satafail1",
217 .state = PCA9532_OFF,
218 .type = PCA9532_TYPE_LED,
219 },
220 { .name = "n2100:blue:usb",
221 .state = PCA9532_OFF,
222 .type = PCA9532_TYPE_LED,
223 },
224 { .type = PCA9532_TYPE_NONE },
225
226 { .type = PCA9532_TYPE_NONE },
227 { .type = PCA9532_TYPE_NONE },
228 { .type = PCA9532_TYPE_NONE },
229 { .name = "n2100:red:usb",
230 .state = PCA9532_OFF,
231 .type = PCA9532_TYPE_LED,
232 },
233
234 { .type = PCA9532_TYPE_NONE }, /* power OFF gpio */
235 { .type = PCA9532_TYPE_NONE }, /* reset gpio */
236 { .type = PCA9532_TYPE_NONE },
237 { .type = PCA9532_TYPE_NONE },
238
239 { .type = PCA9532_TYPE_NONE },
240 { .name = "n2100:orange:system",
241 .state = PCA9532_OFF,
242 .type = PCA9532_TYPE_LED,
243 },
244 { .name = "n2100:red:system",
245 .state = PCA9532_OFF,
246 .type = PCA9532_TYPE_LED,
247 },
248 { .name = "N2100 beeper",
249 .state = PCA9532_OFF,
250 .type = PCA9532_TYPE_N2100_BEEP,
251 },
252 },
253 .psc = { 0, 0 },
254 .pwm = { 0, 0 },
255};
256
209static struct i2c_board_info __initdata n2100_i2c_devices[] = { 257static struct i2c_board_info __initdata n2100_i2c_devices[] = {
210 { 258 {
211 I2C_BOARD_INFO("rs5c372b", 0x32), 259 I2C_BOARD_INFO("rs5c372b", 0x32),
@@ -214,6 +262,10 @@ static struct i2c_board_info __initdata n2100_i2c_devices[] = {
214 I2C_BOARD_INFO("f75375", 0x2e), 262 I2C_BOARD_INFO("f75375", 0x2e),
215 .platform_data = &n2100_f75375s, 263 .platform_data = &n2100_f75375s,
216 }, 264 },
265 {
266 I2C_BOARD_INFO("pca9532", 0x60),
267 .platform_data = &n2100_leds,
268 },
217}; 269};
218 270
219/* 271/*
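
The pca9532_platform_data added here names the LEDs that are actually wired on the N2100 and marks the remaining PCA9532 pins (power-off, reset, unused) as PCA9532_TYPE_NONE, with a dedicated entry for the beeper; the chip sits at I2C address 0x60 in the board-info table above. A sketch of how such a table is handed to the I2C core, assuming the usual i2c_register_board_info() call (bus number 0 is illustrative; the actual registration already exists elsewhere in the n2100 board code and is not part of this hunk):

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>

static void __init n2100_i2c_sketch(void)
{
	/* declare the devices before the bus is instantiated */
	i2c_register_board_info(0, n2100_i2c_devices,
				ARRAY_SIZE(n2100_i2c_devices));
}
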
diff --git a/arch/arm/mach-ns9xxx/clock.c b/arch/arm/mach-ns9xxx/clock.c
index f8639161068f..44ed20d4a388 100644
--- a/arch/arm/mach-ns9xxx/clock.c
+++ b/arch/arm/mach-ns9xxx/clock.c
@@ -14,8 +14,8 @@
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/semaphore.h>
17 18
18#include <asm/semaphore.h>
19#include "clock.h" 19#include "clock.h"
20 20
21static LIST_HEAD(clocks); 21static LIST_HEAD(clocks);
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 914bb33dab92..e8ee7ec9ff6d 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -16,18 +16,24 @@ config CPU_PXA310
16config CPU_PXA320 16config CPU_PXA320
17 bool "PXA320 (codename Monahans-P)" 17 bool "PXA320 (codename Monahans-P)"
18 18
19config CPU_PXA930
20 bool "PXA930 (codename Tavor-P)"
21
19endmenu 22endmenu
20 23
21endif 24endif
22 25
23menu "Select target boards"
24
25config ARCH_GUMSTIX 26config ARCH_GUMSTIX
26 bool "Gumstix XScale boards" 27 bool "Gumstix XScale boards"
27 help 28 help
28 Say Y here if you intend to run this kernel on a 29 Say Y here if you intend to run this kernel on a
29 Gumstix Full Function Miniature Computer. 30 Gumstix Full Function Miniature Computer.
30 31
32config MACH_GUMSTIX_F
33 bool "Basix, Connex, ws-200ax, ws-400ax systems"
34 depends on ARCH_GUMSTIX
35 select PXA25x
36
31config ARCH_LUBBOCK 37config ARCH_LUBBOCK
32 bool "Intel DBPXA250 Development Platform" 38 bool "Intel DBPXA250 Development Platform"
33 select PXA25x 39 select PXA25x
@@ -58,6 +64,57 @@ config PXA_SHARPSL
58 SL-C3000 (Spitz), SL-C3100 (Borzoi) or SL-C6000x (Tosa) 64 SL-C3000 (Spitz), SL-C3100 (Borzoi) or SL-C6000x (Tosa)
59 handheld computer. 65 handheld computer.
60 66
67config MACH_POODLE
68 bool "Enable Sharp SL-5600 (Poodle) Support"
69 depends on PXA_SHARPSL
70 select PXA25x
71 select SHARP_LOCOMO
72 select PXA_SSP
73
74config MACH_CORGI
75 bool "Enable Sharp SL-C700 (Corgi) Support"
76 depends on PXA_SHARPSL
77 select PXA25x
78 select PXA_SHARP_C7xx
79
80config MACH_SHEPHERD
81 bool "Enable Sharp SL-C750 (Shepherd) Support"
82 depends on PXA_SHARPSL
83 select PXA25x
84 select PXA_SHARP_C7xx
85
86config MACH_HUSKY
87 bool "Enable Sharp SL-C760 (Husky) Support"
88 depends on PXA_SHARPSL
89 select PXA25x
90 select PXA_SHARP_C7xx
91
92config MACH_AKITA
93 bool "Enable Sharp SL-1000 (Akita) Support"
94 depends on PXA_SHARPSL
95 select PXA27x
96 select PXA_SHARP_Cxx00
97 select MACH_SPITZ
98 select I2C
99 select I2C_PXA
100
101config MACH_SPITZ
102 bool "Enable Sharp Zaurus SL-3000 (Spitz) Support"
103 depends on PXA_SHARPSL
104 select PXA27x
105 select PXA_SHARP_Cxx00
106
107config MACH_BORZOI
108 bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support"
109 depends on PXA_SHARPSL
110 select PXA27x
111 select PXA_SHARP_Cxx00
112
113config MACH_TOSA
114 bool "Enable Sharp SL-6000x (Tosa) Support"
115 depends on PXA_SHARPSL
116 select PXA25x
117
61config ARCH_PXA_ESERIES 118config ARCH_PXA_ESERIES
62 bool "PXA based Toshiba e-series PDAs" 119 bool "PXA based Toshiba e-series PDAs"
63 select PXA25x 120 select PXA25x
@@ -70,10 +127,19 @@ config MACH_E330
70 Say Y here if you intend to run this kernel on a Toshiba 127 Say Y here if you intend to run this kernel on a Toshiba
71 e330 family PDA. 128 e330 family PDA.
72 129
130config MACH_E350
131 bool "Toshiba e350"
132 default y
133 depends on ARCH_PXA_ESERIES
134 help
135 Say Y here if you intend to run this kernel on a Toshiba
136 e350 family PDA.
137
73config MACH_E740 138config MACH_E740
74 bool "Toshiba e740" 139 bool "Toshiba e740"
75 default y 140 default y
76 depends on ARCH_PXA_ESERIES 141 depends on ARCH_PXA_ESERIES
142 select FB_W100
77 help 143 help
78 Say Y here if you intend to run this kernel on a Toshiba 144 Say Y here if you intend to run this kernel on a Toshiba
79 e740 family PDA. 145 e740 family PDA.
@@ -82,6 +148,7 @@ config MACH_E750
82 bool "Toshiba e750" 148 bool "Toshiba e750"
83 default y 149 default y
84 depends on ARCH_PXA_ESERIES 150 depends on ARCH_PXA_ESERIES
151 select FB_W100
85 help 152 help
86 Say Y here if you intend to run this kernel on a Toshiba 153 Say Y here if you intend to run this kernel on a Toshiba
87 e750 family PDA. 154 e750 family PDA.
@@ -98,6 +165,7 @@ config MACH_E800
98 bool "Toshiba e800" 165 bool "Toshiba e800"
99 default y 166 default y
100 depends on ARCH_PXA_ESERIES 167 depends on ARCH_PXA_ESERIES
168 select FB_W100
101 help 169 help
102 Say Y here if you intend to run this kernel on a Toshiba 170 Say Y here if you intend to run this kernel on a Toshiba
103 e800 family PDA. 171 e800 family PDA.
@@ -106,6 +174,10 @@ config MACH_TRIZEPS4
106 bool "Keith und Koep Trizeps4 DIMM-Module" 174 bool "Keith und Koep Trizeps4 DIMM-Module"
107 select PXA27x 175 select PXA27x
108 176
177config MACH_TRIZEPS4_CONXS
178 bool "ConXS Eval Board"
179 depends on MACH_TRIZEPS4
180
109config MACH_EM_X270 181config MACH_EM_X270
110 bool "CompuLab EM-x270 platform" 182 bool "CompuLab EM-x270 platform"
111 select PXA27x 183 select PXA27x
@@ -115,7 +187,7 @@ config MACH_COLIBRI
115 select PXA27x 187 select PXA27x
116 188
117config MACH_ZYLONITE 189config MACH_ZYLONITE
118 bool "PXA3xx Development Platform" 190 bool "PXA3xx Development Platform (aka Zylonite)"
119 select PXA3xx 191 select PXA3xx
120 select HAVE_PWM 192 select HAVE_PWM
121 193
@@ -124,6 +196,16 @@ config MACH_LITTLETON
124 select PXA3xx 196 select PXA3xx
125 select PXA_SSP 197 select PXA_SSP
126 198
199config MACH_TAVOREVB
200 bool "PXA930 Evaluation Board (aka TavorEVB)"
201 select PXA3xx
202 select PXA930
203
204config MACH_SAAR
205 bool "PXA930 Handheld Platform (aka SAAR)"
206 select PXA3xx
207 select PXA930
208
127config MACH_ARMCORE 209config MACH_ARMCORE
128 bool "CompuLab CM-X270 modules" 210 bool "CompuLab CM-X270 modules"
129 select PXA27x 211 select PXA27x
@@ -131,7 +213,6 @@ config MACH_ARMCORE
131 213
132config MACH_MAGICIAN 214config MACH_MAGICIAN
133 bool "Enable HTC Magician Support" 215 bool "Enable HTC Magician Support"
134 depends on ARCH_PXA
135 select PXA27x 216 select PXA27x
136 select IWMMXT 217 select IWMMXT
137 218
@@ -139,18 +220,26 @@ config MACH_PCM027
139 bool "Phytec phyCORE-PXA270 CPU module (PCM-027)" 220 bool "Phytec phyCORE-PXA270 CPU module (PCM-027)"
140 select PXA27x 221 select PXA27x
141 select IWMMXT 222 select IWMMXT
223 select PXA_SSP
142 224
143endmenu 225config ARCH_PXA_PALM
226 bool "PXA based Palm PDAs"
227 select HAVE_PWM
144 228
145choice 229config MACH_PALMTX
146 prompt "Used baseboard" 230 bool "Palm T|X"
147 depends on MACH_PCM027 231 default y
232 depends on ARCH_PXA_PALM
233 select PXA27x
234 select IWMMXT
235 help
236 Say Y here if you intend to run this kernel on a Palm T|X
237 handheld computer.
148 238
149config MACH_PCM990_BASEBOARD 239config MACH_PCM990_BASEBOARD
150 bool "PHYTEC PCM-990 development board" 240 bool "PHYTEC PCM-990 development board"
151 select HAVE_PWM 241 select HAVE_PWM
152 242 depends on MACH_PCM027
153endchoice
154 243
155choice 244choice
156 prompt "display on pcm990" 245 prompt "display on pcm990"
@@ -167,88 +256,45 @@ config PCM990_DISPLAY_NONE
167 256
168endchoice 257endchoice
169 258
170if ARCH_GUMSTIX
171
172choice
173 prompt "Select target Gumstix board"
174
175config MACH_GUMSTIX_F
176 bool "Basix, Connex, ws-200ax, ws-400ax systems"
177 select PXA25x
178
179endchoice
180
181endif
182 259
260config PXA_EZX
261 bool "Motorola EZX Platform"
262 select PXA27x
263 select IWMMXT
264 select HAVE_PWM
183 265
184if MACH_TRIZEPS4 266config MACH_EZX_A780
267 bool "Motorola EZX A780"
268 default y
269 depends on PXA_EZX
185 270
186choice 271config MACH_EZX_E680
187 prompt "Select base board for Trizeps 4 module" 272 bool "Motorola EZX E680"
273 default y
274 depends on PXA_EZX
188 275
189config MACH_TRIZEPS4_CONXS 276config MACH_EZX_A1200
190 bool "ConXS Eval Board" 277 bool "Motorola EZX A1200"
278 default y
279 depends on PXA_EZX
191 280
192config MACH_TRIZEPS4_ANY 281config MACH_EZX_A910
193 bool "another Board" 282 bool "Motorola EZX A910"
283 default y
284 depends on PXA_EZX
194 285
195endchoice 286config MACH_EZX_E6
287 bool "Motorola EZX E6"
288 default y
289 depends on PXA_EZX
196 290
197endif 291config MACH_EZX_E2
292 bool "Motorola EZX E2"
293 default y
294 depends on PXA_EZX
198 295
199endmenu 296endmenu
200 297
201config MACH_POODLE
202 bool "Enable Sharp SL-5600 (Poodle) Support"
203 depends on PXA_SHARPSL
204 select PXA25x
205 select SHARP_LOCOMO
206 select PXA_SSP
207
208config MACH_CORGI
209 bool "Enable Sharp SL-C700 (Corgi) Support"
210 depends on PXA_SHARPSL
211 select PXA25x
212 select PXA_SHARP_C7xx
213
214config MACH_SHEPHERD
215 bool "Enable Sharp SL-C750 (Shepherd) Support"
216 depends on PXA_SHARPSL
217 select PXA25x
218 select PXA_SHARP_C7xx
219
220config MACH_HUSKY
221 bool "Enable Sharp SL-C760 (Husky) Support"
222 depends on PXA_SHARPSL
223 select PXA25x
224 select PXA_SHARP_C7xx
225
226config MACH_AKITA
227 bool "Enable Sharp SL-1000 (Akita) Support"
228 depends on PXA_SHARPSL
229 select PXA27x
230 select PXA_SHARP_Cxx00
231 select MACH_SPITZ
232 select I2C
233 select I2C_PXA
234
235config MACH_SPITZ
236 bool "Enable Sharp Zaurus SL-3000 (Spitz) Support"
237 depends on PXA_SHARPSL
238 select PXA27x
239 select PXA_SHARP_Cxx00
240
241config MACH_BORZOI
242 bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support"
243 depends on PXA_SHARPSL
244 select PXA27x
245 select PXA_SHARP_Cxx00
246
247config MACH_TOSA
248 bool "Enable Sharp SL-6000x (Tosa) Support"
249 depends on PXA_SHARPSL
250 select PXA25x
251
252config PXA25x 298config PXA25x
253 bool 299 bool
254 help 300 help
@@ -288,4 +334,13 @@ config PXA_PWM
288 default BACKLIGHT_PWM 334 default BACKLIGHT_PWM
289 help 335 help
290 Enable support for PXA2xx/PXA3xx PWM controllers 336 Enable support for PXA2xx/PXA3xx PWM controllers
337
338config TOSA_BT
339 tristate "Control the state of the built-in Bluetooth chip on Sharp SL-6000"
340 depends on MACH_TOSA
341 select RFKILL
342 help
343 This is a simple driver that is able to control
344 the state of the built-in Bluetooth chip on Tosa.
345
291endif 346endif
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index c4dfbe87fc4e..99ecbe7f8506 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -4,7 +4,7 @@
4 4
5# Common support (must be linked before board specific support) 5# Common support (must be linked before board specific support)
6obj-y += clock.o devices.o generic.o irq.o dma.o \ 6obj-y += clock.o devices.o generic.o irq.o dma.o \
7 time.o gpio.o 7 time.o gpio.o reset.o
8obj-$(CONFIG_PM) += pm.o sleep.o standby.o 8obj-$(CONFIG_PM) += pm.o sleep.o standby.o
9obj-$(CONFIG_CPU_FREQ) += cpu-pxa.o 9obj-$(CONFIG_CPU_FREQ) += cpu-pxa.o
10 10
@@ -18,6 +18,7 @@ obj-$(CONFIG_PXA27x) += mfp-pxa2xx.o pxa2xx.o pxa27x.o
18obj-$(CONFIG_PXA3xx) += mfp-pxa3xx.o pxa3xx.o smemc.o 18obj-$(CONFIG_PXA3xx) += mfp-pxa3xx.o pxa3xx.o smemc.o
19obj-$(CONFIG_CPU_PXA300) += pxa300.o 19obj-$(CONFIG_CPU_PXA300) += pxa300.o
20obj-$(CONFIG_CPU_PXA320) += pxa320.o 20obj-$(CONFIG_CPU_PXA320) += pxa320.o
21obj-$(CONFIG_CPU_PXA930) += pxa930.o
21 22
22# Specific board support 23# Specific board support
23obj-$(CONFIG_ARCH_GUMSTIX) += gumstix.o 24obj-$(CONFIG_ARCH_GUMSTIX) += gumstix.o
@@ -36,7 +37,12 @@ obj-$(CONFIG_MACH_PCM990_BASEBOARD) += pcm990-baseboard.o
36obj-$(CONFIG_MACH_TOSA) += tosa.o 37obj-$(CONFIG_MACH_TOSA) += tosa.o
37obj-$(CONFIG_MACH_EM_X270) += em-x270.o 38obj-$(CONFIG_MACH_EM_X270) += em-x270.o
38obj-$(CONFIG_MACH_MAGICIAN) += magician.o 39obj-$(CONFIG_MACH_MAGICIAN) += magician.o
39obj-$(CONFIG_ARCH_PXA_ESERIES) += eseries.o 40obj-$(CONFIG_ARCH_PXA_ESERIES) += eseries.o eseries_udc.o
41obj-$(CONFIG_MACH_E740) += e740_lcd.o
42obj-$(CONFIG_MACH_E750) += e750_lcd.o
43obj-$(CONFIG_MACH_E400) += e400_lcd.o
44obj-$(CONFIG_MACH_E800) += e800_lcd.o
45obj-$(CONFIG_MACH_PALMTX) += palmtx.o
40 46
41ifeq ($(CONFIG_MACH_ZYLONITE),y) 47ifeq ($(CONFIG_MACH_ZYLONITE),y)
42 obj-y += zylonite.o 48 obj-y += zylonite.o
@@ -44,8 +50,11 @@ ifeq ($(CONFIG_MACH_ZYLONITE),y)
44 obj-$(CONFIG_CPU_PXA320) += zylonite_pxa320.o 50 obj-$(CONFIG_CPU_PXA320) += zylonite_pxa320.o
45endif 51endif
46obj-$(CONFIG_MACH_LITTLETON) += littleton.o 52obj-$(CONFIG_MACH_LITTLETON) += littleton.o
53obj-$(CONFIG_MACH_TAVOREVB) += tavorevb.o
54obj-$(CONFIG_MACH_SAAR) += saar.o
47 55
48obj-$(CONFIG_MACH_ARMCORE) += cm-x270.o 56obj-$(CONFIG_MACH_ARMCORE) += cm-x270.o
57obj-$(CONFIG_PXA_EZX) += ezx.o
49 58
50# Support for blinky lights 59# Support for blinky lights
51led-y := leds.o 60led-y := leds.o
@@ -59,3 +68,5 @@ obj-$(CONFIG_LEDS) += $(led-y)
59ifeq ($(CONFIG_PCI),y) 68ifeq ($(CONFIG_PCI),y)
60obj-$(CONFIG_MACH_ARMCORE) += cm-x270-pci.o 69obj-$(CONFIG_MACH_ARMCORE) += cm-x270-pci.o
61endif 70endif
71
72obj-$(CONFIG_TOSA_BT) += tosa-bt.o
diff --git a/arch/arm/mach-pxa/clock.c b/arch/arm/mach-pxa/clock.c
index b4d04955dcb0..630063ffa6fc 100644
--- a/arch/arm/mach-pxa/clock.c
+++ b/arch/arm/mach-pxa/clock.c
@@ -101,21 +101,6 @@ unsigned long clk_get_rate(struct clk *clk)
101EXPORT_SYMBOL(clk_get_rate); 101EXPORT_SYMBOL(clk_get_rate);
102 102
103 103
104static void clk_gpio27_enable(struct clk *clk)
105{
106 pxa_gpio_mode(GPIO11_3_6MHz_MD);
107}
108
109static void clk_gpio27_disable(struct clk *clk)
110{
111}
112
113static const struct clkops clk_gpio27_ops = {
114 .enable = clk_gpio27_enable,
115 .disable = clk_gpio27_disable,
116};
117
118
119void clk_cken_enable(struct clk *clk) 104void clk_cken_enable(struct clk *clk)
120{ 105{
121 CKEN |= 1 << clk->cken; 106 CKEN |= 1 << clk->cken;
@@ -131,14 +116,6 @@ const struct clkops clk_cken_ops = {
131 .disable = clk_cken_disable, 116 .disable = clk_cken_disable,
132}; 117};
133 118
134static struct clk common_clks[] = {
135 {
136 .name = "GPIO27_CLK",
137 .ops = &clk_gpio27_ops,
138 .rate = 3686400,
139 },
140};
141
142void clks_register(struct clk *clks, size_t num) 119void clks_register(struct clk *clks, size_t num)
143{ 120{
144 int i; 121 int i;
@@ -148,10 +125,3 @@ void clks_register(struct clk *clks, size_t num)
148 list_add(&clks[i].node, &clocks); 125 list_add(&clks[i].node, &clocks);
149 mutex_unlock(&clocks_mutex); 126 mutex_unlock(&clocks_mutex);
150} 127}
151
152static int __init clk_init(void)
153{
154 clks_register(common_clks, ARRAY_SIZE(common_clks));
155 return 0;
156}
157arch_initcall(clk_init);
diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h
index 83cbfaba485d..1ec8f9178aaf 100644
--- a/arch/arm/mach-pxa/clock.h
+++ b/arch/arm/mach-pxa/clock.h
@@ -47,9 +47,42 @@ struct clk {
47 .other = _other, \ 47 .other = _other, \
48 } 48 }
49 49
50#define INIT_CLK(_name, _ops, _rate, _delay, _dev) \
51 { \
52 .name = _name, \
53 .dev = _dev, \
54 .ops = _ops, \
55 .rate = _rate, \
56 .delay = _delay, \
57 }
58
50extern const struct clkops clk_cken_ops; 59extern const struct clkops clk_cken_ops;
51 60
52void clk_cken_enable(struct clk *clk); 61void clk_cken_enable(struct clk *clk);
53void clk_cken_disable(struct clk *clk); 62void clk_cken_disable(struct clk *clk);
54 63
64#ifdef CONFIG_PXA3xx
65#define PXA3xx_CKEN(_name, _cken, _rate, _delay, _dev) \
66 { \
67 .name = _name, \
68 .dev = _dev, \
69 .ops = &clk_pxa3xx_cken_ops, \
70 .rate = _rate, \
71 .cken = CKEN_##_cken, \
72 .delay = _delay, \
73 }
74
75#define PXA3xx_CK(_name, _cken, _ops, _dev) \
76 { \
77 .name = _name, \
78 .dev = _dev, \
79 .ops = _ops, \
80 .cken = CKEN_##_cken, \
81 }
82
83extern const struct clkops clk_pxa3xx_cken_ops;
84extern void clk_pxa3xx_cken_enable(struct clk *);
85extern void clk_pxa3xx_cken_disable(struct clk *);
86#endif
87
55void clks_register(struct clk *clks, size_t num); 88void clks_register(struct clk *clks, size_t num);
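
The new INIT_CLK and PXA3xx_CKEN/PXA3xx_CK helpers make the PXA clock tables declarative: a board or SoC file fills an array of struct clk and hands it to clks_register(). A minimal sketch, where the CKEN bit name, rate, delay and NULL device are illustrative placeholders rather than values taken from the patch (the real PXA3xx tables live in pxa3xx.c):

#include <linux/kernel.h>
#include <linux/init.h>
#include "clock.h"

static struct clk example_clks[] = {
	/* name, CKEN bit, rate (Hz), enable delay, device */
	PXA3xx_CKEN("UARTCLK", FFUART, 14857000, 1, NULL),
};

static int __init example_clk_init(void)
{
	clks_register(example_clks, ARRAY_SIZE(example_clks));
	return 0;
}
arch_initcall(example_clk_init);
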
diff --git a/arch/arm/mach-pxa/cm-x270-pci.c b/arch/arm/mach-pxa/cm-x270-pci.c
index 319c9ff3ab9a..bcf0cde6ccc9 100644
--- a/arch/arm/mach-pxa/cm-x270-pci.c
+++ b/arch/arm/mach-pxa/cm-x270-pci.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Bits taken from various places. 6 * Bits taken from various places.
7 * 7 *
8 * Copyright (C) 2007 Compulab, Ltd. 8 * Copyright (C) 2007, 2008 Compulab, Ltd.
9 * Mike Rapoport <mike@compulab.co.il> 9 * Mike Rapoport <mike@compulab.co.il>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
@@ -19,16 +19,16 @@
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/gpio.h>
22 23
23#include <asm/mach/pci.h> 24#include <asm/mach/pci.h>
24#include <asm/arch/cm-x270.h>
25#include <asm/arch/pxa-regs.h> 25#include <asm/arch/pxa-regs.h>
26#include <asm/arch/pxa2xx-gpio.h>
27#include <asm/mach-types.h> 26#include <asm/mach-types.h>
28 27
29#include <asm/hardware/it8152.h> 28#include <asm/hardware/it8152.h>
30 29
31unsigned long it8152_base_address = CMX270_IT8152_VIRT; 30unsigned long it8152_base_address;
31static int cmx270_it8152_irq_gpio;
32 32
33/* 33/*
34 * Only first 64MB of memory can be accessed via PCI. 34 * Only first 64MB of memory can be accessed via PCI.
@@ -42,7 +42,7 @@ void __init cmx270_pci_adjust_zones(int node, unsigned long *zone_size,
42 unsigned int sz = SZ_64M >> PAGE_SHIFT; 42 unsigned int sz = SZ_64M >> PAGE_SHIFT;
43 43
44 if (machine_is_armcore()) { 44 if (machine_is_armcore()) {
45 pr_info("Adjusting zones for CM-x270\n"); 45 pr_info("Adjusting zones for CM-X270\n");
46 46
47 /* 47 /*
48 * Only adjust if > 64M on current system 48 * Only adjust if > 64M on current system
@@ -60,19 +60,20 @@ void __init cmx270_pci_adjust_zones(int node, unsigned long *zone_size,
60static void cmx270_it8152_irq_demux(unsigned int irq, struct irq_desc *desc) 60static void cmx270_it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
61{ 61{
62 /* clear our parent irq */ 62 /* clear our parent irq */
63 GEDR(GPIO_IT8152_IRQ) = GPIO_bit(GPIO_IT8152_IRQ); 63 GEDR(cmx270_it8152_irq_gpio) = GPIO_bit(cmx270_it8152_irq_gpio);
64 64
65 it8152_irq_demux(irq, desc); 65 it8152_irq_demux(irq, desc);
66} 66}
67 67
68void __cmx270_pci_init_irq(void) 68void __cmx270_pci_init_irq(int irq_gpio)
69{ 69{
70 it8152_init_irq(); 70 it8152_init_irq();
71 pxa_gpio_mode(IRQ_TO_GPIO(GPIO_IT8152_IRQ));
72 set_irq_type(IRQ_GPIO(GPIO_IT8152_IRQ), IRQT_RISING);
73 71
74 set_irq_chained_handler(IRQ_GPIO(GPIO_IT8152_IRQ), 72 cmx270_it8152_irq_gpio = irq_gpio;
75 cmx270_it8152_irq_demux); 73
74 set_irq_type(gpio_to_irq(irq_gpio), IRQT_RISING);
75
76 set_irq_chained_handler(gpio_to_irq(irq_gpio), cmx270_it8152_irq_demux);
76} 77}
77 78
78#ifdef CONFIG_PM 79#ifdef CONFIG_PM
@@ -115,8 +116,8 @@ static int __init cmx270_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
115 116
116 /* 117 /*
117 Here comes the ugly part. The routing is baseboard specific, 118 Here comes the ugly part. The routing is baseboard specific,
118 but defining a platform for each possible base of CM-x270 is 119 but defining a platform for each possible base of CM-X270 is
119 unrealistic. Here we keep mapping for ATXBase and SB-x270. 120 unrealistic. Here we keep mapping for ATXBase and SB-X270.
120 */ 121 */
121 /* ATXBASE PCI slot */ 122 /* ATXBASE PCI slot */
122 if (slot == 7) 123 if (slot == 7)
diff --git a/arch/arm/mach-pxa/cm-x270-pci.h b/arch/arm/mach-pxa/cm-x270-pci.h
index ffe37b66f9a0..48f532f4cb51 100644
--- a/arch/arm/mach-pxa/cm-x270-pci.h
+++ b/arch/arm/mach-pxa/cm-x270-pci.h
@@ -1,13 +1,13 @@
1extern void __cmx270_pci_init_irq(void); 1extern void __cmx270_pci_init_irq(int irq_gpio);
2extern void __cmx270_pci_suspend(void); 2extern void __cmx270_pci_suspend(void);
3extern void __cmx270_pci_resume(void); 3extern void __cmx270_pci_resume(void);
4 4
5#ifdef CONFIG_PCI 5#ifdef CONFIG_PCI
6#define cmx270_pci_init_irq __cmx270_pci_init_irq 6#define cmx270_pci_init_irq(x) __cmx270_pci_init_irq(x)
7#define cmx270_pci_suspend __cmx270_pci_suspend 7#define cmx270_pci_suspend(x) __cmx270_pci_suspend(x)
8#define cmx270_pci_resume __cmx270_pci_resume 8#define cmx270_pci_resume(x) __cmx270_pci_resume(x)
9#else 9#else
10#define cmx270_pci_init_irq() do {} while (0) 10#define cmx270_pci_init_irq(x) do {} while (0)
11#define cmx270_pci_suspend() do {} while (0) 11#define cmx270_pci_suspend(x) do {} while (0)
12#define cmx270_pci_resume() do {} while (0) 12#define cmx270_pci_resume(x) do {} while (0)
13#endif 13#endif
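
With this change the IT8152 interrupt GPIO is no longer baked into the header as a CM-X270 constant; the board file passes it in, and it8152_base_address is now left for the board code to assign as well. A sketch of the resulting call, reusing the GPIO22_IT8152_IRQ value from the cm-x270.c hunk below (the actual call site is in the CM-X270 init code, which these hunks do not show):

#include <linux/init.h>
#include "cm-x270-pci.h"

#define GPIO22_IT8152_IRQ	(22)	/* as defined in cm-x270.c below */

static void __init cmx270_pci_sketch(void)
{
	/* IT8152 companion-chip interrupts arrive on GPIO 22 on CM-X270 */
	cmx270_pci_init_irq(GPIO22_IT8152_IRQ);
}
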
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
index 01b9964acec1..402e807eae54 100644
--- a/arch/arm/mach-pxa/cm-x270.c
+++ b/arch/arm/mach-pxa/cm-x270.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/arch/arm/mach-pxa/cm-x270.c 2 * linux/arch/arm/mach-pxa/cm-x270.c
3 * 3 *
4 * Copyright (C) 2007 CompuLab, Ltd. 4 * Copyright (C) 2007, 2008 CompuLab, Ltd.
5 * Mike Rapoport <mike@compulab.co.il> 5 * Mike Rapoport <mike@compulab.co.il>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -9,44 +9,156 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/types.h>
13#include <linux/pm.h>
14#include <linux/fb.h>
15#include <linux/platform_device.h> 12#include <linux/platform_device.h>
16#include <linux/irq.h>
17#include <linux/sysdev.h> 13#include <linux/sysdev.h>
18#include <linux/io.h> 14#include <linux/irq.h>
19#include <linux/delay.h> 15#include <linux/gpio.h>
20 16
21#include <linux/dm9000.h> 17#include <linux/dm9000.h>
22#include <linux/rtc-v3020.h> 18#include <linux/rtc-v3020.h>
23#include <linux/serial_8250.h>
24
25#include <video/mbxfb.h> 19#include <video/mbxfb.h>
20#include <linux/leds.h>
26 21
27#include <asm/mach/arch.h> 22#include <asm/mach/arch.h>
28#include <asm/mach-types.h> 23#include <asm/mach-types.h>
29#include <asm/mach/map.h> 24#include <asm/mach/map.h>
30 25
31#include <asm/arch/pxa-regs.h>
32#include <asm/arch/pxa2xx-regs.h> 26#include <asm/arch/pxa2xx-regs.h>
33#include <asm/arch/pxa2xx-gpio.h> 27#include <asm/arch/mfp-pxa27x.h>
28#include <asm/arch/pxa-regs.h>
34#include <asm/arch/audio.h> 29#include <asm/arch/audio.h>
35#include <asm/arch/pxafb.h> 30#include <asm/arch/pxafb.h>
36#include <asm/arch/ohci.h> 31#include <asm/arch/ohci.h>
37#include <asm/arch/mmc.h> 32#include <asm/arch/mmc.h>
38#include <asm/arch/bitfield.h> 33#include <asm/arch/bitfield.h>
39#include <asm/arch/cm-x270.h>
40 34
41#include <asm/hardware/it8152.h> 35#include <asm/hardware/it8152.h>
42 36
43#include "generic.h" 37#include "generic.h"
44#include "cm-x270-pci.h" 38#include "cm-x270-pci.h"
45 39
40/* virtual addresses for statically mapped regions */
41#define CMX270_VIRT_BASE (0xe8000000)
42#define CMX270_IT8152_VIRT (CMX270_VIRT_BASE)
43
46#define RTC_PHYS_BASE (PXA_CS1_PHYS + (5 << 22)) 44#define RTC_PHYS_BASE (PXA_CS1_PHYS + (5 << 22))
47#define DM9000_PHYS_BASE (PXA_CS1_PHYS + (6 << 22)) 45#define DM9000_PHYS_BASE (PXA_CS1_PHYS + (6 << 22))
48 46
49static struct resource cmx270_dm9k_resource[] = { 47/* GPIO IRQ usage */
48#define GPIO10_ETHIRQ (10)
49#define GPIO22_IT8152_IRQ (22)
50#define GPIO83_MMC_IRQ (83)
51#define GPIO95_GFXIRQ (95)
52
53#define CMX270_ETHIRQ IRQ_GPIO(GPIO10_ETHIRQ)
54#define CMX270_IT8152_IRQ IRQ_GPIO(GPIO22_IT8152_IRQ)
55#define CMX270_MMC_IRQ IRQ_GPIO(GPIO83_MMC_IRQ)
56#define CMX270_GFXIRQ IRQ_GPIO(GPIO95_GFXIRQ)
57
58/* MMC power enable */
59#define GPIO105_MMC_POWER (105)
60
61static unsigned long cmx270_pin_config[] = {
62 /* AC'97 */
63 GPIO28_AC97_BITCLK,
64 GPIO29_AC97_SDATA_IN_0,
65 GPIO30_AC97_SDATA_OUT,
66 GPIO31_AC97_SYNC,
67 GPIO98_AC97_SYSCLK,
68 GPIO113_AC97_nRESET,
69
70 /* BTUART */
71 GPIO42_BTUART_RXD,
72 GPIO43_BTUART_TXD,
73 GPIO44_BTUART_CTS,
74 GPIO45_BTUART_RTS,
75
76 /* STUART */
77 GPIO46_STUART_RXD,
78 GPIO47_STUART_TXD,
79
80 /* MCI controller */
81 GPIO32_MMC_CLK,
82 GPIO112_MMC_CMD,
83 GPIO92_MMC_DAT_0,
84 GPIO109_MMC_DAT_1,
85 GPIO110_MMC_DAT_2,
86 GPIO111_MMC_DAT_3,
87
88 /* LCD */
89 GPIO58_LCD_LDD_0,
90 GPIO59_LCD_LDD_1,
91 GPIO60_LCD_LDD_2,
92 GPIO61_LCD_LDD_3,
93 GPIO62_LCD_LDD_4,
94 GPIO63_LCD_LDD_5,
95 GPIO64_LCD_LDD_6,
96 GPIO65_LCD_LDD_7,
97 GPIO66_LCD_LDD_8,
98 GPIO67_LCD_LDD_9,
99 GPIO68_LCD_LDD_10,
100 GPIO69_LCD_LDD_11,
101 GPIO70_LCD_LDD_12,
102 GPIO71_LCD_LDD_13,
103 GPIO72_LCD_LDD_14,
104 GPIO73_LCD_LDD_15,
105 GPIO74_LCD_FCLK,
106 GPIO75_LCD_LCLK,
107 GPIO76_LCD_PCLK,
108 GPIO77_LCD_BIAS,
109
110 /* I2C */
111 GPIO117_I2C_SCL,
112 GPIO118_I2C_SDA,
113
114 /* SSP1 */
115 GPIO23_SSP1_SCLK,
116 GPIO24_SSP1_SFRM,
117 GPIO25_SSP1_TXD,
118 GPIO26_SSP1_RXD,
119
120 /* SSP2 */
121 GPIO19_SSP2_SCLK,
122 GPIO14_SSP2_SFRM,
123 GPIO87_SSP2_TXD,
124 GPIO88_SSP2_RXD,
125
126 /* PC Card */
127 GPIO48_nPOE,
128 GPIO49_nPWE,
129 GPIO50_nPIOR,
130 GPIO51_nPIOW,
131 GPIO85_nPCE_1,
132 GPIO54_nPCE_2,
133 GPIO55_nPREG,
134 GPIO56_nPWAIT,
135 GPIO57_nIOIS16,
136
137 /* SDRAM and local bus */
138 GPIO15_nCS_1,
139 GPIO78_nCS_2,
140 GPIO79_nCS_3,
141 GPIO80_nCS_4,
142 GPIO33_nCS_5,
143 GPIO49_nPWE,
144 GPIO18_RDY,
145
146 /* GPIO */
147 GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH,
148 GPIO105_GPIO | MFP_LPM_DRIVE_HIGH, /* MMC/SD power */
149 GPIO53_GPIO, /* PC card reset */
150
151 /* NAND controls */
152 GPIO11_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */
153 GPIO89_GPIO, /* NAND Ready/Busy */
154
155 /* interrupts */
156 GPIO10_GPIO, /* DM9000 interrupt */
157 GPIO83_GPIO, /* MMC card detect */
158};
159
160#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
161static struct resource cmx270_dm9000_resource[] = {
50 [0] = { 162 [0] = {
51 .start = DM9000_PHYS_BASE, 163 .start = DM9000_PHYS_BASE,
52 .end = DM9000_PHYS_BASE + 4, 164 .end = DM9000_PHYS_BASE + 4,
@@ -64,31 +176,45 @@ static struct resource cmx270_dm9k_resource[] = {
64 } 176 }
65}; 177};
66 178
67/* for the moment we limit ourselves to 32bit IO until some 179static struct dm9000_plat_data cmx270_dm9000_platdata = {
68 * better IO routines can be written and tested
69 */
70static struct dm9000_plat_data cmx270_dm9k_platdata = {
71 .flags = DM9000_PLATF_32BITONLY, 180 .flags = DM9000_PLATF_32BITONLY,
72}; 181};
73 182
74/* Ethernet device */ 183static struct platform_device cmx270_dm9000_device = {
75static struct platform_device cmx270_device_dm9k = {
76 .name = "dm9000", 184 .name = "dm9000",
77 .id = 0, 185 .id = 0,
78 .num_resources = ARRAY_SIZE(cmx270_dm9k_resource), 186 .num_resources = ARRAY_SIZE(cmx270_dm9000_resource),
79 .resource = cmx270_dm9k_resource, 187 .resource = cmx270_dm9000_resource,
80 .dev = { 188 .dev = {
81 .platform_data = &cmx270_dm9k_platdata, 189 .platform_data = &cmx270_dm9000_platdata,
82 } 190 }
83}; 191};
84 192
85/* touchscreen controller */ 193static void __init cmx270_init_dm9000(void)
194{
195 platform_device_register(&cmx270_dm9000_device);
196}
197#else
198static inline void cmx270_init_dm9000(void) {}
199#endif
200
201/* UCB1400 touchscreen controller */
202#if defined(CONFIG_TOUCHSCREEN_UCB1400) || defined(CONFIG_TOUCHSCREEN_UCB1400_MODULE)
86static struct platform_device cmx270_ts_device = { 203static struct platform_device cmx270_ts_device = {
87 .name = "ucb1400_ts", 204 .name = "ucb1400_ts",
88 .id = -1, 205 .id = -1,
89}; 206};
90 207
91/* RTC */ 208static void __init cmx270_init_touchscreen(void)
209{
210 platform_device_register(&cmx270_ts_device);
211}
212#else
213static inline void cmx270_init_touchscreen(void) {}
214#endif
215
216/* V3020 RTC */
217#if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
92static struct resource cmx270_v3020_resource[] = { 218static struct resource cmx270_v3020_resource[] = {
93 [0] = { 219 [0] = {
94 .start = RTC_PHYS_BASE, 220 .start = RTC_PHYS_BASE,
@@ -111,28 +237,67 @@ static struct platform_device cmx270_rtc_device = {
111 } 237 }
112}; 238};
113 239
114/* 240static void __init cmx270_init_rtc(void)
115 * CM-X270 LEDs 241{
116 */ 242 platform_device_register(&cmx270_rtc_device);
243}
244#else
245static inline void cmx270_init_rtc(void) {}
246#endif
247
248/* CM-X270 LEDs */
249#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
250static struct gpio_led cmx270_leds[] = {
251 [0] = {
252 .name = "cm-x270:red",
253 .default_trigger = "nand-disk",
254 .gpio = 93,
255 .active_low = 1,
256 },
257 [1] = {
258 .name = "cm-x270:green",
259 .default_trigger = "heartbeat",
260 .gpio = 94,
261 .active_low = 1,
262 },
263};
264
265static struct gpio_led_platform_data cmx270_gpio_led_pdata = {
266 .num_leds = ARRAY_SIZE(cmx270_leds),
267 .leds = cmx270_leds,
268};
269
117static struct platform_device cmx270_led_device = { 270static struct platform_device cmx270_led_device = {
118 .name = "cm-x270-led", 271 .name = "leds-gpio",
119 .id = -1, 272 .id = -1,
273 .dev = {
274 .platform_data = &cmx270_gpio_led_pdata,
275 },
120}; 276};
121 277
278static void __init cmx270_init_leds(void)
279{
280 platform_device_register(&cmx270_led_device);
281}
282#else
283static inline void cmx270_init_leds(void) {}
284#endif
285
122/* 2700G graphics */ 286/* 2700G graphics */
287#if defined(CONFIG_FB_MBX) || defined(CONFIG_FB_MBX_MODULE)
123static u64 fb_dma_mask = ~(u64)0; 288static u64 fb_dma_mask = ~(u64)0;
124 289
125static struct resource cmx270_2700G_resource[] = { 290static struct resource cmx270_2700G_resource[] = {
126 /* frame buffer memory including ODFB and External SDRAM */ 291 /* frame buffer memory including ODFB and External SDRAM */
127 [0] = { 292 [0] = {
128 .start = MARATHON_PHYS, 293 .start = PXA_CS2_PHYS,
129 .end = MARATHON_PHYS + 0x02000000, 294 .end = PXA_CS2_PHYS + 0x01ffffff,
130 .flags = IORESOURCE_MEM, 295 .flags = IORESOURCE_MEM,
131 }, 296 },
132 /* Marathon registers */ 297 /* Marathon registers */
133 [1] = { 298 [1] = {
134 .start = MARATHON_PHYS + 0x03fe0000, 299 .start = PXA_CS2_PHYS + 0x03fe0000,
135 .end = MARATHON_PHYS + 0x03ffffff, 300 .end = PXA_CS2_PHYS + 0x03ffffff,
136 .flags = IORESOURCE_MEM, 301 .flags = IORESOURCE_MEM,
137 }, 302 },
138}; 303};
@@ -200,43 +365,15 @@ static struct platform_device cmx270_2700G = {
200 .id = -1, 365 .id = -1,
201}; 366};
202 367
203static u64 ata_dma_mask = ~(u64)0; 368static void __init cmx270_init_2700G(void)
204 369{
205static struct platform_device cmx270_ata = { 370 platform_device_register(&cmx270_2700G);
206 .name = "pata_cm_x270", 371}
207 .id = -1, 372#else
208 .dev = { 373static inline void cmx270_init_2700G(void) {}
209 .dma_mask = &ata_dma_mask, 374#endif
210 .coherent_dma_mask = 0xffffffff,
211 },
212};
213
214/* platform devices */
215static struct platform_device *platform_devices[] __initdata = {
216 &cmx270_device_dm9k,
217 &cmx270_rtc_device,
218 &cmx270_2700G,
219 &cmx270_led_device,
220 &cmx270_ts_device,
221 &cmx270_ata,
222};
223
224/* Map PCI companion and IDE/General Purpose CS statically */
225static struct map_desc cmx270_io_desc[] __initdata = {
226 [0] = { /* IDE/general purpose space */
227 .virtual = CMX270_IDE104_VIRT,
228 .pfn = __phys_to_pfn(CMX270_IDE104_PHYS),
229 .length = SZ_64M - SZ_8M,
230 .type = MT_DEVICE
231 },
232 [1] = { /* PCI bridge */
233 .virtual = CMX270_IT8152_VIRT,
234 .pfn = __phys_to_pfn(CMX270_IT8152_PHYS),
235 .length = SZ_64M,
236 .type = MT_DEVICE
237 },
238};
239 375
376#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
240/* 377/*
241 Display definitions 378 Display definitions
242 keep these for backwards compatibility, although symbolic names (as 379 keep these for backwards compatibility, although symbolic names (as
@@ -446,7 +583,16 @@ static int __init cmx270_set_display(char *str)
446*/ 583*/
447__setup("monitor=", cmx270_set_display); 584__setup("monitor=", cmx270_set_display);
448 585
586static void __init cmx270_init_display(void)
587{
588 set_pxa_fb_info(cmx270_display);
589}
590#else
591static inline void cmx270_init_display(void) {}
592#endif
593
449/* PXA27x OHCI controller setup */ 594/* PXA27x OHCI controller setup */
595#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
450static int cmx270_ohci_init(struct device *dev) 596static int cmx270_ohci_init(struct device *dev)
451{ 597{
452 /* Set the Power Control Polarity Low */ 598 /* Set the Power Control Polarity Low */
@@ -461,35 +607,37 @@ static struct pxaohci_platform_data cmx270_ohci_platform_data = {
461 .init = cmx270_ohci_init, 607 .init = cmx270_ohci_init,
462}; 608};
463 609
610static void __init cmx270_init_ohci(void)
611{
612 pxa_set_ohci_info(&cmx270_ohci_platform_data);
613}
614#else
615static inline void cmx270_init_ohci(void) {}
616#endif
464 617
618#if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE)
465static int cmx270_mci_init(struct device *dev, 619static int cmx270_mci_init(struct device *dev,
466 irq_handler_t cmx270_detect_int, 620 irq_handler_t cmx270_detect_int,
467 void *data) 621 void *data)
468{ 622{
469 int err; 623 int err;
470 624
471 /* 625 err = gpio_request(GPIO105_MMC_POWER, "MMC/SD power");
472 * setup GPIO for PXA27x MMC controller 626 if (err) {
473 */ 627 dev_warn(dev, "power gpio unavailable\n");
474 pxa_gpio_mode(GPIO32_MMCCLK_MD); 628 return err;
475 pxa_gpio_mode(GPIO112_MMCCMD_MD); 629 }
476 pxa_gpio_mode(GPIO92_MMCDAT0_MD);
477 pxa_gpio_mode(GPIO109_MMCDAT1_MD);
478 pxa_gpio_mode(GPIO110_MMCDAT2_MD);
479 pxa_gpio_mode(GPIO111_MMCDAT3_MD);
480
481 /* SB-X270 uses GPIO105 as SD power enable */
482 pxa_gpio_mode(105 | GPIO_OUT);
483 630
484 /* card detect IRQ on GPIO 83 */ 631 gpio_direction_output(GPIO105_MMC_POWER, 0);
485 pxa_gpio_mode(IRQ_TO_GPIO(CMX270_MMC_IRQ));
486 632
487 err = request_irq(CMX270_MMC_IRQ, cmx270_detect_int, 633 err = request_irq(CMX270_MMC_IRQ, cmx270_detect_int,
488 IRQF_DISABLED | IRQF_TRIGGER_FALLING, 634 IRQF_DISABLED | IRQF_TRIGGER_FALLING,
489 "MMC card detect", data); 635 "MMC card detect", data);
490 if (err) 636 if (err) {
491 printk(KERN_ERR "cmx270_mci_init: MMC/SD: can't" 637 gpio_free(GPIO105_MMC_POWER);
492 " request MMC card detect IRQ\n"); 638 dev_err(dev, "cmx270_mci_init: MMC/SD: can't"
639 " request MMC card detect IRQ\n");
640 }
493 641
494 return err; 642 return err;
495} 643}
@@ -499,17 +647,18 @@ static void cmx270_mci_setpower(struct device *dev, unsigned int vdd)
499 struct pxamci_platform_data *p_d = dev->platform_data; 647 struct pxamci_platform_data *p_d = dev->platform_data;
500 648
501 if ((1 << vdd) & p_d->ocr_mask) { 649 if ((1 << vdd) & p_d->ocr_mask) {
502 printk(KERN_DEBUG "%s: on\n", __func__); 650 dev_dbg(dev, "power on\n");
503 GPCR(105) = GPIO_bit(105); 651 gpio_set_value(GPIO105_MMC_POWER, 0);
504 } else { 652 } else {
505 GPSR(105) = GPIO_bit(105); 653 gpio_set_value(GPIO105_MMC_POWER, 1);
506 printk(KERN_DEBUG "%s: off\n", __func__); 654 dev_dbg(dev, "power off\n");
507 } 655 }
508} 656}
509 657
510static void cmx270_mci_exit(struct device *dev, void *data) 658static void cmx270_mci_exit(struct device *dev, void *data)
511{ 659{
512 free_irq(CMX270_MMC_IRQ, data); 660 free_irq(CMX270_MMC_IRQ, data);
661 gpio_free(GPIO105_MMC_POWER);
513} 662}
514 663
515static struct pxamci_platform_data cmx270_mci_platform_data = { 664static struct pxamci_platform_data cmx270_mci_platform_data = {
@@ -519,6 +668,14 @@ static struct pxamci_platform_data cmx270_mci_platform_data = {
519 .exit = cmx270_mci_exit, 668 .exit = cmx270_mci_exit,
520}; 669};
521 670
671static void __init cmx270_init_mmc(void)
672{
673 pxa_set_mci_info(&cmx270_mci_platform_data);
674}
675#else
676static inline void cmx270_init_mmc(void) {}
677#endif
678
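The MMC hooks above also show the move from direct GPSR/GPCR register writes to generic gpiolib calls. A minimal sketch of that request/configure/use/free lifecycle, assuming a hypothetical GPIO number and label:

#include <linux/gpio.h>

#define BOARD_PWR_GPIO	105	/* hypothetical power-enable line */

static int board_power_setup(void)
{
	int err;

	err = gpio_request(BOARD_PWR_GPIO, "power enable");
	if (err)
		return err;	/* pin invalid or already claimed */

	/* active-low enable in this sketch: driving 0 turns power on */
	return gpio_direction_output(BOARD_PWR_GPIO, 0);
}

static void board_power_teardown(void)
{
	gpio_set_value(BOARD_PWR_GPIO, 1);	/* switch off */
	gpio_free(BOARD_PWR_GPIO);		/* release the line */
}
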
522#ifdef CONFIG_PM 679#ifdef CONFIG_PM
523static unsigned long sleep_save_msc[10]; 680static unsigned long sleep_save_msc[10];
524 681
@@ -580,53 +737,63 @@ static int __init cmx270_pm_init(void)
580static int __init cmx270_pm_init(void) { return 0; } 737static int __init cmx270_pm_init(void) { return 0; }
581#endif 738#endif
582 739
583static void __init cmx270_init(void) 740#if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE)
741static void __init cmx270_init_ac97(void)
584{ 742{
585 cmx270_pm_init();
586
587 set_pxa_fb_info(cmx270_display);
588
589 /* register CM-X270 platform devices */
590 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
591 pxa_set_ac97_info(NULL); 743 pxa_set_ac97_info(NULL);
744}
745#else
746static inline void cmx270_init_ac97(void) {}
747#endif
592 748
593 /* set MCI and OHCI platform parameters */ 749static void __init cmx270_init(void)
594 pxa_set_mci_info(&cmx270_mci_platform_data); 750{
595 pxa_set_ohci_info(&cmx270_ohci_platform_data); 751 cmx270_pm_init();
596
597 /* This enables the STUART */
598 pxa_gpio_mode(GPIO46_STRXD_MD);
599 pxa_gpio_mode(GPIO47_STTXD_MD);
600 752
601 /* This enables the BTUART */ 753 pxa2xx_mfp_config(ARRAY_AND_SIZE(cmx270_pin_config));
602 pxa_gpio_mode(GPIO42_BTRXD_MD); 754
603 pxa_gpio_mode(GPIO43_BTTXD_MD); 755 cmx270_init_dm9000();
604 pxa_gpio_mode(GPIO44_BTCTS_MD); 756 cmx270_init_rtc();
605 pxa_gpio_mode(GPIO45_BTRTS_MD); 757 cmx270_init_display();
758 cmx270_init_mmc();
759 cmx270_init_ohci();
760 cmx270_init_ac97();
761 cmx270_init_touchscreen();
762 cmx270_init_leds();
763 cmx270_init_2700G();
606} 764}
607 765
608static void __init cmx270_init_irq(void) 766static void __init cmx270_init_irq(void)
609{ 767{
610 pxa27x_init_irq(); 768 pxa27x_init_irq();
611 769
770 cmx270_pci_init_irq(GPIO22_IT8152_IRQ);
771}
612 772
613 cmx270_pci_init_irq(); 773#ifdef CONFIG_PCI
774/* Map PCI companion statically */
775static struct map_desc cmx270_io_desc[] __initdata = {
776 [0] = { /* PCI bridge */
777 .virtual = CMX270_IT8152_VIRT,
778 .pfn = __phys_to_pfn(PXA_CS4_PHYS),
779 .length = SZ_64M,
780 .type = MT_DEVICE
781 },
782};
614 783
615 /* Setup interrupt for dm9000 */ 784static void __init cmx270_map_io(void)
616 pxa_gpio_mode(IRQ_TO_GPIO(CMX270_ETHIRQ)); 785{
617 set_irq_type(CMX270_ETHIRQ, IRQT_RISING); 786 pxa_map_io();
787 iotable_init(cmx270_io_desc, ARRAY_SIZE(cmx270_io_desc));
618 788
619 /* Setup interrupt for 2700G */ 789 it8152_base_address = CMX270_IT8152_VIRT;
620 pxa_gpio_mode(IRQ_TO_GPIO(CMX270_GFXIRQ));
621 set_irq_type(CMX270_GFXIRQ, IRQT_FALLING);
622} 790}
623 791#else
624static void __init cmx270_map_io(void) 792static void __init cmx270_map_io(void)
625{ 793{
626 pxa_map_io(); 794 pxa_map_io();
627 iotable_init(cmx270_io_desc, ARRAY_SIZE(cmx270_io_desc));
628} 795}
629 796#endif
630 797
631MACHINE_START(ARMCORE, "Compulab CM-x270") 798MACHINE_START(ARMCORE, "Compulab CM-x270")
632 .boot_params = 0xa0000100, 799 .boot_params = 0xa0000100,
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index b37671b71886..e58504edb140 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -465,6 +465,7 @@ static void corgi_irda_transceiver_mode(struct device *dev, int mode)
465 GPSR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON); 465 GPSR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON);
466 else 466 else
467 GPCR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON); 467 GPCR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON);
468 pxa2xx_transceiver_mode(dev, mode);
468} 469}
469 470
470static struct pxaficp_platform_data corgi_ficp_platform_data = { 471static struct pxaficp_platform_data corgi_ficp_platform_data = {
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index a6f2390ce662..84489dc51d81 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -13,8 +13,10 @@
13#include <asm/arch/mfp-pxa27x.h> 13#include <asm/arch/mfp-pxa27x.h>
14#include <asm/arch/ohci.h> 14#include <asm/arch/ohci.h>
15#include <asm/arch/pxa27x_keypad.h> 15#include <asm/arch/pxa27x_keypad.h>
16#include <asm/arch/pxa2xx_spi.h>
16#include <asm/arch/camera.h> 17#include <asm/arch/camera.h>
17#include <asm/arch/audio.h> 18#include <asm/arch/audio.h>
19#include <asm/arch/pxa3xx_nand.h>
18 20
19#include "devices.h" 21#include "devices.h"
20#include "generic.h" 22#include "generic.h"
@@ -830,4 +832,63 @@ void __init pxa3xx_set_mci3_info(struct pxamci_platform_data *info)
830 pxa_register_device(&pxa3xx_device_mci3, info); 832 pxa_register_device(&pxa3xx_device_mci3, info);
831} 833}
832 834
835static struct resource pxa3xx_resources_nand[] = {
836 [0] = {
837 .start = 0x43100000,
838 .end = 0x43100053,
839 .flags = IORESOURCE_MEM,
840 },
841 [1] = {
842 .start = IRQ_NAND,
843 .end = IRQ_NAND,
844 .flags = IORESOURCE_IRQ,
845 },
846 [2] = {
847 /* DRCMR for Data DMA */
848 .start = 97,
849 .end = 97,
850 .flags = IORESOURCE_DMA,
851 },
852 [3] = {
853 /* DRCMR for Command DMA */
854 .start = 99,
855 .end = 99,
856 .flags = IORESOURCE_DMA,
857 },
858};
859
860static u64 pxa3xx_nand_dma_mask = DMA_BIT_MASK(32);
861
862struct platform_device pxa3xx_device_nand = {
863 .name = "pxa3xx-nand",
864 .id = -1,
865 .dev = {
866 .dma_mask = &pxa3xx_nand_dma_mask,
867 .coherent_dma_mask = DMA_BIT_MASK(32),
868 },
869 .num_resources = ARRAY_SIZE(pxa3xx_resources_nand),
870 .resource = pxa3xx_resources_nand,
871};
872
873void __init pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info)
874{
875 pxa_register_device(&pxa3xx_device_nand, info);
876}
833#endif /* CONFIG_PXA3xx */ 877#endif /* CONFIG_PXA3xx */
878
879/* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1.
880 * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */
881void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info)
882{
883 struct platform_device *pd;
884
885 pd = platform_device_alloc("pxa2xx-spi", id);
886 if (pd == NULL) {
887 printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n",
888 id);
889 return;
890 }
891
892 pd->dev.platform_data = info;
893 platform_device_add(pd);
894}
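A board file would call this helper instead of declaring its own "pxa2xx-spi" platform device, as the Lubbock change further down in this patch does with its existing pxa_ssp_master_info. A minimal sketch, with a hypothetical master structure and an assumed chip-select count:

static struct pxa2xx_spi_master board_ssp1_master_info = {
	.num_chipselect	= 1,	/* assumed: a single device on this SSP */
};

static void __init board_init_spi(void)
{
	/* same id Lubbock passes for its SSP-based SPI master */
	pxa2xx_set_spi_info(1, &board_ssp1_master_info);
}
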
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h
index b852eb18daa5..887c738f5911 100644
--- a/arch/arm/mach-pxa/devices.h
+++ b/arch/arm/mach-pxa/devices.h
@@ -31,4 +31,6 @@ extern struct platform_device pxa25x_device_pwm1;
31extern struct platform_device pxa27x_device_pwm0; 31extern struct platform_device pxa27x_device_pwm0;
32extern struct platform_device pxa27x_device_pwm1; 32extern struct platform_device pxa27x_device_pwm1;
33 33
34extern struct platform_device pxa3xx_device_nand;
35
34void __init pxa_register_device(struct platform_device *dev, void *data); 36void __init pxa_register_device(struct platform_device *dev, void *data);
diff --git a/arch/arm/mach-pxa/e400_lcd.c b/arch/arm/mach-pxa/e400_lcd.c
new file mode 100644
index 000000000000..16c023630626
--- /dev/null
+++ b/arch/arm/mach-pxa/e400_lcd.c
@@ -0,0 +1,56 @@
1/*
2 * e400_lcd.c
3 *
4 * (c) 2005 Ian Molton <spyro@f2s.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15
16#include <asm/mach-types.h>
17#include <asm/arch/pxa-regs.h>
18#include <asm/arch/pxafb.h>
19
20static struct pxafb_mode_info e400_pxafb_mode_info = {
21 .pixclock = 140703,
22 .xres = 240,
23 .yres = 320,
24 .bpp = 16,
25 .hsync_len = 4,
26 .left_margin = 28,
27 .right_margin = 8,
28 .vsync_len = 3,
29 .upper_margin = 5,
30 .lower_margin = 6,
31 .sync = 0,
32};
33
34static struct pxafb_mach_info e400_pxafb_mach_info = {
35 .modes = &e400_pxafb_mode_info,
36 .num_modes = 1,
37 .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
38 .lccr3 = 0,
39 .pxafb_backlight_power = NULL,
40};
41
42static int __init e400_lcd_init(void)
43{
44 if (!machine_is_e400())
45 return -ENODEV;
46
47 set_pxa_fb_info(&e400_pxafb_mach_info);
48 return 0;
49}
50
51module_init(e400_lcd_init);
52
53MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
54MODULE_DESCRIPTION("e400 lcd driver");
55MODULE_LICENSE("GPL v2");
56
diff --git a/arch/arm/mach-pxa/e740_lcd.c b/arch/arm/mach-pxa/e740_lcd.c
new file mode 100644
index 000000000000..26bd599af178
--- /dev/null
+++ b/arch/arm/mach-pxa/e740_lcd.c
@@ -0,0 +1,123 @@
1/* e740_lcd.c
2 *
3 * This file contains the definitions for the LCD timings and functions
4 * to control the LCD power / frontlighting via the w100fb driver.
5 *
6 * (c) 2005 Ian Molton <spyro@f2s.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/fb.h>
17#include <linux/err.h>
18#include <linux/platform_device.h>
19
20#include <asm/mach-types.h>
21
22#include <video/w100fb.h>
23
24/*
25**potential** shutdown routine - to be investigated
26devmem2 0x0c010528 w 0xff3fff00
27devmem2 0x0c010190 w 0x7FFF8000
28devmem2 0x0c0101b0 w 0x00FF0000
29devmem2 0x0c01008c w 0x00000000
30devmem2 0x0c010080 w 0x000000bf
31devmem2 0x0c010098 w 0x00000015
32devmem2 0x0c010088 w 0x4b000204
33devmem2 0x0c010098 w 0x0000001d
34*/
35
36static struct w100_gen_regs e740_lcd_regs = {
37 .lcd_format = 0x00008023,
38 .lcdd_cntl1 = 0x0f000000,
39 .lcdd_cntl2 = 0x0003ffff,
40 .genlcd_cntl1 = 0x00ffff03,
41 .genlcd_cntl2 = 0x003c0f03,
42 .genlcd_cntl3 = 0x000143aa,
43};
44
45static struct w100_mode e740_lcd_mode = {
46 .xres = 240,
47 .yres = 320,
48 .left_margin = 20,
49 .right_margin = 28,
50 .upper_margin = 9,
51 .lower_margin = 8,
52 .crtc_ss = 0x80140013,
53 .crtc_ls = 0x81150110,
54 .crtc_gs = 0x80050005,
55 .crtc_vpos_gs = 0x000a0009,
56 .crtc_rev = 0x0040010a,
57 .crtc_dclk = 0xa906000a,
58 .crtc_gclk = 0x80050108,
59 .crtc_goe = 0x80050108,
60 .pll_freq = 57,
61 .pixclk_divider = 4,
62 .pixclk_divider_rotated = 4,
63 .pixclk_src = CLK_SRC_XTAL,
64 .sysclk_divider = 1,
65 .sysclk_src = CLK_SRC_PLL,
66 .crtc_ps1_active = 0x41060010,
67};
68
69
70static struct w100_gpio_regs e740_w100_gpio_info = {
71 .init_data1 = 0x21002103,
72 .gpio_dir1 = 0xffffdeff,
73 .gpio_oe1 = 0x03c00643,
74 .init_data2 = 0x003f003f,
75 .gpio_dir2 = 0xffffffff,
76 .gpio_oe2 = 0x000000ff,
77};
78
79static struct w100fb_mach_info e740_fb_info = {
80 .modelist = &e740_lcd_mode,
81 .num_modes = 1,
82 .regs = &e740_lcd_regs,
83 .gpio = &e740_w100_gpio_info,
84 .xtal_freq = 14318000,
85 .xtal_dbl = 1,
86};
87
88static struct resource e740_fb_resources[] = {
89 [0] = {
90 .start = 0x0c000000,
91 .end = 0x0cffffff,
92 .flags = IORESOURCE_MEM,
93 },
94};
95
96/* ----------------------- device declarations -------------------------- */
97
98
99static struct platform_device e740_fb_device = {
100 .name = "w100fb",
101 .id = -1,
102 .dev = {
103 .platform_data = &e740_fb_info,
104 },
105 .num_resources = ARRAY_SIZE(e740_fb_resources),
106 .resource = e740_fb_resources,
107};
108
109static int e740_lcd_init(void)
110{
111 int ret;
112
113 if (!machine_is_e740())
114 return -ENODEV;
115
116 return platform_device_register(&e740_fb_device);
117}
118
119module_init(e740_lcd_init);
120
121MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
122MODULE_DESCRIPTION("e740 lcd driver");
123MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-pxa/e750_lcd.c b/arch/arm/mach-pxa/e750_lcd.c
new file mode 100644
index 000000000000..75edc3b5390f
--- /dev/null
+++ b/arch/arm/mach-pxa/e750_lcd.c
@@ -0,0 +1,109 @@
1/* e750_lcd.c
2 *
3 * This file contains the definitions for the LCD timings and functions
4 * to control the LCD power / frontlighting via the w100fb driver.
5 *
6 * (c) 2005 Ian Molton <spyro@f2s.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/fb.h>
17#include <linux/err.h>
18#include <linux/platform_device.h>
19
20#include <asm/mach-types.h>
21
22#include <video/w100fb.h>
23
24static struct w100_gen_regs e750_lcd_regs = {
25 .lcd_format = 0x00008003,
26 .lcdd_cntl1 = 0x00000000,
27 .lcdd_cntl2 = 0x0003ffff,
28 .genlcd_cntl1 = 0x00fff003,
29 .genlcd_cntl2 = 0x003c0f03,
30 .genlcd_cntl3 = 0x000143aa,
31};
32
33static struct w100_mode e750_lcd_mode = {
34 .xres = 240,
35 .yres = 320,
36 .left_margin = 21,
37 .right_margin = 22,
38 .upper_margin = 5,
39 .lower_margin = 4,
40 .crtc_ss = 0x80150014,
41 .crtc_ls = 0x8014000d,
42 .crtc_gs = 0xc1000005,
43 .crtc_vpos_gs = 0x00020147,
44 .crtc_rev = 0x0040010a,
45 .crtc_dclk = 0xa1700030,
46 .crtc_gclk = 0x80cc0015,
47 .crtc_goe = 0x80cc0015,
48 .crtc_ps1_active = 0x61060017,
49 .pll_freq = 57,
50 .pixclk_divider = 4,
51 .pixclk_divider_rotated = 4,
52 .pixclk_src = CLK_SRC_XTAL,
53 .sysclk_divider = 1,
54 .sysclk_src = CLK_SRC_PLL,
55};
56
57
58static struct w100_gpio_regs e750_w100_gpio_info = {
59 .init_data1 = 0x01192f1b,
60 .gpio_dir1 = 0xd5ffdeff,
61 .gpio_oe1 = 0x000020bf,
62 .init_data2 = 0x010f010f,
63 .gpio_dir2 = 0xffffffff,
64 .gpio_oe2 = 0x000001cf,
65};
66
67static struct w100fb_mach_info e750_fb_info = {
68 .modelist = &e750_lcd_mode,
69 .num_modes = 1,
70 .regs = &e750_lcd_regs,
71 .gpio = &e750_w100_gpio_info,
72 .xtal_freq = 14318000,
73 .xtal_dbl = 1,
74};
75
76static struct resource e750_fb_resources[] = {
77 [0] = {
78 .start = 0x0c000000,
79 .end = 0x0cffffff,
80 .flags = IORESOURCE_MEM,
81 },
82};
83
84/* ----------------------- device declarations -------------------------- */
85
86
87static struct platform_device e750_fb_device = {
88 .name = "w100fb",
89 .id = -1,
90 .dev = {
91 .platform_data = &e750_fb_info,
92 },
93 .num_resources = ARRAY_SIZE(e750_fb_resources),
94 .resource = e750_fb_resources,
95};
96
97static int e750_lcd_init(void)
98{
99 if (!machine_is_e750())
100 return -ENODEV;
101
102 return platform_device_register(&e750_fb_device);
103}
104
105module_init(e750_lcd_init);
106
107MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
108MODULE_DESCRIPTION("e750 lcd driver");
109MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-pxa/e800_lcd.c b/arch/arm/mach-pxa/e800_lcd.c
new file mode 100644
index 000000000000..e6aeab0ebc22
--- /dev/null
+++ b/arch/arm/mach-pxa/e800_lcd.c
@@ -0,0 +1,159 @@
1/* e800_lcd.c
2 *
3 * This file contains the definitions for the LCD timings and functions
4 * to control the LCD power / frontlighting via the w100fb driver.
5 *
6 * (c) 2005 Ian Molton <spyro@f2s.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/fb.h>
17#include <linux/err.h>
18#include <linux/platform_device.h>
19
20#include <asm/mach-types.h>
21
22#include <video/w100fb.h>
23
24static struct w100_gen_regs e800_lcd_regs = {
25 .lcd_format = 0x00008003,
26 .lcdd_cntl1 = 0x02a00000,
27 .lcdd_cntl2 = 0x0003ffff,
28 .genlcd_cntl1 = 0x000ff2a3,
29 .genlcd_cntl2 = 0x000002a3,
30 .genlcd_cntl3 = 0x000102aa,
31};
32
33static struct w100_mode e800_lcd_mode[2] = {
34 [0] = {
35 .xres = 480,
36 .yres = 640,
37 .left_margin = 52,
38 .right_margin = 148,
39 .upper_margin = 2,
40 .lower_margin = 6,
41 .crtc_ss = 0x80350034,
42 .crtc_ls = 0x802b0026,
43 .crtc_gs = 0x80160016,
44 .crtc_vpos_gs = 0x00020003,
45 .crtc_rev = 0x0040001d,
46 .crtc_dclk = 0xe0000000,
47 .crtc_gclk = 0x82a50049,
48 .crtc_goe = 0x80ee001c,
49 .crtc_ps1_active = 0x00000000,
50 .pll_freq = 128,
51 .pixclk_divider = 4,
52 .pixclk_divider_rotated = 6,
53 .pixclk_src = CLK_SRC_PLL,
54 .sysclk_divider = 0,
55 .sysclk_src = CLK_SRC_PLL,
56 },
57 [1] = {
58 .xres = 240,
59 .yres = 320,
60 .left_margin = 15,
61 .right_margin = 88,
62 .upper_margin = 0,
63 .lower_margin = 7,
64 .crtc_ss = 0xd010000f,
65 .crtc_ls = 0x80070003,
66 .crtc_gs = 0x80000000,
67 .crtc_vpos_gs = 0x01460147,
68 .crtc_rev = 0x00400003,
69 .crtc_dclk = 0xa1700030,
70 .crtc_gclk = 0x814b0008,
71 .crtc_goe = 0x80cc0015,
72 .crtc_ps1_active = 0x00000000,
73 .pll_freq = 100,
74 .pixclk_divider = 6, /* Wince uses 14 which gives a 7MHz pclk. */
75 .pixclk_divider_rotated = 6, /* we want a 14MHz one (much nicer to look at) */
76 .pixclk_src = CLK_SRC_PLL,
77 .sysclk_divider = 0,
78 .sysclk_src = CLK_SRC_PLL,
79 }
80};
81
82
83static struct w100_gpio_regs e800_w100_gpio_info = {
84 .init_data1 = 0xc13fc019,
85 .gpio_dir1 = 0x3e40df7f,
86 .gpio_oe1 = 0x003c3000,
87 .init_data2 = 0x00000000,
88 .gpio_dir2 = 0x00000000,
89 .gpio_oe2 = 0x00000000,
90};
91
92static struct w100_mem_info e800_w100_mem_info = {
93 .ext_cntl = 0x09640011,
94 .sdram_mode_reg = 0x00600021,
95 .ext_timing_cntl = 0x10001545,
96 .io_cntl = 0x7ddd7333,
97 .size = 0x1fffff,
98};
99
100static void e800_tg_change(struct w100fb_par *par)
101{
102 unsigned long tmp;
103
104 tmp = w100fb_gpio_read(W100_GPIO_PORT_A);
105 if (par->mode->xres == 480)
106 tmp |= 0x100;
107 else
108 tmp &= ~0x100;
109 w100fb_gpio_write(W100_GPIO_PORT_A, tmp);
110}
111
112static struct w100_tg_info e800_tg_info = {
113 .change = e800_tg_change,
114};
115
116static struct w100fb_mach_info e800_fb_info = {
117 .modelist = e800_lcd_mode,
118 .num_modes = 2,
119 .regs = &e800_lcd_regs,
120 .gpio = &e800_w100_gpio_info,
121 .mem = &e800_w100_mem_info,
122 .tg = &e800_tg_info,
123 .xtal_freq = 16000000,
124};
125
126static struct resource e800_fb_resources[] = {
127 [0] = {
128 .start = 0x0c000000,
129 .end = 0x0cffffff,
130 .flags = IORESOURCE_MEM,
131 },
132};
133
134/* ----------------------- device declarations -------------------------- */
135
136
137static struct platform_device e800_fb_device = {
138 .name = "w100fb",
139 .id = -1,
140 .dev = {
141 .platform_data = &e800_fb_info,
142 },
143 .num_resources = ARRAY_SIZE(e800_fb_resources),
144 .resource = e800_fb_resources,
145};
146
147static int e800_lcd_init(void)
148{
149 if (!machine_is_e800())
150 return -ENODEV;
151
152 return platform_device_register(&e800_fb_device);
153}
154
155module_init(e800_lcd_init);
156
157MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
158MODULE_DESCRIPTION("e800 lcd driver");
159MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 1bf680749928..e5cc6ca63c75 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Support for CompuLab EM-x270 platform 2 * Support for CompuLab EM-X270 platform
3 * 3 *
4 * Copyright (C) 2007 CompuLab, Ltd. 4 * Copyright (C) 2007, 2008 CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il> 5 * Author: Mike Rapoport <mike@compulab.co.il>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -14,31 +14,159 @@
14 14
15#include <linux/dm9000.h> 15#include <linux/dm9000.h>
16#include <linux/rtc-v3020.h> 16#include <linux/rtc-v3020.h>
17
18#include <linux/mtd/nand.h> 17#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
19#include <linux/input.h>
20#include <linux/gpio_keys.h>
21#include <linux/gpio.h>
20 22
21#include <asm/mach-types.h> 23#include <asm/mach-types.h>
22
23#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
24 25
26#include <asm/arch/mfp-pxa27x.h>
25#include <asm/arch/pxa-regs.h> 27#include <asm/arch/pxa-regs.h>
26#include <asm/arch/pxa2xx-gpio.h>
27#include <asm/arch/pxa27x-udc.h> 28#include <asm/arch/pxa27x-udc.h>
28#include <asm/arch/audio.h> 29#include <asm/arch/audio.h>
29#include <asm/arch/pxafb.h> 30#include <asm/arch/pxafb.h>
30#include <asm/arch/ohci.h> 31#include <asm/arch/ohci.h>
31#include <asm/arch/mmc.h> 32#include <asm/arch/mmc.h>
32#include <asm/arch/bitfield.h> 33#include <asm/arch/pxa27x_keypad.h>
33 34
34#include "generic.h" 35#include "generic.h"
35 36
36/* GPIO IRQ usage */ 37/* GPIO IRQ usage */
37#define EM_X270_MMC_PD (105) 38#define GPIO41_ETHIRQ (41)
38#define EM_X270_ETHIRQ IRQ_GPIO(41) 39#define GPIO13_MMC_CD (13)
39#define EM_X270_MMC_IRQ IRQ_GPIO(13) 40#define EM_X270_ETHIRQ IRQ_GPIO(GPIO41_ETHIRQ)
41#define EM_X270_MMC_CD IRQ_GPIO(GPIO13_MMC_CD)
42
43/* NAND control GPIOs */
44#define GPIO11_NAND_CS (11)
45#define GPIO56_NAND_RB (56)
46
47static unsigned long em_x270_pin_config[] = {
48 /* AC'97 */
49 GPIO28_AC97_BITCLK,
50 GPIO29_AC97_SDATA_IN_0,
51 GPIO30_AC97_SDATA_OUT,
52 GPIO31_AC97_SYNC,
53 GPIO98_AC97_SYSCLK,
54 GPIO113_AC97_nRESET,
55
56 /* BTUART */
57 GPIO42_BTUART_RXD,
58 GPIO43_BTUART_TXD,
59 GPIO44_BTUART_CTS,
60 GPIO45_BTUART_RTS,
61
62 /* STUART */
63 GPIO46_STUART_RXD,
64 GPIO47_STUART_TXD,
65
66 /* MCI controller */
67 GPIO32_MMC_CLK,
68 GPIO112_MMC_CMD,
69 GPIO92_MMC_DAT_0,
70 GPIO109_MMC_DAT_1,
71 GPIO110_MMC_DAT_2,
72 GPIO111_MMC_DAT_3,
73
74 /* LCD */
75 GPIO58_LCD_LDD_0,
76 GPIO59_LCD_LDD_1,
77 GPIO60_LCD_LDD_2,
78 GPIO61_LCD_LDD_3,
79 GPIO62_LCD_LDD_4,
80 GPIO63_LCD_LDD_5,
81 GPIO64_LCD_LDD_6,
82 GPIO65_LCD_LDD_7,
83 GPIO66_LCD_LDD_8,
84 GPIO67_LCD_LDD_9,
85 GPIO68_LCD_LDD_10,
86 GPIO69_LCD_LDD_11,
87 GPIO70_LCD_LDD_12,
88 GPIO71_LCD_LDD_13,
89 GPIO72_LCD_LDD_14,
90 GPIO73_LCD_LDD_15,
91 GPIO74_LCD_FCLK,
92 GPIO75_LCD_LCLK,
93 GPIO76_LCD_PCLK,
94 GPIO77_LCD_BIAS,
95
96 /* QCI */
97 GPIO84_CIF_FV,
98 GPIO25_CIF_LV,
99 GPIO53_CIF_MCLK,
100 GPIO54_CIF_PCLK,
101 GPIO81_CIF_DD_0,
102 GPIO55_CIF_DD_1,
103 GPIO51_CIF_DD_2,
104 GPIO50_CIF_DD_3,
105 GPIO52_CIF_DD_4,
106 GPIO48_CIF_DD_5,
107 GPIO17_CIF_DD_6,
108 GPIO12_CIF_DD_7,
109
110 /* I2C */
111 GPIO117_I2C_SCL,
112 GPIO118_I2C_SDA,
113
114 /* Keypad */
115 GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
116 GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH,
117 GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH,
118 GPIO34_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH,
119 GPIO39_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH,
120 GPIO99_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH,
121 GPIO91_KP_MKIN_6 | WAKEUP_ON_LEVEL_HIGH,
122 GPIO36_KP_MKIN_7 | WAKEUP_ON_LEVEL_HIGH,
123 GPIO103_KP_MKOUT_0,
124 GPIO104_KP_MKOUT_1,
125 GPIO105_KP_MKOUT_2,
126 GPIO106_KP_MKOUT_3,
127 GPIO107_KP_MKOUT_4,
128 GPIO108_KP_MKOUT_5,
129 GPIO96_KP_MKOUT_6,
130 GPIO22_KP_MKOUT_7,
131
132 /* SSP1 */
133 GPIO26_SSP1_RXD,
134 GPIO23_SSP1_SCLK,
135 GPIO24_SSP1_SFRM,
136 GPIO57_SSP1_TXD,
137
138 /* SSP2 */
139 GPIO19_SSP2_SCLK,
140 GPIO14_SSP2_SFRM,
141 GPIO89_SSP2_TXD,
142 GPIO88_SSP2_RXD,
143
144 /* SDRAM and local bus */
145 GPIO15_nCS_1,
146 GPIO78_nCS_2,
147 GPIO79_nCS_3,
148 GPIO80_nCS_4,
149 GPIO49_nPWE,
150 GPIO18_RDY,
151
152 /* GPIO */
153 GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
154
155 /* power controls */
156 GPIO20_GPIO | MFP_LPM_DRIVE_LOW, /* GPRS_PWEN */
157 GPIO115_GPIO | MFP_LPM_DRIVE_LOW, /* WLAN_PWEN */
158
159 /* NAND controls */
160 GPIO11_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */
161 GPIO56_GPIO, /* NAND Ready/Busy */
162
163 /* interrupts */
164 GPIO13_GPIO, /* MMC card detect */
165 GPIO41_GPIO, /* DM9000 interrupt */
166};
40 167
41static struct resource em_x270_dm9k_resource[] = { 168#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
169static struct resource em_x270_dm9000_resource[] = {
42 [0] = { 170 [0] = {
43 .start = PXA_CS2_PHYS, 171 .start = PXA_CS2_PHYS,
44 .end = PXA_CS2_PHYS + 3, 172 .end = PXA_CS2_PHYS + 3,
@@ -56,32 +184,30 @@ static struct resource em_x270_dm9k_resource[] = {
56 } 184 }
57}; 185};
58 186
59/* for the moment we limit ourselves to 32bit IO until some 187static struct dm9000_plat_data em_x270_dm9000_platdata = {
60 * better IO routines can be written and tested
61 */
62static struct dm9000_plat_data em_x270_dm9k_platdata = {
63 .flags = DM9000_PLATF_32BITONLY, 188 .flags = DM9000_PLATF_32BITONLY,
64}; 189};
65 190
66/* Ethernet device */ 191static struct platform_device em_x270_dm9000 = {
67static struct platform_device em_x270_dm9k = {
68 .name = "dm9000", 192 .name = "dm9000",
69 .id = 0, 193 .id = 0,
70 .num_resources = ARRAY_SIZE(em_x270_dm9k_resource), 194 .num_resources = ARRAY_SIZE(em_x270_dm9000_resource),
71 .resource = em_x270_dm9k_resource, 195 .resource = em_x270_dm9000_resource,
72 .dev = { 196 .dev = {
73 .platform_data = &em_x270_dm9k_platdata, 197 .platform_data = &em_x270_dm9000_platdata,
74 } 198 }
75}; 199};
76 200
77/* WM9712 touchscreen controller. Hopefully the driver will make it to 201static void __init em_x270_init_dm9000(void)
78 * the mainstream sometime */ 202{
79static struct platform_device em_x270_ts = { 203 platform_device_register(&em_x270_dm9000);
80 .name = "wm97xx-ts", 204}
81 .id = -1, 205#else
82}; 206static inline void em_x270_init_dm9000(void) {}
207#endif
83 208
84/* RTC */ 209/* V3020 RTC */
210#if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
85static struct resource em_x270_v3020_resource[] = { 211static struct resource em_x270_v3020_resource[] = {
86 [0] = { 212 [0] = {
87 .start = PXA_CS4_PHYS, 213 .start = PXA_CS4_PHYS,
@@ -104,20 +230,26 @@ static struct platform_device em_x270_rtc = {
104 } 230 }
105}; 231};
106 232
107/* NAND flash */ 233static void __init em_x270_init_rtc(void)
108#define GPIO_NAND_CS (11) 234{
109#define GPIO_NAND_RB (56) 235 platform_device_register(&em_x270_rtc);
236}
237#else
238static inline void em_x270_init_rtc(void) {}
239#endif
110 240
241/* NAND flash */
242#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
111static inline void nand_cs_on(void) 243static inline void nand_cs_on(void)
112{ 244{
113 GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); 245 gpio_set_value(GPIO11_NAND_CS, 0);
114} 246}
115 247
116static void nand_cs_off(void) 248static void nand_cs_off(void)
117{ 249{
118 dsb(); 250 dsb();
119 251
120 GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); 252 gpio_set_value(GPIO11_NAND_CS, 1);
121} 253}
122 254
123/* hardware specific access to control-lines */ 255/* hardware specific access to control-lines */
@@ -157,7 +289,7 @@ static int em_x270_nand_device_ready(struct mtd_info *mtd)
157{ 289{
158 dsb(); 290 dsb();
159 291
160 return GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB); 292 return gpio_get_value(GPIO56_NAND_RB);
161} 293}
162 294
163static struct mtd_partition em_x270_partition_info[] = { 295static struct mtd_partition em_x270_partition_info[] = {
@@ -210,16 +342,35 @@ static struct platform_device em_x270_nand = {
210 } 342 }
211}; 343};
212 344
213/* platform devices */ 345static void __init em_x270_init_nand(void)
214static struct platform_device *platform_devices[] __initdata = { 346{
215 &em_x270_dm9k, 347 int err;
216 &em_x270_ts,
217 &em_x270_rtc,
218 &em_x270_nand,
219};
220 348
349 err = gpio_request(GPIO11_NAND_CS, "NAND CS");
350 if (err) {
351 pr_warning("EM-X270: failed to request NAND CS gpio\n");
352 return;
353 }
354
355 gpio_direction_output(GPIO11_NAND_CS, 1);
356
357 err = gpio_request(GPIO56_NAND_RB, "NAND R/B");
358 if (err) {
359 pr_warning("EM-X270: failed to request NAND R/B gpio\n");
360 gpio_free(GPIO11_NAND_CS);
361 return;
362 }
363
364 gpio_direction_input(GPIO56_NAND_RB);
365
366 platform_device_register(&em_x270_nand);
367}
368#else
369static inline void em_x270_init_nand(void) {}
370#endif
221 371
222/* PXA27x OHCI controller setup */ 372/* PXA27x OHCI controller setup */
373#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
223static int em_x270_ohci_init(struct device *dev) 374static int em_x270_ohci_init(struct device *dev)
224{ 375{
225 /* Set the Power Control Polarity Low */ 376 /* Set the Power Control Polarity Low */
@@ -237,27 +388,23 @@ static struct pxaohci_platform_data em_x270_ohci_platform_data = {
237 .init = em_x270_ohci_init, 388 .init = em_x270_ohci_init,
238}; 389};
239 390
391static void __init em_x270_init_ohci(void)
392{
393 pxa_set_ohci_info(&em_x270_ohci_platform_data);
394}
395#else
396static inline void em_x270_init_ohci(void) {}
397#endif
240 398
399/* MCI controller setup */
400#if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE)
241static int em_x270_mci_init(struct device *dev, 401static int em_x270_mci_init(struct device *dev,
242 irq_handler_t em_x270_detect_int, 402 irq_handler_t em_x270_detect_int,
243 void *data) 403 void *data)
244{ 404{
245 int err; 405 int err = request_irq(EM_X270_MMC_CD, em_x270_detect_int,
246 406 IRQF_DISABLED | IRQF_TRIGGER_FALLING,
247 /* setup GPIO for PXA27x MMC controller */ 407 "MMC card detect", data);
248 pxa_gpio_mode(GPIO32_MMCCLK_MD);
249 pxa_gpio_mode(GPIO112_MMCCMD_MD);
250 pxa_gpio_mode(GPIO92_MMCDAT0_MD);
251 pxa_gpio_mode(GPIO109_MMCDAT1_MD);
252 pxa_gpio_mode(GPIO110_MMCDAT2_MD);
253 pxa_gpio_mode(GPIO111_MMCDAT3_MD);
254
255 /* EM-X270 uses GPIO13 as SD power enable */
256 pxa_gpio_mode(EM_X270_MMC_PD | GPIO_OUT);
257
258 err = request_irq(EM_X270_MMC_IRQ, em_x270_detect_int,
259 IRQF_DISABLED | IRQF_TRIGGER_FALLING,
260 "MMC card detect", data);
261 if (err) { 408 if (err) {
262 printk(KERN_ERR "%s: can't request MMC card detect IRQ: %d\n", 409 printk(KERN_ERR "%s: can't request MMC card detect IRQ: %d\n",
263 __func__, err); 410 __func__, err);
@@ -279,7 +426,8 @@ static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
279 426
280static void em_x270_mci_exit(struct device *dev, void *data) 427static void em_x270_mci_exit(struct device *dev, void *data)
281{ 428{
282 free_irq(EM_X270_MMC_IRQ, data); 429 int irq = gpio_to_irq(GPIO13_MMC_CD);
430 free_irq(irq, data);
283} 431}
284 432
285static struct pxamci_platform_data em_x270_mci_platform_data = { 433static struct pxamci_platform_data em_x270_mci_platform_data = {
@@ -289,7 +437,16 @@ static struct pxamci_platform_data em_x270_mci_platform_data = {
289 .exit = em_x270_mci_exit, 437 .exit = em_x270_mci_exit,
290}; 438};
291 439
440static void __init em_x270_init_mmc(void)
441{
442 pxa_set_mci_info(&em_x270_mci_platform_data);
443}
444#else
445static inline void em_x270_init_mmc(void) {}
446#endif
447
292/* LCD 480x640 */ 448/* LCD 480x640 */
449#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
293static struct pxafb_mode_info em_x270_lcd_mode = { 450static struct pxafb_mode_info em_x270_lcd_mode = {
294 .pixclock = 50000, 451 .pixclock = 50000,
295 .bpp = 16, 452 .bpp = 16,
@@ -307,40 +464,96 @@ static struct pxafb_mode_info em_x270_lcd_mode = {
307static struct pxafb_mach_info em_x270_lcd = { 464static struct pxafb_mach_info em_x270_lcd = {
308 .modes = &em_x270_lcd_mode, 465 .modes = &em_x270_lcd_mode,
309 .num_modes = 1, 466 .num_modes = 1,
310 .cmap_inverse = 0, 467 .lcd_conn = LCD_COLOR_TFT_16BPP,
311 .cmap_static = 0,
312 .lccr0 = LCCR0_PAS,
313 .lccr3 = LCCR3_PixClkDiv(0x01) | LCCR3_Acb(0xff),
314}; 468};
315 469static void __init em_x270_init_lcd(void)
316static void __init em_x270_init(void)
317{ 470{
318 /* setup LCD */
319 set_pxa_fb_info(&em_x270_lcd); 471 set_pxa_fb_info(&em_x270_lcd);
472}
473#else
474static inline void em_x270_init_lcd(void) {}
475#endif
320 476
321 /* register EM-X270 platform devices */ 477#if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE)
322 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); 478static void __init em_x270_init_ac97(void)
479{
323 pxa_set_ac97_info(NULL); 480 pxa_set_ac97_info(NULL);
481}
482#else
483static inline void em_x270_init_ac97(void) {}
484#endif
485
486#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
487static unsigned int em_x270_matrix_keys[] = {
488 KEY(0, 0, KEY_A), KEY(1, 0, KEY_UP), KEY(2, 1, KEY_B),
489 KEY(0, 2, KEY_LEFT), KEY(1, 1, KEY_ENTER), KEY(2, 0, KEY_RIGHT),
490 KEY(0, 1, KEY_C), KEY(1, 2, KEY_DOWN), KEY(2, 2, KEY_D),
491};
324 492
325 /* set MCI and OHCI platform parameters */ 493struct pxa27x_keypad_platform_data em_x270_keypad_info = {
326 pxa_set_mci_info(&em_x270_mci_platform_data); 494 /* code map for the matrix keys */
327 pxa_set_ohci_info(&em_x270_ohci_platform_data); 495 .matrix_key_rows = 3,
496 .matrix_key_cols = 3,
497 .matrix_key_map = em_x270_matrix_keys,
498 .matrix_key_map_size = ARRAY_SIZE(em_x270_matrix_keys),
499};
500
501static void __init em_x270_init_keypad(void)
502{
503 pxa_set_keypad_info(&em_x270_keypad_info);
504}
505#else
506static inline void em_x270_init_keypad(void) {}
507#endif
328 508
329 /* setup STUART GPIOs */ 509#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
330 pxa_gpio_mode(GPIO46_STRXD_MD); 510static struct gpio_keys_button gpio_keys_button[] = {
331 pxa_gpio_mode(GPIO47_STTXD_MD); 511 [0] = {
512 .desc = "sleep/wakeup",
513 .code = KEY_SUSPEND,
514 .type = EV_PWR,
515 .gpio = 1,
516 .wakeup = 1,
517 },
518};
332 519
333 /* setup BTUART GPIOs */ 520static struct gpio_keys_platform_data em_x270_gpio_keys_data = {
334 pxa_gpio_mode(GPIO42_BTRXD_MD); 521 .buttons = gpio_keys_button,
335 pxa_gpio_mode(GPIO43_BTTXD_MD); 522 .nbuttons = 1,
336 pxa_gpio_mode(GPIO44_BTCTS_MD); 523};
337 pxa_gpio_mode(GPIO45_BTRTS_MD);
338 524
339 /* Setup interrupt for dm9000 */ 525static struct platform_device em_x270_gpio_keys = {
340 set_irq_type(EM_X270_ETHIRQ, IRQT_RISING); 526 .name = "gpio-keys",
527 .id = -1,
528 .dev = {
529 .platform_data = &em_x270_gpio_keys_data,
530 },
531};
532
533static void __init em_x270_init_gpio_keys(void)
534{
535 platform_device_register(&em_x270_gpio_keys);
536}
537#else
538static inline void em_x270_init_gpio_keys(void) {}
539#endif
540
541static void __init em_x270_init(void)
542{
543 pxa2xx_mfp_config(ARRAY_AND_SIZE(em_x270_pin_config));
544
545 em_x270_init_dm9000();
546 em_x270_init_rtc();
547 em_x270_init_nand();
548 em_x270_init_lcd();
549 em_x270_init_mmc();
550 em_x270_init_ohci();
551 em_x270_init_keypad();
552 em_x270_init_gpio_keys();
553 em_x270_init_ac97();
341} 554}
342 555
343MACHINE_START(EM_X270, "Compulab EM-x270") 556MACHINE_START(EM_X270, "Compulab EM-X270")
344 .boot_params = 0xa0000100, 557 .boot_params = 0xa0000100,
345 .phys_io = 0x40000000, 558 .phys_io = 0x40000000,
346 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 559 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index ee0ae93c876a..c29b7b21c11b 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -17,7 +17,7 @@
17#include <asm/arch/hardware.h> 17#include <asm/arch/hardware.h>
18#include <asm/mach-types.h> 18#include <asm/mach-types.h>
19 19
20#include <generic.h> 20#include "generic.h"
21 21
22/* Only e800 has 128MB RAM */ 22/* Only e800 has 128MB RAM */
23static void __init eseries_fixup(struct machine_desc *desc, 23static void __init eseries_fixup(struct machine_desc *desc,
@@ -47,6 +47,19 @@ MACHINE_START(E330, "Toshiba e330")
47MACHINE_END 47MACHINE_END
48#endif 48#endif
49 49
50#ifdef CONFIG_MACH_E350
51MACHINE_START(E350, "Toshiba e350")
52 /* Maintainer: Ian Molton (spyro@f2s.com) */
53 .phys_io = 0x40000000,
54 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
55 .boot_params = 0xa0000100,
56 .map_io = pxa_map_io,
57 .init_irq = pxa25x_init_irq,
58 .fixup = eseries_fixup,
59 .timer = &pxa_timer,
60MACHINE_END
61#endif
62
50#ifdef CONFIG_MACH_E740 63#ifdef CONFIG_MACH_E740
51MACHINE_START(E740, "Toshiba e740") 64MACHINE_START(E740, "Toshiba e740")
52 /* Maintainer: Ian Molton (spyro@f2s.com) */ 65 /* Maintainer: Ian Molton (spyro@f2s.com) */
diff --git a/arch/arm/mach-pxa/eseries_udc.c b/arch/arm/mach-pxa/eseries_udc.c
new file mode 100644
index 000000000000..362847a10998
--- /dev/null
+++ b/arch/arm/mach-pxa/eseries_udc.c
@@ -0,0 +1,57 @@
1/*
2 * UDC functions for the Toshiba e-series PDAs
3 *
4 * Copyright (c) Ian Molton 2003
5 *
6 * This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/device.h>
16
17#include <asm/arch/udc.h>
18#include <asm/arch/eseries-gpio.h>
19#include <asm/arch/hardware.h>
20#include <asm/arch/pxa-regs.h>
21#include <asm/mach/arch.h>
22#include <asm/mach-types.h>
23#include <asm/mach/map.h>
24#include <asm/domain.h>
25
26/* local PXA generic code */
27#include "generic.h"
28
29static struct pxa2xx_udc_mach_info e7xx_udc_mach_info = {
30 .gpio_vbus = GPIO_E7XX_USB_DISC,
31 .gpio_pullup = GPIO_E7XX_USB_PULLUP,
32 .gpio_pullup_inverted = 1
33};
34
35static struct pxa2xx_udc_mach_info e800_udc_mach_info = {
36 .gpio_vbus = GPIO_E800_USB_DISC,
37 .gpio_pullup = GPIO_E800_USB_PULLUP,
38 .gpio_pullup_inverted = 1
39};
40
41static int __init eseries_udc_init(void)
42{
43 if (machine_is_e330() || machine_is_e350() ||
44 machine_is_e740() || machine_is_e750() ||
45 machine_is_e400())
46 pxa_set_udc_info(&e7xx_udc_mach_info);
47 else if (machine_is_e800())
48 pxa_set_udc_info(&e800_udc_mach_info);
49
50 return 0;
51}
52
53module_init(eseries_udc_init);
54
55MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
56MODULE_DESCRIPTION("eseries UDC support");
57MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
new file mode 100644
index 000000000000..0143eed65398
--- /dev/null
+++ b/arch/arm/mach-pxa/ezx.c
@@ -0,0 +1,220 @@
1/*
2 * ezx.c - Common code for the EZX platform.
3 *
4 * Copyright (C) 2005-2006 Harald Welte <laforge@openezx.org>,
5 * 2007-2008 Daniel Ribeiro <drwyrm@gmail.com>,
6 * 2007-2008 Stefan Schmidt <stefan@datenfreihafen.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/platform_device.h>
17#include <linux/delay.h>
18#include <linux/pwm_backlight.h>
19
20#include <asm/setup.h>
21#include <asm/arch/pxafb.h>
22#include <asm/arch/ohci.h>
23#include <asm/arch/i2c.h>
24
25#include <asm/arch/mfp-pxa27x.h>
26#include <asm/arch/pxa-regs.h>
27#include <asm/arch/pxa2xx-regs.h>
28#include <asm/mach-types.h>
29#include <asm/mach/arch.h>
30
31#include "devices.h"
32#include "generic.h"
33
34static struct platform_pwm_backlight_data ezx_backlight_data = {
35 .pwm_id = 0,
36 .max_brightness = 1023,
37 .dft_brightness = 1023,
38 .pwm_period_ns = 78770,
39};
40
41static struct platform_device ezx_backlight_device = {
42 .name = "pwm-backlight",
43 .dev = {
44 .parent = &pxa27x_device_pwm0.dev,
45 .platform_data = &ezx_backlight_data,
46 },
47};
48
49static struct pxafb_mode_info mode_ezx_old = {
50 .pixclock = 150000,
51 .xres = 240,
52 .yres = 320,
53 .bpp = 16,
54 .hsync_len = 10,
55 .left_margin = 20,
56 .right_margin = 10,
57 .vsync_len = 2,
58 .upper_margin = 3,
59 .lower_margin = 2,
60 .sync = 0,
61};
62
63static struct pxafb_mach_info ezx_fb_info_1 = {
64 .modes = &mode_ezx_old,
65 .num_modes = 1,
66 .lcd_conn = LCD_COLOR_TFT_16BPP,
67};
68
69static struct pxafb_mode_info mode_72r89803y01 = {
70 .pixclock = 192308,
71 .xres = 240,
72 .yres = 320,
73 .bpp = 32,
74 .depth = 18,
75 .hsync_len = 10,
76 .left_margin = 20,
77 .right_margin = 10,
78 .vsync_len = 2,
79 .upper_margin = 3,
80 .lower_margin = 2,
81 .sync = 0,
82};
83
84static struct pxafb_mach_info ezx_fb_info_2 = {
85 .modes = &mode_72r89803y01,
86 .num_modes = 1,
87 .lcd_conn = LCD_COLOR_TFT_18BPP,
88};
89
90static struct platform_device *devices[] __initdata = {
91 &ezx_backlight_device,
92};
93
94static unsigned long ezx_pin_config[] __initdata = {
95 /* PWM backlight */
96 GPIO16_PWM0_OUT,
97
98 /* BTUART */
99 GPIO42_BTUART_RXD,
100 GPIO43_BTUART_TXD,
101 GPIO44_BTUART_CTS,
102 GPIO45_BTUART_RTS,
103
104 /* STUART */
105 GPIO46_STUART_RXD,
106 GPIO47_STUART_TXD,
107
108 /* For A780 support (connected with Neptune GSM chip) */
109 GPIO30_USB_P3_2, /* ICL_TXENB */
110 GPIO31_USB_P3_6, /* ICL_VPOUT */
111 GPIO90_USB_P3_5, /* ICL_VPIN */
112 GPIO91_USB_P3_1, /* ICL_XRXD */
113 GPIO56_USB_P3_4, /* ICL_VMOUT */
114 GPIO113_USB_P3_3, /* /ICL_VMIN */
115};
116
117static void __init ezx_init(void)
118{
119 pxa2xx_mfp_config(ARRAY_AND_SIZE(ezx_pin_config));
120 pxa_set_i2c_info(NULL);
121 if (machine_is_ezx_a780() || machine_is_ezx_e680())
122 set_pxa_fb_info(&ezx_fb_info_1);
123 else
124 set_pxa_fb_info(&ezx_fb_info_2);
125
126 platform_add_devices(devices, ARRAY_SIZE(devices));
127}
128
129static void __init ezx_fixup(struct machine_desc *desc, struct tag *tags,
130 char **cmdline, struct meminfo *mi)
131{
132 /* We have two ram chips. First one with 32MB at 0xA0000000 and a second
133 * 16MB one at 0xAC000000
134 */
135 mi->nr_banks = 2;
136 mi->bank[0].start = 0xa0000000;
137 mi->bank[0].node = 0;
138 mi->bank[0].size = (32*1024*1024);
139 mi->bank[1].start = 0xac000000;
140 mi->bank[1].node = 1;
141 mi->bank[1].size = (16*1024*1024);
142}
143
144#ifdef CONFIG_MACH_EZX_A780
145MACHINE_START(EZX_A780, "Motorola EZX A780")
146 .phys_io = 0x40000000,
147 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
148 .fixup = ezx_fixup,
149 .boot_params = 0xa0000100,
150 .map_io = pxa_map_io,
151 .init_irq = pxa27x_init_irq,
152 .timer = &pxa_timer,
153 .init_machine = &ezx_init,
154MACHINE_END
155#endif
156
157#ifdef CONFIG_MACH_EZX_E680
158MACHINE_START(EZX_E680, "Motorola EZX E680")
159 .phys_io = 0x40000000,
160 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
161 .fixup = ezx_fixup,
162 .boot_params = 0xa0000100,
163 .map_io = pxa_map_io,
164 .init_irq = pxa27x_init_irq,
165 .timer = &pxa_timer,
166 .init_machine = &ezx_init,
167MACHINE_END
168#endif
169
170#ifdef CONFIG_MACH_EZX_A1200
171MACHINE_START(EZX_A1200, "Motorola EZX A1200")
172 .phys_io = 0x40000000,
173 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
174 .fixup = ezx_fixup,
175 .boot_params = 0xa0000100,
176 .map_io = pxa_map_io,
177 .init_irq = pxa27x_init_irq,
178 .timer = &pxa_timer,
179 .init_machine = &ezx_init,
180MACHINE_END
181#endif
182
183#ifdef CONFIG_MACH_EZX_A910
184MACHINE_START(EZX_A910, "Motorola EZX A910")
185 .phys_io = 0x40000000,
186 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
187 .fixup = ezx_fixup,
188 .boot_params = 0xa0000100,
189 .map_io = pxa_map_io,
190 .init_irq = pxa27x_init_irq,
191 .timer = &pxa_timer,
192 .init_machine = &ezx_init,
193MACHINE_END
194#endif
195
196#ifdef CONFIG_MACH_EZX_E6
197MACHINE_START(EZX_E6, "Motorola EZX E6")
198 .phys_io = 0x40000000,
199 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
200 .fixup = ezx_fixup,
201 .boot_params = 0xa0000100,
202 .map_io = pxa_map_io,
203 .init_irq = pxa27x_init_irq,
204 .timer = &pxa_timer,
205 .init_machine = &ezx_init,
206MACHINE_END
207#endif
208
209#ifdef CONFIG_MACH_EZX_E2
210MACHINE_START(EZX_E2, "Motorola EZX E2")
211 .phys_io = 0x40000000,
212 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
213 .fixup = ezx_fixup,
214 .boot_params = 0xa0000100,
215 .map_io = pxa_map_io,
216 .init_irq = pxa27x_init_irq,
217 .timer = &pxa_timer,
218 .init_machine = &ezx_init,
219MACHINE_END
220#endif
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 530654474bb2..dd759d03a9fd 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/smc91x.h>
23 24
24#include <asm/types.h> 25#include <asm/types.h>
25#include <asm/setup.h> 26#include <asm/setup.h>
@@ -38,6 +39,7 @@
38#include <asm/arch/pxafb.h> 39#include <asm/arch/pxafb.h>
39#include <asm/arch/ssp.h> 40#include <asm/arch/ssp.h>
40#include <asm/arch/pxa27x_keypad.h> 41#include <asm/arch/pxa27x_keypad.h>
42#include <asm/arch/pxa3xx_nand.h>
41#include <asm/arch/littleton.h> 43#include <asm/arch/littleton.h>
42 44
43#include "generic.h" 45#include "generic.h"
@@ -101,18 +103,26 @@ static struct resource smc91x_resources[] = {
101 [1] = { 103 [1] = {
102 .start = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)), 104 .start = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)),
103 .end = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)), 105 .end = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)),
104 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING, 106 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
105 } 107 }
106}; 108};
107 109
110static struct smc91x_platdata littleton_smc91x_info = {
111 .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT |
112 SMC91X_NOWAIT | SMC91X_USE_DMA,
113};
114
108static struct platform_device smc91x_device = { 115static struct platform_device smc91x_device = {
109 .name = "smc91x", 116 .name = "smc91x",
110 .id = 0, 117 .id = 0,
111 .num_resources = ARRAY_SIZE(smc91x_resources), 118 .num_resources = ARRAY_SIZE(smc91x_resources),
112 .resource = smc91x_resources, 119 .resource = smc91x_resources,
120 .dev = {
121 .platform_data = &littleton_smc91x_info,
122 },
113}; 123};
114 124
115#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULES) 125#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
116/* use bit 30, 31 as the indicator of command parameter number */ 126/* use bit 30, 31 as the indicator of command parameter number */
117#define CMD0(x) ((0x00000000) | ((x) << 9)) 127#define CMD0(x) ((0x00000000) | ((x) << 9))
118#define CMD1(x, x1) ((0x40000000) | ((x) << 9) | 0x100 | (x1)) 128#define CMD1(x, x1) ((0x40000000) | ((x) << 9) | 0x100 | (x1))
@@ -311,9 +321,9 @@ static void littleton_init_lcd(void)
311} 321}
312#else 322#else
313static inline void littleton_init_lcd(void) {}; 323static inline void littleton_init_lcd(void) {};
314#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULES */ 324#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
315 325
316#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULES) 326#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
317static unsigned int littleton_matrix_key_map[] = { 327static unsigned int littleton_matrix_key_map[] = {
318 /* KEY(row, col, key_code) */ 328 /* KEY(row, col, key_code) */
319 KEY(1, 3, KEY_0), KEY(0, 0, KEY_1), KEY(1, 0, KEY_2), KEY(2, 0, KEY_3), 329 KEY(1, 3, KEY_0), KEY(0, 0, KEY_1), KEY(1, 0, KEY_2), KEY(2, 0, KEY_3),
@@ -361,6 +371,57 @@ static void __init littleton_init_keypad(void)
361static inline void littleton_init_keypad(void) {} 371static inline void littleton_init_keypad(void) {}
362#endif 372#endif
363 373
374#if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE)
375static struct mtd_partition littleton_nand_partitions[] = {
376 [0] = {
377 .name = "Bootloader",
378 .offset = 0,
379 .size = 0x060000,
380 .mask_flags = MTD_WRITEABLE, /* force read-only */
381 },
382 [1] = {
383 .name = "Kernel",
384 .offset = 0x060000,
385 .size = 0x200000,
386 .mask_flags = MTD_WRITEABLE, /* force read-only */
387 },
388 [2] = {
389 .name = "Filesystem",
390 .offset = 0x0260000,
391 .size = 0x3000000, /* 48M - rootfs */
392 },
393 [3] = {
394 .name = "MassStorage",
395 .offset = 0x3260000,
396 .size = 0x3d40000,
397 },
398 [4] = {
399 .name = "BBT",
400 .offset = 0x6FA0000,
401 .size = 0x80000,
402 .mask_flags = MTD_WRITEABLE, /* force read-only */
403 },
404 /* NOTE: we reserve some blocks at the end of the NAND flash for
405 * bad block management, and the max number of relocation blocks
406 * differs on different platforms. Please take care with it when
407 * defining the partition table.
408 */
409};
410
411static struct pxa3xx_nand_platform_data littleton_nand_info = {
412 .enable_arbiter = 1,
413 .parts = littleton_nand_partitions,
414 .nr_parts = ARRAY_SIZE(littleton_nand_partitions),
415};
416
417static void __init littleton_init_nand(void)
418{
419 pxa3xx_set_nand_info(&littleton_nand_info);
420}
421#else
422static inline void littleton_init_nand(void) {}
423#endif /* CONFIG_MTD_NAND_PXA3xx || CONFIG_MTD_NAND_PXA3xx_MODULE */
424
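For reference, the partition offsets above tile the device contiguously; checking the arithmetic with the values straight from the table:

	0x0000000 + 0x060000  = 0x0060000  (Bootloader ends where Kernel starts)
	0x0060000 + 0x200000  = 0x0260000  (Kernel ends where Filesystem starts)
	0x0260000 + 0x3000000 = 0x3260000  (Filesystem is the 48M noted in the comment)
	0x3260000 + 0x3d40000 = 0x6FA0000  (MassStorage ends where BBT starts)
	0x6FA0000 + 0x080000  = 0x7020000  (end of BBT)

Everything above 0x7020000 is left out of the partition table, which is presumably the area the note about reserved bad-block relocation blocks refers to.
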
364static void __init littleton_init(void) 425static void __init littleton_init(void)
365{ 426{
366 /* initialize MFP configurations */ 427 /* initialize MFP configurations */
@@ -374,6 +435,7 @@ static void __init littleton_init(void)
374 435
375 littleton_init_lcd(); 436 littleton_init_lcd();
376 littleton_init_keypad(); 437 littleton_init_keypad();
438 littleton_init_nand();
377} 439}
378 440
379MACHINE_START(LITTLETON, "Marvell Form Factor Development Platform (aka Littleton)") 441MACHINE_START(LITTLETON, "Marvell Form Factor Development Platform (aka Littleton)")
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index a3fae4139203..ac26423cd20c 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/mtd/mtd.h> 22#include <linux/mtd/mtd.h>
23#include <linux/mtd/partitions.h> 23#include <linux/mtd/partitions.h>
24#include <linux/smc91x.h>
24 25
25#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
26#include <linux/spi/ads7846.h> 27#include <linux/spi/ads7846.h>
@@ -226,14 +227,6 @@ static struct pxa2xx_spi_master pxa_ssp_master_info = {
226 .num_chipselect = 0, 227 .num_chipselect = 0,
227}; 228};
228 229
229static struct platform_device pxa_ssp = {
230 .name = "pxa2xx-spi",
231 .id = 1,
232 .dev = {
233 .platform_data = &pxa_ssp_master_info,
234 },
235};
236
237static int lubbock_ads7846_pendown_state(void) 230static int lubbock_ads7846_pendown_state(void)
238{ 231{
239 /* TS_BUSY is bit 8 in LUB_MISC_RD, but pendown is irq-only */ 232 /* TS_BUSY is bit 8 in LUB_MISC_RD, but pendown is irq-only */
@@ -292,11 +285,18 @@ static struct resource smc91x_resources[] = {
292 }, 285 },
293}; 286};
294 287
288static struct smc91x_platdata lubbock_smc91x_info = {
289 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_IO_SHIFT_2,
290};
291
295static struct platform_device smc91x_device = { 292static struct platform_device smc91x_device = {
296 .name = "smc91x", 293 .name = "smc91x",
297 .id = -1, 294 .id = -1,
298 .num_resources = ARRAY_SIZE(smc91x_resources), 295 .num_resources = ARRAY_SIZE(smc91x_resources),
299 .resource = smc91x_resources, 296 .resource = smc91x_resources,
297 .dev = {
298 .platform_data = &lubbock_smc91x_info,
299 },
300}; 300};
301 301
302static struct resource flash_resources[] = { 302static struct resource flash_resources[] = {
@@ -367,7 +367,6 @@ static struct platform_device *devices[] __initdata = {
367 &smc91x_device, 367 &smc91x_device,
368 &lubbock_flash_device[0], 368 &lubbock_flash_device[0],
369 &lubbock_flash_device[1], 369 &lubbock_flash_device[1],
370 &pxa_ssp,
371}; 370};
372 371
373static struct pxafb_mode_info sharp_lm8v31_mode = { 372static struct pxafb_mode_info sharp_lm8v31_mode = {
@@ -471,6 +470,7 @@ static void lubbock_irda_transceiver_mode(struct device *dev, int mode)
471 } else if (mode & IR_FIRMODE) { 470 } else if (mode & IR_FIRMODE) {
472 LUB_MISC_WR |= 1 << 4; 471 LUB_MISC_WR |= 1 << 4;
473 } 472 }
473 pxa2xx_transceiver_mode(dev, mode);
474 local_irq_restore(flags); 474 local_irq_restore(flags);
475} 475}
476 476
@@ -501,6 +501,7 @@ static void __init lubbock_init(void)
501 lubbock_flash_data[flashboot].name = "boot-rom"; 501 lubbock_flash_data[flashboot].name = "boot-rom";
502 (void) platform_add_devices(devices, ARRAY_SIZE(devices)); 502 (void) platform_add_devices(devices, ARRAY_SIZE(devices));
503 503
504 pxa2xx_set_spi_info(1, &pxa_ssp_master_info);
504 spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); 505 spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
505} 506}
506 507
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 01b2fa790217..c9d274f0048f 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -17,17 +17,15 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/gpio.h>
20#include <linux/gpio_keys.h> 21#include <linux/gpio_keys.h>
21#include <linux/input.h> 22#include <linux/input.h>
22#include <linux/mfd/htc-egpio.h> 23#include <linux/mfd/htc-egpio.h>
23#include <linux/mfd/htc-pasic3.h> 24#include <linux/mfd/htc-pasic3.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/map.h>
26#include <linux/mtd/physmap.h> 25#include <linux/mtd/physmap.h>
27#include <linux/pda_power.h> 26#include <linux/pda_power.h>
28#include <linux/pwm_backlight.h> 27#include <linux/pwm_backlight.h>
29 28
30#include <asm/gpio.h>
31#include <asm/hardware.h> 29#include <asm/hardware.h>
32#include <asm/mach-types.h> 30#include <asm/mach-types.h>
33#include <asm/mach/arch.h> 31#include <asm/mach/arch.h>
@@ -44,7 +42,7 @@
44#include "devices.h" 42#include "devices.h"
45#include "generic.h" 43#include "generic.h"
46 44
47static unsigned long magician_pin_config[] = { 45static unsigned long magician_pin_config[] __initdata = {
48 46
49 /* SDRAM and Static Memory I/O Signals */ 47 /* SDRAM and Static Memory I/O Signals */
50 GPIO20_nSDCS_2, 48 GPIO20_nSDCS_2,
@@ -134,6 +132,7 @@ static unsigned long magician_pin_config[] = {
134static void magician_irda_transceiver_mode(struct device *dev, int mode) 132static void magician_irda_transceiver_mode(struct device *dev, int mode)
135{ 133{
136 gpio_set_value(GPIO83_MAGICIAN_nIR_EN, mode & IR_OFF); 134 gpio_set_value(GPIO83_MAGICIAN_nIR_EN, mode & IR_OFF);
135 pxa2xx_transceiver_mode(dev, mode);
137} 136}
138 137
139static struct pxaficp_platform_data magician_ficp_info = { 138static struct pxaficp_platform_data magician_ficp_info = {
@@ -399,6 +398,7 @@ static struct platform_pwm_backlight_data backlight_data = {
399 398
400static struct platform_device backlight = { 399static struct platform_device backlight = {
401 .name = "pwm-backlight", 400 .name = "pwm-backlight",
401 .id = -1,
402 .dev = { 402 .dev = {
403 .parent = &pxa27x_device_pwm0.dev, 403 .parent = &pxa27x_device_pwm0.dev,
404 .platform_data = &backlight_data, 404 .platform_data = &backlight_data,
@@ -511,6 +511,37 @@ static struct platform_device pasic3 = {
511 * External power 511 * External power
512 */ 512 */
513 513
514static int power_supply_init(struct device *dev)
515{
516 int ret;
517
518 ret = gpio_request(EGPIO_MAGICIAN_CABLE_STATE_AC, "CABLE_STATE_AC");
519 if (ret)
520 goto err_cs_ac;
521 ret = gpio_request(EGPIO_MAGICIAN_CABLE_STATE_USB, "CABLE_STATE_USB");
522 if (ret)
523 goto err_cs_usb;
524 ret = gpio_request(EGPIO_MAGICIAN_CHARGE_EN, "CHARGE_EN");
525 if (ret)
526 goto err_chg_en;
527 ret = gpio_request(GPIO30_MAGICIAN_nCHARGE_EN, "nCHARGE_EN");
528 if (!ret)
529 ret = gpio_direction_output(GPIO30_MAGICIAN_nCHARGE_EN, 0);
530 if (ret)
531 goto err_nchg_en;
532
533 return 0;
534
535err_nchg_en:
536 gpio_free(EGPIO_MAGICIAN_CHARGE_EN);
537err_chg_en:
538 gpio_free(EGPIO_MAGICIAN_CABLE_STATE_USB);
539err_cs_usb:
540 gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC);
541err_cs_ac:
542 return ret;
543}
544
514static int magician_is_ac_online(void) 545static int magician_is_ac_online(void)
515{ 546{
516 return gpio_get_value(EGPIO_MAGICIAN_CABLE_STATE_AC); 547 return gpio_get_value(EGPIO_MAGICIAN_CABLE_STATE_AC);
@@ -527,14 +558,24 @@ static void magician_set_charge(int flags)
527 gpio_set_value(EGPIO_MAGICIAN_CHARGE_EN, flags); 558 gpio_set_value(EGPIO_MAGICIAN_CHARGE_EN, flags);
528} 559}
529 560
561static void power_supply_exit(struct device *dev)
562{
563 gpio_free(GPIO30_MAGICIAN_nCHARGE_EN);
564 gpio_free(EGPIO_MAGICIAN_CHARGE_EN);
565 gpio_free(EGPIO_MAGICIAN_CABLE_STATE_USB);
566 gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC);
567}
568
530static char *magician_supplicants[] = { 569static char *magician_supplicants[] = {
531 "ds2760-battery.0", "backup-battery" 570 "ds2760-battery.0", "backup-battery"
532}; 571};
533 572
534static struct pda_power_pdata power_supply_info = { 573static struct pda_power_pdata power_supply_info = {
574 .init = power_supply_init,
535 .is_ac_online = magician_is_ac_online, 575 .is_ac_online = magician_is_ac_online,
536 .is_usb_online = magician_is_usb_online, 576 .is_usb_online = magician_is_usb_online,
537 .set_charge = magician_set_charge, 577 .set_charge = magician_set_charge,
578 .exit = power_supply_exit,
538 .supplied_to = magician_supplicants, 579 .supplied_to = magician_supplicants,
539 .num_supplicants = ARRAY_SIZE(magician_supplicants), 580 .num_supplicants = ARRAY_SIZE(magician_supplicants),
540}; 581};
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index f2e9e7c4da8e..851ec2d9b699 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -26,6 +26,7 @@
26#include <linux/input.h> 26#include <linux/input.h>
27#include <linux/gpio_keys.h> 27#include <linux/gpio_keys.h>
28#include <linux/pwm_backlight.h> 28#include <linux/pwm_backlight.h>
29#include <linux/smc91x.h>
29 30
30#include <asm/types.h> 31#include <asm/types.h>
31#include <asm/setup.h> 32#include <asm/setup.h>
@@ -110,9 +111,9 @@ static unsigned long mainstone_pin_config[] = {
110 GPIO45_AC97_SYSCLK, 111 GPIO45_AC97_SYSCLK,
111 112
112 /* Keypad */ 113 /* Keypad */
113 GPIO93_KP_DKIN_0 | WAKEUP_ON_LEVEL_HIGH, 114 GPIO93_KP_DKIN_0,
114 GPIO94_KP_DKIN_1 | WAKEUP_ON_LEVEL_HIGH, 115 GPIO94_KP_DKIN_1,
115 GPIO95_KP_DKIN_2 | WAKEUP_ON_LEVEL_HIGH, 116 GPIO95_KP_DKIN_2,
116 GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH, 117 GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
117 GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH, 118 GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH,
118 GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH, 119 GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH,
@@ -240,11 +241,19 @@ static struct resource smc91x_resources[] = {
240 } 241 }
241}; 242};
242 243
244static struct smc91x_platdata mainstone_smc91x_info = {
245 .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
246 SMC91X_NOWAIT | SMC91X_USE_DMA,
247};
248
243static struct platform_device smc91x_device = { 249static struct platform_device smc91x_device = {
244 .name = "smc91x", 250 .name = "smc91x",
245 .id = 0, 251 .id = 0,
246 .num_resources = ARRAY_SIZE(smc91x_resources), 252 .num_resources = ARRAY_SIZE(smc91x_resources),
247 .resource = smc91x_resources, 253 .resource = smc91x_resources,
254 .dev = {
255 .platform_data = &mainstone_smc91x_info,
256 },
248}; 257};
249 258
250static int mst_audio_startup(struct snd_pcm_substream *substream, void *priv) 259static int mst_audio_startup(struct snd_pcm_substream *substream, void *priv)
@@ -455,6 +464,7 @@ static void mainstone_irda_transceiver_mode(struct device *dev, int mode)
455 } else if (mode & IR_FIRMODE) { 464 } else if (mode & IR_FIRMODE) {
456 MST_MSCWR1 |= MST_MSCWR1_IRDA_FIR; 465 MST_MSCWR1 |= MST_MSCWR1_IRDA_FIR;
457 } 466 }
467 pxa2xx_transceiver_mode(dev, mode);
458 if (mode & IR_OFF) { 468 if (mode & IR_OFF) {
459 MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_OFF; 469 MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_OFF;
460 } else { 470 } else {
@@ -513,7 +523,7 @@ static struct pxaohci_platform_data mainstone_ohci_platform_data = {
513 .init = mainstone_ohci_init, 523 .init = mainstone_ohci_init,
514}; 524};
515 525
516#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULES) 526#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
517static unsigned int mainstone_matrix_keys[] = { 527static unsigned int mainstone_matrix_keys[] = {
518 KEY(0, 0, KEY_A), KEY(1, 0, KEY_B), KEY(2, 0, KEY_C), 528 KEY(0, 0, KEY_A), KEY(1, 0, KEY_B), KEY(2, 0, KEY_C),
519 KEY(3, 0, KEY_D), KEY(4, 0, KEY_E), KEY(5, 0, KEY_F), 529 KEY(3, 0, KEY_D), KEY(4, 0, KEY_E), KEY(5, 0, KEY_F),
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index d1cdb4ecb0b8..fd4545eab803 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -39,6 +39,28 @@ struct gpio_desc {
39 39
40static struct gpio_desc gpio_desc[MFP_PIN_GPIO127 + 1]; 40static struct gpio_desc gpio_desc[MFP_PIN_GPIO127 + 1];
41 41
42static int __mfp_config_lpm(unsigned gpio, unsigned long lpm)
43{
44 unsigned mask = GPIO_bit(gpio);
45
46 /* low power state */
47 switch (lpm) {
48 case MFP_LPM_DRIVE_HIGH:
49 PGSR(gpio) |= mask;
50 break;
51 case MFP_LPM_DRIVE_LOW:
52 PGSR(gpio) &= ~mask;
53 break;
54 case MFP_LPM_INPUT:
55 break;
56 default:
57 pr_warning("%s: invalid low power state for GPIO%d\n",
58 __func__, gpio);
59 return -EINVAL;
60 }
61 return 0;
62}
63
42static int __mfp_config_gpio(unsigned gpio, unsigned long c) 64static int __mfp_config_gpio(unsigned gpio, unsigned long c)
43{ 65{
44 unsigned long gafr, mask = GPIO_bit(gpio); 66 unsigned long gafr, mask = GPIO_bit(gpio);
@@ -57,21 +79,8 @@ static int __mfp_config_gpio(unsigned gpio, unsigned long c)
57 else 79 else
58 GPDR(gpio) &= ~mask; 80 GPDR(gpio) &= ~mask;
59 81
60 /* low power state */ 82 if (__mfp_config_lpm(gpio, c & MFP_LPM_STATE_MASK))
61 switch (c & MFP_LPM_STATE_MASK) {
62 case MFP_LPM_DRIVE_HIGH:
63 PGSR(gpio) |= mask;
64 break;
65 case MFP_LPM_DRIVE_LOW:
66 PGSR(gpio) &= ~mask;
67 break;
68 case MFP_LPM_INPUT:
69 break;
70 default:
71 pr_warning("%s: invalid low power state for GPIO%d\n",
72 __func__, gpio);
73 return -EINVAL; 83 return -EINVAL;
74 }
75 84
76 /* give early warning if MFP_LPM_CAN_WAKEUP is set on the 85 /* give early warning if MFP_LPM_CAN_WAKEUP is set on the
77 * configurations of those pins not able to wakeup 86 * configurations of those pins not able to wakeup
@@ -91,6 +100,18 @@ static int __mfp_config_gpio(unsigned gpio, unsigned long c)
91 return 0; 100 return 0;
92} 101}
93 102
103static inline int __mfp_validate(int mfp)
104{
105 int gpio = mfp_to_gpio(mfp);
106
107 if ((mfp > MFP_PIN_GPIO127) || !gpio_desc[gpio].valid) {
108 pr_warning("%s: GPIO%d is invalid pin\n", __func__, gpio);
109 return -1;
110 }
111
112 return gpio;
113}
114
94void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num) 115void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num)
95{ 116{
96 unsigned long flags; 117 unsigned long flags;
@@ -99,13 +120,9 @@ void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num)
99 120
100 for (i = 0, c = mfp_cfgs; i < num; i++, c++) { 121 for (i = 0, c = mfp_cfgs; i < num; i++, c++) {
101 122
102 gpio = mfp_to_gpio(MFP_PIN(*c)); 123 gpio = __mfp_validate(MFP_PIN(*c));
103 124 if (gpio < 0)
104 if (!gpio_desc[gpio].valid) {
105 pr_warning("%s: GPIO%d is invalid pin\n",
106 __func__, gpio);
107 continue; 125 continue;
108 }
109 126
110 local_irq_save(flags); 127 local_irq_save(flags);
111 128
@@ -116,6 +133,20 @@ void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num)
116 } 133 }
117} 134}
118 135
136void pxa2xx_mfp_set_lpm(int mfp, unsigned long lpm)
137{
138 unsigned long flags;
139 int gpio;
140
141 gpio = __mfp_validate(mfp);
142 if (gpio < 0)
143 return;
144
145 local_irq_save(flags);
146 __mfp_config_lpm(gpio, lpm);
147 local_irq_restore(flags);
148}
149
119int gpio_set_wake(unsigned int gpio, unsigned int on) 150int gpio_set_wake(unsigned int gpio, unsigned int on)
120{ 151{
121 struct gpio_desc *d; 152 struct gpio_desc *d;
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
new file mode 100644
index 000000000000..408657a24f8c
--- /dev/null
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -0,0 +1,416 @@
1/*
2 * Hardware definitions for PalmTX
3 *
4 * Author: Marek Vasut <marek.vasut@gmail.com>
5 *
6 * Based on the work of:
7 * Alex Osborne <ato@meshy.org>
8 * Cristiano P. <cristianop@users.sourceforge.net>
9 * Jan Herman <2hp@seznam.cz>
10 * Michal Hrusecky
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 * (find more info at www.hackndev.com)
17 *
18 */
19
20#include <linux/platform_device.h>
21#include <linux/delay.h>
22#include <linux/irq.h>
23#include <linux/gpio_keys.h>
24#include <linux/input.h>
25#include <linux/pda_power.h>
26#include <linux/pwm_backlight.h>
27#include <linux/gpio.h>
28
29#include <asm/mach-types.h>
30#include <asm/mach/arch.h>
31#include <asm/mach/map.h>
32
33#include <asm/arch/audio.h>
34#include <asm/arch/palmtx.h>
35#include <asm/arch/mmc.h>
36#include <asm/arch/pxafb.h>
37#include <asm/arch/pxa-regs.h>
38#include <asm/arch/mfp-pxa27x.h>
39#include <asm/arch/irda.h>
40#include <asm/arch/pxa27x_keypad.h>
41#include <asm/arch/udc.h>
42
43#include "generic.h"
44#include "devices.h"
45
46/******************************************************************************
47 * Pin configuration
48 ******************************************************************************/
49static unsigned long palmtx_pin_config[] __initdata = {
50 /* MMC */
51 GPIO32_MMC_CLK,
52 GPIO92_MMC_DAT_0,
53 GPIO109_MMC_DAT_1,
54 GPIO110_MMC_DAT_2,
55 GPIO111_MMC_DAT_3,
56 GPIO112_MMC_CMD,
57
58 /* AC97 */
59 GPIO28_AC97_BITCLK,
60 GPIO29_AC97_SDATA_IN_0,
61 GPIO30_AC97_SDATA_OUT,
62 GPIO31_AC97_SYNC,
63
64 /* IrDA */
65 GPIO46_FICP_RXD,
66 GPIO47_FICP_TXD,
67
68 /* PWM */
69 GPIO16_PWM0_OUT,
70
71 /* USB */
72 GPIO13_GPIO,
73
74 /* PCMCIA */
75 GPIO48_nPOE,
76 GPIO49_nPWE,
77 GPIO50_nPIOR,
78 GPIO51_nPIOW,
79 GPIO85_nPCE_1,
80 GPIO54_nPCE_2,
81 GPIO79_PSKTSEL,
82 GPIO55_nPREG,
83 GPIO56_nPWAIT,
84 GPIO57_nIOIS16,
85};
86
87/******************************************************************************
88 * SD/MMC card controller
89 ******************************************************************************/
90static int palmtx_mci_init(struct device *dev, irq_handler_t palmtx_detect_int,
91 void *data)
92{
93 int err = 0;
94
95 /* Setup an interrupt for detecting card insert/remove events */
96 err = request_irq(IRQ_GPIO_PALMTX_SD_DETECT_N, palmtx_detect_int,
97 IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
98 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
99 "SD/MMC card detect", data);
100 if (err) {
101 printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n",
102 __func__);
103 return err;
104 }
105
106 err = gpio_request(GPIO_NR_PALMTX_SD_POWER, "SD_POWER");
107 if (err)
108 goto pwr_err;
109
110 err = gpio_request(GPIO_NR_PALMTX_SD_READONLY, "SD_READONLY");
111 if (err)
112 goto ro_err;
113
114 printk(KERN_DEBUG "%s: irq registered\n", __func__);
115
116 return 0;
117
118ro_err:
119 gpio_free(GPIO_NR_PALMTX_SD_POWER);
120pwr_err:
121 free_irq(IRQ_GPIO_PALMTX_SD_DETECT_N, data);
122 return err;
123}
124
125static void palmtx_mci_exit(struct device *dev, void *data)
126{
127 gpio_free(GPIO_NR_PALMTX_SD_READONLY);
128 gpio_free(GPIO_NR_PALMTX_SD_POWER);
129 free_irq(IRQ_GPIO_PALMTX_SD_DETECT_N, data);
130}
131
132static void palmtx_mci_power(struct device *dev, unsigned int vdd)
133{
134 struct pxamci_platform_data *p_d = dev->platform_data;
135 gpio_set_value(GPIO_NR_PALMTX_SD_POWER, p_d->ocr_mask & (1 << vdd));
136}
137
138static int palmtx_mci_get_ro(struct device *dev)
139{
140 return gpio_get_value(GPIO_NR_PALMTX_SD_READONLY);
141}
142
143static struct pxamci_platform_data palmtx_mci_platform_data = {
144 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
145 .setpower = palmtx_mci_power,
146 .get_ro = palmtx_mci_get_ro,
147 .init = palmtx_mci_init,
148 .exit = palmtx_mci_exit,
149};
150
151/******************************************************************************
152 * GPIO keyboard
153 ******************************************************************************/
154static unsigned int palmtx_matrix_keys[] = {
155 KEY(0, 0, KEY_POWER),
156 KEY(0, 1, KEY_F1),
157 KEY(0, 2, KEY_ENTER),
158
159 KEY(1, 0, KEY_F2),
160 KEY(1, 1, KEY_F3),
161 KEY(1, 2, KEY_F4),
162
163 KEY(2, 0, KEY_UP),
164 KEY(2, 2, KEY_DOWN),
165
166 KEY(3, 0, KEY_RIGHT),
167 KEY(3, 2, KEY_LEFT),
168
169};
170
171static struct pxa27x_keypad_platform_data palmtx_keypad_platform_data = {
172 .matrix_key_rows = 4,
173 .matrix_key_cols = 3,
174 .matrix_key_map = palmtx_matrix_keys,
175 .matrix_key_map_size = ARRAY_SIZE(palmtx_matrix_keys),
176
177 .debounce_interval = 30,
178};
179
180/******************************************************************************
181 * GPIO keys
182 ******************************************************************************/
183static struct gpio_keys_button palmtx_pxa_buttons[] = {
184 {KEY_F8, GPIO_NR_PALMTX_HOTSYNC_BUTTON_N, 1, "HotSync Button" },
185};
186
187static struct gpio_keys_platform_data palmtx_pxa_keys_data = {
188 .buttons = palmtx_pxa_buttons,
189 .nbuttons = ARRAY_SIZE(palmtx_pxa_buttons),
190};
191
192static struct platform_device palmtx_pxa_keys = {
193 .name = "gpio-keys",
194 .id = -1,
195 .dev = {
196 .platform_data = &palmtx_pxa_keys_data,
197 },
198};
199
200/******************************************************************************
201 * Backlight
202 ******************************************************************************/
203static int palmtx_backlight_init(struct device *dev)
204{
205 int ret;
206
207 ret = gpio_request(GPIO_NR_PALMTX_BL_POWER, "BL POWER");
208 if (ret)
209 goto err;
210 ret = gpio_request(GPIO_NR_PALMTX_LCD_POWER, "LCD POWER");
211 if (ret)
212 goto err2;
213
214 return 0;
215err2:
216 gpio_free(GPIO_NR_PALMTX_BL_POWER);
217err:
218 return ret;
219}
220
221static int palmtx_backlight_notify(int brightness)
222{
223 gpio_set_value(GPIO_NR_PALMTX_BL_POWER, brightness);
224 gpio_set_value(GPIO_NR_PALMTX_LCD_POWER, brightness);
225 return brightness;
226}
227
228static void palmtx_backlight_exit(struct device *dev)
229{
230 gpio_free(GPIO_NR_PALMTX_BL_POWER);
231 gpio_free(GPIO_NR_PALMTX_LCD_POWER);
232}
233
234static struct platform_pwm_backlight_data palmtx_backlight_data = {
235 .pwm_id = 0,
236 .max_brightness = PALMTX_MAX_INTENSITY,
237 .dft_brightness = PALMTX_MAX_INTENSITY,
238 .pwm_period_ns = PALMTX_PERIOD_NS,
239 .init = palmtx_backlight_init,
240 .notify = palmtx_backlight_notify,
241 .exit = palmtx_backlight_exit,
242};
243
244static struct platform_device palmtx_backlight = {
245 .name = "pwm-backlight",
246 .dev = {
247 .parent = &pxa27x_device_pwm0.dev,
248 .platform_data = &palmtx_backlight_data,
249 },
250};
251
252/******************************************************************************
253 * IrDA
254 ******************************************************************************/
255static void palmtx_irda_transceiver_mode(struct device *dev, int mode)
256{
257 gpio_set_value(GPIO_NR_PALMTX_IR_DISABLE, mode & IR_OFF);
258 pxa2xx_transceiver_mode(dev, mode);
259}
260
261static struct pxaficp_platform_data palmtx_ficp_platform_data = {
262 .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
263 .transceiver_mode = palmtx_irda_transceiver_mode,
264};
265
266/******************************************************************************
267 * UDC
268 ******************************************************************************/
269static void palmtx_udc_command(int cmd)
270{
271 gpio_set_value(GPIO_NR_PALMTX_USB_POWER, !cmd);
272 udelay(50);
273 gpio_set_value(GPIO_NR_PALMTX_USB_PULLUP, !cmd);
274}
275
276static struct pxa2xx_udc_mach_info palmtx_udc_info __initdata = {
277 .gpio_vbus = GPIO_NR_PALMTX_USB_DETECT_N,
278 .gpio_vbus_inverted = 1,
279 .udc_command = palmtx_udc_command,
280};
281
282/******************************************************************************
283 * Power supply
284 ******************************************************************************/
285static int power_supply_init(struct device *dev)
286{
287 int ret;
288
289 ret = gpio_request(GPIO_NR_PALMTX_POWER_DETECT, "CABLE_STATE_AC");
290 if (ret)
291 goto err_cs_ac;
292
293 ret = gpio_request(GPIO_NR_PALMTX_USB_DETECT_N, "CABLE_STATE_USB");
294 if (ret)
295 goto err_cs_usb;
296
297 return 0;
298
299err_cs_usb:
300 gpio_free(GPIO_NR_PALMTX_POWER_DETECT);
301err_cs_ac:
302 return ret;
303}
304
305static int palmtx_is_ac_online(void)
306{
307 return gpio_get_value(GPIO_NR_PALMTX_POWER_DETECT);
308}
309
310static int palmtx_is_usb_online(void)
311{
312 return !gpio_get_value(GPIO_NR_PALMTX_USB_DETECT_N);
313}
314
315static void power_supply_exit(struct device *dev)
316{
317 gpio_free(GPIO_NR_PALMTX_USB_DETECT_N);
318 gpio_free(GPIO_NR_PALMTX_POWER_DETECT);
319}
320
321static char *palmtx_supplicants[] = {
322 "main-battery",
323};
324
325static struct pda_power_pdata power_supply_info = {
326 .init = power_supply_init,
327 .is_ac_online = palmtx_is_ac_online,
328 .is_usb_online = palmtx_is_usb_online,
329 .exit = power_supply_exit,
330 .supplied_to = palmtx_supplicants,
331 .num_supplicants = ARRAY_SIZE(palmtx_supplicants),
332};
333
334static struct platform_device power_supply = {
335 .name = "pda-power",
336 .id = -1,
337 .dev = {
338 .platform_data = &power_supply_info,
339 },
340};
341
342/******************************************************************************
343 * Framebuffer
344 ******************************************************************************/
345static struct pxafb_mode_info palmtx_lcd_modes[] = {
346{
347 .pixclock = 57692,
348 .xres = 320,
349 .yres = 480,
350 .bpp = 16,
351
352 .left_margin = 32,
353 .right_margin = 1,
354 .upper_margin = 7,
355 .lower_margin = 1,
356
357 .hsync_len = 4,
358 .vsync_len = 1,
359},
360};
361
362static struct pxafb_mach_info palmtx_lcd_screen = {
363 .modes = palmtx_lcd_modes,
364 .num_modes = ARRAY_SIZE(palmtx_lcd_modes),
365 .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
366};
367
368/******************************************************************************
369 * Machine init
370 ******************************************************************************/
371static struct platform_device *devices[] __initdata = {
372#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
373 &palmtx_pxa_keys,
374#endif
375 &palmtx_backlight,
376 &power_supply,
377};
378
379static struct map_desc palmtx_io_desc[] __initdata = {
380{
381 .virtual = PALMTX_PCMCIA_VIRT,
382 .pfn = __phys_to_pfn(PALMTX_PCMCIA_PHYS),
383 .length = PALMTX_PCMCIA_SIZE,
384 .type = MT_DEVICE
385},
386};
387
388static void __init palmtx_map_io(void)
389{
390 pxa_map_io();
391 iotable_init(palmtx_io_desc, ARRAY_SIZE(palmtx_io_desc));
392}
393
394static void __init palmtx_init(void)
395{
396 pxa2xx_mfp_config(ARRAY_AND_SIZE(palmtx_pin_config));
397
398 set_pxa_fb_info(&palmtx_lcd_screen);
399 pxa_set_mci_info(&palmtx_mci_platform_data);
400 pxa_set_udc_info(&palmtx_udc_info);
401 pxa_set_ac97_info(NULL);
402 pxa_set_ficp_info(&palmtx_ficp_platform_data);
403 pxa_set_keypad_info(&palmtx_keypad_platform_data);
404
405 platform_add_devices(devices, ARRAY_SIZE(devices));
406}
407
408MACHINE_START(PALMTX, "Palm T|X")
409 .phys_io = PALMTX_PHYS_IO_START,
410 .io_pg_offst = io_p2v(0x40000000),
411 .boot_params = 0xa0000100,
412 .map_io = palmtx_map_io,
413 .init_irq = pxa27x_init_irq,
414 .timer = &pxa_timer,
415 .init_machine = palmtx_init
416MACHINE_END
diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c
index 3b945eb0aee3..377f3be8ce57 100644
--- a/arch/arm/mach-pxa/pcm027.c
+++ b/arch/arm/mach-pxa/pcm027.c
@@ -24,7 +24,9 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/mtd/physmap.h> 25#include <linux/mtd/physmap.h>
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/spi/max7301.h>
27#include <linux/leds.h> 28#include <linux/leds.h>
29
28#include <asm/mach-types.h> 30#include <asm/mach-types.h>
29#include <asm/mach/arch.h> 31#include <asm/mach/arch.h>
30#include <asm/arch/hardware.h> 32#include <asm/arch/hardware.h>
@@ -108,6 +110,32 @@ static struct platform_device smc91x_device = {
108 .resource = smc91x_resources, 110 .resource = smc91x_resources,
109}; 111};
110 112
113/*
114 * SPI host and devices
115 */
116static struct pxa2xx_spi_master pxa_ssp_master_info = {
117 .num_chipselect = 1,
118};
119
120static struct max7301_platform_data max7301_info = {
121 .base = -1,
122};
123
124/* bus_num must match id in pxa2xx_set_spi_info() call */
125static struct spi_board_info spi_board_info[] __initdata = {
126 {
127 .modalias = "max7301",
128 .platform_data = &max7301_info,
129 .max_speed_hz = 13000000,
130 .bus_num = 1,
131 .chip_select = 0,
132 .mode = SPI_MODE_0,
133 },
134};
135
136/*
137 * NOR flash
138 */
111static struct physmap_flash_data pcm027_flash_data = { 139static struct physmap_flash_data pcm027_flash_data = {
112 .width = 4, 140 .width = 4,
113}; 141};
@@ -190,6 +218,9 @@ static void __init pcm027_init(void)
190#ifdef CONFIG_MACH_PCM990_BASEBOARD 218#ifdef CONFIG_MACH_PCM990_BASEBOARD
191 pcm990_baseboard_init(); 219 pcm990_baseboard_init();
192#endif 220#endif
221
222 pxa2xx_set_spi_info(1, &pxa_ssp_master_info);
223 spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
193} 224}
194 225
195static void __init pcm027_map_io(void) 226static void __init pcm027_map_io(void)
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 5d87c7c866e4..30023b00e476 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -33,14 +33,30 @@
33#include <asm/arch/camera.h> 33#include <asm/arch/camera.h>
34#include <asm/mach/map.h> 34#include <asm/mach/map.h>
35#include <asm/arch/pxa-regs.h> 35#include <asm/arch/pxa-regs.h>
36#include <asm/arch/pxa2xx-gpio.h>
37#include <asm/arch/audio.h> 36#include <asm/arch/audio.h>
38#include <asm/arch/mmc.h> 37#include <asm/arch/mmc.h>
39#include <asm/arch/ohci.h> 38#include <asm/arch/ohci.h>
40#include <asm/arch/pcm990_baseboard.h> 39#include <asm/arch/pcm990_baseboard.h>
41#include <asm/arch/pxafb.h> 40#include <asm/arch/pxafb.h>
41#include <asm/arch/mfp-pxa27x.h>
42 42
43#include "devices.h" 43#include "devices.h"
44#include "generic.h"
45
46static unsigned long pcm990_pin_config[] __initdata = {
47 /* MMC */
48 GPIO32_MMC_CLK,
49 GPIO112_MMC_CMD,
50 GPIO92_MMC_DAT_0,
51 GPIO109_MMC_DAT_1,
52 GPIO110_MMC_DAT_2,
53 GPIO111_MMC_DAT_3,
54 /* USB */
55 GPIO88_USBH1_PWR,
56 GPIO89_USBH1_PEN,
57 /* PWM0 */
58 GPIO16_PWM0_OUT,
59};
44 60
45/* 61/*
46 * pcm990_lcd_power - control power supply to the LCD 62 * pcm990_lcd_power - control power supply to the LCD
@@ -277,16 +293,6 @@ static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int,
277{ 293{
278 int err; 294 int err;
279 295
280 /*
281 * enable GPIO for PXA27x MMC controller
282 */
283 pxa_gpio_mode(GPIO32_MMCCLK_MD);
284 pxa_gpio_mode(GPIO112_MMCCMD_MD);
285 pxa_gpio_mode(GPIO92_MMCDAT0_MD);
286 pxa_gpio_mode(GPIO109_MMCDAT1_MD);
287 pxa_gpio_mode(GPIO110_MMCDAT2_MD);
288 pxa_gpio_mode(GPIO111_MMCDAT3_MD);
289
290 err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED, 296 err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED,
291 "MMC card detect", data); 297 "MMC card detect", data);
292 if (err) 298 if (err)
@@ -333,8 +339,6 @@ static struct pxamci_platform_data pcm990_mci_platform_data = {
333 */ 339 */
334static int pcm990_ohci_init(struct device *dev) 340static int pcm990_ohci_init(struct device *dev)
335{ 341{
336 pxa_gpio_mode(PCM990_USB_OVERCURRENT);
337 pxa_gpio_mode(PCM990_USB_PWR_EN);
338 /* 342 /*
339 * disable USB port 2 and 3 343 * disable USB port 2 and 3
340 * power sense is active low 344 * power sense is active low
@@ -361,23 +365,27 @@ static struct pxaohci_platform_data pcm990_ohci_platform_data = {
361 * PXA27x Camera specific stuff 365 * PXA27x Camera specific stuff
362 */ 366 */
363#if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) 367#if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE)
368static unsigned long pcm990_camera_pin_config[] = {
369 /* CIF */
370 GPIO98_CIF_DD_0,
371 GPIO105_CIF_DD_1,
372 GPIO104_CIF_DD_2,
373 GPIO103_CIF_DD_3,
374 GPIO95_CIF_DD_4,
375 GPIO94_CIF_DD_5,
376 GPIO93_CIF_DD_6,
377 GPIO108_CIF_DD_7,
378 GPIO107_CIF_DD_8,
379 GPIO106_CIF_DD_9,
380 GPIO42_CIF_MCLK,
381 GPIO45_CIF_PCLK,
382 GPIO43_CIF_FV,
383 GPIO44_CIF_LV,
384};
385
364static int pcm990_pxacamera_init(struct device *dev) 386static int pcm990_pxacamera_init(struct device *dev)
365{ 387{
366 pxa_gpio_mode(GPIO98_CIF_DD_0_MD); 388 pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_camera_pin_config));
367 pxa_gpio_mode(GPIO105_CIF_DD_1_MD);
368 pxa_gpio_mode(GPIO104_CIF_DD_2_MD);
369 pxa_gpio_mode(GPIO103_CIF_DD_3_MD);
370 pxa_gpio_mode(GPIO95_CIF_DD_4_MD);
371 pxa_gpio_mode(GPIO94_CIF_DD_5_MD);
372 pxa_gpio_mode(GPIO93_CIF_DD_6_MD);
373 pxa_gpio_mode(GPIO108_CIF_DD_7_MD);
374 pxa_gpio_mode(GPIO107_CIF_DD_8_MD);
375 pxa_gpio_mode(GPIO106_CIF_DD_9_MD);
376 pxa_gpio_mode(GPIO42_CIF_MCLK_MD);
377 pxa_gpio_mode(GPIO45_CIF_PCLK_MD);
378 pxa_gpio_mode(GPIO43_CIF_FV_MD);
379 pxa_gpio_mode(GPIO44_CIF_LV_MD);
380
381 return 0; 389 return 0;
382} 390}
383 391
@@ -449,8 +457,10 @@ static struct map_desc pcm990_io_desc[] __initdata = {
449 */ 457 */
450void __init pcm990_baseboard_init(void) 458void __init pcm990_baseboard_init(void)
451{ 459{
460 pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_pin_config));
461
452 /* register CPLD access */ 462 /* register CPLD access */
453 iotable_init(pcm990_io_desc, ARRAY_SIZE(pcm990_io_desc)); 463 iotable_init(ARRAY_AND_SIZE(pcm990_io_desc));
454 464
455 /* register CPLD's IRQ controller */ 465 /* register CPLD's IRQ controller */
456 pcm990_init_irq(); 466 pcm990_init_irq();
@@ -458,7 +468,6 @@ void __init pcm990_baseboard_init(void)
458#ifndef CONFIG_PCM990_DISPLAY_NONE 468#ifndef CONFIG_PCM990_DISPLAY_NONE
459 set_pxa_fb_info(&pcm990_fbinfo); 469 set_pxa_fb_info(&pcm990_fbinfo);
460#endif 470#endif
461 pxa_gpio_mode(GPIO16_PWM0_MD);
462 platform_device_register(&pcm990_backlight_device); 471 platform_device_register(&pcm990_backlight_device);
463 472
464 /* MMC */ 473 /* MMC */
@@ -473,9 +482,8 @@ void __init pcm990_baseboard_init(void)
473#if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) 482#if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE)
474 pxa_set_camera_info(&pcm990_pxacamera_platform_data); 483 pxa_set_camera_info(&pcm990_pxacamera_platform_data);
475 484
476 i2c_register_board_info(0, pcm990_i2c_devices, 485 i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices));
477 ARRAY_SIZE(pcm990_i2c_devices));
478#endif 486#endif
479 487
480 printk(KERN_INFO"PCM-990 Evaluation baseboard initialized\n"); 488 printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n");
481} 489}
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index f81c10cafd48..39612cfa0b4d 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -267,6 +267,7 @@ static void poodle_irda_transceiver_mode(struct device *dev, int mode)
267 } else { 267 } else {
268 GPCR(POODLE_GPIO_IR_ON) = GPIO_bit(POODLE_GPIO_IR_ON); 268 GPCR(POODLE_GPIO_IR_ON) = GPIO_bit(POODLE_GPIO_IR_ON);
269 } 269 }
270 pxa2xx_transceiver_mode(dev, mode);
270} 271}
271 272
272static struct pxaficp_platform_data poodle_ficp_platform_data = { 273static struct pxaficp_platform_data poodle_ficp_platform_data = {
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 4cd50e3005e9..c5b845b935bb 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -109,6 +109,52 @@ static const struct clkops clk_pxa25x_lcd_ops = {
109 .getrate = clk_pxa25x_lcd_getrate, 109 .getrate = clk_pxa25x_lcd_getrate,
110}; 110};
111 111
112static unsigned long gpio12_config_32k[] = {
113 GPIO12_32KHz,
114};
115
116static unsigned long gpio12_config_gpio[] = {
117 GPIO12_GPIO,
118};
119
120static void clk_gpio12_enable(struct clk *clk)
121{
122 pxa2xx_mfp_config(gpio12_config_32k, 1);
123}
124
125static void clk_gpio12_disable(struct clk *clk)
126{
127 pxa2xx_mfp_config(gpio12_config_gpio, 1);
128}
129
130static const struct clkops clk_pxa25x_gpio12_ops = {
131 .enable = clk_gpio12_enable,
132 .disable = clk_gpio12_disable,
133};
134
135static unsigned long gpio11_config_3m6[] = {
136 GPIO11_3_6MHz,
137};
138
139static unsigned long gpio11_config_gpio[] = {
140 GPIO11_GPIO,
141};
142
143static void clk_gpio11_enable(struct clk *clk)
144{
145 pxa2xx_mfp_config(gpio11_config_3m6, 1);
146}
147
148static void clk_gpio11_disable(struct clk *clk)
149{
150 pxa2xx_mfp_config(gpio11_config_gpio, 1);
151}
152
153static const struct clkops clk_pxa25x_gpio11_ops = {
154 .enable = clk_gpio11_enable,
155 .disable = clk_gpio11_disable,
156};
157
112/* 158/*
113 * 3.6864MHz -> OST, GPIO, SSP, PWM, PLLs (95.842MHz, 147.456MHz) 159 * 3.6864MHz -> OST, GPIO, SSP, PWM, PLLs (95.842MHz, 147.456MHz)
114 * 95.842MHz -> MMC 19.169MHz, I2C 31.949MHz, FICP 47.923MHz, USB 47.923MHz 160 * 95.842MHz -> MMC 19.169MHz, I2C 31.949MHz, FICP 47.923MHz, USB 47.923MHz
@@ -128,6 +174,8 @@ static struct clk pxa25x_clks[] = {
128 INIT_CKEN("UARTCLK", BTUART, 14745600, 1, &pxa_device_btuart.dev), 174 INIT_CKEN("UARTCLK", BTUART, 14745600, 1, &pxa_device_btuart.dev),
129 INIT_CKEN("UARTCLK", STUART, 14745600, 1, NULL), 175 INIT_CKEN("UARTCLK", STUART, 14745600, 1, NULL),
130 INIT_CKEN("UDCCLK", USB, 47923000, 5, &pxa25x_device_udc.dev), 176 INIT_CKEN("UDCCLK", USB, 47923000, 5, &pxa25x_device_udc.dev),
177 INIT_CLK("GPIO11_CLK", &clk_pxa25x_gpio11_ops, 3686400, 0, NULL),
178 INIT_CLK("GPIO12_CLK", &clk_pxa25x_gpio12_ops, 32768, 0, NULL),
131 INIT_CKEN("MMCCLK", MMC, 19169000, 0, &pxa_device_mci.dev), 179 INIT_CKEN("MMCCLK", MMC, 19169000, 0, &pxa_device_mci.dev),
132 INIT_CKEN("I2CCLK", I2C, 31949000, 0, &pxa_device_i2c.dev), 180 INIT_CKEN("I2CCLK", I2C, 31949000, 0, &pxa_device_i2c.dev),
133 181
@@ -145,7 +193,10 @@ static struct clk pxa25x_clks[] = {
145 INIT_CKEN("FICPCLK", FICP, 47923000, 0, NULL), 193 INIT_CKEN("FICPCLK", FICP, 47923000, 0, NULL),
146}; 194};
147 195
148static struct clk gpio7_clk = INIT_CKOTHER("GPIO7_CK", &pxa25x_clks[4], NULL); 196static struct clk pxa2xx_clk_aliases[] = {
197 INIT_CKOTHER("GPIO7_CLK", &pxa25x_clks[4], NULL),
198 INIT_CKOTHER("SA1111_CLK", &pxa25x_clks[5], NULL),
199};
149 200
150#ifdef CONFIG_PM 201#ifdef CONFIG_PM
151 202
@@ -293,7 +344,7 @@ static int __init pxa25x_init(void)
293 int i, ret = 0; 344 int i, ret = 0;
294 345
295 /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */ 346 /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */
296 if (cpu_is_pxa25x()) 347 if (cpu_is_pxa255())
297 clks_register(&pxa25x_hwuart_clk, 1); 348 clks_register(&pxa25x_hwuart_clk, 1);
298 349
299 if (cpu_is_pxa21x() || cpu_is_pxa25x()) { 350 if (cpu_is_pxa21x() || cpu_is_pxa25x()) {
@@ -317,10 +368,10 @@ static int __init pxa25x_init(void)
317 } 368 }
318 369
319 /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */ 370 /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */
320 if (cpu_is_pxa25x()) 371 if (cpu_is_pxa255())
321 ret = platform_device_register(&pxa_device_hwuart); 372 ret = platform_device_register(&pxa_device_hwuart);
322 373
323 clks_register(&gpio7_clk, 1); 374 clks_register(pxa2xx_clk_aliases, ARRAY_SIZE(pxa2xx_clk_aliases));
324 375
325 return ret; 376 return ret;
326} 377}
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c
index 0a0d3877f212..da92e9733886 100644
--- a/arch/arm/mach-pxa/pxa300.c
+++ b/arch/arm/mach-pxa/pxa300.c
@@ -15,10 +15,16 @@
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/platform_device.h>
18 19
19#include <asm/hardware.h> 20#include <asm/hardware.h>
21#include <asm/arch/pxa3xx-regs.h>
20#include <asm/arch/mfp-pxa300.h> 22#include <asm/arch/mfp-pxa300.h>
21 23
24#include "generic.h"
25#include "devices.h"
26#include "clock.h"
27
22static struct pxa3xx_mfp_addr_map pxa300_mfp_addr_map[] __initdata = { 28static struct pxa3xx_mfp_addr_map pxa300_mfp_addr_map[] __initdata = {
23 29
24 MFP_ADDR_X(GPIO0, GPIO2, 0x00b4), 30 MFP_ADDR_X(GPIO0, GPIO2, 0x00b4),
@@ -79,15 +85,26 @@ static struct pxa3xx_mfp_addr_map pxa310_mfp_addr_map[] __initdata = {
79 MFP_ADDR_END, 85 MFP_ADDR_END,
80}; 86};
81 87
88static struct clk common_clks[] = {
89 PXA3xx_CKEN("NANDCLK", NAND, 156000000, 0, &pxa3xx_device_nand.dev),
90};
91
92static struct clk pxa310_clks[] = {
93 PXA3xx_CKEN("MMCCLK", MMC3, 19500000, 0, &pxa3xx_device_mci3.dev),
94};
95
82static int __init pxa300_init(void) 96static int __init pxa300_init(void)
83{ 97{
84 if (cpu_is_pxa300() || cpu_is_pxa310()) { 98 if (cpu_is_pxa300() || cpu_is_pxa310()) {
85 pxa3xx_init_mfp(); 99 pxa3xx_init_mfp();
86 pxa3xx_mfp_init_addr(pxa300_mfp_addr_map); 100 pxa3xx_mfp_init_addr(pxa300_mfp_addr_map);
101 clks_register(ARRAY_AND_SIZE(common_clks));
87 } 102 }
88 103
89 if (cpu_is_pxa310()) 104 if (cpu_is_pxa310()) {
90 pxa3xx_mfp_init_addr(pxa310_mfp_addr_map); 105 pxa3xx_mfp_init_addr(pxa310_mfp_addr_map);
106 clks_register(ARRAY_AND_SIZE(pxa310_clks));
107 }
91 108
92 return 0; 109 return 0;
93} 110}
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c
index 74128eb8f8d0..c557c23a1efe 100644
--- a/arch/arm/mach-pxa/pxa320.c
+++ b/arch/arm/mach-pxa/pxa320.c
@@ -15,11 +15,17 @@
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/platform_device.h>
18 19
19#include <asm/hardware.h> 20#include <asm/hardware.h>
20#include <asm/arch/mfp.h> 21#include <asm/arch/mfp.h>
22#include <asm/arch/pxa3xx-regs.h>
21#include <asm/arch/mfp-pxa320.h> 23#include <asm/arch/mfp-pxa320.h>
22 24
25#include "generic.h"
26#include "devices.h"
27#include "clock.h"
28
23static struct pxa3xx_mfp_addr_map pxa320_mfp_addr_map[] __initdata = { 29static struct pxa3xx_mfp_addr_map pxa320_mfp_addr_map[] __initdata = {
24 30
25 MFP_ADDR_X(GPIO0, GPIO4, 0x0124), 31 MFP_ADDR_X(GPIO0, GPIO4, 0x0124),
@@ -74,16 +80,17 @@ static struct pxa3xx_mfp_addr_map pxa320_mfp_addr_map[] __initdata = {
74 MFP_ADDR_END, 80 MFP_ADDR_END,
75}; 81};
76 82
77static void __init pxa320_init_mfp(void) 83static struct clk pxa320_clks[] = {
78{ 84 PXA3xx_CKEN("NANDCLK", NAND, 104000000, 0, &pxa3xx_device_nand.dev),
79 pxa3xx_init_mfp(); 85};
80 pxa3xx_mfp_init_addr(pxa320_mfp_addr_map);
81}
82 86
83static int __init pxa320_init(void) 87static int __init pxa320_init(void)
84{ 88{
85 if (cpu_is_pxa320()) 89 if (cpu_is_pxa320()) {
86 pxa320_init_mfp(); 90 pxa3xx_init_mfp();
91 pxa3xx_mfp_init_addr(pxa320_mfp_addr_map);
92 clks_register(ARRAY_AND_SIZE(pxa320_clks));
93 }
87 94
88 return 0; 95 return 0;
89} 96}
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 15685d2b8f8c..f491025a0c82 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -144,7 +144,7 @@ static unsigned long clk_pxa3xx_hsio_getrate(struct clk *clk)
144 return hsio_clk; 144 return hsio_clk;
145} 145}
146 146
147static void clk_pxa3xx_cken_enable(struct clk *clk) 147void clk_pxa3xx_cken_enable(struct clk *clk)
148{ 148{
149 unsigned long mask = 1ul << (clk->cken & 0x1f); 149 unsigned long mask = 1ul << (clk->cken & 0x1f);
150 150
@@ -154,7 +154,7 @@ static void clk_pxa3xx_cken_enable(struct clk *clk)
154 CKENB |= mask; 154 CKENB |= mask;
155} 155}
156 156
157static void clk_pxa3xx_cken_disable(struct clk *clk) 157void clk_pxa3xx_cken_disable(struct clk *clk)
158{ 158{
159 unsigned long mask = 1ul << (clk->cken & 0x1f); 159 unsigned long mask = 1ul << (clk->cken & 0x1f);
160 160
@@ -164,7 +164,7 @@ static void clk_pxa3xx_cken_disable(struct clk *clk)
164 CKENB &= ~mask; 164 CKENB &= ~mask;
165} 165}
166 166
167static const struct clkops clk_pxa3xx_cken_ops = { 167const struct clkops clk_pxa3xx_cken_ops = {
168 .enable = clk_pxa3xx_cken_enable, 168 .enable = clk_pxa3xx_cken_enable,
169 .disable = clk_pxa3xx_cken_disable, 169 .disable = clk_pxa3xx_cken_disable,
170}; 170};
@@ -196,24 +196,6 @@ static const struct clkops clk_pout_ops = {
196 .disable = clk_pout_disable, 196 .disable = clk_pout_disable,
197}; 197};
198 198
199#define PXA3xx_CKEN(_name, _cken, _rate, _delay, _dev) \
200 { \
201 .name = _name, \
202 .dev = _dev, \
203 .ops = &clk_pxa3xx_cken_ops, \
204 .rate = _rate, \
205 .cken = CKEN_##_cken, \
206 .delay = _delay, \
207 }
208
209#define PXA3xx_CK(_name, _cken, _ops, _dev) \
210 { \
211 .name = _name, \
212 .dev = _dev, \
213 .ops = _ops, \
214 .cken = CKEN_##_cken, \
215 }
216
217static struct clk pxa3xx_clks[] = { 199static struct clk pxa3xx_clks[] = {
218 { 200 {
219 .name = "CLK_POUT", 201 .name = "CLK_POUT",
@@ -244,7 +226,6 @@ static struct clk pxa3xx_clks[] = {
244 226
245 PXA3xx_CKEN("MMCCLK", MMC1, 19500000, 0, &pxa_device_mci.dev), 227 PXA3xx_CKEN("MMCCLK", MMC1, 19500000, 0, &pxa_device_mci.dev),
246 PXA3xx_CKEN("MMCCLK", MMC2, 19500000, 0, &pxa3xx_device_mci2.dev), 228 PXA3xx_CKEN("MMCCLK", MMC2, 19500000, 0, &pxa3xx_device_mci2.dev),
247 PXA3xx_CKEN("MMCCLK", MMC3, 19500000, 0, &pxa3xx_device_mci3.dev),
248}; 229};
249 230
250#ifdef CONFIG_PM 231#ifdef CONFIG_PM
diff --git a/arch/arm/mach-pxa/pxa930.c b/arch/arm/mach-pxa/pxa930.c
new file mode 100644
index 000000000000..9503897d049c
--- /dev/null
+++ b/arch/arm/mach-pxa/pxa930.c
@@ -0,0 +1,190 @@
1/*
2 * linux/arch/arm/mach-pxa/pxa930.c
3 *
4 * Code specific to PXA930
5 *
6 * Copyright (C) 2007-2008 Marvell International Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/platform_device.h>
16#include <linux/irq.h>
17#include <linux/dma-mapping.h>
18
19#include <asm/hardware.h>
20#include <asm/arch/mfp-pxa930.h>
21
22static struct pxa3xx_mfp_addr_map pxa930_mfp_addr_map[] __initdata = {
23
24 MFP_ADDR(GPIO0, 0x02e0),
25 MFP_ADDR(GPIO1, 0x02dc),
26 MFP_ADDR(GPIO2, 0x02e8),
27 MFP_ADDR(GPIO3, 0x02d8),
28 MFP_ADDR(GPIO4, 0x02e4),
29 MFP_ADDR(GPIO5, 0x02ec),
30 MFP_ADDR(GPIO6, 0x02f8),
31 MFP_ADDR(GPIO7, 0x02fc),
32 MFP_ADDR(GPIO8, 0x0300),
33 MFP_ADDR(GPIO9, 0x02d4),
34 MFP_ADDR(GPIO10, 0x02f4),
35 MFP_ADDR(GPIO11, 0x02f0),
36 MFP_ADDR(GPIO12, 0x0304),
37 MFP_ADDR(GPIO13, 0x0310),
38 MFP_ADDR(GPIO14, 0x0308),
39 MFP_ADDR(GPIO15, 0x030c),
40 MFP_ADDR(GPIO16, 0x04e8),
41 MFP_ADDR(GPIO17, 0x04f4),
42 MFP_ADDR(GPIO18, 0x04f8),
43 MFP_ADDR(GPIO19, 0x04fc),
44 MFP_ADDR(GPIO20, 0x0518),
45 MFP_ADDR(GPIO21, 0x051c),
46 MFP_ADDR(GPIO22, 0x04ec),
47 MFP_ADDR(GPIO23, 0x0500),
48 MFP_ADDR(GPIO24, 0x04f0),
49 MFP_ADDR(GPIO25, 0x0504),
50 MFP_ADDR(GPIO26, 0x0510),
51 MFP_ADDR(GPIO27, 0x0514),
52 MFP_ADDR(GPIO28, 0x0520),
53 MFP_ADDR(GPIO29, 0x0600),
54 MFP_ADDR(GPIO30, 0x0618),
55 MFP_ADDR(GPIO31, 0x0610),
56 MFP_ADDR(GPIO32, 0x060c),
57 MFP_ADDR(GPIO33, 0x061c),
58 MFP_ADDR(GPIO34, 0x0620),
59 MFP_ADDR(GPIO35, 0x0628),
60 MFP_ADDR(GPIO36, 0x062c),
61 MFP_ADDR(GPIO37, 0x0630),
62 MFP_ADDR(GPIO38, 0x0634),
63 MFP_ADDR(GPIO39, 0x0638),
64 MFP_ADDR(GPIO40, 0x063c),
65 MFP_ADDR(GPIO41, 0x0614),
66 MFP_ADDR(GPIO42, 0x0624),
67 MFP_ADDR(GPIO43, 0x0608),
68 MFP_ADDR(GPIO44, 0x0604),
69 MFP_ADDR(GPIO45, 0x050c),
70 MFP_ADDR(GPIO46, 0x0508),
71 MFP_ADDR(GPIO47, 0x02bc),
72 MFP_ADDR(GPIO48, 0x02b4),
73 MFP_ADDR(GPIO49, 0x02b8),
74 MFP_ADDR(GPIO50, 0x02c8),
75 MFP_ADDR(GPIO51, 0x02c0),
76 MFP_ADDR(GPIO52, 0x02c4),
77 MFP_ADDR(GPIO53, 0x02d0),
78 MFP_ADDR(GPIO54, 0x02cc),
79 MFP_ADDR(GPIO55, 0x029c),
80 MFP_ADDR(GPIO56, 0x02a0),
81 MFP_ADDR(GPIO57, 0x0294),
82 MFP_ADDR(GPIO58, 0x0298),
83 MFP_ADDR(GPIO59, 0x02a4),
84 MFP_ADDR(GPIO60, 0x02a8),
85 MFP_ADDR(GPIO61, 0x02b0),
86 MFP_ADDR(GPIO62, 0x02ac),
87 MFP_ADDR(GPIO63, 0x0640),
88 MFP_ADDR(GPIO64, 0x065c),
89 MFP_ADDR(GPIO65, 0x0648),
90 MFP_ADDR(GPIO66, 0x0644),
91 MFP_ADDR(GPIO67, 0x0674),
92 MFP_ADDR(GPIO68, 0x0658),
93 MFP_ADDR(GPIO69, 0x0654),
94 MFP_ADDR(GPIO70, 0x0660),
95 MFP_ADDR(GPIO71, 0x0668),
96 MFP_ADDR(GPIO72, 0x0664),
97 MFP_ADDR(GPIO73, 0x0650),
98 MFP_ADDR(GPIO74, 0x066c),
99 MFP_ADDR(GPIO75, 0x064c),
100 MFP_ADDR(GPIO76, 0x0670),
101 MFP_ADDR(GPIO77, 0x0678),
102 MFP_ADDR(GPIO78, 0x067c),
103 MFP_ADDR(GPIO79, 0x0694),
104 MFP_ADDR(GPIO80, 0x069c),
105 MFP_ADDR(GPIO81, 0x06a0),
106 MFP_ADDR(GPIO82, 0x06a4),
107 MFP_ADDR(GPIO83, 0x0698),
108 MFP_ADDR(GPIO84, 0x06bc),
109 MFP_ADDR(GPIO85, 0x06b4),
110 MFP_ADDR(GPIO86, 0x06b0),
111 MFP_ADDR(GPIO87, 0x06c0),
112 MFP_ADDR(GPIO88, 0x06c4),
113 MFP_ADDR(GPIO89, 0x06ac),
114 MFP_ADDR(GPIO90, 0x0680),
115 MFP_ADDR(GPIO91, 0x0684),
116 MFP_ADDR(GPIO92, 0x0688),
117 MFP_ADDR(GPIO93, 0x0690),
118 MFP_ADDR(GPIO94, 0x068c),
119 MFP_ADDR(GPIO95, 0x06a8),
120 MFP_ADDR(GPIO96, 0x06b8),
121 MFP_ADDR(GPIO97, 0x0410),
122 MFP_ADDR(GPIO98, 0x0418),
123 MFP_ADDR(GPIO99, 0x041c),
124 MFP_ADDR(GPIO100, 0x0414),
125 MFP_ADDR(GPIO101, 0x0408),
126 MFP_ADDR(GPIO102, 0x0324),
127 MFP_ADDR(GPIO103, 0x040c),
128 MFP_ADDR(GPIO104, 0x0400),
129 MFP_ADDR(GPIO105, 0x0328),
130 MFP_ADDR(GPIO106, 0x0404),
131
132 MFP_ADDR(nXCVREN, 0x0204),
133 MFP_ADDR(DF_CLE_nOE, 0x020c),
134 MFP_ADDR(DF_nADV1_ALE, 0x0218),
135 MFP_ADDR(DF_SCLK_E, 0x0214),
136 MFP_ADDR(DF_SCLK_S, 0x0210),
137 MFP_ADDR(nBE0, 0x021c),
138 MFP_ADDR(nBE1, 0x0220),
139 MFP_ADDR(DF_nADV2_ALE, 0x0224),
140 MFP_ADDR(DF_INT_RnB, 0x0228),
141 MFP_ADDR(DF_nCS0, 0x022c),
142 MFP_ADDR(DF_nCS1, 0x0230),
143 MFP_ADDR(nLUA, 0x0254),
144 MFP_ADDR(nLLA, 0x0258),
145 MFP_ADDR(DF_nWE, 0x0234),
146 MFP_ADDR(DF_nRE_nOE, 0x0238),
147 MFP_ADDR(DF_ADDR0, 0x024c),
148 MFP_ADDR(DF_ADDR1, 0x0250),
149 MFP_ADDR(DF_ADDR2, 0x025c),
150 MFP_ADDR(DF_ADDR3, 0x0260),
151 MFP_ADDR(DF_IO0, 0x023c),
152 MFP_ADDR(DF_IO1, 0x0240),
153 MFP_ADDR(DF_IO2, 0x0244),
154 MFP_ADDR(DF_IO3, 0x0248),
155 MFP_ADDR(DF_IO4, 0x0264),
156 MFP_ADDR(DF_IO5, 0x0268),
157 MFP_ADDR(DF_IO6, 0x026c),
158 MFP_ADDR(DF_IO7, 0x0270),
159 MFP_ADDR(DF_IO8, 0x0274),
160 MFP_ADDR(DF_IO9, 0x0278),
161 MFP_ADDR(DF_IO10, 0x027c),
162 MFP_ADDR(DF_IO11, 0x0280),
163 MFP_ADDR(DF_IO12, 0x0284),
164 MFP_ADDR(DF_IO13, 0x0288),
165 MFP_ADDR(DF_IO14, 0x028c),
166 MFP_ADDR(DF_IO15, 0x0290),
167
168 MFP_ADDR(GSIM_UIO, 0x0314),
169 MFP_ADDR(GSIM_UCLK, 0x0318),
170 MFP_ADDR(GSIM_UDET, 0x031c),
171 MFP_ADDR(GSIM_nURST, 0x0320),
172
173 MFP_ADDR(PMIC_INT, 0x06c8),
174
175 MFP_ADDR(RDY, 0x0200),
176
177 MFP_ADDR_END,
178};
179
180static int __init pxa930_init(void)
181{
182 if (cpu_is_pxa930()) {
183 pxa3xx_init_mfp();
184 pxa3xx_mfp_init_addr(pxa930_mfp_addr_map);
185 }
186
187 return 0;
188}
189
190core_initcall(pxa930_init);
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
new file mode 100644
index 000000000000..9d39dea57ce2
--- /dev/null
+++ b/arch/arm/mach-pxa/reset.c
@@ -0,0 +1,96 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/delay.h>
9#include <linux/gpio.h>
10#include <asm/io.h>
11#include <asm/proc-fns.h>
12
13#include <asm/arch/pxa-regs.h>
14#include <asm/arch/pxa2xx-regs.h>
15
16static void do_hw_reset(void);
17
18static int reset_gpio = -1;
19
20int init_gpio_reset(int gpio)
21{
22 int rc;
23
24 rc = gpio_request(gpio, "reset generator");
25 if (rc) {
26 printk(KERN_ERR "Can't request reset_gpio\n");
27 goto out;
28 }
29
30 rc = gpio_direction_input(gpio);
31 if (rc) {
32 printk(KERN_ERR "Can't configure reset_gpio for input\n");
33 gpio_free(gpio);
34 goto out;
35 }
36
37out:
38 if (!rc)
39 reset_gpio = gpio;
40
41 return rc;
42}
43
44/*
45 * Trigger GPIO reset.
46 * This covers various types of logic connecting gpio pin
47 * to RESET pins (nRESET or GPIO_RESET):
48 */
49static void do_gpio_reset(void)
50{
51 BUG_ON(reset_gpio == -1);
52
53 /* drive it low */
54 gpio_direction_output(reset_gpio, 0);
55 mdelay(2);
56 /* rising edge or drive high */
57 gpio_set_value(reset_gpio, 1);
58 mdelay(2);
59 /* falling edge */
60 gpio_set_value(reset_gpio, 0);
61
62 /* give it some time */
63 mdelay(10);
64
65 WARN_ON(1);
66 /* fallback */
67 do_hw_reset();
68}
69
70static void do_hw_reset(void)
71{
72 /* Initialize the watchdog and let it fire */
73 OWER = OWER_WME;
74 OSSR = OSSR_M3;
75 OSMR3 = OSCR + 368640; /* ... in 100 ms */
76}
77
78void arch_reset(char mode)
79{
80 if (cpu_is_pxa2xx())
81 RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR;
82
83 switch (mode) {
84 case 's':
85 /* Jump into ROM at address 0 */
86 cpu_reset(0);
87 break;
88 case 'h':
89 do_hw_reset();
90 break;
91 case 'g':
92 do_gpio_reset();
93 break;
94 }
95}
96
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
new file mode 100644
index 000000000000..d02bc6f8bb93
--- /dev/null
+++ b/arch/arm/mach-pxa/saar.c
@@ -0,0 +1,84 @@
1/*
2 * linux/arch/arm/mach-pxa/saar.c
3 *
4 * Support for the Marvell PXA930 Handheld Platform (aka SAAR)
5 *
6 * Copyright (C) 2007-2008 Marvell International Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * publishhed by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/gpio.h>
20#include <linux/smc91x.h>
21
22#include <asm/mach-types.h>
23#include <asm/mach/arch.h>
24#include <asm/hardware.h>
25#include <asm/arch/pxa3xx-regs.h>
26#include <asm/arch/mfp-pxa930.h>
27
28#include "devices.h"
29#include "generic.h"
30
31/* SAAR MFP configurations */
32static mfp_cfg_t saar_mfp_cfg[] __initdata = {
33 /* Ethernet */
34 DF_nCS1_nCS3,
35 GPIO97_GPIO,
36};
37
38#define SAAR_ETH_PHYS (0x14000000)
39
40static struct resource smc91x_resources[] = {
41 [0] = {
42 .start = (SAAR_ETH_PHYS + 0x300),
43 .end = (SAAR_ETH_PHYS + 0xfffff),
44 .flags = IORESOURCE_MEM,
45 },
46 [1] = {
47 .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO97)),
48 .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO97)),
49 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
50 }
51};
52
53static struct smc91x_platdata saar_smc91x_info = {
54 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
55};
56
57static struct platform_device smc91x_device = {
58 .name = "smc91x",
59 .id = 0,
60 .num_resources = ARRAY_SIZE(smc91x_resources),
61 .resource = smc91x_resources,
62 .dev = {
63 .platform_data = &saar_smc91x_info,
64 },
65};
66
67static void __init saar_init(void)
68{
69 /* initialize MFP configurations */
70 pxa3xx_mfp_config(ARRAY_AND_SIZE(saar_mfp_cfg));
71
72 platform_device_register(&smc91x_device);
73}
74
75MACHINE_START(SAAR, "PXA930 Handheld Platform (aka SAAR)")
76 /* Maintainer: Eric Miao <eric.miao@marvell.com> */
77 .phys_io = 0x40000000,
78 .boot_params = 0xa0000100,
79 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
80 .map_io = pxa_map_io,
81 .init_irq = pxa3xx_init_irq,
82 .timer = &pxa_timer,
83 .init_machine = saar_init,
84MACHINE_END
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index e7d0fcd9b43f..762249c03ded 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -38,6 +38,7 @@
38#include <asm/arch/pxa-regs.h> 38#include <asm/arch/pxa-regs.h>
39#include <asm/arch/pxa2xx-regs.h> 39#include <asm/arch/pxa2xx-regs.h>
40#include <asm/arch/pxa2xx-gpio.h> 40#include <asm/arch/pxa2xx-gpio.h>
41#include <asm/arch/pxa27x-udc.h>
41#include <asm/arch/irda.h> 42#include <asm/arch/irda.h>
42#include <asm/arch/mmc.h> 43#include <asm/arch/mmc.h>
43#include <asm/arch/ohci.h> 44#include <asm/arch/ohci.h>
@@ -450,6 +451,7 @@ static void spitz_irda_transceiver_mode(struct device *dev, int mode)
450 set_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON); 451 set_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON);
451 else 452 else
452 reset_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON); 453 reset_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON);
454 pxa2xx_transceiver_mode(dev, mode);
453} 455}
454 456
455#ifdef CONFIG_MACH_AKITA 457#ifdef CONFIG_MACH_AKITA
@@ -459,6 +461,7 @@ static void akita_irda_transceiver_mode(struct device *dev, int mode)
459 akita_set_ioexp(&akitaioexp_device.dev, AKITA_IOEXP_IR_ON); 461 akita_set_ioexp(&akitaioexp_device.dev, AKITA_IOEXP_IR_ON);
460 else 462 else
461 akita_reset_ioexp(&akitaioexp_device.dev, AKITA_IOEXP_IR_ON); 463 akita_reset_ioexp(&akitaioexp_device.dev, AKITA_IOEXP_IR_ON);
464 pxa2xx_transceiver_mode(dev, mode);
462} 465}
463#endif 466#endif
464 467
@@ -529,11 +532,7 @@ static struct platform_device *devices[] __initdata = {
529 532
530static void spitz_poweroff(void) 533static void spitz_poweroff(void)
531{ 534{
532 pxa_gpio_mode(SPITZ_GPIO_ON_RESET | GPIO_OUT); 535 arm_machine_restart('g');
533 GPSR(SPITZ_GPIO_ON_RESET) = GPIO_bit(SPITZ_GPIO_ON_RESET);
534
535 mdelay(1000);
536 arm_machine_restart('h');
537} 536}
538 537
539static void spitz_restart(char mode) 538static void spitz_restart(char mode)
@@ -547,6 +546,7 @@ static void spitz_restart(char mode)
547 546
548static void __init common_init(void) 547static void __init common_init(void)
549{ 548{
549 init_gpio_reset(SPITZ_GPIO_ON_RESET);
550 pm_power_off = spitz_poweroff; 550 pm_power_off = spitz_poweroff;
551 arm_pm_restart = spitz_restart; 551 arm_pm_restart = spitz_restart;
552 552
diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c
index 0bb31982fb6f..89f38683787e 100644
--- a/arch/arm/mach-pxa/ssp.c
+++ b/arch/arm/mach-pxa/ssp.c
@@ -14,13 +14,6 @@
14 * IO-based SSP applications and allows easy port setup for DMA access. 14 * IO-based SSP applications and allows easy port setup for DMA access.
15 * 15 *
16 * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com> 16 * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com>
17 *
18 * Revision history:
19 * 22nd Aug 2003 Initial version.
20 * 20th Dec 2004 Added ssp_config for changing port config without
21 * closing the port.
22 * 4th Aug 2005 Added option to disable irq handler registration and
23 * cleaned up irq and clock detection.
24 */ 17 */
25 18
26#include <linux/module.h> 19#include <linux/module.h>
@@ -285,7 +278,7 @@ int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags)
285 goto out_region; 278 goto out_region;
286 dev->irq = ssp->irq; 279 dev->irq = ssp->irq;
287 } else 280 } else
288 dev->irq = 0; 281 dev->irq = NO_IRQ;
289 282
290 /* turn on SSP port clock */ 283 /* turn on SSP port clock */
291 clk_enable(ssp->clk); 284 clk_enable(ssp->clk);
@@ -306,7 +299,8 @@ void ssp_exit(struct ssp_dev *dev)
306 struct ssp_device *ssp = dev->ssp; 299 struct ssp_device *ssp = dev->ssp;
307 300
308 ssp_disable(dev); 301 ssp_disable(dev);
309 free_irq(dev->irq, dev); 302 if (dev->irq != NO_IRQ)
303 free_irq(dev->irq, dev);
310 clk_disable(ssp->clk); 304 clk_disable(ssp->clk);
311 ssp_free(ssp); 305 ssp_free(ssp);
312} 306}
@@ -360,6 +354,7 @@ static int __devinit ssp_probe(struct platform_device *pdev, int type)
360 dev_err(&pdev->dev, "failed to allocate memory"); 354 dev_err(&pdev->dev, "failed to allocate memory");
361 return -ENOMEM; 355 return -ENOMEM;
362 } 356 }
357 ssp->pdev = pdev;
363 358
364 ssp->clk = clk_get(&pdev->dev, "SSPCLK"); 359 ssp->clk = clk_get(&pdev->dev, "SSPCLK");
365 if (IS_ERR(ssp->clk)) { 360 if (IS_ERR(ssp->clk)) {
diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c
new file mode 100644
index 000000000000..ac283507e423
--- /dev/null
+++ b/arch/arm/mach-pxa/tavorevb.c
@@ -0,0 +1,84 @@
1/*
2 * linux/arch/arm/mach-pxa/tavorevb.c
3 *
4 * Support for the Marvell PXA930 Evaluation Board
5 *
6 * Copyright (C) 2007-2008 Marvell International Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * publishhed by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/gpio.h>
20#include <linux/smc91x.h>
21
22#include <asm/mach-types.h>
23#include <asm/mach/arch.h>
24#include <asm/hardware.h>
25#include <asm/arch/pxa3xx-regs.h>
26#include <asm/arch/mfp-pxa930.h>
27
28#include "devices.h"
29#include "generic.h"
30
31/* Tavor EVB MFP configurations */
32static mfp_cfg_t tavorevb_mfp_cfg[] __initdata = {
33 /* Ethernet */
34 DF_nCS1_nCS3,
35 GPIO47_GPIO,
36};
37
38#define TAVOREVB_ETH_PHYS (0x14000000)
39
40static struct resource smc91x_resources[] = {
41 [0] = {
42 .start = (TAVOREVB_ETH_PHYS + 0x300),
43 .end = (TAVOREVB_ETH_PHYS + 0xfffff),
44 .flags = IORESOURCE_MEM,
45 },
46 [1] = {
47 .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)),
48 .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)),
49 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
50 }
51};
52
53static struct smc91x_platdata tavorevb_smc91x_info = {
54 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
55};
56
57static struct platform_device smc91x_device = {
58 .name = "smc91x",
59 .id = 0,
60 .num_resources = ARRAY_SIZE(smc91x_resources),
61 .resource = smc91x_resources,
62 .dev = {
63 .platform_data = &tavorevb_smc91x_info,
64 },
65};
66
67static void __init tavorevb_init(void)
68{
69 /* initialize MFP configurations */
70 pxa3xx_mfp_config(ARRAY_AND_SIZE(tavorevb_mfp_cfg));
71
72 platform_device_register(&smc91x_device);
73}
74
75MACHINE_START(TAVOREVB, "PXA930 Evaluation Board (aka TavorEVB)")
76 /* Maintainer: Eric Miao <eric.miao@marvell.com> */
77 .phys_io = 0x40000000,
78 .boot_params = 0xa0000100,
79 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
80 .map_io = pxa_map_io,
81 .init_irq = pxa3xx_init_irq,
82 .timer = &pxa_timer,
83 .init_machine = tavorevb_init,
84MACHINE_END
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
new file mode 100644
index 000000000000..7d8505466e54
--- /dev/null
+++ b/arch/arm/mach-pxa/tosa-bt.c
@@ -0,0 +1,150 @@
1/*
2 * Bluetooth built-in chip control
3 *
4 * Copyright (c) 2008 Dmitry Baryshkov
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/gpio.h>
16#include <linux/delay.h>
17#include <linux/rfkill.h>
18
19#include <asm/arch/tosa_bt.h>
20
21static void tosa_bt_on(struct tosa_bt_data *data)
22{
23 gpio_set_value(data->gpio_reset, 0);
24 gpio_set_value(data->gpio_pwr, 1);
25 gpio_set_value(data->gpio_reset, 1);
26 mdelay(20);
27 gpio_set_value(data->gpio_reset, 0);
28}
29
30static void tosa_bt_off(struct tosa_bt_data *data)
31{
32 gpio_set_value(data->gpio_reset, 1);
33 mdelay(10);
34 gpio_set_value(data->gpio_pwr, 0);
35 gpio_set_value(data->gpio_reset, 0);
36}
37
38static int tosa_bt_toggle_radio(void *data, enum rfkill_state state)
39{
40 pr_info("BT_RADIO going: %s\n",
41 state == RFKILL_STATE_ON ? "on" : "off");
42
43 if (state == RFKILL_STATE_ON) {
44 pr_info("TOSA_BT: going ON\n");
45 tosa_bt_on(data);
46 } else {
47 pr_info("TOSA_BT: going OFF\n");
48 tosa_bt_off(data);
49 }
50 return 0;
51}
52
53static int tosa_bt_probe(struct platform_device *dev)
54{
55 int rc;
56 struct rfkill *rfk;
57
58 struct tosa_bt_data *data = dev->dev.platform_data;
59
60 rc = gpio_request(data->gpio_reset, "Bluetooth reset");
61 if (rc)
62 goto err_reset;
63 rc = gpio_direction_output(data->gpio_reset, 0);
64 if (rc)
65 goto err_reset_dir;
66 rc = gpio_request(data->gpio_pwr, "Bluetooth power");
67 if (rc)
68 goto err_pwr;
69 rc = gpio_direction_output(data->gpio_pwr, 0);
70 if (rc)
71 goto err_pwr_dir;
72
73 rfk = rfkill_allocate(&dev->dev, RFKILL_TYPE_BLUETOOTH);
74 if (!rfk) {
75 rc = -ENOMEM;
76 goto err_rfk_alloc;
77 }
78
79 rfk->name = "tosa-bt";
80 rfk->toggle_radio = tosa_bt_toggle_radio;
81 rfk->data = data;
82#ifdef CONFIG_RFKILL_LEDS
83 rfk->led_trigger.name = "tosa-bt";
84#endif
85
86 rc = rfkill_register(rfk);
87 if (rc)
88 goto err_rfkill;
89
90 platform_set_drvdata(dev, rfk);
91
92 return 0;
93
94err_rfkill:
95 if (rfk)
96 rfkill_free(rfk);
97 rfk = NULL;
98err_rfk_alloc:
99 tosa_bt_off(data);
100err_pwr_dir:
101 gpio_free(data->gpio_pwr);
102err_pwr:
103err_reset_dir:
104 gpio_free(data->gpio_reset);
105err_reset:
106 return rc;
107}
108
109static int __devexit tosa_bt_remove(struct platform_device *dev)
110{
111 struct tosa_bt_data *data = dev->dev.platform_data;
112 struct rfkill *rfk = platform_get_drvdata(dev);
113
114 platform_set_drvdata(dev, NULL);
115
116 if (rfk)
117 rfkill_unregister(rfk);
118 rfk = NULL;
119
120 tosa_bt_off(data);
121
122 gpio_free(data->gpio_pwr);
123 gpio_free(data->gpio_reset);
124
125 return 0;
126}
127
128static struct platform_driver tosa_bt_driver = {
129 .probe = tosa_bt_probe,
130 .remove = __devexit_p(tosa_bt_remove),
131
132 .driver = {
133 .name = "tosa-bt",
134 .owner = THIS_MODULE,
135 },
136};
137
138
139static int __init tosa_bt_init(void)
140{
141 return platform_driver_register(&tosa_bt_driver);
142}
143
144static void __exit tosa_bt_exit(void)
145{
146 platform_driver_unregister(&tosa_bt_driver);
147}
148
149module_init(tosa_bt_init);
150module_exit(tosa_bt_exit);
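The rfkill driver above is configured entirely through platform data; the header it includes, asm/arch/tosa_bt.h, is not part of this hunk. Reconstructed from the two fields the code dereferences, it is presumably little more than:

	/* sketch, reconstructed from usage -- the real header may carry more */
	struct tosa_bt_data {
		int gpio_pwr;	/* GPIO gating power to the Bluetooth module */
		int gpio_reset;	/* GPIO driving its reset line */
	};

The board side fills these in with TOSA_GPIO_BT_PWR_EN and TOSA_GPIO_BT_RESET in the tosa.c hunk below, so the driver itself stays free of board-specific GPIO numbers.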
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index ab4a9f579913..fea17ce6b55f 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -18,30 +18,31 @@
18#include <linux/major.h> 18#include <linux/major.h>
19#include <linux/fs.h> 19#include <linux/fs.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/fb.h>
21#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <linux/mfd/tc6393xb.h>
25#include <linux/mfd/tmio.h>
26#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h>
22#include <linux/pm.h> 28#include <linux/pm.h>
23#include <linux/delay.h>
24#include <linux/gpio_keys.h> 29#include <linux/gpio_keys.h>
25#include <linux/input.h> 30#include <linux/input.h>
26#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/pda_power.h>
33#include <linux/rfkill.h>
27 34
28#include <asm/setup.h> 35#include <asm/setup.h>
29#include <asm/memory.h>
30#include <asm/mach-types.h> 36#include <asm/mach-types.h>
31#include <asm/hardware.h>
32#include <asm/irq.h>
33#include <asm/system.h>
34#include <asm/arch/pxa-regs.h>
35#include <asm/arch/pxa2xx-regs.h> 37#include <asm/arch/pxa2xx-regs.h>
36#include <asm/arch/mfp-pxa25x.h> 38#include <asm/arch/mfp-pxa25x.h>
37#include <asm/arch/irda.h> 39#include <asm/arch/irda.h>
38#include <asm/arch/i2c.h> 40#include <asm/arch/i2c.h>
39#include <asm/arch/mmc.h> 41#include <asm/arch/mmc.h>
40#include <asm/arch/udc.h> 42#include <asm/arch/udc.h>
43#include <asm/arch/tosa_bt.h>
41 44
42#include <asm/mach/arch.h> 45#include <asm/mach/arch.h>
43#include <asm/mach/map.h>
44#include <asm/mach/irq.h>
45#include <asm/arch/tosa.h> 46#include <asm/arch/tosa.h>
46 47
47#include <asm/hardware/scoop.h> 48#include <asm/hardware/scoop.h>
@@ -86,7 +87,7 @@ static unsigned long tosa_pin_config[] = {
86 GPIO6_MMC_CLK, 87 GPIO6_MMC_CLK,
87 GPIO8_MMC_CS0, 88 GPIO8_MMC_CS0,
88 GPIO9_GPIO, /* Detect */ 89 GPIO9_GPIO, /* Detect */
89 // GPIO10 nSD_INT 90 GPIO10_GPIO, /* nSD_INT */
90 91
91 /* CF */ 92 /* CF */
92 GPIO13_GPIO, /* CD_IRQ */ 93 GPIO13_GPIO, /* CD_IRQ */
@@ -124,34 +125,34 @@ static unsigned long tosa_pin_config[] = {
124 GPIO44_BTUART_CTS, 125 GPIO44_BTUART_CTS,
125 GPIO45_BTUART_RTS, 126 GPIO45_BTUART_RTS,
126 127
127 /* IrDA */
128 GPIO46_STUART_RXD,
129 GPIO47_STUART_TXD,
130
131 /* Keybd */ 128 /* Keybd */
132 GPIO58_GPIO, 129 GPIO58_GPIO | MFP_LPM_DRIVE_LOW,
133 GPIO59_GPIO, 130 GPIO59_GPIO | MFP_LPM_DRIVE_LOW,
134 GPIO60_GPIO, 131 GPIO60_GPIO | MFP_LPM_DRIVE_LOW,
135 GPIO61_GPIO, 132 GPIO61_GPIO | MFP_LPM_DRIVE_LOW,
136 GPIO62_GPIO, 133 GPIO62_GPIO | MFP_LPM_DRIVE_LOW,
137 GPIO63_GPIO, 134 GPIO63_GPIO | MFP_LPM_DRIVE_LOW,
138 GPIO64_GPIO, 135 GPIO64_GPIO | MFP_LPM_DRIVE_LOW,
139 GPIO65_GPIO, 136 GPIO65_GPIO | MFP_LPM_DRIVE_LOW,
140 GPIO66_GPIO, 137 GPIO66_GPIO | MFP_LPM_DRIVE_LOW,
141 GPIO67_GPIO, 138 GPIO67_GPIO | MFP_LPM_DRIVE_LOW,
142 GPIO68_GPIO, 139 GPIO68_GPIO | MFP_LPM_DRIVE_LOW,
143 GPIO69_GPIO, 140 GPIO69_GPIO | MFP_LPM_DRIVE_LOW,
144 GPIO70_GPIO, 141 GPIO70_GPIO | MFP_LPM_DRIVE_LOW,
145 GPIO71_GPIO, 142 GPIO71_GPIO | MFP_LPM_DRIVE_LOW,
146 GPIO72_GPIO, 143 GPIO72_GPIO | MFP_LPM_DRIVE_LOW,
147 GPIO73_GPIO, 144 GPIO73_GPIO | MFP_LPM_DRIVE_LOW,
148 GPIO74_GPIO, 145 GPIO74_GPIO | MFP_LPM_DRIVE_LOW,
149 GPIO75_GPIO, 146 GPIO75_GPIO | MFP_LPM_DRIVE_LOW,
150 147
151 /* SPI */ 148 /* SPI */
152 GPIO81_SSP2_CLK_OUT, 149 GPIO81_SSP2_CLK_OUT,
153 GPIO82_SSP2_FRM_OUT, 150 GPIO82_SSP2_FRM_OUT,
154 GPIO83_SSP2_TXD, 151 GPIO83_SSP2_TXD,
152
153	/* IrDA is managed separately */
154 GPIO46_GPIO,
155 GPIO47_GPIO,
155}; 156};
156 157
157/* 158/*
@@ -249,6 +250,15 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void
249 250
250 tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250); 251 tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250);
251 252
253 err = gpio_request(TOSA_GPIO_nSD_DETECT, "MMC/SD card detect");
254 if (err) {
255 printk(KERN_ERR "tosa_mci_init: can't request nSD_DETECT gpio\n");
256 goto err_gpio_detect;
257 }
258 err = gpio_direction_input(TOSA_GPIO_nSD_DETECT);
259 if (err)
260 goto err_gpio_detect_dir;
261
252 err = request_irq(TOSA_IRQ_GPIO_nSD_DETECT, tosa_detect_int, 262 err = request_irq(TOSA_IRQ_GPIO_nSD_DETECT, tosa_detect_int,
253 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 263 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
254 "MMC/SD card detect", data); 264 "MMC/SD card detect", data);
@@ -257,7 +267,7 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void
257 goto err_irq; 267 goto err_irq;
258 } 268 }
259 269
260 err = gpio_request(TOSA_GPIO_SD_WP, "sd_wp"); 270 err = gpio_request(TOSA_GPIO_SD_WP, "SD Write Protect");
261 if (err) { 271 if (err) {
262 printk(KERN_ERR "tosa_mci_init: can't request SD_WP gpio\n"); 272 printk(KERN_ERR "tosa_mci_init: can't request SD_WP gpio\n");
263 goto err_gpio_wp; 273 goto err_gpio_wp;
@@ -266,7 +276,7 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void
266 if (err) 276 if (err)
267 goto err_gpio_wp_dir; 277 goto err_gpio_wp_dir;
268 278
269 err = gpio_request(TOSA_GPIO_PWR_ON, "sd_pwr"); 279 err = gpio_request(TOSA_GPIO_PWR_ON, "SD Power");
270 if (err) { 280 if (err) {
271 printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n"); 281 printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n");
272 goto err_gpio_pwr; 282 goto err_gpio_pwr;
@@ -275,8 +285,20 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void
275 if (err) 285 if (err)
276 goto err_gpio_pwr_dir; 286 goto err_gpio_pwr_dir;
277 287
288 err = gpio_request(TOSA_GPIO_nSD_INT, "SD Int");
289 if (err) {
290 		printk(KERN_ERR "tosa_mci_init: can't request nSD_INT gpio\n");
291 goto err_gpio_int;
292 }
293 err = gpio_direction_input(TOSA_GPIO_nSD_INT);
294 if (err)
295 goto err_gpio_int_dir;
296
278 return 0; 297 return 0;
279 298
299err_gpio_int_dir:
300 gpio_free(TOSA_GPIO_nSD_INT);
301err_gpio_int:
280err_gpio_pwr_dir: 302err_gpio_pwr_dir:
281 gpio_free(TOSA_GPIO_PWR_ON); 303 gpio_free(TOSA_GPIO_PWR_ON);
282err_gpio_pwr: 304err_gpio_pwr:
@@ -285,6 +307,9 @@ err_gpio_wp_dir:
285err_gpio_wp: 307err_gpio_wp:
286 free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data); 308 free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data);
287err_irq: 309err_irq:
310err_gpio_detect_dir:
311 gpio_free(TOSA_GPIO_nSD_DETECT);
312err_gpio_detect:
288 return err; 313 return err;
289} 314}
290 315
@@ -306,9 +331,11 @@ static int tosa_mci_get_ro(struct device *dev)
306 331
307static void tosa_mci_exit(struct device *dev, void *data) 332static void tosa_mci_exit(struct device *dev, void *data)
308{ 333{
334 gpio_free(TOSA_GPIO_nSD_INT);
309 gpio_free(TOSA_GPIO_PWR_ON); 335 gpio_free(TOSA_GPIO_PWR_ON);
310 gpio_free(TOSA_GPIO_SD_WP); 336 gpio_free(TOSA_GPIO_SD_WP);
311 free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data); 337 free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data);
338 gpio_free(TOSA_GPIO_nSD_DETECT);
312} 339}
313 340
314static struct pxamci_platform_data tosa_mci_platform_data = { 341static struct pxamci_platform_data tosa_mci_platform_data = {
@@ -322,29 +349,55 @@ static struct pxamci_platform_data tosa_mci_platform_data = {
322/* 349/*
323 * Irda 350 * Irda
324 */ 351 */
352static void tosa_irda_transceiver_mode(struct device *dev, int mode)
353{
354 if (mode & IR_OFF) {
355 gpio_set_value(TOSA_GPIO_IR_POWERDWN, 0);
356 pxa2xx_transceiver_mode(dev, mode);
357 gpio_direction_output(TOSA_GPIO_IRDA_TX, 0);
358 } else {
359 pxa2xx_transceiver_mode(dev, mode);
360 gpio_set_value(TOSA_GPIO_IR_POWERDWN, 1);
361 }
362}
363
325static int tosa_irda_startup(struct device *dev) 364static int tosa_irda_startup(struct device *dev)
326{ 365{
327 int ret; 366 int ret;
328 367
368 ret = gpio_request(TOSA_GPIO_IRDA_TX, "IrDA TX");
369 if (ret)
370 goto err_tx;
371 ret = gpio_direction_output(TOSA_GPIO_IRDA_TX, 0);
372 if (ret)
373 goto err_tx_dir;
374
329 ret = gpio_request(TOSA_GPIO_IR_POWERDWN, "IrDA powerdown"); 375 ret = gpio_request(TOSA_GPIO_IR_POWERDWN, "IrDA powerdown");
330 if (ret) 376 if (ret)
331 return ret; 377 goto err_pwr;
332 378
333 ret = gpio_direction_output(TOSA_GPIO_IR_POWERDWN, 0); 379 ret = gpio_direction_output(TOSA_GPIO_IR_POWERDWN, 0);
334 if (ret) 380 if (ret)
335 gpio_free(TOSA_GPIO_IR_POWERDWN); 381 goto err_pwr_dir;
336 382
337 return ret; 383 tosa_irda_transceiver_mode(dev, IR_SIRMODE | IR_OFF);
338 }
339 384
340static void tosa_irda_shutdown(struct device *dev) 385 return 0;
341{ 386
387err_pwr_dir:
342 gpio_free(TOSA_GPIO_IR_POWERDWN); 388 gpio_free(TOSA_GPIO_IR_POWERDWN);
389err_pwr:
390err_tx_dir:
391 gpio_free(TOSA_GPIO_IRDA_TX);
392err_tx:
393 return ret;
343} 394}
344 395
345static void tosa_irda_transceiver_mode(struct device *dev, int mode) 396static void tosa_irda_shutdown(struct device *dev)
346{ 397{
347 gpio_set_value(TOSA_GPIO_IR_POWERDWN, !(mode & IR_OFF)); 398 tosa_irda_transceiver_mode(dev, IR_SIRMODE | IR_OFF);
399 gpio_free(TOSA_GPIO_IR_POWERDWN);
400 gpio_free(TOSA_GPIO_IRDA_TX);
348} 401}
349 402
350static struct pxaficp_platform_data tosa_ficp_platform_data = { 403static struct pxaficp_platform_data tosa_ficp_platform_data = {
@@ -355,6 +408,70 @@ static struct pxaficp_platform_data tosa_ficp_platform_data = {
355}; 408};
356 409
357/* 410/*
411 * Tosa AC IN
412 */
413static int tosa_power_init(struct device *dev)
414{
415 int ret = gpio_request(TOSA_GPIO_AC_IN, "ac in");
416 if (ret)
417 goto err_gpio_req;
418
419 ret = gpio_direction_input(TOSA_GPIO_AC_IN);
420 if (ret)
421 goto err_gpio_in;
422
423 return 0;
424
425err_gpio_in:
426 gpio_free(TOSA_GPIO_AC_IN);
427err_gpio_req:
428 return ret;
429}
430
431static void tosa_power_exit(struct device *dev)
432{
433 gpio_free(TOSA_GPIO_AC_IN);
434}
435
436static int tosa_power_ac_online(void)
437{
438 return gpio_get_value(TOSA_GPIO_AC_IN) == 0;
439}
440
441static char *tosa_ac_supplied_to[] = {
442 "main-battery",
443 "backup-battery",
444 "jacket-battery",
445};
446
447static struct pda_power_pdata tosa_power_data = {
448 .init = tosa_power_init,
449 .is_ac_online = tosa_power_ac_online,
450 .exit = tosa_power_exit,
451 .supplied_to = tosa_ac_supplied_to,
452 .num_supplicants = ARRAY_SIZE(tosa_ac_supplied_to),
453};
454
455static struct resource tosa_power_resource[] = {
456 {
457 .name = "ac",
458 .start = gpio_to_irq(TOSA_GPIO_AC_IN),
459 .end = gpio_to_irq(TOSA_GPIO_AC_IN),
460 .flags = IORESOURCE_IRQ |
461 IORESOURCE_IRQ_HIGHEDGE |
462 IORESOURCE_IRQ_LOWEDGE,
463 },
464};
465
466static struct platform_device tosa_power_device = {
467 .name = "pda-power",
468 .id = -1,
469 .dev.platform_data = &tosa_power_data,
470 .resource = tosa_power_resource,
471 .num_resources = ARRAY_SIZE(tosa_power_resource),
472};
473
474/*
358 * Tosa Keyboard 475 * Tosa Keyboard
359 */ 476 */
360static struct platform_device tosakbd_device = { 477static struct platform_device tosakbd_device = {
@@ -439,7 +556,7 @@ static struct gpio_led tosa_gpio_leds[] = {
439 }, 556 },
440 { 557 {
441 .name = "tosa:blue:bluetooth", 558 .name = "tosa:blue:bluetooth",
442 .default_trigger = "none", 559 .default_trigger = "tosa-bt",
443 .gpio = TOSA_GPIO_BT_LED, 560 .gpio = TOSA_GPIO_BT_LED,
444 }, 561 },
445}; 562};
@@ -457,21 +574,184 @@ static struct platform_device tosaled_device = {
457 }, 574 },
458}; 575};
459 576
577/*
578 * Toshiba Mobile IO Controller
579 */
580static struct resource tc6393xb_resources[] = {
581 [0] = {
582 .start = TOSA_LCDC_PHYS,
583 .end = TOSA_LCDC_PHYS + 0x3ffffff,
584 .flags = IORESOURCE_MEM,
585 },
586
587 [1] = {
588 .start = TOSA_IRQ_GPIO_TC6393XB_INT,
589 .end = TOSA_IRQ_GPIO_TC6393XB_INT,
590 .flags = IORESOURCE_IRQ,
591 },
592};
593
594
595static int tosa_tc6393xb_enable(struct platform_device *dev)
596{
597 int rc;
598
599 rc = gpio_request(TOSA_GPIO_TC6393XB_REST_IN, "tc6393xb #pclr");
600 if (rc)
601 goto err_req_pclr;
602 rc = gpio_request(TOSA_GPIO_TC6393XB_SUSPEND, "tc6393xb #suspend");
603 if (rc)
604 goto err_req_suspend;
605 rc = gpio_request(TOSA_GPIO_TC6393XB_L3V_ON, "l3v");
606 if (rc)
607 goto err_req_l3v;
608 rc = gpio_direction_output(TOSA_GPIO_TC6393XB_L3V_ON, 0);
609 if (rc)
610 goto err_dir_l3v;
611 rc = gpio_direction_output(TOSA_GPIO_TC6393XB_SUSPEND, 0);
612 if (rc)
613 goto err_dir_suspend;
614 rc = gpio_direction_output(TOSA_GPIO_TC6393XB_REST_IN, 0);
615 if (rc)
616 goto err_dir_pclr;
617
618 mdelay(1);
619
620 gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 1);
621
622 mdelay(10);
623
624 gpio_set_value(TOSA_GPIO_TC6393XB_REST_IN, 1);
625 gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 1);
626
627 return 0;
628err_dir_pclr:
629err_dir_suspend:
630err_dir_l3v:
631 gpio_free(TOSA_GPIO_TC6393XB_L3V_ON);
632err_req_l3v:
633 gpio_free(TOSA_GPIO_TC6393XB_SUSPEND);
634err_req_suspend:
635 gpio_free(TOSA_GPIO_TC6393XB_REST_IN);
636err_req_pclr:
637 return rc;
638}
639
640static int tosa_tc6393xb_disable(struct platform_device *dev)
641{
642 gpio_free(TOSA_GPIO_TC6393XB_L3V_ON);
643 gpio_free(TOSA_GPIO_TC6393XB_SUSPEND);
644 gpio_free(TOSA_GPIO_TC6393XB_REST_IN);
645
646 return 0;
647}
648
649static int tosa_tc6393xb_resume(struct platform_device *dev)
650{
651 gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 1);
652 mdelay(10);
653 gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 1);
654 mdelay(10);
655
656 return 0;
657}
658
659static int tosa_tc6393xb_suspend(struct platform_device *dev)
660{
661 gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 0);
662 gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 0);
663 return 0;
664}
665
666static struct mtd_partition tosa_nand_partition[] = {
667 {
668 .name = "smf",
669 .offset = 0,
670 .size = 7 * 1024 * 1024,
671 },
672 {
673 .name = "root",
674 .offset = MTDPART_OFS_APPEND,
675 .size = 28 * 1024 * 1024,
676 },
677 {
678 .name = "home",
679 .offset = MTDPART_OFS_APPEND,
680 .size = MTDPART_SIZ_FULL,
681 },
682};
683
684static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
685
686static struct nand_bbt_descr tosa_tc6393xb_nand_bbt = {
687 .options = 0,
688 .offs = 4,
689 .len = 2,
690 .pattern = scan_ff_pattern
691};
692
693static struct tmio_nand_data tosa_tc6393xb_nand_config = {
694 .num_partitions = ARRAY_SIZE(tosa_nand_partition),
695 .partition = tosa_nand_partition,
696 .badblock_pattern = &tosa_tc6393xb_nand_bbt,
697};
698
699static struct tc6393xb_platform_data tosa_tc6393xb_setup = {
700 .scr_pll2cr = 0x0cc1,
701 .scr_gper = 0x3300,
702 .scr_gpo_dsr =
703 TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON),
704 .scr_gpo_doecr =
705 TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON),
706
707 .irq_base = IRQ_BOARD_START,
708 .gpio_base = TOSA_TC6393XB_GPIO_BASE,
709
710 .enable = tosa_tc6393xb_enable,
711 .disable = tosa_tc6393xb_disable,
712 .suspend = tosa_tc6393xb_suspend,
713 .resume = tosa_tc6393xb_resume,
714
715 .nand_data = &tosa_tc6393xb_nand_config,
716};
717
718
719static struct platform_device tc6393xb_device = {
720 .name = "tc6393xb",
721 .id = -1,
722 .dev = {
723 .platform_data = &tosa_tc6393xb_setup,
724 },
725 .num_resources = ARRAY_SIZE(tc6393xb_resources),
726 .resource = tc6393xb_resources,
727};
728
729static struct tosa_bt_data tosa_bt_data = {
730 .gpio_pwr = TOSA_GPIO_BT_PWR_EN,
731 .gpio_reset = TOSA_GPIO_BT_RESET,
732};
733
734static struct platform_device tosa_bt_device = {
735 .name = "tosa-bt",
736 .id = -1,
737 .dev.platform_data = &tosa_bt_data,
738};
739
740
460static struct platform_device *devices[] __initdata = { 741static struct platform_device *devices[] __initdata = {
461 &tosascoop_device, 742 &tosascoop_device,
462 &tosascoop_jc_device, 743 &tosascoop_jc_device,
744 &tc6393xb_device,
745 &tosa_power_device,
463 &tosakbd_device, 746 &tosakbd_device,
464 &tosa_gpio_keys_device, 747 &tosa_gpio_keys_device,
465 &tosaled_device, 748 &tosaled_device,
749 &tosa_bt_device,
466}; 750};
467 751
468static void tosa_poweroff(void) 752static void tosa_poweroff(void)
469{ 753{
470 gpio_direction_output(TOSA_GPIO_ON_RESET, 0); 754 arm_machine_restart('g');
471 gpio_set_value(TOSA_GPIO_ON_RESET, 1);
472
473 mdelay(1000);
474 arm_machine_restart('h');
475} 755}
476 756
477static void tosa_restart(char mode) 757static void tosa_restart(char mode)
@@ -485,10 +765,14 @@ static void tosa_restart(char mode)
485 765
486static void __init tosa_init(void) 766static void __init tosa_init(void)
487{ 767{
768 int dummy;
769
488 pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_config)); 770 pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_config));
489 gpio_set_wake(MFP_PIN_GPIO1, 1); 771 gpio_set_wake(MFP_PIN_GPIO1, 1);
490 /* We can't pass to gpio-keys since it will drop the Reset altfunc */ 772 /* We can't pass to gpio-keys since it will drop the Reset altfunc */
491 773
774 init_gpio_reset(TOSA_GPIO_ON_RESET);
775
492 pm_power_off = tosa_poweroff; 776 pm_power_off = tosa_poweroff;
493 arm_pm_restart = tosa_restart; 777 arm_pm_restart = tosa_restart;
494 778
@@ -497,6 +781,10 @@ static void __init tosa_init(void)
497 /* enable batt_fault */ 781 /* enable batt_fault */
498 PMCR = 0x01; 782 PMCR = 0x01;
499 783
784 dummy = gpiochip_reserve(TOSA_SCOOP_GPIO_BASE, 12);
785 dummy = gpiochip_reserve(TOSA_SCOOP_JC_GPIO_BASE, 12);
786 dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16);
787
500 pxa_set_mci_info(&tosa_mci_platform_data); 788 pxa_set_mci_info(&tosa_mci_platform_data);
501 pxa_set_udc_info(&udc_info); 789 pxa_set_udc_info(&udc_info);
502 pxa_set_ficp_info(&tosa_ficp_platform_data); 790 pxa_set_ficp_info(&tosa_ficp_platform_data);
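Two of the changes above depend on helpers added elsewhere in this series: tosa_poweroff() is reduced to arm_machine_restart('g'), and tosa_init() now calls init_gpio_reset(TOSA_GPIO_ON_RESET), the idea being that shared PXA reset code owns the ON_RESET GPIO and implements the 'g' (GPIO) restart mode. A plausible sketch of that split, with the do_gpio_reset() name, levels and timings all being assumptions rather than the real helper:

	static int reset_gpio = -1;

	int init_gpio_reset(int gpio)
	{
		int rc = gpio_request(gpio, "reset generator");
		if (rc)
			return rc;
		rc = gpio_direction_input(gpio);	/* stay passive until a reset is requested */
		if (rc) {
			gpio_free(gpio);
			return rc;
		}
		reset_gpio = gpio;
		return 0;
	}

	static void do_gpio_reset(void)		/* assumed name */
	{
		/* asserting the line resets the machine; polarity is a board fact */
		gpio_direction_output(reset_gpio, 1);
		mdelay(10);
	}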
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 61e244023089..dee7bf36f013 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -254,6 +254,7 @@ static void board_irda_mode(struct device *dev, int mode)
254 /* Fast mode */ 254 /* Fast mode */
255 trizeps_conxs_ircr |= ConXS_IRCR_MODE; 255 trizeps_conxs_ircr |= ConXS_IRCR_MODE;
256 } 256 }
257 pxa2xx_transceiver_mode(dev, mode);
257 if (mode & IR_OFF) { 258 if (mode & IR_OFF) {
258 trizeps_conxs_ircr |= ConXS_IRCR_SD; 259 trizeps_conxs_ircr |= ConXS_IRCR_SD;
259 } else { 260 } else {
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 66b446ca273d..8fca6d890b7d 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/pwm_backlight.h> 21#include <linux/pwm_backlight.h>
22#include <linux/smc91x.h>
22 23
23#include <asm/mach-types.h> 24#include <asm/mach-types.h>
24#include <asm/mach/arch.h> 25#include <asm/mach/arch.h>
@@ -29,6 +30,7 @@
29#include <asm/arch/zylonite.h> 30#include <asm/arch/zylonite.h>
30#include <asm/arch/mmc.h> 31#include <asm/arch/mmc.h>
31#include <asm/arch/pxa27x_keypad.h> 32#include <asm/arch/pxa27x_keypad.h>
33#include <asm/arch/pxa3xx_nand.h>
32 34
33#include "devices.h" 35#include "devices.h"
34#include "generic.h" 36#include "generic.h"
@@ -37,6 +39,8 @@
37struct platform_mmc_slot zylonite_mmc_slot[MAX_SLOTS]; 39struct platform_mmc_slot zylonite_mmc_slot[MAX_SLOTS];
38 40
39int gpio_eth_irq; 41int gpio_eth_irq;
42int gpio_debug_led1;
43int gpio_debug_led2;
40 44
41int wm9713_irq; 45int wm9713_irq;
42 46
@@ -56,13 +60,57 @@ static struct resource smc91x_resources[] = {
56 } 60 }
57}; 61};
58 62
63static struct smc91x_platdata zylonite_smc91x_info = {
64 .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT |
65 SMC91X_NOWAIT | SMC91X_USE_DMA,
66};
67
59static struct platform_device smc91x_device = { 68static struct platform_device smc91x_device = {
60 .name = "smc91x", 69 .name = "smc91x",
61 .id = 0, 70 .id = 0,
62 .num_resources = ARRAY_SIZE(smc91x_resources), 71 .num_resources = ARRAY_SIZE(smc91x_resources),
63 .resource = smc91x_resources, 72 .resource = smc91x_resources,
73 .dev = {
74 .platform_data = &zylonite_smc91x_info,
75 },
76};
77
78#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
79static struct gpio_led zylonite_debug_leds[] = {
80 [0] = {
81 .name = "zylonite:yellow:1",
82 .default_trigger = "heartbeat",
83 },
84 [1] = {
85 .name = "zylonite:yellow:2",
86 .default_trigger = "default-on",
87 },
64}; 88};
65 89
90static struct gpio_led_platform_data zylonite_debug_leds_info = {
91 .leds = zylonite_debug_leds,
92 .num_leds = ARRAY_SIZE(zylonite_debug_leds),
93};
94
95static struct platform_device zylonite_device_leds = {
96 .name = "leds-gpio",
97 .id = -1,
98 .dev = {
99 .platform_data = &zylonite_debug_leds_info,
100 }
101};
102
103static void __init zylonite_init_leds(void)
104{
105 zylonite_debug_leds[0].gpio = gpio_debug_led1;
106 zylonite_debug_leds[1].gpio = gpio_debug_led2;
107
108 platform_device_register(&zylonite_device_leds);
109}
110#else
111static inline void zylonite_init_leds(void) {}
112#endif
113
66#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) 114#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
67static struct platform_pwm_backlight_data zylonite_backlight_data = { 115static struct platform_pwm_backlight_data zylonite_backlight_data = {
68 .pwm_id = 3, 116 .pwm_id = 3,
@@ -259,7 +307,7 @@ static void __init zylonite_init_mmc(void)
259static inline void zylonite_init_mmc(void) {} 307static inline void zylonite_init_mmc(void) {}
260#endif 308#endif
261 309
262#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULES) 310#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
263static unsigned int zylonite_matrix_key_map[] = { 311static unsigned int zylonite_matrix_key_map[] = {
264 /* KEY(row, col, key_code) */ 312 /* KEY(row, col, key_code) */
265 KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_C), KEY(0, 5, KEY_D), 313 KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_C), KEY(0, 5, KEY_D),
@@ -324,6 +372,57 @@ static void __init zylonite_init_keypad(void)
324static inline void zylonite_init_keypad(void) {} 372static inline void zylonite_init_keypad(void) {}
325#endif 373#endif
326 374
375#if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE)
376static struct mtd_partition zylonite_nand_partitions[] = {
377 [0] = {
378 .name = "Bootloader",
379 .offset = 0,
380 .size = 0x060000,
381 .mask_flags = MTD_WRITEABLE, /* force read-only */
382 },
383 [1] = {
384 .name = "Kernel",
385 .offset = 0x060000,
386 .size = 0x200000,
387 .mask_flags = MTD_WRITEABLE, /* force read-only */
388 },
389 [2] = {
390 .name = "Filesystem",
391 .offset = 0x0260000,
392 .size = 0x3000000, /* 48M - rootfs */
393 },
394 [3] = {
395 .name = "MassStorage",
396 .offset = 0x3260000,
397 .size = 0x3d40000,
398 },
399 [4] = {
400 .name = "BBT",
401 .offset = 0x6FA0000,
402 .size = 0x80000,
403 .mask_flags = MTD_WRITEABLE, /* force read-only */
404 },
405 /* NOTE: we reserve some blocks at the end of the NAND flash for
406 * bad block management, and the max number of relocation blocks
407 * differs on different platforms. Please take care with it when
408 * defining the partition table.
409 */
410};
411
412static struct pxa3xx_nand_platform_data zylonite_nand_info = {
413 .enable_arbiter = 1,
414 .parts = zylonite_nand_partitions,
415 .nr_parts = ARRAY_SIZE(zylonite_nand_partitions),
416};
417
418static void __init zylonite_init_nand(void)
419{
420 pxa3xx_set_nand_info(&zylonite_nand_info);
421}
422#else
423static inline void zylonite_init_nand(void) {}
424#endif /* CONFIG_MTD_NAND_PXA3xx || CONFIG_MTD_NAND_PXA3xx_MODULE */
425
327static void __init zylonite_init(void) 426static void __init zylonite_init(void)
328{ 427{
329 /* board-processor specific initialization */ 428 /* board-processor specific initialization */
@@ -342,6 +441,8 @@ static void __init zylonite_init(void)
342 zylonite_init_lcd(); 441 zylonite_init_lcd();
343 zylonite_init_mmc(); 442 zylonite_init_mmc();
344 zylonite_init_keypad(); 443 zylonite_init_keypad();
444 zylonite_init_nand();
445 zylonite_init_leds();
345} 446}
346 447
347MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)") 448MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)")
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c
index 6f7ae972b8db..b28d46e081d3 100644
--- a/arch/arm/mach-pxa/zylonite_pxa300.c
+++ b/arch/arm/mach-pxa/zylonite_pxa300.c
@@ -16,9 +16,12 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/i2c.h>
20#include <linux/i2c/pca953x.h>
19 21
20#include <asm/gpio.h> 22#include <asm/gpio.h>
21#include <asm/arch/mfp-pxa300.h> 23#include <asm/arch/mfp-pxa300.h>
24#include <asm/arch/i2c.h>
22#include <asm/arch/zylonite.h> 25#include <asm/arch/zylonite.h>
23 26
24#include "generic.h" 27#include "generic.h"
@@ -109,6 +112,10 @@ static mfp_cfg_t common_mfp_cfg[] __initdata = {
109 GPIO12_MMC2_DAT3, 112 GPIO12_MMC2_DAT3,
110 GPIO13_MMC2_CLK, 113 GPIO13_MMC2_CLK,
111 GPIO14_MMC2_CMD, 114 GPIO14_MMC2_CMD,
115
116 /* Standard I2C */
117 GPIO21_I2C_SCL,
118 GPIO22_I2C_SDA,
112}; 119};
113 120
114static mfp_cfg_t pxa300_mfp_cfg[] __initdata = { 121static mfp_cfg_t pxa300_mfp_cfg[] __initdata = {
@@ -192,6 +199,39 @@ static void __init zylonite_detect_lcd_panel(void)
192 pxa3xx_mfp_write(lcd_detect_pins[i], mfpr_save[i]); 199 pxa3xx_mfp_write(lcd_detect_pins[i], mfpr_save[i]);
193} 200}
194 201
202#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
203static struct pca953x_platform_data gpio_exp[] = {
204 [0] = {
205 .gpio_base = 128,
206 },
207 [1] = {
208 .gpio_base = 144,
209 },
210};
211
212struct i2c_board_info zylonite_i2c_board_info[] = {
213 {
214 .type = "pca9539",
215 .addr = 0x74,
216 .platform_data = &gpio_exp[0],
217 .irq = IRQ_GPIO(18),
218 }, {
219 .type = "pca9539",
220 .addr = 0x75,
221 .platform_data = &gpio_exp[1],
222 .irq = IRQ_GPIO(19),
223 },
224};
225
226static void __init zylonite_init_i2c(void)
227{
228 pxa_set_i2c_info(NULL);
229 i2c_register_board_info(0, ARRAY_AND_SIZE(zylonite_i2c_board_info));
230}
231#else
232static inline void zylonite_init_i2c(void) {}
233#endif
234
195void __init zylonite_pxa300_init(void) 235void __init zylonite_pxa300_init(void)
196{ 236{
197 if (cpu_is_pxa300() || cpu_is_pxa310()) { 237 if (cpu_is_pxa300() || cpu_is_pxa310()) {
@@ -207,6 +247,8 @@ void __init zylonite_pxa300_init(void)
207 247
208 /* WM9713 IRQ */ 248 /* WM9713 IRQ */
209 wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26); 249 wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26);
250
251 zylonite_init_i2c();
210 } 252 }
211 253
212 if (cpu_is_pxa300()) { 254 if (cpu_is_pxa300()) {
@@ -222,4 +264,8 @@ void __init zylonite_pxa300_init(void)
222 zylonite_mmc_slot[2].gpio_cd = EXT_GPIO(30); 264 zylonite_mmc_slot[2].gpio_cd = EXT_GPIO(30);
223 zylonite_mmc_slot[2].gpio_wp = EXT_GPIO(31); 265 zylonite_mmc_slot[2].gpio_wp = EXT_GPIO(31);
224 } 266 }
267
268 /* GPIOs for Debug LEDs */
269 gpio_debug_led1 = EXT_GPIO(25);
270 gpio_debug_led2 = EXT_GPIO(26);
225} 271}
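gpio_debug_led1/2 are assigned EXT_GPIO(25) and EXT_GPIO(26), i.e. pins on the PCA9539 expanders registered just above with gpio_base 128 and 144. EXT_GPIO() itself comes from the zylonite headers, not from this hunk; given those bases it presumably just offsets into the expander GPIO range:

	/* assumption: expander GPIO numbering starts at gpio_exp[0].gpio_base */
	#define EXT_GPIO(x)	(128 + (x))

With that reading, EXT_GPIO(25) = 153 and EXT_GPIO(26) = 154, which land on the second expander (gpio_base 144, pins 9 and 10), so the debug LEDs only become usable once the I2C expanders have probed.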
diff --git a/arch/arm/mach-pxa/zylonite_pxa320.c b/arch/arm/mach-pxa/zylonite_pxa320.c
index 2b4fc34919ac..2b7fba7a2921 100644
--- a/arch/arm/mach-pxa/zylonite_pxa320.c
+++ b/arch/arm/mach-pxa/zylonite_pxa320.c
@@ -116,6 +116,10 @@ static mfp_cfg_t mfp_cfg[] __initdata = {
116 GPIO27_MMC2_DAT3, 116 GPIO27_MMC2_DAT3,
117 GPIO28_MMC2_CLK, 117 GPIO28_MMC2_CLK,
118 GPIO29_MMC2_CMD, 118 GPIO29_MMC2_CMD,
119
120 /* Debug LEDs */
121 GPIO1_2_GPIO | MFP_LPM_DRIVE_HIGH,
122 GPIO4_2_GPIO | MFP_LPM_DRIVE_HIGH,
119}; 123};
120 124
121#define NUM_LCD_DETECT_PINS 7 125#define NUM_LCD_DETECT_PINS 7
@@ -189,6 +193,8 @@ void __init zylonite_pxa320_init(void)
189 193
190 /* GPIO pin assignment */ 194 /* GPIO pin assignment */
191 gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO9); 195 gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO9);
196 gpio_debug_led1 = mfp_to_gpio(MFP_PIN_GPIO1_2);
197 gpio_debug_led2 = mfp_to_gpio(MFP_PIN_GPIO4_2);
192 198
193 /* MMC card detect & write protect for controller 0 */ 199 /* MMC card detect & write protect for controller 0 */
194 zylonite_mmc_slot[0].gpio_cd = mfp_to_gpio(MFP_PIN_GPIO1); 200 zylonite_mmc_slot[0].gpio_cd = mfp_to_gpio(MFP_PIN_GPIO1);
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c
index fc97fe57ee6f..b5809c51d13f 100644
--- a/arch/arm/mach-sa1100/clock.c
+++ b/arch/arm/mach-sa1100/clock.c
@@ -103,7 +103,7 @@ static void clk_gpio27_disable(void)
103} 103}
104 104
105static struct clk clk_gpio27 = { 105static struct clk clk_gpio27 = {
106 .name = "GPIO27_CLK", 106 .name = "SA1111_CLK",
107 .rate = 3686400, 107 .rate = 3686400,
108 .enable = clk_gpio27_enable, 108 .enable = clk_gpio27_enable,
109 .disable = clk_gpio27_disable, 109 .disable = clk_gpio27_disable,
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index f64b92557b11..2e27a8c8372b 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -76,3 +76,5 @@ obj-$(CONFIG_CPU_V7) += proc-v7.o
76 76
77obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o 77obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
78obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o 78obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
79obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
80
diff --git a/arch/arm/mm/discontig.c b/arch/arm/mm/discontig.c
index 1e5602189507..c8c0c4b0f0a3 100644
--- a/arch/arm/mm/discontig.c
+++ b/arch/arm/mm/discontig.c
@@ -21,26 +21,24 @@
21 * Our node_data structure for discontiguous memory. 21 * Our node_data structure for discontiguous memory.
22 */ 22 */
23 23
24static bootmem_data_t node_bootmem_data[MAX_NUMNODES];
25
26pg_data_t discontig_node_data[MAX_NUMNODES] = { 24pg_data_t discontig_node_data[MAX_NUMNODES] = {
27 { .bdata = &node_bootmem_data[0] }, 25 { .bdata = &bootmem_node_data[0] },
28 { .bdata = &node_bootmem_data[1] }, 26 { .bdata = &bootmem_node_data[1] },
29 { .bdata = &node_bootmem_data[2] }, 27 { .bdata = &bootmem_node_data[2] },
30 { .bdata = &node_bootmem_data[3] }, 28 { .bdata = &bootmem_node_data[3] },
31#if MAX_NUMNODES == 16 29#if MAX_NUMNODES == 16
32 { .bdata = &node_bootmem_data[4] }, 30 { .bdata = &bootmem_node_data[4] },
33 { .bdata = &node_bootmem_data[5] }, 31 { .bdata = &bootmem_node_data[5] },
34 { .bdata = &node_bootmem_data[6] }, 32 { .bdata = &bootmem_node_data[6] },
35 { .bdata = &node_bootmem_data[7] }, 33 { .bdata = &bootmem_node_data[7] },
36 { .bdata = &node_bootmem_data[8] }, 34 { .bdata = &bootmem_node_data[8] },
37 { .bdata = &node_bootmem_data[9] }, 35 { .bdata = &bootmem_node_data[9] },
38 { .bdata = &node_bootmem_data[10] }, 36 { .bdata = &bootmem_node_data[10] },
39 { .bdata = &node_bootmem_data[11] }, 37 { .bdata = &bootmem_node_data[11] },
40 { .bdata = &node_bootmem_data[12] }, 38 { .bdata = &bootmem_node_data[12] },
41 { .bdata = &node_bootmem_data[13] }, 39 { .bdata = &bootmem_node_data[13] },
42 { .bdata = &node_bootmem_data[14] }, 40 { .bdata = &bootmem_node_data[14] },
43 { .bdata = &node_bootmem_data[15] }, 41 { .bdata = &bootmem_node_data[15] },
44#endif 42#endif
45}; 43};
46 44
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index b657f1719af0..e6352946dde0 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -284,7 +284,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
284 */ 284 */
285 arch_adjust_zones(node, zone_size, zhole_size); 285 arch_adjust_zones(node, zone_size, zhole_size);
286 286
287 free_area_init_node(node, pgdat, zone_size, start_pfn, zhole_size); 287 free_area_init_node(node, zone_size, start_pfn, zhole_size);
288 288
289 return end_pfn; 289 return end_pfn;
290} 290}
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index 7854f19b77cf..5d107520e6b9 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/mm.h>
26#include <linux/init.h> 27#include <linux/init.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/bootmem.h> 29#include <linux/bootmem.h>
@@ -182,7 +183,7 @@ void __init omapfb_reserve_sdram(void)
182 return; 183 return;
183 184
184 bdata = NODE_DATA(0)->bdata; 185 bdata = NODE_DATA(0)->bdata;
185 sdram_start = bdata->node_boot_start; 186 sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
186 sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start; 187 sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
187 reserved = 0; 188 reserved = 0;
188 for (i = 0; ; i++) { 189 for (i = 0; ; i++) {
@@ -340,5 +341,3 @@ unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
340 341
341 342
342#endif 343#endif
343
344
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 1903a3491ee9..d8e9c2c3f0f6 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1488,6 +1488,9 @@ static int __init _omap_gpio_init(void)
1488 bank->chip.set = gpio_set; 1488 bank->chip.set = gpio_set;
1489 if (bank_is_mpuio(bank)) { 1489 if (bank_is_mpuio(bank)) {
1490 bank->chip.label = "mpuio"; 1490 bank->chip.label = "mpuio";
1491#ifdef CONFIG_ARCH_OMAP1
1492 bank->chip.dev = &omap_mpuio_device.dev;
1493#endif
1491 bank->chip.base = OMAP_MPUIO(0); 1494 bank->chip.base = OMAP_MPUIO(0);
1492 } else { 1495 } else {
1493 bank->chip.label = "gpio"; 1496 bank->chip.label = "gpio";
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 0be5630ff568..8b8f564c3aa2 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
12# 12#
13# http://www.arm.linux.org.uk/developer/machines/?action=new 13# http://www.arm.linux.org.uk/developer/machines/?action=new
14# 14#
15# Last update: Mon Jul 7 16:25:39 2008 15# Last update: Sun Jul 13 12:04:05 2008
16# 16#
17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
18# 18#
@@ -1812,3 +1812,11 @@ jade MACH_JADE JADE 1821
1812ks8695_softplc MACH_KS8695_SOFTPLC KS8695_SOFTPLC 1822 1812ks8695_softplc MACH_KS8695_SOFTPLC KS8695_SOFTPLC 1822
1813gprisc4 MACH_GPRISC4 GPRISC4 1823 1813gprisc4 MACH_GPRISC4 GPRISC4 1823
1814stamp9260 MACH_STAMP9260 STAMP9260 1824 1814stamp9260 MACH_STAMP9260 STAMP9260 1824
1815smdk6430 MACH_SMDK6430 SMDK6430 1825
1816smdkc100 MACH_SMDKC100 SMDKC100 1826
1817tavorevb MACH_TAVOREVB TAVOREVB 1827
1818saar MACH_SAAR SAAR 1828
1819deister_eyecam MACH_DEISTER_EYECAM DEISTER_EYECAM 1829
1820at91sam9m10ek MACH_AT91SAM9M10EK AT91SAM9M10EK 1830
1821linkstation_produo MACH_LINKSTATION_PRODUO LINKSTATION_PRODUO 1831
1822hit_b0 MACH_HIT_B0 HIT_B0 1832
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 45d63c986015..7c239a916275 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -10,6 +10,7 @@ config AVR32
10 # With EMBEDDED=n, we get lots of stuff automatically selected 10 # With EMBEDDED=n, we get lots of stuff automatically selected
11 # that we usually don't need on AVR32. 11 # that we usually don't need on AVR32.
12 select EMBEDDED 12 select EMBEDDED
13 select HAVE_CLK
13 select HAVE_OPROFILE 14 select HAVE_OPROFILE
14 select HAVE_KPROBES 15 select HAVE_KPROBES
15 help 16 help
@@ -87,7 +88,7 @@ config PLATFORM_AT32AP
87 select SUBARCH_AVR32B 88 select SUBARCH_AVR32B
88 select MMU 89 select MMU
89 select PERFORMANCE_COUNTERS 90 select PERFORMANCE_COUNTERS
90 select HAVE_GPIO_LIB 91 select ARCH_REQUIRE_GPIOLIB
91 select GENERIC_ALLOCATOR 92 select GENERIC_ALLOCATOR
92 93
93# 94#
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 6cf9df176274..ff820a9e743a 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -31,7 +31,7 @@ void cpu_idle(void)
31{ 31{
32 /* endless idle loop with no priority at all */ 32 /* endless idle loop with no priority at all */
33 while (1) { 33 while (1) {
34 tick_nohz_stop_sched_tick(); 34 tick_nohz_stop_sched_tick(1);
35 while (!need_resched()) 35 while (!need_resched())
36 cpu_idle_sleep(); 36 cpu_idle_sleep();
37 tick_nohz_restart_sched_tick(); 37 tick_nohz_restart_sched_tick();
diff --git a/arch/avr32/kernel/stacktrace.c b/arch/avr32/kernel/stacktrace.c
index f4bdb448049c..c09f0d8dd679 100644
--- a/arch/avr32/kernel/stacktrace.c
+++ b/arch/avr32/kernel/stacktrace.c
@@ -10,6 +10,7 @@
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/stacktrace.h> 11#include <linux/stacktrace.h>
12#include <linux/thread_info.h> 12#include <linux/thread_info.h>
13#include <linux/module.h>
13 14
14register unsigned long current_frame_pointer asm("r7"); 15register unsigned long current_frame_pointer asm("r7");
15 16
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 021d51217184..604f44f5dd16 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -7,6 +7,7 @@
7 */ 7 */
8#include <linux/clk.h> 8#include <linux/clk.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/dw_dmac.h>
10#include <linux/fb.h> 11#include <linux/fb.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
@@ -594,6 +595,17 @@ static void __init genclk_init_parent(struct clk *clk)
594 clk->parent = parent; 595 clk->parent = parent;
595} 596}
596 597
598static struct dw_dma_platform_data dw_dmac0_data = {
599 .nr_channels = 3,
600};
601
602static struct resource dw_dmac0_resource[] = {
603 PBMEM(0xff200000),
604 IRQ(2),
605};
606DEFINE_DEV_DATA(dw_dmac, 0);
607DEV_CLK(hclk, dw_dmac0, hsb, 10);
608
597/* -------------------------------------------------------------------- 609/* --------------------------------------------------------------------
598 * System peripherals 610 * System peripherals
599 * -------------------------------------------------------------------- */ 611 * -------------------------------------------------------------------- */
@@ -708,17 +720,6 @@ static struct clk pico_clk = {
708 .users = 1, 720 .users = 1,
709}; 721};
710 722
711static struct resource dmaca0_resource[] = {
712 {
713 .start = 0xff200000,
714 .end = 0xff20ffff,
715 .flags = IORESOURCE_MEM,
716 },
717 IRQ(2),
718};
719DEFINE_DEV(dmaca, 0);
720DEV_CLK(hclk, dmaca0, hsb, 10);
721
722/* -------------------------------------------------------------------- 723/* --------------------------------------------------------------------
723 * HMATRIX 724 * HMATRIX
724 * -------------------------------------------------------------------- */ 725 * -------------------------------------------------------------------- */
@@ -831,7 +832,7 @@ void __init at32_add_system_devices(void)
831 platform_device_register(&at32_eic0_device); 832 platform_device_register(&at32_eic0_device);
832 platform_device_register(&smc0_device); 833 platform_device_register(&smc0_device);
833 platform_device_register(&pdc_device); 834 platform_device_register(&pdc_device);
834 platform_device_register(&dmaca0_device); 835 platform_device_register(&dw_dmac0_device);
835 836
836 platform_device_register(&at32_tcb0_device); 837 platform_device_register(&at32_tcb0_device);
837 platform_device_register(&at32_tcb1_device); 838 platform_device_register(&at32_tcb1_device);
@@ -2032,7 +2033,7 @@ struct clk *at32_clock_list[] = {
2032 &smc0_mck, 2033 &smc0_mck,
2033 &pdc_hclk, 2034 &pdc_hclk,
2034 &pdc_pclk, 2035 &pdc_pclk,
2035 &dmaca0_hclk, 2036 &dw_dmac0_hclk,
2036 &pico_clk, 2037 &pico_clk,
2037 &pio0_mck, 2038 &pio0_mck,
2038 &pio1_mck, 2039 &pio1_mck,
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index 60da03ba7117..296294f8ed81 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -360,6 +360,8 @@ static int __init pio_probe(struct platform_device *pdev)
360 pio->chip.label = pio->name; 360 pio->chip.label = pio->name;
361 pio->chip.base = pdev->id * 32; 361 pio->chip.base = pdev->id * 32;
362 pio->chip.ngpio = 32; 362 pio->chip.ngpio = 32;
363 pio->chip.dev = &pdev->dev;
364 pio->chip.owner = THIS_MODULE;
363 365
364 pio->chip.direction_input = direction_input; 366 pio->chip.direction_input = direction_input;
365 pio->chip.get = gpio_get; 367 pio->chip.get = gpio_get;
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 3f90a87527bb..3c85fdaa9487 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -119,8 +119,7 @@ void __init paging_init(void)
119 unsigned long zones_size[MAX_NR_ZONES]; 119 unsigned long zones_size[MAX_NR_ZONES];
120 unsigned long low, start_pfn; 120 unsigned long low, start_pfn;
121 121
122 start_pfn = pgdat->bdata->node_boot_start; 122 start_pfn = pgdat->bdata->node_min_pfn;
123 start_pfn >>= PAGE_SHIFT;
124 low = pgdat->bdata->node_low_pfn; 123 low = pgdat->bdata->node_low_pfn;
125 124
126 memset(zones_size, 0, sizeof(zones_size)); 125 memset(zones_size, 0, sizeof(zones_size));
@@ -129,7 +128,7 @@ void __init paging_init(void)
129 printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n", 128 printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
130 nid, start_pfn, low); 129 nid, start_pfn, low);
131 130
132 free_area_init_node(nid, pgdat, zones_size, start_pfn, NULL); 131 free_area_init_node(nid, zones_size, start_pfn, NULL);
133 132
134 printk("Node %u: mem_map starts at %p\n", 133 printk("Node %u: mem_map starts at %p\n",
135 pgdat->node_id, pgdat->node_mem_map); 134 pgdat->node_id, pgdat->node_mem_map);
diff --git a/arch/avr32/mm/ioremap.c b/arch/avr32/mm/ioremap.c
index 3437c82434ac..f03b79f0e0ab 100644
--- a/arch/avr32/mm/ioremap.c
+++ b/arch/avr32/mm/ioremap.c
@@ -6,6 +6,7 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8#include <linux/vmalloc.h> 8#include <linux/vmalloc.h>
9#include <linux/mm.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/io.h> 11#include <linux/io.h>
11 12
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 53c2cd255441..77800dd83e57 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -105,7 +105,7 @@ void cpu_idle(void)
105#endif 105#endif
106 if (!idle) 106 if (!idle)
107 idle = default_idle; 107 idle = default_idle;
108 tick_nohz_stop_sched_tick(); 108 tick_nohz_stop_sched_tick(1);
109 while (!need_resched()) 109 while (!need_resched())
110 idle(); 110 idle();
111 tick_nohz_restart_sched_tick(); 111 tick_nohz_restart_sched_tick();
diff --git a/arch/cris/arch-v10/boot/compressed/misc.c b/arch/cris/arch-v10/boot/compressed/misc.c
index 18e13bce1400..d933c89889db 100644
--- a/arch/cris/arch-v10/boot/compressed/misc.c
+++ b/arch/cris/arch-v10/boot/compressed/misc.c
@@ -102,50 +102,16 @@ extern char *input_data; /* lives in head.S */
102static long bytes_out = 0; 102static long bytes_out = 0;
103static uch *output_data; 103static uch *output_data;
104static unsigned long output_ptr = 0; 104static unsigned long output_ptr = 0;
105
106static void *malloc(int size);
107static void free(void *where);
108static void gzip_mark(void **);
109static void gzip_release(void **);
110
111static void puts(const char *); 105static void puts(const char *);
112 106
113/* the "heap" is put directly after the BSS ends, at end */ 107/* the "heap" is put directly after the BSS ends, at end */
114 108
115extern int _end; 109extern int _end;
116static long free_mem_ptr = (long)&_end; 110static long free_mem_ptr = (long)&_end;
111static long free_mem_end_ptr;
117 112
118#include "../../../../../lib/inflate.c" 113#include "../../../../../lib/inflate.c"
119 114
120static void *malloc(int size)
121{
122 void *p;
123
124 if (size < 0)
125 error("Malloc error");
126
127 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
128
129 p = (void *)free_mem_ptr;
130 free_mem_ptr += size;
131
132 return p;
133}
134
135static void free(void *where)
136{ /* Don't care */
137}
138
139static void gzip_mark(void **ptr)
140{
141 *ptr = (void *) free_mem_ptr;
142}
143
144static void gzip_release(void **ptr)
145{
146 free_mem_ptr = (long) *ptr;
147}
148
149/* decompressor info and error messages to serial console */ 115/* decompressor info and error messages to serial console */
150 116
151static void 117static void
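This hunk, like the cris v32 and h8300 decompressor hunks below, is the consumer side of a tree-wide cleanup: the per-architecture copies of malloc()/free()/gzip_mark()/gzip_release() move into lib/inflate.c itself, which now only expects the including file to define the bounds of its bump allocator. What each boot decompressor is left providing is roughly the following (sketch; a free_mem_end_ptr of 0 is taken to mean "no limit check" in the shared allocator):

	extern int _end;                        /* end of the decompressor image */
	static long free_mem_ptr = (long)&_end; /* scratch heap grows from here  */
	static long free_mem_end_ptr;           /* ...up to here; 0 = unchecked  */

	#include "../../../../../lib/inflate.c" /* now supplies malloc()/free()  */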
diff --git a/arch/cris/arch-v10/mm/init.c b/arch/cris/arch-v10/mm/init.c
index e0fcd1a9bfd5..742fd1974c2e 100644
--- a/arch/cris/arch-v10/mm/init.c
+++ b/arch/cris/arch-v10/mm/init.c
@@ -182,7 +182,7 @@ paging_init(void)
182 * mem_map page array. 182 * mem_map page array.
183 */ 183 */
184 184
185 free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0); 185 free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
186} 186}
187 187
188/* Initialize remaps of some I/O-ports. It is important that this 188/* Initialize remaps of some I/O-ports. It is important that this
diff --git a/arch/cris/arch-v32/boot/compressed/misc.c b/arch/cris/arch-v32/boot/compressed/misc.c
index 55b2695c5d70..3595e16e82bc 100644
--- a/arch/cris/arch-v32/boot/compressed/misc.c
+++ b/arch/cris/arch-v32/boot/compressed/misc.c
@@ -89,20 +89,14 @@ static unsigned outcnt = 0; /* bytes in output buffer */
89 89
90static void flush_window(void); 90static void flush_window(void);
91static void error(char *m); 91static void error(char *m);
92static void gzip_mark(void **);
93static void gzip_release(void **);
94 92
95extern char *input_data; /* lives in head.S */ 93extern char *input_data; /* lives in head.S */
96 94
97static long bytes_out = 0; 95static long bytes_out;
98static uch *output_data; 96static uch *output_data;
99static unsigned long output_ptr = 0; 97static unsigned long output_ptr;
100 98
101static void *malloc(int size);
102static void free(void *where);
103static void error(char *m); 99static void error(char *m);
104static void gzip_mark(void **);
105static void gzip_release(void **);
106 100
107static void puts(const char *); 101static void puts(const char *);
108 102
@@ -110,37 +104,10 @@ static void puts(const char *);
110 104
111extern int _end; 105extern int _end;
112static long free_mem_ptr = (long)&_end; 106static long free_mem_ptr = (long)&_end;
107static long free_mem_end_ptr;
113 108
114#include "../../../../../lib/inflate.c" 109#include "../../../../../lib/inflate.c"
115 110
116static void *malloc(int size)
117{
118 void *p;
119
120 if (size <0) error("Malloc error");
121
122 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
123
124 p = (void *)free_mem_ptr;
125 free_mem_ptr += size;
126
127 return p;
128}
129
130static void free(void *where)
131{ /* Don't care */
132}
133
134static void gzip_mark(void **ptr)
135{
136 *ptr = (void *) free_mem_ptr;
137}
138
139static void gzip_release(void **ptr)
140{
141 free_mem_ptr = (long) *ptr;
142}
143
144/* decompressor info and error messages to serial console */ 111/* decompressor info and error messages to serial console */
145 112
146static inline void 113static inline void
diff --git a/arch/cris/arch-v32/mm/init.c b/arch/cris/arch-v32/mm/init.c
index 5a9ac5834647..8a34b8b74293 100644
--- a/arch/cris/arch-v32/mm/init.c
+++ b/arch/cris/arch-v32/mm/init.c
@@ -162,7 +162,7 @@ paging_init(void)
162 * substantially higher than 0, like us (we start at PAGE_OFFSET). This 162 * substantially higher than 0, like us (we start at PAGE_OFFSET). This
163 * saves space in the mem_map page array. 163 * saves space in the mem_map page array.
164 */ 164 */
165 free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0); 165 free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
166 166
167 mem_map = contig_page_data.node_mem_map; 167 mem_map = contig_page_data.node_mem_map;
168} 168}
diff --git a/arch/cris/kernel/profile.c b/arch/cris/kernel/profile.c
index 44f7b4f79476..9aa571169bcc 100644
--- a/arch/cris/kernel/profile.c
+++ b/arch/cris/kernel/profile.c
@@ -35,19 +35,16 @@ read_cris_profile(struct file *file, char __user *buf,
35 size_t count, loff_t *ppos) 35 size_t count, loff_t *ppos)
36{ 36{
37 unsigned long p = *ppos; 37 unsigned long p = *ppos;
38 ssize_t ret;
38 39
39 if (p > SAMPLE_BUFFER_SIZE) 40 ret = simple_read_from_buffer(buf, count, ppos, sample_buffer,
40 return 0; 41 SAMPLE_BUFFER_SIZE);
42 if (ret < 0)
43 return ret;
41 44
42 if (p + count > SAMPLE_BUFFER_SIZE) 45 memset(sample_buffer + p, 0, ret);
43 count = SAMPLE_BUFFER_SIZE - p;
44 if (copy_to_user(buf, sample_buffer + p,count))
45 return -EFAULT;
46 46
47 memset(sample_buffer + p, 0, count); 47 return ret;
48 *ppos += count;
49
50 return count;
51} 48}
52 49
53static ssize_t 50static ssize_t
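The rewritten handler relies on simple_read_from_buffer(), the libfs helper that bounds-checks *ppos against the buffer size, performs the copy_to_user(), advances *ppos, and returns the number of bytes copied (or a negative errno). That is why the open-coded length checks disappear, and why the memset() clears exactly the window that was read: p holds the offset captured before the call, ret the amount actually copied, so each sample is reported only once. For reference, the helper's signature as declared in linux/fs.h:

	ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
					const void *from, size_t available);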
diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c
index 73f3aeefd203..d1113c5031f5 100644
--- a/arch/frv/kernel/pm.c
+++ b/arch/frv/kernel/pm.c
@@ -14,7 +14,6 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/pm_legacy.h>
18#include <linux/sched.h> 17#include <linux/sched.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
20#include <linux/sysctl.h> 19#include <linux/sysctl.h>
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 085dc6ec152b..396ab059efa3 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -203,20 +203,6 @@ config UNIX98_PTYS
203 Read the instructions in <file:Documentation/Changes> pertaining to 203 Read the instructions in <file:Documentation/Changes> pertaining to
204 pseudo terminals. It's safe to say N. 204 pseudo terminals. It's safe to say N.
205 205
206config UNIX98_PTY_COUNT
207 int "Maximum number of Unix98 PTYs in use (0-2048)"
208 depends on UNIX98_PTYS
209 default "256"
210 help
211 The maximum number of Unix98 PTYs that can be used at any one time.
212 The default is 256, and should be enough for desktop systems. Server
213 machines which support incoming telnet/rlogin/ssh connections and/or
214 serve several X terminals may want to increase this: every incoming
215 connection and every xterm uses up one PTY.
216
217 When not in use, each additional set of 256 PTYs occupy
218 approximately 8 KB of kernel memory on 32-bit architectures.
219
220source "drivers/char/pcmcia/Kconfig" 206source "drivers/char/pcmcia/Kconfig"
221 207
222source "drivers/serial/Kconfig" 208source "drivers/serial/Kconfig"
diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c
index 845074588af0..51ab6cbd030f 100644
--- a/arch/h8300/boot/compressed/misc.c
+++ b/arch/h8300/boot/compressed/misc.c
@@ -67,8 +67,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */
67static int fill_inbuf(void); 67static int fill_inbuf(void);
68static void flush_window(void); 68static void flush_window(void);
69static void error(char *m); 69static void error(char *m);
70static void gzip_mark(void **);
71static void gzip_release(void **);
72 70
73extern char input_data[]; 71extern char input_data[];
74extern int input_len; 72extern int input_len;
@@ -77,11 +75,7 @@ static long bytes_out = 0;
77static uch *output_data; 75static uch *output_data;
78static unsigned long output_ptr = 0; 76static unsigned long output_ptr = 0;
79 77
80static void *malloc(int size);
81static void free(void *where);
82static void error(char *m); 78static void error(char *m);
83static void gzip_mark(void **);
84static void gzip_release(void **);
85 79
86int puts(const char *); 80int puts(const char *);
87 81
@@ -98,38 +92,6 @@ static unsigned long free_mem_end_ptr;
98#define TDR *((volatile unsigned char *)0xffff8b) 92#define TDR *((volatile unsigned char *)0xffff8b)
99#define SSR *((volatile unsigned char *)0xffff8c) 93#define SSR *((volatile unsigned char *)0xffff8c)
100 94
101static void *malloc(int size)
102{
103 void *p;
104
105 if (size <0) error("Malloc error");
106 if (free_mem_ptr == 0) error("Memory error");
107
108 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
109
110 p = (void *)free_mem_ptr;
111 free_mem_ptr += size;
112
113 if (free_mem_ptr >= free_mem_end_ptr)
114 error("Out of memory");
115
116 return p;
117}
118
119static void free(void *where)
120{ /* Don't care */
121}
122
123static void gzip_mark(void **ptr)
124{
125 *ptr = (void *) free_mem_ptr;
126}
127
128static void gzip_release(void **ptr)
129{
130 free_mem_ptr = (long) *ptr;
131}
132
133int puts(const char *s) 95int puts(const char *s)
134{ 96{
135 return 0; 97 return 0;
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index b1f25c20a5db..7fda657110eb 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -20,6 +20,7 @@
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/mm.h>
23#include <linux/fs.h> 24#include <linux/fs.h>
24#include <linux/fb.h> 25#include <linux/fb.h>
25#include <linux/console.h> 26#include <linux/console.h>
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 7e028ceb93ba..465116aecb85 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1139,7 +1139,7 @@ sys32_pipe (int __user *fd)
1139 int retval; 1139 int retval;
1140 int fds[2]; 1140 int fds[2];
1141 1141
1142 retval = do_pipe(fds); 1142 retval = do_pipe_flags(fds, 0);
1143 if (retval) 1143 if (retval)
1144 goto out; 1144 goto out;
1145 if (copy_to_user(fd, fds, sizeof(fds))) 1145 if (copy_to_user(fd, fds, sizeof(fds)))
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 56ab156c48ae..0dd6c1419d8d 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1691,6 +1691,12 @@ sys_call_table:
1691 data8 sys_timerfd_create // 1310 1691 data8 sys_timerfd_create // 1310
1692 data8 sys_timerfd_settime 1692 data8 sys_timerfd_settime
1693 data8 sys_timerfd_gettime 1693 data8 sys_timerfd_gettime
1694 data8 sys_signalfd4
1695 data8 sys_eventfd2
1696 data8 sys_epoll_create1 // 1315
1697 data8 sys_dup3
1698 data8 sys_pipe2
1699 data8 sys_inotify_init1
1694 1700
1695 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1701 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1696#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ 1702#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 233434f4f88f..f07688da947c 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -429,8 +429,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
429 ((struct fnptr *)kretprobe_trampoline)->ip; 429 ((struct fnptr *)kretprobe_trampoline)->ip;
430 430
431 INIT_HLIST_HEAD(&empty_rp); 431 INIT_HLIST_HEAD(&empty_rp);
432 spin_lock_irqsave(&kretprobe_lock, flags); 432 kretprobe_hash_lock(current, &head, &flags);
433 head = kretprobe_inst_table_head(current);
434 433
435 /* 434 /*
436 * It is possible to have multiple instances associated with a given 435 * It is possible to have multiple instances associated with a given
@@ -485,7 +484,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
485 kretprobe_assert(ri, orig_ret_address, trampoline_address); 484 kretprobe_assert(ri, orig_ret_address, trampoline_address);
486 485
487 reset_current_kprobe(); 486 reset_current_kprobe();
488 spin_unlock_irqrestore(&kretprobe_lock, flags); 487 kretprobe_hash_unlock(current, &flags);
489 preempt_enable_no_resched(); 488 preempt_enable_no_resched();
490 489
491 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 490 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
@@ -500,7 +499,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
500 return 1; 499 return 1;
501} 500}
502 501
503/* Called with kretprobe_lock held */
504void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 502void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
505 struct pt_regs *regs) 503 struct pt_regs *regs)
506{ 504{
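
The kretprobe trampoline handler drops the single global kretprobe_lock (and the now-stale "Called with kretprobe_lock held" comment) in favour of the per-task hashed helpers kretprobe_hash_lock()/kretprobe_hash_unlock(), which also hand back the per-task instance list head. A minimal sketch of the converted locking pattern, with the instance walk elided:

#include <linux/kprobes.h>
#include <linux/sched.h>

/* After this change a per-task hashed lock replaces the old global
 * kretprobe_lock; the helper also returns the hlist head holding
 * this task's kretprobe instances. */
static int __kprobes example_trampoline_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct hlist_head *head;
	unsigned long flags;

	kretprobe_hash_lock(current, &head, &flags);
	/* ... match and recycle kretprobe_instance entries on *head ... */
	kretprobe_hash_unlock(current, &flags);

	return 1;
}
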
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 1eda194b9559..bcbb6d8792d3 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -160,7 +160,7 @@ sys_pipe (void)
160 int fd[2]; 160 int fd[2];
161 int retval; 161 int retval;
162 162
163 retval = do_pipe(fd); 163 retval = do_pipe_flags(fd, 0);
164 if (retval) 164 if (retval)
165 goto out; 165 goto out;
166 retval = fd[0]; 166 retval = fd[0];
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 544dc420c65e..d83125e1ed27 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -36,7 +36,6 @@ struct early_node_data {
36 struct ia64_node_data *node_data; 36 struct ia64_node_data *node_data;
37 unsigned long pernode_addr; 37 unsigned long pernode_addr;
38 unsigned long pernode_size; 38 unsigned long pernode_size;
39 struct bootmem_data bootmem_data;
40 unsigned long num_physpages; 39 unsigned long num_physpages;
41#ifdef CONFIG_ZONE_DMA 40#ifdef CONFIG_ZONE_DMA
42 unsigned long num_dma_physpages; 41 unsigned long num_dma_physpages;
@@ -75,17 +74,17 @@ pg_data_t *pgdat_list[MAX_NUMNODES];
75static int __init build_node_maps(unsigned long start, unsigned long len, 74static int __init build_node_maps(unsigned long start, unsigned long len,
76 int node) 75 int node)
77{ 76{
78 unsigned long cstart, epfn, end = start + len; 77 unsigned long spfn, epfn, end = start + len;
79 struct bootmem_data *bdp = &mem_data[node].bootmem_data; 78 struct bootmem_data *bdp = &bootmem_node_data[node];
80 79
81 epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT; 80 epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
82 cstart = GRANULEROUNDDOWN(start); 81 spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
83 82
84 if (!bdp->node_low_pfn) { 83 if (!bdp->node_low_pfn) {
85 bdp->node_boot_start = cstart; 84 bdp->node_min_pfn = spfn;
86 bdp->node_low_pfn = epfn; 85 bdp->node_low_pfn = epfn;
87 } else { 86 } else {
88 bdp->node_boot_start = min(cstart, bdp->node_boot_start); 87 bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
89 bdp->node_low_pfn = max(epfn, bdp->node_low_pfn); 88 bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
90 } 89 }
91 90
@@ -167,7 +166,7 @@ static void __init fill_pernode(int node, unsigned long pernode,
167{ 166{
168 void *cpu_data; 167 void *cpu_data;
169 int cpus = early_nr_cpus_node(node); 168 int cpus = early_nr_cpus_node(node);
170 struct bootmem_data *bdp = &mem_data[node].bootmem_data; 169 struct bootmem_data *bdp = &bootmem_node_data[node];
171 170
172 mem_data[node].pernode_addr = pernode; 171 mem_data[node].pernode_addr = pernode;
173 mem_data[node].pernode_size = pernodesize; 172 mem_data[node].pernode_size = pernodesize;
@@ -222,20 +221,21 @@ static void __init fill_pernode(int node, unsigned long pernode,
222static int __init find_pernode_space(unsigned long start, unsigned long len, 221static int __init find_pernode_space(unsigned long start, unsigned long len,
223 int node) 222 int node)
224{ 223{
225 unsigned long epfn; 224 unsigned long spfn, epfn;
226 unsigned long pernodesize = 0, pernode, pages, mapsize; 225 unsigned long pernodesize = 0, pernode, pages, mapsize;
227 struct bootmem_data *bdp = &mem_data[node].bootmem_data; 226 struct bootmem_data *bdp = &bootmem_node_data[node];
228 227
228 spfn = start >> PAGE_SHIFT;
229 epfn = (start + len) >> PAGE_SHIFT; 229 epfn = (start + len) >> PAGE_SHIFT;
230 230
231 pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT); 231 pages = bdp->node_low_pfn - bdp->node_min_pfn;
232 mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT; 232 mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
233 233
234 /* 234 /*
235 * Make sure this memory falls within this node's usable memory 235 * Make sure this memory falls within this node's usable memory
236 * since we may have thrown some away in build_maps(). 236 * since we may have thrown some away in build_maps().
237 */ 237 */
238 if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn) 238 if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
239 return 0; 239 return 0;
240 240
241 /* Don't setup this node's local space twice... */ 241 /* Don't setup this node's local space twice... */
@@ -297,7 +297,7 @@ static void __init reserve_pernode_space(void)
297 bdp = pdp->bdata; 297 bdp = pdp->bdata;
298 298
299 /* First the bootmem_map itself */ 299 /* First the bootmem_map itself */
300 pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT); 300 pages = bdp->node_low_pfn - bdp->node_min_pfn;
301 size = bootmem_bootmap_pages(pages) << PAGE_SHIFT; 301 size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
302 base = __pa(bdp->node_bootmem_map); 302 base = __pa(bdp->node_bootmem_map);
303 reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT); 303 reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
@@ -440,7 +440,7 @@ void __init find_memory(void)
440 efi_memmap_walk(find_max_min_low_pfn, NULL); 440 efi_memmap_walk(find_max_min_low_pfn, NULL);
441 441
442 for_each_online_node(node) 442 for_each_online_node(node)
443 if (mem_data[node].bootmem_data.node_low_pfn) { 443 if (bootmem_node_data[node].node_low_pfn) {
444 node_clear(node, memory_less_mask); 444 node_clear(node, memory_less_mask);
445 mem_data[node].min_pfn = ~0UL; 445 mem_data[node].min_pfn = ~0UL;
446 } 446 }
@@ -460,14 +460,14 @@ void __init find_memory(void)
460 else if (node_isset(node, memory_less_mask)) 460 else if (node_isset(node, memory_less_mask))
461 continue; 461 continue;
462 462
463 bdp = &mem_data[node].bootmem_data; 463 bdp = &bootmem_node_data[node];
464 pernode = mem_data[node].pernode_addr; 464 pernode = mem_data[node].pernode_addr;
465 pernodesize = mem_data[node].pernode_size; 465 pernodesize = mem_data[node].pernode_size;
466 map = pernode + pernodesize; 466 map = pernode + pernodesize;
467 467
468 init_bootmem_node(pgdat_list[node], 468 init_bootmem_node(pgdat_list[node],
469 map>>PAGE_SHIFT, 469 map>>PAGE_SHIFT,
470 bdp->node_boot_start>>PAGE_SHIFT, 470 bdp->node_min_pfn,
471 bdp->node_low_pfn); 471 bdp->node_low_pfn);
472 } 472 }
473 473
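
The ia64 discontig code stops embedding a private bootmem_data in early_node_data and uses the generic bootmem_node_data[] array instead; node_boot_start (a byte address) gives way to node_min_pfn, so both bounds of a node's bootmem range are now page frame numbers. A short sketch of the before/after field usage, assuming bootmem_node_data[] is exported by the core bootmem code as the hunks imply:

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>

/* Both bounds are PFNs now, so no >> PAGE_SHIFT on the start. */
static unsigned long __init node_bootmem_pages(int nid)
{
	bootmem_data_t *bdp = &bootmem_node_data[nid];
	/* was: &mem_data[nid].bootmem_data, and
	 *      bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT) */
	return bdp->node_low_pfn - bdp->node_min_pfn;
}
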
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index d3ce8f3bcaa6..c45fc7f5a979 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -24,7 +24,7 @@
24unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT; 24unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT;
25 25
26pte_t * 26pte_t *
27huge_pte_alloc (struct mm_struct *mm, unsigned long addr) 27huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
28{ 28{
29 unsigned long taddr = htlbpage_to_page(addr); 29 unsigned long taddr = htlbpage_to_page(addr);
30 pgd_t *pgd; 30 pgd_t *pgd;
@@ -75,7 +75,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
75 * Don't actually need to do any preparation, but need to make sure 75 * Don't actually need to do any preparation, but need to make sure
76 * the address is in the right region. 76 * the address is in the right region.
77 */ 77 */
78int prepare_hugepage_range(unsigned long addr, unsigned long len) 78int prepare_hugepage_range(struct file *file,
79 unsigned long addr, unsigned long len)
79{ 80{
80 if (len & ~HPAGE_MASK) 81 if (len & ~HPAGE_MASK)
81 return -EINVAL; 82 return -EINVAL;
@@ -106,13 +107,19 @@ int pmd_huge(pmd_t pmd)
106{ 107{
107 return 0; 108 return 0;
108} 109}
110
111int pud_huge(pud_t pud)
112{
113 return 0;
114}
115
109struct page * 116struct page *
110follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) 117follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
111{ 118{
112 return NULL; 119 return NULL;
113} 120}
114 121
115void hugetlb_free_pgd_range(struct mmu_gather **tlb, 122void hugetlb_free_pgd_range(struct mmu_gather *tlb,
116 unsigned long addr, unsigned long end, 123 unsigned long addr, unsigned long end,
117 unsigned long floor, unsigned long ceiling) 124 unsigned long floor, unsigned long ceiling)
118{ 125{
@@ -149,7 +156,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
149 156
150 /* Handle MAP_FIXED */ 157 /* Handle MAP_FIXED */
151 if (flags & MAP_FIXED) { 158 if (flags & MAP_FIXED) {
152 if (prepare_hugepage_range(addr, len)) 159 if (prepare_hugepage_range(file, addr, len))
153 return -EINVAL; 160 return -EINVAL;
154 return addr; 161 return addr;
155 } 162 }
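
The hugetlb arch hooks gain extra context in this series: huge_pte_alloc() now receives the huge page size, prepare_hugepage_range() the struct file of the mapping, a pud_huge() predicate appears next to pmd_huge(), and hugetlb_free_pgd_range() takes a plain mmu_gather pointer. The updated prototypes, copied from the hunks above (bodies elided, so this is a summary rather than an implementation):

#include <linux/hugetlb.h>
#include <linux/mm.h>

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
		      unsigned long sz);
int prepare_hugepage_range(struct file *file, unsigned long addr,
			   unsigned long len);
int pud_huge(pud_t pud);			/* new, alongside pmd_huge() */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,	/* was struct mmu_gather ** */
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling);
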
diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
index 600d40e33495..d394292498c0 100644
--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -70,8 +70,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */
70static int fill_inbuf(void); 70static int fill_inbuf(void);
71static void flush_window(void); 71static void flush_window(void);
72static void error(char *m); 72static void error(char *m);
73static void gzip_mark(void **);
74static void gzip_release(void **);
75 73
76static unsigned char *input_data; 74static unsigned char *input_data;
77static int input_len; 75static int input_len;
@@ -82,9 +80,6 @@ static unsigned long output_ptr = 0;
82 80
83#include "m32r_sio.c" 81#include "m32r_sio.c"
84 82
85static void *malloc(int size);
86static void free(void *where);
87
88static unsigned long free_mem_ptr; 83static unsigned long free_mem_ptr;
89static unsigned long free_mem_end_ptr; 84static unsigned long free_mem_end_ptr;
90 85
@@ -92,38 +87,6 @@ static unsigned long free_mem_end_ptr;
92 87
93#include "../../../../lib/inflate.c" 88#include "../../../../lib/inflate.c"
94 89
95static void *malloc(int size)
96{
97 void *p;
98
99 if (size <0) error("Malloc error");
100 if (free_mem_ptr == 0) error("Memory error");
101
102 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
103
104 p = (void *)free_mem_ptr;
105 free_mem_ptr += size;
106
107 if (free_mem_ptr >= free_mem_end_ptr)
108 error("Out of memory");
109
110 return p;
111}
112
113static void free(void *where)
114{ /* Don't care */
115}
116
117static void gzip_mark(void **ptr)
118{
119 *ptr = (void *) free_mem_ptr;
120}
121
122static void gzip_release(void **ptr)
123{
124 free_mem_ptr = (long) *ptr;
125}
126
127void* memset(void* s, int c, size_t n) 90void* memset(void* s, int c, size_t n)
128{ 91{
129 int i; 92 int i;
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 07c1af7dc0e2..cbc3c4c54566 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -20,7 +20,6 @@ extern char _end[];
20 20
21struct pglist_data *node_data[MAX_NUMNODES]; 21struct pglist_data *node_data[MAX_NUMNODES];
22EXPORT_SYMBOL(node_data); 22EXPORT_SYMBOL(node_data);
23static bootmem_data_t node_bdata[MAX_NUMNODES] __initdata;
24 23
25pg_data_t m32r_node_data[MAX_NUMNODES]; 24pg_data_t m32r_node_data[MAX_NUMNODES];
26 25
@@ -81,7 +80,7 @@ unsigned long __init setup_memory(void)
81 for_each_online_node(nid) { 80 for_each_online_node(nid) {
82 mp = &mem_prof[nid]; 81 mp = &mem_prof[nid];
83 NODE_DATA(nid)=(pg_data_t *)&m32r_node_data[nid]; 82 NODE_DATA(nid)=(pg_data_t *)&m32r_node_data[nid];
84 NODE_DATA(nid)->bdata = &node_bdata[nid]; 83 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
85 min_pfn = mp->start_pfn; 84 min_pfn = mp->start_pfn;
86 max_pfn = mp->start_pfn + mp->pages; 85 max_pfn = mp->start_pfn + mp->pages;
87 bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn, 86 bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
@@ -124,8 +123,7 @@ unsigned long __init setup_memory(void)
124 return max_low_pfn; 123 return max_low_pfn;
125} 124}
126 125
127#define START_PFN(nid) \ 126#define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn)
128 (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
129#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) 127#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
130 128
131unsigned long __init zone_sizes_init(void) 129unsigned long __init zone_sizes_init(void)
@@ -148,8 +146,7 @@ unsigned long __init zone_sizes_init(void)
148 zholes_size[ZONE_DMA] = mp->holes; 146 zholes_size[ZONE_DMA] = mp->holes;
149 holes += zholes_size[ZONE_DMA]; 147 holes += zholes_size[ZONE_DMA];
150 148
151 free_area_init_node(nid, NODE_DATA(nid), zones_size, 149 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
152 start_pfn, zholes_size);
153 } 150 }
154 151
155 /* 152 /*
@@ -163,4 +160,3 @@ unsigned long __init zone_sizes_init(void)
163 160
164 return holes; 161 return holes;
165} 162}
166
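
Two related bootmem conversions repeat across the m32r and m68k files below: START_PFN() reads node_min_pfn directly instead of shifting node_boot_start, and free_area_init_node() loses its pg_data_t argument, presumably because the core can derive the pgdat from the node id. A sketch of the converted call, with names taken from the hunk above:

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* The pg_data_t pointer is gone from the signature; only the node id,
 * zone sizes, start PFN and hole sizes are passed. */
static void __init example_zone_init(int nid, unsigned long *zones_size,
				     unsigned long start_pfn,
				     unsigned long *zholes_size)
{
	/* was: free_area_init_node(nid, NODE_DATA(nid), zones_size,
	 *				start_pfn, zholes_size); */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
}
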
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index bbd97c85bc5d..2554eb59cfef 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -93,8 +93,7 @@ void free_initrd_mem(unsigned long, unsigned long);
93#endif 93#endif
94 94
95/* It'd be good if these lines were in the standard header file. */ 95/* It'd be good if these lines were in the standard header file. */
96#define START_PFN(nid) \ 96#define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn)
97 (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
98#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) 97#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
99 98
100#ifndef CONFIG_DISCONTIGMEM 99#ifndef CONFIG_DISCONTIGMEM
@@ -123,7 +122,7 @@ unsigned long __init zone_sizes_init(void)
123 start_pfn = __MEMORY_START >> PAGE_SHIFT; 122 start_pfn = __MEMORY_START >> PAGE_SHIFT;
124#endif /* CONFIG_MMU */ 123#endif /* CONFIG_MMU */
125 124
126 free_area_init_node(0, NODE_DATA(0), zones_size, start_pfn, 0); 125 free_area_init_node(0, zones_size, start_pfn, 0);
127 126
128 return 0; 127 return 0;
129} 128}
@@ -252,4 +251,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
252 printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); 251 printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
253} 252}
254#endif 253#endif
255
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index cbe36538af47..61df1d33c050 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/mm.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/ioport.h> 14#include <linux/ioport.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index d8fb9c5303cc..79f5f94d4800 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -32,8 +32,6 @@
32 32
33DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 33DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
34 34
35static bootmem_data_t __initdata bootmem_data[MAX_NUMNODES];
36
37pg_data_t pg_data_map[MAX_NUMNODES]; 35pg_data_t pg_data_map[MAX_NUMNODES];
38EXPORT_SYMBOL(pg_data_map); 36EXPORT_SYMBOL(pg_data_map);
39 37
@@ -58,7 +56,7 @@ void __init m68k_setup_node(int node)
58 pg_data_table[i] = pg_data_map + node; 56 pg_data_table[i] = pg_data_map + node;
59 } 57 }
60#endif 58#endif
61 pg_data_map[node].bdata = bootmem_data + node; 59 pg_data_map[node].bdata = bootmem_node_data + node;
62 node_set_online(node); 60 node_set_online(node);
63} 61}
64 62
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 226795bdf355..c5dbb9bdb322 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -296,7 +296,7 @@ void __init paging_init(void)
296#endif 296#endif
297 for (i = 0; i < m68k_num_memory; i++) { 297 for (i = 0; i < m68k_num_memory; i++) {
298 zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; 298 zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
299 free_area_init_node(i, pg_data_map + i, zones_size, 299 free_area_init_node(i, zones_size,
300 m68k_memory[i].addr >> PAGE_SHIFT, NULL); 300 m68k_memory[i].addr >> PAGE_SHIFT, NULL);
301 } 301 }
302} 302}
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index edceefc18870..1b902dbd4376 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -94,7 +94,7 @@ void __init paging_init(void)
94 94
95 /* I really wish I knew why the following change made things better... -- Sam */ 95 /* I really wish I knew why the following change made things better... -- Sam */
96/* free_area_init(zones_size); */ 96/* free_area_init(zones_size); */
97 free_area_init_node(0, NODE_DATA(0), zones_size, 97 free_area_init_node(0, zones_size,
98 (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL); 98 (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL);
99 99
100 100
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 8e8441587c22..2e7515e8db98 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -58,10 +58,18 @@ config GENERIC_TIME
58 bool 58 bool
59 default y 59 default y
60 60
61config GENERIC_CMOS_UPDATE
62 bool
63 default y
64
61config TIME_LOW_RES 65config TIME_LOW_RES
62 bool 66 bool
63 default y 67 default y
64 68
69config GENERIC_CLOCKEVENTS
70 bool
71 default n
72
65config NO_IOPORT 73config NO_IOPORT
66 def_bool y 74 def_bool y
67 75
@@ -108,11 +116,13 @@ config M5206e
108 116
109config M520x 117config M520x
110 bool "MCF520x" 118 bool "MCF520x"
119 select GENERIC_CLOCKEVENTS
111 help 120 help
112 Freescale Coldfire 5207/5208 processor support. 121 Freescale Coldfire 5207/5208 processor support.
113 122
114config M523x 123config M523x
115 bool "MCF523x" 124 bool "MCF523x"
125 select GENERIC_CLOCKEVENTS
116 help 126 help
117 Freescale Coldfire 5230/1/2/4/5 processor support 127 Freescale Coldfire 5230/1/2/4/5 processor support
118 128
@@ -138,6 +148,7 @@ config M5275
138 148
139config M528x 149config M528x
140 bool "MCF528x" 150 bool "MCF528x"
151 select GENERIC_CLOCKEVENTS
141 help 152 help
142 Motorola ColdFire 5280/5282 processor support. 153 Motorola ColdFire 5280/5282 processor support.
143 154
@@ -161,6 +172,7 @@ endchoice
161config M527x 172config M527x
162 bool 173 bool
163 depends on (M5271 || M5275) 174 depends on (M5271 || M5275)
175 select GENERIC_CLOCKEVENTS
164 default y 176 default y
165 177
166config COLDFIRE 178config COLDFIRE
@@ -674,6 +686,9 @@ endchoice
674if COLDFIRE 686if COLDFIRE
675source "kernel/Kconfig.preempt" 687source "kernel/Kconfig.preempt"
676endif 688endif
689
690source "kernel/time/Kconfig"
691
677source "mm/Kconfig" 692source "mm/Kconfig"
678 693
679endmenu 694endmenu
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index e0b5f62e395c..b63bbcf874ff 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -8,6 +8,8 @@
8# (C) Copyright 2002, Greg Ungerer <gerg@snapgear.com> 8# (C) Copyright 2002, Greg Ungerer <gerg@snapgear.com>
9# 9#
10 10
11KBUILD_DEFCONFIG := m5208evb_defconfig
12
11platform-$(CONFIG_M68328) := 68328 13platform-$(CONFIG_M68328) := 68328
12platform-$(CONFIG_M68EZ328) := 68EZ328 14platform-$(CONFIG_M68EZ328) := 68EZ328
13platform-$(CONFIG_M68VZ328) := 68VZ328 15platform-$(CONFIG_M68VZ328) := 68VZ328
@@ -90,13 +92,14 @@ export PLATFORM BOARD MODEL CPUCLASS
90cflags-$(CONFIG_M5206) := -m5200 92cflags-$(CONFIG_M5206) := -m5200
91cflags-$(CONFIG_M5206e) := -m5200 93cflags-$(CONFIG_M5206e) := -m5200
92cflags-$(CONFIG_M520x) := -m5307 94cflags-$(CONFIG_M520x) := -m5307
93cflags-$(CONFIG_M523x) := -m5307 95cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
94cflags-$(CONFIG_M5249) := -m5200 96cflags-$(CONFIG_M5249) := -m5200
95cflags-$(CONFIG_M527x) := -m5307 97cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
96cflags-$(CONFIG_M5272) := -m5307 98cflags-$(CONFIG_M5272) := -m5307
97cflags-$(CONFIG_M528x) := -m5307 99cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
100cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307)
98cflags-$(CONFIG_M5307) := -m5307 101cflags-$(CONFIG_M5307) := -m5307
99cflags-$(CONFIG_M532x) := -m5307 102cflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307)
100cflags-$(CONFIG_M5407) := -m5200 103cflags-$(CONFIG_M5407) := -m5200
101cflags-$(CONFIG_M68328) := -m68000 104cflags-$(CONFIG_M68328) := -m68000
102cflags-$(CONFIG_M68EZ328) := -m68000 105cflags-$(CONFIG_M68EZ328) := -m68000
diff --git a/arch/m68knommu/configs/m5208evb_defconfig b/arch/m68knommu/configs/m5208evb_defconfig
new file mode 100644
index 000000000000..6fae33a05e2a
--- /dev/null
+++ b/arch/m68knommu/configs/m5208evb_defconfig
@@ -0,0 +1,610 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc1
4#
5CONFIG_M68K=y
6# CONFIG_MMU is not set
7# CONFIG_FPU is not set
8CONFIG_ZONE_DMA=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y
10# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
11# CONFIG_ARCH_HAS_ILOG2_U32 is not set
12# CONFIG_ARCH_HAS_ILOG2_U64 is not set
13CONFIG_GENERIC_FIND_NEXT_BIT=y
14CONFIG_GENERIC_HWEIGHT=y
15CONFIG_GENERIC_HARDIRQS=y
16CONFIG_GENERIC_CALIBRATE_DELAY=y
17CONFIG_GENERIC_TIME=y
18CONFIG_TIME_LOW_RES=y
19CONFIG_NO_IOPORT=y
20CONFIG_ARCH_SUPPORTS_AOUT=y
21CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
22
23#
24# General setup
25#
26CONFIG_EXPERIMENTAL=y
27CONFIG_BROKEN_ON_SMP=y
28CONFIG_INIT_ENV_ARG_LIMIT=32
29CONFIG_LOCALVERSION=""
30CONFIG_LOCALVERSION_AUTO=y
31# CONFIG_SYSVIPC is not set
32# CONFIG_POSIX_MQUEUE is not set
33# CONFIG_BSD_PROCESS_ACCT is not set
34# CONFIG_TASKSTATS is not set
35# CONFIG_AUDIT is not set
36# CONFIG_IKCONFIG is not set
37CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set
40# CONFIG_RELAY is not set
41# CONFIG_NAMESPACES is not set
42# CONFIG_BLK_DEV_INITRD is not set
43# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
44CONFIG_SYSCTL=y
45CONFIG_EMBEDDED=y
46# CONFIG_UID16 is not set
47# CONFIG_SYSCTL_SYSCALL is not set
48# CONFIG_KALLSYMS is not set
49# CONFIG_HOTPLUG is not set
50CONFIG_PRINTK=y
51CONFIG_BUG=y
52CONFIG_ELF_CORE=y
53# CONFIG_COMPAT_BRK is not set
54CONFIG_BASE_FULL=y
55# CONFIG_FUTEX is not set
56# CONFIG_EPOLL is not set
57# CONFIG_SIGNALFD is not set
58# CONFIG_TIMERFD is not set
59# CONFIG_EVENTFD is not set
60# CONFIG_VM_EVENT_COUNTERS is not set
61CONFIG_SLAB=y
62# CONFIG_SLUB is not set
63# CONFIG_SLOB is not set
64# CONFIG_PROFILING is not set
65# CONFIG_MARKERS is not set
66# CONFIG_HAVE_OPROFILE is not set
67# CONFIG_HAVE_KPROBES is not set
68# CONFIG_HAVE_KRETPROBES is not set
69# CONFIG_HAVE_DMA_ATTRS is not set
70CONFIG_SLABINFO=y
71CONFIG_TINY_SHMEM=y
72CONFIG_BASE_SMALL=0
73CONFIG_MODULES=y
74CONFIG_MODULE_UNLOAD=y
75# CONFIG_MODULE_FORCE_UNLOAD is not set
76# CONFIG_MODVERSIONS is not set
77# CONFIG_MODULE_SRCVERSION_ALL is not set
78# CONFIG_KMOD is not set
79CONFIG_BLOCK=y
80# CONFIG_LBD is not set
81# CONFIG_LSF is not set
82# CONFIG_BLK_DEV_BSG is not set
83
84#
85# IO Schedulers
86#
87CONFIG_IOSCHED_NOOP=y
88# CONFIG_IOSCHED_AS is not set
89# CONFIG_IOSCHED_DEADLINE is not set
90# CONFIG_IOSCHED_CFQ is not set
91# CONFIG_DEFAULT_AS is not set
92# CONFIG_DEFAULT_DEADLINE is not set
93# CONFIG_DEFAULT_CFQ is not set
94CONFIG_DEFAULT_NOOP=y
95CONFIG_DEFAULT_IOSCHED="noop"
96CONFIG_CLASSIC_RCU=y
97
98#
99# Processor type and features
100#
101# CONFIG_M68328 is not set
102# CONFIG_M68EZ328 is not set
103# CONFIG_M68VZ328 is not set
104# CONFIG_M68360 is not set
105# CONFIG_M5206 is not set
106# CONFIG_M5206e is not set
107CONFIG_M520x=y
108# CONFIG_M523x is not set
109# CONFIG_M5249 is not set
110# CONFIG_M5271 is not set
111# CONFIG_M5272 is not set
112# CONFIG_M5275 is not set
113# CONFIG_M528x is not set
114# CONFIG_M5307 is not set
115# CONFIG_M532x is not set
116# CONFIG_M5407 is not set
117CONFIG_COLDFIRE=y
118CONFIG_CLOCK_SET=y
119CONFIG_CLOCK_FREQ=166666666
120CONFIG_CLOCK_DIV=2
121
122#
123# Platform
124#
125CONFIG_M5208EVB=y
126CONFIG_FREESCALE=y
127# CONFIG_4KSTACKS is not set
128CONFIG_HZ=100
129
130#
131# RAM configuration
132#
133CONFIG_RAMBASE=0x40000000
134CONFIG_RAMSIZE=0x2000000
135CONFIG_VECTORBASE=0x40000000
136CONFIG_KERNELBASE=0x40020000
137# CONFIG_RAMAUTOBIT is not set
138# CONFIG_RAM8BIT is not set
139CONFIG_RAM16BIT=y
140# CONFIG_RAM32BIT is not set
141
142#
143# ROM configuration
144#
145# CONFIG_ROM is not set
146CONFIG_RAMKERNEL=y
147# CONFIG_ROMKERNEL is not set
148CONFIG_SELECT_MEMORY_MODEL=y
149CONFIG_FLATMEM_MANUAL=y
150# CONFIG_DISCONTIGMEM_MANUAL is not set
151# CONFIG_SPARSEMEM_MANUAL is not set
152CONFIG_FLATMEM=y
153CONFIG_FLAT_NODE_MEM_MAP=y
154# CONFIG_SPARSEMEM_STATIC is not set
155# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
156CONFIG_PAGEFLAGS_EXTENDED=y
157CONFIG_SPLIT_PTLOCK_CPUS=4
158# CONFIG_RESOURCES_64BIT is not set
159CONFIG_ZONE_DMA_FLAG=1
160CONFIG_VIRT_TO_BUS=y
161CONFIG_ISA_DMA_API=y
162
163#
164# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
165#
166# CONFIG_PCI is not set
167# CONFIG_ARCH_SUPPORTS_MSI is not set
168
169#
170# Executable file formats
171#
172CONFIG_BINFMT_FLAT=y
173# CONFIG_BINFMT_ZFLAT is not set
174# CONFIG_BINFMT_SHARED_FLAT is not set
175# CONFIG_BINFMT_AOUT is not set
176# CONFIG_BINFMT_MISC is not set
177
178#
179# Power management options
180#
181# CONFIG_PM is not set
182
183#
184# Networking
185#
186CONFIG_NET=y
187
188#
189# Networking options
190#
191CONFIG_PACKET=y
192# CONFIG_PACKET_MMAP is not set
193CONFIG_UNIX=y
194# CONFIG_NET_KEY is not set
195CONFIG_INET=y
196# CONFIG_IP_MULTICAST is not set
197# CONFIG_IP_ADVANCED_ROUTER is not set
198CONFIG_IP_FIB_HASH=y
199# CONFIG_IP_PNP is not set
200# CONFIG_NET_IPIP is not set
201# CONFIG_NET_IPGRE is not set
202# CONFIG_ARPD is not set
203# CONFIG_SYN_COOKIES is not set
204# CONFIG_INET_AH is not set
205# CONFIG_INET_ESP is not set
206# CONFIG_INET_IPCOMP is not set
207# CONFIG_INET_XFRM_TUNNEL is not set
208# CONFIG_INET_TUNNEL is not set
209# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
210# CONFIG_INET_XFRM_MODE_TUNNEL is not set
211# CONFIG_INET_XFRM_MODE_BEET is not set
212# CONFIG_INET_LRO is not set
213# CONFIG_INET_DIAG is not set
214# CONFIG_TCP_CONG_ADVANCED is not set
215CONFIG_TCP_CONG_CUBIC=y
216CONFIG_DEFAULT_TCP_CONG="cubic"
217# CONFIG_TCP_MD5SIG is not set
218# CONFIG_IPV6 is not set
219# CONFIG_NETWORK_SECMARK is not set
220# CONFIG_NETFILTER is not set
221# CONFIG_IP_DCCP is not set
222# CONFIG_IP_SCTP is not set
223# CONFIG_TIPC is not set
224# CONFIG_ATM is not set
225# CONFIG_BRIDGE is not set
226# CONFIG_VLAN_8021Q is not set
227# CONFIG_DECNET is not set
228# CONFIG_LLC2 is not set
229# CONFIG_IPX is not set
230# CONFIG_ATALK is not set
231# CONFIG_X25 is not set
232# CONFIG_LAPB is not set
233# CONFIG_ECONET is not set
234# CONFIG_WAN_ROUTER is not set
235# CONFIG_NET_SCHED is not set
236
237#
238# Network testing
239#
240# CONFIG_NET_PKTGEN is not set
241# CONFIG_HAMRADIO is not set
242# CONFIG_CAN is not set
243# CONFIG_IRDA is not set
244# CONFIG_BT is not set
245# CONFIG_AF_RXRPC is not set
246
247#
248# Wireless
249#
250# CONFIG_CFG80211 is not set
251# CONFIG_WIRELESS_EXT is not set
252# CONFIG_MAC80211 is not set
253# CONFIG_IEEE80211 is not set
254# CONFIG_RFKILL is not set
255# CONFIG_NET_9P is not set
256
257#
258# Device Drivers
259#
260
261#
262# Generic Driver Options
263#
264CONFIG_STANDALONE=y
265CONFIG_PREVENT_FIRMWARE_BUILD=y
266# CONFIG_SYS_HYPERVISOR is not set
267# CONFIG_CONNECTOR is not set
268CONFIG_MTD=y
269# CONFIG_MTD_DEBUG is not set
270# CONFIG_MTD_CONCAT is not set
271CONFIG_MTD_PARTITIONS=y
272# CONFIG_MTD_REDBOOT_PARTS is not set
273# CONFIG_MTD_CMDLINE_PARTS is not set
274# CONFIG_MTD_AR7_PARTS is not set
275
276#
277# User Modules And Translation Layers
278#
279CONFIG_MTD_CHAR=y
280CONFIG_MTD_BLKDEVS=y
281CONFIG_MTD_BLOCK=y
282# CONFIG_FTL is not set
283# CONFIG_NFTL is not set
284# CONFIG_INFTL is not set
285# CONFIG_RFD_FTL is not set
286# CONFIG_SSFDC is not set
287# CONFIG_MTD_OOPS is not set
288
289#
290# RAM/ROM/Flash chip drivers
291#
292CONFIG_MTD_CFI=y
293# CONFIG_MTD_JEDECPROBE is not set
294CONFIG_MTD_GEN_PROBE=y
295# CONFIG_MTD_CFI_ADV_OPTIONS is not set
296CONFIG_MTD_MAP_BANK_WIDTH_1=y
297CONFIG_MTD_MAP_BANK_WIDTH_2=y
298CONFIG_MTD_MAP_BANK_WIDTH_4=y
299# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
300# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
301# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
302CONFIG_MTD_CFI_I1=y
303CONFIG_MTD_CFI_I2=y
304# CONFIG_MTD_CFI_I4 is not set
305# CONFIG_MTD_CFI_I8 is not set
306# CONFIG_MTD_CFI_INTELEXT is not set
307CONFIG_MTD_CFI_AMDSTD=y
308# CONFIG_MTD_CFI_STAA is not set
309CONFIG_MTD_CFI_UTIL=y
310CONFIG_MTD_RAM=y
311# CONFIG_MTD_ROM is not set
312# CONFIG_MTD_ABSENT is not set
313
314#
315# Mapping drivers for chip access
316#
317# CONFIG_MTD_COMPLEX_MAPPINGS is not set
318# CONFIG_MTD_PHYSMAP is not set
319CONFIG_MTD_UCLINUX=y
320# CONFIG_MTD_PLATRAM is not set
321
322#
323# Self-contained MTD device drivers
324#
325# CONFIG_MTD_SLRAM is not set
326# CONFIG_MTD_PHRAM is not set
327# CONFIG_MTD_MTDRAM is not set
328# CONFIG_MTD_BLOCK2MTD is not set
329
330#
331# Disk-On-Chip Device Drivers
332#
333# CONFIG_MTD_DOC2000 is not set
334# CONFIG_MTD_DOC2001 is not set
335# CONFIG_MTD_DOC2001PLUS is not set
336# CONFIG_MTD_NAND is not set
337# CONFIG_MTD_ONENAND is not set
338
339#
340# UBI - Unsorted block images
341#
342# CONFIG_MTD_UBI is not set
343# CONFIG_PARPORT is not set
344CONFIG_BLK_DEV=y
345# CONFIG_BLK_DEV_COW_COMMON is not set
346# CONFIG_BLK_DEV_LOOP is not set
347# CONFIG_BLK_DEV_NBD is not set
348CONFIG_BLK_DEV_RAM=y
349CONFIG_BLK_DEV_RAM_COUNT=16
350CONFIG_BLK_DEV_RAM_SIZE=4096
351# CONFIG_BLK_DEV_XIP is not set
352# CONFIG_CDROM_PKTCDVD is not set
353# CONFIG_ATA_OVER_ETH is not set
354# CONFIG_MISC_DEVICES is not set
355CONFIG_HAVE_IDE=y
356# CONFIG_IDE is not set
357
358#
359# SCSI device support
360#
361# CONFIG_RAID_ATTRS is not set
362# CONFIG_SCSI is not set
363# CONFIG_SCSI_DMA is not set
364# CONFIG_SCSI_NETLINK is not set
365# CONFIG_MD is not set
366CONFIG_NETDEVICES=y
367# CONFIG_NETDEVICES_MULTIQUEUE is not set
368# CONFIG_DUMMY is not set
369# CONFIG_BONDING is not set
370# CONFIG_MACVLAN is not set
371# CONFIG_EQUALIZER is not set
372# CONFIG_TUN is not set
373# CONFIG_VETH is not set
374# CONFIG_PHYLIB is not set
375CONFIG_NET_ETHERNET=y
376# CONFIG_MII is not set
377# CONFIG_IBM_NEW_EMAC_ZMII is not set
378# CONFIG_IBM_NEW_EMAC_RGMII is not set
379# CONFIG_IBM_NEW_EMAC_TAH is not set
380# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
381# CONFIG_B44 is not set
382CONFIG_FEC=y
383# CONFIG_FEC2 is not set
384# CONFIG_NETDEV_1000 is not set
385# CONFIG_NETDEV_10000 is not set
386
387#
388# Wireless LAN
389#
390# CONFIG_WLAN_PRE80211 is not set
391# CONFIG_WLAN_80211 is not set
392# CONFIG_IWLWIFI is not set
393# CONFIG_IWLWIFI_LEDS is not set
394# CONFIG_WAN is not set
395# CONFIG_PPP is not set
396# CONFIG_SLIP is not set
397# CONFIG_NETCONSOLE is not set
398# CONFIG_NETPOLL is not set
399# CONFIG_NET_POLL_CONTROLLER is not set
400# CONFIG_ISDN is not set
401# CONFIG_PHONE is not set
402
403#
404# Input device support
405#
406# CONFIG_INPUT is not set
407
408#
409# Hardware I/O ports
410#
411# CONFIG_SERIO is not set
412# CONFIG_GAMEPORT is not set
413
414#
415# Character devices
416#
417# CONFIG_VT is not set
418# CONFIG_DEVKMEM is not set
419# CONFIG_SERIAL_NONSTANDARD is not set
420
421#
422# Serial drivers
423#
424# CONFIG_SERIAL_8250 is not set
425
426#
427# Non-8250 serial port support
428#
429CONFIG_SERIAL_CORE=y
430CONFIG_SERIAL_CORE_CONSOLE=y
431# CONFIG_SERIAL_COLDFIRE is not set
432CONFIG_SERIAL_MCF=y
433CONFIG_SERIAL_MCF_BAUDRATE=115200
434CONFIG_SERIAL_MCF_CONSOLE=y
435# CONFIG_UNIX98_PTYS is not set
436CONFIG_LEGACY_PTYS=y
437CONFIG_LEGACY_PTY_COUNT=256
438# CONFIG_IPMI_HANDLER is not set
439# CONFIG_HW_RANDOM is not set
440# CONFIG_GEN_RTC is not set
441# CONFIG_R3964 is not set
442# CONFIG_RAW_DRIVER is not set
443# CONFIG_TCG_TPM is not set
444# CONFIG_I2C is not set
445# CONFIG_SPI is not set
446# CONFIG_W1 is not set
447# CONFIG_POWER_SUPPLY is not set
448# CONFIG_HWMON is not set
449# CONFIG_THERMAL is not set
450# CONFIG_WATCHDOG is not set
451
452#
453# Sonics Silicon Backplane
454#
455CONFIG_SSB_POSSIBLE=y
456# CONFIG_SSB is not set
457
458#
459# Multifunction device drivers
460#
461# CONFIG_MFD_SM501 is not set
462# CONFIG_HTC_PASIC3 is not set
463
464#
465# Multimedia devices
466#
467
468#
469# Multimedia core support
470#
471# CONFIG_VIDEO_DEV is not set
472# CONFIG_DVB_CORE is not set
473
474#
475# Multimedia drivers
476#
477# CONFIG_DAB is not set
478
479#
480# Graphics support
481#
482# CONFIG_VGASTATE is not set
483# CONFIG_VIDEO_OUTPUT_CONTROL is not set
484# CONFIG_FB is not set
485# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
486
487#
488# Display device support
489#
490# CONFIG_DISPLAY_SUPPORT is not set
491
492#
493# Sound
494#
495# CONFIG_SOUND is not set
496# CONFIG_USB_SUPPORT is not set
497# CONFIG_MMC is not set
498# CONFIG_MEMSTICK is not set
499# CONFIG_NEW_LEDS is not set
500# CONFIG_ACCESSIBILITY is not set
501# CONFIG_RTC_CLASS is not set
502# CONFIG_UIO is not set
503
504#
505# File systems
506#
507CONFIG_EXT2_FS=y
508# CONFIG_EXT2_FS_XATTR is not set
509# CONFIG_EXT3_FS is not set
510# CONFIG_EXT4DEV_FS is not set
511# CONFIG_REISERFS_FS is not set
512# CONFIG_JFS_FS is not set
513# CONFIG_FS_POSIX_ACL is not set
514# CONFIG_XFS_FS is not set
515# CONFIG_DNOTIFY is not set
516# CONFIG_INOTIFY is not set
517# CONFIG_QUOTA is not set
518# CONFIG_AUTOFS_FS is not set
519# CONFIG_AUTOFS4_FS is not set
520# CONFIG_FUSE_FS is not set
521
522#
523# CD-ROM/DVD Filesystems
524#
525# CONFIG_ISO9660_FS is not set
526# CONFIG_UDF_FS is not set
527
528#
529# DOS/FAT/NT Filesystems
530#
531# CONFIG_MSDOS_FS is not set
532# CONFIG_VFAT_FS is not set
533# CONFIG_NTFS_FS is not set
534
535#
536# Pseudo filesystems
537#
538CONFIG_PROC_FS=y
539CONFIG_PROC_SYSCTL=y
540# CONFIG_SYSFS is not set
541# CONFIG_TMPFS is not set
542# CONFIG_HUGETLB_PAGE is not set
543
544#
545# Miscellaneous filesystems
546#
547# CONFIG_ADFS_FS is not set
548# CONFIG_AFFS_FS is not set
549# CONFIG_HFS_FS is not set
550# CONFIG_HFSPLUS_FS is not set
551# CONFIG_BEFS_FS is not set
552# CONFIG_BFS_FS is not set
553# CONFIG_EFS_FS is not set
554# CONFIG_JFFS2_FS is not set
555# CONFIG_CRAMFS is not set
556# CONFIG_VXFS_FS is not set
557# CONFIG_MINIX_FS is not set
558# CONFIG_HPFS_FS is not set
559# CONFIG_QNX4FS_FS is not set
560CONFIG_ROMFS_FS=y
561# CONFIG_SYSV_FS is not set
562# CONFIG_UFS_FS is not set
563# CONFIG_NETWORK_FILESYSTEMS is not set
564
565#
566# Partition Types
567#
568# CONFIG_PARTITION_ADVANCED is not set
569CONFIG_MSDOS_PARTITION=y
570# CONFIG_NLS is not set
571
572#
573# Kernel hacking
574#
575# CONFIG_PRINTK_TIME is not set
576CONFIG_ENABLE_WARN_DEPRECATED=y
577CONFIG_ENABLE_MUST_CHECK=y
578CONFIG_FRAME_WARN=1024
579# CONFIG_MAGIC_SYSRQ is not set
580# CONFIG_UNUSED_SYMBOLS is not set
581# CONFIG_HEADERS_CHECK is not set
582# CONFIG_DEBUG_KERNEL is not set
583# CONFIG_DEBUG_BUGVERBOSE is not set
584# CONFIG_SAMPLES is not set
585CONFIG_FULLDEBUG=y
586# CONFIG_HIGHPROFILE is not set
587# CONFIG_BOOTPARAM is not set
588# CONFIG_NO_KERNEL_MSG is not set
589# CONFIG_BDM_DISABLE is not set
590
591#
592# Security options
593#
594# CONFIG_KEYS is not set
595# CONFIG_SECURITY_FILE_CAPABILITIES is not set
596# CONFIG_CRYPTO is not set
597
598#
599# Library routines
600#
601CONFIG_BITREVERSE=y
602# CONFIG_GENERIC_FIND_FIRST_BIT is not set
603# CONFIG_CRC_CCITT is not set
604# CONFIG_CRC16 is not set
605# CONFIG_CRC_ITU_T is not set
606CONFIG_CRC32=y
607# CONFIG_CRC7 is not set
608# CONFIG_LIBCRC32C is not set
609CONFIG_HAS_IOMEM=y
610CONFIG_HAS_DMA=y
diff --git a/arch/m68knommu/configs/m5249evb_defconfig b/arch/m68knommu/configs/m5249evb_defconfig
new file mode 100644
index 000000000000..cc6458333d67
--- /dev/null
+++ b/arch/m68knommu/configs/m5249evb_defconfig
@@ -0,0 +1,497 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc1
4#
5CONFIG_M68K=y
6# CONFIG_MMU is not set
7# CONFIG_FPU is not set
8CONFIG_ZONE_DMA=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y
10# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
11# CONFIG_ARCH_HAS_ILOG2_U32 is not set
12# CONFIG_ARCH_HAS_ILOG2_U64 is not set
13CONFIG_GENERIC_FIND_NEXT_BIT=y
14CONFIG_GENERIC_HWEIGHT=y
15CONFIG_GENERIC_HARDIRQS=y
16CONFIG_GENERIC_CALIBRATE_DELAY=y
17CONFIG_GENERIC_TIME=y
18CONFIG_TIME_LOW_RES=y
19CONFIG_NO_IOPORT=y
20CONFIG_ARCH_SUPPORTS_AOUT=y
21CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
22
23#
24# General setup
25#
26CONFIG_EXPERIMENTAL=y
27CONFIG_BROKEN_ON_SMP=y
28CONFIG_INIT_ENV_ARG_LIMIT=32
29CONFIG_LOCALVERSION=""
30CONFIG_LOCALVERSION_AUTO=y
31# CONFIG_SYSVIPC is not set
32# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_IKCONFIG is not set
34CONFIG_LOG_BUF_SHIFT=14
35# CONFIG_CGROUPS is not set
36# CONFIG_GROUP_SCHED is not set
37# CONFIG_SYSFS_DEPRECATED_V2 is not set
38# CONFIG_RELAY is not set
39# CONFIG_NAMESPACES is not set
40# CONFIG_BLK_DEV_INITRD is not set
41# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
42CONFIG_SYSCTL=y
43CONFIG_EMBEDDED=y
44# CONFIG_UID16 is not set
45# CONFIG_SYSCTL_SYSCALL is not set
46# CONFIG_KALLSYMS is not set
47# CONFIG_HOTPLUG is not set
48CONFIG_PRINTK=y
49CONFIG_BUG=y
50CONFIG_ELF_CORE=y
51# CONFIG_COMPAT_BRK is not set
52CONFIG_BASE_FULL=y
53# CONFIG_FUTEX is not set
54# CONFIG_EPOLL is not set
55# CONFIG_SIGNALFD is not set
56# CONFIG_TIMERFD is not set
57# CONFIG_EVENTFD is not set
58# CONFIG_VM_EVENT_COUNTERS is not set
59CONFIG_SLAB=y
60# CONFIG_SLUB is not set
61# CONFIG_SLOB is not set
62# CONFIG_PROFILING is not set
63# CONFIG_MARKERS is not set
64# CONFIG_HAVE_OPROFILE is not set
65# CONFIG_HAVE_KPROBES is not set
66# CONFIG_HAVE_KRETPROBES is not set
67# CONFIG_HAVE_DMA_ATTRS is not set
68CONFIG_SLABINFO=y
69CONFIG_TINY_SHMEM=y
70CONFIG_BASE_SMALL=0
71CONFIG_MODULES=y
72CONFIG_MODULE_UNLOAD=y
73# CONFIG_MODULE_FORCE_UNLOAD is not set
74# CONFIG_MODVERSIONS is not set
75# CONFIG_MODULE_SRCVERSION_ALL is not set
76# CONFIG_KMOD is not set
77CONFIG_BLOCK=y
78# CONFIG_LBD is not set
79# CONFIG_BLK_DEV_IO_TRACE is not set
80# CONFIG_LSF is not set
81# CONFIG_BLK_DEV_BSG is not set
82
83#
84# IO Schedulers
85#
86CONFIG_IOSCHED_NOOP=y
87# CONFIG_IOSCHED_AS is not set
88# CONFIG_IOSCHED_DEADLINE is not set
89# CONFIG_IOSCHED_CFQ is not set
90# CONFIG_DEFAULT_AS is not set
91# CONFIG_DEFAULT_DEADLINE is not set
92# CONFIG_DEFAULT_CFQ is not set
93CONFIG_DEFAULT_NOOP=y
94CONFIG_DEFAULT_IOSCHED="noop"
95CONFIG_CLASSIC_RCU=y
96
97#
98# Processor type and features
99#
100# CONFIG_M68328 is not set
101# CONFIG_M68EZ328 is not set
102# CONFIG_M68VZ328 is not set
103# CONFIG_M68360 is not set
104# CONFIG_M5206 is not set
105# CONFIG_M5206e is not set
106# CONFIG_M520x is not set
107# CONFIG_M523x is not set
108CONFIG_M5249=y
109# CONFIG_M5271 is not set
110# CONFIG_M5272 is not set
111# CONFIG_M5275 is not set
112# CONFIG_M528x is not set
113# CONFIG_M5307 is not set
114# CONFIG_M532x is not set
115# CONFIG_M5407 is not set
116CONFIG_COLDFIRE=y
117CONFIG_CLOCK_SET=y
118CONFIG_CLOCK_FREQ=140000000
119CONFIG_CLOCK_DIV=2
120
121#
122# Platform
123#
124CONFIG_M5249C3=y
125CONFIG_FREESCALE=y
126CONFIG_4KSTACKS=y
127CONFIG_HZ=100
128
129#
130# RAM configuration
131#
132CONFIG_RAMBASE=0x00000000
133CONFIG_RAMSIZE=0x00800000
134CONFIG_VECTORBASE=0x00000000
135CONFIG_KERNELBASE=0x00020000
136CONFIG_RAMAUTOBIT=y
137# CONFIG_RAM8BIT is not set
138# CONFIG_RAM16BIT is not set
139# CONFIG_RAM32BIT is not set
140
141#
142# ROM configuration
143#
144# CONFIG_ROM is not set
145CONFIG_RAMKERNEL=y
146# CONFIG_ROMKERNEL is not set
147CONFIG_SELECT_MEMORY_MODEL=y
148CONFIG_FLATMEM_MANUAL=y
149# CONFIG_DISCONTIGMEM_MANUAL is not set
150# CONFIG_SPARSEMEM_MANUAL is not set
151CONFIG_FLATMEM=y
152CONFIG_FLAT_NODE_MEM_MAP=y
153# CONFIG_SPARSEMEM_STATIC is not set
154# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
155CONFIG_PAGEFLAGS_EXTENDED=y
156CONFIG_SPLIT_PTLOCK_CPUS=4
157# CONFIG_RESOURCES_64BIT is not set
158CONFIG_ZONE_DMA_FLAG=1
159CONFIG_VIRT_TO_BUS=y
160CONFIG_ISA_DMA_API=y
161
162#
163# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
164#
165# CONFIG_PCI is not set
166# CONFIG_ARCH_SUPPORTS_MSI is not set
167
168#
169# Executable file formats
170#
171CONFIG_BINFMT_FLAT=y
172# CONFIG_BINFMT_ZFLAT is not set
173# CONFIG_BINFMT_SHARED_FLAT is not set
174# CONFIG_BINFMT_AOUT is not set
175# CONFIG_BINFMT_MISC is not set
176
177#
178# Power management options
179#
180# CONFIG_PM is not set
181
182#
183# Networking
184#
185# CONFIG_NET is not set
186
187#
188# Device Drivers
189#
190
191#
192# Generic Driver Options
193#
194CONFIG_STANDALONE=y
195CONFIG_PREVENT_FIRMWARE_BUILD=y
196# CONFIG_SYS_HYPERVISOR is not set
197CONFIG_MTD=y
198# CONFIG_MTD_DEBUG is not set
199# CONFIG_MTD_CONCAT is not set
200CONFIG_MTD_PARTITIONS=y
201# CONFIG_MTD_REDBOOT_PARTS is not set
202# CONFIG_MTD_CMDLINE_PARTS is not set
203# CONFIG_MTD_AR7_PARTS is not set
204
205#
206# User Modules And Translation Layers
207#
208CONFIG_MTD_CHAR=y
209CONFIG_MTD_BLKDEVS=y
210CONFIG_MTD_BLOCK=y
211# CONFIG_FTL is not set
212# CONFIG_NFTL is not set
213# CONFIG_INFTL is not set
214# CONFIG_RFD_FTL is not set
215# CONFIG_SSFDC is not set
216# CONFIG_MTD_OOPS is not set
217
218#
219# RAM/ROM/Flash chip drivers
220#
221# CONFIG_MTD_CFI is not set
222# CONFIG_MTD_JEDECPROBE is not set
223CONFIG_MTD_MAP_BANK_WIDTH_1=y
224CONFIG_MTD_MAP_BANK_WIDTH_2=y
225CONFIG_MTD_MAP_BANK_WIDTH_4=y
226# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
227# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
228# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
229CONFIG_MTD_CFI_I1=y
230CONFIG_MTD_CFI_I2=y
231# CONFIG_MTD_CFI_I4 is not set
232# CONFIG_MTD_CFI_I8 is not set
233CONFIG_MTD_RAM=y
234# CONFIG_MTD_ROM is not set
235# CONFIG_MTD_ABSENT is not set
236
237#
238# Mapping drivers for chip access
239#
240# CONFIG_MTD_COMPLEX_MAPPINGS is not set
241CONFIG_MTD_UCLINUX=y
242# CONFIG_MTD_PLATRAM is not set
243
244#
245# Self-contained MTD device drivers
246#
247# CONFIG_MTD_SLRAM is not set
248# CONFIG_MTD_PHRAM is not set
249# CONFIG_MTD_MTDRAM is not set
250# CONFIG_MTD_BLOCK2MTD is not set
251
252#
253# Disk-On-Chip Device Drivers
254#
255# CONFIG_MTD_DOC2000 is not set
256# CONFIG_MTD_DOC2001 is not set
257# CONFIG_MTD_DOC2001PLUS is not set
258# CONFIG_MTD_NAND is not set
259# CONFIG_MTD_ONENAND is not set
260
261#
262# UBI - Unsorted block images
263#
264# CONFIG_MTD_UBI is not set
265# CONFIG_PARPORT is not set
266CONFIG_BLK_DEV=y
267# CONFIG_BLK_DEV_COW_COMMON is not set
268# CONFIG_BLK_DEV_LOOP is not set
269CONFIG_BLK_DEV_RAM=y
270CONFIG_BLK_DEV_RAM_COUNT=16
271CONFIG_BLK_DEV_RAM_SIZE=4096
272# CONFIG_BLK_DEV_XIP is not set
273# CONFIG_CDROM_PKTCDVD is not set
274CONFIG_MISC_DEVICES=y
275# CONFIG_EEPROM_93CX6 is not set
276# CONFIG_ENCLOSURE_SERVICES is not set
277CONFIG_HAVE_IDE=y
278# CONFIG_IDE is not set
279
280#
281# SCSI device support
282#
283# CONFIG_RAID_ATTRS is not set
284# CONFIG_SCSI is not set
285# CONFIG_SCSI_DMA is not set
286# CONFIG_SCSI_NETLINK is not set
287# CONFIG_MD is not set
288# CONFIG_PHONE is not set
289
290#
291# Input device support
292#
293# CONFIG_INPUT is not set
294
295#
296# Hardware I/O ports
297#
298# CONFIG_SERIO is not set
299# CONFIG_GAMEPORT is not set
300
301#
302# Character devices
303#
304# CONFIG_VT is not set
305# CONFIG_DEVKMEM is not set
306# CONFIG_SERIAL_NONSTANDARD is not set
307
308#
309# Serial drivers
310#
311# CONFIG_SERIAL_8250 is not set
312
313#
314# Non-8250 serial port support
315#
316CONFIG_SERIAL_CORE=y
317CONFIG_SERIAL_CORE_CONSOLE=y
318# CONFIG_SERIAL_COLDFIRE is not set
319CONFIG_SERIAL_MCF=y
320CONFIG_SERIAL_MCF_BAUDRATE=19200
321CONFIG_SERIAL_MCF_CONSOLE=y
322# CONFIG_UNIX98_PTYS is not set
323CONFIG_LEGACY_PTYS=y
324CONFIG_LEGACY_PTY_COUNT=256
325# CONFIG_IPMI_HANDLER is not set
326# CONFIG_HW_RANDOM is not set
327# CONFIG_GEN_RTC is not set
328# CONFIG_R3964 is not set
329# CONFIG_RAW_DRIVER is not set
330# CONFIG_TCG_TPM is not set
331# CONFIG_I2C is not set
332# CONFIG_SPI is not set
333# CONFIG_W1 is not set
334# CONFIG_POWER_SUPPLY is not set
335# CONFIG_HWMON is not set
336# CONFIG_THERMAL is not set
337# CONFIG_WATCHDOG is not set
338
339#
340# Sonics Silicon Backplane
341#
342CONFIG_SSB_POSSIBLE=y
343# CONFIG_SSB is not set
344
345#
346# Multifunction device drivers
347#
348# CONFIG_MFD_SM501 is not set
349# CONFIG_HTC_PASIC3 is not set
350
351#
352# Multimedia devices
353#
354
355#
356# Multimedia core support
357#
358# CONFIG_VIDEO_DEV is not set
359
360#
361# Multimedia drivers
362#
363# CONFIG_DAB is not set
364
365#
366# Graphics support
367#
368# CONFIG_VGASTATE is not set
369# CONFIG_VIDEO_OUTPUT_CONTROL is not set
370# CONFIG_FB is not set
371# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
372
373#
374# Display device support
375#
376# CONFIG_DISPLAY_SUPPORT is not set
377
378#
379# Sound
380#
381# CONFIG_SOUND is not set
382# CONFIG_USB_SUPPORT is not set
383# CONFIG_MMC is not set
384# CONFIG_MEMSTICK is not set
385# CONFIG_NEW_LEDS is not set
386# CONFIG_ACCESSIBILITY is not set
387# CONFIG_RTC_CLASS is not set
388# CONFIG_UIO is not set
389
390#
391# File systems
392#
393CONFIG_EXT2_FS=y
394# CONFIG_EXT2_FS_XATTR is not set
395# CONFIG_EXT3_FS is not set
396# CONFIG_EXT4DEV_FS is not set
397# CONFIG_REISERFS_FS is not set
398# CONFIG_JFS_FS is not set
399# CONFIG_FS_POSIX_ACL is not set
400# CONFIG_XFS_FS is not set
401# CONFIG_DNOTIFY is not set
402# CONFIG_INOTIFY is not set
403# CONFIG_QUOTA is not set
404# CONFIG_AUTOFS_FS is not set
405# CONFIG_AUTOFS4_FS is not set
406# CONFIG_FUSE_FS is not set
407
408#
409# CD-ROM/DVD Filesystems
410#
411# CONFIG_ISO9660_FS is not set
412# CONFIG_UDF_FS is not set
413
414#
415# DOS/FAT/NT Filesystems
416#
417# CONFIG_MSDOS_FS is not set
418# CONFIG_VFAT_FS is not set
419# CONFIG_NTFS_FS is not set
420
421#
422# Pseudo filesystems
423#
424CONFIG_PROC_FS=y
425CONFIG_PROC_SYSCTL=y
426CONFIG_SYSFS=y
427# CONFIG_TMPFS is not set
428# CONFIG_HUGETLB_PAGE is not set
429# CONFIG_CONFIGFS_FS is not set
430
431#
432# Miscellaneous filesystems
433#
434# CONFIG_ADFS_FS is not set
435# CONFIG_AFFS_FS is not set
436# CONFIG_HFS_FS is not set
437# CONFIG_HFSPLUS_FS is not set
438# CONFIG_BEFS_FS is not set
439# CONFIG_BFS_FS is not set
440# CONFIG_EFS_FS is not set
441# CONFIG_JFFS2_FS is not set
442# CONFIG_CRAMFS is not set
443# CONFIG_VXFS_FS is not set
444# CONFIG_MINIX_FS is not set
445# CONFIG_HPFS_FS is not set
446# CONFIG_QNX4FS_FS is not set
447CONFIG_ROMFS_FS=y
448# CONFIG_SYSV_FS is not set
449# CONFIG_UFS_FS is not set
450
451#
452# Partition Types
453#
454# CONFIG_PARTITION_ADVANCED is not set
455CONFIG_MSDOS_PARTITION=y
456# CONFIG_NLS is not set
457
458#
459# Kernel hacking
460#
461# CONFIG_PRINTK_TIME is not set
462CONFIG_ENABLE_WARN_DEPRECATED=y
463CONFIG_ENABLE_MUST_CHECK=y
464CONFIG_FRAME_WARN=1024
465# CONFIG_MAGIC_SYSRQ is not set
466# CONFIG_UNUSED_SYMBOLS is not set
467# CONFIG_DEBUG_FS is not set
468# CONFIG_HEADERS_CHECK is not set
469# CONFIG_DEBUG_KERNEL is not set
470# CONFIG_DEBUG_BUGVERBOSE is not set
471# CONFIG_SAMPLES is not set
472# CONFIG_FULLDEBUG is not set
473# CONFIG_HIGHPROFILE is not set
474# CONFIG_BOOTPARAM is not set
475# CONFIG_NO_KERNEL_MSG is not set
476# CONFIG_BDM_DISABLE is not set
477
478#
479# Security options
480#
481# CONFIG_KEYS is not set
482# CONFIG_SECURITY is not set
483# CONFIG_SECURITY_FILE_CAPABILITIES is not set
484# CONFIG_CRYPTO is not set
485
486#
487# Library routines
488#
489# CONFIG_GENERIC_FIND_FIRST_BIT is not set
490# CONFIG_CRC_CCITT is not set
491# CONFIG_CRC16 is not set
492# CONFIG_CRC_ITU_T is not set
493# CONFIG_CRC32 is not set
494# CONFIG_CRC7 is not set
495# CONFIG_LIBCRC32C is not set
496CONFIG_HAS_IOMEM=y
497CONFIG_HAS_DMA=y
diff --git a/arch/m68knommu/configs/m5275evb_defconfig b/arch/m68knommu/configs/m5275evb_defconfig
new file mode 100644
index 000000000000..0d1256f5addb
--- /dev/null
+++ b/arch/m68knommu/configs/m5275evb_defconfig
@@ -0,0 +1,627 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc1
4#
5CONFIG_M68K=y
6# CONFIG_MMU is not set
7# CONFIG_FPU is not set
8CONFIG_ZONE_DMA=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y
10# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
11# CONFIG_ARCH_HAS_ILOG2_U32 is not set
12# CONFIG_ARCH_HAS_ILOG2_U64 is not set
13CONFIG_GENERIC_FIND_NEXT_BIT=y
14CONFIG_GENERIC_HWEIGHT=y
15CONFIG_GENERIC_HARDIRQS=y
16CONFIG_GENERIC_CALIBRATE_DELAY=y
17CONFIG_GENERIC_TIME=y
18CONFIG_TIME_LOW_RES=y
19CONFIG_NO_IOPORT=y
20CONFIG_ARCH_SUPPORTS_AOUT=y
21CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
22
23#
24# General setup
25#
26CONFIG_EXPERIMENTAL=y
27CONFIG_BROKEN_ON_SMP=y
28CONFIG_INIT_ENV_ARG_LIMIT=32
29CONFIG_LOCALVERSION=""
30CONFIG_LOCALVERSION_AUTO=y
31# CONFIG_SYSVIPC is not set
32# CONFIG_POSIX_MQUEUE is not set
33# CONFIG_BSD_PROCESS_ACCT is not set
34# CONFIG_TASKSTATS is not set
35# CONFIG_AUDIT is not set
36# CONFIG_IKCONFIG is not set
37CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set
43# CONFIG_BLK_DEV_INITRD is not set
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45CONFIG_SYSCTL=y
46CONFIG_EMBEDDED=y
47# CONFIG_UID16 is not set
48# CONFIG_SYSCTL_SYSCALL is not set
49# CONFIG_KALLSYMS is not set
50# CONFIG_HOTPLUG is not set
51CONFIG_PRINTK=y
52CONFIG_BUG=y
53CONFIG_ELF_CORE=y
54# CONFIG_COMPAT_BRK is not set
55CONFIG_BASE_FULL=y
56# CONFIG_FUTEX is not set
57# CONFIG_EPOLL is not set
58# CONFIG_SIGNALFD is not set
59# CONFIG_TIMERFD is not set
60# CONFIG_EVENTFD is not set
61# CONFIG_VM_EVENT_COUNTERS is not set
62CONFIG_SLAB=y
63# CONFIG_SLUB is not set
64# CONFIG_SLOB is not set
65# CONFIG_PROFILING is not set
66# CONFIG_MARKERS is not set
67# CONFIG_HAVE_OPROFILE is not set
68# CONFIG_HAVE_KPROBES is not set
69# CONFIG_HAVE_KRETPROBES is not set
70# CONFIG_HAVE_DMA_ATTRS is not set
71CONFIG_SLABINFO=y
72CONFIG_TINY_SHMEM=y
73CONFIG_BASE_SMALL=0
74CONFIG_MODULES=y
75CONFIG_MODULE_UNLOAD=y
76# CONFIG_MODULE_FORCE_UNLOAD is not set
77# CONFIG_MODVERSIONS is not set
78# CONFIG_MODULE_SRCVERSION_ALL is not set
79# CONFIG_KMOD is not set
80CONFIG_BLOCK=y
81# CONFIG_LBD is not set
82# CONFIG_BLK_DEV_IO_TRACE is not set
83# CONFIG_LSF is not set
84# CONFIG_BLK_DEV_BSG is not set
85
86#
87# IO Schedulers
88#
89CONFIG_IOSCHED_NOOP=y
90# CONFIG_IOSCHED_AS is not set
91# CONFIG_IOSCHED_DEADLINE is not set
92# CONFIG_IOSCHED_CFQ is not set
93# CONFIG_DEFAULT_AS is not set
94# CONFIG_DEFAULT_DEADLINE is not set
95# CONFIG_DEFAULT_CFQ is not set
96CONFIG_DEFAULT_NOOP=y
97CONFIG_DEFAULT_IOSCHED="noop"
98CONFIG_CLASSIC_RCU=y
99
100#
101# Processor type and features
102#
103# CONFIG_M68328 is not set
104# CONFIG_M68EZ328 is not set
105# CONFIG_M68VZ328 is not set
106# CONFIG_M68360 is not set
107# CONFIG_M5206 is not set
108# CONFIG_M5206e is not set
109# CONFIG_M520x is not set
110# CONFIG_M523x is not set
111# CONFIG_M5249 is not set
112# CONFIG_M5271 is not set
113# CONFIG_M5272 is not set
114CONFIG_M5275=y
115# CONFIG_M528x is not set
116# CONFIG_M5307 is not set
117# CONFIG_M532x is not set
118# CONFIG_M5407 is not set
119CONFIG_M527x=y
120CONFIG_COLDFIRE=y
121CONFIG_CLOCK_SET=y
122CONFIG_CLOCK_FREQ=150000000
123CONFIG_CLOCK_DIV=2
124
125#
126# Platform
127#
128CONFIG_M5275EVB=y
129CONFIG_FREESCALE=y
130# CONFIG_4KSTACKS is not set
131CONFIG_HZ=100
132
133#
134# RAM configuration
135#
136CONFIG_RAMBASE=0x00000000
137CONFIG_RAMSIZE=0x00000000
138CONFIG_VECTORBASE=0x00000000
139CONFIG_KERNELBASE=0x00020000
140CONFIG_RAMAUTOBIT=y
141# CONFIG_RAM8BIT is not set
142# CONFIG_RAM16BIT is not set
143# CONFIG_RAM32BIT is not set
144
145#
146# ROM configuration
147#
148# CONFIG_ROM is not set
149CONFIG_RAMKERNEL=y
150# CONFIG_ROMKERNEL is not set
151CONFIG_SELECT_MEMORY_MODEL=y
152CONFIG_FLATMEM_MANUAL=y
153# CONFIG_DISCONTIGMEM_MANUAL is not set
154# CONFIG_SPARSEMEM_MANUAL is not set
155CONFIG_FLATMEM=y
156CONFIG_FLAT_NODE_MEM_MAP=y
157# CONFIG_SPARSEMEM_STATIC is not set
158# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
159CONFIG_PAGEFLAGS_EXTENDED=y
160CONFIG_SPLIT_PTLOCK_CPUS=4
161# CONFIG_RESOURCES_64BIT is not set
162CONFIG_ZONE_DMA_FLAG=1
163CONFIG_VIRT_TO_BUS=y
164CONFIG_ISA_DMA_API=y
165
166#
167# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
168#
169# CONFIG_PCI is not set
170# CONFIG_ARCH_SUPPORTS_MSI is not set
171
172#
173# Executable file formats
174#
175CONFIG_BINFMT_FLAT=y
176# CONFIG_BINFMT_ZFLAT is not set
177# CONFIG_BINFMT_SHARED_FLAT is not set
178# CONFIG_BINFMT_AOUT is not set
179# CONFIG_BINFMT_MISC is not set
180
181#
182# Power management options
183#
184# CONFIG_PM is not set
185
186#
187# Networking
188#
189CONFIG_NET=y
190
191#
192# Networking options
193#
194CONFIG_PACKET=y
195# CONFIG_PACKET_MMAP is not set
196CONFIG_UNIX=y
197# CONFIG_NET_KEY is not set
198CONFIG_INET=y
199# CONFIG_IP_MULTICAST is not set
200# CONFIG_IP_ADVANCED_ROUTER is not set
201CONFIG_IP_FIB_HASH=y
202# CONFIG_IP_PNP is not set
203# CONFIG_NET_IPIP is not set
204# CONFIG_NET_IPGRE is not set
205# CONFIG_ARPD is not set
206# CONFIG_SYN_COOKIES is not set
207# CONFIG_INET_AH is not set
208# CONFIG_INET_ESP is not set
209# CONFIG_INET_IPCOMP is not set
210# CONFIG_INET_XFRM_TUNNEL is not set
211# CONFIG_INET_TUNNEL is not set
212# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
213# CONFIG_INET_XFRM_MODE_TUNNEL is not set
214# CONFIG_INET_XFRM_MODE_BEET is not set
215# CONFIG_INET_LRO is not set
216# CONFIG_INET_DIAG is not set
217# CONFIG_TCP_CONG_ADVANCED is not set
218CONFIG_TCP_CONG_CUBIC=y
219CONFIG_DEFAULT_TCP_CONG="cubic"
220# CONFIG_TCP_MD5SIG is not set
221# CONFIG_IPV6 is not set
222# CONFIG_NETWORK_SECMARK is not set
223# CONFIG_NETFILTER is not set
224# CONFIG_IP_DCCP is not set
225# CONFIG_IP_SCTP is not set
226# CONFIG_TIPC is not set
227# CONFIG_ATM is not set
228# CONFIG_BRIDGE is not set
229# CONFIG_VLAN_8021Q is not set
230# CONFIG_DECNET is not set
231# CONFIG_LLC2 is not set
232# CONFIG_IPX is not set
233# CONFIG_ATALK is not set
234# CONFIG_X25 is not set
235# CONFIG_LAPB is not set
236# CONFIG_ECONET is not set
237# CONFIG_WAN_ROUTER is not set
238# CONFIG_NET_SCHED is not set
239
240#
241# Network testing
242#
243# CONFIG_NET_PKTGEN is not set
244# CONFIG_HAMRADIO is not set
245# CONFIG_CAN is not set
246# CONFIG_IRDA is not set
247# CONFIG_BT is not set
248# CONFIG_AF_RXRPC is not set
249
250#
251# Wireless
252#
253# CONFIG_CFG80211 is not set
254# CONFIG_WIRELESS_EXT is not set
255# CONFIG_MAC80211 is not set
256# CONFIG_IEEE80211 is not set
257# CONFIG_RFKILL is not set
258# CONFIG_NET_9P is not set
259
260#
261# Device Drivers
262#
263
264#
265# Generic Driver Options
266#
267CONFIG_STANDALONE=y
268CONFIG_PREVENT_FIRMWARE_BUILD=y
269# CONFIG_SYS_HYPERVISOR is not set
270# CONFIG_CONNECTOR is not set
271CONFIG_MTD=y
272# CONFIG_MTD_DEBUG is not set
273# CONFIG_MTD_CONCAT is not set
274CONFIG_MTD_PARTITIONS=y
275# CONFIG_MTD_REDBOOT_PARTS is not set
276# CONFIG_MTD_CMDLINE_PARTS is not set
277# CONFIG_MTD_AR7_PARTS is not set
278
279#
280# User Modules And Translation Layers
281#
282CONFIG_MTD_CHAR=y
283CONFIG_MTD_BLKDEVS=y
284CONFIG_MTD_BLOCK=y
285# CONFIG_FTL is not set
286# CONFIG_NFTL is not set
287# CONFIG_INFTL is not set
288# CONFIG_RFD_FTL is not set
289# CONFIG_SSFDC is not set
290# CONFIG_MTD_OOPS is not set
291
292#
293# RAM/ROM/Flash chip drivers
294#
295# CONFIG_MTD_CFI is not set
296# CONFIG_MTD_JEDECPROBE is not set
297CONFIG_MTD_MAP_BANK_WIDTH_1=y
298CONFIG_MTD_MAP_BANK_WIDTH_2=y
299CONFIG_MTD_MAP_BANK_WIDTH_4=y
300# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
301# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
302# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
303CONFIG_MTD_CFI_I1=y
304CONFIG_MTD_CFI_I2=y
305# CONFIG_MTD_CFI_I4 is not set
306# CONFIG_MTD_CFI_I8 is not set
307CONFIG_MTD_RAM=y
308# CONFIG_MTD_ROM is not set
309# CONFIG_MTD_ABSENT is not set
310
311#
312# Mapping drivers for chip access
313#
314# CONFIG_MTD_COMPLEX_MAPPINGS is not set
315CONFIG_MTD_UCLINUX=y
316# CONFIG_MTD_PLATRAM is not set
317
318#
319# Self-contained MTD device drivers
320#
321# CONFIG_MTD_SLRAM is not set
322# CONFIG_MTD_PHRAM is not set
323# CONFIG_MTD_MTDRAM is not set
324# CONFIG_MTD_BLOCK2MTD is not set
325
326#
327# Disk-On-Chip Device Drivers
328#
329# CONFIG_MTD_DOC2000 is not set
330# CONFIG_MTD_DOC2001 is not set
331# CONFIG_MTD_DOC2001PLUS is not set
332# CONFIG_MTD_NAND is not set
333# CONFIG_MTD_ONENAND is not set
334
335#
336# UBI - Unsorted block images
337#
338# CONFIG_MTD_UBI is not set
339# CONFIG_PARPORT is not set
340CONFIG_BLK_DEV=y
341# CONFIG_BLK_DEV_COW_COMMON is not set
342# CONFIG_BLK_DEV_LOOP is not set
343# CONFIG_BLK_DEV_NBD is not set
344CONFIG_BLK_DEV_RAM=y
345CONFIG_BLK_DEV_RAM_COUNT=16
346CONFIG_BLK_DEV_RAM_SIZE=4096
347# CONFIG_BLK_DEV_XIP is not set
348# CONFIG_CDROM_PKTCDVD is not set
349# CONFIG_ATA_OVER_ETH is not set
350# CONFIG_MISC_DEVICES is not set
351CONFIG_HAVE_IDE=y
352# CONFIG_IDE is not set
353
354#
355# SCSI device support
356#
357# CONFIG_RAID_ATTRS is not set
358# CONFIG_SCSI is not set
359# CONFIG_SCSI_DMA is not set
360# CONFIG_SCSI_NETLINK is not set
361# CONFIG_MD is not set
362CONFIG_NETDEVICES=y
363# CONFIG_NETDEVICES_MULTIQUEUE is not set
364# CONFIG_DUMMY is not set
365# CONFIG_BONDING is not set
366# CONFIG_MACVLAN is not set
367# CONFIG_EQUALIZER is not set
368# CONFIG_TUN is not set
369# CONFIG_VETH is not set
370# CONFIG_PHYLIB is not set
371CONFIG_NET_ETHERNET=y
372# CONFIG_MII is not set
373# CONFIG_IBM_NEW_EMAC_ZMII is not set
374# CONFIG_IBM_NEW_EMAC_RGMII is not set
375# CONFIG_IBM_NEW_EMAC_TAH is not set
376# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
377# CONFIG_B44 is not set
378CONFIG_FEC=y
379CONFIG_FEC2=y
380# CONFIG_NETDEV_1000 is not set
381# CONFIG_NETDEV_10000 is not set
382
383#
384# Wireless LAN
385#
386# CONFIG_WLAN_PRE80211 is not set
387# CONFIG_WLAN_80211 is not set
388# CONFIG_IWLWIFI is not set
389# CONFIG_IWLWIFI_LEDS is not set
390# CONFIG_WAN is not set
391CONFIG_PPP=y
392# CONFIG_PPP_MULTILINK is not set
393# CONFIG_PPP_FILTER is not set
394# CONFIG_PPP_ASYNC is not set
395# CONFIG_PPP_SYNC_TTY is not set
396# CONFIG_PPP_DEFLATE is not set
397# CONFIG_PPP_BSDCOMP is not set
398# CONFIG_PPP_MPPE is not set
399# CONFIG_PPPOE is not set
400# CONFIG_PPPOL2TP is not set
401# CONFIG_SLIP is not set
402CONFIG_SLHC=y
403# CONFIG_NETCONSOLE is not set
404# CONFIG_NETPOLL is not set
405# CONFIG_NET_POLL_CONTROLLER is not set
406# CONFIG_ISDN is not set
407# CONFIG_PHONE is not set
408
409#
410# Input device support
411#
412# CONFIG_INPUT is not set
413
414#
415# Hardware I/O ports
416#
417# CONFIG_SERIO is not set
418# CONFIG_GAMEPORT is not set
419
420#
421# Character devices
422#
423# CONFIG_VT is not set
424# CONFIG_DEVKMEM is not set
425# CONFIG_SERIAL_NONSTANDARD is not set
426
427#
428# Serial drivers
429#
430# CONFIG_SERIAL_8250 is not set
431
432#
433# Non-8250 serial port support
434#
435CONFIG_SERIAL_CORE=y
436CONFIG_SERIAL_CORE_CONSOLE=y
437# CONFIG_SERIAL_COLDFIRE is not set
438CONFIG_SERIAL_MCF=y
439CONFIG_SERIAL_MCF_BAUDRATE=19200
440CONFIG_SERIAL_MCF_CONSOLE=y
441# CONFIG_UNIX98_PTYS is not set
442CONFIG_LEGACY_PTYS=y
443CONFIG_LEGACY_PTY_COUNT=256
444# CONFIG_IPMI_HANDLER is not set
445# CONFIG_HW_RANDOM is not set
446# CONFIG_GEN_RTC is not set
447# CONFIG_R3964 is not set
448# CONFIG_RAW_DRIVER is not set
449# CONFIG_TCG_TPM is not set
450# CONFIG_I2C is not set
451# CONFIG_SPI is not set
452# CONFIG_W1 is not set
453# CONFIG_POWER_SUPPLY is not set
454# CONFIG_HWMON is not set
455# CONFIG_THERMAL is not set
456# CONFIG_WATCHDOG is not set
457
458#
459# Sonics Silicon Backplane
460#
461CONFIG_SSB_POSSIBLE=y
462# CONFIG_SSB is not set
463
464#
465# Multifunction device drivers
466#
467# CONFIG_MFD_SM501 is not set
468# CONFIG_HTC_PASIC3 is not set
469
470#
471# Multimedia devices
472#
473
474#
475# Multimedia core support
476#
477# CONFIG_VIDEO_DEV is not set
478# CONFIG_DVB_CORE is not set
479
480#
481# Multimedia drivers
482#
483CONFIG_DAB=y
484
485#
486# Graphics support
487#
488# CONFIG_VGASTATE is not set
489# CONFIG_VIDEO_OUTPUT_CONTROL is not set
490# CONFIG_FB is not set
491# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
492
493#
494# Display device support
495#
496# CONFIG_DISPLAY_SUPPORT is not set
497
498#
499# Sound
500#
501# CONFIG_SOUND is not set
502# CONFIG_USB_SUPPORT is not set
503# CONFIG_MMC is not set
504# CONFIG_MEMSTICK is not set
505# CONFIG_NEW_LEDS is not set
506# CONFIG_ACCESSIBILITY is not set
507# CONFIG_RTC_CLASS is not set
508# CONFIG_UIO is not set
509
510#
511# File systems
512#
513CONFIG_EXT2_FS=y
514# CONFIG_EXT2_FS_XATTR is not set
515# CONFIG_EXT3_FS is not set
516# CONFIG_EXT4DEV_FS is not set
517# CONFIG_REISERFS_FS is not set
518# CONFIG_JFS_FS is not set
519# CONFIG_FS_POSIX_ACL is not set
520# CONFIG_XFS_FS is not set
521# CONFIG_OCFS2_FS is not set
522# CONFIG_DNOTIFY is not set
523# CONFIG_INOTIFY is not set
524# CONFIG_QUOTA is not set
525# CONFIG_AUTOFS_FS is not set
526# CONFIG_AUTOFS4_FS is not set
527# CONFIG_FUSE_FS is not set
528
529#
530# CD-ROM/DVD Filesystems
531#
532# CONFIG_ISO9660_FS is not set
533# CONFIG_UDF_FS is not set
534
535#
536# DOS/FAT/NT Filesystems
537#
538# CONFIG_MSDOS_FS is not set
539# CONFIG_VFAT_FS is not set
540# CONFIG_NTFS_FS is not set
541
542#
543# Pseudo filesystems
544#
545CONFIG_PROC_FS=y
546CONFIG_PROC_SYSCTL=y
547CONFIG_SYSFS=y
548# CONFIG_TMPFS is not set
549# CONFIG_HUGETLB_PAGE is not set
550# CONFIG_CONFIGFS_FS is not set
551
552#
553# Miscellaneous filesystems
554#
555# CONFIG_ADFS_FS is not set
556# CONFIG_AFFS_FS is not set
557# CONFIG_HFS_FS is not set
558# CONFIG_HFSPLUS_FS is not set
559# CONFIG_BEFS_FS is not set
560# CONFIG_BFS_FS is not set
561# CONFIG_EFS_FS is not set
562# CONFIG_JFFS2_FS is not set
563# CONFIG_CRAMFS is not set
564# CONFIG_VXFS_FS is not set
565# CONFIG_MINIX_FS is not set
566# CONFIG_HPFS_FS is not set
567# CONFIG_QNX4FS_FS is not set
568CONFIG_ROMFS_FS=y
569# CONFIG_SYSV_FS is not set
570# CONFIG_UFS_FS is not set
571CONFIG_NETWORK_FILESYSTEMS=y
572# CONFIG_NFS_FS is not set
573# CONFIG_NFSD is not set
574# CONFIG_SMB_FS is not set
575# CONFIG_CIFS is not set
576# CONFIG_NCP_FS is not set
577# CONFIG_CODA_FS is not set
578# CONFIG_AFS_FS is not set
579
580#
581# Partition Types
582#
583# CONFIG_PARTITION_ADVANCED is not set
584CONFIG_MSDOS_PARTITION=y
585# CONFIG_NLS is not set
586# CONFIG_DLM is not set
587
588#
589# Kernel hacking
590#
591# CONFIG_PRINTK_TIME is not set
592CONFIG_ENABLE_WARN_DEPRECATED=y
593CONFIG_ENABLE_MUST_CHECK=y
594CONFIG_FRAME_WARN=1024
595# CONFIG_MAGIC_SYSRQ is not set
596# CONFIG_UNUSED_SYMBOLS is not set
597# CONFIG_DEBUG_FS is not set
598# CONFIG_HEADERS_CHECK is not set
599# CONFIG_DEBUG_KERNEL is not set
600# CONFIG_DEBUG_BUGVERBOSE is not set
601# CONFIG_SAMPLES is not set
602# CONFIG_FULLDEBUG is not set
603# CONFIG_HIGHPROFILE is not set
604# CONFIG_BOOTPARAM is not set
605# CONFIG_NO_KERNEL_MSG is not set
606# CONFIG_BDM_DISABLE is not set
607
608#
609# Security options
610#
611# CONFIG_KEYS is not set
612# CONFIG_SECURITY is not set
613# CONFIG_SECURITY_FILE_CAPABILITIES is not set
614# CONFIG_CRYPTO is not set
615
616#
617# Library routines
618#
619# CONFIG_GENERIC_FIND_FIRST_BIT is not set
620# CONFIG_CRC_CCITT is not set
621# CONFIG_CRC16 is not set
622# CONFIG_CRC_ITU_T is not set
623# CONFIG_CRC32 is not set
624# CONFIG_CRC7 is not set
625# CONFIG_LIBCRC32C is not set
626CONFIG_HAS_IOMEM=y
627CONFIG_HAS_DMA=y
diff --git a/arch/m68knommu/configs/m5307c3_defconfig b/arch/m68knommu/configs/m5307c3_defconfig
new file mode 100644
index 000000000000..fe2acdfa4d76
--- /dev/null
+++ b/arch/m68knommu/configs/m5307c3_defconfig
@@ -0,0 +1,580 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc1
4#
5CONFIG_M68K=y
6# CONFIG_MMU is not set
7# CONFIG_FPU is not set
8CONFIG_ZONE_DMA=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y
10# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
11# CONFIG_ARCH_HAS_ILOG2_U32 is not set
12# CONFIG_ARCH_HAS_ILOG2_U64 is not set
13CONFIG_GENERIC_FIND_NEXT_BIT=y
14CONFIG_GENERIC_HWEIGHT=y
15CONFIG_GENERIC_HARDIRQS=y
16CONFIG_GENERIC_CALIBRATE_DELAY=y
17CONFIG_GENERIC_TIME=y
18CONFIG_TIME_LOW_RES=y
19CONFIG_NO_IOPORT=y
20CONFIG_ARCH_SUPPORTS_AOUT=y
21CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
22
23#
24# General setup
25#
26CONFIG_EXPERIMENTAL=y
27CONFIG_BROKEN_ON_SMP=y
28CONFIG_INIT_ENV_ARG_LIMIT=32
29CONFIG_LOCALVERSION=""
30CONFIG_LOCALVERSION_AUTO=y
31# CONFIG_SYSVIPC is not set
32# CONFIG_POSIX_MQUEUE is not set
33# CONFIG_BSD_PROCESS_ACCT is not set
34# CONFIG_TASKSTATS is not set
35# CONFIG_AUDIT is not set
36# CONFIG_IKCONFIG is not set
37CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set
43# CONFIG_BLK_DEV_INITRD is not set
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45CONFIG_SYSCTL=y
46CONFIG_EMBEDDED=y
47# CONFIG_UID16 is not set
48# CONFIG_SYSCTL_SYSCALL is not set
49# CONFIG_KALLSYMS is not set
50# CONFIG_HOTPLUG is not set
51CONFIG_PRINTK=y
52CONFIG_BUG=y
53CONFIG_ELF_CORE=y
54# CONFIG_COMPAT_BRK is not set
55CONFIG_BASE_FULL=y
56# CONFIG_FUTEX is not set
57# CONFIG_EPOLL is not set
58# CONFIG_SIGNALFD is not set
59# CONFIG_TIMERFD is not set
60# CONFIG_EVENTFD is not set
61# CONFIG_VM_EVENT_COUNTERS is not set
62CONFIG_SLAB=y
63# CONFIG_SLUB is not set
64# CONFIG_SLOB is not set
65# CONFIG_PROFILING is not set
66# CONFIG_MARKERS is not set
67# CONFIG_HAVE_OPROFILE is not set
68# CONFIG_HAVE_KPROBES is not set
69# CONFIG_HAVE_KRETPROBES is not set
70# CONFIG_HAVE_DMA_ATTRS is not set
71CONFIG_SLABINFO=y
72CONFIG_TINY_SHMEM=y
73CONFIG_BASE_SMALL=0
74CONFIG_MODULES=y
75CONFIG_MODULE_UNLOAD=y
76# CONFIG_MODULE_FORCE_UNLOAD is not set
77# CONFIG_MODVERSIONS is not set
78# CONFIG_MODULE_SRCVERSION_ALL is not set
79# CONFIG_KMOD is not set
80CONFIG_BLOCK=y
81# CONFIG_LBD is not set
82# CONFIG_BLK_DEV_IO_TRACE is not set
83# CONFIG_LSF is not set
84# CONFIG_BLK_DEV_BSG is not set
85
86#
87# IO Schedulers
88#
89CONFIG_IOSCHED_NOOP=y
90# CONFIG_IOSCHED_AS is not set
91# CONFIG_IOSCHED_DEADLINE is not set
92# CONFIG_IOSCHED_CFQ is not set
93# CONFIG_DEFAULT_AS is not set
94# CONFIG_DEFAULT_DEADLINE is not set
95# CONFIG_DEFAULT_CFQ is not set
96CONFIG_DEFAULT_NOOP=y
97CONFIG_DEFAULT_IOSCHED="noop"
98CONFIG_CLASSIC_RCU=y
99
100#
101# Processor type and features
102#
103# CONFIG_M68328 is not set
104# CONFIG_M68EZ328 is not set
105# CONFIG_M68VZ328 is not set
106# CONFIG_M68360 is not set
107# CONFIG_M5206 is not set
108# CONFIG_M5206e is not set
109# CONFIG_M520x is not set
110# CONFIG_M523x is not set
111# CONFIG_M5249 is not set
112# CONFIG_M5271 is not set
113# CONFIG_M5272 is not set
114# CONFIG_M5275 is not set
115# CONFIG_M528x is not set
116CONFIG_M5307=y
117# CONFIG_M532x is not set
118# CONFIG_M5407 is not set
119CONFIG_COLDFIRE=y
120CONFIG_CLOCK_SET=y
121CONFIG_CLOCK_FREQ=90000000
122CONFIG_CLOCK_DIV=2
123# CONFIG_OLDMASK is not set
124
125#
126# Platform
127#
128# CONFIG_ARN5307 is not set
129CONFIG_M5307C3=y
130# CONFIG_eLIA is not set
131# CONFIG_SECUREEDGEMP3 is not set
132# CONFIG_CLEOPATRA is not set
133# CONFIG_NETtel is not set
134CONFIG_FREESCALE=y
135# CONFIG_4KSTACKS is not set
136CONFIG_HZ=100
137
138#
139# RAM configuration
140#
141CONFIG_RAMBASE=0x00000000
142CONFIG_RAMSIZE=0x00800000
143CONFIG_VECTORBASE=0x00000000
144CONFIG_KERNELBASE=0x00020000
145CONFIG_RAMAUTOBIT=y
146# CONFIG_RAM8BIT is not set
147# CONFIG_RAM16BIT is not set
148# CONFIG_RAM32BIT is not set
149
150#
151# ROM configuration
152#
153# CONFIG_ROM is not set
154CONFIG_RAMKERNEL=y
155# CONFIG_ROMKERNEL is not set
156CONFIG_SELECT_MEMORY_MODEL=y
157CONFIG_FLATMEM_MANUAL=y
158# CONFIG_DISCONTIGMEM_MANUAL is not set
159# CONFIG_SPARSEMEM_MANUAL is not set
160CONFIG_FLATMEM=y
161CONFIG_FLAT_NODE_MEM_MAP=y
162# CONFIG_SPARSEMEM_STATIC is not set
163# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
164CONFIG_PAGEFLAGS_EXTENDED=y
165CONFIG_SPLIT_PTLOCK_CPUS=4
166# CONFIG_RESOURCES_64BIT is not set
167CONFIG_ZONE_DMA_FLAG=1
168CONFIG_VIRT_TO_BUS=y
169CONFIG_ISA_DMA_API=y
170
171#
172# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
173#
174# CONFIG_PCI is not set
175# CONFIG_COMEMPCI is not set
176# CONFIG_ARCH_SUPPORTS_MSI is not set
177
178#
179# Executable file formats
180#
181CONFIG_BINFMT_FLAT=y
182# CONFIG_BINFMT_ZFLAT is not set
183# CONFIG_BINFMT_SHARED_FLAT is not set
184# CONFIG_BINFMT_AOUT is not set
185# CONFIG_BINFMT_MISC is not set
186
187#
188# Power management options
189#
190# CONFIG_PM is not set
191
192#
193# Networking
194#
195CONFIG_NET=y
196
197#
198# Networking options
199#
200CONFIG_PACKET=y
201# CONFIG_PACKET_MMAP is not set
202CONFIG_UNIX=y
203# CONFIG_NET_KEY is not set
204CONFIG_INET=y
205# CONFIG_IP_MULTICAST is not set
206# CONFIG_IP_ADVANCED_ROUTER is not set
207CONFIG_IP_FIB_HASH=y
208# CONFIG_IP_PNP is not set
209# CONFIG_NET_IPIP is not set
210# CONFIG_NET_IPGRE is not set
211# CONFIG_ARPD is not set
212# CONFIG_SYN_COOKIES is not set
213# CONFIG_INET_AH is not set
214# CONFIG_INET_ESP is not set
215# CONFIG_INET_IPCOMP is not set
216# CONFIG_INET_XFRM_TUNNEL is not set
217# CONFIG_INET_TUNNEL is not set
218# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
219# CONFIG_INET_XFRM_MODE_TUNNEL is not set
220# CONFIG_INET_XFRM_MODE_BEET is not set
221# CONFIG_INET_LRO is not set
222# CONFIG_INET_DIAG is not set
223# CONFIG_TCP_CONG_ADVANCED is not set
224CONFIG_TCP_CONG_CUBIC=y
225CONFIG_DEFAULT_TCP_CONG="cubic"
226# CONFIG_TCP_MD5SIG is not set
227# CONFIG_IPV6 is not set
228# CONFIG_NETWORK_SECMARK is not set
229# CONFIG_NETFILTER is not set
230# CONFIG_IP_DCCP is not set
231# CONFIG_IP_SCTP is not set
232# CONFIG_TIPC is not set
233# CONFIG_ATM is not set
234# CONFIG_BRIDGE is not set
235# CONFIG_VLAN_8021Q is not set
236# CONFIG_DECNET is not set
237# CONFIG_LLC2 is not set
238# CONFIG_IPX is not set
239# CONFIG_ATALK is not set
240# CONFIG_X25 is not set
241# CONFIG_LAPB is not set
242# CONFIG_ECONET is not set
243# CONFIG_WAN_ROUTER is not set
244# CONFIG_NET_SCHED is not set
245
246#
247# Network testing
248#
249# CONFIG_NET_PKTGEN is not set
250# CONFIG_HAMRADIO is not set
251# CONFIG_CAN is not set
252# CONFIG_IRDA is not set
253# CONFIG_BT is not set
254# CONFIG_AF_RXRPC is not set
255
256#
257# Wireless
258#
259# CONFIG_CFG80211 is not set
260# CONFIG_WIRELESS_EXT is not set
261# CONFIG_MAC80211 is not set
262# CONFIG_IEEE80211 is not set
263# CONFIG_RFKILL is not set
264# CONFIG_NET_9P is not set
265
266#
267# Device Drivers
268#
269
270#
271# Generic Driver Options
272#
273CONFIG_STANDALONE=y
274CONFIG_PREVENT_FIRMWARE_BUILD=y
275# CONFIG_SYS_HYPERVISOR is not set
276# CONFIG_CONNECTOR is not set
277# CONFIG_MTD is not set
278# CONFIG_PARPORT is not set
279CONFIG_BLK_DEV=y
280# CONFIG_BLK_DEV_COW_COMMON is not set
281# CONFIG_BLK_DEV_LOOP is not set
282# CONFIG_BLK_DEV_NBD is not set
283CONFIG_BLK_DEV_RAM=y
284CONFIG_BLK_DEV_RAM_COUNT=16
285CONFIG_BLK_DEV_RAM_SIZE=4096
286# CONFIG_BLK_DEV_XIP is not set
287# CONFIG_CDROM_PKTCDVD is not set
288# CONFIG_ATA_OVER_ETH is not set
289# CONFIG_MISC_DEVICES is not set
290CONFIG_HAVE_IDE=y
291# CONFIG_IDE is not set
292
293#
294# SCSI device support
295#
296# CONFIG_RAID_ATTRS is not set
297# CONFIG_SCSI is not set
298# CONFIG_SCSI_DMA is not set
299# CONFIG_SCSI_NETLINK is not set
300# CONFIG_MD is not set
301CONFIG_NETDEVICES=y
302# CONFIG_NETDEVICES_MULTIQUEUE is not set
303# CONFIG_DUMMY is not set
304# CONFIG_BONDING is not set
305# CONFIG_MACVLAN is not set
306# CONFIG_EQUALIZER is not set
307# CONFIG_TUN is not set
308# CONFIG_VETH is not set
309# CONFIG_PHYLIB is not set
310CONFIG_NET_ETHERNET=y
311# CONFIG_MII is not set
312# CONFIG_IBM_NEW_EMAC_ZMII is not set
313# CONFIG_IBM_NEW_EMAC_RGMII is not set
314# CONFIG_IBM_NEW_EMAC_TAH is not set
315# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
316# CONFIG_B44 is not set
317# CONFIG_NETDEV_1000 is not set
318# CONFIG_NETDEV_10000 is not set
319
320#
321# Wireless LAN
322#
323# CONFIG_WLAN_PRE80211 is not set
324# CONFIG_WLAN_80211 is not set
325# CONFIG_IWLWIFI is not set
326# CONFIG_IWLWIFI_LEDS is not set
327# CONFIG_WAN is not set
328CONFIG_PPP=y
329# CONFIG_PPP_MULTILINK is not set
330# CONFIG_PPP_FILTER is not set
331# CONFIG_PPP_ASYNC is not set
332# CONFIG_PPP_SYNC_TTY is not set
333# CONFIG_PPP_DEFLATE is not set
334# CONFIG_PPP_BSDCOMP is not set
335# CONFIG_PPP_MPPE is not set
336# CONFIG_PPPOE is not set
337# CONFIG_PPPOL2TP is not set
338CONFIG_SLIP=y
339CONFIG_SLIP_COMPRESSED=y
340CONFIG_SLHC=y
341# CONFIG_SLIP_SMART is not set
342# CONFIG_SLIP_MODE_SLIP6 is not set
343# CONFIG_NETCONSOLE is not set
344# CONFIG_NETPOLL is not set
345# CONFIG_NET_POLL_CONTROLLER is not set
346# CONFIG_ISDN is not set
347# CONFIG_PHONE is not set
348
349#
350# Input device support
351#
352CONFIG_INPUT=y
353# CONFIG_INPUT_FF_MEMLESS is not set
354# CONFIG_INPUT_POLLDEV is not set
355
356#
357# Userland interfaces
358#
359# CONFIG_INPUT_MOUSEDEV is not set
360# CONFIG_INPUT_JOYDEV is not set
361# CONFIG_INPUT_EVDEV is not set
362# CONFIG_INPUT_EVBUG is not set
363
364#
365# Input Device Drivers
366#
367# CONFIG_INPUT_KEYBOARD is not set
368# CONFIG_INPUT_MOUSE is not set
369# CONFIG_INPUT_JOYSTICK is not set
370# CONFIG_INPUT_TABLET is not set
371# CONFIG_INPUT_TOUCHSCREEN is not set
372# CONFIG_INPUT_MISC is not set
373
374#
375# Hardware I/O ports
376#
377# CONFIG_SERIO is not set
378# CONFIG_GAMEPORT is not set
379
380#
381# Character devices
382#
383# CONFIG_VT is not set
384# CONFIG_DEVKMEM is not set
385# CONFIG_SERIAL_NONSTANDARD is not set
386
387#
388# Serial drivers
389#
390# CONFIG_SERIAL_8250 is not set
391
392#
393# Non-8250 serial port support
394#
395CONFIG_SERIAL_CORE=y
396CONFIG_SERIAL_CORE_CONSOLE=y
397# CONFIG_SERIAL_COLDFIRE is not set
398CONFIG_SERIAL_MCF=y
399CONFIG_SERIAL_MCF_BAUDRATE=19200
400CONFIG_SERIAL_MCF_CONSOLE=y
401CONFIG_UNIX98_PTYS=y
402CONFIG_LEGACY_PTYS=y
403CONFIG_LEGACY_PTY_COUNT=256
404# CONFIG_IPMI_HANDLER is not set
405# CONFIG_HW_RANDOM is not set
406# CONFIG_GEN_RTC is not set
407# CONFIG_R3964 is not set
408# CONFIG_RAW_DRIVER is not set
409# CONFIG_TCG_TPM is not set
410# CONFIG_I2C is not set
411# CONFIG_SPI is not set
412# CONFIG_W1 is not set
413# CONFIG_POWER_SUPPLY is not set
414# CONFIG_HWMON is not set
415# CONFIG_THERMAL is not set
416# CONFIG_WATCHDOG is not set
417
418#
419# Sonics Silicon Backplane
420#
421CONFIG_SSB_POSSIBLE=y
422# CONFIG_SSB is not set
423
424#
425# Multifunction device drivers
426#
427# CONFIG_MFD_SM501 is not set
428# CONFIG_HTC_PASIC3 is not set
429
430#
431# Multimedia devices
432#
433
434#
435# Multimedia core support
436#
437# CONFIG_VIDEO_DEV is not set
438# CONFIG_DVB_CORE is not set
439
440#
441# Multimedia drivers
442#
443CONFIG_DAB=y
444
445#
446# Graphics support
447#
448# CONFIG_VGASTATE is not set
449# CONFIG_VIDEO_OUTPUT_CONTROL is not set
450# CONFIG_FB is not set
451# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
452
453#
454# Display device support
455#
456# CONFIG_DISPLAY_SUPPORT is not set
457
458#
459# Sound
460#
461# CONFIG_SOUND is not set
462# CONFIG_HID_SUPPORT is not set
463# CONFIG_USB_SUPPORT is not set
464# CONFIG_MMC is not set
465# CONFIG_MEMSTICK is not set
466# CONFIG_NEW_LEDS is not set
467# CONFIG_ACCESSIBILITY is not set
468# CONFIG_RTC_CLASS is not set
469# CONFIG_UIO is not set
470
471#
472# File systems
473#
474CONFIG_EXT2_FS=y
475# CONFIG_EXT2_FS_XATTR is not set
476# CONFIG_EXT3_FS is not set
477# CONFIG_EXT4DEV_FS is not set
478# CONFIG_REISERFS_FS is not set
479# CONFIG_JFS_FS is not set
480# CONFIG_FS_POSIX_ACL is not set
481# CONFIG_XFS_FS is not set
482# CONFIG_OCFS2_FS is not set
483# CONFIG_DNOTIFY is not set
484# CONFIG_INOTIFY is not set
485# CONFIG_QUOTA is not set
486# CONFIG_AUTOFS_FS is not set
487# CONFIG_AUTOFS4_FS is not set
488# CONFIG_FUSE_FS is not set
489
490#
491# CD-ROM/DVD Filesystems
492#
493# CONFIG_ISO9660_FS is not set
494# CONFIG_UDF_FS is not set
495
496#
497# DOS/FAT/NT Filesystems
498#
499# CONFIG_MSDOS_FS is not set
500# CONFIG_VFAT_FS is not set
501# CONFIG_NTFS_FS is not set
502
503#
504# Pseudo filesystems
505#
506CONFIG_PROC_FS=y
507CONFIG_PROC_SYSCTL=y
508CONFIG_SYSFS=y
509# CONFIG_TMPFS is not set
510# CONFIG_HUGETLB_PAGE is not set
511# CONFIG_CONFIGFS_FS is not set
512
513#
514# Miscellaneous filesystems
515#
516# CONFIG_ADFS_FS is not set
517# CONFIG_AFFS_FS is not set
518# CONFIG_HFS_FS is not set
519# CONFIG_HFSPLUS_FS is not set
520# CONFIG_BEFS_FS is not set
521# CONFIG_BFS_FS is not set
522# CONFIG_EFS_FS is not set
523# CONFIG_CRAMFS is not set
524# CONFIG_VXFS_FS is not set
525# CONFIG_MINIX_FS is not set
526# CONFIG_HPFS_FS is not set
527# CONFIG_QNX4FS_FS is not set
528CONFIG_ROMFS_FS=y
529# CONFIG_SYSV_FS is not set
530# CONFIG_UFS_FS is not set
531# CONFIG_NETWORK_FILESYSTEMS is not set
532
533#
534# Partition Types
535#
536# CONFIG_PARTITION_ADVANCED is not set
537CONFIG_MSDOS_PARTITION=y
538# CONFIG_NLS is not set
539# CONFIG_DLM is not set
540
541#
542# Kernel hacking
543#
544# CONFIG_PRINTK_TIME is not set
545CONFIG_ENABLE_WARN_DEPRECATED=y
546CONFIG_ENABLE_MUST_CHECK=y
547CONFIG_FRAME_WARN=1024
548# CONFIG_MAGIC_SYSRQ is not set
549# CONFIG_UNUSED_SYMBOLS is not set
550# CONFIG_DEBUG_FS is not set
551# CONFIG_HEADERS_CHECK is not set
552# CONFIG_DEBUG_KERNEL is not set
553# CONFIG_DEBUG_BUGVERBOSE is not set
554# CONFIG_SAMPLES is not set
555CONFIG_FULLDEBUG=y
556# CONFIG_HIGHPROFILE is not set
557# CONFIG_BOOTPARAM is not set
558# CONFIG_NO_KERNEL_MSG is not set
559# CONFIG_BDM_DISABLE is not set
560
561#
562# Security options
563#
564# CONFIG_KEYS is not set
565# CONFIG_SECURITY is not set
566# CONFIG_SECURITY_FILE_CAPABILITIES is not set
567# CONFIG_CRYPTO is not set
568
569#
570# Library routines
571#
572# CONFIG_GENERIC_FIND_FIRST_BIT is not set
573# CONFIG_CRC_CCITT is not set
574# CONFIG_CRC16 is not set
575# CONFIG_CRC_ITU_T is not set
576# CONFIG_CRC32 is not set
577# CONFIG_CRC7 is not set
578# CONFIG_LIBCRC32C is not set
579CONFIG_HAS_IOMEM=y
580CONFIG_HAS_DMA=y
diff --git a/arch/m68knommu/configs/m5407c3_defconfig b/arch/m68knommu/configs/m5407c3_defconfig
new file mode 100644
index 000000000000..1118936d20e3
--- /dev/null
+++ b/arch/m68knommu/configs/m5407c3_defconfig
@@ -0,0 +1,641 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc1
4# Wed May 7 10:25:16 2008
5#
6CONFIG_M68K=y
7# CONFIG_MMU is not set
8# CONFIG_FPU is not set
9CONFIG_ZONE_DMA=y
10CONFIG_RWSEM_GENERIC_SPINLOCK=y
11# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
12# CONFIG_ARCH_HAS_ILOG2_U32 is not set
13# CONFIG_ARCH_HAS_ILOG2_U64 is not set
14CONFIG_GENERIC_FIND_NEXT_BIT=y
15CONFIG_GENERIC_HWEIGHT=y
16CONFIG_GENERIC_HARDIRQS=y
17CONFIG_GENERIC_CALIBRATE_DELAY=y
18CONFIG_GENERIC_TIME=y
19CONFIG_TIME_LOW_RES=y
20CONFIG_NO_IOPORT=y
21CONFIG_ARCH_SUPPORTS_AOUT=y
22CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
23
24#
25# General setup
26#
27CONFIG_EXPERIMENTAL=y
28CONFIG_BROKEN_ON_SMP=y
29CONFIG_INIT_ENV_ARG_LIMIT=32
30CONFIG_LOCALVERSION=""
31CONFIG_LOCALVERSION_AUTO=y
32# CONFIG_SYSVIPC is not set
33# CONFIG_POSIX_MQUEUE is not set
34# CONFIG_BSD_PROCESS_ACCT is not set
35# CONFIG_TASKSTATS is not set
36# CONFIG_AUDIT is not set
37# CONFIG_IKCONFIG is not set
38CONFIG_LOG_BUF_SHIFT=14
39# CONFIG_CGROUPS is not set
40# CONFIG_GROUP_SCHED is not set
41# CONFIG_SYSFS_DEPRECATED_V2 is not set
42# CONFIG_RELAY is not set
43# CONFIG_NAMESPACES is not set
44# CONFIG_BLK_DEV_INITRD is not set
45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
46CONFIG_SYSCTL=y
47CONFIG_EMBEDDED=y
48# CONFIG_UID16 is not set
49# CONFIG_SYSCTL_SYSCALL is not set
50# CONFIG_KALLSYMS is not set
51# CONFIG_HOTPLUG is not set
52CONFIG_PRINTK=y
53CONFIG_BUG=y
54CONFIG_ELF_CORE=y
55# CONFIG_COMPAT_BRK is not set
56CONFIG_BASE_FULL=y
57# CONFIG_FUTEX is not set
58# CONFIG_EPOLL is not set
59# CONFIG_SIGNALFD is not set
60# CONFIG_TIMERFD is not set
61# CONFIG_EVENTFD is not set
62# CONFIG_VM_EVENT_COUNTERS is not set
63CONFIG_SLAB=y
64# CONFIG_SLUB is not set
65# CONFIG_SLOB is not set
66# CONFIG_PROFILING is not set
67# CONFIG_MARKERS is not set
68# CONFIG_HAVE_OPROFILE is not set
69# CONFIG_HAVE_KPROBES is not set
70# CONFIG_HAVE_KRETPROBES is not set
71# CONFIG_HAVE_DMA_ATTRS is not set
72CONFIG_SLABINFO=y
73CONFIG_TINY_SHMEM=y
74CONFIG_BASE_SMALL=0
75CONFIG_MODULES=y
76CONFIG_MODULE_UNLOAD=y
77# CONFIG_MODULE_FORCE_UNLOAD is not set
78# CONFIG_MODVERSIONS is not set
79# CONFIG_MODULE_SRCVERSION_ALL is not set
80# CONFIG_KMOD is not set
81CONFIG_BLOCK=y
82# CONFIG_LBD is not set
83# CONFIG_BLK_DEV_IO_TRACE is not set
84# CONFIG_LSF is not set
85# CONFIG_BLK_DEV_BSG is not set
86
87#
88# IO Schedulers
89#
90CONFIG_IOSCHED_NOOP=y
91# CONFIG_IOSCHED_AS is not set
92# CONFIG_IOSCHED_DEADLINE is not set
93# CONFIG_IOSCHED_CFQ is not set
94# CONFIG_DEFAULT_AS is not set
95# CONFIG_DEFAULT_DEADLINE is not set
96# CONFIG_DEFAULT_CFQ is not set
97CONFIG_DEFAULT_NOOP=y
98CONFIG_DEFAULT_IOSCHED="noop"
99CONFIG_CLASSIC_RCU=y
100
101#
102# Processor type and features
103#
104# CONFIG_M68328 is not set
105# CONFIG_M68EZ328 is not set
106# CONFIG_M68VZ328 is not set
107# CONFIG_M68360 is not set
108# CONFIG_M5206 is not set
109# CONFIG_M5206e is not set
110# CONFIG_M520x is not set
111# CONFIG_M523x is not set
112# CONFIG_M5249 is not set
113# CONFIG_M5271 is not set
114# CONFIG_M5272 is not set
115# CONFIG_M5275 is not set
116# CONFIG_M528x is not set
117# CONFIG_M5307 is not set
118# CONFIG_M532x is not set
119CONFIG_M5407=y
120CONFIG_COLDFIRE=y
121CONFIG_CLOCK_SET=y
122CONFIG_CLOCK_FREQ=50000000
123CONFIG_CLOCK_DIV=1
124
125#
126# Platform
127#
128CONFIG_M5407C3=y
129# CONFIG_CLEOPATRA is not set
130CONFIG_FREESCALE=y
131CONFIG_4KSTACKS=y
132CONFIG_HZ=100
133
134#
135# RAM configuration
136#
137CONFIG_RAMBASE=0x00000000
138CONFIG_RAMSIZE=0x00000000
139CONFIG_VECTORBASE=0x00000000
140CONFIG_KERNELBASE=0x00020000
141CONFIG_RAMAUTOBIT=y
142# CONFIG_RAM8BIT is not set
143# CONFIG_RAM16BIT is not set
144# CONFIG_RAM32BIT is not set
145
146#
147# ROM configuration
148#
149# CONFIG_ROM is not set
150CONFIG_RAMKERNEL=y
151# CONFIG_ROMKERNEL is not set
152CONFIG_SELECT_MEMORY_MODEL=y
153CONFIG_FLATMEM_MANUAL=y
154# CONFIG_DISCONTIGMEM_MANUAL is not set
155# CONFIG_SPARSEMEM_MANUAL is not set
156CONFIG_FLATMEM=y
157CONFIG_FLAT_NODE_MEM_MAP=y
158# CONFIG_SPARSEMEM_STATIC is not set
159# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
160CONFIG_PAGEFLAGS_EXTENDED=y
161CONFIG_SPLIT_PTLOCK_CPUS=4
162# CONFIG_RESOURCES_64BIT is not set
163CONFIG_ZONE_DMA_FLAG=1
164CONFIG_VIRT_TO_BUS=y
165CONFIG_ISA_DMA_API=y
166
167#
168# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
169#
170# CONFIG_PCI is not set
171# CONFIG_COMEMPCI is not set
172# CONFIG_ARCH_SUPPORTS_MSI is not set
173
174#
175# Executable file formats
176#
177CONFIG_BINFMT_FLAT=y
178# CONFIG_BINFMT_ZFLAT is not set
179# CONFIG_BINFMT_SHARED_FLAT is not set
180# CONFIG_BINFMT_AOUT is not set
181# CONFIG_BINFMT_MISC is not set
182
183#
184# Power management options
185#
186# CONFIG_PM is not set
187
188#
189# Networking
190#
191CONFIG_NET=y
192
193#
194# Networking options
195#
196CONFIG_PACKET=y
197# CONFIG_PACKET_MMAP is not set
198CONFIG_UNIX=y
199# CONFIG_NET_KEY is not set
200CONFIG_INET=y
201# CONFIG_IP_MULTICAST is not set
202# CONFIG_IP_ADVANCED_ROUTER is not set
203CONFIG_IP_FIB_HASH=y
204# CONFIG_IP_PNP is not set
205# CONFIG_NET_IPIP is not set
206# CONFIG_NET_IPGRE is not set
207# CONFIG_ARPD is not set
208# CONFIG_SYN_COOKIES is not set
209# CONFIG_INET_AH is not set
210# CONFIG_INET_ESP is not set
211# CONFIG_INET_IPCOMP is not set
212# CONFIG_INET_XFRM_TUNNEL is not set
213# CONFIG_INET_TUNNEL is not set
214# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
215# CONFIG_INET_XFRM_MODE_TUNNEL is not set
216# CONFIG_INET_XFRM_MODE_BEET is not set
217# CONFIG_INET_LRO is not set
218# CONFIG_INET_DIAG is not set
219# CONFIG_TCP_CONG_ADVANCED is not set
220CONFIG_TCP_CONG_CUBIC=y
221CONFIG_DEFAULT_TCP_CONG="cubic"
222# CONFIG_TCP_MD5SIG is not set
223# CONFIG_IPV6 is not set
224# CONFIG_NETWORK_SECMARK is not set
225# CONFIG_NETFILTER is not set
226# CONFIG_IP_DCCP is not set
227# CONFIG_IP_SCTP is not set
228# CONFIG_TIPC is not set
229# CONFIG_ATM is not set
230# CONFIG_BRIDGE is not set
231# CONFIG_VLAN_8021Q is not set
232# CONFIG_DECNET is not set
233# CONFIG_LLC2 is not set
234# CONFIG_IPX is not set
235# CONFIG_ATALK is not set
236# CONFIG_X25 is not set
237# CONFIG_LAPB is not set
238# CONFIG_ECONET is not set
239# CONFIG_WAN_ROUTER is not set
240# CONFIG_NET_SCHED is not set
241
242#
243# Network testing
244#
245# CONFIG_NET_PKTGEN is not set
246# CONFIG_HAMRADIO is not set
247# CONFIG_CAN is not set
248# CONFIG_IRDA is not set
249# CONFIG_BT is not set
250# CONFIG_AF_RXRPC is not set
251
252#
253# Wireless
254#
255# CONFIG_CFG80211 is not set
256# CONFIG_WIRELESS_EXT is not set
257# CONFIG_MAC80211 is not set
258# CONFIG_IEEE80211 is not set
259# CONFIG_RFKILL is not set
260# CONFIG_NET_9P is not set
261
262#
263# Device Drivers
264#
265
266#
267# Generic Driver Options
268#
269CONFIG_STANDALONE=y
270CONFIG_PREVENT_FIRMWARE_BUILD=y
271# CONFIG_SYS_HYPERVISOR is not set
272# CONFIG_CONNECTOR is not set
273CONFIG_MTD=y
274# CONFIG_MTD_DEBUG is not set
275# CONFIG_MTD_CONCAT is not set
276CONFIG_MTD_PARTITIONS=y
277# CONFIG_MTD_REDBOOT_PARTS is not set
278# CONFIG_MTD_CMDLINE_PARTS is not set
279# CONFIG_MTD_AR7_PARTS is not set
280
281#
282# User Modules And Translation Layers
283#
284CONFIG_MTD_CHAR=y
285CONFIG_MTD_BLKDEVS=y
286CONFIG_MTD_BLOCK=y
287# CONFIG_FTL is not set
288# CONFIG_NFTL is not set
289# CONFIG_INFTL is not set
290# CONFIG_RFD_FTL is not set
291# CONFIG_SSFDC is not set
292# CONFIG_MTD_OOPS is not set
293
294#
295# RAM/ROM/Flash chip drivers
296#
297# CONFIG_MTD_CFI is not set
298# CONFIG_MTD_JEDECPROBE is not set
299CONFIG_MTD_MAP_BANK_WIDTH_1=y
300CONFIG_MTD_MAP_BANK_WIDTH_2=y
301CONFIG_MTD_MAP_BANK_WIDTH_4=y
302# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
303# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
304# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
305CONFIG_MTD_CFI_I1=y
306CONFIG_MTD_CFI_I2=y
307# CONFIG_MTD_CFI_I4 is not set
308# CONFIG_MTD_CFI_I8 is not set
309CONFIG_MTD_RAM=y
310# CONFIG_MTD_ROM is not set
311# CONFIG_MTD_ABSENT is not set
312
313#
314# Mapping drivers for chip access
315#
316# CONFIG_MTD_COMPLEX_MAPPINGS is not set
317CONFIG_MTD_UCLINUX=y
318# CONFIG_MTD_PLATRAM is not set
319
320#
321# Self-contained MTD device drivers
322#
323# CONFIG_MTD_SLRAM is not set
324# CONFIG_MTD_PHRAM is not set
325# CONFIG_MTD_MTDRAM is not set
326# CONFIG_MTD_BLOCK2MTD is not set
327
328#
329# Disk-On-Chip Device Drivers
330#
331# CONFIG_MTD_DOC2000 is not set
332# CONFIG_MTD_DOC2001 is not set
333# CONFIG_MTD_DOC2001PLUS is not set
334# CONFIG_MTD_NAND is not set
335# CONFIG_MTD_ONENAND is not set
336
337#
338# UBI - Unsorted block images
339#
340# CONFIG_MTD_UBI is not set
341# CONFIG_PARPORT is not set
342CONFIG_BLK_DEV=y
343# CONFIG_BLK_DEV_COW_COMMON is not set
344# CONFIG_BLK_DEV_LOOP is not set
345# CONFIG_BLK_DEV_NBD is not set
346CONFIG_BLK_DEV_RAM=y
347CONFIG_BLK_DEV_RAM_COUNT=16
348CONFIG_BLK_DEV_RAM_SIZE=4096
349# CONFIG_BLK_DEV_XIP is not set
350# CONFIG_CDROM_PKTCDVD is not set
351# CONFIG_ATA_OVER_ETH is not set
352# CONFIG_MISC_DEVICES is not set
353CONFIG_HAVE_IDE=y
354# CONFIG_IDE is not set
355
356#
357# SCSI device support
358#
359# CONFIG_RAID_ATTRS is not set
360# CONFIG_SCSI is not set
361# CONFIG_SCSI_DMA is not set
362# CONFIG_SCSI_NETLINK is not set
363# CONFIG_MD is not set
364CONFIG_NETDEVICES=y
365# CONFIG_NETDEVICES_MULTIQUEUE is not set
366# CONFIG_DUMMY is not set
367# CONFIG_BONDING is not set
368# CONFIG_MACVLAN is not set
369# CONFIG_EQUALIZER is not set
370# CONFIG_TUN is not set
371# CONFIG_VETH is not set
372# CONFIG_PHYLIB is not set
373CONFIG_NET_ETHERNET=y
374# CONFIG_MII is not set
375# CONFIG_IBM_NEW_EMAC_ZMII is not set
376# CONFIG_IBM_NEW_EMAC_RGMII is not set
377# CONFIG_IBM_NEW_EMAC_TAH is not set
378# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
379# CONFIG_B44 is not set
380# CONFIG_NETDEV_1000 is not set
381# CONFIG_NETDEV_10000 is not set
382
383#
384# Wireless LAN
385#
386# CONFIG_WLAN_PRE80211 is not set
387# CONFIG_WLAN_80211 is not set
388# CONFIG_IWLWIFI is not set
389# CONFIG_IWLWIFI_LEDS is not set
390# CONFIG_WAN is not set
391CONFIG_PPP=y
392# CONFIG_PPP_MULTILINK is not set
393# CONFIG_PPP_FILTER is not set
394# CONFIG_PPP_ASYNC is not set
395# CONFIG_PPP_SYNC_TTY is not set
396# CONFIG_PPP_DEFLATE is not set
397# CONFIG_PPP_BSDCOMP is not set
398# CONFIG_PPP_MPPE is not set
399# CONFIG_PPPOE is not set
400# CONFIG_PPPOL2TP is not set
401# CONFIG_SLIP is not set
402CONFIG_SLHC=y
403# CONFIG_NETCONSOLE is not set
404# CONFIG_NETPOLL is not set
405# CONFIG_NET_POLL_CONTROLLER is not set
406# CONFIG_ISDN is not set
407# CONFIG_PHONE is not set
408
409#
410# Input device support
411#
412CONFIG_INPUT=y
413# CONFIG_INPUT_FF_MEMLESS is not set
414# CONFIG_INPUT_POLLDEV is not set
415
416#
417# Userland interfaces
418#
419# CONFIG_INPUT_MOUSEDEV is not set
420# CONFIG_INPUT_JOYDEV is not set
421# CONFIG_INPUT_EVDEV is not set
422# CONFIG_INPUT_EVBUG is not set
423
424#
425# Input Device Drivers
426#
427# CONFIG_INPUT_KEYBOARD is not set
428# CONFIG_INPUT_MOUSE is not set
429# CONFIG_INPUT_JOYSTICK is not set
430# CONFIG_INPUT_TABLET is not set
431# CONFIG_INPUT_TOUCHSCREEN is not set
432# CONFIG_INPUT_MISC is not set
433
434#
435# Hardware I/O ports
436#
437# CONFIG_SERIO is not set
438# CONFIG_GAMEPORT is not set
439
440#
441# Character devices
442#
443# CONFIG_VT is not set
444# CONFIG_DEVKMEM is not set
445# CONFIG_SERIAL_NONSTANDARD is not set
446
447#
448# Serial drivers
449#
450# CONFIG_SERIAL_8250 is not set
451
452#
453# Non-8250 serial port support
454#
455CONFIG_SERIAL_CORE=y
456CONFIG_SERIAL_CORE_CONSOLE=y
457# CONFIG_SERIAL_COLDFIRE is not set
458CONFIG_SERIAL_MCF=y
459CONFIG_SERIAL_MCF_BAUDRATE=19200
460CONFIG_SERIAL_MCF_CONSOLE=y
461# CONFIG_UNIX98_PTYS is not set
462CONFIG_LEGACY_PTYS=y
463CONFIG_LEGACY_PTY_COUNT=256
464# CONFIG_IPMI_HANDLER is not set
465# CONFIG_HW_RANDOM is not set
466# CONFIG_GEN_RTC is not set
467# CONFIG_R3964 is not set
468# CONFIG_RAW_DRIVER is not set
469# CONFIG_TCG_TPM is not set
470# CONFIG_I2C is not set
471# CONFIG_SPI is not set
472# CONFIG_W1 is not set
473# CONFIG_POWER_SUPPLY is not set
474# CONFIG_HWMON is not set
475# CONFIG_THERMAL is not set
476# CONFIG_WATCHDOG is not set
477
478#
479# Sonics Silicon Backplane
480#
481CONFIG_SSB_POSSIBLE=y
482# CONFIG_SSB is not set
483
484#
485# Multifunction device drivers
486#
487# CONFIG_MFD_SM501 is not set
488# CONFIG_HTC_PASIC3 is not set
489
490#
491# Multimedia devices
492#
493
494#
495# Multimedia core support
496#
497# CONFIG_VIDEO_DEV is not set
498# CONFIG_DVB_CORE is not set
499
500#
501# Multimedia drivers
502#
503CONFIG_DAB=y
504
505#
506# Graphics support
507#
508# CONFIG_VGASTATE is not set
509# CONFIG_VIDEO_OUTPUT_CONTROL is not set
510# CONFIG_FB is not set
511# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
512
513#
514# Display device support
515#
516# CONFIG_DISPLAY_SUPPORT is not set
517
518#
519# Sound
520#
521# CONFIG_SOUND is not set
522# CONFIG_HID_SUPPORT is not set
523# CONFIG_USB_SUPPORT is not set
524# CONFIG_MMC is not set
525# CONFIG_MEMSTICK is not set
526# CONFIG_NEW_LEDS is not set
527# CONFIG_ACCESSIBILITY is not set
528# CONFIG_RTC_CLASS is not set
529# CONFIG_UIO is not set
530
531#
532# File systems
533#
534CONFIG_EXT2_FS=y
535# CONFIG_EXT2_FS_XATTR is not set
536# CONFIG_EXT3_FS is not set
537# CONFIG_EXT4DEV_FS is not set
538# CONFIG_REISERFS_FS is not set
539# CONFIG_JFS_FS is not set
540# CONFIG_FS_POSIX_ACL is not set
541# CONFIG_XFS_FS is not set
542# CONFIG_OCFS2_FS is not set
543# CONFIG_DNOTIFY is not set
544# CONFIG_INOTIFY is not set
545# CONFIG_QUOTA is not set
546# CONFIG_AUTOFS_FS is not set
547# CONFIG_AUTOFS4_FS is not set
548# CONFIG_FUSE_FS is not set
549
550#
551# CD-ROM/DVD Filesystems
552#
553# CONFIG_ISO9660_FS is not set
554# CONFIG_UDF_FS is not set
555
556#
557# DOS/FAT/NT Filesystems
558#
559# CONFIG_MSDOS_FS is not set
560# CONFIG_VFAT_FS is not set
561# CONFIG_NTFS_FS is not set
562
563#
564# Pseudo filesystems
565#
566CONFIG_PROC_FS=y
567CONFIG_PROC_SYSCTL=y
568CONFIG_SYSFS=y
569# CONFIG_TMPFS is not set
570# CONFIG_HUGETLB_PAGE is not set
571# CONFIG_CONFIGFS_FS is not set
572
573#
574# Miscellaneous filesystems
575#
576# CONFIG_ADFS_FS is not set
577# CONFIG_AFFS_FS is not set
578# CONFIG_HFS_FS is not set
579# CONFIG_HFSPLUS_FS is not set
580# CONFIG_BEFS_FS is not set
581# CONFIG_BFS_FS is not set
582# CONFIG_EFS_FS is not set
583# CONFIG_JFFS2_FS is not set
584# CONFIG_CRAMFS is not set
585# CONFIG_VXFS_FS is not set
586# CONFIG_MINIX_FS is not set
587# CONFIG_HPFS_FS is not set
588# CONFIG_QNX4FS_FS is not set
589CONFIG_ROMFS_FS=y
590# CONFIG_SYSV_FS is not set
591# CONFIG_UFS_FS is not set
592# CONFIG_NETWORK_FILESYSTEMS is not set
593
594#
595# Partition Types
596#
597# CONFIG_PARTITION_ADVANCED is not set
598CONFIG_MSDOS_PARTITION=y
599# CONFIG_NLS is not set
600# CONFIG_DLM is not set
601
602#
603# Kernel hacking
604#
605# CONFIG_PRINTK_TIME is not set
606CONFIG_ENABLE_WARN_DEPRECATED=y
607CONFIG_ENABLE_MUST_CHECK=y
608CONFIG_FRAME_WARN=1024
609# CONFIG_MAGIC_SYSRQ is not set
610# CONFIG_UNUSED_SYMBOLS is not set
611# CONFIG_DEBUG_FS is not set
612# CONFIG_HEADERS_CHECK is not set
613# CONFIG_DEBUG_KERNEL is not set
614# CONFIG_DEBUG_BUGVERBOSE is not set
615# CONFIG_SAMPLES is not set
616# CONFIG_FULLDEBUG is not set
617# CONFIG_HIGHPROFILE is not set
618# CONFIG_BOOTPARAM is not set
619# CONFIG_NO_KERNEL_MSG is not set
620# CONFIG_BDM_DISABLE is not set
621
622#
623# Security options
624#
625# CONFIG_KEYS is not set
626# CONFIG_SECURITY is not set
627# CONFIG_SECURITY_FILE_CAPABILITIES is not set
628# CONFIG_CRYPTO is not set
629
630#
631# Library routines
632#
633# CONFIG_GENERIC_FIND_FIRST_BIT is not set
634# CONFIG_CRC_CCITT is not set
635# CONFIG_CRC16 is not set
636# CONFIG_CRC_ITU_T is not set
637# CONFIG_CRC32 is not set
638# CONFIG_CRC7 is not set
639# CONFIG_LIBCRC32C is not set
640CONFIG_HAS_IOMEM=y
641CONFIG_HAS_DMA=y
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index 03f4fe6a2fc0..5985f1989021 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/fb.h> 23#include <linux/fb.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/mm.h>
25#include <linux/console.h> 26#include <linux/console.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/string.h> 28#include <linux/string.h>
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index 0ccfb2ad6380..d182b2f72211 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -33,14 +33,13 @@ static inline int set_rtc_mmss(unsigned long nowtime)
33 return -1; 33 return -1;
34} 34}
35 35
36#ifndef CONFIG_GENERIC_CLOCKEVENTS
36/* 37/*
37 * timer_interrupt() needs to keep up the real-time clock, 38 * timer_interrupt() needs to keep up the real-time clock,
38 * as well as call the "do_timer()" routine every clocktick 39 * as well as call the "do_timer()" routine every clocktick
39 */ 40 */
40irqreturn_t arch_timer_interrupt(int irq, void *dummy) 41irqreturn_t arch_timer_interrupt(int irq, void *dummy)
41{ 42{
42 /* last time the cmos clock got updated */
43 static long last_rtc_update=0;
44 43
45 if (current->pid) 44 if (current->pid)
46 profile_tick(CPU_PROFILING); 45 profile_tick(CPU_PROFILING);
@@ -49,21 +48,6 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
49 48
50 do_timer(1); 49 do_timer(1);
51 50
52 /*
53 * If we have an externally synchronized Linux clock, then update
54 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
55 * called as close as possible to 500 ms before the new second starts.
56 */
57 if (ntp_synced() &&
58 xtime.tv_sec > last_rtc_update + 660 &&
59 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
60 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
61 if (set_rtc_mmss(xtime.tv_sec) == 0)
62 last_rtc_update = xtime.tv_sec;
63 else
64 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
65 }
66
67 write_sequnlock(&xtime_lock); 51 write_sequnlock(&xtime_lock);
68 52
69#ifndef CONFIG_SMP 53#ifndef CONFIG_SMP
@@ -71,8 +55,9 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
71#endif 55#endif
72 return(IRQ_HANDLED); 56 return(IRQ_HANDLED);
73} 57}
58#endif
74 59
75void time_init(void) 60static unsigned long read_rtc_mmss(void)
76{ 61{
77 unsigned int year, mon, day, hour, min, sec; 62 unsigned int year, mon, day, hour, min, sec;
78 63
@@ -83,10 +68,21 @@ void time_init(void)
83 68
84 if ((year += 1900) < 1970) 69 if ((year += 1900) < 1970)
85 year += 100; 70 year += 100;
86 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
87 xtime.tv_nsec = 0;
88 wall_to_monotonic.tv_sec = -xtime.tv_sec;
89 71
90 hw_timer_init(); 72 return mktime(year, mon, day, hour, min, sec);
73}
74
75unsigned long read_persistent_clock(void)
76{
77 return read_rtc_mmss();
91} 78}
92 79
80int update_persistent_clock(struct timespec now)
81{
82 return set_rtc_mmss(now.tv_sec);
83}
84
85void time_init(void)
86{
87 hw_timer_init();
88}
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index ec9aea652e79..46f8f9d0c408 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -103,12 +103,28 @@ asmlinkage void buserr_c(struct frame *fp)
103 force_sig(SIGSEGV, current); 103 force_sig(SIGSEGV, current);
104} 104}
105 105
106static void print_this_address(unsigned long addr, int i)
107{
108#ifdef CONFIG_KALLSYMS
109 printk(KERN_EMERG " [%08lx] ", addr);
110 print_symbol(KERN_CONT "%s\n", addr);
111#else
112 if (i % 5)
113 printk(KERN_CONT " [%08lx] ", addr);
114 else
115 printk(KERN_CONT "\n" KERN_EMERG " [%08lx] ", addr);
116 i++;
117#endif
118}
119
106int kstack_depth_to_print = 48; 120int kstack_depth_to_print = 48;
107 121
108static void __show_stack(struct task_struct *task, unsigned long *stack) 122static void __show_stack(struct task_struct *task, unsigned long *stack)
109{ 123{
110 unsigned long *endstack, addr; 124 unsigned long *endstack, addr;
125#ifdef CONFIG_FRAME_POINTER
111 unsigned long *last_stack; 126 unsigned long *last_stack;
127#endif
112 int i; 128 int i;
113 129
114 if (!stack) 130 if (!stack)
@@ -126,6 +142,7 @@ static void __show_stack(struct task_struct *task, unsigned long *stack)
126 printk(" %08lx", *(stack + i)); 142 printk(" %08lx", *(stack + i));
127 } 143 }
128 printk("\n"); 144 printk("\n");
145 i = 0;
129 146
130#ifdef CONFIG_FRAME_POINTER 147#ifdef CONFIG_FRAME_POINTER
131 printk(KERN_EMERG "Call Trace:\n"); 148 printk(KERN_EMERG "Call Trace:\n");
@@ -134,15 +151,30 @@ static void __show_stack(struct task_struct *task, unsigned long *stack)
134 while (stack <= endstack && stack > last_stack) { 151 while (stack <= endstack && stack > last_stack) {
135 152
136 addr = *(stack + 1); 153 addr = *(stack + 1);
137 printk(KERN_EMERG " [%08lx] ", addr); 154 print_this_address(addr, i);
138 print_symbol(KERN_CONT "%s\n", addr); 155 i++;
139 156
140 last_stack = stack; 157 last_stack = stack;
141 stack = (unsigned long *)*stack; 158 stack = (unsigned long *)*stack;
142 } 159 }
143 printk("\n"); 160 printk("\n");
144#else 161#else
145 printk(KERN_EMERG "CONFIG_FRAME_POINTER disabled, no symbolic call trace\n"); 162 printk(KERN_EMERG "Call Trace with CONFIG_FRAME_POINTER disabled:\n");
163 while (stack <= endstack) {
164 addr = *stack++;
165 /*
166 * If the address is either in the text segment of the kernel,
167 * or in a region which is occupied by a module then it *may*
168 * be the address of a calling routine; if so, print it so that
169 * someone tracing down the cause of the crash will be able to
170 * figure out the call path that was taken.
171 */
172 if (__kernel_text_address(addr)) {
173 print_this_address(addr, i);
174 i++;
175 }
176 }
177 printk(KERN_CONT "\n");
146#endif 178#endif
147} 179}
148 180
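
Without CONFIG_FRAME_POINTER the new code cannot follow the frame chain, so __show_stack() falls back to scanning every word of the stack and printing whatever __kernel_text_address() accepts, which may include stale return addresses left over from earlier calls. A minimal host-side sketch of that heuristic; the text range and stack contents below are made-up values, and looks_like_kernel_text() merely stands in for the real __kernel_text_address():

#include <stdio.h>

#define TEXT_START	0x00020000UL	/* assumed kernel text range, for illustration only */
#define TEXT_END	0x00120000UL

static int looks_like_kernel_text(unsigned long addr)
{
	return addr >= TEXT_START && addr < TEXT_END;
}

int main(void)
{
	/* A made-up stack dump: a mix of data words and plausible return addresses. */
	unsigned long stack[] = { 0x00000001, 0x0002f3a4, 0x12345678, 0x000b1c20 };
	unsigned int i;

	/* Print only the words that fall inside the (pretend) kernel text segment. */
	for (i = 0; i < sizeof(stack) / sizeof(stack[0]); i++)
		if (looks_like_kernel_text(stack[i]))
			printf(" [%08lx]\n", stack[i]);
	return 0;
}
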
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index 93e69236ed6f..69ba9b10767a 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -62,6 +62,7 @@ SECTIONS {
62 .text : { 62 .text : {
63 _text = .; 63 _text = .;
64 _stext = . ; 64 _stext = . ;
65 HEAD_TEXT
65 TEXT_TEXT 66 TEXT_TEXT
66 SCHED_TEXT 67 SCHED_TEXT
67 LOCK_TEXT 68 LOCK_TEXT
diff --git a/arch/m68knommu/platform/coldfire/Makefile b/arch/m68knommu/platform/coldfire/Makefile
index 40cf20be1b90..4f416a91a829 100644
--- a/arch/m68knommu/platform/coldfire/Makefile
+++ b/arch/m68knommu/platform/coldfire/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_COLDFIRE) += dma.o entry.o vectors.o
18obj-$(CONFIG_M5206) += timers.o 18obj-$(CONFIG_M5206) += timers.o
19obj-$(CONFIG_M5206e) += timers.o 19obj-$(CONFIG_M5206e) += timers.o
20obj-$(CONFIG_M520x) += pit.o 20obj-$(CONFIG_M520x) += pit.o
21obj-$(CONFIG_M523x) += pit.o 21obj-$(CONFIG_M523x) += pit.o dma_timer.o
22obj-$(CONFIG_M5249) += timers.o 22obj-$(CONFIG_M5249) += timers.o
23obj-$(CONFIG_M527x) += pit.o 23obj-$(CONFIG_M527x) += pit.o
24obj-$(CONFIG_M5272) += timers.o 24obj-$(CONFIG_M5272) += timers.o
diff --git a/arch/m68knommu/platform/coldfire/dma_timer.c b/arch/m68knommu/platform/coldfire/dma_timer.c
new file mode 100644
index 000000000000..772578b1084f
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/dma_timer.c
@@ -0,0 +1,84 @@
1/*
2 * dma_timer.c -- Freescale ColdFire DMA Timer.
3 *
4 * Copyright (C) 2007, Benedikt Spranger <b.spranger@linutronix.de>
5 * Copyright (C) 2008. Sebastian Siewior, Linutronix
6 *
7 */
8
9#include <linux/clocksource.h>
10#include <linux/io.h>
11
12#include <asm/machdep.h>
13#include <asm/coldfire.h>
14#include <asm/mcfpit.h>
15#include <asm/mcfsim.h>
16
17#define DMA_TIMER_0 (0x00)
18#define DMA_TIMER_1 (0x40)
19#define DMA_TIMER_2 (0x80)
20#define DMA_TIMER_3 (0xc0)
21
22#define DTMR0 (MCF_IPSBAR + DMA_TIMER_0 + 0x400)
23#define DTXMR0 (MCF_IPSBAR + DMA_TIMER_0 + 0x402)
24#define DTER0 (MCF_IPSBAR + DMA_TIMER_0 + 0x403)
25#define DTRR0 (MCF_IPSBAR + DMA_TIMER_0 + 0x404)
26#define DTCR0 (MCF_IPSBAR + DMA_TIMER_0 + 0x408)
27#define DTCN0 (MCF_IPSBAR + DMA_TIMER_0 + 0x40c)
28
29#define DMA_FREQ ((MCF_CLK / 2) / 16)
30
31/* DTMR */
32#define DMA_DTMR_RESTART (1 << 3)
33#define DMA_DTMR_CLK_DIV_1 (1 << 1)
34#define DMA_DTMR_CLK_DIV_16 (2 << 1)
35#define DMA_DTMR_ENABLE (1 << 0)
36
37static cycle_t cf_dt_get_cycles(void)
38{
39 return __raw_readl(DTCN0);
40}
41
42static struct clocksource clocksource_cf_dt = {
43 .name = "coldfire_dma_timer",
44 .rating = 200,
45 .read = cf_dt_get_cycles,
46 .mask = CLOCKSOURCE_MASK(32),
47 .shift = 20,
48 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
49};
50
51static int __init init_cf_dt_clocksource(void)
52{
53 /*
54 * We set up DMA timer 0 in free run mode. This incrementing counter is
55 * used as a highly precise clock source. With MCF_CLK = 150 MHz we
56 * get a ~213 ns resolution and the 32bit register will overflow almost
57 * every 15 minutes.
58 */
59 __raw_writeb(0x00, DTXMR0);
60 __raw_writeb(0x00, DTER0);
61 __raw_writel(0x00000000, DTRR0);
62 __raw_writew(DMA_DTMR_CLK_DIV_16 | DMA_DTMR_ENABLE, DTMR0);
63 clocksource_cf_dt.mult = clocksource_hz2mult(DMA_FREQ,
64 clocksource_cf_dt.shift);
65 return clocksource_register(&clocksource_cf_dt);
66}
67
68arch_initcall(init_cf_dt_clocksource);
69
70#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
71#define CYC2NS_SCALE ((1000000 << CYC2NS_SCALE_FACTOR) / (DMA_FREQ / 1000))
72
73static unsigned long long cycles2ns(unsigned long cycl)
74{
75 return (unsigned long long) ((unsigned long long)cycl *
76 CYC2NS_SCALE) >> CYC2NS_SCALE_FACTOR;
77}
78
79unsigned long long sched_clock(void)
80{
81 unsigned long cycl = __raw_readl(DTCN0);
82
83 return cycles2ns(cycl);
84}
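
The cycles2ns() conversion above is plain fixed-point arithmetic: scale the raw cycle count by a precomputed ns-per-cycle factor (itself scaled up by 2^10) and shift back down. A minimal host-side sketch of the same math, assuming MCF_CLK = 150000000 (the 150 MHz case mentioned in the comment); the DMA_FREQ and CYC2NS macros are copied from the new file:

#include <stdio.h>

#define MCF_CLK			150000000UL		/* assumed: 150 MHz system clock */
#define DMA_FREQ		((MCF_CLK / 2) / 16)	/* 4.6875 MHz timer input */
#define CYC2NS_SCALE_FACTOR	10			/* 2^10, as in the patch */
#define CYC2NS_SCALE		((1000000UL << CYC2NS_SCALE_FACTOR) / (DMA_FREQ / 1000))

static unsigned long long cycles2ns(unsigned long cycl)
{
	return ((unsigned long long)cycl * CYC2NS_SCALE) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	/* One timer tick: roughly 1e9 / 4687500 ~= 213 ns, matching the comment. */
	printf("1 cycle   -> %llu ns\n", cycles2ns(1));
	/* A full 32-bit wrap: ~916 s, i.e. "overflow almost every 15 minutes". */
	printf("2^32 - 1  -> %llu ns\n", cycles2ns(0xffffffffUL));
	return 0;
}
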
diff --git a/arch/m68knommu/platform/coldfire/head.S b/arch/m68knommu/platform/coldfire/head.S
index b9aa0ca29bfb..2b0d73c0cc32 100644
--- a/arch/m68knommu/platform/coldfire/head.S
+++ b/arch/m68knommu/platform/coldfire/head.S
@@ -10,6 +10,7 @@
10 10
11#include <linux/sys.h> 11#include <linux/sys.h>
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <linux/init.h>
13#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
14#include <asm/coldfire.h> 15#include <asm/coldfire.h>
15#include <asm/mcfcache.h> 16#include <asm/mcfcache.h>
@@ -126,7 +127,7 @@ _ramend:
126 127
127/*****************************************************************************/ 128/*****************************************************************************/
128 129
129.text 130__HEAD
130 131
131/* 132/*
132 * This is the codes first entry point. This is where it all 133 * This is the codes first entry point. This is where it all
diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c
index 4290638012e0..c5b916700b22 100644
--- a/arch/m68knommu/platform/coldfire/pit.c
+++ b/arch/m68knommu/platform/coldfire/pit.c
@@ -18,7 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/clocksource.h> 21#include <linux/clockchips.h>
22#include <asm/machdep.h> 22#include <asm/machdep.h>
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/coldfire.h> 24#include <asm/coldfire.h>
@@ -33,22 +33,86 @@
33#define FREQ ((MCF_CLK / 2) / 64) 33#define FREQ ((MCF_CLK / 2) / 64)
34#define TA(a) (MCF_IPSBAR + MCFPIT_BASE1 + (a)) 34#define TA(a) (MCF_IPSBAR + MCFPIT_BASE1 + (a))
35#define INTC0 (MCF_IPSBAR + MCFICM_INTC0) 35#define INTC0 (MCF_IPSBAR + MCFICM_INTC0)
36#define PIT_CYCLES_PER_JIFFY (FREQ / HZ)
36 37
37static u32 pit_cycles_per_jiffy;
38static u32 pit_cnt; 38static u32 pit_cnt;
39 39
40/*
41 * Initialize the PIT timer.
42 *
43 * This is also called after resume to bring the PIT into operation again.
44 */
45
46static void init_cf_pit_timer(enum clock_event_mode mode,
47 struct clock_event_device *evt)
48{
49 switch (mode) {
50 case CLOCK_EVT_MODE_PERIODIC:
51
52 __raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
53 __raw_writew(PIT_CYCLES_PER_JIFFY, TA(MCFPIT_PMR));
54 __raw_writew(MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE | \
55 MCFPIT_PCSR_OVW | MCFPIT_PCSR_RLD | \
56 MCFPIT_PCSR_CLK64, TA(MCFPIT_PCSR));
57 break;
58
59 case CLOCK_EVT_MODE_SHUTDOWN:
60 case CLOCK_EVT_MODE_UNUSED:
61
62 __raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
63 break;
64
65 case CLOCK_EVT_MODE_ONESHOT:
66
67 __raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
68 __raw_writew(MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE | \
69 MCFPIT_PCSR_OVW | MCFPIT_PCSR_CLK64, \
70 TA(MCFPIT_PCSR));
71 break;
72
73 case CLOCK_EVT_MODE_RESUME:
74 /* Nothing to do here */
75 break;
76 }
77}
78
79/*
80 * Program the next event in oneshot mode
81 *
82 * Delta is given in PIT ticks
83 */
84static int cf_pit_next_event(unsigned long delta,
85 struct clock_event_device *evt)
86{
87 __raw_writew(delta, TA(MCFPIT_PMR));
88 return 0;
89}
90
91struct clock_event_device cf_pit_clockevent = {
92 .name = "pit",
93 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
94 .set_mode = init_cf_pit_timer,
95 .set_next_event = cf_pit_next_event,
96 .shift = 32,
97 .irq = MCFINT_VECBASE + MCFINT_PIT1,
98};
99
100
101
40/***************************************************************************/ 102/***************************************************************************/
41 103
42static irqreturn_t pit_tick(int irq, void *dummy) 104static irqreturn_t pit_tick(int irq, void *dummy)
43{ 105{
106 struct clock_event_device *evt = &cf_pit_clockevent;
44 u16 pcsr; 107 u16 pcsr;
45 108
46 /* Reset the ColdFire timer */ 109 /* Reset the ColdFire timer */
47 pcsr = __raw_readw(TA(MCFPIT_PCSR)); 110 pcsr = __raw_readw(TA(MCFPIT_PCSR));
48 __raw_writew(pcsr | MCFPIT_PCSR_PIF, TA(MCFPIT_PCSR)); 111 __raw_writew(pcsr | MCFPIT_PCSR_PIF, TA(MCFPIT_PCSR));
49 112
50 pit_cnt += pit_cycles_per_jiffy; 113 pit_cnt += PIT_CYCLES_PER_JIFFY;
51 return arch_timer_interrupt(irq, dummy); 114 evt->event_handler(evt);
115 return IRQ_HANDLED;
52} 116}
53 117
54/***************************************************************************/ 118/***************************************************************************/
@@ -72,14 +136,14 @@ static cycle_t pit_read_clk(void)
72 cycles = pit_cnt; 136 cycles = pit_cnt;
73 local_irq_restore(flags); 137 local_irq_restore(flags);
74 138
75 return cycles + pit_cycles_per_jiffy - pcntr; 139 return cycles + PIT_CYCLES_PER_JIFFY - pcntr;
76} 140}
77 141
78/***************************************************************************/ 142/***************************************************************************/
79 143
80static struct clocksource pit_clk = { 144static struct clocksource pit_clk = {
81 .name = "pit", 145 .name = "pit",
82 .rating = 250, 146 .rating = 100,
83 .read = pit_read_clk, 147 .read = pit_read_clk,
84 .shift = 20, 148 .shift = 20,
85 .mask = CLOCKSOURCE_MASK(32), 149 .mask = CLOCKSOURCE_MASK(32),
@@ -92,6 +156,14 @@ void hw_timer_init(void)
92{ 156{
93 u32 imr; 157 u32 imr;
94 158
159 cf_pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
160 cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32);
161 cf_pit_clockevent.max_delta_ns =
162 clockevent_delta2ns(0xFFFF, &cf_pit_clockevent);
163 cf_pit_clockevent.min_delta_ns =
164 clockevent_delta2ns(0x3f, &cf_pit_clockevent);
165 clockevents_register_device(&cf_pit_clockevent);
166
95 setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &pit_irq); 167 setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &pit_irq);
96 168
97 __raw_writeb(ICR_INTRCONF, INTC0 + MCFINTC_ICR0 + MCFINT_PIT1); 169 __raw_writeb(ICR_INTRCONF, INTC0 + MCFINTC_ICR0 + MCFINT_PIT1);
@@ -99,13 +171,6 @@ void hw_timer_init(void)
99 imr &= ~MCFPIT_IMR_IBIT; 171 imr &= ~MCFPIT_IMR_IBIT;
100 __raw_writel(imr, INTC0 + MCFPIT_IMR); 172 __raw_writel(imr, INTC0 + MCFPIT_IMR);
101 173
102 /* Set up PIT timer 1 as poll clock */
103 pit_cycles_per_jiffy = FREQ / HZ;
104 __raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
105 __raw_writew(pit_cycles_per_jiffy, TA(MCFPIT_PMR));
106 __raw_writew(MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE | MCFPIT_PCSR_OVW |
107 MCFPIT_PCSR_RLD | MCFPIT_PCSR_CLK64, TA(MCFPIT_PCSR));
108
109 pit_clk.mult = clocksource_hz2mult(FREQ, pit_clk.shift); 174 pit_clk.mult = clocksource_hz2mult(FREQ, pit_clk.shift);
110 clocksource_register(&pit_clk); 175 clocksource_register(&pit_clk);
111} 176}
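
The clockevent limits programmed in hw_timer_init() above follow from the 16-bit PMR register and the PIT input clock: clockevent_delta2ns(delta, ...) works out to roughly delta * NSEC_PER_SEC / FREQ. A minimal host-side sketch of that estimate, assuming MCF_CLK = 64000000 (board specific, not stated in the patch):

#include <stdio.h>

#define MCF_CLK		64000000UL		/* assumed; the real value depends on the board */
#define FREQ		((MCF_CLK / 2) / 64)	/* PIT input clock, 500 kHz here */
#define NSEC_PER_SEC	1000000000ULL

int main(void)
{
	/* PMR is a 16-bit register, so the largest programmable delta is 0xFFFF ticks. */
	unsigned long long max_ns = 0xFFFFULL * NSEC_PER_SEC / FREQ;
	unsigned long long min_ns = 0x3fULL * NSEC_PER_SEC / FREQ;

	printf("max_delta_ns ~= %llu\n", max_ns);	/* ~131 ms at 500 kHz */
	printf("min_delta_ns ~= %llu\n", min_ns);	/* ~126 us */
	return 0;
}
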
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b9c754f4070c..b4c4eaa5dd26 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -713,7 +713,7 @@ config CSRC_SB1250
713 713
714config GPIO_TXX9 714config GPIO_TXX9
715 select GENERIC_GPIO 715 select GENERIC_GPIO
716 select HAVE_GPIO_LIB 716 select ARCH_REQUIRE_GPIOLIB
717 bool 717 bool
718 718
719config CFE 719config CFE
diff --git a/arch/mips/au1000/common/power.c b/arch/mips/au1000/common/power.c
index 2166b9e1e80c..bd854a6d1d89 100644
--- a/arch/mips/au1000/common/power.c
+++ b/arch/mips/au1000/common/power.c
@@ -31,7 +31,6 @@
31 31
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/pm.h> 33#include <linux/pm.h>
34#include <linux/pm_legacy.h>
35#include <linux/sysctl.h> 34#include <linux/sysctl.h>
36#include <linux/jiffies.h> 35#include <linux/jiffies.h>
37 36
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index c266211ed653..2fefb14414b7 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -11,7 +11,6 @@
11#include <linux/file.h> 11#include <linux/file.h>
12#include <linux/smp_lock.h> 12#include <linux/smp_lock.h>
13#include <linux/highuid.h> 13#include <linux/highuid.h>
14#include <linux/dirent.h>
15#include <linux/resource.h> 14#include <linux/resource.h>
16#include <linux/highmem.h> 15#include <linux/highmem.h>
17#include <linux/time.h> 16#include <linux/time.h>
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index e7ed0ac48537..1f60e27523d9 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/moduleloader.h> 23#include <linux/moduleloader.h>
24#include <linux/elf.h> 24#include <linux/elf.h>
25#include <linux/mm.h>
25#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/fs.h> 28#include <linux/fs.h>
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c06f5b5d764c..b16facd9ea8e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -53,7 +53,7 @@ void __noreturn cpu_idle(void)
53{ 53{
54 /* endless idle loop with no priority at all */ 54 /* endless idle loop with no priority at all */
55 while (1) { 55 while (1) {
56 tick_nohz_stop_sched_tick(); 56 tick_nohz_stop_sched_tick(1);
57 while (!need_resched()) { 57 while (!need_resched()) {
58#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 58#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
59 extern void smtc_idle_loop_hook(void); 59 extern void smtc_idle_loop_hook(void);
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
index 5eb4681a73d2..0632e2a849c0 100644
--- a/arch/mips/kernel/stacktrace.c
+++ b/arch/mips/kernel/stacktrace.c
@@ -7,6 +7,7 @@
7 */ 7 */
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/stacktrace.h> 9#include <linux/stacktrace.h>
10#include <linux/module.h>
10#include <asm/stacktrace.h> 11#include <asm/stacktrace.h>
11 12
12/* 13/*
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 3523c8d12eda..343015a2f418 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -52,7 +52,7 @@ asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
52 int fd[2]; 52 int fd[2];
53 int error, res; 53 int error, res;
54 54
55 error = do_pipe(fd); 55 error = do_pipe_flags(fd, 0);
56 if (error) { 56 if (error) {
57 res = error; 57 res = error;
58 goto out; 58 goto out;
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index 48932ce1d730..d9c79d8be81d 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -4,6 +4,7 @@
4 * Copyright 2000 - 2001 Kanoj Sarcar (kanoj@sgi.com) 4 * Copyright 2000 - 2001 Kanoj Sarcar (kanoj@sgi.com)
5 */ 5 */
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/mm.h>
7#include <linux/mmzone.h> 8#include <linux/mmzone.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/nodemask.h> 10#include <linux/nodemask.h>
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 42cd10956306..060d853d7b35 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -33,8 +33,6 @@
33#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT) 33#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
34#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT) 34#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
35 35
36static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES];
37
38struct node_data *__node_data[MAX_COMPACT_NODES]; 36struct node_data *__node_data[MAX_COMPACT_NODES];
39 37
40EXPORT_SYMBOL(__node_data); 38EXPORT_SYMBOL(__node_data);
@@ -403,7 +401,7 @@ static void __init node_mem_init(cnodeid_t node)
403 */ 401 */
404 __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); 402 __node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
405 403
406 NODE_DATA(node)->bdata = &plat_node_bdata[node]; 404 NODE_DATA(node)->bdata = &bootmem_node_data[node];
407 NODE_DATA(node)->node_start_pfn = start_pfn; 405 NODE_DATA(node)->node_start_pfn = start_pfn;
408 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; 406 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
409 407
diff --git a/arch/mn10300/boot/compressed/misc.c b/arch/mn10300/boot/compressed/misc.c
index ded207efc97a..f673383518e4 100644
--- a/arch/mn10300/boot/compressed/misc.c
+++ b/arch/mn10300/boot/compressed/misc.c
@@ -153,26 +153,9 @@ static uch *output_data;
153static unsigned long output_ptr; 153static unsigned long output_ptr;
154 154
155 155
156static void *malloc(int size);
157
158static inline void free(void *where)
159{ /* Don't care */
160}
161
162static unsigned long free_mem_ptr = (unsigned long) &end; 156static unsigned long free_mem_ptr = (unsigned long) &end;
163static unsigned long free_mem_end_ptr = (unsigned long) &end + 0x90000; 157static unsigned long free_mem_end_ptr = (unsigned long) &end + 0x90000;
164 158
165static inline void gzip_mark(void **ptr)
166{
167 kputs(".");
168 *ptr = (void *) free_mem_ptr;
169}
170
171static inline void gzip_release(void **ptr)
172{
173 free_mem_ptr = (unsigned long) *ptr;
174}
175
176#define INPLACE_MOVE_ROUTINE 0x1000 159#define INPLACE_MOVE_ROUTINE 0x1000
177#define LOW_BUFFER_START 0x2000 160#define LOW_BUFFER_START 0x2000
178#define LOW_BUFFER_END 0x90000 161#define LOW_BUFFER_END 0x90000
@@ -186,26 +169,6 @@ static int lines, cols;
186 169
187#include "../../../../lib/inflate.c" 170#include "../../../../lib/inflate.c"
188 171
189static void *malloc(int size)
190{
191 void *p;
192
193 if (size < 0)
194 error("Malloc error\n");
195 if (!free_mem_ptr)
196 error("Memory error\n");
197
198 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
199
200 p = (void *) free_mem_ptr;
201 free_mem_ptr += size;
202
203 if (free_mem_ptr >= free_mem_end_ptr)
204 error("\nOut of memory\n");
205
206 return p;
207}
208
209static inline void scroll(void) 172static inline void scroll(void)
210{ 173{
211 int i; 174 int i;
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 8c5d88c7b90a..8cee387a24fd 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -67,8 +67,8 @@ void __init paging_init(void)
67 67
68 /* declare the sizes of the RAM zones (only use the normal zone) */ 68 /* declare the sizes of the RAM zones (only use the normal zone) */
69 zones_size[ZONE_NORMAL] = 69 zones_size[ZONE_NORMAL] =
70 (contig_page_data.bdata->node_low_pfn) - 70 contig_page_data.bdata->node_low_pfn -
71 (contig_page_data.bdata->node_boot_start >> PAGE_SHIFT); 71 contig_page_data.bdata->node_min_pfn;
72 72
73 /* pass the memory from the bootmem allocator to the main allocator */ 73 /* pass the memory from the bootmem allocator to the main allocator */
74 free_area_init(zones_size); 74 free_area_init(zones_size);
@@ -87,7 +87,7 @@ void __init mem_init(void)
87 if (!mem_map) 87 if (!mem_map)
88 BUG(); 88 BUG();
89 89
90#define START_PFN (contig_page_data.bdata->node_boot_start >> PAGE_SHIFT) 90#define START_PFN (contig_page_data.bdata->node_min_pfn)
91#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn) 91#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
92 92
93 max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN; 93 max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 0c5b9dabb475..be255ebb609c 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -448,7 +448,7 @@ int hpux_pipe(int *kstack_fildes)
448 int error; 448 int error;
449 449
450 lock_kernel(); 450 lock_kernel();
451 error = do_pipe(kstack_fildes); 451 error = do_pipe_flags(kstack_fildes, 0);
452 unlock_kernel(); 452 unlock_kernel();
453 return error; 453 return error;
454} 454}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b4d6c8777ed0..7c155c254e72 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -36,7 +36,6 @@ extern int data_start;
36 36
37#ifdef CONFIG_DISCONTIGMEM 37#ifdef CONFIG_DISCONTIGMEM
38struct node_map_data node_data[MAX_NUMNODES] __read_mostly; 38struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
39bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
40unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; 39unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
41#endif 40#endif
42 41
@@ -262,7 +261,7 @@ static void __init setup_bootmem(void)
262#ifdef CONFIG_DISCONTIGMEM 261#ifdef CONFIG_DISCONTIGMEM
263 for (i = 0; i < MAX_PHYSMEM_RANGES; i++) { 262 for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
264 memset(NODE_DATA(i), 0, sizeof(pg_data_t)); 263 memset(NODE_DATA(i), 0, sizeof(pg_data_t));
265 NODE_DATA(i)->bdata = &bmem_data[i]; 264 NODE_DATA(i)->bdata = &bootmem_node_data[i];
266 } 265 }
267 memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); 266 memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
268 267
@@ -888,7 +887,7 @@ void __init paging_init(void)
888 } 887 }
889#endif 888#endif
890 889
891 free_area_init_node(i, NODE_DATA(i), zones_size, 890 free_area_init_node(i, zones_size,
892 pmem_ranges[i].start_pfn, NULL); 891 pmem_ranges[i].start_pfn, NULL);
893 } 892 }
894} 893}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4c22242b396f..fe88418167c5 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -110,8 +110,12 @@ config PPC
110 default y 110 default y
111 select HAVE_DYNAMIC_FTRACE 111 select HAVE_DYNAMIC_FTRACE
112 select HAVE_FTRACE 112 select HAVE_FTRACE
113 select ARCH_WANT_OPTIONAL_GPIOLIB
113 select HAVE_IDE 114 select HAVE_IDE
115 select HAVE_IOREMAP_PROT
116 select HAVE_EFFICIENT_UNALIGNED_ACCESS
114 select HAVE_KPROBES 117 select HAVE_KPROBES
118 select HAVE_ARCH_KGDB
115 select HAVE_KRETPROBES 119 select HAVE_KRETPROBES
116 select HAVE_LMB 120 select HAVE_LMB
117 select HAVE_DMA_ATTRS if PPC64 121 select HAVE_DMA_ATTRS if PPC64
@@ -842,6 +846,7 @@ source "crypto/Kconfig"
842config PPC_CLOCK 846config PPC_CLOCK
843 bool 847 bool
844 default n 848 default n
849 select HAVE_CLK
845 850
846config PPC_LIB_RHEAP 851config PPC_LIB_RHEAP
847 bool 852 bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2840ab69ef4e..8c8aadbe9563 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -41,22 +41,6 @@ config HCALL_STATS
41 This option will add a small amount of overhead to all hypervisor 41 This option will add a small amount of overhead to all hypervisor
42 calls. 42 calls.
43 43
44config DEBUGGER
45 bool "Enable debugger hooks"
46 depends on DEBUG_KERNEL
47 help
48 Include in-kernel hooks for kernel debuggers. Unless you are
49 intending to debug the kernel, say N here.
50
51config KGDB
52 bool "Include kgdb kernel debugger"
53 depends on DEBUGGER && (BROKEN || PPC_GEN550 || 4xx)
54 select DEBUG_INFO
55 help
56 Include in-kernel hooks for kgdb, the Linux kernel source level
57 debugger. See <http://kgdb.sourceforge.net/> for more information.
58 Unless you are intending to debug the kernel, say N here.
59
60config CODE_PATCHING_SELFTEST 44config CODE_PATCHING_SELFTEST
61 bool "Run self-tests of the code-patching code." 45 bool "Run self-tests of the code-patching code."
62 depends on DEBUG_KERNEL 46 depends on DEBUG_KERNEL
@@ -67,36 +51,9 @@ config FTR_FIXUP_SELFTEST
67 depends on DEBUG_KERNEL 51 depends on DEBUG_KERNEL
68 default n 52 default n
69 53
70choice
71 prompt "Serial Port"
72 depends on KGDB
73 default KGDB_TTYS1
74
75config KGDB_TTYS0
76 bool "ttyS0"
77
78config KGDB_TTYS1
79 bool "ttyS1"
80
81config KGDB_TTYS2
82 bool "ttyS2"
83
84config KGDB_TTYS3
85 bool "ttyS3"
86
87endchoice
88
89config KGDB_CONSOLE
90 bool "Enable serial console thru kgdb port"
91 depends on KGDB && 8xx || CPM2
92 help
93 If you enable this, all serial console messages will be sent
94 over the gdb stub.
95 If unsure, say N.
96
97config XMON 54config XMON
98 bool "Include xmon kernel debugger" 55 bool "Include xmon kernel debugger"
99 depends on DEBUGGER 56 depends on DEBUG_KERNEL
100 help 57 help
101 Include in-kernel hooks for the xmon kernel monitor/debugger. 58 Include in-kernel hooks for the xmon kernel monitor/debugger.
102 Unless you are intending to debug the kernel, say N here. 59 Unless you are intending to debug the kernel, say N here.
@@ -126,6 +83,11 @@ config XMON_DISASSEMBLY
126 to say Y here, unless you're building for a memory-constrained 83 to say Y here, unless you're building for a memory-constrained
127 system. 84 system.
128 85
86config DEBUGGER
87 bool
88 depends on KGDB || XMON
89 default y
90
129config IRQSTACKS 91config IRQSTACKS
130 bool "Use separate kernel stacks when processing interrupts" 92 bool "Use separate kernel stacks when processing interrupts"
131 help 93 help
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bf0b1fd0ec34..1a4094704b1f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -74,6 +74,7 @@ obj-y += time.o prom.o traps.o setup-common.o \
74 misc_$(CONFIG_WORD_SIZE).o 74 misc_$(CONFIG_WORD_SIZE).o
75obj-$(CONFIG_PPC32) += entry_32.o setup_32.o 75obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
76obj-$(CONFIG_PPC64) += dma_64.o iommu.o 76obj-$(CONFIG_PPC64) += dma_64.o iommu.o
77obj-$(CONFIG_KGDB) += kgdb.o
77obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o 78obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
78obj-$(CONFIG_MODULES) += ppc_ksyms.o 79obj-$(CONFIG_MODULES) += ppc_ksyms.o
79obj-$(CONFIG_BOOTX_TEXT) += btext.o 80obj-$(CONFIG_BOOTX_TEXT) += btext.o
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b936a1dd0a50..25c273c761d1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -23,6 +23,9 @@
23struct cpu_spec* cur_cpu_spec = NULL; 23struct cpu_spec* cur_cpu_spec = NULL;
24EXPORT_SYMBOL(cur_cpu_spec); 24EXPORT_SYMBOL(cur_cpu_spec);
25 25
26/* The platform string corresponding to the real PVR */
27const char *powerpc_base_platform;
28
26/* NOTE: 29/* NOTE:
27 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's 30 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
28 * the responsibility of the appropriate CPU save/restore functions to 31 * the responsibility of the appropriate CPU save/restore functions to
@@ -1652,6 +1655,14 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
1652 } else 1655 } else
1653 *t = *s; 1656 *t = *s;
1654 *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec; 1657 *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
1658
1659 /*
1660 * Set the base platform string once; assumes
1661 * we're called with real pvr first.
1662 */
1663 if (*PTRRELOC(&powerpc_base_platform) == NULL)
1664 *PTRRELOC(&powerpc_base_platform) = t->platform;
1665
1655#if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE) 1666#if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
1656 /* ppc64 and booke expect identify_cpu to also call 1667 /* ppc64 and booke expect identify_cpu to also call
1657 * setup_cpu for that processor. I will consolidate 1668 * setup_cpu for that processor. I will consolidate
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index da52269aec1e..81c8324a4a3c 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -148,7 +148,7 @@ transfer_to_handler:
148 /* Check to see if the dbcr0 register is set up to debug. Use the 148 /* Check to see if the dbcr0 register is set up to debug. Use the
149 internal debug mode bit to do this. */ 149 internal debug mode bit to do this. */
150 lwz r12,THREAD_DBCR0(r12) 150 lwz r12,THREAD_DBCR0(r12)
151 andis. r12,r12,DBCR0_IDM@h 151 andis. r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
152 beq+ 3f 152 beq+ 3f
153 /* From user and task is ptraced - load up global dbcr0 */ 153 /* From user and task is ptraced - load up global dbcr0 */
154 li r12,-1 /* clear all pending debug events */ 154 li r12,-1 /* clear all pending debug events */
@@ -292,7 +292,7 @@ syscall_exit_cont:
292 /* If the process has its own DBCR0 value, load it up. The internal 292 /* If the process has its own DBCR0 value, load it up. The internal
293 debug mode bit tells us that dbcr0 should be loaded. */ 293 debug mode bit tells us that dbcr0 should be loaded. */
294 lwz r0,THREAD+THREAD_DBCR0(r2) 294 lwz r0,THREAD+THREAD_DBCR0(r2)
295 andis. r10,r0,DBCR0_IDM@h 295 andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
296 bnel- load_dbcr0 296 bnel- load_dbcr0
297#endif 297#endif
298#ifdef CONFIG_44x 298#ifdef CONFIG_44x
@@ -720,7 +720,7 @@ restore_user:
720 /* Check whether this process has its own DBCR0 value. The internal 720 /* Check whether this process has its own DBCR0 value. The internal
721 debug mode bit tells us that dbcr0 should be loaded. */ 721 debug mode bit tells us that dbcr0 should be loaded. */
722 lwz r0,THREAD+THREAD_DBCR0(r2) 722 lwz r0,THREAD+THREAD_DBCR0(r2)
723 andis. r10,r0,DBCR0_IDM@h 723 andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
724 bnel- load_dbcr0 724 bnel- load_dbcr0
725#endif 725#endif
726 726
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index c3cf0e8f3ac1..d308a9f70f1b 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -60,7 +60,7 @@ void cpu_idle(void)
60 60
61 set_thread_flag(TIF_POLLING_NRFLAG); 61 set_thread_flag(TIF_POLLING_NRFLAG);
62 while (1) { 62 while (1) {
63 tick_nohz_stop_sched_tick(); 63 tick_nohz_stop_sched_tick(1);
64 while (!need_resched() && !cpu_should_die()) { 64 while (!need_resched() && !cpu_should_die()) {
65 ppc64_runlatch_off(); 65 ppc64_runlatch_off();
66 66
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2385f68c1751..550a19399bfa 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -49,6 +49,8 @@ static int novmerge = 1;
49 49
50static int protect4gb = 1; 50static int protect4gb = 1;
51 51
52static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
53
52static inline unsigned long iommu_num_pages(unsigned long vaddr, 54static inline unsigned long iommu_num_pages(unsigned long vaddr,
53 unsigned long slen) 55 unsigned long slen)
54{ 56{
@@ -191,6 +193,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
191{ 193{
192 unsigned long entry, flags; 194 unsigned long entry, flags;
193 dma_addr_t ret = DMA_ERROR_CODE; 195 dma_addr_t ret = DMA_ERROR_CODE;
196 int build_fail;
194 197
195 spin_lock_irqsave(&(tbl->it_lock), flags); 198 spin_lock_irqsave(&(tbl->it_lock), flags);
196 199
@@ -205,9 +208,21 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
205 ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */ 208 ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
206 209
207 /* Put the TCEs in the HW table */ 210 /* Put the TCEs in the HW table */
208 ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK, 211 build_fail = ppc_md.tce_build(tbl, entry, npages,
209 direction, attrs); 212 (unsigned long)page & IOMMU_PAGE_MASK,
213 direction, attrs);
214
215 /* ppc_md.tce_build() only returns non-zero for transient errors.
216 * Clean up the table bitmap in this case and return
217 * DMA_ERROR_CODE. For all other errors the functionality is
218 * not altered.
219 */
220 if (unlikely(build_fail)) {
221 __iommu_free(tbl, ret, npages);
210 222
223 spin_unlock_irqrestore(&(tbl->it_lock), flags);
224 return DMA_ERROR_CODE;
225 }
211 226
212 /* Flush/invalidate TLB caches if necessary */ 227 /* Flush/invalidate TLB caches if necessary */
213 if (ppc_md.tce_flush) 228 if (ppc_md.tce_flush)
@@ -276,7 +291,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
276 dma_addr_t dma_next = 0, dma_addr; 291 dma_addr_t dma_next = 0, dma_addr;
277 unsigned long flags; 292 unsigned long flags;
278 struct scatterlist *s, *outs, *segstart; 293 struct scatterlist *s, *outs, *segstart;
279 int outcount, incount, i; 294 int outcount, incount, i, build_fail = 0;
280 unsigned int align; 295 unsigned int align;
281 unsigned long handle; 296 unsigned long handle;
282 unsigned int max_seg_size; 297 unsigned int max_seg_size;
@@ -337,8 +352,11 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
337 npages, entry, dma_addr); 352 npages, entry, dma_addr);
338 353
339 /* Insert into HW table */ 354 /* Insert into HW table */
340 ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, 355 build_fail = ppc_md.tce_build(tbl, entry, npages,
341 direction, attrs); 356 vaddr & IOMMU_PAGE_MASK,
357 direction, attrs);
 358 if (unlikely(build_fail))
359 goto failure;
342 360
343 /* If we are in an open segment, try merging */ 361 /* If we are in an open segment, try merging */
344 if (segstart != s) { 362 if (segstart != s) {
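
The iommu_alloc()/iommu_map_sg() changes above add an error path: when ppc_md.tce_build() reports a transient failure, the freshly reserved table range is released again and DMA_ERROR_CODE is handed back to the caller. Below is a minimal userspace sketch of that reserve/program/undo pattern; table_alloc(), table_free(), hw_program() and MY_DMA_ERROR are made-up stand-ins, not kernel interfaces.

/*
 * Sketch of the reserve -> program -> undo-on-failure pattern added to
 * iommu_alloc() above.  All names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define MY_DMA_ERROR ((uint64_t)~0ULL)

static uint64_t table_alloc(unsigned int npages)
{
	(void)npages;
	return 0x100;                      /* pretend an entry was reserved in a bitmap */
}

static void table_free(uint64_t entry, unsigned int npages)
{
	printf("released entry %#llx (%u pages)\n",
	       (unsigned long long)entry, npages);
}

static int hw_program(uint64_t entry, unsigned int npages)
{
	(void)entry;
	return npages > 2 ? -1 : 0;        /* fake a transient failure for big requests */
}

static uint64_t map_pages(unsigned int npages)
{
	uint64_t entry = table_alloc(npages);

	if (hw_program(entry, npages)) {
		/* hardware programming failed: undo the reservation */
		table_free(entry, npages);
		return MY_DMA_ERROR;
	}
	return entry;
}

int main(void)
{
	printf("map_pages(1) = %#llx\n", (unsigned long long)map_pages(1));
	printf("map_pages(4) = %#llx\n", (unsigned long long)map_pages(4));
	return 0;
}
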
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
new file mode 100644
index 000000000000..b4fdf2f2743c
--- /dev/null
+++ b/arch/powerpc/kernel/kgdb.c
@@ -0,0 +1,410 @@
1/*
2 * PowerPC backend to the KGDB stub.
3 *
4 * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
5 * Copyright (C) 2003 Timesys Corporation.
6 * Copyright (C) 2004-2006 MontaVista Software, Inc.
7 * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
8 * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
9 * Sergei Shtylyov <sshtylyov@ru.mvista.com>
10 * Copyright (C) 2007-2008 Wind River Systems, Inc.
11 *
12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program as licensed "as is" without any warranty of any
14 * kind, whether express or implied.
15 */
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/kgdb.h>
20#include <linux/smp.h>
21#include <linux/signal.h>
22#include <linux/ptrace.h>
23#include <asm/current.h>
24#include <asm/processor.h>
25#include <asm/machdep.h>
26
27/*
28 * This table contains the mapping between PowerPC hardware trap types, and
29 * signals, which are primarily what GDB understands. GDB and the kernel
30 * don't always agree on values, so we use constants taken from gdb-6.2.
31 */
32static struct hard_trap_info
33{
34 unsigned int tt; /* Trap type code for powerpc */
35 unsigned char signo; /* Signal that we map this trap into */
36} hard_trap_info[] = {
37 { 0x0100, 0x02 /* SIGINT */ }, /* system reset */
38 { 0x0200, 0x0b /* SIGSEGV */ }, /* machine check */
39 { 0x0300, 0x0b /* SIGSEGV */ }, /* data access */
40 { 0x0400, 0x0b /* SIGSEGV */ }, /* instruction access */
41 { 0x0500, 0x02 /* SIGINT */ }, /* external interrupt */
42 { 0x0600, 0x0a /* SIGBUS */ }, /* alignment */
43 { 0x0700, 0x05 /* SIGTRAP */ }, /* program check */
44 { 0x0800, 0x08 /* SIGFPE */ }, /* fp unavailable */
45 { 0x0900, 0x0e /* SIGALRM */ }, /* decrementer */
46 { 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */
47#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
48 { 0x2002, 0x05 /* SIGTRAP */ }, /* debug */
49#if defined(CONFIG_FSL_BOOKE)
50 { 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */
51 { 0x2020, 0x08 /* SIGFPE */ }, /* spe unavailable */
52 { 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */
53 { 0x2040, 0x08 /* SIGFPE */ }, /* spe fp data */
54 { 0x2050, 0x08 /* SIGFPE */ }, /* spe fp round */
 55 { 0x2060, 0x0e /* SIGILL */ }, /* performance monitor */
56 { 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */
57 { 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */
58 { 0x3200, 0x02 /* SIGINT */ }, /* watchdog */
59#else /* ! CONFIG_FSL_BOOKE */
60 { 0x1000, 0x0e /* SIGALRM */ }, /* prog interval timer */
61 { 0x1010, 0x0e /* SIGALRM */ }, /* fixed interval timer */
62 { 0x1020, 0x02 /* SIGINT */ }, /* watchdog */
63 { 0x2010, 0x08 /* SIGFPE */ }, /* fp unavailable */
64 { 0x2020, 0x08 /* SIGFPE */ }, /* ap unavailable */
65#endif
66#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
67 { 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */
68#if defined(CONFIG_8xx)
69 { 0x1000, 0x04 /* SIGILL */ }, /* software emulation */
70#else /* ! CONFIG_8xx */
71 { 0x0f00, 0x04 /* SIGILL */ }, /* performance monitor */
72 { 0x0f20, 0x08 /* SIGFPE */ }, /* altivec unavailable */
73 { 0x1300, 0x05 /* SIGTRAP */ }, /* instruction address break */
74#if defined(CONFIG_PPC64)
75 { 0x1200, 0x05 /* SIGILL */ }, /* system error */
76 { 0x1500, 0x04 /* SIGILL */ }, /* soft patch */
77 { 0x1600, 0x04 /* SIGILL */ }, /* maintenance */
78 { 0x1700, 0x08 /* SIGFPE */ }, /* altivec assist */
79 { 0x1800, 0x04 /* SIGILL */ }, /* thermal */
80#else /* ! CONFIG_PPC64 */
81 { 0x1400, 0x02 /* SIGINT */ }, /* SMI */
82 { 0x1600, 0x08 /* SIGFPE */ }, /* altivec assist */
83 { 0x1700, 0x04 /* SIGILL */ }, /* TAU */
84 { 0x2000, 0x05 /* SIGTRAP */ }, /* run mode */
85#endif
86#endif
87#endif
88 { 0x0000, 0x00 } /* Must be last */
89};
90
91static int computeSignal(unsigned int tt)
92{
93 struct hard_trap_info *ht;
94
95 for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
96 if (ht->tt == tt)
97 return ht->signo;
98
99 return SIGHUP; /* default for things we don't know about */
100}
101
102static int kgdb_call_nmi_hook(struct pt_regs *regs)
103{
104 kgdb_nmicallback(raw_smp_processor_id(), regs);
105 return 0;
106}
107
108#ifdef CONFIG_SMP
109void kgdb_roundup_cpus(unsigned long flags)
110{
111 smp_send_debugger_break(MSG_ALL_BUT_SELF);
112}
113#endif
114
115/* KGDB functions to use existing PowerPC64 hooks. */
116static int kgdb_debugger(struct pt_regs *regs)
117{
118 return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
119}
120
121static int kgdb_handle_breakpoint(struct pt_regs *regs)
122{
123 if (user_mode(regs))
124 return 0;
125
126 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
127 return 0;
128
129 if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
130 regs->nip += 4;
131
132 return 1;
133}
134
135static int kgdb_singlestep(struct pt_regs *regs)
136{
137 struct thread_info *thread_info, *exception_thread_info;
138
139 if (user_mode(regs))
140 return 0;
141
142 /*
 143 * On Book E and perhaps other processors, singlestep is handled on
144 * the critical exception stack. This causes current_thread_info()
 145 * to fail, since it locates the thread_info by masking off
146 * the low bits of the current stack pointer. We work around
147 * this issue by copying the thread_info from the kernel stack
148 * before calling kgdb_handle_exception, and copying it back
149 * afterwards. On most processors the copy is avoided since
150 * exception_thread_info == thread_info.
151 */
152 thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
153 exception_thread_info = current_thread_info();
154
155 if (thread_info != exception_thread_info)
156 memcpy(exception_thread_info, thread_info, sizeof *thread_info);
157
158 kgdb_handle_exception(0, SIGTRAP, 0, regs);
159
160 if (thread_info != exception_thread_info)
161 memcpy(thread_info, exception_thread_info, sizeof *thread_info);
162
163 return 1;
164}
165
166static int kgdb_iabr_match(struct pt_regs *regs)
167{
168 if (user_mode(regs))
169 return 0;
170
171 if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
172 return 0;
173 return 1;
174}
175
176static int kgdb_dabr_match(struct pt_regs *regs)
177{
178 if (user_mode(regs))
179 return 0;
180
181 if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
182 return 0;
183 return 1;
184}
185
186#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)
187
188#define PACK32(ptr, src) do { \
189 u32 *ptr32; \
190 ptr32 = (u32 *)ptr; \
191 *(ptr32++) = (src); \
192 ptr = (unsigned long *)ptr32; \
193 } while (0)
194
195
196void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
197{
198 unsigned long *ptr = gdb_regs;
199 int reg;
200
201 memset(gdb_regs, 0, NUMREGBYTES);
202
203 for (reg = 0; reg < 32; reg++)
204 PACK64(ptr, regs->gpr[reg]);
205
206#ifdef CONFIG_FSL_BOOKE
207#ifdef CONFIG_SPE
208 for (reg = 0; reg < 32; reg++)
209 PACK64(ptr, current->thread.evr[reg]);
210#else
211 ptr += 32;
212#endif
213#else
214 /* fp registers not used by kernel, leave zero */
215 ptr += 32 * 8 / sizeof(long);
216#endif
217
218 PACK64(ptr, regs->nip);
219 PACK64(ptr, regs->msr);
220 PACK32(ptr, regs->ccr);
221 PACK64(ptr, regs->link);
222 PACK64(ptr, regs->ctr);
223 PACK32(ptr, regs->xer);
224
225 BUG_ON((unsigned long)ptr >
226 (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
227}
228
229void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
230{
231 struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
232 STACK_FRAME_OVERHEAD);
233 unsigned long *ptr = gdb_regs;
234 int reg;
235
236 memset(gdb_regs, 0, NUMREGBYTES);
237
238 /* Regs GPR0-2 */
239 for (reg = 0; reg < 3; reg++)
240 PACK64(ptr, regs->gpr[reg]);
241
242 /* Regs GPR3-13 are caller saved, not in regs->gpr[] */
243 ptr += 11;
244
245 /* Regs GPR14-31 */
246 for (reg = 14; reg < 32; reg++)
247 PACK64(ptr, regs->gpr[reg]);
248
249#ifdef CONFIG_FSL_BOOKE
250#ifdef CONFIG_SPE
251 for (reg = 0; reg < 32; reg++)
252 PACK64(ptr, p->thread.evr[reg]);
253#else
254 ptr += 32;
255#endif
256#else
257 /* fp registers not used by kernel, leave zero */
258 ptr += 32 * 8 / sizeof(long);
259#endif
260
261 PACK64(ptr, regs->nip);
262 PACK64(ptr, regs->msr);
263 PACK32(ptr, regs->ccr);
264 PACK64(ptr, regs->link);
265 PACK64(ptr, regs->ctr);
266 PACK32(ptr, regs->xer);
267
268 BUG_ON((unsigned long)ptr >
269 (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
270}
271
272#define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0)
273
274#define UNPACK32(dest, ptr) do { \
275 u32 *ptr32; \
276 ptr32 = (u32 *)ptr; \
277 dest = *(ptr32++); \
278 ptr = (unsigned long *)ptr32; \
279 } while (0)
280
281void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
282{
283 unsigned long *ptr = gdb_regs;
284 int reg;
285#ifdef CONFIG_SPE
286 union {
287 u32 v32[2];
288 u64 v64;
289 } acc;
290#endif
291
292 for (reg = 0; reg < 32; reg++)
293 UNPACK64(regs->gpr[reg], ptr);
294
295#ifdef CONFIG_FSL_BOOKE
296#ifdef CONFIG_SPE
297 for (reg = 0; reg < 32; reg++)
298 UNPACK64(current->thread.evr[reg], ptr);
299#else
300 ptr += 32;
301#endif
302#else
303 /* fp registers not used by kernel, leave zero */
304 ptr += 32 * 8 / sizeof(int);
305#endif
306
307 UNPACK64(regs->nip, ptr);
308 UNPACK64(regs->msr, ptr);
309 UNPACK32(regs->ccr, ptr);
310 UNPACK64(regs->link, ptr);
311 UNPACK64(regs->ctr, ptr);
312 UNPACK32(regs->xer, ptr);
313
314 BUG_ON((unsigned long)ptr >
315 (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
316}
317
318/*
 319 * This function does PowerPC-specific processing for interfacing to gdb.
320 */
321int kgdb_arch_handle_exception(int vector, int signo, int err_code,
322 char *remcom_in_buffer, char *remcom_out_buffer,
323 struct pt_regs *linux_regs)
324{
325 char *ptr = &remcom_in_buffer[1];
326 unsigned long addr;
327
328 switch (remcom_in_buffer[0]) {
329 /*
330 * sAA..AA Step one instruction from AA..AA
331 * This will return an error to gdb ..
332 */
333 case 's':
334 case 'c':
335 /* handle the optional parameter */
336 if (kgdb_hex2long(&ptr, &addr))
337 linux_regs->nip = addr;
338
339 atomic_set(&kgdb_cpu_doing_single_step, -1);
340 /* set the trace bit if we're stepping */
341 if (remcom_in_buffer[0] == 's') {
342#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
343 mtspr(SPRN_DBCR0,
344 mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
345 linux_regs->msr |= MSR_DE;
346#else
347 linux_regs->msr |= MSR_SE;
348#endif
349 kgdb_single_step = 1;
350 if (kgdb_contthread)
351 atomic_set(&kgdb_cpu_doing_single_step,
352 raw_smp_processor_id());
353 }
354 return 0;
355 }
356
357 return -1;
358}
359
360/*
361 * Global data
362 */
363struct kgdb_arch arch_kgdb_ops = {
364 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
365};
366
367static int kgdb_not_implemented(struct pt_regs *regs)
368{
369 return 0;
370}
371
372static void *old__debugger_ipi;
373static void *old__debugger;
374static void *old__debugger_bpt;
375static void *old__debugger_sstep;
376static void *old__debugger_iabr_match;
377static void *old__debugger_dabr_match;
378static void *old__debugger_fault_handler;
379
380int kgdb_arch_init(void)
381{
382 old__debugger_ipi = __debugger_ipi;
383 old__debugger = __debugger;
384 old__debugger_bpt = __debugger_bpt;
385 old__debugger_sstep = __debugger_sstep;
386 old__debugger_iabr_match = __debugger_iabr_match;
387 old__debugger_dabr_match = __debugger_dabr_match;
388 old__debugger_fault_handler = __debugger_fault_handler;
389
390 __debugger_ipi = kgdb_call_nmi_hook;
391 __debugger = kgdb_debugger;
392 __debugger_bpt = kgdb_handle_breakpoint;
393 __debugger_sstep = kgdb_singlestep;
394 __debugger_iabr_match = kgdb_iabr_match;
395 __debugger_dabr_match = kgdb_dabr_match;
396 __debugger_fault_handler = kgdb_not_implemented;
397
398 return 0;
399}
400
401void kgdb_arch_exit(void)
402{
403 __debugger_ipi = old__debugger_ipi;
404 __debugger = old__debugger;
405 __debugger_bpt = old__debugger_bpt;
406 __debugger_sstep = old__debugger_sstep;
407 __debugger_iabr_match = old__debugger_iabr_match;
408 __debugger_dabr_match = old__debugger_dabr_match;
409 __debugger_fault_handler = old__debugger_fault_handler;
410}
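
The new kgdb.c above maps PowerPC trap types to the signal numbers gdb expects via a sentinel-terminated table (hard_trap_info walked by computeSignal()). The userspace sketch below reproduces just that lookup idiom with two entries copied from the table; everything else is omitted and the function names are illustrative.

/* Standalone model of the sentinel-terminated trap-to-signal lookup. */
#include <stdio.h>

struct hard_trap_info {
	unsigned int tt;      /* PowerPC trap type */
	unsigned char signo;  /* signal it is reported as */
};

static const struct hard_trap_info traps[] = {
	{ 0x0300, 0x0b },  /* data access   -> SIGSEGV */
	{ 0x0700, 0x05 },  /* program check -> SIGTRAP */
	{ 0x0000, 0x00 },  /* all-zero terminator, must be last */
};

static int compute_signal(unsigned int tt)
{
	const struct hard_trap_info *ht;

	for (ht = traps; ht->tt && ht->signo; ht++)
		if (ht->tt == tt)
			return ht->signo;

	return 1;  /* SIGHUP: default for trap types we don't know about */
}

int main(void)
{
	printf("trap 0x0700 -> signal %d\n", compute_signal(0x0700));
	printf("trap 0x1234 -> signal %d\n", compute_signal(0x1234));
	return 0;
}
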
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 4ba2af125450..de79915452c8 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -144,7 +144,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
144 kcb->kprobe_saved_msr = regs->msr; 144 kcb->kprobe_saved_msr = regs->msr;
145} 145}
146 146
147/* Called with kretprobe_lock held */
148void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 147void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
149 struct pt_regs *regs) 148 struct pt_regs *regs)
150{ 149{
@@ -312,8 +311,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
312 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; 311 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
313 312
314 INIT_HLIST_HEAD(&empty_rp); 313 INIT_HLIST_HEAD(&empty_rp);
315 spin_lock_irqsave(&kretprobe_lock, flags); 314 kretprobe_hash_lock(current, &head, &flags);
316 head = kretprobe_inst_table_head(current);
317 315
318 /* 316 /*
319 * It is possible to have multiple instances associated with a given 317 * It is possible to have multiple instances associated with a given
@@ -352,7 +350,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
352 regs->nip = orig_ret_address; 350 regs->nip = orig_ret_address;
353 351
354 reset_current_kprobe(); 352 reset_current_kprobe();
355 spin_unlock_irqrestore(&kretprobe_lock, flags); 353 kretprobe_hash_unlock(current, &flags);
356 preempt_enable_no_resched(); 354 preempt_enable_no_resched();
357 355
358 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 356 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 827a5726a035..9f856a0c3e38 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -34,8 +34,9 @@
34#include <asm/time.h> 34#include <asm/time.h>
35#include <asm/prom.h> 35#include <asm/prom.h>
36#include <asm/vdso_datapage.h> 36#include <asm/vdso_datapage.h>
37#include <asm/vio.h>
37 38
38#define MODULE_VERS "1.7" 39#define MODULE_VERS "1.8"
39#define MODULE_NAME "lparcfg" 40#define MODULE_NAME "lparcfg"
40 41
41/* #define LPARCFG_DEBUG */ 42/* #define LPARCFG_DEBUG */
@@ -129,32 +130,46 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v)
129/* 130/*
130 * Methods used to fetch LPAR data when running on a pSeries platform. 131 * Methods used to fetch LPAR data when running on a pSeries platform.
131 */ 132 */
132static void log_plpar_hcall_return(unsigned long rc, char *tag) 133/**
134 * h_get_mpp
135 * H_GET_MPP hcall returns info in 7 parms
136 */
137int h_get_mpp(struct hvcall_mpp_data *mpp_data)
133{ 138{
134 switch(rc) { 139 int rc;
135 case 0: 140 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
136 return; 141
137 case H_HARDWARE: 142 rc = plpar_hcall9(H_GET_MPP, retbuf);
138 printk(KERN_INFO "plpar-hcall (%s) " 143
139 "Hardware fault\n", tag); 144 mpp_data->entitled_mem = retbuf[0];
140 return; 145 mpp_data->mapped_mem = retbuf[1];
141 case H_FUNCTION: 146
142 printk(KERN_INFO "plpar-hcall (%s) " 147 mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
143 "Function not allowed\n", tag); 148 mpp_data->pool_num = retbuf[2] & 0xffff;
144 return; 149
145 case H_AUTHORITY: 150 mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
146 printk(KERN_INFO "plpar-hcall (%s) " 151 mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
147 "Not authorized to this function\n", tag); 152 mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
148 return; 153
149 case H_PARAMETER: 154 mpp_data->pool_size = retbuf[4];
150 printk(KERN_INFO "plpar-hcall (%s) " 155 mpp_data->loan_request = retbuf[5];
151 "Bad parameter(s)\n",tag); 156 mpp_data->backing_mem = retbuf[6];
152 return; 157
153 default: 158 return rc;
154 printk(KERN_INFO "plpar-hcall (%s) "
155 "Unexpected rc(0x%lx)\n", tag, rc);
156 }
157} 159}
160EXPORT_SYMBOL(h_get_mpp);
161
162struct hvcall_ppp_data {
163 u64 entitlement;
164 u64 unallocated_entitlement;
165 u16 group_num;
166 u16 pool_num;
167 u8 capped;
168 u8 weight;
169 u8 unallocated_weight;
170 u16 active_procs_in_pool;
171 u16 active_system_procs;
172};
158 173
159/* 174/*
160 * H_GET_PPP hcall returns info in 4 parms. 175 * H_GET_PPP hcall returns info in 4 parms.
@@ -176,27 +191,30 @@ static void log_plpar_hcall_return(unsigned long rc, char *tag)
176 * XXXX - Active processors in Physical Processor Pool. 191 * XXXX - Active processors in Physical Processor Pool.
177 * XXXX - Processors active on platform. 192 * XXXX - Processors active on platform.
178 */ 193 */
179static unsigned int h_get_ppp(unsigned long *entitled, 194static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
180 unsigned long *unallocated,
181 unsigned long *aggregation,
182 unsigned long *resource)
183{ 195{
184 unsigned long rc; 196 unsigned long rc;
185 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 197 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
186 198
187 rc = plpar_hcall(H_GET_PPP, retbuf); 199 rc = plpar_hcall(H_GET_PPP, retbuf);
188 200
189 *entitled = retbuf[0]; 201 ppp_data->entitlement = retbuf[0];
190 *unallocated = retbuf[1]; 202 ppp_data->unallocated_entitlement = retbuf[1];
191 *aggregation = retbuf[2]; 203
192 *resource = retbuf[3]; 204 ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
205 ppp_data->pool_num = retbuf[2] & 0xffff;
193 206
194 log_plpar_hcall_return(rc, "H_GET_PPP"); 207 ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
208 ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
209 ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
210 ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
211 ppp_data->active_system_procs = retbuf[3] & 0xffff;
195 212
196 return rc; 213 return rc;
197} 214}
198 215
199static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs) 216static unsigned h_pic(unsigned long *pool_idle_time,
217 unsigned long *num_procs)
200{ 218{
201 unsigned long rc; 219 unsigned long rc;
202 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 220 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
@@ -206,8 +224,87 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
206 *pool_idle_time = retbuf[0]; 224 *pool_idle_time = retbuf[0];
207 *num_procs = retbuf[1]; 225 *num_procs = retbuf[1];
208 226
209 if (rc != H_AUTHORITY) 227 return rc;
210 log_plpar_hcall_return(rc, "H_PIC"); 228}
229
230/*
231 * parse_ppp_data
232 * Parse out the data returned from h_get_ppp and h_pic
233 */
234static void parse_ppp_data(struct seq_file *m)
235{
236 struct hvcall_ppp_data ppp_data;
237 int rc;
238
239 rc = h_get_ppp(&ppp_data);
240 if (rc)
241 return;
242
243 seq_printf(m, "partition_entitled_capacity=%ld\n",
244 ppp_data.entitlement);
245 seq_printf(m, "group=%d\n", ppp_data.group_num);
246 seq_printf(m, "system_active_processors=%d\n",
247 ppp_data.active_system_procs);
248
 249 /* pool-related entries are appropriate for shared configs */
250 if (lppaca[0].shared_proc) {
251 unsigned long pool_idle_time, pool_procs;
252
253 seq_printf(m, "pool=%d\n", ppp_data.pool_num);
254
255 /* report pool_capacity in percentage */
256 seq_printf(m, "pool_capacity=%d\n",
257 ppp_data.active_procs_in_pool * 100);
258
259 h_pic(&pool_idle_time, &pool_procs);
260 seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
261 seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
262 }
263
264 seq_printf(m, "unallocated_capacity_weight=%d\n",
265 ppp_data.unallocated_weight);
266 seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
267 seq_printf(m, "capped=%d\n", ppp_data.capped);
268 seq_printf(m, "unallocated_capacity=%ld\n",
269 ppp_data.unallocated_entitlement);
270}
271
272/**
273 * parse_mpp_data
274 * Parse out data returned from h_get_mpp
275 */
276static void parse_mpp_data(struct seq_file *m)
277{
278 struct hvcall_mpp_data mpp_data;
279 int rc;
280
281 rc = h_get_mpp(&mpp_data);
282 if (rc)
283 return;
284
285 seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);
286
287 if (mpp_data.mapped_mem != -1)
288 seq_printf(m, "mapped_entitled_memory=%ld\n",
289 mpp_data.mapped_mem);
290
291 seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
292 seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);
293
294 seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
295 seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
296 mpp_data.unallocated_mem_weight);
297 seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
298 mpp_data.unallocated_entitlement);
299
300 if (mpp_data.pool_size != -1)
301 seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
302 mpp_data.pool_size);
303
304 seq_printf(m, "entitled_memory_loan_request=%ld\n",
305 mpp_data.loan_request);
306
307 seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
211} 308}
212 309
213#define SPLPAR_CHARACTERISTICS_TOKEN 20 310#define SPLPAR_CHARACTERISTICS_TOKEN 20
@@ -313,6 +410,25 @@ static int lparcfg_count_active_processors(void)
313 return count; 410 return count;
314} 411}
315 412
413static void pseries_cmo_data(struct seq_file *m)
414{
415 int cpu;
416 unsigned long cmo_faults = 0;
417 unsigned long cmo_fault_time = 0;
418
419 if (!firmware_has_feature(FW_FEATURE_CMO))
420 return;
421
422 for_each_possible_cpu(cpu) {
423 cmo_faults += lppaca[cpu].cmo_faults;
424 cmo_fault_time += lppaca[cpu].cmo_fault_time;
425 }
426
427 seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
428 seq_printf(m, "cmo_fault_time_usec=%lu\n",
429 cmo_fault_time / tb_ticks_per_usec);
430}
431
316static int pseries_lparcfg_data(struct seq_file *m, void *v) 432static int pseries_lparcfg_data(struct seq_file *m, void *v)
317{ 433{
318 int partition_potential_processors; 434 int partition_potential_processors;
@@ -334,60 +450,13 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
334 partition_active_processors = lparcfg_count_active_processors(); 450 partition_active_processors = lparcfg_count_active_processors();
335 451
336 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 452 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
337 unsigned long h_entitled, h_unallocated;
338 unsigned long h_aggregation, h_resource;
339 unsigned long pool_idle_time, pool_procs;
340 unsigned long purr;
341
342 h_get_ppp(&h_entitled, &h_unallocated, &h_aggregation,
343 &h_resource);
344
345 seq_printf(m, "R4=0x%lx\n", h_entitled);
346 seq_printf(m, "R5=0x%lx\n", h_unallocated);
347 seq_printf(m, "R6=0x%lx\n", h_aggregation);
348 seq_printf(m, "R7=0x%lx\n", h_resource);
349
350 purr = get_purr();
351
352 /* this call handles the ibm,get-system-parameter contents */ 453 /* this call handles the ibm,get-system-parameter contents */
353 parse_system_parameter_string(m); 454 parse_system_parameter_string(m);
455 parse_ppp_data(m);
456 parse_mpp_data(m);
457 pseries_cmo_data(m);
354 458
355 seq_printf(m, "partition_entitled_capacity=%ld\n", h_entitled); 459 seq_printf(m, "purr=%ld\n", get_purr());
356
357 seq_printf(m, "group=%ld\n", (h_aggregation >> 2 * 8) & 0xffff);
358
359 seq_printf(m, "system_active_processors=%ld\n",
360 (h_resource >> 0 * 8) & 0xffff);
361
362 /* pool related entries are apropriate for shared configs */
363 if (lppaca[0].shared_proc) {
364
365 h_pic(&pool_idle_time, &pool_procs);
366
367 seq_printf(m, "pool=%ld\n",
368 (h_aggregation >> 0 * 8) & 0xffff);
369
370 /* report pool_capacity in percentage */
371 seq_printf(m, "pool_capacity=%ld\n",
372 ((h_resource >> 2 * 8) & 0xffff) * 100);
373
374 seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
375
376 seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
377 }
378
379 seq_printf(m, "unallocated_capacity_weight=%ld\n",
380 (h_resource >> 4 * 8) & 0xFF);
381
382 seq_printf(m, "capacity_weight=%ld\n",
383 (h_resource >> 5 * 8) & 0xFF);
384
385 seq_printf(m, "capped=%ld\n", (h_resource >> 6 * 8) & 0x01);
386
387 seq_printf(m, "unallocated_capacity=%ld\n", h_unallocated);
388
389 seq_printf(m, "purr=%ld\n", purr);
390
391 } else { /* non SPLPAR case */ 460 } else { /* non SPLPAR case */
392 461
393 seq_printf(m, "system_active_processors=%d\n", 462 seq_printf(m, "system_active_processors=%d\n",
@@ -414,6 +483,83 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
414 return 0; 483 return 0;
415} 484}
416 485
486static ssize_t update_ppp(u64 *entitlement, u8 *weight)
487{
488 struct hvcall_ppp_data ppp_data;
489 u8 new_weight;
490 u64 new_entitled;
491 ssize_t retval;
492
493 /* Get our current parameters */
494 retval = h_get_ppp(&ppp_data);
495 if (retval)
496 return retval;
497
498 if (entitlement) {
499 new_weight = ppp_data.weight;
500 new_entitled = *entitlement;
501 } else if (weight) {
502 new_weight = *weight;
503 new_entitled = ppp_data.entitlement;
504 } else
505 return -EINVAL;
506
507 pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
508 __FUNCTION__, ppp_data.entitlement, ppp_data.weight);
509
510 pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
511 __FUNCTION__, new_entitled, new_weight);
512
513 retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
514 return retval;
515}
516
517/**
518 * update_mpp
519 *
 520 * Update the memory entitlement and weight for the partition. The caller
 521 * passes either a new entitlement or a new weight (not both); the current
 522 * value of the other is kept, since the H_SET_MPP call takes both as parameters.
523 */
524static ssize_t update_mpp(u64 *entitlement, u8 *weight)
525{
526 struct hvcall_mpp_data mpp_data;
527 u64 new_entitled;
528 u8 new_weight;
529 ssize_t rc;
530
531 if (entitlement) {
532 /* Check with vio to ensure the new memory entitlement
533 * can be handled.
534 */
535 rc = vio_cmo_entitlement_update(*entitlement);
536 if (rc)
537 return rc;
538 }
539
540 rc = h_get_mpp(&mpp_data);
541 if (rc)
542 return rc;
543
544 if (entitlement) {
545 new_weight = mpp_data.mem_weight;
546 new_entitled = *entitlement;
547 } else if (weight) {
548 new_weight = *weight;
549 new_entitled = mpp_data.entitled_mem;
550 } else
551 return -EINVAL;
552
553 pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
554 __FUNCTION__, mpp_data.entitled_mem, mpp_data.mem_weight);
555
556 pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
557 __FUNCTION__, new_entitled, new_weight);
558
559 rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
560 return rc;
561}
562
417/* 563/*
418 * Interface for changing system parameters (variable capacity weight 564 * Interface for changing system parameters (variable capacity weight
419 * and entitled capacity). Format of input is "param_name=value"; 565 * and entitled capacity). Format of input is "param_name=value";
@@ -427,35 +573,27 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
427static ssize_t lparcfg_write(struct file *file, const char __user * buf, 573static ssize_t lparcfg_write(struct file *file, const char __user * buf,
428 size_t count, loff_t * off) 574 size_t count, loff_t * off)
429{ 575{
430 char *kbuf; 576 int kbuf_sz = 64;
577 char kbuf[kbuf_sz];
431 char *tmp; 578 char *tmp;
432 u64 new_entitled, *new_entitled_ptr = &new_entitled; 579 u64 new_entitled, *new_entitled_ptr = &new_entitled;
433 u8 new_weight, *new_weight_ptr = &new_weight; 580 u8 new_weight, *new_weight_ptr = &new_weight;
434 581 ssize_t retval;
435 unsigned long current_entitled; /* parameters for h_get_ppp */
436 unsigned long dummy;
437 unsigned long resource;
438 u8 current_weight;
439
440 ssize_t retval = -ENOMEM;
441 582
442 if (!firmware_has_feature(FW_FEATURE_SPLPAR) || 583 if (!firmware_has_feature(FW_FEATURE_SPLPAR) ||
443 firmware_has_feature(FW_FEATURE_ISERIES)) 584 firmware_has_feature(FW_FEATURE_ISERIES))
444 return -EINVAL; 585 return -EINVAL;
445 586
446 kbuf = kmalloc(count, GFP_KERNEL); 587 if (count > kbuf_sz)
447 if (!kbuf) 588 return -EINVAL;
448 goto out;
449 589
450 retval = -EFAULT;
451 if (copy_from_user(kbuf, buf, count)) 590 if (copy_from_user(kbuf, buf, count))
452 goto out; 591 return -EFAULT;
453 592
454 retval = -EINVAL;
455 kbuf[count - 1] = '\0'; 593 kbuf[count - 1] = '\0';
456 tmp = strchr(kbuf, '='); 594 tmp = strchr(kbuf, '=');
457 if (!tmp) 595 if (!tmp)
458 goto out; 596 return -EINVAL;
459 597
460 *tmp++ = '\0'; 598 *tmp++ = '\0';
461 599
@@ -463,34 +601,32 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
463 char *endp; 601 char *endp;
464 *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10); 602 *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
465 if (endp == tmp) 603 if (endp == tmp)
466 goto out; 604 return -EINVAL;
467 new_weight_ptr = &current_weight; 605
606 retval = update_ppp(new_entitled_ptr, NULL);
468 } else if (!strcmp(kbuf, "capacity_weight")) { 607 } else if (!strcmp(kbuf, "capacity_weight")) {
469 char *endp; 608 char *endp;
470 *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10); 609 *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
471 if (endp == tmp) 610 if (endp == tmp)
472 goto out; 611 return -EINVAL;
473 new_entitled_ptr = &current_entitled;
474 } else
475 goto out;
476
477 /* Get our current parameters */
478 retval = h_get_ppp(&current_entitled, &dummy, &dummy, &resource);
479 if (retval) {
480 retval = -EIO;
481 goto out;
482 }
483
484 current_weight = (resource >> 5 * 8) & 0xFF;
485 612
486 pr_debug("%s: current_entitled = %lu, current_weight = %u\n", 613 retval = update_ppp(NULL, new_weight_ptr);
487 __func__, current_entitled, current_weight); 614 } else if (!strcmp(kbuf, "entitled_memory")) {
615 char *endp;
616 *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
617 if (endp == tmp)
618 return -EINVAL;
488 619
489 pr_debug("%s: new_entitled = %lu, new_weight = %u\n", 620 retval = update_mpp(new_entitled_ptr, NULL);
490 __func__, *new_entitled_ptr, *new_weight_ptr); 621 } else if (!strcmp(kbuf, "entitled_memory_weight")) {
622 char *endp;
623 *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
624 if (endp == tmp)
625 return -EINVAL;
491 626
492 retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr, 627 retval = update_mpp(NULL, new_weight_ptr);
493 *new_weight_ptr); 628 } else
629 return -EINVAL;
494 630
495 if (retval == H_SUCCESS || retval == H_CONSTRAINED) { 631 if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
496 retval = count; 632 retval = count;
@@ -506,8 +642,6 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
506 retval = -EIO; 642 retval = -EIO;
507 } 643 }
508 644
509out:
510 kfree(kbuf);
511 return retval; 645 return retval;
512} 646}
513 647
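
Much of the lparcfg rework above is about unpacking the multi-field words returned by the H_GET_PPP/H_GET_MPP hcalls: sub-fields are carved out of a 64-bit register image with byte-wise shifts and masks (e.g. group_num in the upper halfword of retbuf[2], capped in byte 6 of retbuf[3]). A standalone sketch of that extraction follows, using a made-up sample word rather than real hcall output.

/* Field extraction in the style of h_get_ppp() above; sample value is invented. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t r3 = 0x0001020300040005ULL;   /* arbitrary example word */

	unsigned int capped       = (r3 >> 6 * 8) & 0x01;
	unsigned int weight       = (r3 >> 5 * 8) & 0xff;
	unsigned int unalloc_wt   = (r3 >> 4 * 8) & 0xff;
	unsigned int procs_pool   = (r3 >> 2 * 8) & 0xffff;
	unsigned int procs_system = r3 & 0xffff;

	printf("capped=%u weight=%u unallocated_weight=%u\n",
	       capped, weight, unalloc_wt);
	printf("active_procs_in_pool=%u active_system_procs=%u\n",
	       procs_pool, procs_system);
	return 0;
}
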
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 219f3634115e..db2497ccc111 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -47,6 +47,8 @@
47#ifdef CONFIG_PPC64 47#ifdef CONFIG_PPC64
48#include <asm/firmware.h> 48#include <asm/firmware.h>
49#endif 49#endif
50#include <linux/kprobes.h>
51#include <linux/kdebug.h>
50 52
51extern unsigned long _get_SP(void); 53extern unsigned long _get_SP(void);
52 54
@@ -239,6 +241,35 @@ void discard_lazy_cpu_state(void)
239} 241}
240#endif /* CONFIG_SMP */ 242#endif /* CONFIG_SMP */
241 243
244void do_dabr(struct pt_regs *regs, unsigned long address,
245 unsigned long error_code)
246{
247 siginfo_t info;
248
249 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
250 11, SIGSEGV) == NOTIFY_STOP)
251 return;
252
253 if (debugger_dabr_match(regs))
254 return;
255
256 /* Clear the DAC and struct entries. One shot trigger */
257#if (defined(CONFIG_44x) || defined(CONFIG_BOOKE))
258 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
259 | DBCR0_IDM));
260#endif
261
262 /* Clear the DABR */
263 set_dabr(0);
264
265 /* Deliver the signal to userspace */
266 info.si_signo = SIGTRAP;
267 info.si_errno = 0;
268 info.si_code = TRAP_HWBKPT;
269 info.si_addr = (void __user *)address;
270 force_sig_info(SIGTRAP, &info, current);
271}
272
242static DEFINE_PER_CPU(unsigned long, current_dabr); 273static DEFINE_PER_CPU(unsigned long, current_dabr);
243 274
244int set_dabr(unsigned long dabr) 275int set_dabr(unsigned long dabr)
@@ -254,6 +285,11 @@ int set_dabr(unsigned long dabr)
254#if defined(CONFIG_PPC64) || defined(CONFIG_6xx) 285#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
255 mtspr(SPRN_DABR, dabr); 286 mtspr(SPRN_DABR, dabr);
256#endif 287#endif
288
289#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
290 mtspr(SPRN_DAC1, dabr);
291#endif
292
257 return 0; 293 return 0;
258} 294}
259 295
@@ -337,6 +373,12 @@ struct task_struct *__switch_to(struct task_struct *prev,
337 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) 373 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
338 set_dabr(new->thread.dabr); 374 set_dabr(new->thread.dabr);
339 375
376#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
377 /* If new thread DAC (HW breakpoint) is the same then leave it */
378 if (new->thread.dabr)
379 set_dabr(new->thread.dabr);
380#endif
381
340 new_thread = &new->thread; 382 new_thread = &new->thread;
341 old_thread = &current->thread; 383 old_thread = &current->thread;
342 384
@@ -525,6 +567,10 @@ void flush_thread(void)
525 if (current->thread.dabr) { 567 if (current->thread.dabr) {
526 current->thread.dabr = 0; 568 current->thread.dabr = 0;
527 set_dabr(0); 569 set_dabr(0);
570
571#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
572 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
573#endif
528 } 574 }
529} 575}
530 576
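
In the __switch_to() context shown above, the DABR is only reprogrammed when the per-CPU cached value (current_dabr) differs from the incoming thread's value, so a context switch normally avoids the register write. Below is a minimal userspace model of that write-only-on-change pattern; set_hw_breakpoint() and cached_dabr are illustrative names, not the kernel's.

/* Write-through cache of a "breakpoint register", updated only on change. */
#include <stdio.h>

static unsigned long cached_dabr;   /* models the per-CPU current_dabr */

static void set_hw_breakpoint(unsigned long addr)
{
	printf("programming breakpoint register to %#lx\n", addr);
	cached_dabr = addr;
}

static void switch_to(unsigned long next_dabr)
{
	/* skip the register write when the value is unchanged */
	if (cached_dabr != next_dabr)
		set_hw_breakpoint(next_dabr);
}

int main(void)
{
	switch_to(0x1000);   /* programs the register */
	switch_to(0x1000);   /* no-op: value unchanged */
	switch_to(0);        /* clears it */
	return 0;
}
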
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1ea8c8d3ce89..c4ab2195b9cb 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -643,6 +643,11 @@ static void __init early_cmdline_parse(void)
643#else 643#else
644#define OV5_MSI 0x00 644#define OV5_MSI 0x00
645#endif /* CONFIG_PCI_MSI */ 645#endif /* CONFIG_PCI_MSI */
646#ifdef CONFIG_PPC_SMLPAR
647#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */
648#else
649#define OV5_CMO 0x00
650#endif
646 651
647/* 652/*
648 * The architecture vector has an array of PVR mask/value pairs, 653 * The architecture vector has an array of PVR mask/value pairs,
@@ -687,10 +692,12 @@ static unsigned char ibm_architecture_vec[] = {
687 0, /* don't halt */ 692 0, /* don't halt */
688 693
689 /* option vector 5: PAPR/OF options */ 694 /* option vector 5: PAPR/OF options */
690 3 - 2, /* length */ 695 5 - 2, /* length */
691 0, /* don't ignore, don't halt */ 696 0, /* don't ignore, don't halt */
692 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | 697 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
693 OV5_DONATE_DEDICATE_CPU | OV5_MSI, 698 OV5_DONATE_DEDICATE_CPU | OV5_MSI,
699 0,
700 OV5_CMO,
694}; 701};
695 702
696/* Old method - ELF header with PT_NOTE sections */ 703/* Old method - ELF header with PT_NOTE sections */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8feb93e7890c..a5d0e78779c8 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -703,7 +703,7 @@ void user_enable_single_step(struct task_struct *task)
703 703
704 if (regs != NULL) { 704 if (regs != NULL) {
705#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 705#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
706 task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC; 706 task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
707 regs->msr |= MSR_DE; 707 regs->msr |= MSR_DE;
708#else 708#else
709 regs->msr |= MSR_SE; 709 regs->msr |= MSR_SE;
@@ -716,9 +716,16 @@ void user_disable_single_step(struct task_struct *task)
716{ 716{
717 struct pt_regs *regs = task->thread.regs; 717 struct pt_regs *regs = task->thread.regs;
718 718
719
720#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
721 /* If DAC then do not single step, skip */
722 if (task->thread.dabr)
723 return;
724#endif
725
719 if (regs != NULL) { 726 if (regs != NULL) {
720#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 727#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
721 task->thread.dbcr0 = 0; 728 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM);
722 regs->msr &= ~MSR_DE; 729 regs->msr &= ~MSR_DE;
723#else 730#else
724 regs->msr &= ~MSR_SE; 731 regs->msr &= ~MSR_SE;
@@ -727,22 +734,75 @@ void user_disable_single_step(struct task_struct *task)
727 clear_tsk_thread_flag(task, TIF_SINGLESTEP); 734 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
728} 735}
729 736
730static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, 737int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
731 unsigned long data) 738 unsigned long data)
732{ 739{
 733 /* We only support one DABR and no IABRS at the moment */ 740 /* For ppc64 we support one DABR and no IABRs at the moment.
 741 * For embedded processors we support one DAC and no IACs at the
 742 * moment.
743 */
734 if (addr > 0) 744 if (addr > 0)
735 return -EINVAL; 745 return -EINVAL;
736 746
737 /* The bottom 3 bits are flags */
738 if ((data & ~0x7UL) >= TASK_SIZE) 747 if ((data & ~0x7UL) >= TASK_SIZE)
739 return -EIO; 748 return -EIO;
740 749
741 /* Ensure translation is on */ 750#ifdef CONFIG_PPC64
751
752 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
753 * It was assumed, on previous implementations, that 3 bits were
754 * passed together with the data address, fitting the design of the
755 * DABR register, as follows:
756 *
757 * bit 0: Read flag
758 * bit 1: Write flag
759 * bit 2: Breakpoint translation
760 *
 761 * Thus, we use them here accordingly.
762 */
763
764 /* Ensure breakpoint translation bit is set */
742 if (data && !(data & DABR_TRANSLATION)) 765 if (data && !(data & DABR_TRANSLATION))
743 return -EIO; 766 return -EIO;
744 767
768 /* Move contents to the DABR register */
745 task->thread.dabr = data; 769 task->thread.dabr = data;
770
771#endif
772#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
773
774 /* As described above, it was assumed 3 bits were passed with the data
 775 * address, but we will assume only the mode bits will be passed,
 776 * so as not to cause alignment restrictions for DAC-based processors.
777 */
778
 779 /* DACs hold the whole address without any mode flags */
780 task->thread.dabr = data & ~0x3UL;
781
782 if (task->thread.dabr == 0) {
783 task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM);
784 task->thread.regs->msr &= ~MSR_DE;
785 return 0;
786 }
787
788 /* Read or Write bits must be set */
789
790 if (!(data & 0x3UL))
791 return -EINVAL;
792
 793 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
 794 * register */
795 task->thread.dbcr0 = DBCR0_IDM;
796
 797 /* Check for write and read flags and set DBCR0
 798 * accordingly */
799 if (data & 0x1UL)
800 task->thread.dbcr0 |= DBSR_DAC1R;
801 if (data & 0x2UL)
802 task->thread.dbcr0 |= DBSR_DAC1W;
803
804 task->thread.regs->msr |= MSR_DE;
805#endif
746 return 0; 806 return 0;
747} 807}
748 808
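A minimal userspace sketch of how a debugger might drive this interface, assuming the powerpc PTRACE_SET_DEBUGREG request from <asm/ptrace.h>; only the bit positions come from the hunk above (bit 0 read, bit 1 write, bit 2 DABR_TRANSLATION on server parts), while the macro and function names are invented for illustration:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace.h>		/* PTRACE_SET_DEBUGREG on powerpc */

	/* Flag bits as documented in ptrace_set_debugreg() above; the macro
	 * names are illustrative, only the bit positions come from the patch. */
	#define HWBP_READ	0x1UL	/* bit 0: break on load */
	#define HWBP_WRITE	0x2UL	/* bit 1: break on store */
	#define HWBP_TRANSLATE	0x4UL	/* bit 2: DABR_TRANSLATION (server CPUs) */

	/* Arm a read/write watchpoint on addr in the stopped tracee pid.
	 * The ptrace addr argument must be 0; the breakpoint address and
	 * flags travel in the data argument, and addr itself must leave the
	 * low flag bits clear (i.e. be suitably aligned). */
	static int set_watchpoint(pid_t pid, unsigned long addr)
	{
		unsigned long data = addr | HWBP_READ | HWBP_WRITE | HWBP_TRANSLATE;

		if (ptrace(PTRACE_SET_DEBUGREG, pid, 0, data) == -1) {
			perror("PTRACE_SET_DEBUGREG");
			return -errno;
		}
		return 0;
	}

	/* Passing data == 0 clears the DABR/DAC state again. */
	static int clear_watchpoint(pid_t pid)
	{
		return ptrace(PTRACE_SET_DEBUGREG, pid, 0, 0UL) == -1 ? -errno : 0;
	}

On 4xx/Book E the kernel masks the address with ~0x3UL, so only the read/write bits matter there; keeping the translation bit set lets the same call work on both processor families.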
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 4efebe88e64a..066e65c59b58 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -43,10 +43,6 @@
43 43
44#define DBG(fmt...) 44#define DBG(fmt...)
45 45
46#if defined CONFIG_KGDB
47#include <asm/kgdb.h>
48#endif
49
50extern void bootx_init(unsigned long r4, unsigned long phys); 46extern void bootx_init(unsigned long r4, unsigned long phys);
51 47
52int boot_cpuid; 48int boot_cpuid;
@@ -302,18 +298,6 @@ void __init setup_arch(char **cmdline_p)
302 298
303 xmon_setup(); 299 xmon_setup();
304 300
305#if defined(CONFIG_KGDB)
306 if (ppc_md.kgdb_map_scc)
307 ppc_md.kgdb_map_scc();
308 set_debug_traps();
309 if (strstr(cmd_line, "gdb")) {
310 if (ppc_md.progress)
311 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
312 printk("kgdb breakpoint activated\n");
313 breakpoint();
314 }
315#endif
316
317 /* 301 /*
318 * Set cache line size based on type of cpu as a default. 302 * Set cache line size based on type of cpu as a default.
319 * Systems with OF can look in the properties on the cpu node(s) 303 * Systems with OF can look in the properties on the cpu node(s)
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index ad55488939c3..7aada783ec6a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -145,8 +145,12 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
145 * user space. The DABR will have been cleared if it 145 * user space. The DABR will have been cleared if it
146 * triggered inside the kernel. 146 * triggered inside the kernel.
147 */ 147 */
148 if (current->thread.dabr) 148 if (current->thread.dabr) {
149 set_dabr(current->thread.dabr); 149 set_dabr(current->thread.dabr);
150#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
151 mtspr(SPRN_DBCR0, current->thread.dbcr0);
152#endif
153 }
150 154
151 if (is32) { 155 if (is32) {
152 if (ka.sa.sa_flags & SA_SIGINFO) 156 if (ka.sa.sa_flags & SA_SIGINFO)
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
index 8cee57107541..6fc6328dc626 100644
--- a/arch/powerpc/kernel/suspend.c
+++ b/arch/powerpc/kernel/suspend.c
@@ -7,6 +7,7 @@
7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
8 */ 8 */
9 9
10#include <linux/mm.h>
10#include <asm/page.h> 11#include <asm/page.h>
11 12
12/* References to section boundaries */ 13/* References to section boundaries */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index aba0ba95f062..800e5e9a087b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -529,7 +529,8 @@ static void register_nodes(void)
529#endif 529#endif
530 530
531/* Only valid if CPU is present. */ 531/* Only valid if CPU is present. */
532static ssize_t show_physical_id(struct sys_device *dev, char *buf) 532static ssize_t show_physical_id(struct sys_device *dev,
533 struct sysdev_attribute *attr, char *buf)
533{ 534{
534 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 535 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
535 536
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 878fbddb6ae1..81ccb8dd1a54 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1067,6 +1067,22 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
1067 } 1067 }
1068 1068
1069 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1069 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1070 } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1071 regs->msr &= ~MSR_DE;
1072
1073 if (user_mode(regs)) {
1074 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W |
1075 DBCR0_IDM);
1076 } else {
 1077 /* Disable DAC interrupts */
1078 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R |
1079 DBSR_DAC1W | DBCR0_IDM));
1080
1081 /* Clear the DAC event */
1082 mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W));
1083 }
 1084 /* Set up and send the trap to the handler */
1085 do_dabr(regs, mfspr(SPRN_DAC1), debug_status);
1070 } 1086 }
1071} 1087}
1072#endif /* CONFIG_4xx || CONFIG_BOOKE */ 1088#endif /* CONFIG_4xx || CONFIG_BOOKE */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index b77f8af7ddde..ade8aeaa2e70 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1,11 +1,12 @@
1/* 1/*
2 * IBM PowerPC Virtual I/O Infrastructure Support. 2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 * 3 *
4 * Copyright (c) 2003-2005 IBM Corp. 4 * Copyright (c) 2003,2008 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com 5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com 6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com> 7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell 8 * Stephen Rothwell
9 * Robert Jennings <rcjenn@us.ibm.com>
9 * 10 *
10 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
@@ -46,6 +47,996 @@ static struct vio_dev vio_bus_device = { /* fake "parent" device */
46 .dev.bus = &vio_bus_type, 47 .dev.bus = &vio_bus_type,
47}; 48};
48 49
50#ifdef CONFIG_PPC_SMLPAR
51/**
52 * vio_cmo_pool - A pool of IO memory for CMO use
53 *
54 * @size: The size of the pool in bytes
55 * @free: The amount of free memory in the pool
56 */
57struct vio_cmo_pool {
58 size_t size;
59 size_t free;
60};
61
62/* How many ms to delay queued balance work */
63#define VIO_CMO_BALANCE_DELAY 100
64
65/* Portion out IO memory to CMO devices by this chunk size */
66#define VIO_CMO_BALANCE_CHUNK 131072
67
68/**
69 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
70 *
 71 * @viodev: struct vio_dev pointer
72 * @list: pointer to other devices on bus that are being tracked
73 */
74struct vio_cmo_dev_entry {
75 struct vio_dev *viodev;
76 struct list_head list;
77};
78
79/**
80 * vio_cmo - VIO bus accounting structure for CMO entitlement
81 *
82 * @lock: spinlock for entire structure
83 * @balance_q: work queue for balancing system entitlement
84 * @device_list: list of CMO-enabled devices requiring entitlement
85 * @entitled: total system entitlement in bytes
86 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
87 * @excess: pool of excess entitlement not needed for device reserves or spare
88 * @spare: IO memory for device hotplug functionality
89 * @min: minimum necessary for system operation
90 * @desired: desired memory for system operation
91 * @curr: bytes currently allocated
92 * @high: high water mark for IO data usage
93 */
94struct vio_cmo {
95 spinlock_t lock;
96 struct delayed_work balance_q;
97 struct list_head device_list;
98 size_t entitled;
99 struct vio_cmo_pool reserve;
100 struct vio_cmo_pool excess;
101 size_t spare;
102 size_t min;
103 size_t desired;
104 size_t curr;
105 size_t high;
106} vio_cmo;
107
108/**
109 * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows
110 */
111static int vio_cmo_num_OF_devs(void)
112{
113 struct device_node *node_vroot;
114 int count = 0;
115
116 /*
117 * Count the number of vdevice entries with an
118 * ibm,my-dma-window OF property
119 */
120 node_vroot = of_find_node_by_name(NULL, "vdevice");
121 if (node_vroot) {
122 struct device_node *of_node;
123 struct property *prop;
124
125 for_each_child_of_node(node_vroot, of_node) {
126 prop = of_find_property(of_node, "ibm,my-dma-window",
127 NULL);
128 if (prop)
129 count++;
130 }
131 }
132 of_node_put(node_vroot);
133 return count;
134}
135
136/**
 137 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
138 *
139 * @viodev: VIO device requesting IO memory
140 * @size: size of allocation requested
141 *
142 * Allocations come from memory reserved for the devices and any excess
143 * IO memory available to all devices. The spare pool used to service
144 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
145 * made available.
146 *
147 * Return codes:
148 * 0 for successful allocation and -ENOMEM for a failure
149 */
150static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
151{
152 unsigned long flags;
153 size_t reserve_free = 0;
154 size_t excess_free = 0;
155 int ret = -ENOMEM;
156
157 spin_lock_irqsave(&vio_cmo.lock, flags);
158
159 /* Determine the amount of free entitlement available in reserve */
160 if (viodev->cmo.entitled > viodev->cmo.allocated)
161 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
162
163 /* If spare is not fulfilled, the excess pool can not be used. */
164 if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
165 excess_free = vio_cmo.excess.free;
166
167 /* The request can be satisfied */
168 if ((reserve_free + excess_free) >= size) {
169 vio_cmo.curr += size;
170 if (vio_cmo.curr > vio_cmo.high)
171 vio_cmo.high = vio_cmo.curr;
172 viodev->cmo.allocated += size;
173 size -= min(reserve_free, size);
174 vio_cmo.excess.free -= size;
175 ret = 0;
176 }
177
178 spin_unlock_irqrestore(&vio_cmo.lock, flags);
179 return ret;
180}
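A worked example of the split above, with illustrative numbers and assuming the spare pool is fully funded so the excess pool is usable:

	/*
	 * Illustrative numbers only:
	 *   viodev->cmo.entitled = 64 KiB, viodev->cmo.allocated = 48 KiB
	 *       -> reserve_free = 16 KiB
	 *   vio_cmo.excess.free  = 32 KiB  -> excess_free = 32 KiB
	 *   request size         = 24 KiB
	 *
	 * 24 KiB <= 16 KiB + 32 KiB, so the request succeeds:
	 *   viodev->cmo.allocated += 24 KiB;
	 *   size -= min(reserve_free, size)  -> 8 KiB left over
	 *   vio_cmo.excess.free   -= 8 KiB   -> only the overflow beyond the
	 *                                       device's own entitlement is
	 *                                       charged to the shared excess pool.
	 */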
181
182/**
 183 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
184 * @viodev: VIO device freeing IO memory
185 * @size: size of deallocation
186 *
187 * IO memory is freed by the device back to the correct memory pools.
188 * The spare pool is replenished first from either memory pool, then
189 * the reserve pool is used to reduce device entitlement, the excess
190 * pool is used to increase the reserve pool toward the desired entitlement
191 * target, and then the remaining memory is returned to the pools.
192 *
193 */
194static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
195{
196 unsigned long flags;
197 size_t spare_needed = 0;
198 size_t excess_freed = 0;
199 size_t reserve_freed = size;
200 size_t tmp;
201 int balance = 0;
202
203 spin_lock_irqsave(&vio_cmo.lock, flags);
204 vio_cmo.curr -= size;
205
206 /* Amount of memory freed from the excess pool */
207 if (viodev->cmo.allocated > viodev->cmo.entitled) {
208 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
209 viodev->cmo.entitled));
210 reserve_freed -= excess_freed;
211 }
212
213 /* Remove allocation from device */
214 viodev->cmo.allocated -= (reserve_freed + excess_freed);
215
216 /* Spare is a subset of the reserve pool, replenish it first. */
217 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
218
219 /*
220 * Replenish the spare in the reserve pool from the excess pool.
221 * This moves entitlement into the reserve pool.
222 */
223 if (spare_needed && excess_freed) {
224 tmp = min(excess_freed, spare_needed);
225 vio_cmo.excess.size -= tmp;
226 vio_cmo.reserve.size += tmp;
227 vio_cmo.spare += tmp;
228 excess_freed -= tmp;
229 spare_needed -= tmp;
230 balance = 1;
231 }
232
233 /*
234 * Replenish the spare in the reserve pool from the reserve pool.
235 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
236 * if needed, and gives it to the spare pool. The amount of used
237 * memory in this pool does not change.
238 */
239 if (spare_needed && reserve_freed) {
240 tmp = min(spare_needed, min(reserve_freed,
241 (viodev->cmo.entitled -
242 VIO_CMO_MIN_ENT)));
243
244 vio_cmo.spare += tmp;
245 viodev->cmo.entitled -= tmp;
246 reserve_freed -= tmp;
247 spare_needed -= tmp;
248 balance = 1;
249 }
250
251 /*
252 * Increase the reserve pool until the desired allocation is met.
253 * Move an allocation freed from the excess pool into the reserve
254 * pool and schedule a balance operation.
255 */
256 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
257 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
258
259 vio_cmo.excess.size -= tmp;
260 vio_cmo.reserve.size += tmp;
261 excess_freed -= tmp;
262 balance = 1;
263 }
264
 265 /* Return any remaining freed memory to the excess pool */
266 if (excess_freed)
267 vio_cmo.excess.free += excess_freed;
268
269 if (balance)
270 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
271 spin_unlock_irqrestore(&vio_cmo.lock, flags);
272}
273
274/**
275 * vio_cmo_entitlement_update - Manage system entitlement changes
276 *
277 * @new_entitlement: new system entitlement to attempt to accommodate
278 *
279 * Increases in entitlement will be used to fulfill the spare entitlement
280 * and the rest is given to the excess pool. Decreases, if they are
281 * possible, come from the excess pool and from unused device entitlement
282 *
283 * Returns: 0 on success, -ENOMEM when change can not be made
284 */
285int vio_cmo_entitlement_update(size_t new_entitlement)
286{
287 struct vio_dev *viodev;
288 struct vio_cmo_dev_entry *dev_ent;
289 unsigned long flags;
290 size_t avail, delta, tmp;
291
292 spin_lock_irqsave(&vio_cmo.lock, flags);
293
294 /* Entitlement increases */
295 if (new_entitlement > vio_cmo.entitled) {
296 delta = new_entitlement - vio_cmo.entitled;
297
298 /* Fulfill spare allocation */
299 if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
300 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
301 vio_cmo.spare += tmp;
302 vio_cmo.reserve.size += tmp;
303 delta -= tmp;
304 }
305
306 /* Remaining new allocation goes to the excess pool */
307 vio_cmo.entitled += delta;
308 vio_cmo.excess.size += delta;
309 vio_cmo.excess.free += delta;
310
311 goto out;
312 }
313
314 /* Entitlement decreases */
315 delta = vio_cmo.entitled - new_entitlement;
316 avail = vio_cmo.excess.free;
317
318 /*
319 * Need to check how much unused entitlement each device can
320 * sacrifice to fulfill entitlement change.
321 */
322 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
323 if (avail >= delta)
324 break;
325
326 viodev = dev_ent->viodev;
327 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
328 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
329 avail += viodev->cmo.entitled -
330 max_t(size_t, viodev->cmo.allocated,
331 VIO_CMO_MIN_ENT);
332 }
333
334 if (delta <= avail) {
335 vio_cmo.entitled -= delta;
336
337 /* Take entitlement from the excess pool first */
338 tmp = min(vio_cmo.excess.free, delta);
339 vio_cmo.excess.size -= tmp;
340 vio_cmo.excess.free -= tmp;
341 delta -= tmp;
342
343 /*
344 * Remove all but VIO_CMO_MIN_ENT bytes from devices
345 * until entitlement change is served
346 */
347 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
348 if (!delta)
349 break;
350
351 viodev = dev_ent->viodev;
352 tmp = 0;
353 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
354 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
355 tmp = viodev->cmo.entitled -
356 max_t(size_t, viodev->cmo.allocated,
357 VIO_CMO_MIN_ENT);
358 viodev->cmo.entitled -= min(tmp, delta);
359 delta -= min(tmp, delta);
360 }
361 } else {
362 spin_unlock_irqrestore(&vio_cmo.lock, flags);
363 return -ENOMEM;
364 }
365
366out:
367 schedule_delayed_work(&vio_cmo.balance_q, 0);
368 spin_unlock_irqrestore(&vio_cmo.lock, flags);
369 return 0;
370}
371
372/**
373 * vio_cmo_balance - Balance entitlement among devices
374 *
375 * @work: work queue structure for this operation
376 *
377 * Any system entitlement above the minimum needed for devices, or
378 * already allocated to devices, can be distributed to the devices.
379 * The list of devices is iterated through to recalculate the desired
380 * entitlement level and to determine how much entitlement above the
381 * minimum entitlement is allocated to devices.
382 *
383 * Small chunks of the available entitlement are given to devices until
384 * their requirements are fulfilled or there is no entitlement left to give.
385 * Upon completion sizes of the reserve and excess pools are calculated.
386 *
387 * The system minimum entitlement level is also recalculated here.
388 * Entitlement will be reserved for devices even after vio_bus_remove to
389 * accommodate reloading the driver. The OF tree is walked to count the
390 * number of devices present and this will remove entitlement for devices
 391 * that have actually left the system after vio_bus_remove() has been called.
392 */
393static void vio_cmo_balance(struct work_struct *work)
394{
395 struct vio_cmo *cmo;
396 struct vio_dev *viodev;
397 struct vio_cmo_dev_entry *dev_ent;
398 unsigned long flags;
399 size_t avail = 0, level, chunk, need;
400 int devcount = 0, fulfilled;
401
402 cmo = container_of(work, struct vio_cmo, balance_q.work);
403
404 spin_lock_irqsave(&vio_cmo.lock, flags);
405
406 /* Calculate minimum entitlement and fulfill spare */
407 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
408 BUG_ON(cmo->min > cmo->entitled);
409 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
410 cmo->min += cmo->spare;
411 cmo->desired = cmo->min;
412
413 /*
414 * Determine how much entitlement is available and reset device
415 * entitlements
416 */
417 avail = cmo->entitled - cmo->spare;
418 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
419 viodev = dev_ent->viodev;
420 devcount++;
421 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
422 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
423 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
424 }
425
426 /*
427 * Having provided each device with the minimum entitlement, loop
428 * over the devices portioning out the remaining entitlement
429 * until there is nothing left.
430 */
431 level = VIO_CMO_MIN_ENT;
432 while (avail) {
433 fulfilled = 0;
434 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
435 viodev = dev_ent->viodev;
436
437 if (viodev->cmo.desired <= level) {
438 fulfilled++;
439 continue;
440 }
441
442 /*
443 * Give the device up to VIO_CMO_BALANCE_CHUNK
444 * bytes of entitlement, but do not exceed the
445 * desired level of entitlement for the device.
446 */
447 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
448 chunk = min(chunk, (viodev->cmo.desired -
449 viodev->cmo.entitled));
450 viodev->cmo.entitled += chunk;
451
452 /*
453 * If the memory for this entitlement increase was
454 * already allocated to the device it does not come
455 * from the available pool being portioned out.
456 */
457 need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
458 max(viodev->cmo.allocated, level);
459 avail -= need;
460
461 }
462 if (fulfilled == devcount)
463 break;
464 level += VIO_CMO_BALANCE_CHUNK;
465 }
466
467 /* Calculate new reserve and excess pool sizes */
468 cmo->reserve.size = cmo->min;
469 cmo->excess.free = 0;
470 cmo->excess.size = 0;
471 need = 0;
472 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
473 viodev = dev_ent->viodev;
474 /* Calculated reserve size above the minimum entitlement */
475 if (viodev->cmo.entitled)
476 cmo->reserve.size += (viodev->cmo.entitled -
477 VIO_CMO_MIN_ENT);
478 /* Calculated used excess entitlement */
479 if (viodev->cmo.allocated > viodev->cmo.entitled)
480 need += viodev->cmo.allocated - viodev->cmo.entitled;
481 }
482 cmo->excess.size = cmo->entitled - cmo->reserve.size;
483 cmo->excess.free = cmo->excess.size - need;
484
485 cancel_delayed_work(container_of(work, struct delayed_work, work));
486 spin_unlock_irqrestore(&vio_cmo.lock, flags);
487}
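To make the chunking concrete, an illustrative trace with two devices that have already been granted their VIO_CMO_MIN_ENT minimum:

	/*
	 * Device A desires 384 KiB above its minimum, device B desires 64 KiB,
	 * and 256 KiB of entitlement remains available:
	 *
	 *   pass 1 (level = MIN_ENT):       A +128 KiB, B +64 KiB
	 *   pass 2 (level += 128 KiB):      B counted as fulfilled,
	 *                                   A +64 KiB, avail now exhausted
	 *
	 * Each pass hands an unfulfilled device at most VIO_CMO_BALANCE_CHUNK
	 * (128 KiB) and never more than its remaining desired amount, so a
	 * large consumer cannot starve smaller ones in a single pass.
	 */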
488
489static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
490 dma_addr_t *dma_handle, gfp_t flag)
491{
492 struct vio_dev *viodev = to_vio_dev(dev);
493 void *ret;
494
495 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
496 atomic_inc(&viodev->cmo.allocs_failed);
497 return NULL;
498 }
499
500 ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
501 if (unlikely(ret == NULL)) {
502 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
503 atomic_inc(&viodev->cmo.allocs_failed);
504 }
505
506 return ret;
507}
508
509static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
510 void *vaddr, dma_addr_t dma_handle)
511{
512 struct vio_dev *viodev = to_vio_dev(dev);
513
514 dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
515
516 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
517}
518
519static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
520 size_t size,
521 enum dma_data_direction direction,
522 struct dma_attrs *attrs)
523{
524 struct vio_dev *viodev = to_vio_dev(dev);
525 dma_addr_t ret = DMA_ERROR_CODE;
526
527 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
528 atomic_inc(&viodev->cmo.allocs_failed);
529 return ret;
530 }
531
532 ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
533 if (unlikely(dma_mapping_error(ret))) {
534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
535 atomic_inc(&viodev->cmo.allocs_failed);
536 }
537
538 return ret;
539}
540
541static void vio_dma_iommu_unmap_single(struct device *dev,
542 dma_addr_t dma_handle, size_t size,
543 enum dma_data_direction direction,
544 struct dma_attrs *attrs)
545{
546 struct vio_dev *viodev = to_vio_dev(dev);
547
548 dma_iommu_ops.unmap_single(dev, dma_handle, size, direction, attrs);
549
550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
551}
552
553static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
554 int nelems, enum dma_data_direction direction,
555 struct dma_attrs *attrs)
556{
557 struct vio_dev *viodev = to_vio_dev(dev);
558 struct scatterlist *sgl;
559 int ret, count = 0;
560 size_t alloc_size = 0;
561
562 for (sgl = sglist; count < nelems; count++, sgl++)
563 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
564
565 if (vio_cmo_alloc(viodev, alloc_size)) {
566 atomic_inc(&viodev->cmo.allocs_failed);
567 return 0;
568 }
569
570 ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
571
572 if (unlikely(!ret)) {
573 vio_cmo_dealloc(viodev, alloc_size);
574 atomic_inc(&viodev->cmo.allocs_failed);
575 }
576
577 for (sgl = sglist, count = 0; count < ret; count++, sgl++)
578 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
579 if (alloc_size)
580 vio_cmo_dealloc(viodev, alloc_size);
581
582 return ret;
583}
584
585static void vio_dma_iommu_unmap_sg(struct device *dev,
586 struct scatterlist *sglist, int nelems,
587 enum dma_data_direction direction,
588 struct dma_attrs *attrs)
589{
590 struct vio_dev *viodev = to_vio_dev(dev);
591 struct scatterlist *sgl;
592 size_t alloc_size = 0;
593 int count = 0;
594
595 for (sgl = sglist; count < nelems; count++, sgl++)
596 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
597
598 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
599
600 vio_cmo_dealloc(viodev, alloc_size);
601}
602
603struct dma_mapping_ops vio_dma_mapping_ops = {
604 .alloc_coherent = vio_dma_iommu_alloc_coherent,
605 .free_coherent = vio_dma_iommu_free_coherent,
606 .map_single = vio_dma_iommu_map_single,
607 .unmap_single = vio_dma_iommu_unmap_single,
608 .map_sg = vio_dma_iommu_map_sg,
609 .unmap_sg = vio_dma_iommu_unmap_sg,
610};
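From a driver's point of view nothing changes at the call site; the bus substitutes vio_dma_mapping_ops (see vio_cmo_set_dma_ops() further down), so ordinary DMA API calls are charged against CMO transparently. A minimal sketch, written against the single-argument dma_mapping_error() used elsewhere in this patch; the function name is illustrative:

	#include <linux/dma-mapping.h>
	#include <asm/vio.h>

	/* Illustrative only: map one buffer from a VIO driver under CMO. */
	static int example_map_buffer(struct vio_dev *viodev, void *buf,
				      size_t len, dma_addr_t *handle)
	{
		*handle = dma_map_single(&viodev->dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(*handle)) {
			/* vio_dma_iommu_map_single() has already rolled back
			 * the CMO charge and bumped viodev->cmo.allocs_failed,
			 * which the cmo_allocs_failed sysfs attribute reports. */
			return -ENOMEM;
		}
		return 0;
	}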
611
612/**
613 * vio_cmo_set_dev_desired - Set desired entitlement for a device
614 *
615 * @viodev: struct vio_dev for device to alter
 616 * @desired: new desired entitlement level in bytes
617 *
618 * For use by devices to request a change to their entitlement at runtime or
619 * through sysfs. The desired entitlement level is changed and a balancing
620 * of system resources is scheduled to run in the future.
621 */
622void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
623{
624 unsigned long flags;
625 struct vio_cmo_dev_entry *dev_ent;
626 int found = 0;
627
628 if (!firmware_has_feature(FW_FEATURE_CMO))
629 return;
630
631 spin_lock_irqsave(&vio_cmo.lock, flags);
632 if (desired < VIO_CMO_MIN_ENT)
633 desired = VIO_CMO_MIN_ENT;
634
635 /*
636 * Changes will not be made for devices not in the device list.
637 * If it is not in the device list, then no driver is loaded
638 * for the device and it can not receive entitlement.
639 */
640 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
641 if (viodev == dev_ent->viodev) {
642 found = 1;
643 break;
644 }
645 if (!found)
646 return;
647
648 /* Increase/decrease in desired device entitlement */
649 if (desired >= viodev->cmo.desired) {
 650 /* Just bump the bus and device values prior to a balance */
651 vio_cmo.desired += desired - viodev->cmo.desired;
652 viodev->cmo.desired = desired;
653 } else {
654 /* Decrease bus and device values for desired entitlement */
655 vio_cmo.desired -= viodev->cmo.desired - desired;
656 viodev->cmo.desired = desired;
657 /*
658 * If less entitlement is desired than current entitlement, move
659 * any reserve memory in the change region to the excess pool.
660 */
661 if (viodev->cmo.entitled > desired) {
662 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
663 vio_cmo.excess.size += viodev->cmo.entitled - desired;
664 /*
665 * If entitlement moving from the reserve pool to the
666 * excess pool is currently unused, add to the excess
667 * free counter.
668 */
669 if (viodev->cmo.allocated < viodev->cmo.entitled)
670 vio_cmo.excess.free += viodev->cmo.entitled -
671 max(viodev->cmo.allocated, desired);
672 viodev->cmo.entitled = desired;
673 }
674 }
675 schedule_delayed_work(&vio_cmo.balance_q, 0);
676 spin_unlock_irqrestore(&vio_cmo.lock, flags);
677}
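A driver-side sketch of asking for more entitlement at runtime; the function name is illustrative, and the cmo_desired sysfs attribute shown later funnels into the same call:

	#include <asm/iommu.h>
	#include <asm/vio.h>

	/* Illustrative only: ask for enough entitlement to cover a larger ring.
	 * Values below VIO_CMO_MIN_ENT are clamped by the bus, and the change
	 * takes effect at the next balance pass rather than immediately. */
	static void example_grow_ring(struct vio_dev *viodev, size_t ring_bytes)
	{
		vio_cmo_set_dev_desired(viodev, IOMMU_PAGE_ALIGN(ring_bytes));
	}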
678
679/**
680 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
681 *
682 * @viodev - Pointer to struct vio_dev for device
683 *
 684 * Determine the device's IO memory entitlement needs, attempting
685 * to satisfy the system minimum entitlement at first and scheduling
686 * a balance operation to take care of the rest at a later time.
687 *
688 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
689 * -ENOMEM when entitlement is not available for device or
690 * device entry.
691 *
692 */
693static int vio_cmo_bus_probe(struct vio_dev *viodev)
694{
695 struct vio_cmo_dev_entry *dev_ent;
696 struct device *dev = &viodev->dev;
697 struct vio_driver *viodrv = to_vio_driver(dev->driver);
698 unsigned long flags;
699 size_t size;
700
701 /*
702 * Check to see that device has a DMA window and configure
703 * entitlement for the device.
704 */
705 if (of_get_property(viodev->dev.archdata.of_node,
706 "ibm,my-dma-window", NULL)) {
707 /* Check that the driver is CMO enabled and get desired DMA */
708 if (!viodrv->get_desired_dma) {
709 dev_err(dev, "%s: device driver does not support CMO\n",
710 __func__);
711 return -EINVAL;
712 }
713
714 viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
715 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
716 viodev->cmo.desired = VIO_CMO_MIN_ENT;
717 size = VIO_CMO_MIN_ENT;
718
719 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
720 GFP_KERNEL);
721 if (!dev_ent)
722 return -ENOMEM;
723
724 dev_ent->viodev = viodev;
725 spin_lock_irqsave(&vio_cmo.lock, flags);
726 list_add(&dev_ent->list, &vio_cmo.device_list);
727 } else {
728 viodev->cmo.desired = 0;
729 size = 0;
730 spin_lock_irqsave(&vio_cmo.lock, flags);
731 }
732
733 /*
734 * If the needs for vio_cmo.min have not changed since they
735 * were last set, the number of devices in the OF tree has
736 * been constant and the IO memory for this is already in
737 * the reserve pool.
738 */
739 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
740 VIO_CMO_MIN_ENT)) {
 741 /* Update desired entitlement if the device requires it */
742 if (size)
743 vio_cmo.desired += (viodev->cmo.desired -
744 VIO_CMO_MIN_ENT);
745 } else {
746 size_t tmp;
747
748 tmp = vio_cmo.spare + vio_cmo.excess.free;
749 if (tmp < size) {
750 dev_err(dev, "%s: insufficient free "
751 "entitlement to add device. "
752 "Need %lu, have %lu\n", __func__,
753 size, (vio_cmo.spare + tmp));
754 spin_unlock_irqrestore(&vio_cmo.lock, flags);
755 return -ENOMEM;
756 }
757
758 /* Use excess pool first to fulfill request */
759 tmp = min(size, vio_cmo.excess.free);
760 vio_cmo.excess.free -= tmp;
761 vio_cmo.excess.size -= tmp;
762 vio_cmo.reserve.size += tmp;
763
764 /* Use spare if excess pool was insufficient */
765 vio_cmo.spare -= size - tmp;
766
767 /* Update bus accounting */
768 vio_cmo.min += size;
769 vio_cmo.desired += viodev->cmo.desired;
770 }
771 spin_unlock_irqrestore(&vio_cmo.lock, flags);
772 return 0;
773}
774
775/**
776 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
777 *
778 * @viodev - Pointer to struct vio_dev for device
779 *
780 * Remove the device from the cmo device list. The minimum entitlement
781 * will be reserved for the device as long as it is in the system. The
782 * rest of the entitlement the device had been allocated will be returned
783 * to the system.
784 */
785static void vio_cmo_bus_remove(struct vio_dev *viodev)
786{
787 struct vio_cmo_dev_entry *dev_ent;
788 unsigned long flags;
789 size_t tmp;
790
791 spin_lock_irqsave(&vio_cmo.lock, flags);
792 if (viodev->cmo.allocated) {
793 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
794 "allocated after remove operation.\n",
795 __func__, viodev->cmo.allocated);
796 BUG();
797 }
798
799 /*
800 * Remove the device from the device list being maintained for
801 * CMO enabled devices.
802 */
803 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
804 if (viodev == dev_ent->viodev) {
805 list_del(&dev_ent->list);
806 kfree(dev_ent);
807 break;
808 }
809
810 /*
811 * Devices may not require any entitlement and they do not need
812 * to be processed. Otherwise, return the device's entitlement
813 * back to the pools.
814 */
815 if (viodev->cmo.entitled) {
816 /*
 817 * This device has not yet left the OF tree; its
 818 * minimum entitlement remains in vio_cmo.min and
819 * vio_cmo.desired
820 */
821 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
822
823 /*
824 * Save min allocation for device in reserve as long
825 * as it exists in OF tree as determined by later
826 * balance operation
827 */
828 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
829
830 /* Replenish spare from freed reserve pool */
831 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
832 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
833 vio_cmo.spare));
834 vio_cmo.spare += tmp;
835 viodev->cmo.entitled -= tmp;
836 }
837
838 /* Remaining reserve goes to excess pool */
839 vio_cmo.excess.size += viodev->cmo.entitled;
840 vio_cmo.excess.free += viodev->cmo.entitled;
841 vio_cmo.reserve.size -= viodev->cmo.entitled;
842
843 /*
844 * Until the device is removed it will keep a
845 * minimum entitlement; this will guarantee that
 846 * a module unload/load cycle will succeed.
847 */
848 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
849 viodev->cmo.desired = VIO_CMO_MIN_ENT;
850 atomic_set(&viodev->cmo.allocs_failed, 0);
851 }
852
853 spin_unlock_irqrestore(&vio_cmo.lock, flags);
854}
855
856static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
857{
858 vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
859 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
860}
861
862/**
863 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
864 *
865 * Set up the reserve and excess entitlement pools based on available
866 * system entitlement and the number of devices in the OF tree that
867 * require entitlement in the reserve pool.
868 */
869static void vio_cmo_bus_init(void)
870{
871 struct hvcall_mpp_data mpp_data;
872 int err;
873
874 memset(&vio_cmo, 0, sizeof(struct vio_cmo));
875 spin_lock_init(&vio_cmo.lock);
876 INIT_LIST_HEAD(&vio_cmo.device_list);
877 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
878
879 /* Get current system entitlement */
880 err = h_get_mpp(&mpp_data);
881
882 /*
 883 * On failure, continue with entitlement set to 0; the bus will panic()
 884 * later when the spare is reserved.
885 */
886 if (err != H_SUCCESS) {
887 printk(KERN_ERR "%s: unable to determine system IO "\
888 "entitlement. (%d)\n", __func__, err);
889 vio_cmo.entitled = 0;
890 } else {
891 vio_cmo.entitled = mpp_data.entitled_mem;
892 }
893
894 /* Set reservation and check against entitlement */
895 vio_cmo.spare = VIO_CMO_MIN_ENT;
896 vio_cmo.reserve.size = vio_cmo.spare;
897 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
898 VIO_CMO_MIN_ENT);
899 if (vio_cmo.reserve.size > vio_cmo.entitled) {
900 printk(KERN_ERR "%s: insufficient system entitlement\n",
901 __func__);
902 panic("%s: Insufficient system entitlement", __func__);
903 }
904
905 /* Set the remaining accounting variables */
906 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
907 vio_cmo.excess.free = vio_cmo.excess.size;
908 vio_cmo.min = vio_cmo.reserve.size;
909 vio_cmo.desired = vio_cmo.reserve.size;
910}
911
912/* sysfs device functions and data structures for CMO */
913
914#define viodev_cmo_rd_attr(name) \
915static ssize_t viodev_cmo_##name##_show(struct device *dev, \
916 struct device_attribute *attr, \
917 char *buf) \
918{ \
919 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
920}
921
922static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
923 struct device_attribute *attr, char *buf)
924{
925 struct vio_dev *viodev = to_vio_dev(dev);
926 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
927}
928
929static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
930 struct device_attribute *attr, const char *buf, size_t count)
931{
932 struct vio_dev *viodev = to_vio_dev(dev);
933 atomic_set(&viodev->cmo.allocs_failed, 0);
934 return count;
935}
936
937static ssize_t viodev_cmo_desired_set(struct device *dev,
938 struct device_attribute *attr, const char *buf, size_t count)
939{
940 struct vio_dev *viodev = to_vio_dev(dev);
941 size_t new_desired;
942 int ret;
943
944 ret = strict_strtoul(buf, 10, &new_desired);
945 if (ret)
946 return ret;
947
948 vio_cmo_set_dev_desired(viodev, new_desired);
949 return count;
950}
951
952viodev_cmo_rd_attr(desired);
953viodev_cmo_rd_attr(entitled);
954viodev_cmo_rd_attr(allocated);
955
956static ssize_t name_show(struct device *, struct device_attribute *, char *);
957static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
958static struct device_attribute vio_cmo_dev_attrs[] = {
959 __ATTR_RO(name),
960 __ATTR_RO(devspec),
961 __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
962 viodev_cmo_desired_show, viodev_cmo_desired_set),
963 __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
964 __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
965 __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
966 viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
967 __ATTR_NULL
968};
969
970/* sysfs bus functions and data structures for CMO */
971
972#define viobus_cmo_rd_attr(name) \
973static ssize_t \
974viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \
975{ \
976 return sprintf(buf, "%lu\n", vio_cmo.name); \
977}
978
979#define viobus_cmo_pool_rd_attr(name, var) \
980static ssize_t \
981viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \
982{ \
983 return sprintf(buf, "%lu\n", vio_cmo.name.var); \
984}
985
986static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
987 size_t count)
988{
989 unsigned long flags;
990
991 spin_lock_irqsave(&vio_cmo.lock, flags);
992 vio_cmo.high = vio_cmo.curr;
993 spin_unlock_irqrestore(&vio_cmo.lock, flags);
994
995 return count;
996}
997
998viobus_cmo_rd_attr(entitled);
999viobus_cmo_pool_rd_attr(reserve, size);
1000viobus_cmo_pool_rd_attr(excess, size);
1001viobus_cmo_pool_rd_attr(excess, free);
1002viobus_cmo_rd_attr(spare);
1003viobus_cmo_rd_attr(min);
1004viobus_cmo_rd_attr(desired);
1005viobus_cmo_rd_attr(curr);
1006viobus_cmo_rd_attr(high);
1007
1008static struct bus_attribute vio_cmo_bus_attrs[] = {
1009 __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
1010 __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
1011 __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
1012 __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
1013 __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
1014 __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
1015 __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
1016 __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
1017 __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
1018 viobus_cmo_high_show, viobus_cmo_high_reset),
1019 __ATTR_NULL
1020};
1021
1022static void vio_cmo_sysfs_init(void)
1023{
1024 vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
1025 vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
1026}
1027#else /* CONFIG_PPC_SMLPAR */
1028/* Dummy functions for iSeries platform */
1029int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
1030void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1031static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1032static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1033static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1034static void vio_cmo_bus_init() {}
1035static void vio_cmo_sysfs_init() { }
1036#endif /* CONFIG_PPC_SMLPAR */
1037EXPORT_SYMBOL(vio_cmo_entitlement_update);
1038EXPORT_SYMBOL(vio_cmo_set_dev_desired);
1039
49static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 1040static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
50{ 1041{
51 const unsigned char *dma_window; 1042 const unsigned char *dma_window;
@@ -114,8 +1105,17 @@ static int vio_bus_probe(struct device *dev)
114 return error; 1105 return error;
115 1106
116 id = vio_match_device(viodrv->id_table, viodev); 1107 id = vio_match_device(viodrv->id_table, viodev);
117 if (id) 1108 if (id) {
1109 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1110 if (firmware_has_feature(FW_FEATURE_CMO)) {
1111 error = vio_cmo_bus_probe(viodev);
1112 if (error)
1113 return error;
1114 }
118 error = viodrv->probe(viodev, id); 1115 error = viodrv->probe(viodev, id);
1116 if (error)
1117 vio_cmo_bus_remove(viodev);
1118 }
119 1119
120 return error; 1120 return error;
121} 1121}
@@ -125,12 +1125,23 @@ static int vio_bus_remove(struct device *dev)
125{ 1125{
126 struct vio_dev *viodev = to_vio_dev(dev); 1126 struct vio_dev *viodev = to_vio_dev(dev);
127 struct vio_driver *viodrv = to_vio_driver(dev->driver); 1127 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1128 struct device *devptr;
1129 int ret = 1;
1130
1131 /*
1132 * Hold a reference to the device after the remove function is called
1133 * to allow for CMO accounting cleanup for the device.
1134 */
1135 devptr = get_device(dev);
128 1136
129 if (viodrv->remove) 1137 if (viodrv->remove)
130 return viodrv->remove(viodev); 1138 ret = viodrv->remove(viodev);
1139
1140 if (!ret && firmware_has_feature(FW_FEATURE_CMO))
1141 vio_cmo_bus_remove(viodev);
131 1142
132 /* driver can't remove */ 1143 put_device(devptr);
133 return 1; 1144 return ret;
134} 1145}
135 1146
136/** 1147/**
@@ -215,7 +1226,11 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
215 viodev->unit_address = *unit_address; 1226 viodev->unit_address = *unit_address;
216 } 1227 }
217 viodev->dev.archdata.of_node = of_node_get(of_node); 1228 viodev->dev.archdata.of_node = of_node_get(of_node);
218 viodev->dev.archdata.dma_ops = &dma_iommu_ops; 1229
1230 if (firmware_has_feature(FW_FEATURE_CMO))
1231 vio_cmo_set_dma_ops(viodev);
1232 else
1233 viodev->dev.archdata.dma_ops = &dma_iommu_ops;
219 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev); 1234 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
220 viodev->dev.archdata.numa_node = of_node_to_nid(of_node); 1235 viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
221 1236
@@ -245,6 +1260,9 @@ static int __init vio_bus_init(void)
245 int err; 1260 int err;
246 struct device_node *node_vroot; 1261 struct device_node *node_vroot;
247 1262
1263 if (firmware_has_feature(FW_FEATURE_CMO))
1264 vio_cmo_sysfs_init();
1265
248 err = bus_register(&vio_bus_type); 1266 err = bus_register(&vio_bus_type);
249 if (err) { 1267 if (err) {
250 printk(KERN_ERR "failed to register VIO bus\n"); 1268 printk(KERN_ERR "failed to register VIO bus\n");
@@ -262,6 +1280,9 @@ static int __init vio_bus_init(void)
262 return err; 1280 return err;
263 } 1281 }
264 1282
1283 if (firmware_has_feature(FW_FEATURE_CMO))
1284 vio_cmo_bus_init();
1285
265 node_vroot = of_find_node_by_name(NULL, "vdevice"); 1286 node_vroot = of_find_node_by_name(NULL, "vdevice");
266 if (node_vroot) { 1287 if (node_vroot) {
267 struct device_node *of_node; 1288 struct device_node *of_node;
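Putting the driver-facing pieces together, a minimal sketch of a CMO-aware VIO driver. The get_desired_dma() hook is the one vio_cmo_bus_probe() checks for above; its exact prototype and the vio_driver field name are assumed here, since the header change is not part of this excerpt, and the device id strings are made up:

	#include <linux/module.h>
	#include <asm/vio.h>

	/* Tell the bus how much IO memory (in bytes) this device would like;
	 * vio_cmo_bus_probe() rounds it to IOMMU pages and never grants less
	 * than VIO_CMO_MIN_ENT. */
	static unsigned long example_get_desired_dma(struct vio_dev *vdev)
	{
		return 256 * 4096;	/* e.g. a 256-entry ring of 4 KiB buffers */
	}

	static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
	{
		/* DMA mappings made from here on are charged to this device's
		 * CMO entitlement via vio_dma_mapping_ops. */
		return 0;
	}

	static int example_remove(struct vio_dev *vdev)
	{
		return 0;	/* zero lets vio_bus_remove() release CMO state */
	}

	static struct vio_device_id example_ids[] = {
		{ "example", "IBM,example" },	/* made-up type/compat strings */
		{ "", "" }
	};

	static struct vio_driver example_driver = {
		.id_table	 = example_ids,
		.probe		 = example_probe,
		.remove		 = example_remove,
		.get_desired_dma = example_get_desired_dma,
		.driver		 = {
			.name	= "example",
			.owner	= THIS_MODULE,
		},
	};

Registration would go through vio_register_driver(&example_driver) from the module init path, at which point the bus charges the device VIO_CMO_MIN_ENT up front and folds the rest of the desired amount into the next balance pass.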
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index a914411bced5..4a8ce62fe112 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -85,7 +85,7 @@ SECTIONS
85 85
86 /* The dummy segment contents for the bug workaround mentioned above 86 /* The dummy segment contents for the bug workaround mentioned above
87 near PHDRS. */ 87 near PHDRS. */
88 .dummy : { 88 .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
89 LONG(0xf177) 89 LONG(0xf177)
90 } :kernel :dummy 90 } :kernel :dummy
91 91
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 0559fe086eb4..7c975d43e3f3 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/mm.h>
13#include <asm/page.h> 14#include <asm/page.h>
14#include <asm/code-patching.h> 15#include <asm/code-patching.h>
15 16
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 1707d00331fc..565b7a237c84 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -100,31 +100,6 @@ static int store_updates_sp(struct pt_regs *regs)
100 return 0; 100 return 0;
101} 101}
102 102
103#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
104static void do_dabr(struct pt_regs *regs, unsigned long address,
105 unsigned long error_code)
106{
107 siginfo_t info;
108
109 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
110 11, SIGSEGV) == NOTIFY_STOP)
111 return;
112
113 if (debugger_dabr_match(regs))
114 return;
115
116 /* Clear the DABR */
117 set_dabr(0);
118
119 /* Deliver the signal to userspace */
120 info.si_signo = SIGTRAP;
121 info.si_errno = 0;
122 info.si_code = TRAP_HWBKPT;
123 info.si_addr = (void __user *)address;
124 force_sig_info(SIGTRAP, &info, current);
125}
126#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
127
128/* 103/*
129 * For 600- and 800-family processors, the error_code parameter is DSISR 104 * For 600- and 800-family processors, the error_code parameter is DSISR
130 * for a data fault, SRR1 for an instruction fault. For 400-family processors 105 * for a data fault, SRR1 for an instruction fault. For 400-family processors
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8d3b58ebd38e..5ce5a4dcd008 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -68,6 +68,7 @@
68 68
69#define KB (1024) 69#define KB (1024)
70#define MB (1024*KB) 70#define MB (1024*KB)
71#define GB (1024L*MB)
71 72
72/* 73/*
73 * Note: pte --> Linux PTE 74 * Note: pte --> Linux PTE
@@ -102,7 +103,6 @@ int mmu_kernel_ssize = MMU_SEGSIZE_256M;
102int mmu_highuser_ssize = MMU_SEGSIZE_256M; 103int mmu_highuser_ssize = MMU_SEGSIZE_256M;
103u16 mmu_slb_size = 64; 104u16 mmu_slb_size = 64;
104#ifdef CONFIG_HUGETLB_PAGE 105#ifdef CONFIG_HUGETLB_PAGE
105int mmu_huge_psize = MMU_PAGE_16M;
106unsigned int HPAGE_SHIFT; 106unsigned int HPAGE_SHIFT;
107#endif 107#endif
108#ifdef CONFIG_PPC_64K_PAGES 108#ifdef CONFIG_PPC_64K_PAGES
@@ -329,6 +329,44 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
329 return 0; 329 return 0;
330} 330}
331 331
332/* Scan for 16G memory blocks that have been set aside for huge pages
333 * and reserve those blocks for 16G huge pages.
334 */
335static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
336 const char *uname, int depth,
337 void *data) {
338 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
339 unsigned long *addr_prop;
340 u32 *page_count_prop;
341 unsigned int expected_pages;
342 long unsigned int phys_addr;
343 long unsigned int block_size;
344
345 /* We are scanning "memory" nodes only */
346 if (type == NULL || strcmp(type, "memory") != 0)
347 return 0;
348
349 /* This property is the log base 2 of the number of virtual pages that
350 * will represent this memory block. */
351 page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
352 if (page_count_prop == NULL)
353 return 0;
354 expected_pages = (1 << page_count_prop[0]);
355 addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
356 if (addr_prop == NULL)
357 return 0;
358 phys_addr = addr_prop[0];
359 block_size = addr_prop[1];
360 if (block_size != (16 * GB))
361 return 0;
362 printk(KERN_INFO "Huge page(16GB) memory: "
363 "addr = 0x%lX size = 0x%lX pages = %d\n",
364 phys_addr, block_size, expected_pages);
365 lmb_reserve(phys_addr, block_size * expected_pages);
366 add_gpage(phys_addr, block_size, expected_pages);
367 return 0;
368}
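A worked example of what the scan does with a matching node (values are illustrative):

	/*
	 * A memory node with
	 *     device_type        = "memory"
	 *     reg                = <base, 0x400000000>   (one 16 GB block)
	 *     ibm,expected#pages = <2>                   (log2, so 1 << 2 = 4 pages)
	 * passes the block_size == 16 * GB check and results in
	 *     lmb_reserve(base, 4 * 16 GB);
	 *     add_gpage(base, 16 GB, 4);
	 * i.e. 64 GB starting at "base" is pulled out of the LMB/bootmem
	 * allocator and queued as gigantic pages for hugetlbfs to claim later.
	 */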
369
332static void __init htab_init_page_sizes(void) 370static void __init htab_init_page_sizes(void)
333{ 371{
334 int rc; 372 int rc;
@@ -418,15 +456,18 @@ static void __init htab_init_page_sizes(void)
418 ); 456 );
419 457
420#ifdef CONFIG_HUGETLB_PAGE 458#ifdef CONFIG_HUGETLB_PAGE
421 /* Init large page size. Currently, we pick 16M or 1M depending 459 /* Reserve 16G huge page memory sections for huge pages */
460 of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
461
462/* Set default large page size. Currently, we pick 16M or 1M depending
422 * on what is available 463 * on what is available
423 */ 464 */
424 if (mmu_psize_defs[MMU_PAGE_16M].shift) 465 if (mmu_psize_defs[MMU_PAGE_16M].shift)
425 set_huge_psize(MMU_PAGE_16M); 466 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
426 /* With 4k/4level pagetables, we can't (for now) cope with a 467 /* With 4k/4level pagetables, we can't (for now) cope with a
427 * huge page size < PMD_SIZE */ 468 * huge page size < PMD_SIZE */
428 else if (mmu_psize_defs[MMU_PAGE_1M].shift) 469 else if (mmu_psize_defs[MMU_PAGE_1M].shift)
429 set_huge_psize(MMU_PAGE_1M); 470 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
430#endif /* CONFIG_HUGETLB_PAGE */ 471#endif /* CONFIG_HUGETLB_PAGE */
431} 472}
432 473
@@ -847,7 +888,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
847 888
848#ifdef CONFIG_HUGETLB_PAGE 889#ifdef CONFIG_HUGETLB_PAGE
849 /* Handle hugepage regions */ 890 /* Handle hugepage regions */
850 if (HPAGE_SHIFT && psize == mmu_huge_psize) { 891 if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
851 DBG_LOW(" -> huge page !\n"); 892 DBG_LOW(" -> huge page !\n");
852 return hash_huge_page(mm, access, ea, vsid, local, trap); 893 return hash_huge_page(mm, access, ea, vsid, local, trap);
853 } 894 }
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0d12fba31bc5..fb42c4dd3217 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -24,21 +24,43 @@
24#include <asm/cputable.h> 24#include <asm/cputable.h>
25#include <asm/spu.h> 25#include <asm/spu.h>
26 26
27#define HPAGE_SHIFT_64K 16 27#define PAGE_SHIFT_64K 16
28#define HPAGE_SHIFT_16M 24 28#define PAGE_SHIFT_16M 24
29#define PAGE_SHIFT_16G 34
29 30
30#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT) 31#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT)
31#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT) 32#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
33#define MAX_NUMBER_GPAGES 1024
32 34
33unsigned int hugepte_shift; 35/* Tracks the 16G pages after the device tree is scanned and before the
34#define PTRS_PER_HUGEPTE (1 << hugepte_shift) 36 * huge_boot_pages list is ready. */
35#define HUGEPTE_TABLE_SIZE (sizeof(pte_t) << hugepte_shift) 37static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
38static unsigned nr_gpages;
36 39
37#define HUGEPD_SHIFT (HPAGE_SHIFT + hugepte_shift) 40/* Array of valid huge page sizes - a non-zero value (hugepte_shift) is
38#define HUGEPD_SIZE (1UL << HUGEPD_SHIFT) 41 * stored for the huge page sizes that are valid.
39#define HUGEPD_MASK (~(HUGEPD_SIZE-1)) 42 */
43unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
44
45#define hugepte_shift mmu_huge_psizes
46#define PTRS_PER_HUGEPTE(psize) (1 << hugepte_shift[psize])
47#define HUGEPTE_TABLE_SIZE(psize) (sizeof(pte_t) << hugepte_shift[psize])
48
49#define HUGEPD_SHIFT(psize) (mmu_psize_to_shift(psize) \
50 + hugepte_shift[psize])
51#define HUGEPD_SIZE(psize) (1UL << HUGEPD_SHIFT(psize))
52#define HUGEPD_MASK(psize) (~(HUGEPD_SIZE(psize)-1))
53
54/* Subtract one from array size because we don't need a cache for 4K since
 55 * it is not a huge page size */
56#define huge_pgtable_cache(psize) (pgtable_cache[HUGEPTE_CACHE_NUM \
57 + psize-1])
58#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize])
40 59
41#define huge_pgtable_cache (pgtable_cache[HUGEPTE_CACHE_NUM]) 60static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
61 "unused_4K", "hugepte_cache_64K", "unused_64K_AP",
62 "hugepte_cache_1M", "hugepte_cache_16M", "hugepte_cache_16G"
63};
42 64
43/* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad() 65/* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad()
44 * will choke on pointers to hugepte tables, which is handy for 66 * will choke on pointers to hugepte tables, which is handy for
@@ -49,24 +71,49 @@ typedef struct { unsigned long pd; } hugepd_t;
49 71
50#define hugepd_none(hpd) ((hpd).pd == 0) 72#define hugepd_none(hpd) ((hpd).pd == 0)
51 73
74static inline int shift_to_mmu_psize(unsigned int shift)
75{
76 switch (shift) {
77#ifndef CONFIG_PPC_64K_PAGES
78 case PAGE_SHIFT_64K:
79 return MMU_PAGE_64K;
80#endif
81 case PAGE_SHIFT_16M:
82 return MMU_PAGE_16M;
83 case PAGE_SHIFT_16G:
84 return MMU_PAGE_16G;
85 }
86 return -1;
87}
88
89static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
90{
91 if (mmu_psize_defs[mmu_psize].shift)
92 return mmu_psize_defs[mmu_psize].shift;
93 BUG();
94}
95
52static inline pte_t *hugepd_page(hugepd_t hpd) 96static inline pte_t *hugepd_page(hugepd_t hpd)
53{ 97{
54 BUG_ON(!(hpd.pd & HUGEPD_OK)); 98 BUG_ON(!(hpd.pd & HUGEPD_OK));
55 return (pte_t *)(hpd.pd & ~HUGEPD_OK); 99 return (pte_t *)(hpd.pd & ~HUGEPD_OK);
56} 100}
57 101
58static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr) 102static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
103 struct hstate *hstate)
59{ 104{
60 unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1)); 105 unsigned int shift = huge_page_shift(hstate);
106 int psize = shift_to_mmu_psize(shift);
107 unsigned long idx = ((addr >> shift) & (PTRS_PER_HUGEPTE(psize)-1));
61 pte_t *dir = hugepd_page(*hpdp); 108 pte_t *dir = hugepd_page(*hpdp);
62 109
63 return dir + idx; 110 return dir + idx;
64} 111}
65 112
66static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, 113static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
67 unsigned long address) 114 unsigned long address, unsigned int psize)
68{ 115{
69 pte_t *new = kmem_cache_alloc(huge_pgtable_cache, 116 pte_t *new = kmem_cache_alloc(huge_pgtable_cache(psize),
70 GFP_KERNEL|__GFP_REPEAT); 117 GFP_KERNEL|__GFP_REPEAT);
71 118
72 if (! new) 119 if (! new)
@@ -74,7 +121,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
74 121
75 spin_lock(&mm->page_table_lock); 122 spin_lock(&mm->page_table_lock);
76 if (!hugepd_none(*hpdp)) 123 if (!hugepd_none(*hpdp))
77 kmem_cache_free(huge_pgtable_cache, new); 124 kmem_cache_free(huge_pgtable_cache(psize), new);
78 else 125 else
79 hpdp->pd = (unsigned long)new | HUGEPD_OK; 126 hpdp->pd = (unsigned long)new | HUGEPD_OK;
80 spin_unlock(&mm->page_table_lock); 127 spin_unlock(&mm->page_table_lock);
@@ -83,27 +130,60 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
83 130
84/* Base page size affects how we walk hugetlb page tables */ 131/* Base page size affects how we walk hugetlb page tables */
85#ifdef CONFIG_PPC_64K_PAGES 132#ifdef CONFIG_PPC_64K_PAGES
86#define hpmd_offset(pud, addr) pmd_offset(pud, addr) 133#define hpmd_offset(pud, addr, h) pmd_offset(pud, addr)
87#define hpmd_alloc(mm, pud, addr) pmd_alloc(mm, pud, addr) 134#define hpmd_alloc(mm, pud, addr, h) pmd_alloc(mm, pud, addr)
88#else 135#else
89static inline 136static inline
90pmd_t *hpmd_offset(pud_t *pud, unsigned long addr) 137pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
91{ 138{
92 if (HPAGE_SHIFT == HPAGE_SHIFT_64K) 139 if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
93 return pmd_offset(pud, addr); 140 return pmd_offset(pud, addr);
94 else 141 else
95 return (pmd_t *) pud; 142 return (pmd_t *) pud;
96} 143}
97static inline 144static inline
98pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr) 145pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
146 struct hstate *hstate)
99{ 147{
100 if (HPAGE_SHIFT == HPAGE_SHIFT_64K) 148 if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
101 return pmd_alloc(mm, pud, addr); 149 return pmd_alloc(mm, pud, addr);
102 else 150 else
103 return (pmd_t *) pud; 151 return (pmd_t *) pud;
104} 152}
105#endif 153#endif
106 154
155/* Build list of addresses of gigantic pages. This function is used in early
 156 * boot before the buddy or bootmem allocator is set up.
157 */
158void add_gpage(unsigned long addr, unsigned long page_size,
159 unsigned long number_of_pages)
160{
161 if (!addr)
162 return;
163 while (number_of_pages > 0) {
164 gpage_freearray[nr_gpages] = addr;
165 nr_gpages++;
166 number_of_pages--;
167 addr += page_size;
168 }
169}
170
171/* Moves the gigantic page addresses from the temporary list to the
172 * huge_boot_pages list.
173 */
174int alloc_bootmem_huge_page(struct hstate *hstate)
175{
176 struct huge_bootmem_page *m;
177 if (nr_gpages == 0)
178 return 0;
179 m = phys_to_virt(gpage_freearray[--nr_gpages]);
180 gpage_freearray[nr_gpages] = 0;
181 list_add(&m->list, &huge_boot_pages);
182 m->hstate = hstate;
183 return 1;
184}
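An illustrative trace tying the two helpers together (addresses made up):

	/*
	 * add_gpage(0x2000000000UL, 16UL << 30, 2) during the device tree scan
	 * leaves gpage_freearray = { 0x2000000000, 0x2400000000 }, nr_gpages = 2.
	 *
	 * Each later alloc_bootmem_huge_page() call pops the highest index:
	 *   1st call -> page at 0x2400000000, nr_gpages = 1
	 *   2nd call -> page at 0x2000000000, nr_gpages = 0
	 *   3rd call -> returns 0 (no gigantic pages left to hand over)
	 */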
185
186
107/* Modelled after find_linux_pte() */ 187/* Modelled after find_linux_pte() */
108pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 188pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
109{ 189{
@@ -111,39 +191,52 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
111 pud_t *pu; 191 pud_t *pu;
112 pmd_t *pm; 192 pmd_t *pm;
113 193
114 BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize); 194 unsigned int psize;
195 unsigned int shift;
196 unsigned long sz;
197 struct hstate *hstate;
198 psize = get_slice_psize(mm, addr);
199 shift = mmu_psize_to_shift(psize);
200 sz = ((1UL) << shift);
201 hstate = size_to_hstate(sz);
115 202
116 addr &= HPAGE_MASK; 203 addr &= hstate->mask;
117 204
118 pg = pgd_offset(mm, addr); 205 pg = pgd_offset(mm, addr);
119 if (!pgd_none(*pg)) { 206 if (!pgd_none(*pg)) {
120 pu = pud_offset(pg, addr); 207 pu = pud_offset(pg, addr);
121 if (!pud_none(*pu)) { 208 if (!pud_none(*pu)) {
122 pm = hpmd_offset(pu, addr); 209 pm = hpmd_offset(pu, addr, hstate);
123 if (!pmd_none(*pm)) 210 if (!pmd_none(*pm))
124 return hugepte_offset((hugepd_t *)pm, addr); 211 return hugepte_offset((hugepd_t *)pm, addr,
212 hstate);
125 } 213 }
126 } 214 }
127 215
128 return NULL; 216 return NULL;
129} 217}
130 218
131pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 219pte_t *huge_pte_alloc(struct mm_struct *mm,
220 unsigned long addr, unsigned long sz)
132{ 221{
133 pgd_t *pg; 222 pgd_t *pg;
134 pud_t *pu; 223 pud_t *pu;
135 pmd_t *pm; 224 pmd_t *pm;
136 hugepd_t *hpdp = NULL; 225 hugepd_t *hpdp = NULL;
226 struct hstate *hstate;
227 unsigned int psize;
228 hstate = size_to_hstate(sz);
137 229
138 BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize); 230 psize = get_slice_psize(mm, addr);
231 BUG_ON(!mmu_huge_psizes[psize]);
139 232
140 addr &= HPAGE_MASK; 233 addr &= hstate->mask;
141 234
142 pg = pgd_offset(mm, addr); 235 pg = pgd_offset(mm, addr);
143 pu = pud_alloc(mm, pg, addr); 236 pu = pud_alloc(mm, pg, addr);
144 237
145 if (pu) { 238 if (pu) {
146 pm = hpmd_alloc(mm, pu, addr); 239 pm = hpmd_alloc(mm, pu, addr, hstate);
147 if (pm) 240 if (pm)
148 hpdp = (hugepd_t *)pm; 241 hpdp = (hugepd_t *)pm;
149 } 242 }
@@ -151,10 +244,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
151 if (! hpdp) 244 if (! hpdp)
152 return NULL; 245 return NULL;
153 246
154 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr)) 247 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, psize))
155 return NULL; 248 return NULL;
156 249
157 return hugepte_offset(hpdp, addr); 250 return hugepte_offset(hpdp, addr, hstate);
158} 251}
159 252
160int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) 253int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
@@ -162,19 +255,22 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
162 return 0; 255 return 0;
163} 256}
164 257
165static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp) 258static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp,
259 unsigned int psize)
166{ 260{
167 pte_t *hugepte = hugepd_page(*hpdp); 261 pte_t *hugepte = hugepd_page(*hpdp);
168 262
169 hpdp->pd = 0; 263 hpdp->pd = 0;
170 tlb->need_flush = 1; 264 tlb->need_flush = 1;
171 pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM, 265 pgtable_free_tlb(tlb, pgtable_free_cache(hugepte,
266 HUGEPTE_CACHE_NUM+psize-1,
172 PGF_CACHENUM_MASK)); 267 PGF_CACHENUM_MASK));
173} 268}
174 269
175static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, 270static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
176 unsigned long addr, unsigned long end, 271 unsigned long addr, unsigned long end,
177 unsigned long floor, unsigned long ceiling) 272 unsigned long floor, unsigned long ceiling,
273 unsigned int psize)
178{ 274{
179 pmd_t *pmd; 275 pmd_t *pmd;
180 unsigned long next; 276 unsigned long next;
@@ -186,7 +282,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
186 next = pmd_addr_end(addr, end); 282 next = pmd_addr_end(addr, end);
187 if (pmd_none(*pmd)) 283 if (pmd_none(*pmd))
188 continue; 284 continue;
189 free_hugepte_range(tlb, (hugepd_t *)pmd); 285 free_hugepte_range(tlb, (hugepd_t *)pmd, psize);
190 } while (pmd++, addr = next, addr != end); 286 } while (pmd++, addr = next, addr != end);
191 287
192 start &= PUD_MASK; 288 start &= PUD_MASK;
@@ -212,6 +308,9 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
212 pud_t *pud; 308 pud_t *pud;
213 unsigned long next; 309 unsigned long next;
214 unsigned long start; 310 unsigned long start;
311 unsigned int shift;
312 unsigned int psize = get_slice_psize(tlb->mm, addr);
313 shift = mmu_psize_to_shift(psize);
215 314
216 start = addr; 315 start = addr;
217 pud = pud_offset(pgd, addr); 316 pud = pud_offset(pgd, addr);
@@ -220,16 +319,18 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
220#ifdef CONFIG_PPC_64K_PAGES 319#ifdef CONFIG_PPC_64K_PAGES
221 if (pud_none_or_clear_bad(pud)) 320 if (pud_none_or_clear_bad(pud))
222 continue; 321 continue;
223 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); 322 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling,
323 psize);
224#else 324#else
225 if (HPAGE_SHIFT == HPAGE_SHIFT_64K) { 325 if (shift == PAGE_SHIFT_64K) {
226 if (pud_none_or_clear_bad(pud)) 326 if (pud_none_or_clear_bad(pud))
227 continue; 327 continue;
228 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); 328 hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
329 ceiling, psize);
229 } else { 330 } else {
230 if (pud_none(*pud)) 331 if (pud_none(*pud))
231 continue; 332 continue;
232 free_hugepte_range(tlb, (hugepd_t *)pud); 333 free_hugepte_range(tlb, (hugepd_t *)pud, psize);
233 } 334 }
234#endif 335#endif
235 } while (pud++, addr = next, addr != end); 336 } while (pud++, addr = next, addr != end);
@@ -255,7 +356,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
255 * 356 *
256 * Must be called with pagetable lock held. 357 * Must be called with pagetable lock held.
257 */ 358 */
258void hugetlb_free_pgd_range(struct mmu_gather **tlb, 359void hugetlb_free_pgd_range(struct mmu_gather *tlb,
259 unsigned long addr, unsigned long end, 360 unsigned long addr, unsigned long end,
260 unsigned long floor, unsigned long ceiling) 361 unsigned long floor, unsigned long ceiling)
261{ 362{
@@ -297,31 +398,33 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
297 * now has no other vmas using it, so can be freed, we don't 398 * now has no other vmas using it, so can be freed, we don't
298 * bother to round floor or end up - the tests don't need that. 399 * bother to round floor or end up - the tests don't need that.
299 */ 400 */
401 unsigned int psize = get_slice_psize(tlb->mm, addr);
300 402
301 addr &= HUGEPD_MASK; 403 addr &= HUGEPD_MASK(psize);
302 if (addr < floor) { 404 if (addr < floor) {
303 addr += HUGEPD_SIZE; 405 addr += HUGEPD_SIZE(psize);
304 if (!addr) 406 if (!addr)
305 return; 407 return;
306 } 408 }
307 if (ceiling) { 409 if (ceiling) {
308 ceiling &= HUGEPD_MASK; 410 ceiling &= HUGEPD_MASK(psize);
309 if (!ceiling) 411 if (!ceiling)
310 return; 412 return;
311 } 413 }
312 if (end - 1 > ceiling - 1) 414 if (end - 1 > ceiling - 1)
313 end -= HUGEPD_SIZE; 415 end -= HUGEPD_SIZE(psize);
314 if (addr > end - 1) 416 if (addr > end - 1)
315 return; 417 return;
316 418
317 start = addr; 419 start = addr;
318 pgd = pgd_offset((*tlb)->mm, addr); 420 pgd = pgd_offset(tlb->mm, addr);
319 do { 421 do {
320 BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize); 422 psize = get_slice_psize(tlb->mm, addr);
423 BUG_ON(!mmu_huge_psizes[psize]);
321 next = pgd_addr_end(addr, end); 424 next = pgd_addr_end(addr, end);
322 if (pgd_none_or_clear_bad(pgd)) 425 if (pgd_none_or_clear_bad(pgd))
323 continue; 426 continue;
324 hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling); 427 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
325 } while (pgd++, addr = next, addr != end); 428 } while (pgd++, addr = next, addr != end);
326} 429}
327 430
@@ -334,7 +437,11 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
334 * necessary anymore if we make hpte_need_flush() get the 437 * necessary anymore if we make hpte_need_flush() get the
335 * page size from the slices 438 * page size from the slices
336 */ 439 */
337 pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1); 440 unsigned int psize = get_slice_psize(mm, addr);
441 unsigned int shift = mmu_psize_to_shift(psize);
442 unsigned long sz = ((1UL) << shift);
443 struct hstate *hstate = size_to_hstate(sz);
444 pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
338 } 445 }
339 *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); 446 *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
340} 447}
@@ -351,14 +458,19 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
351{ 458{
352 pte_t *ptep; 459 pte_t *ptep;
353 struct page *page; 460 struct page *page;
461 unsigned int mmu_psize = get_slice_psize(mm, address);
354 462
355 if (get_slice_psize(mm, address) != mmu_huge_psize) 463 /* Verify it is a huge page else bail. */
464 if (!mmu_huge_psizes[mmu_psize])
356 return ERR_PTR(-EINVAL); 465 return ERR_PTR(-EINVAL);
357 466
358 ptep = huge_pte_offset(mm, address); 467 ptep = huge_pte_offset(mm, address);
359 page = pte_page(*ptep); 468 page = pte_page(*ptep);
360 if (page) 469 if (page) {
361 page += (address % HPAGE_SIZE) / PAGE_SIZE; 470 unsigned int shift = mmu_psize_to_shift(mmu_psize);
471 unsigned long sz = ((1UL) << shift);
472 page += (address % sz) / PAGE_SIZE;
473 }
362 474
363 return page; 475 return page;
364} 476}
@@ -368,6 +480,11 @@ int pmd_huge(pmd_t pmd)
368 return 0; 480 return 0;
369} 481}
370 482
483int pud_huge(pud_t pud)
484{
485 return 0;
486}
487
371struct page * 488struct page *
372follow_huge_pmd(struct mm_struct *mm, unsigned long address, 489follow_huge_pmd(struct mm_struct *mm, unsigned long address,
373 pmd_t *pmd, int write) 490 pmd_t *pmd, int write)
@@ -381,15 +498,16 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
381 unsigned long len, unsigned long pgoff, 498 unsigned long len, unsigned long pgoff,
382 unsigned long flags) 499 unsigned long flags)
383{ 500{
384 return slice_get_unmapped_area(addr, len, flags, 501 struct hstate *hstate = hstate_file(file);
385 mmu_huge_psize, 1, 0); 502 int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
503 return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
386} 504}
387 505
388/* 506/*
389 * Called by asm hashtable.S for doing lazy icache flush 507 * Called by asm hashtable.S for doing lazy icache flush
390 */ 508 */
391static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags, 509static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
392 pte_t pte, int trap) 510 pte_t pte, int trap, unsigned long sz)
393{ 511{
394 struct page *page; 512 struct page *page;
395 int i; 513 int i;
@@ -402,7 +520,7 @@ static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
402 /* page is dirty */ 520 /* page is dirty */
403 if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) { 521 if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
404 if (trap == 0x400) { 522 if (trap == 0x400) {
405 for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) 523 for (i = 0; i < (sz / PAGE_SIZE); i++)
406 __flush_dcache_icache(page_address(page+i)); 524 __flush_dcache_icache(page_address(page+i));
407 set_bit(PG_arch_1, &page->flags); 525 set_bit(PG_arch_1, &page->flags);
408 } else { 526 } else {
@@ -418,11 +536,16 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
418{ 536{
419 pte_t *ptep; 537 pte_t *ptep;
420 unsigned long old_pte, new_pte; 538 unsigned long old_pte, new_pte;
421 unsigned long va, rflags, pa; 539 unsigned long va, rflags, pa, sz;
422 long slot; 540 long slot;
423 int err = 1; 541 int err = 1;
424 int ssize = user_segment_size(ea); 542 int ssize = user_segment_size(ea);
543 unsigned int mmu_psize;
544 int shift;
545 mmu_psize = get_slice_psize(mm, ea);
425 546
547 if (!mmu_huge_psizes[mmu_psize])
548 goto out;
426 ptep = huge_pte_offset(mm, ea); 549 ptep = huge_pte_offset(mm, ea);
427 550
428 /* Search the Linux page table for a match with va */ 551 /* Search the Linux page table for a match with va */
@@ -465,30 +588,32 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
465 rflags = 0x2 | (!(new_pte & _PAGE_RW)); 588 rflags = 0x2 | (!(new_pte & _PAGE_RW));
466 /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ 589 /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
467 rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N); 590 rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
591 shift = mmu_psize_to_shift(mmu_psize);
592 sz = ((1UL) << shift);
468 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) 593 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
469 /* No CPU has hugepages but lacks no execute, so we 594 /* No CPU has hugepages but lacks no execute, so we
470 * don't need to worry about that case */ 595 * don't need to worry about that case */
471 rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte), 596 rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
472 trap); 597 trap, sz);
473 598
474 /* Check if pte already has an hpte (case 2) */ 599 /* Check if pte already has an hpte (case 2) */
475 if (unlikely(old_pte & _PAGE_HASHPTE)) { 600 if (unlikely(old_pte & _PAGE_HASHPTE)) {
476 /* There MIGHT be an HPTE for this pte */ 601 /* There MIGHT be an HPTE for this pte */
477 unsigned long hash, slot; 602 unsigned long hash, slot;
478 603
479 hash = hpt_hash(va, HPAGE_SHIFT, ssize); 604 hash = hpt_hash(va, shift, ssize);
480 if (old_pte & _PAGE_F_SECOND) 605 if (old_pte & _PAGE_F_SECOND)
481 hash = ~hash; 606 hash = ~hash;
482 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 607 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
483 slot += (old_pte & _PAGE_F_GIX) >> 12; 608 slot += (old_pte & _PAGE_F_GIX) >> 12;
484 609
485 if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize, 610 if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
486 ssize, local) == -1) 611 ssize, local) == -1)
487 old_pte &= ~_PAGE_HPTEFLAGS; 612 old_pte &= ~_PAGE_HPTEFLAGS;
488 } 613 }
489 614
490 if (likely(!(old_pte & _PAGE_HASHPTE))) { 615 if (likely(!(old_pte & _PAGE_HASHPTE))) {
491 unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize); 616 unsigned long hash = hpt_hash(va, shift, ssize);
492 unsigned long hpte_group; 617 unsigned long hpte_group;
493 618
494 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; 619 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -509,7 +634,7 @@ repeat:
509 634
510 /* Insert into the hash table, primary slot */ 635 /* Insert into the hash table, primary slot */
511 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, 636 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
512 mmu_huge_psize, ssize); 637 mmu_psize, ssize);
513 638
514 /* Primary is full, try the secondary */ 639 /* Primary is full, try the secondary */
515 if (unlikely(slot == -1)) { 640 if (unlikely(slot == -1)) {
@@ -517,7 +642,7 @@ repeat:
517 HPTES_PER_GROUP) & ~0x7UL; 642 HPTES_PER_GROUP) & ~0x7UL;
518 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 643 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
519 HPTE_V_SECONDARY, 644 HPTE_V_SECONDARY,
520 mmu_huge_psize, ssize); 645 mmu_psize, ssize);
521 if (slot == -1) { 646 if (slot == -1) {
522 if (mftb() & 0x1) 647 if (mftb() & 0x1)
523 hpte_group = ((hash & htab_hash_mask) * 648 hpte_group = ((hash & htab_hash_mask) *
@@ -549,45 +674,54 @@ void set_huge_psize(int psize)
549{ 674{
550 /* Check that it is a page size supported by the hardware and 675 /* Check that it is a page size supported by the hardware and
551 * that it fits within pagetable limits. */ 676 * that it fits within pagetable limits. */
552 if (mmu_psize_defs[psize].shift && mmu_psize_defs[psize].shift < SID_SHIFT && 677 if (mmu_psize_defs[psize].shift &&
678 mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
553 (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT || 679 (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
554 mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) { 680 mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
555 HPAGE_SHIFT = mmu_psize_defs[psize].shift; 681 mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
 556 mmu_huge_psize = psize; 682 /* Return if huge page size has already been set up or is the
557#ifdef CONFIG_PPC_64K_PAGES 683 * same as the base page size. */
558 hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT); 684 if (mmu_huge_psizes[psize] ||
559#else 685 mmu_psize_defs[psize].shift == PAGE_SHIFT)
560 if (HPAGE_SHIFT == HPAGE_SHIFT_64K) 686 return;
561 hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT); 687 hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);
562 else 688
563 hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT); 689 switch (mmu_psize_defs[psize].shift) {
564#endif 690 case PAGE_SHIFT_64K:
565 691 /* We only allow 64k hpages with 4k base page,
692 * which was checked above, and always put them
693 * at the PMD */
694 hugepte_shift[psize] = PMD_SHIFT;
695 break;
696 case PAGE_SHIFT_16M:
697 /* 16M pages can be at two different levels
 698 * of pagetables based on the base page size */
699 if (PAGE_SHIFT == PAGE_SHIFT_64K)
700 hugepte_shift[psize] = PMD_SHIFT;
701 else /* 4k base page */
702 hugepte_shift[psize] = PUD_SHIFT;
703 break;
704 case PAGE_SHIFT_16G:
705 /* 16G pages are always at PGD level */
706 hugepte_shift[psize] = PGDIR_SHIFT;
707 break;
708 }
709 hugepte_shift[psize] -= mmu_psize_defs[psize].shift;
566 } else 710 } else
567 HPAGE_SHIFT = 0; 711 hugepte_shift[psize] = 0;
568} 712}
569 713
570static int __init hugepage_setup_sz(char *str) 714static int __init hugepage_setup_sz(char *str)
571{ 715{
572 unsigned long long size; 716 unsigned long long size;
573 int mmu_psize = -1; 717 int mmu_psize;
574 int shift; 718 int shift;
575 719
576 size = memparse(str, &str); 720 size = memparse(str, &str);
577 721
578 shift = __ffs(size); 722 shift = __ffs(size);
579 switch (shift) { 723 mmu_psize = shift_to_mmu_psize(shift);
580#ifndef CONFIG_PPC_64K_PAGES 724 if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
581 case HPAGE_SHIFT_64K:
582 mmu_psize = MMU_PAGE_64K;
583 break;
584#endif
585 case HPAGE_SHIFT_16M:
586 mmu_psize = MMU_PAGE_16M;
587 break;
588 }
589
590 if (mmu_psize >=0 && mmu_psize_defs[mmu_psize].shift)
591 set_huge_psize(mmu_psize); 725 set_huge_psize(mmu_psize);
592 else 726 else
593 printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size); 727 printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
@@ -603,16 +737,31 @@ static void zero_ctor(struct kmem_cache *cache, void *addr)
603 737
604static int __init hugetlbpage_init(void) 738static int __init hugetlbpage_init(void)
605{ 739{
740 unsigned int psize;
741
606 if (!cpu_has_feature(CPU_FTR_16M_PAGE)) 742 if (!cpu_has_feature(CPU_FTR_16M_PAGE))
607 return -ENODEV; 743 return -ENODEV;
608 744 /* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE
609 huge_pgtable_cache = kmem_cache_create("hugepte_cache", 745 * and adjust PTE_NONCACHE_NUM if the number of supported huge page
610 HUGEPTE_TABLE_SIZE, 746 * sizes changes.
611 HUGEPTE_TABLE_SIZE, 747 */
612 0, 748 set_huge_psize(MMU_PAGE_16M);
613 zero_ctor); 749 set_huge_psize(MMU_PAGE_64K);
614 if (! huge_pgtable_cache) 750 set_huge_psize(MMU_PAGE_16G);
615 panic("hugetlbpage_init(): could not create hugepte cache\n"); 751
752 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
753 if (mmu_huge_psizes[psize]) {
754 huge_pgtable_cache(psize) = kmem_cache_create(
755 HUGEPTE_CACHE_NAME(psize),
756 HUGEPTE_TABLE_SIZE(psize),
757 HUGEPTE_TABLE_SIZE(psize),
758 0,
759 zero_ctor);
760 if (!huge_pgtable_cache(psize))
 761 panic("hugetlbpage_init(): could not create %s\n",
 762 HUGEPTE_CACHE_NAME(psize));
763 }
764 }
616 765
617 return 0; 766 return 0;
618} 767}
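For readers following the hugetlbpage.c changes above, the gigantic-page bootstrap added by add_gpage() and alloc_bootmem_huge_page() is a simple push/pop handoff: physical addresses discovered very early in boot are stacked in gpage_freearray[] and later popped, one per call, into the hugetlb boot list. Below is a minimal user-space sketch of that handoff; MAX_NUMBER_GPAGES, the printf() and main() are assumptions made for the illustration, not kernel code.

#include <stdio.h>

#define MAX_NUMBER_GPAGES 32   /* assumed capacity, for the sketch only */

static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned int nr_gpages;

/* Record a contiguous run of gigantic pages found early in boot. */
static void add_gpage(unsigned long addr, unsigned long page_size,
                      unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages-- && nr_gpages < MAX_NUMBER_GPAGES) {
                gpage_freearray[nr_gpages++] = addr;
                addr += page_size;
        }
}

/* Later, hand one recorded page to the hugetlb boot allocator; here we
 * just print the address instead of queueing a huge_bootmem_page. */
static int alloc_bootmem_huge_page(void)
{
        if (nr_gpages == 0)
                return 0;
        printf("handing gigantic page at 0x%lx to hugetlb\n",
               gpage_freearray[--nr_gpages]);
        return 1;
}

int main(void)
{
        add_gpage(0x10000000UL, 16UL << 20, 2);  /* two pages, sizes shrunk */
        while (alloc_bootmem_huge_page())
                ;
        return 0;
}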
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6ef63caca682..a41bc5aa2043 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -153,10 +153,10 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
153}; 153};
154 154
155#ifdef CONFIG_HUGETLB_PAGE 155#ifdef CONFIG_HUGETLB_PAGE
156/* Hugepages need one extra cache, initialized in hugetlbpage.c. We 156/* Hugepages need an extra cache per hugepagesize, initialized in
157 * can't put into the tables above, because HPAGE_SHIFT is not compile 157 * hugetlbpage.c. We can't put into the tables above, because HPAGE_SHIFT
158 * time constant. */ 158 * is not compile time constant. */
159struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1]; 159struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT];
160#else 160#else
161struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; 161struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
162#endif 162#endif
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index cf4bffba6f7c..d9a181351332 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -39,7 +39,6 @@ EXPORT_SYMBOL(numa_cpu_lookup_table);
39EXPORT_SYMBOL(numa_cpumask_lookup_table); 39EXPORT_SYMBOL(numa_cpumask_lookup_table);
40EXPORT_SYMBOL(node_data); 40EXPORT_SYMBOL(node_data);
41 41
42static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
43static int min_common_depth; 42static int min_common_depth;
44static int n_mem_addr_cells, n_mem_size_cells; 43static int n_mem_addr_cells, n_mem_size_cells;
45 44
@@ -816,7 +815,7 @@ void __init do_init_bootmem(void)
816 dbg("node %d\n", nid); 815 dbg("node %d\n", nid);
817 dbg("NODE_DATA() = %p\n", NODE_DATA(nid)); 816 dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
818 817
819 NODE_DATA(nid)->bdata = &plat_node_bdata[nid]; 818 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
820 NODE_DATA(nid)->node_start_pfn = start_pfn; 819 NODE_DATA(nid)->node_start_pfn = start_pfn;
821 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; 820 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
822 821
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index c7584072dfcc..2001abdb1912 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -145,13 +145,20 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage)
145void __iomem * 145void __iomem *
146ioremap(phys_addr_t addr, unsigned long size) 146ioremap(phys_addr_t addr, unsigned long size)
147{ 147{
148 return __ioremap(addr, size, _PAGE_NO_CACHE); 148 return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
149} 149}
150EXPORT_SYMBOL(ioremap); 150EXPORT_SYMBOL(ioremap);
151 151
152void __iomem * 152void __iomem *
153ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags) 153ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
154{ 154{
155 /* writeable implies dirty for kernel addresses */
156 if (flags & _PAGE_RW)
157 flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
158
159 /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
160 flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC);
161
155 return __ioremap(addr, size, flags); 162 return __ioremap(addr, size, flags);
156} 163}
157EXPORT_SYMBOL(ioremap_flags); 164EXPORT_SYMBOL(ioremap_flags);
@@ -163,6 +170,14 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
163 phys_addr_t p; 170 phys_addr_t p;
164 int err; 171 int err;
165 172
173 /* Make sure we have the base flags */
174 if ((flags & _PAGE_PRESENT) == 0)
175 flags |= _PAGE_KERNEL;
176
177 /* Non-cacheable page cannot be coherent */
178 if (flags & _PAGE_NO_CACHE)
179 flags &= ~_PAGE_COHERENT;
180
166 /* 181 /*
167 * Choose an address to map it to. 182 * Choose an address to map it to.
168 * Once the vmalloc system is running, we use it. 183 * Once the vmalloc system is running, we use it.
@@ -219,11 +234,6 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
219 v = (ioremap_bot -= size); 234 v = (ioremap_bot -= size);
220 } 235 }
221 236
222 if ((flags & _PAGE_PRESENT) == 0)
223 flags |= _PAGE_KERNEL;
224 if (flags & _PAGE_NO_CACHE)
225 flags |= _PAGE_GUARDED;
226
227 /* 237 /*
228 * Should check if it is a candidate for a BAT mapping 238 * Should check if it is a candidate for a BAT mapping
229 */ 239 */
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3ef0ad2f9ca0..365e61ae5dbc 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -107,9 +107,18 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
107{ 107{
108 unsigned long i; 108 unsigned long i;
109 109
110 /* Make sure we have the base flags */
110 if ((flags & _PAGE_PRESENT) == 0) 111 if ((flags & _PAGE_PRESENT) == 0)
111 flags |= pgprot_val(PAGE_KERNEL); 112 flags |= pgprot_val(PAGE_KERNEL);
112 113
114 /* Non-cacheable page cannot be coherent */
115 if (flags & _PAGE_NO_CACHE)
116 flags &= ~_PAGE_COHERENT;
117
118 /* We don't support the 4K PFN hack with ioremap */
119 if (flags & _PAGE_4K_PFN)
120 return NULL;
121
113 WARN_ON(pa & ~PAGE_MASK); 122 WARN_ON(pa & ~PAGE_MASK);
114 WARN_ON(((unsigned long)ea) & ~PAGE_MASK); 123 WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
115 WARN_ON(size & ~PAGE_MASK); 124 WARN_ON(size & ~PAGE_MASK);
@@ -190,6 +199,13 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size)
190void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size, 199void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
191 unsigned long flags) 200 unsigned long flags)
192{ 201{
202 /* writeable implies dirty for kernel addresses */
203 if (flags & _PAGE_RW)
204 flags |= _PAGE_DIRTY;
205
206 /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
207 flags &= ~(_PAGE_USER | _PAGE_EXEC);
208
193 if (ppc_md.ioremap) 209 if (ppc_md.ioremap)
194 return ppc_md.ioremap(addr, size, flags); 210 return ppc_md.ioremap(addr, size, flags);
195 return __ioremap(addr, size, flags); 211 return __ioremap(addr, size, flags);
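Both the pgtable_32.c and pgtable_64.c hunks above apply the same policy to caller-supplied ioremap flags: a writable mapping is marked dirty, user and execute permission bits are stripped so they cannot leak into an MMIO mapping, missing base flags are filled in, and a non-cacheable mapping drops the coherent bit. The stand-alone sketch below demonstrates that policy with made-up bit values; the real powerpc PTE encodings and the 32-bit-only _PAGE_HWWRITE/_PAGE_HWEXEC handling are deliberately left out.

#include <stdio.h>

/* Stand-in flag bits for the sketch, not the real powerpc encodings. */
#define PAGE_PRESENT   0x001UL
#define PAGE_RW        0x002UL
#define PAGE_DIRTY     0x004UL
#define PAGE_USER      0x008UL
#define PAGE_EXEC      0x010UL
#define PAGE_NO_CACHE  0x020UL
#define PAGE_COHERENT  0x040UL
#define PAGE_KERNEL    (PAGE_PRESENT | PAGE_RW | PAGE_DIRTY)

static unsigned long sanitize_ioremap_flags(unsigned long flags)
{
        /* writable implies dirty for kernel mappings */
        if (flags & PAGE_RW)
                flags |= PAGE_DIRTY;

        /* never let user or execute permission leak into an MMIO mapping */
        flags &= ~(PAGE_USER | PAGE_EXEC);

        /* fill in the base kernel flags if the caller left them out */
        if (!(flags & PAGE_PRESENT))
                flags |= PAGE_KERNEL;

        /* a non-cacheable mapping cannot also be coherent */
        if (flags & PAGE_NO_CACHE)
                flags &= ~PAGE_COHERENT;

        return flags;
}

int main(void)
{
        unsigned long f = PAGE_RW | PAGE_USER | PAGE_NO_CACHE | PAGE_COHERENT;
        printf("0x%03lx -> 0x%03lx\n", f, sanitize_ioremap_flags(f));
        return 0;
}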
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index a01b5c608ff9..409fcc7b63ce 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -147,7 +147,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
147 */ 147 */
148 if (huge) { 148 if (huge) {
149#ifdef CONFIG_HUGETLB_PAGE 149#ifdef CONFIG_HUGETLB_PAGE
 150 psize = mmu_huge_psize; 150 psize = get_slice_psize(mm, addr);
151#else 151#else
152 BUG(); 152 BUG();
153 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ 153 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index d664b1bce381..696a5ee4962d 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -1,7 +1,6 @@
1config PPC_MPC52xx 1config PPC_MPC52xx
2 bool "52xx-based boards" 2 bool "52xx-based boards"
3 depends on PPC_MULTIPLATFORM && PPC32 3 depends on PPC_MULTIPLATFORM && PPC32
4 select FSL_SOC
5 select PPC_CLOCK 4 select PPC_CLOCK
6 select PPC_PCI_CHOICE 5 select PPC_PCI_CHOICE
7 6
@@ -48,6 +47,7 @@ config PPC_MPC5200_BUGFIX
48config PPC_MPC5200_GPIO 47config PPC_MPC5200_GPIO
49 bool "MPC5200 GPIO support" 48 bool "MPC5200 GPIO support"
50 depends on PPC_MPC52xx 49 depends on PPC_MPC52xx
51 select HAVE_GPIO_LIB 50 select ARCH_REQUIRE_GPIOLIB
51 select GENERIC_GPIO
52 help 52 help
53 Enable gpiolib support for mpc5200 based boards 53 Enable gpiolib support for mpc5200 based boards
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 208005ca262c..e06420af5fe9 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -172,7 +172,7 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
172 } 172 }
173} 173}
174 174
175static void tce_build_cell(struct iommu_table *tbl, long index, long npages, 175static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
176 unsigned long uaddr, enum dma_data_direction direction, 176 unsigned long uaddr, enum dma_data_direction direction,
177 struct dma_attrs *attrs) 177 struct dma_attrs *attrs)
178{ 178{
@@ -213,6 +213,7 @@ static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
213 213
214 pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n", 214 pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
215 index, npages, direction, base_pte); 215 index, npages, direction, base_pte);
216 return 0;
216} 217}
217 218
218static void tce_free_cell(struct iommu_table *tbl, long index, long npages) 219static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
@@ -1150,12 +1151,23 @@ static int iommu_fixed_disabled;
1150 1151
1151static int __init setup_iommu_fixed(char *str) 1152static int __init setup_iommu_fixed(char *str)
1152{ 1153{
1154 struct device_node *pciep;
1155
1153 if (strcmp(str, "off") == 0) 1156 if (strcmp(str, "off") == 0)
1154 iommu_fixed_disabled = 1; 1157 iommu_fixed_disabled = 1;
1155 1158
 1156 else if (strcmp(str, "weak") == 0 1159 /* If we can find a pcie-endpoint in the device tree, assume that
 1160 * we're on a triblade or a CAB, so by default the fixed mapping
 1161 * should be set to be weakly ordered; but only if the boot
 1162 * option WASN'T set for strong ordering
1163 */
1164 pciep = of_find_node_by_type(NULL, "pcie-endpoint");
1165
1166 if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
1157 iommu_fixed_is_weak = 1; 1167 iommu_fixed_is_weak = 1;
1158 1168
1169 of_node_put(pciep);
1170
1159 return 1; 1171 return 1;
1160} 1172}
1161__setup("iommu_fixed=", setup_iommu_fixed); 1173__setup("iommu_fixed=", setup_iommu_fixed);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 99c73066b82f..010a51f59796 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -288,9 +288,32 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
288 return VM_FAULT_NOPAGE; 288 return VM_FAULT_NOPAGE;
289} 289}
290 290
291static int spufs_mem_mmap_access(struct vm_area_struct *vma,
292 unsigned long address,
293 void *buf, int len, int write)
294{
295 struct spu_context *ctx = vma->vm_file->private_data;
296 unsigned long offset = address - vma->vm_start;
297 char *local_store;
298
299 if (write && !(vma->vm_flags & VM_WRITE))
300 return -EACCES;
301 if (spu_acquire(ctx))
302 return -EINTR;
303 if ((offset + len) > vma->vm_end)
304 len = vma->vm_end - offset;
305 local_store = ctx->ops->get_ls(ctx);
306 if (write)
307 memcpy_toio(local_store + offset, buf, len);
308 else
309 memcpy_fromio(buf, local_store + offset, len);
310 spu_release(ctx);
311 return len;
312}
291 313
292static struct vm_operations_struct spufs_mem_mmap_vmops = { 314static struct vm_operations_struct spufs_mem_mmap_vmops = {
293 .fault = spufs_mem_mmap_fault, 315 .fault = spufs_mem_mmap_fault,
316 .access = spufs_mem_mmap_access,
294}; 317};
295 318
296static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) 319static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 34654743363d..2deeeba7eccf 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -312,11 +312,28 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
312 */ 312 */
313 node = cpu_to_node(raw_smp_processor_id()); 313 node = cpu_to_node(raw_smp_processor_id());
314 for (n = 0; n < MAX_NUMNODES; n++, node++) { 314 for (n = 0; n < MAX_NUMNODES; n++, node++) {
315 int available_spus;
316
315 node = (node < MAX_NUMNODES) ? node : 0; 317 node = (node < MAX_NUMNODES) ? node : 0;
316 if (!node_allowed(ctx, node)) 318 if (!node_allowed(ctx, node))
317 continue; 319 continue;
320
321 available_spus = 0;
318 mutex_lock(&cbe_spu_info[node].list_mutex); 322 mutex_lock(&cbe_spu_info[node].list_mutex);
319 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { 323 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
324 if (spu->ctx && spu->ctx->gang
325 && spu->ctx->aff_offset == 0)
326 available_spus -=
327 (spu->ctx->gang->contexts - 1);
328 else
329 available_spus++;
330 }
331 if (available_spus < ctx->gang->contexts) {
332 mutex_unlock(&cbe_spu_info[node].list_mutex);
333 continue;
334 }
335
336 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
320 if ((!mem_aff || spu->has_mem_affinity) && 337 if ((!mem_aff || spu->has_mem_affinity) &&
321 sched_spu(spu)) { 338 sched_spu(spu)) {
322 mutex_unlock(&cbe_spu_info[node].list_mutex); 339 mutex_unlock(&cbe_spu_info[node].list_mutex);
@@ -389,6 +406,9 @@ static int has_affinity(struct spu_context *ctx)
389 if (list_empty(&ctx->aff_list)) 406 if (list_empty(&ctx->aff_list))
390 return 0; 407 return 0;
391 408
409 if (atomic_read(&ctx->gang->aff_sched_count) == 0)
410 ctx->gang->aff_ref_spu = NULL;
411
392 if (!gang->aff_ref_spu) { 412 if (!gang->aff_ref_spu) {
393 if (!(gang->aff_flags & AFF_MERGED)) 413 if (!(gang->aff_flags & AFF_MERGED))
394 aff_merge_remaining_ctxs(gang); 414 aff_merge_remaining_ctxs(gang);
@@ -416,14 +436,8 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
416 if (spu->ctx->flags & SPU_CREATE_NOSCHED) 436 if (spu->ctx->flags & SPU_CREATE_NOSCHED)
417 atomic_dec(&cbe_spu_info[spu->node].reserved_spus); 437 atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
418 438
419 if (ctx->gang){ 439 if (ctx->gang)
420 mutex_lock(&ctx->gang->aff_mutex); 440 atomic_dec_if_positive(&ctx->gang->aff_sched_count);
421 if (has_affinity(ctx)) {
422 if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
423 ctx->gang->aff_ref_spu = NULL;
424 }
425 mutex_unlock(&ctx->gang->aff_mutex);
426 }
427 441
428 spu_switch_notify(spu, NULL); 442 spu_switch_notify(spu, NULL);
429 spu_unmap_mappings(ctx); 443 spu_unmap_mappings(ctx);
@@ -562,10 +576,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
562 goto found; 576 goto found;
563 mutex_unlock(&cbe_spu_info[node].list_mutex); 577 mutex_unlock(&cbe_spu_info[node].list_mutex);
564 578
565 mutex_lock(&ctx->gang->aff_mutex); 579 atomic_dec(&ctx->gang->aff_sched_count);
566 if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
567 ctx->gang->aff_ref_spu = NULL;
568 mutex_unlock(&ctx->gang->aff_mutex);
569 goto not_found; 580 goto not_found;
570 } 581 }
571 mutex_unlock(&ctx->gang->aff_mutex); 582 mutex_unlock(&ctx->gang->aff_mutex);
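The new capacity check in aff_ref_location() above only considers a node that can still hold the entire incoming gang: an SPU running the reference context of an already placed gang effectively reserves the rest of that gang's members, so it subtracts (contexts - 1) from the node's head-room, while every other SPU counts as one slot. A small stand-alone model of that accounting follows; struct spu_slot and the example values are illustrative assumptions, not spufs data structures.

#include <stdio.h>

/* Simplified view of one SPU on a node. */
struct spu_slot {
        int has_gang_ref;       /* runs the reference context of a gang */
        int gang_contexts;      /* size of that gang */
};

/* A node is usable only if it can still hold the whole incoming gang. */
static int node_fits_gang(const struct spu_slot *spus, int nr_spus,
                          int gang_contexts)
{
        int available = 0, i;

        for (i = 0; i < nr_spus; i++) {
                if (spus[i].has_gang_ref)
                        /* a placed gang reserves the rest of its members */
                        available -= spus[i].gang_contexts - 1;
                else
                        available++;
        }
        return available >= gang_contexts;
}

int main(void)
{
        struct spu_slot node[4] = {
                { 1, 3 },       /* reference SPU of a 3-context gang */
                { 0, 0 }, { 0, 0 }, { 0, 0 },
        };
        printf("fits a gang of 2: %s\n",
               node_fits_gang(node, 4, 2) ? "yes" : "no");
        return 0;
}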
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 8c0e95766a62..92d20e993ede 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -196,8 +196,7 @@ static int __init sputrace_init(void)
196 struct proc_dir_entry *entry; 196 struct proc_dir_entry *entry;
197 int i, error = -ENOMEM; 197 int i, error = -ENOMEM;
198 198
199 sputrace_log = kcalloc(sizeof(struct sputrace), 199 sputrace_log = kcalloc(bufsize, sizeof(struct sputrace), GFP_KERNEL);
200 bufsize, GFP_KERNEL);
201 if (!sputrace_log) 200 if (!sputrace_log)
202 goto out; 201 goto out;
203 202
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index bc818e4e2033..bb464d1211b2 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -41,7 +41,7 @@
41#include <asm/iseries/hv_call_event.h> 41#include <asm/iseries/hv_call_event.h>
42#include <asm/iseries/iommu.h> 42#include <asm/iseries/iommu.h>
43 43
44static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages, 44static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
45 unsigned long uaddr, enum dma_data_direction direction, 45 unsigned long uaddr, enum dma_data_direction direction,
46 struct dma_attrs *attrs) 46 struct dma_attrs *attrs)
47{ 47{
@@ -71,6 +71,7 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
71 index++; 71 index++;
72 uaddr += TCE_PAGE_SIZE; 72 uaddr += TCE_PAGE_SIZE;
73 } 73 }
74 return 0;
74} 75}
75 76
76static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) 77static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index b72120751bbe..70b688c1aefb 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -561,7 +561,7 @@ static void yield_shared_processor(void)
561static void iseries_shared_idle(void) 561static void iseries_shared_idle(void)
562{ 562{
563 while (1) { 563 while (1) {
564 tick_nohz_stop_sched_tick(); 564 tick_nohz_stop_sched_tick(1);
565 while (!need_resched() && !hvlpevent_is_pending()) { 565 while (!need_resched() && !hvlpevent_is_pending()) {
566 local_irq_disable(); 566 local_irq_disable();
567 ppc64_runlatch_off(); 567 ppc64_runlatch_off();
@@ -591,7 +591,7 @@ static void iseries_dedicated_idle(void)
591 set_thread_flag(TIF_POLLING_NRFLAG); 591 set_thread_flag(TIF_POLLING_NRFLAG);
592 592
593 while (1) { 593 while (1) {
594 tick_nohz_stop_sched_tick(); 594 tick_nohz_stop_sched_tick(1);
595 if (!need_resched()) { 595 if (!need_resched()) {
596 while (!need_resched()) { 596 while (!need_resched()) {
597 ppc64_runlatch_off(); 597 ppc64_runlatch_off();
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 70541b7a5013..a0ff03a3d8da 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -83,7 +83,7 @@ static u32 *iob_l2_base;
83static struct iommu_table iommu_table_iobmap; 83static struct iommu_table iommu_table_iobmap;
84static int iommu_table_iobmap_inited; 84static int iommu_table_iobmap_inited;
85 85
86static void iobmap_build(struct iommu_table *tbl, long index, 86static int iobmap_build(struct iommu_table *tbl, long index,
87 long npages, unsigned long uaddr, 87 long npages, unsigned long uaddr,
88 enum dma_data_direction direction, 88 enum dma_data_direction direction,
89 struct dma_attrs *attrs) 89 struct dma_attrs *attrs)
@@ -108,6 +108,7 @@ static void iobmap_build(struct iommu_table *tbl, long index,
108 uaddr += IOBMAP_PAGE_SIZE; 108 uaddr += IOBMAP_PAGE_SIZE;
109 bus_addr += IOBMAP_PAGE_SIZE; 109 bus_addr += IOBMAP_PAGE_SIZE;
110 } 110 }
111 return 0;
111} 112}
112 113
113 114
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 00bd0166d07f..31635446901a 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -97,8 +97,6 @@ extern struct machdep_calls pmac_md;
97int sccdbg; 97int sccdbg;
98#endif 98#endif
99 99
100extern void zs_kgdb_hook(int tty_num);
101
102sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; 100sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
103EXPORT_SYMBOL(sys_ctrler); 101EXPORT_SYMBOL(sys_ctrler);
104 102
@@ -329,10 +327,6 @@ static void __init pmac_setup_arch(void)
329 l2cr_init(); 327 l2cr_init();
330#endif /* CONFIG_PPC32 */ 328#endif /* CONFIG_PPC32 */
331 329
332#ifdef CONFIG_KGDB
333 zs_kgdb_hook(0);
334#endif
335
336 find_via_cuda(); 330 find_via_cuda();
337 find_via_pmu(); 331 find_via_pmu();
338 smu_init(); 332 smu_init();
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 757c0296e0b8..97619fd51e39 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -40,3 +40,26 @@ config PPC_PSERIES_DEBUG
40 depends on PPC_PSERIES && PPC_EARLY_DEBUG 40 depends on PPC_PSERIES && PPC_EARLY_DEBUG
41 bool "Enable extra debug logging in platforms/pseries" 41 bool "Enable extra debug logging in platforms/pseries"
42 default y 42 default y
43
44config PPC_SMLPAR
45 bool "Support for shared-memory logical partitions"
46 depends on PPC_PSERIES
47 select LPARCFG
48 default n
49 help
50 Select this option to enable shared memory partition support.
 51 With this option, a system running in an LPAR can be given more
 52 memory than is physically available, allowing firmware to
 53 balance memory across many LPARs.
54
55config CMM
56 tristate "Collaborative memory management"
57 depends on PPC_SMLPAR
58 default y
59 help
 60 Select this option if you want to enable the kernel interface
 61 to reduce the memory size of the system. This is accomplished
 62 by allocating pages of memory and putting them "on hold". This only
63 makes sense for a system running in an LPAR where the unused pages
64 will be reused for other LPARs. The interface allows firmware to
65 balance memory across many LPARs.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 554c6e42ef2a..dfe574af2dc0 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
24obj-$(CONFIG_HVCS) += hvcserver.o 24obj-$(CONFIG_HVCS) += hvcserver.o
25obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o 25obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
26obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o 26obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
27obj-$(CONFIG_CMM) += cmm.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
new file mode 100644
index 000000000000..c6b3be03168b
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -0,0 +1,468 @@
1/*
2 * Collaborative memory management interface.
3 *
4 * Copyright (C) 2008 IBM Corporation
5 * Author(s): Brian King (brking@linux.vnet.ibm.com),
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/ctype.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/fs.h>
27#include <linux/init.h>
28#include <linux/kthread.h>
29#include <linux/module.h>
30#include <linux/oom.h>
31#include <linux/sched.h>
32#include <linux/stringify.h>
33#include <linux/swap.h>
34#include <linux/sysdev.h>
35#include <asm/firmware.h>
36#include <asm/hvcall.h>
37#include <asm/mmu.h>
38#include <asm/pgalloc.h>
39#include <asm/uaccess.h>
40
41#include "plpar_wrappers.h"
42
43#define CMM_DRIVER_VERSION "1.0.0"
44#define CMM_DEFAULT_DELAY 1
45#define CMM_DEBUG 0
46#define CMM_DISABLE 0
47#define CMM_OOM_KB 1024
48#define CMM_MIN_MEM_MB 256
49#define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10))
50#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
51
52static unsigned int delay = CMM_DEFAULT_DELAY;
53static unsigned int oom_kb = CMM_OOM_KB;
54static unsigned int cmm_debug = CMM_DEBUG;
55static unsigned int cmm_disabled = CMM_DISABLE;
56static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
57static struct sys_device cmm_sysdev;
58
59MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
60MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
61MODULE_LICENSE("GPL");
62MODULE_VERSION(CMM_DRIVER_VERSION);
63
64module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR);
65MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
66 "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
67module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR);
68MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
69 "[Default=" __stringify(CMM_OOM_KB) "]");
70module_param_named(min_mem_mb, min_mem_mb, ulong, S_IRUGO | S_IWUSR);
71MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
72 "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
73module_param_named(debug, cmm_debug, uint, S_IRUGO | S_IWUSR);
 74module_param_named(debug, cmm_debug, uint, S_IRUGO | S_IWUSR);
 74MODULE_PARM_DESC(debug, "Enable module debug logging. Set to 1 to enable. "
75 "[Default=" __stringify(CMM_DEBUG) "]");
76
77#define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))
78
79#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
80
81struct cmm_page_array {
82 struct cmm_page_array *next;
83 unsigned long index;
84 unsigned long page[CMM_NR_PAGES];
85};
86
87static unsigned long loaned_pages;
88static unsigned long loaned_pages_target;
89static unsigned long oom_freed_pages;
90
91static struct cmm_page_array *cmm_page_list;
92static DEFINE_SPINLOCK(cmm_lock);
93
94static struct task_struct *cmm_thread_ptr;
95
96/**
97 * cmm_alloc_pages - Allocate pages and mark them as loaned
98 * @nr: number of pages to allocate
99 *
100 * Return value:
 101 * number of requested pages that could not be allocated
102 **/
103static long cmm_alloc_pages(long nr)
104{
105 struct cmm_page_array *pa, *npa;
106 unsigned long addr;
107 long rc;
108
109 cmm_dbg("Begin request for %ld pages\n", nr);
110
111 while (nr) {
112 addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
113 __GFP_NORETRY | __GFP_NOMEMALLOC);
114 if (!addr)
115 break;
116 spin_lock(&cmm_lock);
117 pa = cmm_page_list;
118 if (!pa || pa->index >= CMM_NR_PAGES) {
119 /* Need a new page for the page list. */
120 spin_unlock(&cmm_lock);
121 npa = (struct cmm_page_array *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
122 __GFP_NORETRY | __GFP_NOMEMALLOC);
123 if (!npa) {
124 pr_info("%s: Can not allocate new page list\n", __FUNCTION__);
125 free_page(addr);
126 break;
127 }
128 spin_lock(&cmm_lock);
129 pa = cmm_page_list;
130
131 if (!pa || pa->index >= CMM_NR_PAGES) {
132 npa->next = pa;
133 npa->index = 0;
134 pa = npa;
135 cmm_page_list = pa;
136 } else
137 free_page((unsigned long) npa);
138 }
139
140 if ((rc = plpar_page_set_loaned(__pa(addr)))) {
141 pr_err("%s: Can not set page to loaned. rc=%ld\n", __FUNCTION__, rc);
142 spin_unlock(&cmm_lock);
143 free_page(addr);
144 break;
145 }
146
147 pa->page[pa->index++] = addr;
148 loaned_pages++;
149 totalram_pages--;
150 spin_unlock(&cmm_lock);
151 nr--;
152 }
153
154 cmm_dbg("End request with %ld pages unfulfilled\n", nr);
155 return nr;
156}
157
158/**
159 * cmm_free_pages - Free pages and mark them as active
160 * @nr: number of pages to free
161 *
162 * Return value:
 163 * number of requested pages that could not be freed
164 **/
165static long cmm_free_pages(long nr)
166{
167 struct cmm_page_array *pa;
168 unsigned long addr;
169
170 cmm_dbg("Begin free of %ld pages.\n", nr);
171 spin_lock(&cmm_lock);
172 pa = cmm_page_list;
173 while (nr) {
174 if (!pa || pa->index <= 0)
175 break;
176 addr = pa->page[--pa->index];
177
178 if (pa->index == 0) {
179 pa = pa->next;
180 free_page((unsigned long) cmm_page_list);
181 cmm_page_list = pa;
182 }
183
184 plpar_page_set_active(__pa(addr));
185 free_page(addr);
186 loaned_pages--;
187 nr--;
188 totalram_pages++;
189 }
190 spin_unlock(&cmm_lock);
191 cmm_dbg("End request with %ld pages unfulfilled\n", nr);
192 return nr;
193}
194
195/**
196 * cmm_oom_notify - OOM notifier
197 * @self: notifier block struct
198 * @dummy: not used
199 * @parm: returned - number of pages freed
200 *
201 * Return value:
202 * NOTIFY_OK
203 **/
204static int cmm_oom_notify(struct notifier_block *self,
205 unsigned long dummy, void *parm)
206{
207 unsigned long *freed = parm;
208 long nr = KB2PAGES(oom_kb);
209
210 cmm_dbg("OOM processing started\n");
211 nr = cmm_free_pages(nr);
212 loaned_pages_target = loaned_pages;
213 *freed += KB2PAGES(oom_kb) - nr;
214 oom_freed_pages += KB2PAGES(oom_kb) - nr;
215 cmm_dbg("OOM processing complete\n");
216 return NOTIFY_OK;
217}
218
219/**
220 * cmm_get_mpp - Read memory performance parameters
221 *
222 * Makes hcall to query the current page loan request from the hypervisor.
223 *
224 * Return value:
225 * nothing
226 **/
227static void cmm_get_mpp(void)
228{
229 int rc;
230 struct hvcall_mpp_data mpp_data;
231 unsigned long active_pages_target;
232 signed long page_loan_request;
233
234 rc = h_get_mpp(&mpp_data);
235
236 if (rc != H_SUCCESS)
237 return;
238
239 page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
240 loaned_pages_target = page_loan_request + loaned_pages;
241 if (loaned_pages_target > oom_freed_pages)
242 loaned_pages_target -= oom_freed_pages;
243 else
244 loaned_pages_target = 0;
245
246 active_pages_target = totalram_pages + loaned_pages - loaned_pages_target;
247
248 if ((min_mem_mb * 1024 * 1024) > (active_pages_target * PAGE_SIZE))
249 loaned_pages_target = totalram_pages + loaned_pages -
250 ((min_mem_mb * 1024 * 1024) / PAGE_SIZE);
251
252 cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
253 page_loan_request, loaned_pages, loaned_pages_target,
254 oom_freed_pages, totalram_pages);
255}
256
257static struct notifier_block cmm_oom_nb = {
258 .notifier_call = cmm_oom_notify
259};
260
261/**
262 * cmm_thread - CMM task thread
263 * @dummy: not used
264 *
265 * Return value:
266 * 0
267 **/
268static int cmm_thread(void *dummy)
269{
270 unsigned long timeleft;
271
272 while (1) {
273 timeleft = msleep_interruptible(delay * 1000);
274
275 if (kthread_should_stop() || timeleft) {
276 loaned_pages_target = loaned_pages;
277 break;
278 }
279
280 cmm_get_mpp();
281
282 if (loaned_pages_target > loaned_pages) {
283 if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
284 loaned_pages_target = loaned_pages;
285 } else if (loaned_pages_target < loaned_pages)
286 cmm_free_pages(loaned_pages - loaned_pages_target);
287 }
288 return 0;
289}
290
291#define CMM_SHOW(name, format, args...) \
292 static ssize_t show_##name(struct sys_device *dev, char *buf) \
293 { \
294 return sprintf(buf, format, ##args); \
295 } \
296 static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
297
298CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
299CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
300
301static ssize_t show_oom_pages(struct sys_device *dev, char *buf)
302{
303 return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
304}
305
306static ssize_t store_oom_pages(struct sys_device *dev,
307 const char *buf, size_t count)
308{
309 unsigned long val = simple_strtoul (buf, NULL, 10);
310
311 if (!capable(CAP_SYS_ADMIN))
312 return -EPERM;
313 if (val != 0)
314 return -EBADMSG;
315
316 oom_freed_pages = 0;
317 return count;
318}
319
320static SYSDEV_ATTR(oom_freed_kb, S_IWUSR| S_IRUGO,
321 show_oom_pages, store_oom_pages);
322
323static struct sysdev_attribute *cmm_attrs[] = {
324 &attr_loaned_kb,
325 &attr_loaned_target_kb,
326 &attr_oom_freed_kb,
327};
328
329static struct sysdev_class cmm_sysdev_class = {
330 .name = "cmm",
331};
332
333/**
334 * cmm_sysfs_register - Register with sysfs
335 *
336 * Return value:
337 * 0 on success / other on failure
338 **/
339static int cmm_sysfs_register(struct sys_device *sysdev)
340{
341 int i, rc;
342
343 if ((rc = sysdev_class_register(&cmm_sysdev_class)))
344 return rc;
345
346 sysdev->id = 0;
347 sysdev->cls = &cmm_sysdev_class;
348
349 if ((rc = sysdev_register(sysdev)))
350 goto class_unregister;
351
352 for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
353 if ((rc = sysdev_create_file(sysdev, cmm_attrs[i])))
354 goto fail;
355 }
356
357 return 0;
358
359fail:
360 while (--i >= 0)
361 sysdev_remove_file(sysdev, cmm_attrs[i]);
362 sysdev_unregister(sysdev);
363class_unregister:
364 sysdev_class_unregister(&cmm_sysdev_class);
365 return rc;
366}
367
368/**
369 * cmm_unregister_sysfs - Unregister from sysfs
370 *
371 **/
372static void cmm_unregister_sysfs(struct sys_device *sysdev)
373{
374 int i;
375
376 for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
377 sysdev_remove_file(sysdev, cmm_attrs[i]);
378 sysdev_unregister(sysdev);
379 sysdev_class_unregister(&cmm_sysdev_class);
380}
381
382/**
383 * cmm_init - Module initialization
384 *
385 * Return value:
386 * 0 on success / other on failure
387 **/
388static int cmm_init(void)
389{
390 int rc = -ENOMEM;
391
392 if (!firmware_has_feature(FW_FEATURE_CMO))
393 return -EOPNOTSUPP;
394
395 if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
396 return rc;
397
398 if ((rc = cmm_sysfs_register(&cmm_sysdev)))
399 goto out_oom_notifier;
400
401 if (cmm_disabled)
402 return rc;
403
404 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
405 if (IS_ERR(cmm_thread_ptr)) {
406 rc = PTR_ERR(cmm_thread_ptr);
407 goto out_unregister_sysfs;
408 }
409
410 return rc;
411
412out_unregister_sysfs:
413 cmm_unregister_sysfs(&cmm_sysdev);
414out_oom_notifier:
415 unregister_oom_notifier(&cmm_oom_nb);
416 return rc;
417}
418
419/**
420 * cmm_exit - Module exit
421 *
422 * Return value:
423 * nothing
424 **/
425static void cmm_exit(void)
426{
427 if (cmm_thread_ptr)
428 kthread_stop(cmm_thread_ptr);
429 unregister_oom_notifier(&cmm_oom_nb);
430 cmm_free_pages(loaned_pages);
431 cmm_unregister_sysfs(&cmm_sysdev);
432}
433
434/**
435 * cmm_set_disable - Disable/Enable CMM
436 *
437 * Return value:
438 * 0 on success / other on failure
439 **/
440static int cmm_set_disable(const char *val, struct kernel_param *kp)
441{
442 int disable = simple_strtoul(val, NULL, 10);
443
444 if (disable != 0 && disable != 1)
445 return -EINVAL;
446
447 if (disable && !cmm_disabled) {
448 if (cmm_thread_ptr)
449 kthread_stop(cmm_thread_ptr);
450 cmm_thread_ptr = NULL;
451 cmm_free_pages(loaned_pages);
452 } else if (!disable && cmm_disabled) {
453 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
454 if (IS_ERR(cmm_thread_ptr))
455 return PTR_ERR(cmm_thread_ptr);
456 }
457
458 cmm_disabled = disable;
459 return 0;
460}
461
462module_param_call(disable, cmm_set_disable, param_get_uint,
463 &cmm_disabled, S_IRUGO | S_IWUSR);
464MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
465 "[Default=" __stringify(CMM_DISABLE) "]");
466
467module_init(cmm_init);
468module_exit(cmm_exit);
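The sizing decision made by cmm_get_mpp() above reduces to a few lines of arithmetic: the hypervisor's loan request is converted to pages and added to what is already loaned, pages previously surrendered through the OOM notifier are credited back, and the result is clamped so that at least min_mem_mb of memory stays resident. A user-space sketch of that calculation follows; the 4K page size and the constants are assumptions chosen for the example.

#include <stdio.h>

#define PAGE_SIZE   4096L
#define MIN_MEM_MB  256L

/* Compute how many pages the balloon should hold, mirroring cmm_get_mpp(). */
static long loan_target(long loan_request_bytes, long loaned_pages,
                        long oom_freed_pages, long totalram_pages)
{
        long target = loan_request_bytes / PAGE_SIZE + loaned_pages;
        long min_pages = MIN_MEM_MB * 1024 * 1024 / PAGE_SIZE;

        /* pages already handed back by the OOM notifier count against it */
        target -= oom_freed_pages;
        if (target < 0)
                target = 0;

        /* never balloon below the configured minimum resident memory */
        if (totalram_pages + loaned_pages - target < min_pages)
                target = totalram_pages + loaned_pages - min_pages;

        return target;
}

int main(void)
{
        /* hypervisor asks for 512MB back while 1GB is currently visible */
        printf("loan target = %ld pages\n",
               loan_target(512L << 20, 0, 0, (1024L << 20) / PAGE_SIZE));
        return 0;
}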
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 5377dd4b849a..a8c446697f9e 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -48,7 +48,7 @@
48#include "plpar_wrappers.h" 48#include "plpar_wrappers.h"
49 49
50 50
51static void tce_build_pSeries(struct iommu_table *tbl, long index, 51static int tce_build_pSeries(struct iommu_table *tbl, long index,
52 long npages, unsigned long uaddr, 52 long npages, unsigned long uaddr,
53 enum dma_data_direction direction, 53 enum dma_data_direction direction,
54 struct dma_attrs *attrs) 54 struct dma_attrs *attrs)
@@ -72,6 +72,7 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
72 uaddr += TCE_PAGE_SIZE; 72 uaddr += TCE_PAGE_SIZE;
73 tcep++; 73 tcep++;
74 } 74 }
75 return 0;
75} 76}
76 77
77 78
@@ -94,14 +95,19 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
94 return *tcep; 95 return *tcep;
95} 96}
96 97
97static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, 98static void tce_free_pSeriesLP(struct iommu_table*, long, long);
99static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
100
101static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
98 long npages, unsigned long uaddr, 102 long npages, unsigned long uaddr,
99 enum dma_data_direction direction, 103 enum dma_data_direction direction,
100 struct dma_attrs *attrs) 104 struct dma_attrs *attrs)
101{ 105{
102 u64 rc; 106 u64 rc = 0;
103 u64 proto_tce, tce; 107 u64 proto_tce, tce;
104 u64 rpn; 108 u64 rpn;
109 int ret = 0;
110 long tcenum_start = tcenum, npages_start = npages;
105 111
106 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; 112 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
107 proto_tce = TCE_PCI_READ; 113 proto_tce = TCE_PCI_READ;
@@ -112,6 +118,13 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
112 tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; 118 tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
113 rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce); 119 rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
114 120
121 if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
122 ret = (int)rc;
123 tce_free_pSeriesLP(tbl, tcenum_start,
124 (npages_start - (npages + 1)));
125 break;
126 }
127
115 if (rc && printk_ratelimit()) { 128 if (rc && printk_ratelimit()) {
116 printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc); 129 printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
117 printk("\tindex = 0x%lx\n", (u64)tbl->it_index); 130 printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
@@ -123,25 +136,27 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
123 tcenum++; 136 tcenum++;
124 rpn++; 137 rpn++;
125 } 138 }
139 return ret;
126} 140}
127 141
128static DEFINE_PER_CPU(u64 *, tce_page) = NULL; 142static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
129 143
130static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, 144static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
131 long npages, unsigned long uaddr, 145 long npages, unsigned long uaddr,
132 enum dma_data_direction direction, 146 enum dma_data_direction direction,
133 struct dma_attrs *attrs) 147 struct dma_attrs *attrs)
134{ 148{
135 u64 rc; 149 u64 rc = 0;
136 u64 proto_tce; 150 u64 proto_tce;
137 u64 *tcep; 151 u64 *tcep;
138 u64 rpn; 152 u64 rpn;
139 long l, limit; 153 long l, limit;
154 long tcenum_start = tcenum, npages_start = npages;
155 int ret = 0;
140 156
141 if (npages == 1) { 157 if (npages == 1) {
142 tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, 158 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
143 direction, attrs); 159 direction, attrs);
144 return;
145 } 160 }
146 161
147 tcep = __get_cpu_var(tce_page); 162 tcep = __get_cpu_var(tce_page);
@@ -153,9 +168,8 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
153 tcep = (u64 *)__get_free_page(GFP_ATOMIC); 168 tcep = (u64 *)__get_free_page(GFP_ATOMIC);
154 /* If allocation fails, fall back to the loop implementation */ 169 /* If allocation fails, fall back to the loop implementation */
155 if (!tcep) { 170 if (!tcep) {
156 tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, 171 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
157 direction, attrs); 172 direction, attrs);
158 return;
159 } 173 }
160 __get_cpu_var(tce_page) = tcep; 174 __get_cpu_var(tce_page) = tcep;
161 } 175 }
@@ -187,6 +201,13 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
187 tcenum += limit; 201 tcenum += limit;
188 } while (npages > 0 && !rc); 202 } while (npages > 0 && !rc);
189 203
204 if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
205 ret = (int)rc;
206 tce_freemulti_pSeriesLP(tbl, tcenum_start,
207 (npages_start - (npages + limit)));
208 return ret;
209 }
210
190 if (rc && printk_ratelimit()) { 211 if (rc && printk_ratelimit()) {
191 printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc); 212 printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
192 printk("\tindex = 0x%lx\n", (u64)tbl->it_index); 213 printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
@@ -194,6 +215,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
194 printk("\ttce[0] val = 0x%lx\n", tcep[0]); 215 printk("\ttce[0] val = 0x%lx\n", tcep[0]);
195 show_stack(current, (unsigned long *)__get_SP()); 216 show_stack(current, (unsigned long *)__get_SP());
196 } 217 }
218 return ret;
197} 219}
198 220
199static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) 221static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
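
With CMO the H_PUT_TCE hcall behind plpar_tce_put() can fail with H_NOT_ENOUGH_RESOURCES, which is why the tce_build callbacks above change from void to int and free whatever they had already mapped before propagating the error. A condensed sketch of that rollback pattern, using hypothetical map_one()/unmap_range() helpers in place of plpar_tce_put() and tce_free_pSeriesLP():

/* hypothetical helpers standing in for the hcall and the free routine */
static int map_one(long index);
static void unmap_range(long start, long count);

static int build_range(long index, long npages)
{
	long start = index, want = npages;
	int rc = 0;

	while (npages--) {
		rc = map_one(index);
		if (rc) {
			/* undo the entries mapped before this failure */
			unmap_range(start, want - (npages + 1));
			break;
		}
		index++;
	}
	return rc;	/* 0 on success, first error otherwise */
}

The caller therefore sees either a fully mapped range or none of it, matching the (npages_start - (npages + 1)) arithmetic in the hunk above.
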
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index d8680b589dc9..a437267c6bf8 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -42,6 +42,16 @@ static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
42 return vpa_call(0x3, cpu, vpa); 42 return vpa_call(0x3, cpu, vpa);
43} 43}
44 44
45static inline long plpar_page_set_loaned(unsigned long vpa)
46{
47 return plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa, 0);
48}
49
50static inline long plpar_page_set_active(unsigned long vpa)
51{
52 return plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa, 0);
53}
54
45extern void vpa_init(int cpu); 55extern void vpa_init(int cpu);
46 56
47static inline long plpar_pte_enter(unsigned long flags, 57static inline long plpar_pte_enter(unsigned long flags,
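
plpar_page_set_loaned() and plpar_page_set_active() wrap the H_PAGE_INIT hcall with its loaned/active sub-functions; the CMM balloon uses them to hand idle pages to the hypervisor and take them back. A rough sketch of that usage (error handling trimmed, names and flow illustrative rather than copied from the CMM driver):

#include <linux/gfp.h>
#include <asm/page.h>
#include "plpar_wrappers.h"

static unsigned long loan_one_page(void)
{
	unsigned long addr = __get_free_page(GFP_NOIO | __GFP_NOWARN);

	if (!addr)
		return 0;
	if (plpar_page_set_loaned(__pa(addr))) {	/* hypervisor refused */
		free_page(addr);
		return 0;
	}
	return addr;					/* page is now loaned */
}

static void reclaim_page(unsigned long addr)
{
	plpar_page_set_active(__pa(addr));		/* take it back */
	free_page(addr);
}
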
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 90beb444e1dd..063a0d2fba30 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -314,6 +314,76 @@ static int pseries_set_xdabr(unsigned long dabr)
314 H_DABRX_KERNEL | H_DABRX_USER); 314 H_DABRX_KERNEL | H_DABRX_USER);
315} 315}
316 316
317#define CMO_CHARACTERISTICS_TOKEN 44
318#define CMO_MAXLENGTH 1026
319
320/**
321 * pSeries_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
322 * so handle it here. (Stolen from parse_system_parameter_string)
323 */
324void pSeries_cmo_feature_init(void)
325{
326 char *ptr, *key, *value, *end;
327 int call_status;
328 int PrPSP = -1;
329 int SecPSP = -1;
330
331 pr_debug(" -> fw_cmo_feature_init()\n");
332 spin_lock(&rtas_data_buf_lock);
333 memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
334 call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
335 NULL,
336 CMO_CHARACTERISTICS_TOKEN,
337 __pa(rtas_data_buf),
338 RTAS_DATA_BUF_SIZE);
339
340 if (call_status != 0) {
341 spin_unlock(&rtas_data_buf_lock);
342 pr_debug("CMO not available\n");
343 pr_debug(" <- fw_cmo_feature_init()\n");
344 return;
345 }
346
347 end = rtas_data_buf + CMO_MAXLENGTH - 2;
348 ptr = rtas_data_buf + 2; /* step over strlen value */
349 key = value = ptr;
350
351 while (*ptr && (ptr <= end)) {
352 /* Separate the key and value by replacing '=' with '\0' and
353 * point the value at the string after the '='
354 */
355 if (ptr[0] == '=') {
356 ptr[0] = '\0';
357 value = ptr + 1;
358 } else if (ptr[0] == '\0' || ptr[0] == ',') {
359 /* Terminate the string containing the key/value pair */
360 ptr[0] = '\0';
361
362 if (key == value) {
363 pr_debug("Malformed key/value pair\n");
364 /* Never found a '=', end processing */
365 break;
366 }
367
368 if (0 == strcmp(key, "PrPSP"))
369 PrPSP = simple_strtol(value, NULL, 10);
370 else if (0 == strcmp(key, "SecPSP"))
371 SecPSP = simple_strtol(value, NULL, 10);
372 value = key = ptr + 1;
373 }
374 ptr++;
375 }
376
377 if (PrPSP != -1 || SecPSP != -1) {
378 pr_info("CMO enabled\n");
379 pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", PrPSP, SecPSP);
380 powerpc_firmware_features |= FW_FEATURE_CMO;
381 } else
382 pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", PrPSP, SecPSP);
383 spin_unlock(&rtas_data_buf_lock);
384 pr_debug(" <- fw_cmo_feature_init()\n");
385}
386
317/* 387/*
318 * Early initialization. Relocation is on but do not reference unbolted pages 388 * Early initialization. Relocation is on but do not reference unbolted pages
319 */ 389 */
@@ -329,6 +399,7 @@ static void __init pSeries_init_early(void)
329 else if (firmware_has_feature(FW_FEATURE_XDABR)) 399 else if (firmware_has_feature(FW_FEATURE_XDABR))
330 ppc_md.set_dabr = pseries_set_xdabr; 400 ppc_md.set_dabr = pseries_set_xdabr;
331 401
402 pSeries_cmo_feature_init();
332 iommu_init_early_pSeries(); 403 iommu_init_early_pSeries();
333 404
334 pr_debug(" <- pSeries_init_early()\n"); 405 pr_debug(" <- pSeries_init_early()\n");
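
pSeries_cmo_feature_init() walks a firmware string of the form "key=value,key=value,..." and the PrPSP/SecPSP keys decide whether FW_FEATURE_CMO gets set. The in-place technique - overwriting '=' and the pair terminator with '\0' so that key and value become ordinary C strings without copying - is easy to miss in the hunk above, so here is a small, runnable user-space rendition of it (the sample string is made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_characteristics(char *buf)
{
	char *ptr = buf, *key = buf, *value = buf;

	while (*ptr) {
		if (*ptr == '=') {
			*ptr = '\0';		/* terminate the key */
			value = ptr + 1;	/* value starts after '=' */
		} else if (*ptr == ',') {
			*ptr = '\0';		/* terminate the value */
			if (key != value)
				printf("%s = %ld\n", key,
				       strtol(value, NULL, 10));
			key = value = ptr + 1;	/* next pair */
		}
		ptr++;
	}
	if (key != value)			/* last pair has no ',' */
		printf("%s = %ld\n", key, strtol(value, NULL, 10));
}

int main(void)
{
	char buf[] = "PrPSP=40,SecPSP=20";

	parse_characteristics(buf);
	return 0;
}
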
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index de8c8b542cfa..89639ecbf381 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -147,7 +147,7 @@ static void dart_flush(struct iommu_table *tbl)
147 } 147 }
148} 148}
149 149
150static void dart_build(struct iommu_table *tbl, long index, 150static int dart_build(struct iommu_table *tbl, long index,
151 long npages, unsigned long uaddr, 151 long npages, unsigned long uaddr,
152 enum dma_data_direction direction, 152 enum dma_data_direction direction,
153 struct dma_attrs *attrs) 153 struct dma_attrs *attrs)
@@ -184,6 +184,7 @@ static void dart_build(struct iommu_table *tbl, long index,
184 } else { 184 } else {
185 dart_dirty = 1; 185 dart_dirty = 1;
186 } 186 }
187 return 0;
187} 188}
188 189
189 190
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
index 4bb18f57901e..1ce546462be5 100644
--- a/arch/powerpc/sysdev/qe_lib/Kconfig
+++ b/arch/powerpc/sysdev/qe_lib/Kconfig
@@ -29,7 +29,7 @@ config QE_GPIO
29 bool "QE GPIO support" 29 bool "QE GPIO support"
30 depends on QUICC_ENGINE 30 depends on QUICC_ENGINE
31 select GENERIC_GPIO 31 select GENERIC_GPIO
32 select HAVE_GPIO_LIB 32 select ARCH_REQUIRE_GPIOLIB
33 help 33 help
34 Say Y here if you're going to use hardware that connects to the 34 Say Y here if you're going to use hardware that connects to the
35 QE GPIOs. 35 QE GPIOs.
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index eb530b4128ba..2ed88122be93 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -565,6 +565,7 @@ bool "s390 guest support (EXPERIMENTAL)"
565 depends on 64BIT && EXPERIMENTAL 565 depends on 64BIT && EXPERIMENTAL
566 select VIRTIO 566 select VIRTIO
567 select VIRTIO_RING 567 select VIRTIO_RING
568 select VIRTIO_CONSOLE
568 help 569 help
569 Select this option if you want to run the kernel under s390 linux 570 Select this option if you want to run the kernel under s390 linux
570endmenu 571endmenu
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 288ad490a6dd..4f82e5b5f879 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -270,7 +270,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
270 __ctl_store(kcb->kprobe_saved_ctl, 9, 11); 270 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
271} 271}
272 272
273/* Called with kretprobe_lock held */
274void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 273void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
275 struct pt_regs *regs) 274 struct pt_regs *regs)
276{ 275{
@@ -377,8 +376,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
377 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 376 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
378 377
379 INIT_HLIST_HEAD(&empty_rp); 378 INIT_HLIST_HEAD(&empty_rp);
380 spin_lock_irqsave(&kretprobe_lock, flags); 379 kretprobe_hash_lock(current, &head, &flags);
381 head = kretprobe_inst_table_head(current);
382 380
383 /* 381 /*
384 * It is possible to have multiple instances associated with a given 382 * It is possible to have multiple instances associated with a given
@@ -417,7 +415,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
417 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; 415 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
418 416
419 reset_current_kprobe(); 417 reset_current_kprobe();
420 spin_unlock_irqrestore(&kretprobe_lock, flags); 418 kretprobe_hash_unlock(current, &flags);
421 preempt_enable_no_resched(); 419 preempt_enable_no_resched();
422 420
423 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 421 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 85defd01d293..9839767d0842 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -142,7 +142,7 @@ static void default_idle(void)
142void cpu_idle(void) 142void cpu_idle(void)
143{ 143{
144 for (;;) { 144 for (;;) {
145 tick_nohz_stop_sched_tick(); 145 tick_nohz_stop_sched_tick(1);
146 while (!need_resched()) 146 while (!need_resched())
147 default_idle(); 147 default_idle();
148 tick_nohz_restart_sched_tick(); 148 tick_nohz_restart_sched_tick();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b358e18273b0..62122bad1e33 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -54,6 +54,7 @@
54#include <asm/sections.h> 54#include <asm/sections.h>
55#include <asm/ebcdic.h> 55#include <asm/ebcdic.h>
56#include <asm/compat.h> 56#include <asm/compat.h>
57#include <asm/kvm_virtio.h>
57 58
58long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | 59long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
59 PSW_MASK_MCHECK | PSW_DEFAULT_KEY); 60 PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
@@ -766,7 +767,8 @@ setup_arch(char **cmdline_p)
766 printk("We are running under VM (64 bit mode)\n"); 767 printk("We are running under VM (64 bit mode)\n");
767 else if (MACHINE_IS_KVM) { 768 else if (MACHINE_IS_KVM) {
768 printk("We are running under KVM (64 bit mode)\n"); 769 printk("We are running under KVM (64 bit mode)\n");
769 add_preferred_console("ttyS", 1, NULL); 770 add_preferred_console("hvc", 0, NULL);
771 s390_virtio_console_init();
770 } else 772 } else
771 printk("We are running native (64 bit mode)\n"); 773 printk("We are running native (64 bit mode)\n");
772#endif /* CONFIG_64BIT */ 774#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 212d618b0095..632b13e10053 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -9,7 +9,6 @@
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/bootmem.h> 10#include <linux/bootmem.h>
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <linux/kthread.h>
13#include <linux/workqueue.h> 12#include <linux/workqueue.h>
14#include <linux/cpu.h> 13#include <linux/cpu.h>
15#include <linux/smp.h> 14#include <linux/smp.h>
@@ -230,20 +229,9 @@ void arch_update_cpu_topology(void)
230 } 229 }
231} 230}
232 231
233static int topology_kthread(void *data)
234{
235 arch_reinit_sched_domains();
236 return 0;
237}
238
239static void topology_work_fn(struct work_struct *work) 232static void topology_work_fn(struct work_struct *work)
240{ 233{
241 /* We can't call arch_reinit_sched_domains() from a multi-threaded 234 arch_reinit_sched_domains();
242 * workqueue context since it may deadlock in case of cpu hotplug.
243 * So we have to create a kernel thread in order to call
244 * arch_reinit_sched_domains().
245 */
246 kthread_run(topology_kthread, NULL, "topology_update");
247} 235}
248 236
249void topology_schedule_update(void) 237void topology_schedule_update(void)
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index f4b6124fdb75..f28c43d2f61d 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -72,7 +72,8 @@ void arch_release_hugepage(struct page *page)
72 page[1].index = 0; 72 page[1].index = 0;
73} 73}
74 74
75pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 75pte_t *huge_pte_alloc(struct mm_struct *mm,
76 unsigned long addr, unsigned long sz)
76{ 77{
77 pgd_t *pgdp; 78 pgd_t *pgdp;
78 pud_t *pudp; 79 pud_t *pudp;
@@ -119,6 +120,11 @@ int pmd_huge(pmd_t pmd)
119 return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE); 120 return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
120} 121}
121 122
123int pud_huge(pud_t pud)
124{
125 return 0;
126}
127
122struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, 128struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
123 pmd_t *pmdp, int write) 129 pmd_t *pmdp, int write)
124{ 130{
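
huge_pte_alloc() now receives the huge page size so that architectures supporting more than one size can pick the page-table level to allocate at; the implementations touched in this series only need the new prototype. A generic, hedged sketch of how the argument is typically used (modelled loosely on the x86-style walk, not on the s390 code above):

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
		      unsigned long sz)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc(mm, pgd, addr);

	if (!pud)
		return NULL;

	if (sz == PUD_SIZE)			/* e.g. 1GB pages */
		return (pte_t *)pud;

	/* otherwise allocate down to the pmd level (e.g. 2MB pages) */
	return (pte_t *)pmd_alloc(mm, pud, addr);
}
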
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 3e7384f4619c..8879938f3356 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -8,6 +8,7 @@ mainmenu "Linux/SuperH Kernel Configuration"
8config SUPERH 8config SUPERH
9 def_bool y 9 def_bool y
10 select EMBEDDED 10 select EMBEDDED
11 select HAVE_CLK
11 select HAVE_IDE 12 select HAVE_IDE
12 select HAVE_OPROFILE 13 select HAVE_OPROFILE
13 help 14 help
diff --git a/arch/sh/boards/renesas/migor/setup.c b/arch/sh/boards/renesas/migor/setup.c
index 01af44245b57..963c99322095 100644
--- a/arch/sh/boards/renesas/migor/setup.c
+++ b/arch/sh/boards/renesas/migor/setup.c
@@ -30,7 +30,6 @@
30 30
31static struct smc91x_platdata smc91x_info = { 31static struct smc91x_platdata smc91x_info = {
32 .flags = SMC91X_USE_16BIT, 32 .flags = SMC91X_USE_16BIT,
33 .irq_flags = IRQF_TRIGGER_HIGH,
34}; 33};
35 34
36static struct resource smc91x_eth_resources[] = { 35static struct resource smc91x_eth_resources[] = {
@@ -42,7 +41,7 @@ static struct resource smc91x_eth_resources[] = {
42 }, 41 },
43 [1] = { 42 [1] = {
44 .start = 32, /* IRQ0 */ 43 .start = 32, /* IRQ0 */
45 .flags = IORESOURCE_IRQ, 44 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
46 }, 45 },
47}; 46};
48 47
diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c
index adcea31e663e..f386997e4d9c 100644
--- a/arch/sh/boot/compressed/misc_32.c
+++ b/arch/sh/boot/compressed/misc_32.c
@@ -74,8 +74,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */
74static int fill_inbuf(void); 74static int fill_inbuf(void);
75static void flush_window(void); 75static void flush_window(void);
76static void error(char *m); 76static void error(char *m);
77static void gzip_mark(void **);
78static void gzip_release(void **);
79 77
80extern char input_data[]; 78extern char input_data[];
81extern int input_len; 79extern int input_len;
@@ -84,11 +82,7 @@ static long bytes_out = 0;
84static uch *output_data; 82static uch *output_data;
85static unsigned long output_ptr = 0; 83static unsigned long output_ptr = 0;
86 84
87static void *malloc(int size);
88static void free(void *where);
89static void error(char *m); 85static void error(char *m);
90static void gzip_mark(void **);
91static void gzip_release(void **);
92 86
93int puts(const char *); 87int puts(const char *);
94 88
@@ -101,38 +95,6 @@ static unsigned long free_mem_end_ptr;
101 95
102#include "../../../../lib/inflate.c" 96#include "../../../../lib/inflate.c"
103 97
104static void *malloc(int size)
105{
106 void *p;
107
108 if (size <0) error("Malloc error");
109 if (free_mem_ptr == 0) error("Memory error");
110
111 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
112
113 p = (void *)free_mem_ptr;
114 free_mem_ptr += size;
115
116 if (free_mem_ptr >= free_mem_end_ptr)
117 error("Out of memory");
118
119 return p;
120}
121
122static void free(void *where)
123{ /* Don't care */
124}
125
126static void gzip_mark(void **ptr)
127{
128 *ptr = (void *) free_mem_ptr;
129}
130
131static void gzip_release(void **ptr)
132{
133 free_mem_ptr = (long) *ptr;
134}
135
136#ifdef CONFIG_SH_STANDARD_BIOS 98#ifdef CONFIG_SH_STANDARD_BIOS
137size_t strlen(const char *s) 99size_t strlen(const char *s)
138{ 100{
diff --git a/arch/sh/boot/compressed/misc_64.c b/arch/sh/boot/compressed/misc_64.c
index a006ef89b9dd..2941657e18aa 100644
--- a/arch/sh/boot/compressed/misc_64.c
+++ b/arch/sh/boot/compressed/misc_64.c
@@ -72,8 +72,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */
72static int fill_inbuf(void); 72static int fill_inbuf(void);
73static void flush_window(void); 73static void flush_window(void);
74static void error(char *m); 74static void error(char *m);
75static void gzip_mark(void **);
76static void gzip_release(void **);
77 75
78extern char input_data[]; 76extern char input_data[];
79extern int input_len; 77extern int input_len;
@@ -82,11 +80,7 @@ static long bytes_out = 0;
82static uch *output_data; 80static uch *output_data;
83static unsigned long output_ptr = 0; 81static unsigned long output_ptr = 0;
84 82
85static void *malloc(int size);
86static void free(void *where);
87static void error(char *m); 83static void error(char *m);
88static void gzip_mark(void **);
89static void gzip_release(void **);
90 84
91static void puts(const char *); 85static void puts(const char *);
92 86
@@ -99,40 +93,6 @@ static unsigned long free_mem_end_ptr;
99 93
100#include "../../../../lib/inflate.c" 94#include "../../../../lib/inflate.c"
101 95
102static void *malloc(int size)
103{
104 void *p;
105
106 if (size < 0)
107 error("Malloc error\n");
108 if (free_mem_ptr == 0)
109 error("Memory error\n");
110
111 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
112
113 p = (void *) free_mem_ptr;
114 free_mem_ptr += size;
115
116 if (free_mem_ptr >= free_mem_end_ptr)
117 error("\nOut of memory\n");
118
119 return p;
120}
121
122static void free(void *where)
123{ /* Don't care */
124}
125
126static void gzip_mark(void **ptr)
127{
128 *ptr = (void *) free_mem_ptr;
129}
130
131static void gzip_release(void **ptr)
132{
133 free_mem_ptr = (long) *ptr;
134}
135
136void puts(const char *s) 96void puts(const char *s)
137{ 97{
138} 98}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index b98e37a1f54c..921892c351da 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -86,7 +86,7 @@ void cpu_idle(void)
86 if (!idle) 86 if (!idle)
87 idle = default_idle; 87 idle = default_idle;
88 88
89 tick_nohz_stop_sched_tick(); 89 tick_nohz_stop_sched_tick(1);
90 while (!need_resched()) 90 while (!need_resched())
91 idle(); 91 idle();
92 tick_nohz_restart_sched_tick(); 92 tick_nohz_restart_sched_tick();
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index 1b2ae35c4a76..54d1f61aa007 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -12,6 +12,7 @@
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/stacktrace.h> 13#include <linux/stacktrace.h>
14#include <linux/thread_info.h> 14#include <linux/thread_info.h>
15#include <linux/module.h>
15#include <asm/ptrace.h> 16#include <asm/ptrace.h>
16 17
17/* 18/*
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index 125e493ead82..f0aa5c398656 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -29,7 +29,7 @@ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
29 int fd[2]; 29 int fd[2];
30 int error; 30 int error;
31 31
32 error = do_pipe(fd); 32 error = do_pipe_flags(fd, 0);
33 if (!error) { 33 if (!error) {
34 regs->regs[1] = fd[1]; 34 regs->regs[1] = fd[1];
35 return fd[0]; 35 return fd[0];
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index ae8c321d6e2a..9304117039c4 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -22,7 +22,8 @@
22#include <asm/tlbflush.h> 22#include <asm/tlbflush.h>
23#include <asm/cacheflush.h> 23#include <asm/cacheflush.h>
24 24
25pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 25pte_t *huge_pte_alloc(struct mm_struct *mm,
26 unsigned long addr, unsigned long sz)
26{ 27{
27 pgd_t *pgd; 28 pgd_t *pgd;
28 pud_t *pud; 29 pud_t *pud;
@@ -78,6 +79,11 @@ int pmd_huge(pmd_t pmd)
78 return 0; 79 return 0;
79} 80}
80 81
82int pud_huge(pud_t pud)
83{
84 return 0;
85}
86
81struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, 87struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
82 pmd_t *pmd, int write) 88 pmd_t *pmd, int write)
83{ 89{
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d7df26bd1e54..d652d375eb1e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -191,7 +191,7 @@ void __init paging_init(void)
191 pg_data_t *pgdat = NODE_DATA(nid); 191 pg_data_t *pgdat = NODE_DATA(nid);
192 unsigned long low, start_pfn; 192 unsigned long low, start_pfn;
193 193
194 start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT; 194 start_pfn = pgdat->bdata->node_min_pfn;
195 low = pgdat->bdata->node_low_pfn; 195 low = pgdat->bdata->node_low_pfn;
196 196
197 if (max_zone_pfns[ZONE_NORMAL] < low) 197 if (max_zone_pfns[ZONE_NORMAL] < low)
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 1663199ce888..095d93bec7cd 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -14,7 +14,6 @@
14#include <linux/pfn.h> 14#include <linux/pfn.h>
15#include <asm/sections.h> 15#include <asm/sections.h>
16 16
17static bootmem_data_t plat_node_bdata[MAX_NUMNODES];
18struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 17struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
19EXPORT_SYMBOL_GPL(node_data); 18EXPORT_SYMBOL_GPL(node_data);
20 19
@@ -35,7 +34,7 @@ void __init setup_memory(void)
35 NODE_DATA(0) = pfn_to_kaddr(free_pfn); 34 NODE_DATA(0) = pfn_to_kaddr(free_pfn);
36 memset(NODE_DATA(0), 0, sizeof(struct pglist_data)); 35 memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
37 free_pfn += PFN_UP(sizeof(struct pglist_data)); 36 free_pfn += PFN_UP(sizeof(struct pglist_data));
38 NODE_DATA(0)->bdata = &plat_node_bdata[0]; 37 NODE_DATA(0)->bdata = &bootmem_node_data[0];
39 38
40 /* Set up node 0 */ 39 /* Set up node 0 */
41 setup_bootmem_allocator(free_pfn); 40 setup_bootmem_allocator(free_pfn);
@@ -66,7 +65,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
66 free_pfn += PFN_UP(sizeof(struct pglist_data)); 65 free_pfn += PFN_UP(sizeof(struct pglist_data));
67 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 66 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
68 67
69 NODE_DATA(nid)->bdata = &plat_node_bdata[nid]; 68 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
70 NODE_DATA(nid)->node_start_pfn = start_pfn; 69 NODE_DATA(nid)->node_start_pfn = start_pfn;
71 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; 70 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
72 71
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 789724e61e83..375de7c6d082 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -298,20 +298,6 @@ config UNIX98_PTYS
298 Read the instructions in <file:Documentation/Changes> pertaining to 298 Read the instructions in <file:Documentation/Changes> pertaining to
299 pseudo terminals. It's safe to say N. 299 pseudo terminals. It's safe to say N.
300 300
301config UNIX98_PTY_COUNT
302 int "Maximum number of Unix98 PTYs in use (0-2048)"
303 depends on UNIX98_PTYS
304 default "256"
305 help
306 The maximum number of Unix98 PTYs that can be used at any one time.
307 The default is 256, and should be enough for desktop systems. Server
308 machines which support incoming telnet/rlogin/ssh connections and/or
309 serve several X terminals may want to increase this: every incoming
310 connection and every xterm uses up one PTY.
311
312 When not in use, each additional set of 256 PTYs occupy
313 approximately 8 KB of kernel memory on 32-bit architectures.
314
315endmenu 301endmenu
316 302
317source "fs/Kconfig" 303source "fs/Kconfig"
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
index 3c6b49a53ae8..4d73421559c3 100644
--- a/arch/sparc/kernel/sys_sparc.c
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -97,7 +97,7 @@ asmlinkage int sparc_pipe(struct pt_regs *regs)
97 int fd[2]; 97 int fd[2];
98 int error; 98 int error;
99 99
100 error = do_pipe(fd); 100 error = do_pipe_flags(fd, 0);
101 if (error) 101 if (error)
102 goto out; 102 goto out;
103 regs->u_regs[UREG_I1] = fd[1]; 103 regs->u_regs[UREG_I1] = fd[1];
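
do_pipe() is replaced by do_pipe_flags(fd, 0), which behaves identically when flags is 0; the extra argument exists so the new pipe2() system call can request O_CLOEXEC or O_NONBLOCK atomically. A hedged sketch of a generic pipe2-style wrapper built on it (the real sys_pipe2 lives in fs/pipe.c; the name demo_sys_pipe2 is made up):

asmlinkage long demo_sys_pipe2(int __user *fildes, int flags)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, flags);
	if (!error) {
		if (copy_to_user(fildes, fd, sizeof(fd))) {
			/* user buffer was bad: drop both descriptors */
			sys_close(fd[0]);
			sys_close(fd[1]);
			error = -EFAULT;
		}
	}
	return error;
}
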
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index 5a7c4c8345c3..e1b9233b90ab 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -80,4 +80,5 @@ sys_call_table:
80/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy 80/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
81/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait 81/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
82/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate 82/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
83/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime 83/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c624e04ff03e..ee30462598fc 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1352,8 +1352,7 @@ void __init srmmu_paging_init(void)
1352 zones_size[ZONE_HIGHMEM] = npages; 1352 zones_size[ZONE_HIGHMEM] = npages;
1353 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages(); 1353 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
1354 1354
1355 free_area_init_node(0, &contig_page_data, zones_size, 1355 free_area_init_node(0, zones_size, pfn_base, zholes_size);
1356 pfn_base, zholes_size);
1357 } 1356 }
1358} 1357}
1359 1358
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 2375fe9dc312..d1782f6368be 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -2123,8 +2123,7 @@ void __init sun4c_paging_init(void)
2123 zones_size[ZONE_HIGHMEM] = npages; 2123 zones_size[ZONE_HIGHMEM] = npages;
2124 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages(); 2124 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
2125 2125
2126 free_area_init_node(0, &contig_page_data, zones_size, 2126 free_area_init_node(0, zones_size, pfn_base, zholes_size);
2127 pfn_base, zholes_size);
2128 } 2127 }
2129 2128
2130 cnt = 0; 2129 cnt = 0;
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
index f3575a614fa2..53b19c8231a9 100644
--- a/arch/sparc64/kernel/iommu_common.h
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -23,7 +23,7 @@
23#define IO_PAGE_SHIFT 13 23#define IO_PAGE_SHIFT 13
24#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT) 24#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT)
25#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1)) 25#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1))
26#define IO_PAGE_ALIGN(addr) (((addr)+IO_PAGE_SIZE-1)&IO_PAGE_MASK) 26#define IO_PAGE_ALIGN(addr) ALIGN(addr, IO_PAGE_SIZE)
27 27
28#define IO_TSB_ENTRIES (128*1024) 28#define IO_TSB_ENTRIES (128*1024)
29#define IO_TSB_SIZE (IO_TSB_ENTRIES * 8) 29#define IO_TSB_SIZE (IO_TSB_ENTRIES * 8)
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index b441a26b73b0..c481673d249c 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -621,8 +621,9 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
621unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) 621unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
622{ 622{
623 struct irq_handler_data *data; 623 struct irq_handler_data *data;
624 struct ino_bucket *bucket;
625 unsigned long hv_err, cookie; 624 unsigned long hv_err, cookie;
625 struct ino_bucket *bucket;
626 struct irq_desc *desc;
626 unsigned int virt_irq; 627 unsigned int virt_irq;
627 628
628 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); 629 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
@@ -643,6 +644,13 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
643 if (unlikely(!data)) 644 if (unlikely(!data))
644 return 0; 645 return 0;
645 646
647 /* In order to make the LDC channel startup sequence easier,
648 * especially wrt. locking, we do not let request_irq() enable
649 * the interrupt.
650 */
651 desc = irq_desc + virt_irq;
652 desc->status |= IRQ_NOAUTOEN;
653
646 set_irq_chip_data(virt_irq, data); 654 set_irq_chip_data(virt_irq, data);
647 655
648 /* Catch accidental accesses to these things. IMAP/ICLR handling 656 /* Catch accidental accesses to these things. IMAP/ICLR handling
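
The IRQ_NOAUTOEN status bit set here tells request_irq() to install the handler without unmasking the interrupt, so the LDC code can finish its locking-sensitive setup first and enable the lines itself (see the ldc_bind() rework further below, which adds the matching enable_irq() calls). A compact sketch of the pattern, relying on the array-style irq_desc of this kernel generation and a made-up demo_hook_irq() name:

#include <linux/irq.h>
#include <linux/interrupt.h>

static int demo_hook_irq(unsigned int virt_irq, irq_handler_t handler,
			 void *arg)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	desc->status |= IRQ_NOAUTOEN;	/* keep the line masked for now */

	err = request_irq(virt_irq, handler, 0, "demo", arg);
	if (err)
		return err;

	/* ... take driver locks, set up queues, etc. ... */

	enable_irq(virt_irq);		/* now let it fire */
	return 0;
}
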
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index f43b5d755354..201a6e547e4a 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -478,9 +478,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
478 return 0; 478 return 0;
479} 479}
480 480
481/* Called with kretprobe_lock held. The value stored in the return 481/* The value stored in the return address register is actually 2
482 * address register is actually 2 instructions before where the 482 * instructions before where the callee will return to.
483 * callee will return to. Sequences usually look something like this 483 * Sequences usually look something like this
484 * 484 *
485 * call some_function <--- return register points here 485 * call some_function <--- return register points here
486 * nop <--- call delay slot 486 * nop <--- call delay slot
@@ -512,8 +512,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
512 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; 512 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
513 513
514 INIT_HLIST_HEAD(&empty_rp); 514 INIT_HLIST_HEAD(&empty_rp);
515 spin_lock_irqsave(&kretprobe_lock, flags); 515 kretprobe_hash_lock(current, &head, &flags);
516 head = kretprobe_inst_table_head(current);
517 516
518 /* 517 /*
519 * It is possible to have multiple instances associated with a given 518 * It is possible to have multiple instances associated with a given
@@ -553,7 +552,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
553 regs->tnpc = orig_ret_address + 4; 552 regs->tnpc = orig_ret_address + 4;
554 553
555 reset_current_kprobe(); 554 reset_current_kprobe();
556 spin_unlock_irqrestore(&kretprobe_lock, flags); 555 kretprobe_hash_unlock(current, &flags);
557 preempt_enable_no_resched(); 556 preempt_enable_no_resched();
558 557
559 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 558 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c
index 63969f610284..d68982330f66 100644
--- a/arch/sparc64/kernel/ldc.c
+++ b/arch/sparc64/kernel/ldc.c
@@ -1,6 +1,6 @@
1/* ldc.c: Logical Domain Channel link-layer protocol driver. 1/* ldc.c: Logical Domain Channel link-layer protocol driver.
2 * 2 *
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
@@ -23,8 +23,8 @@
23 23
24#define DRV_MODULE_NAME "ldc" 24#define DRV_MODULE_NAME "ldc"
25#define PFX DRV_MODULE_NAME ": " 25#define PFX DRV_MODULE_NAME ": "
26#define DRV_MODULE_VERSION "1.0" 26#define DRV_MODULE_VERSION "1.1"
27#define DRV_MODULE_RELDATE "June 25, 2007" 27#define DRV_MODULE_RELDATE "July 22, 2008"
28 28
29static char version[] __devinitdata = 29static char version[] __devinitdata =
30 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 30 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -1235,13 +1235,9 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
1235 unsigned long hv_err, flags; 1235 unsigned long hv_err, flags;
1236 int err = -EINVAL; 1236 int err = -EINVAL;
1237 1237
1238 spin_lock_irqsave(&lp->lock, flags); 1238 if (!name ||
1239 1239 (lp->state != LDC_STATE_INIT))
1240 if (!name) 1240 return -EINVAL;
1241 goto out_err;
1242
1243 if (lp->state != LDC_STATE_INIT)
1244 goto out_err;
1245 1241
1246 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); 1242 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
1247 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); 1243 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
@@ -1250,25 +1246,32 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
1250 IRQF_SAMPLE_RANDOM | IRQF_SHARED, 1246 IRQF_SAMPLE_RANDOM | IRQF_SHARED,
1251 lp->rx_irq_name, lp); 1247 lp->rx_irq_name, lp);
1252 if (err) 1248 if (err)
1253 goto out_err; 1249 return err;
1254 1250
1255 err = request_irq(lp->cfg.tx_irq, ldc_tx, 1251 err = request_irq(lp->cfg.tx_irq, ldc_tx,
1256 IRQF_SAMPLE_RANDOM | IRQF_SHARED, 1252 IRQF_SAMPLE_RANDOM | IRQF_SHARED,
1257 lp->tx_irq_name, lp); 1253 lp->tx_irq_name, lp);
1258 if (err) 1254 if (err) {
1259 goto out_free_rx_irq; 1255 free_irq(lp->cfg.rx_irq, lp);
1256 return err;
1257 }
1258
1260 1259
1260 spin_lock_irqsave(&lp->lock, flags);
1261
1262 enable_irq(lp->cfg.rx_irq);
1263 enable_irq(lp->cfg.tx_irq);
1261 1264
1262 lp->flags |= LDC_FLAG_REGISTERED_IRQS; 1265 lp->flags |= LDC_FLAG_REGISTERED_IRQS;
1263 1266
1264 err = -ENODEV; 1267 err = -ENODEV;
1265 hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0); 1268 hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
1266 if (hv_err) 1269 if (hv_err)
1267 goto out_free_tx_irq; 1270 goto out_free_irqs;
1268 1271
1269 hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries); 1272 hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
1270 if (hv_err) 1273 if (hv_err)
1271 goto out_free_tx_irq; 1274 goto out_free_irqs;
1272 1275
1273 hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0); 1276 hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
1274 if (hv_err) 1277 if (hv_err)
@@ -1304,14 +1307,11 @@ out_unmap_rx:
1304out_unmap_tx: 1307out_unmap_tx:
1305 sun4v_ldc_tx_qconf(lp->id, 0, 0); 1308 sun4v_ldc_tx_qconf(lp->id, 0, 0);
1306 1309
1307out_free_tx_irq: 1310out_free_irqs:
1308 lp->flags &= ~LDC_FLAG_REGISTERED_IRQS; 1311 lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
1309 free_irq(lp->cfg.tx_irq, lp); 1312 free_irq(lp->cfg.tx_irq, lp);
1310
1311out_free_rx_irq:
1312 free_irq(lp->cfg.rx_irq, lp); 1313 free_irq(lp->cfg.rx_irq, lp);
1313 1314
1314out_err:
1315 spin_unlock_irqrestore(&lp->lock, flags); 1315 spin_unlock_irqrestore(&lp->lock, flags);
1316 1316
1317 return err; 1317 return err;
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 31ea752d307b..8a9cd3e165b9 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -96,7 +96,7 @@ void cpu_idle(void)
96 set_thread_flag(TIF_POLLING_NRFLAG); 96 set_thread_flag(TIF_POLLING_NRFLAG);
97 97
98 while(1) { 98 while(1) {
99 tick_nohz_stop_sched_tick(); 99 tick_nohz_stop_sched_tick(1);
100 100
101 while (!need_resched() && !cpu_is_offline(cpu)) 101 while (!need_resched() && !cpu_is_offline(cpu))
102 sparc64_yield(cpu); 102 sparc64_yield(cpu);
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index e1f4eba2e576..39749e32dc7e 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -418,7 +418,7 @@ asmlinkage long sparc_pipe(struct pt_regs *regs)
418 int fd[2]; 418 int fd[2];
419 int error; 419 int error;
420 420
421 error = do_pipe(fd); 421 error = do_pipe_flags(fd, 0);
422 if (error) 422 if (error)
423 goto out; 423 goto out;
424 regs->u_regs[UREG_I1] = fd[1]; 424 regs->u_regs[UREG_I1] = fd[1];
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 8b5282d433c4..1095bf4c5100 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -81,7 +81,8 @@ sys_call_table32:
81/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy 81/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy
82 .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait 82 .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
83/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate 83/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime 84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1
85 86
86#endif /* CONFIG_COMPAT */ 87#endif /* CONFIG_COMPAT */
87 88
@@ -154,4 +155,5 @@ sys_call_table:
154/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy 155/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
155 .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait 156 .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
156/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate 157/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
157 .word sys_timerfd_settime, sys_timerfd_gettime 158 .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
159/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index bedc4c159b1c..a0c6a97eec6e 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -884,6 +884,16 @@ static struct notifier_block sparc64_cpufreq_notifier_block = {
884 .notifier_call = sparc64_cpufreq_notifier 884 .notifier_call = sparc64_cpufreq_notifier
885}; 885};
886 886
887static int __init register_sparc64_cpufreq_notifier(void)
888{
889
890 cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
891 CPUFREQ_TRANSITION_NOTIFIER);
892 return 0;
893}
894
895core_initcall(register_sparc64_cpufreq_notifier);
896
887#endif /* CONFIG_CPU_FREQ */ 897#endif /* CONFIG_CPU_FREQ */
888 898
889static int sparc64_next_event(unsigned long delta, 899static int sparc64_next_event(unsigned long delta,
@@ -1050,11 +1060,6 @@ void __init time_init(void)
1050 sparc64_clockevent.mult, sparc64_clockevent.shift); 1060 sparc64_clockevent.mult, sparc64_clockevent.shift);
1051 1061
1052 setup_sparc64_timer(); 1062 setup_sparc64_timer();
1053
1054#ifdef CONFIG_CPU_FREQ
1055 cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
1056 CPUFREQ_TRANSITION_NOTIFIER);
1057#endif
1058} 1063}
1059 1064
1060unsigned long long sched_clock(void) 1065unsigned long long sched_clock(void)
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index ebefd2a14375..f27d10369e0c 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
175 return -ENOMEM; 175 return -ENOMEM;
176 176
177 if (flags & MAP_FIXED) { 177 if (flags & MAP_FIXED) {
178 if (prepare_hugepage_range(addr, len)) 178 if (prepare_hugepage_range(file, addr, len))
179 return -EINVAL; 179 return -EINVAL;
180 return addr; 180 return addr;
181 } 181 }
@@ -195,7 +195,8 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
195 pgoff, flags); 195 pgoff, flags);
196} 196}
197 197
198pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 198pte_t *huge_pte_alloc(struct mm_struct *mm,
199 unsigned long addr, unsigned long sz)
199{ 200{
200 pgd_t *pgd; 201 pgd_t *pgd;
201 pud_t *pud; 202 pud_t *pud;
@@ -294,6 +295,11 @@ int pmd_huge(pmd_t pmd)
294 return 0; 295 return 0;
295} 296}
296 297
298int pud_huge(pud_t pud)
299{
300 return 0;
301}
302
297struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, 303struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
298 pmd_t *pmd, int write) 304 pmd_t *pmd, int write)
299{ 305{
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 84898c44dd4d..713297473951 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -788,7 +788,6 @@ int numa_cpu_lookup_table[NR_CPUS];
788cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; 788cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
789 789
790#ifdef CONFIG_NEED_MULTIPLE_NODES 790#ifdef CONFIG_NEED_MULTIPLE_NODES
791static bootmem_data_t plat_node_bdata[MAX_NUMNODES];
792 791
793struct mdesc_mblock { 792struct mdesc_mblock {
794 u64 base; 793 u64 base;
@@ -871,7 +870,7 @@ static void __init allocate_node_data(int nid)
871 NODE_DATA(nid) = __va(paddr); 870 NODE_DATA(nid) = __va(paddr);
872 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 871 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
873 872
874 NODE_DATA(nid)->bdata = &plat_node_bdata[nid]; 873 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
875#endif 874#endif
876 875
877 p = NODE_DATA(nid); 876 p = NODE_DATA(nid);
diff --git a/arch/um/include/init.h b/arch/um/include/init.h
index b00a95741d41..37dd097c16c0 100644
--- a/arch/um/include/init.h
+++ b/arch/um/include/init.h
@@ -45,6 +45,8 @@ typedef void (*exitcall_t)(void);
45# define __section(S) __attribute__ ((__section__(#S))) 45# define __section(S) __attribute__ ((__section__(#S)))
46#endif 46#endif
47 47
48#if __GNUC__ == 3
49
48#if __GNUC_MINOR__ >= 3 50#if __GNUC_MINOR__ >= 3
49# define __used __attribute__((__used__)) 51# define __used __attribute__((__used__))
50#else 52#else
@@ -52,6 +54,12 @@ typedef void (*exitcall_t)(void);
52#endif 54#endif
53 55
54#else 56#else
57#if __GNUC__ == 4
58# define __used __attribute__((__used__))
59#endif
60#endif
61
62#else
55#include <linux/compiler.h> 63#include <linux/compiler.h>
56#endif 64#endif
57/* These are for everybody (although not all archs will actually 65/* These are for everybody (although not all archs will actually
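
The nested preprocessor checks added above are hard to read: the net effect is that __used expands to __attribute__((__used__)) on gcc 3.3+ and on gcc 4.x, while the branch elided between the two hunks keeps the older gcc 3.0-3.2 fallback. An equivalent flattened sketch of just the 3.3+/4.x case:

#if (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) || __GNUC__ == 4
# define __used __attribute__((__used__))
#endif
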
diff --git a/arch/um/include/irq_kern.h b/arch/um/include/irq_kern.h
index 4f775597fd5f..fba3895274f9 100644
--- a/arch/um/include/irq_kern.h
+++ b/arch/um/include/irq_kern.h
@@ -13,8 +13,6 @@ extern int um_request_irq(unsigned int irq, int fd, int type,
13 irq_handler_t handler, 13 irq_handler_t handler,
14 unsigned long irqflags, const char * devname, 14 unsigned long irqflags, const char * devname,
15 void *dev_id); 15 void *dev_id);
16extern int init_aio_irq(int irq, char *name,
17 irq_handler_t handler);
18 16
19#endif 17#endif
20 18
diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h
index e60b31873de1..c6c784df2673 100644
--- a/arch/um/include/irq_user.h
+++ b/arch/um/include/irq_user.h
@@ -21,8 +21,6 @@ struct irq_fd {
21enum { IRQ_READ, IRQ_WRITE }; 21enum { IRQ_READ, IRQ_WRITE };
22 22
23extern void sigio_handler(int sig, struct uml_pt_regs *regs); 23extern void sigio_handler(int sig, struct uml_pt_regs *regs);
24extern int activate_fd(int irq, int fd, int type, void *dev_id);
25extern void free_irq_by_irq_and_dev(unsigned int irq, void *dev_id);
26extern void free_irq_by_fd(int fd); 24extern void free_irq_by_fd(int fd);
27extern void reactivate_fd(int fd, int irqnum); 25extern void reactivate_fd(int fd, int irqnum);
28extern void deactivate_fd(int fd, int irqnum); 26extern void deactivate_fd(int fd, int irqnum);
diff --git a/arch/um/include/skas/skas.h b/arch/um/include/skas/skas.h
index b073f8a86bd3..64d2c7443306 100644
--- a/arch/um/include/skas/skas.h
+++ b/arch/um/include/skas/skas.h
@@ -16,7 +16,6 @@ extern int user_thread(unsigned long stack, int flags);
16extern void new_thread_handler(void); 16extern void new_thread_handler(void);
17extern void handle_syscall(struct uml_pt_regs *regs); 17extern void handle_syscall(struct uml_pt_regs *regs);
18extern int new_mm(unsigned long stack); 18extern int new_mm(unsigned long stack);
19extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
20extern long execute_syscall_skas(void *r); 19extern long execute_syscall_skas(void *r);
21extern unsigned long current_stub_stack(void); 20extern unsigned long current_stub_stack(void);
22 21
diff --git a/arch/um/include/um_uaccess.h b/arch/um/include/um_uaccess.h
index 2b6fc8e0f071..45c04999d670 100644
--- a/arch/um/include/um_uaccess.h
+++ b/arch/um/include/um_uaccess.h
@@ -34,7 +34,6 @@ extern int copy_to_user(void __user *to, const void *from, int n);
34 34
35extern int __do_copy_to_user(void *to, const void *from, int n, 35extern int __do_copy_to_user(void *to, const void *from, int n,
36 void **fault_addr, jmp_buf **fault_catcher); 36 void **fault_addr, jmp_buf **fault_catcher);
37extern void __do_copy(void *to, const void *from, int n);
38 37
39/* 38/*
40 * strncpy_from_user: - Copy a NUL terminated string from userspace. 39 * strncpy_from_user: - Copy a NUL terminated string from userspace.
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 91587f8db340..3d7aad09b171 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -102,7 +102,7 @@ void sigio_handler(int sig, struct uml_pt_regs *regs)
102 102
103static DEFINE_SPINLOCK(irq_lock); 103static DEFINE_SPINLOCK(irq_lock);
104 104
105int activate_fd(int irq, int fd, int type, void *dev_id) 105static int activate_fd(int irq, int fd, int type, void *dev_id)
106{ 106{
107 struct pollfd *tmp_pfd; 107 struct pollfd *tmp_pfd;
108 struct irq_fd *new_fd, *irq_fd; 108 struct irq_fd *new_fd, *irq_fd;
@@ -216,7 +216,7 @@ static int same_irq_and_dev(struct irq_fd *irq, void *d)
216 return ((irq->irq == data->irq) && (irq->id == data->dev)); 216 return ((irq->irq == data->irq) && (irq->id == data->dev));
217} 217}
218 218
219void free_irq_by_irq_and_dev(unsigned int irq, void *dev) 219static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
220{ 220{
221 struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq, 221 struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
222 .dev = dev }); 222 .dev = dev });
@@ -403,37 +403,6 @@ void __init init_IRQ(void)
403 } 403 }
404} 404}
405 405
406int init_aio_irq(int irq, char *name, irq_handler_t handler)
407{
408 int fds[2], err;
409
410 err = os_pipe(fds, 1, 1);
411 if (err) {
412 printk(KERN_ERR "init_aio_irq - os_pipe failed, err = %d\n",
413 -err);
414 goto out;
415 }
416
417 err = um_request_irq(irq, fds[0], IRQ_READ, handler,
418 IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name,
419 (void *) (long) fds[0]);
420 if (err) {
421 printk(KERN_ERR "init_aio_irq - : um_request_irq failed, "
422 "err = %d\n",
423 err);
424 goto out_close;
425 }
426
427 err = fds[1];
428 goto out;
429
430 out_close:
431 os_close_file(fds[0]);
432 os_close_file(fds[1]);
433 out:
434 return err;
435}
436
437/* 406/*
438 * IRQ stack entry and exit: 407 * IRQ stack entry and exit:
439 * 408 *
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index ccc02a616c22..836fc9b94707 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -18,7 +18,6 @@ EXPORT_SYMBOL(get_signals);
18EXPORT_SYMBOL(kernel_thread); 18EXPORT_SYMBOL(kernel_thread);
19EXPORT_SYMBOL(sys_waitpid); 19EXPORT_SYMBOL(sys_waitpid);
20EXPORT_SYMBOL(flush_tlb_range); 20EXPORT_SYMBOL(flush_tlb_range);
21EXPORT_SYMBOL(arch_validate);
22 21
23EXPORT_SYMBOL(high_physmem); 22EXPORT_SYMBOL(high_physmem);
24EXPORT_SYMBOL(empty_zero_page); 23EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index b0ee64622ff7..e2274ef3155d 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -21,7 +21,7 @@
21/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */ 21/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
22unsigned long *empty_zero_page = NULL; 22unsigned long *empty_zero_page = NULL;
23/* allocated in paging_init and unchanged thereafter */ 23/* allocated in paging_init and unchanged thereafter */
24unsigned long *empty_bad_page = NULL; 24static unsigned long *empty_bad_page = NULL;
25 25
26/* 26/*
27 * Initialized during boot, and readonly for initializing page tables 27 * Initialized during boot, and readonly for initializing page tables
@@ -240,37 +240,6 @@ void __init paging_init(void)
240#endif 240#endif
241} 241}
242 242
243struct page *arch_validate(struct page *page, gfp_t mask, int order)
244{
245 unsigned long addr, zero = 0;
246 int i;
247
248 again:
249 if (page == NULL)
250 return page;
251 if (PageHighMem(page))
252 return page;
253
254 addr = (unsigned long) page_address(page);
255 for (i = 0; i < (1 << order); i++) {
256 current->thread.fault_addr = (void *) addr;
257 if (__do_copy_to_user((void __user *) addr, &zero,
258 sizeof(zero),
259 &current->thread.fault_addr,
260 &current->thread.fault_catcher)) {
261 if (!(mask & __GFP_WAIT))
262 return NULL;
263 else break;
264 }
265 addr += PAGE_SIZE;
266 }
267
268 if (i == (1 << order))
269 return page;
270 page = alloc_pages(mask, order);
271 goto again;
272}
273
274/* 243/*
275 * This can't do anything because nothing in the kernel image can be freed 244 * This can't do anything because nothing in the kernel image can be freed
276 * since it's not in kernel physical memory. 245 * since it's not in kernel physical memory.
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 9757085a0220..a1a9090254c2 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -185,7 +185,7 @@ unsigned long find_iomem(char *driver, unsigned long *len_out)
185 return 0; 185 return 0;
186} 186}
187 187
188int setup_iomem(void) 188static int setup_iomem(void)
189{ 189{
190 struct iomem_region *region = iomem_regions; 190 struct iomem_region *region = iomem_regions;
191 unsigned long iomem_start = high_physmem + PAGE_SIZE; 191 unsigned long iomem_start = high_physmem + PAGE_SIZE;
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 83603cfbde81..a1c6d07cac3e 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -243,7 +243,7 @@ void default_idle(void)
243 if (need_resched()) 243 if (need_resched())
244 schedule(); 244 schedule();
245 245
246 tick_nohz_stop_sched_tick(); 246 tick_nohz_stop_sched_tick(1);
247 nsecs = disable_timer(); 247 nsecs = disable_timer();
248 idle_sleep(nsecs); 248 idle_sleep(nsecs);
249 tick_nohz_restart_sched_tick(); 249 tick_nohz_restart_sched_tick();
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 47b57b497d55..15e8b7c4de13 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -225,7 +225,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
225 return ret; 225 return ret;
226} 226}
227 227
228void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs, 228static void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs,
229 int error_code) 229 int error_code)
230{ 230{
231 struct siginfo info; 231 struct siginfo info;
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index c3e2f369c33c..47f04f4a3464 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -13,14 +13,6 @@
13#include "kern_util.h" 13#include "kern_util.h"
14#include "os.h" 14#include "os.h"
15 15
16/*
17 * Scheduler clock - returns current time in nanosec units.
18 */
19unsigned long long sched_clock(void)
20{
21 return (unsigned long long)jiffies_64 * (NSEC_PER_SEC / HZ);
22}
23
24void timer_handler(int sig, struct uml_pt_regs *regs) 16void timer_handler(int sig, struct uml_pt_regs *regs)
25{ 17{
26 unsigned long flags; 18 unsigned long flags;
diff --git a/arch/um/kernel/uaccess.c b/arch/um/kernel/uaccess.c
index f0f4b040d7c5..dd33f040c526 100644
--- a/arch/um/kernel/uaccess.c
+++ b/arch/um/kernel/uaccess.c
@@ -12,7 +12,7 @@
12#include <linux/string.h> 12#include <linux/string.h>
13#include "os.h" 13#include "os.h"
14 14
15void __do_copy(void *to, const void *from, int n) 15static void __do_copy(void *to, const void *from, int n)
16{ 16{
17 memcpy(to, from, n); 17 memcpy(to, from, n);
18} 18}
diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
index eb8f2e4be192..63d299df152b 100644
--- a/arch/um/os-Linux/sigio.c
+++ b/arch/um/os-Linux/sigio.c
@@ -530,7 +530,7 @@ static void tty_close(int master, int slave)
530 printk(UM_KERN_CONT "No, enabling workaround\n"); 530 printk(UM_KERN_CONT "No, enabling workaround\n");
531} 531}
532 532
533void __init check_sigio(void) 533static void __init check_sigio(void)
534{ 534{
535 if ((access("/dev/ptmx", R_OK) < 0) && 535 if ((access("/dev/ptmx", R_OK) < 0) &&
536 (access("/dev/ptyp0", R_OK) < 0)) { 536 (access("/dev/ptyp0", R_OK) < 0)) {
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 5aade6027e40..6ae180703a63 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -126,7 +126,7 @@ void set_sigstack(void *sig_stack, int size)
126 panic("enabling signal stack failed, errno = %d\n", errno); 126 panic("enabling signal stack failed, errno = %d\n", errno);
127} 127}
128 128
129void (*handlers[_NSIG])(int sig, struct sigcontext *sc); 129static void (*handlers[_NSIG])(int sig, struct sigcontext *sc);
130 130
131void handle_signal(int sig, struct sigcontext *sc) 131void handle_signal(int sig, struct sigcontext *sc)
132{ 132{
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 172ad8f72e12..d6e0a2234b86 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -96,7 +96,7 @@ bad_wait:
96 96
97extern unsigned long current_stub_stack(void); 97extern unsigned long current_stub_stack(void);
98 98
99void get_skas_faultinfo(int pid, struct faultinfo * fi) 99static void get_skas_faultinfo(int pid, struct faultinfo *fi)
100{ 100{
101 int err; 101 int err;
102 102
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
index 106fa8641553..a27defb81884 100644
--- a/arch/um/os-Linux/umid.c
+++ b/arch/um/os-Linux/umid.c
@@ -245,7 +245,7 @@ int __init set_umid(char *name)
245/* Changed in make_umid, which is called during early boot */ 245/* Changed in make_umid, which is called during early boot */
246static int umid_setup = 0; 246static int umid_setup = 0;
247 247
248int __init make_umid(void) 248static int __init make_umid(void)
249{ 249{
250 int fd, err; 250 int fd, err;
251 char tmp[256]; 251 char tmp[256];
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c
index a74442d13762..2c6d0d731c12 100644
--- a/arch/um/sys-i386/bugs.c
+++ b/arch/um/sys-i386/bugs.c
@@ -12,7 +12,7 @@
 #include "sysdep/ptrace.h"
 
 /* Set during early boot */
-int host_has_cmov = 1;
+static int host_has_cmov = 1;
 static jmp_buf cmov_test_return;
 
 static void cmov_sigill_test_handler(int sig)
diff --git a/arch/um/sys-i386/checksum.S b/arch/um/sys-i386/checksum.S
index 62c7e564f22e..f058d2f82e18 100644
--- a/arch/um/sys-i386/checksum.S
+++ b/arch/um/sys-i386/checksum.S
@@ -243,13 +243,12 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 	.previous
 
 .align 4
-.globl csum_partial_copy_generic_i386
-
+
 #ifndef CONFIG_X86_USE_PPRO_CHECKSUM
 
 #define ARGBASE 16
 #define FP 12
 
 csum_partial_copy_generic_i386:
 	subl $4,%esp
 	pushl %edi
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index a34263e6b08d..a4846a84a7be 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -14,8 +14,8 @@
 
 extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
 
-long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
-		     void **addr, int done)
+static long write_ldt_entry(struct mm_id *mm_idp, int func,
+			    struct user_desc *desc, void **addr, int done)
 {
 	long res;
 
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
deleted file mode 100644
index 4379f43505ef..000000000000
--- a/arch/v850/Kconfig
+++ /dev/null
@@ -1,353 +0,0 @@
1#############################################################################
2#
3# For a description of the syntax of this configuration file,
4# see Documentation/kbuild/kconfig-language.txt.
5#
6#############################################################################
7
8mainmenu "uClinux/v850 (w/o MMU) Kernel Configuration"
9
10config MMU
11 bool
12 default n
13config ZONE_DMA
14 bool
15 default y
16config RWSEM_GENERIC_SPINLOCK
17 bool
18 default y
19config RWSEM_XCHGADD_ALGORITHM
20 bool
21 default n
22config GENERIC_FIND_NEXT_BIT
23 bool
24 default y
25config GENERIC_HWEIGHT
26 bool
27 default y
28config GENERIC_CALIBRATE_DELAY
29 bool
30 default y
31
32config GENERIC_HARDIRQS
33 bool
34 default y
35
36config GENERIC_IRQ_PROBE
37 bool
38 default y
39
40config GENERIC_TIME
41 bool
42 default y
43
44config TIME_LOW_RES
45 bool
46 default y
47
48config ARCH_HAS_ILOG2_U32
49 bool
50 default n
51
52config ARCH_HAS_ILOG2_U64
53 bool
54 default n
55
56config ARCH_SUPPORTS_AOUT
57 def_bool y
58
59# Turn off some random 386 crap that can affect device config
60config ISA
61 bool
62 default n
63config ISAPNP
64 bool
65 default n
66config EISA
67 bool
68 default n
69config MCA
70 bool
71 default n
72
73
74#############################################################################
75#### v850-specific config
76
77# Define the architecture
78config V850
79 bool
80 default y
81 select HAVE_IDE
82
83menu "Processor type and features"
84
85 choice
86 prompt "Platform"
87 default GDB
88 config V850E_SIM
89 bool "GDB"
90 config RTE_CB_MA1
91 bool "RTE-V850E/MA1-CB"
92 config RTE_CB_NB85E
93 bool "RTE-V850E/NB85E-CB"
94 config RTE_CB_ME2
95 bool "RTE-V850E/ME2-CB"
96 config V850E_AS85EP1
97 bool "AS85EP1"
98 config V850E2_SIM85E2C
99 bool "sim85e2c"
100 config V850E2_SIM85E2S
101 bool "sim85e2s"
102 config V850E2_FPGA85E2C
103 bool "NA85E2C-FPGA"
104 config V850E2_ANNA
105 bool "Anna"
106 endchoice
107
108 #### V850E processor-specific config
109
110 # All CPUs currently supported use the v850e architecture
111 config V850E
112 bool
113 default y
114
115 # The RTE-V850E/MA1-CB is the only type of V850E/MA1 platform we
116 # currently support
117 config V850E_MA1
118 bool
119 depends on RTE_CB_MA1
120 default y
121 # Similarly for the RTE-V850E/NB85E-CB - V850E/TEG
122 config V850E_TEG
123 bool
124 depends on RTE_CB_NB85E
125 default y
126 # ... and the RTE-V850E/ME2-CB - V850E/ME2
127 config V850E_ME2
128 bool
129 depends on RTE_CB_ME2
130 default y
131
132
133 #### sim85e2-specific config
134
135 config V850E2_SIM85E2
136 bool
137 depends on V850E2_SIM85E2C || V850E2_SIM85E2S
138 default y
139
140
141 #### V850E2 processor-specific config
142
143 # V850E2 processors
144 config V850E2
145 bool
146 depends on V850E2_SIM85E2 || V850E2_FPGA85E2C || V850E2_ANNA
147 default y
148
149
150 #### RTE-CB platform-specific config
151
152 # Boards in the RTE-x-CB series
153 config RTE_CB
154 bool
155 depends on RTE_CB_MA1 || RTE_CB_NB85E || RTE_CB_ME2
156 default y
157
158 config RTE_CB_MULTI
159 bool
160 # RTE_CB_NB85E can either have multi ROM support or not, but
161 # other platforms (currently only RTE_CB_MA1) require it.
162 prompt "Multi monitor ROM support" if RTE_CB_NB85E
163 depends on RTE_CB_MA1 || RTE_CB_NB85E
164 default y
165
166 config RTE_CB_MULTI_DBTRAP
167 bool "Pass illegal insn trap / dbtrap to kernel"
168 depends on RTE_CB_MULTI
169 default n
170
171 config RTE_CB_MA1_KSRAM
172 bool "Kernel in SRAM (limits size of kernel)"
173 depends on RTE_CB_MA1 && RTE_CB_MULTI
174 default n
175
176 config RTE_MB_A_PCI
177 bool "Mother-A PCI support"
178 depends on RTE_CB
179 default y
180
181 # The GBUS is used to talk to the RTE-MOTHER-A board
182 config RTE_GBUS_INT
183 bool
184 depends on RTE_MB_A_PCI
185 default y
186
187 # The only PCI bus we support is on the RTE-MOTHER-A board
188 config PCI
189 bool
190 default RTE_MB_A_PCI
191
192 #### Some feature-specific configs
193
194 # Everything except for the GDB simulator uses the same interrupt controller
195 config V850E_INTC
196 bool
197 default !V850E_SIM
198
199 # Everything except for the various simulators uses the "Timer D" unit
200 config V850E_TIMER_D
201 bool
202 default !V850E_SIM && !V850E2_SIM85E2
203
204 # Cache control used on some v850e1 processors
205 config V850E_CACHE
206 bool
207 default V850E_TEG || V850E_ME2
208
209 # Cache control used on v850e2 processors; I think this should
210 # actually apply to more, but currently only the SIM85E2S uses it
211 config V850E2_CACHE
212 bool
213 default V850E2_SIM85E2S
214
215 config NO_CACHE
216 bool
217 default !V850E_CACHE && !V850E2_CACHE
218
219 # HZ depends on the platform
220 config HZ
221 int
222 default 24 if V850E_SIM || V850E2_SIM85E2
223 default 122 if V850E2_FPGA85E2C
224 default 100
225
226 #### Misc config
227
228 config ROM_KERNEL
229 bool "Kernel in ROM"
230 depends on V850E2_ANNA || V850E_AS85EP1 || RTE_CB_ME2
231
232 # Some platforms pre-zero memory, in which case the kernel doesn't need to
233 config ZERO_BSS
234 bool
235 depends on !V850E2_SIM85E2C
236 default y
237
238 # The crappy-ass zone allocator requires that the start of allocatable
239 # memory be aligned to the largest possible allocation.
240 config FORCE_MAX_ZONEORDER
241 int
242 default 8 if V850E2_SIM85E2C || V850E2_FPGA85E2C
243
244 config V850E_HIGHRES_TIMER
245 bool "High resolution timer support"
246 depends on V850E_TIMER_D
247 config TIME_BOOTUP
248 bool "Time bootup"
249 depends on V850E_HIGHRES_TIMER
250
251 config RESET_GUARD
252 bool "Reset Guard"
253
254source "mm/Kconfig"
255
256endmenu
257
258
259#############################################################################
260
261source init/Kconfig
262
263#############################################################################
264
265menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
266
267# config PCI
268# bool "PCI support"
269# help
270# Support for PCI bus.
271
272source "drivers/pci/Kconfig"
273
274source "drivers/pcmcia/Kconfig"
275
276source "drivers/pci/hotplug/Kconfig"
277
278endmenu
279
280menu "Executable file formats"
281
282source "fs/Kconfig.binfmt"
283
284endmenu
285
286source "net/Kconfig"
287
288#############################################################################
289
290source "drivers/base/Kconfig"
291
292source drivers/mtd/Kconfig
293
294source drivers/parport/Kconfig
295
296#source drivers/pnp/Kconfig
297
298source drivers/block/Kconfig
299
300#############################################################################
301
302menu "Disk device support"
303
304source "drivers/ide/Kconfig"
305
306source "drivers/scsi/Kconfig"
307
308endmenu
309
310#############################################################################
311
312
313source "drivers/md/Kconfig"
314
315source "drivers/message/fusion/Kconfig"
316
317source "drivers/ieee1394/Kconfig"
318
319source "drivers/message/i2o/Kconfig"
320
321source "drivers/net/Kconfig"
322
323source "drivers/isdn/Kconfig"
324
325#source "drivers/telephony/Kconfig"
326
327#
328# input before char - char/joystick depends on it. As does USB.
329#
330source "drivers/input/Kconfig"
331
332source "drivers/char/Kconfig"
333
334#source drivers/misc/Config.in
335source "drivers/media/Kconfig"
336
337source "fs/Kconfig"
338
339source "drivers/video/Kconfig"
340
341source "sound/Kconfig"
342
343source "drivers/usb/Kconfig"
344
345source "arch/v850/Kconfig.debug"
346
347source "security/Kconfig"
348
349source "crypto/Kconfig"
350
351source "lib/Kconfig"
352
353#############################################################################
diff --git a/arch/v850/Kconfig.debug b/arch/v850/Kconfig.debug
deleted file mode 100644
index 4acfb9cca1ca..000000000000
--- a/arch/v850/Kconfig.debug
+++ /dev/null
@@ -1,10 +0,0 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5config NO_KERNEL_MSG
6 bool "Suppress Kernel BUG Messages"
7 help
8 Do not output any debug BUG messages within the kernel.
9
10endmenu
diff --git a/arch/v850/Makefile b/arch/v850/Makefile
deleted file mode 100644
index 8b629df0029a..000000000000
--- a/arch/v850/Makefile
+++ /dev/null
@@ -1,54 +0,0 @@
1#
2# arch/v850/Makefile
3#
4# Copyright (C) 2001,02,03,05 NEC Corporation
5# Copyright (C) 2001,02,03,05 Miles Bader <miles@gnu.org>
6#
7# This file is included by the global makefile so that you can add your own
8# architecture-specific flags and dependencies. Remember to do have actions
9# for "archclean" and "archdep" for cleaning up and making dependencies for
10# this architecture
11#
12# This file is subject to the terms and conditions of the GNU General Public
13# License. See the file "COPYING" in the main directory of this archive
14# for more details.
15#
16
17arch_dir = arch/v850
18
19KBUILD_CFLAGS += -mv850e
20# r16 is a fixed pointer to the current task
21KBUILD_CFLAGS += -ffixed-r16 -mno-prolog-function
22KBUILD_CFLAGS += -fno-builtin
23KBUILD_CFLAGS += -D__linux__ -DUTS_SYSNAME=\"uClinux\"
24
25# By default, build a kernel that runs on the gdb v850 simulator.
26KBUILD_DEFCONFIG := sim_defconfig
27
28# This prevents the linker from consolidating the .gnu.linkonce.this_module
29# section into .text (which the v850 default linker script for -r does for
30# some reason)
31LDFLAGS_MODULE += --unique=.gnu.linkonce.this_module
32
33OBJCOPY_FLAGS_BLOB := -I binary -O elf32-little -B v850e
34
35
36head-y := $(arch_dir)/kernel/head.o $(arch_dir)/kernel/init_task.o
37core-y += $(arch_dir)/kernel/
38libs-y += $(arch_dir)/lib/
39
40
41# Deal with the initial contents of the root device
42ifdef ROOT_FS_IMAGE
43core-y += root_fs_image.o
44
45# Because the kernel build-system erases all explicit .o build rules, we
46# have to use an intermediate target to fool it into building for us.
47# This results in it being built anew each time, but that's alright.
48root_fs_image.o: root_fs_image_force
49
50root_fs_image_force: $(ROOT_FS_IMAGE)
51 $(OBJCOPY) $(OBJCOPY_FLAGS_BLOB) --rename-section .data=.root,alloc,load,readonly,data,contents $< root_fs_image.o
52endif
53
54CLEAN_FILES += root_fs_image.o
diff --git a/arch/v850/README b/arch/v850/README
deleted file mode 100644
index 12f7f7a665e0..000000000000
--- a/arch/v850/README
+++ /dev/null
@@ -1,44 +0,0 @@
1This port to the NEC V850E processor supports the following platforms:
2
3 "sim"
4 The gdb v850e simulator (CONFIG_V850E_SIM).
5
6 "rte-ma1-cb"
7 The Midas labs RTE-V850E/MA1-CB and RTE-V850E/NB85E-CB evaluation
8 boards (CONFIG_RTE_CB_MA1 and CONFIG_RTE_CB_NB85E). This support
9 has only been tested when running with the Multi-debugger monitor
10 ROM (for the Green Hills Multi debugger). The optional NEC
11 Solution Gear RTE-MOTHER-A motherboard is also supported, which
12 allows PCI boards to be used (CONFIG_RTE_MB_A_PCI).
13
14 "rte-me2-cb"
15 The Midas labs RTE-V850E/ME2-CB evaluation board (CONFIG_RTE_CB_ME2).
16 This has only been tested using a kernel downloaded via an ICE
17 connection using the Multi debugger. Support for the RTE-MOTHER-A is
18 present, but hasn't been tested (unlike the other Midas labs cpu
19 boards, the RTE-V850E/ME2-CB includes an ethernet adaptor).
20
21 "as85ep1"
22 The NEC AS85EP1 V850E evaluation chip/board (CONFIG_V850E_AS85EP1).
23
24 "anna"
25 The NEC `Anna' (board/chip) implementation of the V850E2 processor
26 (CONFIG_V850E2_ANNA).
27
28 "sim85e2c", "sim85e2s"
29 The sim85e2c and sim85e2s simulators, which are verilog simulations
30 of the V850E2 NA85E2C/NA85E2S cpu cores (CONFIG_V850E2_SIM85E2C and
31 CONFIG_V850E2_SIM85E2S).
32
33 "fpga85e2c"
34 A FPGA implementation of the V850E2 NA85E2C cpu core
35 (CONFIG_V850E2_FPGA85E2C).
36
37To get a default kernel configuration for a particular platform, you can
38use a <platform>_defconfig make target (e.g., "make rte-me2-cb_defconfig");
39to see which default configurations are possible, look in the directory
40"arch/v850/configs".
41
42Porting to anything with a V850E/MA1 or MA2 processor should be simple.
43See the file <asm-v850/machdep.h> and the files it includes for an example of
44how to add platform/chip-specific support.
diff --git a/arch/v850/configs/rte-ma1-cb_defconfig b/arch/v850/configs/rte-ma1-cb_defconfig
deleted file mode 100644
index 1a5beda36e29..000000000000
--- a/arch/v850/configs/rte-ma1-cb_defconfig
+++ /dev/null
@@ -1,617 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.13-uc0
4# Fri Sep 2 13:54:27 2005
5#
6# CONFIG_MMU is not set
7# CONFIG_UID16 is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_GENERIC_CALIBRATE_DELAY=y
11# CONFIG_ISA is not set
12# CONFIG_ISAPNP is not set
13# CONFIG_EISA is not set
14# CONFIG_MCA is not set
15CONFIG_V850=y
16
17#
18# Processor type and features
19#
20# CONFIG_V850E_SIM is not set
21CONFIG_RTE_CB_MA1=y
22# CONFIG_RTE_CB_NB85E is not set
23# CONFIG_RTE_CB_ME2 is not set
24# CONFIG_V850E_AS85EP1 is not set
25# CONFIG_V850E2_SIM85E2C is not set
26# CONFIG_V850E2_SIM85E2S is not set
27# CONFIG_V850E2_FPGA85E2C is not set
28# CONFIG_V850E2_ANNA is not set
29CONFIG_V850E=y
30CONFIG_V850E_MA1=y
31CONFIG_RTE_CB=y
32CONFIG_RTE_CB_MULTI=y
33CONFIG_RTE_CB_MULTI_DBTRAP=y
34# CONFIG_RTE_CB_MA1_KSRAM is not set
35CONFIG_RTE_MB_A_PCI=y
36CONFIG_RTE_GBUS_INT=y
37CONFIG_PCI=y
38CONFIG_V850E_INTC=y
39CONFIG_V850E_TIMER_D=y
40# CONFIG_V850E_CACHE is not set
41# CONFIG_V850E2_CACHE is not set
42CONFIG_NO_CACHE=y
43CONFIG_ZERO_BSS=y
44# CONFIG_V850E_HIGHRES_TIMER is not set
45# CONFIG_RESET_GUARD is not set
46CONFIG_LARGE_ALLOCS=y
47CONFIG_FLATMEM=y
48CONFIG_FLAT_NODE_MEM_MAP=y
49
50#
51# Code maturity level options
52#
53# CONFIG_EXPERIMENTAL is not set
54CONFIG_CLEAN_COMPILE=y
55CONFIG_BROKEN_ON_SMP=y
56CONFIG_INIT_ENV_ARG_LIMIT=32
57
58#
59# General setup
60#
61CONFIG_LOCALVERSION=""
62# CONFIG_BSD_PROCESS_ACCT is not set
63# CONFIG_SYSCTL is not set
64# CONFIG_AUDIT is not set
65# CONFIG_HOTPLUG is not set
66CONFIG_KOBJECT_UEVENT=y
67# CONFIG_IKCONFIG is not set
68CONFIG_EMBEDDED=y
69# CONFIG_KALLSYMS is not set
70CONFIG_PRINTK=y
71CONFIG_BUG=y
72# CONFIG_BASE_FULL is not set
73# CONFIG_FUTEX is not set
74# CONFIG_EPOLL is not set
75CONFIG_CC_OPTIMIZE_FOR_SIZE=y
76CONFIG_CC_ALIGN_FUNCTIONS=0
77CONFIG_CC_ALIGN_LABELS=0
78CONFIG_CC_ALIGN_LOOPS=0
79CONFIG_CC_ALIGN_JUMPS=0
80CONFIG_BASE_SMALL=1
81
82#
83# Loadable module support
84#
85CONFIG_MODULES=y
86CONFIG_MODULE_UNLOAD=y
87CONFIG_OBSOLETE_MODPARM=y
88# CONFIG_MODULE_SRCVERSION_ALL is not set
89CONFIG_KMOD=y
90
91#
92# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
93#
94# CONFIG_PCI_LEGACY_PROC is not set
95# CONFIG_PCI_NAMES is not set
96# CONFIG_PCI_DEBUG is not set
97
98#
99# PCCARD (PCMCIA/CardBus) support
100#
101# CONFIG_PCCARD is not set
102
103#
104# PCI Hotplug Support
105#
106
107#
108# Executable file formats
109#
110CONFIG_BINFMT_FLAT=y
111# CONFIG_BINFMT_ZFLAT is not set
112# CONFIG_BINFMT_SHARED_FLAT is not set
113# CONFIG_BINFMT_MISC is not set
114
115#
116# Networking
117#
118CONFIG_NET=y
119
120#
121# Networking options
122#
123# CONFIG_PACKET is not set
124# CONFIG_UNIX is not set
125# CONFIG_NET_KEY is not set
126CONFIG_INET=y
127# CONFIG_IP_MULTICAST is not set
128# CONFIG_IP_ADVANCED_ROUTER is not set
129CONFIG_IP_FIB_HASH=y
130# CONFIG_IP_PNP is not set
131# CONFIG_NET_IPIP is not set
132# CONFIG_NET_IPGRE is not set
133# CONFIG_SYN_COOKIES is not set
134# CONFIG_INET_AH is not set
135# CONFIG_INET_ESP is not set
136# CONFIG_INET_IPCOMP is not set
137# CONFIG_INET_TUNNEL is not set
138# CONFIG_IP_TCPDIAG is not set
139# CONFIG_IP_TCPDIAG_IPV6 is not set
140# CONFIG_TCP_CONG_ADVANCED is not set
141CONFIG_TCP_CONG_BIC=y
142# CONFIG_IPV6 is not set
143# CONFIG_NETFILTER is not set
144# CONFIG_BRIDGE is not set
145# CONFIG_VLAN_8021Q is not set
146# CONFIG_DECNET is not set
147# CONFIG_LLC2 is not set
148# CONFIG_IPX is not set
149# CONFIG_ATALK is not set
150# CONFIG_NET_SCHED is not set
151# CONFIG_NET_CLS_ROUTE is not set
152
153#
154# Network testing
155#
156# CONFIG_NET_PKTGEN is not set
157# CONFIG_HAMRADIO is not set
158# CONFIG_IRDA is not set
159# CONFIG_BT is not set
160
161#
162# Generic Driver Options
163#
164CONFIG_STANDALONE=y
165CONFIG_PREVENT_FIRMWARE_BUILD=y
166# CONFIG_FW_LOADER is not set
167# CONFIG_DEBUG_DRIVER is not set
168
169#
170# Memory Technology Devices (MTD)
171#
172CONFIG_MTD=y
173# CONFIG_MTD_DEBUG is not set
174# CONFIG_MTD_CONCAT is not set
175# CONFIG_MTD_PARTITIONS is not set
176
177#
178# User Modules And Translation Layers
179#
180# CONFIG_MTD_CHAR is not set
181CONFIG_MTD_BLOCK=y
182# CONFIG_FTL is not set
183# CONFIG_NFTL is not set
184# CONFIG_INFTL is not set
185
186#
187# RAM/ROM/Flash chip drivers
188#
189# CONFIG_MTD_CFI is not set
190# CONFIG_MTD_JEDECPROBE is not set
191CONFIG_MTD_MAP_BANK_WIDTH_1=y
192CONFIG_MTD_MAP_BANK_WIDTH_2=y
193CONFIG_MTD_MAP_BANK_WIDTH_4=y
194# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
195# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
196# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
197CONFIG_MTD_CFI_I1=y
198CONFIG_MTD_CFI_I2=y
199# CONFIG_MTD_CFI_I4 is not set
200# CONFIG_MTD_CFI_I8 is not set
201# CONFIG_MTD_RAM is not set
202# CONFIG_MTD_ROM is not set
203# CONFIG_MTD_ABSENT is not set
204
205#
206# Mapping drivers for chip access
207#
208# CONFIG_MTD_COMPLEX_MAPPINGS is not set
209# CONFIG_MTD_PLATRAM is not set
210
211#
212# Self-contained MTD device drivers
213#
214# CONFIG_MTD_PMC551 is not set
215CONFIG_MTD_SLRAM=y
216# CONFIG_MTD_PHRAM is not set
217# CONFIG_MTD_MTDRAM is not set
218# CONFIG_MTD_BLKMTD is not set
219
220#
221# Disk-On-Chip Device Drivers
222#
223# CONFIG_MTD_DOC2000 is not set
224# CONFIG_MTD_DOC2001 is not set
225# CONFIG_MTD_DOC2001PLUS is not set
226
227#
228# NAND Flash Device Drivers
229#
230# CONFIG_MTD_NAND is not set
231
232#
233# Parallel port support
234#
235# CONFIG_PARPORT is not set
236
237#
238# Block devices
239#
240# CONFIG_BLK_DEV_FD is not set
241# CONFIG_BLK_CPQ_DA is not set
242# CONFIG_BLK_CPQ_CISS_DA is not set
243# CONFIG_BLK_DEV_DAC960 is not set
244# CONFIG_BLK_DEV_COW_COMMON is not set
245# CONFIG_BLK_DEV_LOOP is not set
246# CONFIG_BLK_DEV_NBD is not set
247# CONFIG_BLK_DEV_SX8 is not set
248# CONFIG_BLK_DEV_RAM is not set
249CONFIG_BLK_DEV_RAM_COUNT=16
250CONFIG_INITRAMFS_SOURCE=""
251# CONFIG_CDROM_PKTCDVD is not set
252
253#
254# IO Schedulers
255#
256CONFIG_IOSCHED_NOOP=y
257# CONFIG_IOSCHED_AS is not set
258# CONFIG_IOSCHED_DEADLINE is not set
259# CONFIG_IOSCHED_CFQ is not set
260# CONFIG_ATA_OVER_ETH is not set
261
262#
263# Disk device support
264#
265
266#
267# ATA/ATAPI/MFM/RLL support
268#
269# CONFIG_IDE is not set
270
271#
272# SCSI device support
273#
274# CONFIG_SCSI is not set
275
276#
277# Multi-device support (RAID and LVM)
278#
279# CONFIG_MD is not set
280
281#
282# Fusion MPT device support
283#
284# CONFIG_FUSION is not set
285
286#
287# IEEE 1394 (FireWire) support
288#
289# CONFIG_IEEE1394 is not set
290
291#
292# I2O device support
293#
294# CONFIG_I2O is not set
295
296#
297# Network device support
298#
299CONFIG_NETDEVICES=y
300# CONFIG_DUMMY is not set
301# CONFIG_BONDING is not set
302# CONFIG_EQUALIZER is not set
303# CONFIG_TUN is not set
304
305#
306# ARCnet devices
307#
308# CONFIG_ARCNET is not set
309
310#
311# Ethernet (10 or 100Mbit)
312#
313CONFIG_NET_ETHERNET=y
314CONFIG_MII=y
315# CONFIG_HAPPYMEAL is not set
316# CONFIG_SUNGEM is not set
317# CONFIG_NET_VENDOR_3COM is not set
318# CONFIG_NET_VENDOR_SMC is not set
319
320#
321# Tulip family network device support
322#
323# CONFIG_NET_TULIP is not set
324# CONFIG_HP100 is not set
325# CONFIG_NE2000 is not set
326CONFIG_NET_PCI=y
327# CONFIG_PCNET32 is not set
328# CONFIG_AMD8111_ETH is not set
329# CONFIG_ADAPTEC_STARFIRE is not set
330# CONFIG_DGRS is not set
331CONFIG_EEPRO100=y
332# CONFIG_E100 is not set
333# CONFIG_FEALNX is not set
334# CONFIG_NATSEMI is not set
335# CONFIG_NE2K_PCI is not set
336# CONFIG_8139TOO is not set
337# CONFIG_SIS900 is not set
338# CONFIG_EPIC100 is not set
339# CONFIG_SUNDANCE is not set
340# CONFIG_TLAN is not set
341# CONFIG_VIA_RHINE is not set
342
343#
344# Ethernet (1000 Mbit)
345#
346# CONFIG_ACENIC is not set
347# CONFIG_DL2K is not set
348# CONFIG_E1000 is not set
349# CONFIG_NS83820 is not set
350# CONFIG_HAMACHI is not set
351# CONFIG_R8169 is not set
352# CONFIG_SK98LIN is not set
353# CONFIG_VIA_VELOCITY is not set
354# CONFIG_TIGON3 is not set
355# CONFIG_BNX2 is not set
356
357#
358# Ethernet (10000 Mbit)
359#
360# CONFIG_IXGB is not set
361# CONFIG_S2IO is not set
362
363#
364# Token Ring devices
365#
366# CONFIG_TR is not set
367
368#
369# Wireless LAN (non-hamradio)
370#
371# CONFIG_NET_RADIO is not set
372
373#
374# Wan interfaces
375#
376# CONFIG_WAN is not set
377# CONFIG_FDDI is not set
378# CONFIG_PPP is not set
379# CONFIG_SLIP is not set
380# CONFIG_NETPOLL is not set
381# CONFIG_NET_POLL_CONTROLLER is not set
382
383#
384# ISDN subsystem
385#
386# CONFIG_ISDN is not set
387
388#
389# Input device support
390#
391CONFIG_INPUT=y
392
393#
394# Userland interfaces
395#
396# CONFIG_INPUT_MOUSEDEV is not set
397# CONFIG_INPUT_JOYDEV is not set
398# CONFIG_INPUT_TSDEV is not set
399# CONFIG_INPUT_EVDEV is not set
400# CONFIG_INPUT_EVBUG is not set
401
402#
403# Input Device Drivers
404#
405# CONFIG_INPUT_KEYBOARD is not set
406# CONFIG_INPUT_MOUSE is not set
407# CONFIG_INPUT_JOYSTICK is not set
408# CONFIG_INPUT_TOUCHSCREEN is not set
409# CONFIG_INPUT_MISC is not set
410
411#
412# Hardware I/O ports
413#
414# CONFIG_SERIO is not set
415# CONFIG_GAMEPORT is not set
416
417#
418# Character devices
419#
420# CONFIG_VT is not set
421# CONFIG_SERIAL_NONSTANDARD is not set
422
423#
424# Serial drivers
425#
426# CONFIG_SERIAL_8250 is not set
427
428#
429# Non-8250 serial port support
430#
431CONFIG_V850E_UART=y
432CONFIG_V850E_UART_CONSOLE=y
433CONFIG_SERIAL_CORE=y
434CONFIG_SERIAL_CORE_CONSOLE=y
435# CONFIG_SERIAL_JSM is not set
436# CONFIG_UNIX98_PTYS is not set
437# CONFIG_LEGACY_PTYS is not set
438
439#
440# IPMI
441#
442# CONFIG_IPMI_HANDLER is not set
443
444#
445# Watchdog Cards
446#
447# CONFIG_WATCHDOG is not set
448# CONFIG_RTC is not set
449# CONFIG_GEN_RTC is not set
450# CONFIG_DTLK is not set
451# CONFIG_R3964 is not set
452# CONFIG_APPLICOM is not set
453
454#
455# Ftape, the floppy tape device driver
456#
457# CONFIG_DRM is not set
458# CONFIG_RAW_DRIVER is not set
459
460#
461# TPM devices
462#
463
464#
465# Multimedia devices
466#
467# CONFIG_VIDEO_DEV is not set
468
469#
470# Digital Video Broadcasting Devices
471#
472# CONFIG_DVB is not set
473
474#
475# File systems
476#
477# CONFIG_EXT2_FS is not set
478# CONFIG_EXT3_FS is not set
479# CONFIG_JBD is not set
480# CONFIG_REISERFS_FS is not set
481# CONFIG_JFS_FS is not set
482# CONFIG_FS_POSIX_ACL is not set
483
484#
485# XFS support
486#
487# CONFIG_XFS_FS is not set
488# CONFIG_MINIX_FS is not set
489CONFIG_ROMFS_FS=y
490# CONFIG_MAGIC_ROM_PTR is not set
491CONFIG_INOTIFY=y
492# CONFIG_QUOTA is not set
493CONFIG_DNOTIFY=y
494# CONFIG_AUTOFS_FS is not set
495# CONFIG_AUTOFS4_FS is not set
496
497#
498# CD-ROM/DVD Filesystems
499#
500# CONFIG_ISO9660_FS is not set
501# CONFIG_UDF_FS is not set
502
503#
504# DOS/FAT/NT Filesystems
505#
506# CONFIG_MSDOS_FS is not set
507# CONFIG_VFAT_FS is not set
508# CONFIG_NTFS_FS is not set
509
510#
511# Pseudo filesystems
512#
513CONFIG_PROC_FS=y
514CONFIG_SYSFS=y
515# CONFIG_TMPFS is not set
516# CONFIG_HUGETLB_PAGE is not set
517CONFIG_RAMFS=y
518
519#
520# Miscellaneous filesystems
521#
522# CONFIG_HFSPLUS_FS is not set
523# CONFIG_JFFS_FS is not set
524# CONFIG_JFFS2_FS is not set
525# CONFIG_CRAMFS is not set
526# CONFIG_VXFS_FS is not set
527# CONFIG_HPFS_FS is not set
528# CONFIG_QNX4FS_FS is not set
529# CONFIG_SYSV_FS is not set
530# CONFIG_UFS_FS is not set
531
532#
533# Network File Systems
534#
535CONFIG_NFS_FS=y
536CONFIG_NFS_V3=y
537# CONFIG_NFS_V3_ACL is not set
538# CONFIG_NFSD is not set
539CONFIG_LOCKD=y
540CONFIG_LOCKD_V4=y
541CONFIG_NFS_COMMON=y
542CONFIG_SUNRPC=y
543# CONFIG_SMB_FS is not set
544# CONFIG_CIFS is not set
545# CONFIG_NCP_FS is not set
546# CONFIG_CODA_FS is not set
547
548#
549# Partition Types
550#
551# CONFIG_PARTITION_ADVANCED is not set
552CONFIG_MSDOS_PARTITION=y
553
554#
555# Native Language Support
556#
557# CONFIG_NLS is not set
558
559#
560# Graphics support
561#
562# CONFIG_FB is not set
563
564#
565# Sound
566#
567# CONFIG_SOUND is not set
568
569#
570# USB support
571#
572CONFIG_USB_ARCH_HAS_HCD=y
573CONFIG_USB_ARCH_HAS_OHCI=y
574# CONFIG_USB is not set
575
576#
577# USB Gadget Support
578#
579# CONFIG_USB_GADGET is not set
580
581#
582# Kernel hacking
583#
584# CONFIG_PRINTK_TIME is not set
585CONFIG_DEBUG_KERNEL=y
586# CONFIG_MAGIC_SYSRQ is not set
587CONFIG_LOG_BUF_SHIFT=14
588# CONFIG_SCHEDSTATS is not set
589# CONFIG_DEBUG_SLAB is not set
590# CONFIG_DEBUG_SPINLOCK is not set
591# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
592# CONFIG_DEBUG_KOBJECT is not set
593CONFIG_DEBUG_INFO=y
594# CONFIG_DEBUG_FS is not set
595# CONFIG_NO_KERNEL_MSG is not set
596
597#
598# Security options
599#
600# CONFIG_KEYS is not set
601# CONFIG_SECURITY is not set
602
603#
604# Cryptographic options
605#
606# CONFIG_CRYPTO is not set
607
608#
609# Hardware crypto devices
610#
611
612#
613# Library routines
614#
615# CONFIG_CRC_CCITT is not set
616# CONFIG_CRC32 is not set
617# CONFIG_LIBCRC32C is not set
diff --git a/arch/v850/configs/rte-me2-cb_defconfig b/arch/v850/configs/rte-me2-cb_defconfig
deleted file mode 100644
index 15e666478061..000000000000
--- a/arch/v850/configs/rte-me2-cb_defconfig
+++ /dev/null
@@ -1,462 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.13-uc0
4# Fri Sep 2 13:47:50 2005
5#
6# CONFIG_MMU is not set
7# CONFIG_UID16 is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_GENERIC_CALIBRATE_DELAY=y
11# CONFIG_ISA is not set
12# CONFIG_ISAPNP is not set
13# CONFIG_EISA is not set
14# CONFIG_MCA is not set
15CONFIG_V850=y
16
17#
18# Processor type and features
19#
20# CONFIG_V850E_SIM is not set
21# CONFIG_RTE_CB_MA1 is not set
22# CONFIG_RTE_CB_NB85E is not set
23CONFIG_RTE_CB_ME2=y
24# CONFIG_V850E_AS85EP1 is not set
25# CONFIG_V850E2_SIM85E2C is not set
26# CONFIG_V850E2_SIM85E2S is not set
27# CONFIG_V850E2_FPGA85E2C is not set
28# CONFIG_V850E2_ANNA is not set
29CONFIG_V850E=y
30CONFIG_V850E_ME2=y
31CONFIG_RTE_CB=y
32# CONFIG_RTE_MB_A_PCI is not set
33# CONFIG_PCI is not set
34CONFIG_V850E_INTC=y
35CONFIG_V850E_TIMER_D=y
36CONFIG_V850E_CACHE=y
37# CONFIG_V850E2_CACHE is not set
38# CONFIG_NO_CACHE is not set
39# CONFIG_ROM_KERNEL is not set
40CONFIG_ZERO_BSS=y
41# CONFIG_V850E_HIGHRES_TIMER is not set
42# CONFIG_RESET_GUARD is not set
43CONFIG_LARGE_ALLOCS=y
44CONFIG_FLATMEM=y
45CONFIG_FLAT_NODE_MEM_MAP=y
46
47#
48# Code maturity level options
49#
50# CONFIG_EXPERIMENTAL is not set
51CONFIG_CLEAN_COMPILE=y
52CONFIG_BROKEN_ON_SMP=y
53CONFIG_INIT_ENV_ARG_LIMIT=32
54
55#
56# General setup
57#
58CONFIG_LOCALVERSION=""
59# CONFIG_BSD_PROCESS_ACCT is not set
60# CONFIG_SYSCTL is not set
61# CONFIG_HOTPLUG is not set
62# CONFIG_IKCONFIG is not set
63CONFIG_EMBEDDED=y
64# CONFIG_KALLSYMS is not set
65CONFIG_PRINTK=y
66CONFIG_BUG=y
67# CONFIG_BASE_FULL is not set
68# CONFIG_FUTEX is not set
69# CONFIG_EPOLL is not set
70CONFIG_CC_OPTIMIZE_FOR_SIZE=y
71CONFIG_CC_ALIGN_FUNCTIONS=0
72CONFIG_CC_ALIGN_LABELS=0
73CONFIG_CC_ALIGN_LOOPS=0
74CONFIG_CC_ALIGN_JUMPS=0
75CONFIG_BASE_SMALL=1
76
77#
78# Loadable module support
79#
80CONFIG_MODULES=y
81CONFIG_MODULE_UNLOAD=y
82CONFIG_OBSOLETE_MODPARM=y
83# CONFIG_MODULE_SRCVERSION_ALL is not set
84CONFIG_KMOD=y
85
86#
87# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
88#
89
90#
91# PCCARD (PCMCIA/CardBus) support
92#
93# CONFIG_PCCARD is not set
94
95#
96# PCI Hotplug Support
97#
98
99#
100# Executable file formats
101#
102CONFIG_BINFMT_FLAT=y
103# CONFIG_BINFMT_ZFLAT is not set
104# CONFIG_BINFMT_SHARED_FLAT is not set
105# CONFIG_BINFMT_MISC is not set
106
107#
108# Networking
109#
110# CONFIG_NET is not set
111
112#
113# Generic Driver Options
114#
115CONFIG_STANDALONE=y
116CONFIG_PREVENT_FIRMWARE_BUILD=y
117# CONFIG_FW_LOADER is not set
118# CONFIG_DEBUG_DRIVER is not set
119
120#
121# Memory Technology Devices (MTD)
122#
123CONFIG_MTD=y
124# CONFIG_MTD_DEBUG is not set
125# CONFIG_MTD_CONCAT is not set
126# CONFIG_MTD_PARTITIONS is not set
127
128#
129# User Modules And Translation Layers
130#
131# CONFIG_MTD_CHAR is not set
132CONFIG_MTD_BLOCK=y
133# CONFIG_FTL is not set
134# CONFIG_NFTL is not set
135# CONFIG_INFTL is not set
136
137#
138# RAM/ROM/Flash chip drivers
139#
140# CONFIG_MTD_CFI is not set
141# CONFIG_MTD_JEDECPROBE is not set
142CONFIG_MTD_MAP_BANK_WIDTH_1=y
143CONFIG_MTD_MAP_BANK_WIDTH_2=y
144CONFIG_MTD_MAP_BANK_WIDTH_4=y
145# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
146# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
147# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
148CONFIG_MTD_CFI_I1=y
149CONFIG_MTD_CFI_I2=y
150# CONFIG_MTD_CFI_I4 is not set
151# CONFIG_MTD_CFI_I8 is not set
152# CONFIG_MTD_RAM is not set
153# CONFIG_MTD_ROM is not set
154# CONFIG_MTD_ABSENT is not set
155
156#
157# Mapping drivers for chip access
158#
159# CONFIG_MTD_COMPLEX_MAPPINGS is not set
160# CONFIG_MTD_PLATRAM is not set
161
162#
163# Self-contained MTD device drivers
164#
165CONFIG_MTD_SLRAM=y
166# CONFIG_MTD_PHRAM is not set
167# CONFIG_MTD_MTDRAM is not set
168# CONFIG_MTD_BLKMTD is not set
169
170#
171# Disk-On-Chip Device Drivers
172#
173# CONFIG_MTD_DOC2000 is not set
174# CONFIG_MTD_DOC2001 is not set
175# CONFIG_MTD_DOC2001PLUS is not set
176
177#
178# NAND Flash Device Drivers
179#
180# CONFIG_MTD_NAND is not set
181
182#
183# Parallel port support
184#
185# CONFIG_PARPORT is not set
186
187#
188# Block devices
189#
190# CONFIG_BLK_DEV_FD is not set
191# CONFIG_BLK_DEV_COW_COMMON is not set
192# CONFIG_BLK_DEV_LOOP is not set
193# CONFIG_BLK_DEV_RAM is not set
194CONFIG_BLK_DEV_RAM_COUNT=16
195CONFIG_INITRAMFS_SOURCE=""
196# CONFIG_CDROM_PKTCDVD is not set
197
198#
199# IO Schedulers
200#
201CONFIG_IOSCHED_NOOP=y
202# CONFIG_IOSCHED_AS is not set
203# CONFIG_IOSCHED_DEADLINE is not set
204# CONFIG_IOSCHED_CFQ is not set
205
206#
207# Disk device support
208#
209
210#
211# ATA/ATAPI/MFM/RLL support
212#
213# CONFIG_IDE is not set
214
215#
216# SCSI device support
217#
218# CONFIG_SCSI is not set
219
220#
221# Multi-device support (RAID and LVM)
222#
223# CONFIG_MD is not set
224
225#
226# Fusion MPT device support
227#
228# CONFIG_FUSION is not set
229
230#
231# IEEE 1394 (FireWire) support
232#
233
234#
235# I2O device support
236#
237
238#
239# Network device support
240#
241# CONFIG_NETPOLL is not set
242# CONFIG_NET_POLL_CONTROLLER is not set
243
244#
245# ISDN subsystem
246#
247
248#
249# Input device support
250#
251CONFIG_INPUT=y
252
253#
254# Userland interfaces
255#
256# CONFIG_INPUT_MOUSEDEV is not set
257# CONFIG_INPUT_JOYDEV is not set
258# CONFIG_INPUT_TSDEV is not set
259# CONFIG_INPUT_EVDEV is not set
260# CONFIG_INPUT_EVBUG is not set
261
262#
263# Input Device Drivers
264#
265# CONFIG_INPUT_KEYBOARD is not set
266# CONFIG_INPUT_MOUSE is not set
267# CONFIG_INPUT_JOYSTICK is not set
268# CONFIG_INPUT_TOUCHSCREEN is not set
269# CONFIG_INPUT_MISC is not set
270
271#
272# Hardware I/O ports
273#
274CONFIG_SERIO=y
275# CONFIG_SERIO_I8042 is not set
276# CONFIG_SERIO_SERPORT is not set
277# CONFIG_SERIO_LIBPS2 is not set
278# CONFIG_SERIO_RAW is not set
279# CONFIG_GAMEPORT is not set
280
281#
282# Character devices
283#
284# CONFIG_VT is not set
285# CONFIG_SERIAL_NONSTANDARD is not set
286
287#
288# Serial drivers
289#
290CONFIG_SERIAL_8250=y
291CONFIG_SERIAL_8250_CONSOLE=y
292CONFIG_SERIAL_8250_NR_UARTS=1
293# CONFIG_SERIAL_8250_EXTENDED is not set
294
295#
296# Non-8250 serial port support
297#
298# CONFIG_V850E_UART is not set
299CONFIG_SERIAL_CORE=y
300CONFIG_SERIAL_CORE_CONSOLE=y
301# CONFIG_UNIX98_PTYS is not set
302# CONFIG_LEGACY_PTYS is not set
303
304#
305# IPMI
306#
307# CONFIG_IPMI_HANDLER is not set
308
309#
310# Watchdog Cards
311#
312# CONFIG_WATCHDOG is not set
313# CONFIG_RTC is not set
314# CONFIG_GEN_RTC is not set
315# CONFIG_DTLK is not set
316# CONFIG_R3964 is not set
317
318#
319# Ftape, the floppy tape device driver
320#
321# CONFIG_RAW_DRIVER is not set
322
323#
324# TPM devices
325#
326
327#
328# Multimedia devices
329#
330# CONFIG_VIDEO_DEV is not set
331
332#
333# Digital Video Broadcasting Devices
334#
335
336#
337# File systems
338#
339# CONFIG_EXT2_FS is not set
340# CONFIG_EXT3_FS is not set
341# CONFIG_JBD is not set
342# CONFIG_REISERFS_FS is not set
343# CONFIG_JFS_FS is not set
344# CONFIG_FS_POSIX_ACL is not set
345
346#
347# XFS support
348#
349# CONFIG_XFS_FS is not set
350# CONFIG_MINIX_FS is not set
351CONFIG_ROMFS_FS=y
352# CONFIG_MAGIC_ROM_PTR is not set
353CONFIG_INOTIFY=y
354# CONFIG_QUOTA is not set
355CONFIG_DNOTIFY=y
356# CONFIG_AUTOFS_FS is not set
357# CONFIG_AUTOFS4_FS is not set
358
359#
360# CD-ROM/DVD Filesystems
361#
362# CONFIG_ISO9660_FS is not set
363# CONFIG_UDF_FS is not set
364
365#
366# DOS/FAT/NT Filesystems
367#
368# CONFIG_MSDOS_FS is not set
369# CONFIG_VFAT_FS is not set
370# CONFIG_NTFS_FS is not set
371
372#
373# Pseudo filesystems
374#
375CONFIG_PROC_FS=y
376CONFIG_SYSFS=y
377# CONFIG_TMPFS is not set
378# CONFIG_HUGETLB_PAGE is not set
379CONFIG_RAMFS=y
380
381#
382# Miscellaneous filesystems
383#
384# CONFIG_HFSPLUS_FS is not set
385# CONFIG_JFFS_FS is not set
386# CONFIG_JFFS2_FS is not set
387# CONFIG_CRAMFS is not set
388# CONFIG_VXFS_FS is not set
389# CONFIG_HPFS_FS is not set
390# CONFIG_QNX4FS_FS is not set
391# CONFIG_SYSV_FS is not set
392# CONFIG_UFS_FS is not set
393
394#
395# Partition Types
396#
397# CONFIG_PARTITION_ADVANCED is not set
398CONFIG_MSDOS_PARTITION=y
399
400#
401# Native Language Support
402#
403# CONFIG_NLS is not set
404
405#
406# Graphics support
407#
408# CONFIG_FB is not set
409
410#
411# Sound
412#
413# CONFIG_SOUND is not set
414
415#
416# USB support
417#
418# CONFIG_USB_ARCH_HAS_HCD is not set
419# CONFIG_USB_ARCH_HAS_OHCI is not set
420
421#
422# USB Gadget Support
423#
424# CONFIG_USB_GADGET is not set
425
426#
427# Kernel hacking
428#
429# CONFIG_PRINTK_TIME is not set
430CONFIG_DEBUG_KERNEL=y
431# CONFIG_MAGIC_SYSRQ is not set
432CONFIG_LOG_BUF_SHIFT=14
433# CONFIG_SCHEDSTATS is not set
434# CONFIG_DEBUG_SLAB is not set
435# CONFIG_DEBUG_SPINLOCK is not set
436# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
437# CONFIG_DEBUG_KOBJECT is not set
438CONFIG_DEBUG_INFO=y
439# CONFIG_DEBUG_FS is not set
440# CONFIG_NO_KERNEL_MSG is not set
441
442#
443# Security options
444#
445# CONFIG_KEYS is not set
446# CONFIG_SECURITY is not set
447
448#
449# Cryptographic options
450#
451# CONFIG_CRYPTO is not set
452
453#
454# Hardware crypto devices
455#
456
457#
458# Library routines
459#
460# CONFIG_CRC_CCITT is not set
461# CONFIG_CRC32 is not set
462# CONFIG_LIBCRC32C is not set
diff --git a/arch/v850/configs/sim_defconfig b/arch/v850/configs/sim_defconfig
deleted file mode 100644
index f31ba7398ad0..000000000000
--- a/arch/v850/configs/sim_defconfig
+++ /dev/null
@@ -1,451 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.13-uc0
4# Fri Sep 2 13:36:43 2005
5#
6# CONFIG_MMU is not set
7# CONFIG_UID16 is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_GENERIC_CALIBRATE_DELAY=y
11# CONFIG_ISA is not set
12# CONFIG_ISAPNP is not set
13# CONFIG_EISA is not set
14# CONFIG_MCA is not set
15CONFIG_V850=y
16
17#
18# Processor type and features
19#
20CONFIG_V850E_SIM=y
21# CONFIG_RTE_CB_MA1 is not set
22# CONFIG_RTE_CB_NB85E is not set
23# CONFIG_RTE_CB_ME2 is not set
24# CONFIG_V850E_AS85EP1 is not set
25# CONFIG_V850E2_SIM85E2C is not set
26# CONFIG_V850E2_SIM85E2S is not set
27# CONFIG_V850E2_FPGA85E2C is not set
28# CONFIG_V850E2_ANNA is not set
29CONFIG_V850E=y
30# CONFIG_PCI is not set
31# CONFIG_V850E_INTC is not set
32# CONFIG_V850E_TIMER_D is not set
33# CONFIG_V850E_CACHE is not set
34# CONFIG_V850E2_CACHE is not set
35CONFIG_NO_CACHE=y
36CONFIG_ZERO_BSS=y
37# CONFIG_RESET_GUARD is not set
38CONFIG_LARGE_ALLOCS=y
39CONFIG_FLATMEM=y
40CONFIG_FLAT_NODE_MEM_MAP=y
41
42#
43# Code maturity level options
44#
45# CONFIG_EXPERIMENTAL is not set
46CONFIG_CLEAN_COMPILE=y
47CONFIG_BROKEN_ON_SMP=y
48CONFIG_INIT_ENV_ARG_LIMIT=32
49
50#
51# General setup
52#
53CONFIG_LOCALVERSION=""
54# CONFIG_BSD_PROCESS_ACCT is not set
55# CONFIG_SYSCTL is not set
56# CONFIG_HOTPLUG is not set
57# CONFIG_IKCONFIG is not set
58CONFIG_EMBEDDED=y
59# CONFIG_KALLSYMS is not set
60CONFIG_PRINTK=y
61CONFIG_BUG=y
62# CONFIG_BASE_FULL is not set
63# CONFIG_FUTEX is not set
64# CONFIG_EPOLL is not set
65CONFIG_CC_OPTIMIZE_FOR_SIZE=y
66CONFIG_CC_ALIGN_FUNCTIONS=0
67CONFIG_CC_ALIGN_LABELS=0
68CONFIG_CC_ALIGN_LOOPS=0
69CONFIG_CC_ALIGN_JUMPS=0
70CONFIG_BASE_SMALL=1
71
72#
73# Loadable module support
74#
75CONFIG_MODULES=y
76CONFIG_MODULE_UNLOAD=y
77CONFIG_OBSOLETE_MODPARM=y
78# CONFIG_MODULE_SRCVERSION_ALL is not set
79CONFIG_KMOD=y
80
81#
82# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
83#
84
85#
86# PCCARD (PCMCIA/CardBus) support
87#
88# CONFIG_PCCARD is not set
89
90#
91# PCI Hotplug Support
92#
93
94#
95# Executable file formats
96#
97CONFIG_BINFMT_FLAT=y
98# CONFIG_BINFMT_ZFLAT is not set
99# CONFIG_BINFMT_SHARED_FLAT is not set
100# CONFIG_BINFMT_MISC is not set
101
102#
103# Networking
104#
105# CONFIG_NET is not set
106
107#
108# Generic Driver Options
109#
110CONFIG_STANDALONE=y
111CONFIG_PREVENT_FIRMWARE_BUILD=y
112# CONFIG_FW_LOADER is not set
113# CONFIG_DEBUG_DRIVER is not set
114
115#
116# Memory Technology Devices (MTD)
117#
118CONFIG_MTD=y
119# CONFIG_MTD_DEBUG is not set
120# CONFIG_MTD_CONCAT is not set
121# CONFIG_MTD_PARTITIONS is not set
122
123#
124# User Modules And Translation Layers
125#
126# CONFIG_MTD_CHAR is not set
127CONFIG_MTD_BLOCK=y
128# CONFIG_FTL is not set
129# CONFIG_NFTL is not set
130# CONFIG_INFTL is not set
131
132#
133# RAM/ROM/Flash chip drivers
134#
135# CONFIG_MTD_CFI is not set
136# CONFIG_MTD_JEDECPROBE is not set
137CONFIG_MTD_MAP_BANK_WIDTH_1=y
138CONFIG_MTD_MAP_BANK_WIDTH_2=y
139CONFIG_MTD_MAP_BANK_WIDTH_4=y
140# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
141# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
142# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
143CONFIG_MTD_CFI_I1=y
144CONFIG_MTD_CFI_I2=y
145# CONFIG_MTD_CFI_I4 is not set
146# CONFIG_MTD_CFI_I8 is not set
147# CONFIG_MTD_RAM is not set
148# CONFIG_MTD_ROM is not set
149# CONFIG_MTD_ABSENT is not set
150
151#
152# Mapping drivers for chip access
153#
154# CONFIG_MTD_COMPLEX_MAPPINGS is not set
155# CONFIG_MTD_PLATRAM is not set
156
157#
158# Self-contained MTD device drivers
159#
160CONFIG_MTD_SLRAM=y
161# CONFIG_MTD_PHRAM is not set
162# CONFIG_MTD_MTDRAM is not set
163# CONFIG_MTD_BLKMTD is not set
164
165#
166# Disk-On-Chip Device Drivers
167#
168# CONFIG_MTD_DOC2000 is not set
169# CONFIG_MTD_DOC2001 is not set
170# CONFIG_MTD_DOC2001PLUS is not set
171
172#
173# NAND Flash Device Drivers
174#
175# CONFIG_MTD_NAND is not set
176
177#
178# Parallel port support
179#
180# CONFIG_PARPORT is not set
181
182#
183# Block devices
184#
185# CONFIG_BLK_DEV_FD is not set
186# CONFIG_BLK_DEV_COW_COMMON is not set
187# CONFIG_BLK_DEV_LOOP is not set
188# CONFIG_BLK_DEV_RAM is not set
189CONFIG_BLK_DEV_RAM_COUNT=16
190CONFIG_INITRAMFS_SOURCE=""
191# CONFIG_CDROM_PKTCDVD is not set
192
193#
194# IO Schedulers
195#
196CONFIG_IOSCHED_NOOP=y
197# CONFIG_IOSCHED_AS is not set
198# CONFIG_IOSCHED_DEADLINE is not set
199# CONFIG_IOSCHED_CFQ is not set
200
201#
202# Disk device support
203#
204
205#
206# ATA/ATAPI/MFM/RLL support
207#
208# CONFIG_IDE is not set
209
210#
211# SCSI device support
212#
213# CONFIG_SCSI is not set
214
215#
216# Multi-device support (RAID and LVM)
217#
218# CONFIG_MD is not set
219
220#
221# Fusion MPT device support
222#
223# CONFIG_FUSION is not set
224
225#
226# IEEE 1394 (FireWire) support
227#
228
229#
230# I2O device support
231#
232
233#
234# Network device support
235#
236# CONFIG_NETPOLL is not set
237# CONFIG_NET_POLL_CONTROLLER is not set
238
239#
240# ISDN subsystem
241#
242
243#
244# Input device support
245#
246CONFIG_INPUT=y
247
248#
249# Userland interfaces
250#
251# CONFIG_INPUT_MOUSEDEV is not set
252# CONFIG_INPUT_JOYDEV is not set
253# CONFIG_INPUT_TSDEV is not set
254# CONFIG_INPUT_EVDEV is not set
255# CONFIG_INPUT_EVBUG is not set
256
257#
258# Input Device Drivers
259#
260# CONFIG_INPUT_KEYBOARD is not set
261# CONFIG_INPUT_MOUSE is not set
262# CONFIG_INPUT_JOYSTICK is not set
263# CONFIG_INPUT_TOUCHSCREEN is not set
264# CONFIG_INPUT_MISC is not set
265
266#
267# Hardware I/O ports
268#
269CONFIG_SERIO=y
270# CONFIG_SERIO_I8042 is not set
271# CONFIG_SERIO_SERPORT is not set
272# CONFIG_SERIO_LIBPS2 is not set
273# CONFIG_SERIO_RAW is not set
274# CONFIG_GAMEPORT is not set
275
276#
277# Character devices
278#
279# CONFIG_VT is not set
280# CONFIG_SERIAL_NONSTANDARD is not set
281
282#
283# Serial drivers
284#
285# CONFIG_SERIAL_8250 is not set
286
287#
288# Non-8250 serial port support
289#
290# CONFIG_UNIX98_PTYS is not set
291# CONFIG_LEGACY_PTYS is not set
292
293#
294# IPMI
295#
296# CONFIG_IPMI_HANDLER is not set
297
298#
299# Watchdog Cards
300#
301# CONFIG_WATCHDOG is not set
302# CONFIG_RTC is not set
303# CONFIG_GEN_RTC is not set
304# CONFIG_DTLK is not set
305# CONFIG_R3964 is not set
306
307#
308# Ftape, the floppy tape device driver
309#
310# CONFIG_RAW_DRIVER is not set
311
312#
313# TPM devices
314#
315
316#
317# Multimedia devices
318#
319# CONFIG_VIDEO_DEV is not set
320
321#
322# Digital Video Broadcasting Devices
323#
324
325#
326# File systems
327#
328# CONFIG_EXT2_FS is not set
329# CONFIG_EXT3_FS is not set
330# CONFIG_JBD is not set
331# CONFIG_REISERFS_FS is not set
332# CONFIG_JFS_FS is not set
333# CONFIG_FS_POSIX_ACL is not set
334
335#
336# XFS support
337#
338# CONFIG_XFS_FS is not set
339# CONFIG_MINIX_FS is not set
340CONFIG_ROMFS_FS=y
341# CONFIG_MAGIC_ROM_PTR is not set
342CONFIG_INOTIFY=y
343# CONFIG_QUOTA is not set
344CONFIG_DNOTIFY=y
345# CONFIG_AUTOFS_FS is not set
346# CONFIG_AUTOFS4_FS is not set
347
348#
349# CD-ROM/DVD Filesystems
350#
351# CONFIG_ISO9660_FS is not set
352# CONFIG_UDF_FS is not set
353
354#
355# DOS/FAT/NT Filesystems
356#
357# CONFIG_MSDOS_FS is not set
358# CONFIG_VFAT_FS is not set
359# CONFIG_NTFS_FS is not set
360
361#
362# Pseudo filesystems
363#
364CONFIG_PROC_FS=y
365CONFIG_SYSFS=y
366# CONFIG_TMPFS is not set
367# CONFIG_HUGETLB_PAGE is not set
368CONFIG_RAMFS=y
369
370#
371# Miscellaneous filesystems
372#
373# CONFIG_HFSPLUS_FS is not set
374# CONFIG_JFFS_FS is not set
375# CONFIG_JFFS2_FS is not set
376# CONFIG_CRAMFS is not set
377# CONFIG_VXFS_FS is not set
378# CONFIG_HPFS_FS is not set
379# CONFIG_QNX4FS_FS is not set
380# CONFIG_SYSV_FS is not set
381# CONFIG_UFS_FS is not set
382
383#
384# Partition Types
385#
386# CONFIG_PARTITION_ADVANCED is not set
387CONFIG_MSDOS_PARTITION=y
388
389#
390# Native Language Support
391#
392# CONFIG_NLS is not set
393
394#
395# Graphics support
396#
397# CONFIG_FB is not set
398
399#
400# Sound
401#
402# CONFIG_SOUND is not set
403
404#
405# USB support
406#
407# CONFIG_USB_ARCH_HAS_HCD is not set
408# CONFIG_USB_ARCH_HAS_OHCI is not set
409
410#
411# USB Gadget Support
412#
413# CONFIG_USB_GADGET is not set
414
415#
416# Kernel hacking
417#
418# CONFIG_PRINTK_TIME is not set
419CONFIG_DEBUG_KERNEL=y
420# CONFIG_MAGIC_SYSRQ is not set
421CONFIG_LOG_BUF_SHIFT=14
422# CONFIG_SCHEDSTATS is not set
423# CONFIG_DEBUG_SLAB is not set
424# CONFIG_DEBUG_SPINLOCK is not set
425# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
426# CONFIG_DEBUG_KOBJECT is not set
427CONFIG_DEBUG_INFO=y
428# CONFIG_DEBUG_FS is not set
429# CONFIG_NO_KERNEL_MSG is not set
430
431#
432# Security options
433#
434# CONFIG_KEYS is not set
435# CONFIG_SECURITY is not set
436
437#
438# Cryptographic options
439#
440# CONFIG_CRYPTO is not set
441
442#
443# Hardware crypto devices
444#
445
446#
447# Library routines
448#
449# CONFIG_CRC_CCITT is not set
450# CONFIG_CRC32 is not set
451# CONFIG_LIBCRC32C is not set
diff --git a/arch/v850/kernel/Makefile b/arch/v850/kernel/Makefile
deleted file mode 100644
index da5889c53576..000000000000
--- a/arch/v850/kernel/Makefile
+++ /dev/null
@@ -1,40 +0,0 @@
1#
2# arch/v850/kernel/Makefile
3#
4# Copyright (C) 2001,02,03 NEC Electronics Corporation
5# Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6#
7# This file is subject to the terms and conditions of the GNU General Public
8# License. See the file "COPYING" in the main directory of this archive
9# for more details.
10#
11
12extra-y := head.o init_task.o vmlinux.lds
13
14obj-y += intv.o entry.o process.o syscalls.o time.o setup.o \
15 signal.o irq.o mach.o ptrace.o bug.o
16obj-$(CONFIG_MODULES) += module.o v850_ksyms.o
17# chip-specific code
18obj-$(CONFIG_V850E_MA1) += ma.o
19obj-$(CONFIG_V850E_ME2) += me2.o
20obj-$(CONFIG_V850E_TEG) += teg.o
21obj-$(CONFIG_V850E_AS85EP1) += as85ep1.o
22obj-$(CONFIG_V850E2_ANNA) += anna.o
23# platform-specific code
24obj-$(CONFIG_V850E_SIM) += sim.o simcons.o
25obj-$(CONFIG_V850E2_SIM85E2) += sim85e2.o memcons.o
26obj-$(CONFIG_V850E2_FPGA85E2C) += fpga85e2c.o memcons.o
27obj-$(CONFIG_RTE_CB) += rte_cb.o rte_cb_leds.o
28obj-$(CONFIG_RTE_CB_MA1) += rte_ma1_cb.o
29obj-$(CONFIG_RTE_CB_ME2) += rte_me2_cb.o
30obj-$(CONFIG_RTE_CB_NB85E) += rte_nb85e_cb.o
31obj-$(CONFIG_RTE_CB_MULTI) += rte_cb_multi.o
32obj-$(CONFIG_RTE_MB_A_PCI) += rte_mb_a_pci.o
33obj-$(CONFIG_RTE_GBUS_INT) += gbus_int.o
34# feature-specific code
35obj-$(CONFIG_V850E_INTC) += v850e_intc.o
36obj-$(CONFIG_V850E_TIMER_D) += v850e_timer_d.o v850e_utils.o
37obj-$(CONFIG_V850E_CACHE) += v850e_cache.o
38obj-$(CONFIG_V850E2_CACHE) += v850e2_cache.o
39obj-$(CONFIG_V850E_HIGHRES_TIMER) += highres_timer.o
40obj-$(CONFIG_PROC_FS) += procfs.o
diff --git a/arch/v850/kernel/anna-rom.ld b/arch/v850/kernel/anna-rom.ld
deleted file mode 100644
index 7c54e7e3f1b1..000000000000
--- a/arch/v850/kernel/anna-rom.ld
+++ /dev/null
@@ -1,16 +0,0 @@
1/* Linker script for the Midas labs Anna V850E2 evaluation board
2 (CONFIG_V850E2_ANNA), with kernel in ROM (CONFIG_ROM_KERNEL). */
3
4MEMORY {
5 /* 8MB of flash ROM. */
6 ROM : ORIGIN = 0, LENGTH = 0x00800000
7
8 /* 1MB of static RAM. This memory is mirrored 64 times. */
9 SRAM : ORIGIN = SRAM_ADDR, LENGTH = SRAM_SIZE
10 /* 64MB of DRAM. */
11 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
12}
13
14SECTIONS {
15 ROMK_SECTIONS(ROM, SRAM)
16}
diff --git a/arch/v850/kernel/anna.c b/arch/v850/kernel/anna.c
deleted file mode 100644
index 5978a25170fb..000000000000
--- a/arch/v850/kernel/anna.c
+++ /dev/null
@@ -1,202 +0,0 @@
1/*
2 * arch/v850/kernel/anna.c -- Anna V850E2 evaluation chip/board
3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/bootmem.h>
18#include <linux/major.h>
19#include <linux/irq.h>
20
21#include <asm/machdep.h>
22#include <asm/atomic.h>
23#include <asm/page.h>
24#include <asm/v850e_timer_d.h>
25#include <asm/v850e_uart.h>
26
27#include "mach.h"
28
29
30/* SRAM and SDRAM are vaguely contiguous (with a big hole in between; see
31 mach_reserve_bootmem for details); use both as one big area. */
32#define RAM_START SRAM_ADDR
33#define RAM_END (SDRAM_ADDR + SDRAM_SIZE)
34
35/* The bits of this port are connected to an 8-LED bar-graph. */
36#define LEDS_PORT 0
37
38
39static void anna_led_tick (void);
40
41
42void __init mach_early_init (void)
43{
44 ANNA_ILBEN = 0;
45
46 V850E2_CSC(0) = 0x402F;
47 V850E2_CSC(1) = 0x4000;
48 V850E2_BPC = 0;
49 V850E2_BSC = 0xAAAA;
50 V850E2_BEC = 0;
51
52#if 0
53 V850E2_BHC = 0xFFFF; /* icache all memory, dcache all */
54#else
55 V850E2_BHC = 0; /* cache no memory */
56#endif
57 V850E2_BCT(0) = 0xB088;
58 V850E2_BCT(1) = 0x0008;
59 V850E2_DWC(0) = 0x0027;
60 V850E2_DWC(1) = 0;
61 V850E2_BCC = 0x0006;
62 V850E2_ASC = 0;
63 V850E2_LBS = 0x0089;
64 V850E2_SCR(3) = 0x21A9;
65 V850E2_RFS(3) = 0x8121;
66
67 v850e_intc_disable_irqs ();
68}
69
70void __init mach_setup (char **cmdline)
71{
72 ANNA_PORT_PM (LEDS_PORT) = 0; /* Make all LED pins output pins. */
73 mach_tick = anna_led_tick;
74}
75
76void __init mach_get_physical_ram (unsigned long *ram_start,
77 unsigned long *ram_len)
78{
79 *ram_start = RAM_START;
80 *ram_len = RAM_END - RAM_START;
81}
82
83void __init mach_reserve_bootmem ()
84{
85 /* The space between SRAM and SDRAM is filled with duplicate
86 images of SRAM. Prevent the kernel from using them. */
87 reserve_bootmem (SRAM_ADDR + SRAM_SIZE,
88 SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE),
89 BOOTMEM_DEFAULT);
90}
91
92void mach_gettimeofday (struct timespec *tv)
93{
94 tv->tv_sec = 0;
95 tv->tv_nsec = 0;
96}
97
98void __init mach_sched_init (struct irqaction *timer_action)
99{
100 /* Start hardware timer. */
101 v850e_timer_d_configure (0, HZ);
102 /* Install timer interrupt handler. */
103 setup_irq (IRQ_INTCMD(0), timer_action);
104}
105
106static struct v850e_intc_irq_init irq_inits[] = {
107 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
108 { "PIN", IRQ_INTP(0), IRQ_INTP_NUM, 1, 4 },
109 { "CCC", IRQ_INTCCC(0), IRQ_INTCCC_NUM, 1, 5 },
110 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 5 },
111 { "DMA", IRQ_INTDMA(0), IRQ_INTDMA_NUM, 1, 2 },
112 { "DMXER", IRQ_INTDMXER,1, 1, 2 },
113 { "SRE", IRQ_INTSRE(0), IRQ_INTSRE_NUM, 3, 3 },
114 { "SR", IRQ_INTSR(0), IRQ_INTSR_NUM, 3, 4 },
115 { "ST", IRQ_INTST(0), IRQ_INTST_NUM, 3, 5 },
116 { 0 }
117};
118#define NUM_IRQ_INITS (ARRAY_SIZE(irq_inits) - 1)
119
120static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
121
122void __init mach_init_irqs (void)
123{
124 v850e_intc_init_irq_types (irq_inits, hw_itypes);
125}
126
127void machine_restart (char *__unused)
128{
129#ifdef CONFIG_RESET_GUARD
130 disable_reset_guard ();
131#endif
132 asm ("jmp r0"); /* Jump to the reset vector. */
133}
134
135void machine_halt (void)
136{
137#ifdef CONFIG_RESET_GUARD
138 disable_reset_guard ();
139#endif
140 local_irq_disable (); /* Ignore all interrupts. */
141 ANNA_PORT_IO(LEDS_PORT) = 0xAA; /* Note that we halted. */
142 for (;;)
143 asm ("halt; nop; nop; nop; nop; nop");
144}
145
146void machine_power_off (void)
147{
148 machine_halt ();
149}
150
151/* Called before configuring an on-chip UART. */
152void anna_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud)
153{
154 /* The Anna connects some general-purpose I/O pins on the CPU to
155 the RTS/CTS lines of UART 1's serial connection. I/O pins P07
156 and P37 are RTS and CTS respectively. */
157 if (chan == 1) {
158 ANNA_PORT_PM(0) &= ~0x80; /* P07 in output mode */
159 ANNA_PORT_PM(3) |= 0x80; /* P37 in input mode */
160 }
161}
162
163/* Minimum and maximum bounds for the moving upper LED boundary in the
164 clock tick display. We can't use the last bit because it's used for
165 UART0's CTS output. */
166#define MIN_MAX_POS 0
167#define MAX_MAX_POS 6
168
169/* There are MAX_MAX_POS^2 - MIN_MAX_POS^2 cycles in the animation, so if
170 we pick 6 and 0 as above, we get 49 cycles, which is when divided into
171 the standard 100 value for HZ, gives us an almost 1s total time. */
172#define TICKS_PER_FRAME \
173 (HZ / (MAX_MAX_POS * MAX_MAX_POS - MIN_MAX_POS * MIN_MAX_POS))
174
175static void anna_led_tick ()
176{
177 static unsigned counter = 0;
178
179 if (++counter == TICKS_PER_FRAME) {
180 static int pos = 0, max_pos = MAX_MAX_POS, dir = 1;
181
182 if (dir > 0 && pos == max_pos) {
183 dir = -1;
184 if (max_pos == MIN_MAX_POS)
185 max_pos = MAX_MAX_POS;
186 else
187 max_pos--;
188 } else {
189 if (dir < 0 && pos == 0)
190 dir = 1;
191
192 if (pos + dir <= max_pos) {
193 /* Each bit of port 0 has a LED. */
194 clear_bit (pos, &ANNA_PORT_IO(LEDS_PORT));
195 pos += dir;
196 set_bit (pos, &ANNA_PORT_IO(LEDS_PORT));
197 }
198 }
199
200 counter = 0;
201 }
202}
diff --git a/arch/v850/kernel/anna.ld b/arch/v850/kernel/anna.ld
deleted file mode 100644
index df7f80f2833d..000000000000
--- a/arch/v850/kernel/anna.ld
+++ /dev/null
@@ -1,20 +0,0 @@
1/* Linker script for the Midas labs Anna V850E2 evaluation board
2 (CONFIG_V850E2_ANNA). */
3
4MEMORY {
5 /* 256KB of internal memory (followed by one mirror). */
6 iMEM0 : ORIGIN = 0, LENGTH = 0x00040000
7 /* 256KB of internal memory (followed by one mirror). */
8 iMEM1 : ORIGIN = 0x00040000, LENGTH = 0x00040000
9
10 /* 1MB of static RAM. This memory is mirrored 64 times. */
11 SRAM : ORIGIN = SRAM_ADDR, LENGTH = SRAM_SIZE
12 /* 64MB of DRAM. */
13 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
14}
15
16SECTIONS {
17 .intv : { INTV_CONTENTS } > iMEM0
18 .sram : { RAMK_KRAM_CONTENTS } > SRAM
19 .root : { ROOT_FS_CONTENTS } > SDRAM
20}
diff --git a/arch/v850/kernel/as85ep1-rom.ld b/arch/v850/kernel/as85ep1-rom.ld
deleted file mode 100644
index fe2a9a3ab525..000000000000
--- a/arch/v850/kernel/as85ep1-rom.ld
+++ /dev/null
@@ -1,21 +0,0 @@
1/* Linker script for the NEC AS85EP1 V850E evaluation board
2 (CONFIG_V850E_AS85EP1), with kernel in ROM (CONFIG_ROM_KERNEL). */
3
4MEMORY {
5 /* 4MB of flash ROM. */
6 ROM : ORIGIN = 0, LENGTH = 0x00400000
7
8 /* 1MB of static RAM. */
9 SRAM : ORIGIN = SRAM_ADDR, LENGTH = SRAM_SIZE
10
11 /* About 58MB of DRAM. This can actually be at one of two
12 positions, determined by jumper JP3; we have to use the first
13 position because the second is partially out of processor
14 instruction addressing range (though in the second position
15 there's actually 64MB available). */
16 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
17}
18
19SECTIONS {
20 ROMK_SECTIONS(ROM, SRAM)
21}
diff --git a/arch/v850/kernel/as85ep1.c b/arch/v850/kernel/as85ep1.c
deleted file mode 100644
index b525ecf3aea4..000000000000
--- a/arch/v850/kernel/as85ep1.c
+++ /dev/null
@@ -1,234 +0,0 @@
1/*
2 * arch/v850/kernel/as85ep1.c -- AS85EP1 V850E evaluation chip/board
3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/bootmem.h>
18#include <linux/major.h>
19#include <linux/irq.h>
20
21#include <asm/machdep.h>
22#include <asm/atomic.h>
23#include <asm/page.h>
24#include <asm/v850e_timer_d.h>
25#include <asm/v850e_uart.h>
26
27#include "mach.h"
28
29
30/* SRAM and SDRAM are vaguely contiguous (with a big hole in between; see
31 mach_reserve_bootmem for details); use both as one big area. */
32#define RAM_START SRAM_ADDR
33#define RAM_END (SDRAM_ADDR + SDRAM_SIZE)
34
35/* The bits of this port are connected to an 8-LED bar-graph. */
36#define LEDS_PORT 4
37
38
39static void as85ep1_led_tick (void);
40
41extern char _intv_copy_src_start, _intv_copy_src_end;
42extern char _intv_copy_dst_start;
43
44
45void __init mach_early_init (void)
46{
47#ifndef CONFIG_ROM_KERNEL
48 const u32 *src;
49 register u32 *dst asm ("ep");
50#endif
51
52 AS85EP1_CSC(0) = 0x0403;
53 AS85EP1_BCT(0) = 0xB8B8;
54 AS85EP1_DWC(0) = 0x0104;
55 AS85EP1_BCC = 0x0012;
56 AS85EP1_ASC = 0;
57 AS85EP1_LBS = 0x00A9;
58
59 AS85EP1_PORT_PMC(6) = 0xFF; /* valid A0,A1,A20-A25 */
60 AS85EP1_PORT_PMC(7) = 0x0E; /* valid CS1-CS3 */
61 AS85EP1_PORT_PMC(9) = 0xFF; /* valid D16-D23 */
62 AS85EP1_PORT_PMC(10) = 0xFF; /* valid D24-D31 */
63
64 AS85EP1_RFS(1) = 0x800c;
65 AS85EP1_RFS(3) = 0x800c;
66 AS85EP1_SCR(1) = 0x20A9;
67 AS85EP1_SCR(3) = 0x20A9;
68
69#ifndef CONFIG_ROM_KERNEL
70 /* The early chip we have is buggy, and writing the interrupt
71 vectors into low RAM may screw up, so for non-ROM kernels, we
72 only rely on the reset vector being downloaded, and copy the
73 rest of the interrupt vectors into place here. The specific bug
74 is that writing address N, where (N & 0x10) == 0x10, will _also_
75 write to address (N - 0x10). We avoid this (effectively) by
76 writing in 16-byte chunks backwards from the end. */
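   /* (Copying backwards works because any spurious extra write lands in
      the 16-byte chunk just below the one being copied; since we proceed
      from the end downwards, that chunk is copied afterwards, overwriting
      the clobbered words with their correct values.) */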
77
78 AS85EP1_IRAMM = 0x3; /* "write-mode" for the internal instruction memory */
79
80 src = (u32 *)(((u32)&_intv_copy_src_end - 1) & ~0xF);
81 dst = (u32 *)&_intv_copy_dst_start
82 + (src - (u32 *)&_intv_copy_src_start);
83 do {
84 u32 t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];
85 dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
86 dst -= 4;
87 src -= 4;
88 } while (src > (u32 *)&_intv_copy_src_start);
89
90 AS85EP1_IRAMM = 0x0; /* "read-mode" for the internal instruction memory */
91#endif /* !CONFIG_ROM_KERNEL */
92
93 v850e_intc_disable_irqs ();
94}
95
96void __init mach_setup (char **cmdline)
97{
98 AS85EP1_PORT_PMC (LEDS_PORT) = 0; /* Make the LEDs port an I/O port. */
99 AS85EP1_PORT_PM (LEDS_PORT) = 0; /* Make all the bits output pins. */
100 mach_tick = as85ep1_led_tick;
101}
102
103void __init mach_get_physical_ram (unsigned long *ram_start,
104 unsigned long *ram_len)
105{
106 *ram_start = RAM_START;
107 *ram_len = RAM_END - RAM_START;
108}
109
110/* Convenience macros. */
111#define SRAM_END (SRAM_ADDR + SRAM_SIZE)
112#define SDRAM_END (SDRAM_ADDR + SDRAM_SIZE)
113
114void __init mach_reserve_bootmem ()
115{
116 if (SDRAM_ADDR < RAM_END && SDRAM_ADDR > RAM_START)
117 /* We can't use the space between SRAM and SDRAM, so
118 prevent the kernel from trying. */
119 reserve_bootmem(SRAM_END, SDRAM_ADDR - SRAM_END,
120 BOOTMEM_DEFAULT);
121}
122
123void mach_gettimeofday (struct timespec *tv)
124{
125 tv->tv_sec = 0;
126 tv->tv_nsec = 0;
127}
128
129void __init mach_sched_init (struct irqaction *timer_action)
130{
131 /* Start hardware timer. */
132 v850e_timer_d_configure (0, HZ);
133 /* Install timer interrupt handler. */
134 setup_irq (IRQ_INTCMD(0), timer_action);
135}
136
137static struct v850e_intc_irq_init irq_inits[] = {
138 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
139 { "CCC", IRQ_INTCCC(0), IRQ_INTCCC_NUM, 1, 5 },
140 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 5 },
141 { "SRE", IRQ_INTSRE(0), IRQ_INTSRE_NUM, 3, 3 },
142 { "SR", IRQ_INTSR(0), IRQ_INTSR_NUM, 3, 4 },
143 { "ST", IRQ_INTST(0), IRQ_INTST_NUM, 3, 5 },
144 { 0 }
145};
146#define NUM_IRQ_INITS (ARRAY_SIZE(irq_inits) - 1)
147
148static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
149
150void __init mach_init_irqs (void)
151{
152 v850e_intc_init_irq_types (irq_inits, hw_itypes);
153}
154
155void machine_restart (char *__unused)
156{
157#ifdef CONFIG_RESET_GUARD
158 disable_reset_guard ();
159#endif
160 asm ("jmp r0"); /* Jump to the reset vector. */
161}
162
163void machine_halt (void)
164{
165#ifdef CONFIG_RESET_GUARD
166 disable_reset_guard ();
167#endif
168 local_irq_disable (); /* Ignore all interrupts. */
169 AS85EP1_PORT_IO (LEDS_PORT) = 0xAA; /* Note that we halted. */
170 for (;;)
171 asm ("halt; nop; nop; nop; nop; nop");
172}
173
174void machine_power_off (void)
175{
176 machine_halt ();
177}
178
179/* Called before configuring an on-chip UART. */
180void as85ep1_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud)
181{
182 /* Make the shared uart/port pins be uart pins. */
183 AS85EP1_PORT_PMC(3) |= (0x5 << chan);
184
185 /* The AS85EP1 connects some general-purpose I/O pins on the CPU to
186 the RTS/CTS lines of UART 1's serial connection. I/O pins P53
187 and P54 are RTS and CTS respectively. */
188 if (chan == 1) {
189 /* Put P53 & P54 in I/O port mode. */
190 AS85EP1_PORT_PMC(5) &= ~0x18;
191 /* Make P53 an output, and P54 an input. */
192 AS85EP1_PORT_PM(5) |= 0x10;
193 }
194}
195
196/* Minimum and maximum bounds for the moving upper LED boundary in the
197 clock tick display. */
198#define MIN_MAX_POS 0
199#define MAX_MAX_POS 7
200
201/* There are MAX_MAX_POS^2 - MIN_MAX_POS^2 cycles in the animation, so if
202 we pick 7 and 0 as above, we get 49 cycles, which, when divided into
203 the standard 100 value for HZ, gives us an almost 1s total time. */
204#define TICKS_PER_FRAME \
205 (HZ / (MAX_MAX_POS * MAX_MAX_POS - MIN_MAX_POS * MIN_MAX_POS))
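/* Worked out with the values above (MAX_MAX_POS = 7, MIN_MAX_POS = 0) and
   the standard HZ = 100 assumed in the comment: 49 frames per cycle, so
   TICKS_PER_FRAME = 100 / 49 = 2, and a full cycle takes 2 * 49 = 98 ticks,
   i.e. roughly 0.98 seconds. */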
206
207static void as85ep1_led_tick ()
208{
209 static unsigned counter = 0;
210
211 if (++counter == TICKS_PER_FRAME) {
212 static int pos = 0, max_pos = MAX_MAX_POS, dir = 1;
213
214 if (dir > 0 && pos == max_pos) {
215 dir = -1;
216 if (max_pos == MIN_MAX_POS)
217 max_pos = MAX_MAX_POS;
218 else
219 max_pos--;
220 } else {
221 if (dir < 0 && pos == 0)
222 dir = 1;
223
224 if (pos + dir <= max_pos) {
226 /* Each bit of the LEDs port has an LED. */
226 set_bit (pos, &AS85EP1_PORT_IO(LEDS_PORT));
227 pos += dir;
228 clear_bit (pos, &AS85EP1_PORT_IO(LEDS_PORT));
229 }
230 }
231
232 counter = 0;
233 }
234}
diff --git a/arch/v850/kernel/as85ep1.ld b/arch/v850/kernel/as85ep1.ld
deleted file mode 100644
index ef2c4399063e..000000000000
--- a/arch/v850/kernel/as85ep1.ld
+++ /dev/null
@@ -1,49 +0,0 @@
1/* Linker script for the NEC AS85EP1 V850E evaluation board
2 (CONFIG_V850E_AS85EP1). */
3
4MEMORY {
5 /* 1MB of internal instruction memory. */
6 iMEM0 : ORIGIN = 0, LENGTH = 0x00100000
7
8 /* 1MB of static RAM. */
9 SRAM : ORIGIN = SRAM_ADDR, LENGTH = SRAM_SIZE
10
11 /* About 58MB of DRAM. This can actually be at one of two
12 positions, determined by jumper JP3; we have to use the first
13 position because the second is partially out of processor
14 instruction addressing range (though in the second position
15 there's actually 64MB available). */
16 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
17}
18
19SECTIONS {
20 .resetv : {
21 __intv_start = . ;
22 *(.intv.reset) /* Reset vector */
23 } > iMEM0
24
25 .sram : {
26 RAMK_KRAM_CONTENTS
27
28 /* We stick most of the interrupt vectors here; they'll be
29 copied into the proper location by the early init code (we
30 can't put them directly in the right place because of
31 hardware bugs). The vectors shouldn't need to be
32 relocated, so we don't have to use `> ... AT> ...' to
33 split the load/vm addresses (and we can't because of
34 problems with the loader). */
35 . = ALIGN (0x10) ;
36 __intv_copy_src_start = . ;
37 *(.intv.common) /* Vectors common to all v850e proc. */
38 *(.intv.mach) /* Machine-specific int. vectors. */
39 . = ALIGN (0x10) ;
40 __intv_copy_src_end = . ;
41 } > SRAM
42
43 /* Where we end up putting the vectors. */
44 __intv_copy_dst_start = 0x10 ;
45 __intv_copy_dst_end = __intv_copy_dst_start + (__intv_copy_src_end - __intv_copy_src_start) ;
46 __intv_end = __intv_copy_dst_end ;
47
48 .root : { ROOT_FS_CONTENTS } > SDRAM
49}
diff --git a/arch/v850/kernel/asm-offsets.c b/arch/v850/kernel/asm-offsets.c
deleted file mode 100644
index 581e6986a776..000000000000
--- a/arch/v850/kernel/asm-offsets.c
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 */
10
11#include <linux/stddef.h>
12#include <linux/sched.h>
13#include <linux/kernel_stat.h>
14#include <linux/ptrace.h>
15#include <linux/hardirq.h>
16#include <linux/kbuild.h>
17
18#include <asm/irq.h>
19#include <asm/errno.h>
20
21int main (void)
22{
23 /* offsets into the task struct */
24 DEFINE (TASK_STATE, offsetof (struct task_struct, state));
25 DEFINE (TASK_FLAGS, offsetof (struct task_struct, flags));
26 DEFINE (TASK_PTRACE, offsetof (struct task_struct, ptrace));
27 DEFINE (TASK_BLOCKED, offsetof (struct task_struct, blocked));
28 DEFINE (TASK_THREAD, offsetof (struct task_struct, thread));
29 DEFINE (TASK_THREAD_INFO, offsetof (struct task_struct, stack));
30 DEFINE (TASK_MM, offsetof (struct task_struct, mm));
31 DEFINE (TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
32 DEFINE (TASK_PID, offsetof (struct task_struct, pid));
33
34 /* offsets into the kernel_stat struct */
35 DEFINE (STAT_IRQ, offsetof (struct kernel_stat, irqs));
36
37
38 /* signal defines */
39 DEFINE (SIGSEGV, SIGSEGV);
40 DEFINE (SEGV_MAPERR, SEGV_MAPERR);
41 DEFINE (SIGTRAP, SIGTRAP);
42 DEFINE (SIGCHLD, SIGCHLD);
43 DEFINE (SIGILL, SIGILL);
44 DEFINE (TRAP_TRACE, TRAP_TRACE);
45
46 /* ptrace flag bits */
47 DEFINE (PT_PTRACED, PT_PTRACED);
48 DEFINE (PT_DTRACE, PT_DTRACE);
49
50 /* error values */
51 DEFINE (ENOSYS, ENOSYS);
52
53 /* clone flag bits */
54 DEFINE (CLONE_VFORK, CLONE_VFORK);
55 DEFINE (CLONE_VM, CLONE_VM);
56
57 return 0;
58}
diff --git a/arch/v850/kernel/bug.c b/arch/v850/kernel/bug.c
deleted file mode 100644
index c78cf750915a..000000000000
--- a/arch/v850/kernel/bug.c
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * arch/v850/kernel/bug.c -- Bug reporting functions
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/reboot.h>
16#include <linux/sched.h>
17#include <linux/module.h>
18
19#include <asm/errno.h>
20#include <asm/ptrace.h>
21#include <asm/processor.h>
22#include <asm/current.h>
23
24/* We should use __builtin_return_address, but it doesn't work in gcc-2.90
25 (which is currently our standard compiler on the v850). */
26#define ret_addr() ({ register u32 lp asm ("lp"); lp; })
27#define stack_addr() ({ register u32 sp asm ("sp"); sp; })
28
29void __bug ()
30{
31 printk (KERN_CRIT "kernel BUG at PC 0x%x (SP ~0x%x)!\n",
32 ret_addr() - 4, /* - 4 for `jarl' */
33 stack_addr());
34 machine_halt ();
35}
36
37int bad_trap (int trap_num, struct pt_regs *regs)
38{
39 printk (KERN_CRIT
40 "unimplemented trap %d called at 0x%08lx, pid %d!\n",
41 trap_num, regs->pc, current->pid);
42 return -ENOSYS;
43}
44
45#ifdef CONFIG_RESET_GUARD
46void unexpected_reset (unsigned long ret_addr, unsigned long kmode,
47 struct task_struct *task, unsigned long sp)
48{
49 printk (KERN_CRIT
50 "unexpected reset in %s mode, pid %d"
51 " (ret_addr = 0x%lx, sp = 0x%lx)\n",
52 kmode ? "kernel" : "user",
53 task ? task->pid : -1,
54 ret_addr, sp);
55
56 machine_halt ();
57}
58#endif /* CONFIG_RESET_GUARD */
59
60
61
62struct spec_reg_name {
63 const char *name;
64 int gpr;
65};
66
67struct spec_reg_name spec_reg_names[] = {
68 { "sp", GPR_SP },
69 { "gp", GPR_GP },
70 { "tp", GPR_TP },
71 { "ep", GPR_EP },
72 { "lp", GPR_LP },
73 { 0, 0 }
74};
75
76void show_regs (struct pt_regs *regs)
77{
78 int gpr_base, gpr_offs;
79
80 printk (" pc 0x%08lx psw 0x%08lx kernel_mode %d\n",
81 regs->pc, regs->psw, regs->kernel_mode);
82 printk (" ctpc 0x%08lx ctpsw 0x%08lx ctbp 0x%08lx\n",
83 regs->ctpc, regs->ctpsw, regs->ctbp);
84
85 for (gpr_base = 0; gpr_base < NUM_GPRS; gpr_base += 4) {
86 for (gpr_offs = 0; gpr_offs < 4; gpr_offs++) {
87 int gpr = gpr_base + gpr_offs;
88 long val = regs->gpr[gpr];
89 struct spec_reg_name *srn;
90
91 for (srn = spec_reg_names; srn->name; srn++)
92 if (srn->gpr == gpr)
93 break;
94
95 if (srn->name)
96 printk ("%7s 0x%08lx", srn->name, val);
97 else
98 printk (" r%02d 0x%08lx", gpr, val);
99 }
100
101 printk ("\n");
102 }
103}
104
105/*
106 * TASK is a pointer to the task whose backtrace we want to see (or NULL
107 * for current task), SP is the stack pointer of the first frame that
108 * should be shown in the back trace (or NULL if the entire call-chain of
109 * the task should be shown).
110 */
111void show_stack (struct task_struct *task, unsigned long *sp)
112{
113 unsigned long addr, end;
114
115 if (sp)
116 addr = (unsigned long)sp;
117 else if (task)
118 addr = task_sp (task);
119 else
120 addr = stack_addr ();
121
122 addr = addr & ~3;
123 end = (addr + THREAD_SIZE - 1) & THREAD_MASK;
124
125 while (addr < end) {
126 printk ("%8lX: ", addr);
127 while (addr < end) {
128 printk (" %8lX", *(unsigned long *)addr);
129 addr += sizeof (unsigned long);
130 if (! (addr & 0xF))
131 break;
132 }
133 printk ("\n");
134 }
135}
136
137void dump_stack ()
138{
139 show_stack (0, 0);
140}
141
142EXPORT_SYMBOL(dump_stack);
diff --git a/arch/v850/kernel/entry.S b/arch/v850/kernel/entry.S
deleted file mode 100644
index e4327a8d6bcd..000000000000
--- a/arch/v850/kernel/entry.S
+++ /dev/null
@@ -1,1121 +0,0 @@
1/*
2 * arch/v850/kernel/entry.S -- Low-level system-call handling, trap handlers,
3 * and context-switching
4 *
5 * Copyright (C) 2001,02,03 NEC Electronics Corporation
6 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#include <linux/sys.h>
16
17#include <asm/entry.h>
18#include <asm/current.h>
19#include <asm/thread_info.h>
20#include <asm/clinkage.h>
21#include <asm/processor.h>
22#include <asm/irq.h>
23#include <asm/errno.h>
24
25#include <asm/asm-offsets.h>
26
27
28/* Make a slightly more convenient alias for C_SYMBOL_NAME. */
29#define CSYM C_SYMBOL_NAME
30
31
32/* The offset of the struct pt_regs in a state-save-frame on the stack. */
33#define PTO STATE_SAVE_PT_OFFSET
34
35
36/* Save argument registers to the state-save-frame pointed to by EP. */
37#define SAVE_ARG_REGS \
38 sst.w r6, PTO+PT_GPR(6)[ep]; \
39 sst.w r7, PTO+PT_GPR(7)[ep]; \
40 sst.w r8, PTO+PT_GPR(8)[ep]; \
41 sst.w r9, PTO+PT_GPR(9)[ep]
42/* Restore argument registers from the state-save-frame pointed to by EP. */
43#define RESTORE_ARG_REGS \
44 sld.w PTO+PT_GPR(6)[ep], r6; \
45 sld.w PTO+PT_GPR(7)[ep], r7; \
46 sld.w PTO+PT_GPR(8)[ep], r8; \
47 sld.w PTO+PT_GPR(9)[ep], r9
48
49/* Save value return registers to the state-save-frame pointed to by EP. */
50#define SAVE_RVAL_REGS \
51 sst.w r10, PTO+PT_GPR(10)[ep]; \
52 sst.w r11, PTO+PT_GPR(11)[ep]
53/* Restore value return registers from the state-save-frame pointed to by EP. */
54#define RESTORE_RVAL_REGS \
55 sld.w PTO+PT_GPR(10)[ep], r10; \
56 sld.w PTO+PT_GPR(11)[ep], r11
57
58
59#define SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS \
60 sst.w r1, PTO+PT_GPR(1)[ep]; \
61 sst.w r5, PTO+PT_GPR(5)[ep]
62#define SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL \
63 sst.w r12, PTO+PT_GPR(12)[ep]; \
64 sst.w r13, PTO+PT_GPR(13)[ep]; \
65 sst.w r14, PTO+PT_GPR(14)[ep]; \
66 sst.w r15, PTO+PT_GPR(15)[ep]; \
67 sst.w r16, PTO+PT_GPR(16)[ep]; \
68 sst.w r17, PTO+PT_GPR(17)[ep]; \
69 sst.w r18, PTO+PT_GPR(18)[ep]; \
70 sst.w r19, PTO+PT_GPR(19)[ep]
71#define RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS \
72 sld.w PTO+PT_GPR(1)[ep], r1; \
73 sld.w PTO+PT_GPR(5)[ep], r5
74#define RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL \
75 sld.w PTO+PT_GPR(12)[ep], r12; \
76 sld.w PTO+PT_GPR(13)[ep], r13; \
77 sld.w PTO+PT_GPR(14)[ep], r14; \
78 sld.w PTO+PT_GPR(15)[ep], r15; \
79 sld.w PTO+PT_GPR(16)[ep], r16; \
80 sld.w PTO+PT_GPR(17)[ep], r17; \
81 sld.w PTO+PT_GPR(18)[ep], r18; \
82 sld.w PTO+PT_GPR(19)[ep], r19
83
84/* Save `call clobbered' registers to the state-save-frame pointed to by EP. */
85#define SAVE_CALL_CLOBBERED_REGS \
86 SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \
87 SAVE_ARG_REGS; \
88 SAVE_RVAL_REGS; \
89 SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL
90/* Restore `call clobbered' registers from the state-save-frame pointed to
91 by EP. */
92#define RESTORE_CALL_CLOBBERED_REGS \
93 RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \
94 RESTORE_ARG_REGS; \
95 RESTORE_RVAL_REGS; \
96 RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL
97
98/* Save `call clobbered' registers except for the return-value registers
99 to the state-save-frame pointed to by EP. */
100#define SAVE_CALL_CLOBBERED_REGS_NO_RVAL \
101 SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \
102 SAVE_ARG_REGS; \
103 SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL
104/* Restore `call clobbered' registers except for the return-value registers
105 from the state-save-frame pointed to by EP. */
106#define RESTORE_CALL_CLOBBERED_REGS_NO_RVAL \
107 RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \
108 RESTORE_ARG_REGS; \
109 RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL
110
111/* Save `call saved' registers to the state-save-frame pointed to by EP. */
112#define SAVE_CALL_SAVED_REGS \
113 sst.w r2, PTO+PT_GPR(2)[ep]; \
114 sst.w r20, PTO+PT_GPR(20)[ep]; \
115 sst.w r21, PTO+PT_GPR(21)[ep]; \
116 sst.w r22, PTO+PT_GPR(22)[ep]; \
117 sst.w r23, PTO+PT_GPR(23)[ep]; \
118 sst.w r24, PTO+PT_GPR(24)[ep]; \
119 sst.w r25, PTO+PT_GPR(25)[ep]; \
120 sst.w r26, PTO+PT_GPR(26)[ep]; \
121 sst.w r27, PTO+PT_GPR(27)[ep]; \
122 sst.w r28, PTO+PT_GPR(28)[ep]; \
123 sst.w r29, PTO+PT_GPR(29)[ep]
124/* Restore `call saved' registers from the state-save-frame pointed to by EP. */
125#define RESTORE_CALL_SAVED_REGS \
126 sld.w PTO+PT_GPR(2)[ep], r2; \
127 sld.w PTO+PT_GPR(20)[ep], r20; \
128 sld.w PTO+PT_GPR(21)[ep], r21; \
129 sld.w PTO+PT_GPR(22)[ep], r22; \
130 sld.w PTO+PT_GPR(23)[ep], r23; \
131 sld.w PTO+PT_GPR(24)[ep], r24; \
132 sld.w PTO+PT_GPR(25)[ep], r25; \
133 sld.w PTO+PT_GPR(26)[ep], r26; \
134 sld.w PTO+PT_GPR(27)[ep], r27; \
135 sld.w PTO+PT_GPR(28)[ep], r28; \
136 sld.w PTO+PT_GPR(29)[ep], r29
137
138
139/* Save the PC stored in the special register SAVEREG to the state-save-frame
140 pointed to by EP. r19 is clobbered. */
141#define SAVE_PC(savereg) \
142 stsr SR_ ## savereg, r19; \
143 sst.w r19, PTO+PT_PC[ep]
144/* Restore the PC from the state-save-frame pointed to by EP, to the special
145 register SAVEREG. LP is clobbered (it is used as a scratch register
146 because the POP_STATE macro restores it, and this macro is usually used
147 inside POP_STATE). */
148#define RESTORE_PC(savereg) \
149 sld.w PTO+PT_PC[ep], lp; \
150 ldsr lp, SR_ ## savereg
151/* Save the PSW register stored in the special register SAVREG to the
152 state-save-frame pointed to by EP. r19 is clobbered. */
153#define SAVE_PSW(savereg) \
154 stsr SR_ ## savereg, r19; \
155 sst.w r19, PTO+PT_PSW[ep]
156/* Restore the PSW register from the state-save-frame pointed to by EP, to
157 the special register SAVEREG. LP is clobbered (it is used as a scratch
158 register because the POP_STATE macro restores it, and this macro is
159 usually used inside POP_STATE). */
160#define RESTORE_PSW(savereg) \
161 sld.w PTO+PT_PSW[ep], lp; \
162 ldsr lp, SR_ ## savereg
163
164/* Save CTPC/CTPSW/CTBP registers to the state-save-frame pointed to by REG.
165 r19 is clobbered. */
166#define SAVE_CT_REGS \
167 stsr SR_CTPC, r19; \
168 sst.w r19, PTO+PT_CTPC[ep]; \
169 stsr SR_CTPSW, r19; \
170 sst.w r19, PTO+PT_CTPSW[ep]; \
171 stsr SR_CTBP, r19; \
172 sst.w r19, PTO+PT_CTBP[ep]
173/* Restore CTPC/CTPSW/CTBP registers from the state-save-frame pointed to by EP.
174 LP is clobbered (it is used as a scratch register because the POP_STATE
175 macro restores it, and this macro is usually used inside POP_STATE). */
176#define RESTORE_CT_REGS \
177 sld.w PTO+PT_CTPC[ep], lp; \
178 ldsr lp, SR_CTPC; \
179 sld.w PTO+PT_CTPSW[ep], lp; \
180 ldsr lp, SR_CTPSW; \
181 sld.w PTO+PT_CTBP[ep], lp; \
182 ldsr lp, SR_CTBP
183
184
185/* Push register state, except for the stack pointer, on the stack in the
186 form of a state-save-frame (plus some extra padding), in preparation for
187 a system call. This macro makes sure that the EP, GP, and LP
188 registers are saved, and TYPE identifies the set of extra registers to
189 be saved as well. Also copies (the new value of) SP to EP. */
190#define PUSH_STATE(type) \
191 addi -STATE_SAVE_SIZE, sp, sp; /* Make room on the stack. */ \
192 st.w ep, PTO+PT_GPR(GPR_EP)[sp]; \
193 mov sp, ep; \
194 sst.w gp, PTO+PT_GPR(GPR_GP)[ep]; \
195 sst.w lp, PTO+PT_GPR(GPR_LP)[ep]; \
196 type ## _STATE_SAVER
197/* Pop a register state pushed by PUSH_STATE, except for the stack pointer,
198 from the stack. */
199#define POP_STATE(type) \
200 mov sp, ep; \
201 type ## _STATE_RESTORER; \
202 sld.w PTO+PT_GPR(GPR_GP)[ep], gp; \
203 sld.w PTO+PT_GPR(GPR_LP)[ep], lp; \
204 sld.w PTO+PT_GPR(GPR_EP)[ep], ep; \
205 addi STATE_SAVE_SIZE, sp, sp /* Clean up our stack space. */
206
207
208/* Switch to the kernel stack if necessary, and push register state on the
209 stack in the form of a state-save-frame. Also load the current task
210 pointer if switching from user mode. The stack-pointer (r3) should have
211 already been saved to the memory location SP_SAVE_LOC (the reason for
212 this is that the interrupt vectors may be beyond a 22-bit signed offset
213 jump from the actual interrupt handler, and this allows them to save the
214 stack-pointer and use that register to do an indirect jump). This macro
215 makes sure that `special' registers, system registers, and the stack
216 pointer are saved; TYPE identifies the set of extra registers to be
217 saved as well. SYSCALL_NUM is the register in which the system-call
218 number this state is for is stored (r0 if this isn't a system call).
219 Interrupts should already be disabled when calling this. */
220#define SAVE_STATE(type, syscall_num, sp_save_loc) \
221 tst1 0, KM; /* See if already in kernel mode. */ \
222 bz 1f; \
223 ld.w sp_save_loc, sp; /* ... yes, use saved SP. */ \
224 br 2f; \
2251: ld.w KSP, sp; /* ... no, switch to kernel stack. */ \
2262: PUSH_STATE(type); \
227 ld.b KM, r19; /* Remember old kernel-mode. */ \
228 sst.w r19, PTO+PT_KERNEL_MODE[ep]; \
229 ld.w sp_save_loc, r19; /* Remember old SP. */ \
230 sst.w r19, PTO+PT_GPR(GPR_SP)[ep]; \
231 mov 1, r19; /* Now definitely in kernel-mode. */ \
232 st.b r19, KM; \
233 GET_CURRENT_TASK(CURRENT_TASK); /* Fetch the current task pointer. */ \
234 /* Save away the syscall number. */ \
235 sst.w syscall_num, PTO+PT_CUR_SYSCALL[ep]
236
237
238/* Save register state not normally saved by PUSH_STATE for TYPE, to the
239 state-save-frame on the stack; also copies SP to EP. r19 may be trashed. */
240#define SAVE_EXTRA_STATE(type) \
241 mov sp, ep; \
242 type ## _EXTRA_STATE_SAVER
243/* Restore register state not normally restored by POP_STATE for TYPE,
244 from the state-save-frame on the stack; also copies SP to EP.
245 r19 may be trashed. */
246#define RESTORE_EXTRA_STATE(type) \
247 mov sp, ep; \
248 type ## _EXTRA_STATE_RESTORER
249
250/* Save any call-clobbered registers not normally saved by PUSH_STATE for
251 TYPE, to the state-save-frame on the stack.
252 EP may be trashed, but is not guaranteed to contain a copy of SP
253 (unlike after most SAVE_... macros). r19 may be trashed. */
254#define SAVE_EXTRA_STATE_FOR_SCHEDULE(type) \
255 type ## _SCHEDULE_EXTRA_STATE_SAVER
256/* Restore any call-clobbered registers not normally restored by
257 POP_STATE for TYPE, to the state-save-frame on the stack.
258 EP may be trashed, but is not guaranteed to contain a copy of SP
259 (unlike after most RESTORE_... macros). r19 may be trashed. */
260#define RESTORE_EXTRA_STATE_FOR_SCHEDULE(type) \
261 type ## _SCHEDULE_EXTRA_STATE_RESTORER
262
263
264/* These are extra_state_saver/restorer values for a user trap. Note
265 that we save the argument registers so that restarted syscalls will
266 function properly (otherwise it wouldn't be necessary), and we must
267 _not_ restore the return-value registers (so that traps can return a
268 value!), but call-clobbered registers are not saved at all, as the
269 caller of the syscall function should have saved them. */
270
271#define TRAP_RET reti
272/* Traps don't save call-clobbered registers (but do still save arg regs).
273 We preserve PSw to keep long-term state, namely interrupt status (for traps
274 from kernel-mode), and the single-step flag (for user traps). */
275#define TRAP_STATE_SAVER \
276 SAVE_ARG_REGS; \
277 SAVE_PC(EIPC); \
278 SAVE_PSW(EIPSW)
279/* When traps return, they just leave call-clobbered registers (except for arg
280 regs) with whatever value they have from the kernel. Traps don't preserve
281 the PSW, but we zero EIPSW to ensure it doesn't contain anything dangerous
282 (in particular, the single-step flag). */
283#define TRAP_STATE_RESTORER \
284 RESTORE_ARG_REGS; \
285 RESTORE_PC(EIPC); \
286 RESTORE_PSW(EIPSW)
287/* Save registers not normally saved by traps. We need to save r12, even
288 though it's nominally call-clobbered, because it's used when restarting
289 a system call (the signal-handling path uses SAVE_EXTRA_STATE, and
290 expects r12 to be restored when the trap returns). */
291#define TRAP_EXTRA_STATE_SAVER \
292 SAVE_RVAL_REGS; \
293 sst.w r12, PTO+PT_GPR(12)[ep]; \
294 SAVE_CALL_SAVED_REGS; \
295 SAVE_CT_REGS
296#define TRAP_EXTRA_STATE_RESTORER \
297 RESTORE_RVAL_REGS; \
298 sld.w PTO+PT_GPR(12)[ep], r12; \
299 RESTORE_CALL_SAVED_REGS; \
300 RESTORE_CT_REGS
301/* Save registers prior to calling scheduler (just before trap returns).
302 We have to save the return-value registers to preserve the trap's return
303 value. Note that ..._SCHEDULE_EXTRA_STATE_SAVER, unlike most ..._SAVER
304 macros, is required to set up EP itself if EP is needed (this is because
305 in many cases, the macro is empty). */
306#define TRAP_SCHEDULE_EXTRA_STATE_SAVER \
307 mov sp, ep; \
308 SAVE_RVAL_REGS
309/* Note that ..._SCHEDULE_EXTRA_STATE_RESTORER, unlike most ..._RESTORER
310 macros, is required to set up EP itself if EP is needed (this is because
311 in many cases, the macro is empty). */
312#define TRAP_SCHEDULE_EXTRA_STATE_RESTORER \
313 mov sp, ep; \
314 RESTORE_RVAL_REGS
315
316/* Register saving/restoring for maskable interrupts. */
317#define IRQ_RET reti
318#define IRQ_STATE_SAVER \
319 SAVE_CALL_CLOBBERED_REGS; \
320 SAVE_PC(EIPC); \
321 SAVE_PSW(EIPSW)
322#define IRQ_STATE_RESTORER \
323 RESTORE_CALL_CLOBBERED_REGS; \
324 RESTORE_PC(EIPC); \
325 RESTORE_PSW(EIPSW)
326#define IRQ_EXTRA_STATE_SAVER \
327 SAVE_CALL_SAVED_REGS; \
328 SAVE_CT_REGS
329#define IRQ_EXTRA_STATE_RESTORER \
330 RESTORE_CALL_SAVED_REGS; \
331 RESTORE_CT_REGS
332#define IRQ_SCHEDULE_EXTRA_STATE_SAVER /* nothing */
333#define IRQ_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */
334
335/* Register saving/restoring for non-maskable interrupts. */
336#define NMI_RET reti
337#define NMI_STATE_SAVER \
338 SAVE_CALL_CLOBBERED_REGS; \
339 SAVE_PC(FEPC); \
340 SAVE_PSW(FEPSW);
341#define NMI_STATE_RESTORER \
342 RESTORE_CALL_CLOBBERED_REGS; \
343 RESTORE_PC(FEPC); \
344 RESTORE_PSW(FEPSW);
345#define NMI_EXTRA_STATE_SAVER \
346 SAVE_CALL_SAVED_REGS; \
347 SAVE_CT_REGS
348#define NMI_EXTRA_STATE_RESTORER \
349 RESTORE_CALL_SAVED_REGS; \
350 RESTORE_CT_REGS
351#define NMI_SCHEDULE_EXTRA_STATE_SAVER /* nothing */
352#define NMI_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */
353
354/* Register saving/restoring for debug traps. */
355#define DBTRAP_RET .long 0x014607E0 /* `dbret', but gas doesn't support it. */
356#define DBTRAP_STATE_SAVER \
357 SAVE_CALL_CLOBBERED_REGS; \
358 SAVE_PC(DBPC); \
359 SAVE_PSW(DBPSW)
360#define DBTRAP_STATE_RESTORER \
361 RESTORE_CALL_CLOBBERED_REGS; \
362 RESTORE_PC(DBPC); \
363 RESTORE_PSW(DBPSW)
364#define DBTRAP_EXTRA_STATE_SAVER \
365 SAVE_CALL_SAVED_REGS; \
366 SAVE_CT_REGS
367#define DBTRAP_EXTRA_STATE_RESTORER \
368 RESTORE_CALL_SAVED_REGS; \
369 RESTORE_CT_REGS
370#define DBTRAP_SCHEDULE_EXTRA_STATE_SAVER /* nothing */
371#define DBTRAP_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */
372
373/* Register saving/restoring for a context switch. We don't need to save
374 too many registers, because context-switching looks like a function call
375 (via the function `switch_thread'), so callers will save any
376 call-clobbered registers themselves. We do need to save the CT regs, as
377 they're normally not saved during kernel entry (the kernel doesn't use
378 them). We save PSW so that interrupt-status state will correctly follow
379 each thread (mostly NMI vs. normal-IRQ/trap), though for the most part
380 it doesn't matter since threads are always in almost exactly the same
381 processor state during a context switch. The stack pointer and return
382 value are handled by switch_thread itself. */
383#define SWITCH_STATE_SAVER \
384 SAVE_CALL_SAVED_REGS; \
385 SAVE_PSW(PSW); \
386 SAVE_CT_REGS
387#define SWITCH_STATE_RESTORER \
388 RESTORE_CALL_SAVED_REGS; \
389 RESTORE_PSW(PSW); \
390 RESTORE_CT_REGS
391
392
393/* Restore register state from the state-save-frame on the stack, switch back
394 to the user stack if necessary, and return from the trap/interrupt.
395 EXTRA_STATE_RESTORER is a sequence of assembly language statements to
396 restore anything not restored by this macro. Only registers not saved by
397 the C compiler are restored (that is, R3(sp), R4(gp), R31(lp), and
398 anything restored by EXTRA_STATE_RESTORER). */
399#define RETURN(type) \
400 ld.b PTO+PT_KERNEL_MODE[sp], r19; \
401 di; /* Disable interrupts */ \
402 cmp r19, r0; /* See if returning to kernel mode, */\
403 bne 2f; /* ... if so, skip resched &c. */ \
404 \
405 /* We're returning to user mode, so check for various conditions that \
406 trigger rescheduling. */ \
407 GET_CURRENT_THREAD(r18); \
408 ld.w TI_FLAGS[r18], r19; \
409 andi _TIF_NEED_RESCHED, r19, r0; \
410 bnz 3f; /* Call the scheduler. */ \
4115: andi _TIF_SIGPENDING, r19, r18; \
412 ld.w TASK_PTRACE[CURRENT_TASK], r19; /* ptrace flags */ \
413 or r18, r19; /* see if either is non-zero */ \
414 bnz 4f; /* if so, handle them */ \
415 \
416/* Return to user state. */ \
4171: st.b r0, KM; /* Now officially in user state. */ \
418 \
419/* Final return. The stack-pointer fiddling is not needed when returning \
420 to kernel-mode, but it doesn't hurt, and this way we can share the \
421 (sometimes rather lengthy) POP_STATE macro. */ \
4222: POP_STATE(type); \
423 st.w sp, KSP; /* Save the kernel stack pointer. */ \
424 ld.w PT_GPR(GPR_SP)-PT_SIZE[sp], sp; /* Restore stack pointer. */ \
425 type ## _RET; /* Return from the trap/interrupt. */ \
426 \
427/* Call the scheduler before returning from a syscall/trap. */ \
4283: SAVE_EXTRA_STATE_FOR_SCHEDULE(type); /* Prepare to call scheduler. */ \
429 jarl call_scheduler, lp; /* Call scheduler */ \
430 di; /* The scheduler enables interrupts */\
431 RESTORE_EXTRA_STATE_FOR_SCHEDULE(type); \
432 GET_CURRENT_THREAD(r18); \
433 ld.w TI_FLAGS[r18], r19; \
434 br 5b; /* Continue with return path. */ \
435 \
436/* Handle a signal or ptraced process return. \
437 r18 should be non-zero if there are pending signals. */ \
4384: /* Not all registers are saved by the normal trap/interrupt entry \
439 points (for instance, call-saved registers (because the normal \
440 C-compiler calling sequence in the kernel makes sure they're \
441 preserved), and call-clobbered registers in the case of \
442 traps), but signal handlers may want to examine or change the \
443 complete register state. Here we save anything not saved by \
444 the normal entry sequence, so that it may be safely restored \
445 (in a possibly modified form) after do_signal returns. */ \
446 SAVE_EXTRA_STATE(type); /* Save state not saved by entry. */ \
447 jarl handle_signal_or_ptrace_return, lp; \
448 RESTORE_EXTRA_STATE(type); /* Restore extra regs. */ \
449 br 1b
450
451
452/* Jump to the appropriate function for the system call number in r12
453 (r12 is not preserved), or return an error if r12 is not valid. The
454 LP register should point to the location where the called function
455 should return. [note that MAKE_SYS_CALL uses label 1] */
456#define MAKE_SYS_CALL \
457 /* Figure out which function to use for this system call. */ \
458 shl 2, r12; \
459 /* See if the system call number is valid. */ \
460 addi lo(CSYM(sys_call_table) - sys_call_table_end), r12, r0; \
461 bnh 1f; \
462 mov hilo(CSYM(sys_call_table)), r19; \
463 add r19, r12; \
464 ld.w 0[r12], r12; \
465 /* Make the system call. */ \
466 jmp [r12]; \
467 /* The syscall number is invalid, return an error. */ \
4681: addi -ENOSYS, r0, r10; \
469 jmp [lp]
470
471
472 .text
473
474/*
475 * User trap.
476 *
477 * Trap 0 system calls are also handled here.
478 *
479 * The stack-pointer (r3) should have already been saved to the memory
480 * location ENTRY_SP (the reason for this is that the interrupt vectors may be
481 * beyond a 22-bit signed offset jump from the actual interrupt handler, and
482 * this allows them to save the stack-pointer and use that register to do an
483 * indirect jump).
484 *
485 * Syscall protocol:
486 * Syscall number in r12, args in r6-r9
487 * Return value in r10
488 */
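/*
 * A rough user-side sketch of that protocol (illustrative only; syscall
 * numbers are indices into sys_call_table below, e.g. sys_write is 4):
 *
 *	mov	4, r12		// syscall number in r12
 *	mov	1, r6		// arg 0 (fd) in r6; r7/r8 would hold buf/len
 *	trap	0		// enter the kernel through this handler
 *				// on return, r10 holds the result (or -errno)
 */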
489G_ENTRY(trap):
490 SAVE_STATE (TRAP, r12, ENTRY_SP) // Save registers.
491 stsr SR_ECR, r19 // Find out which trap it was.
492 ei // Enable interrupts.
493 mov hilo(ret_from_trap), lp // where the trap should return
494
495 // The following two shifts (1) clear out extraneous NMI data in the
496 // upper 16-bits, (2) convert the 0x40 - 0x5f range of trap ECR
497 // numbers into the (0-31) << 2 range we want, (3) set the flags.
498 shl 27, r19 // chop off all high bits
499 shr 25, r19 // scale back down and then << 2
500 bnz 2f // See if not trap 0.
501
502 // Trap 0 is a `short' system call, skip general trap table.
503 MAKE_SYS_CALL // Jump to the syscall function.
504
5052: // For other traps, use a table lookup.
506 mov hilo(CSYM(trap_table)), r18
507 add r19, r18
508 ld.w 0[r18], r18
509 jmp [r18] // Jump to the trap handler.
510END(trap)
511
512/* This is just like ret_from_trap, but first restores extra registers
513 saved by some wrappers. */
514L_ENTRY(restore_extra_regs_and_ret_from_trap):
515 RESTORE_EXTRA_STATE(TRAP)
516 // fall through
517END(restore_extra_regs_and_ret_from_trap)
518
519/* Entry point used to return from a syscall/trap. */
520L_ENTRY(ret_from_trap):
521 RETURN(TRAP)
522END(ret_from_trap)
523
524
525/* This the initial entry point for a new child thread, with an appropriate
526 stack in place that makes it look that the child is in the middle of an
527 syscall. This function is actually `returned to' from switch_thread
528 (copy_thread makes ret_from_fork the return address in each new thread's
529 saved context). */
530C_ENTRY(ret_from_fork):
531 mov r10, r6 // switch_thread returns the prev task.
532 jarl CSYM(schedule_tail), lp // ...which is schedule_tail's arg
533 mov r0, r10 // Child's fork call should return 0.
534 br ret_from_trap // Do normal trap return.
535C_END(ret_from_fork)
536
537
538/*
539 * Trap 1: `long' system calls
540 * `Long' syscall protocol:
541 * Syscall number in r12, args in r6-r9, r13-r14
542 * Return value in r10
543 */
544L_ENTRY(syscall_long):
545 // Push extra arguments on the stack. Note that by default, the trap
546 // handler reserves enough stack space for 6 arguments, so we don't
547 // have to make any additional room.
548 st.w r13, 16[sp] // arg 5
549 st.w r14, 20[sp] // arg 6
550
551 // Make sure r13 and r14 are preserved, in case we have to restart a
552 // system call because of a signal (ep has already been set by caller).
553 st.w r13, PTO+PT_GPR(13)[sp]
554 st.w r14, PTO+PT_GPR(14)[sp]
555 mov hilo(ret_from_long_syscall), lp
556
557 MAKE_SYS_CALL // Jump to the syscall function.
558END(syscall_long)
559
560/* Entry point used to return from a long syscall. Only needed to restore
561 r13/r14 if the general trap mechanism doesn't do so. */
562L_ENTRY(ret_from_long_syscall):
563 ld.w PTO+PT_GPR(13)[sp], r13 // Restore the extra registers
564 ld.w PTO+PT_GPR(14)[sp], r14
565 br ret_from_trap // The rest is the same as other traps
566END(ret_from_long_syscall)
567
568
569/* These syscalls need access to the struct pt_regs on the stack, so we
570 implement them in assembly (they're basically all wrappers anyway). */
571
572L_ENTRY(sys_fork_wrapper):
573#ifdef CONFIG_MMU
574 addi SIGCHLD, r0, r6 // Arg 0: flags
575 ld.w PTO+PT_GPR(GPR_SP)[sp], r7 // Arg 1: child SP (use parent's)
576 movea PTO, sp, r8 // Arg 2: parent context
577 mov r0, r9 // Arg 3/4/5: 0
578 st.w r0, 16[sp]
579 st.w r0, 20[sp]
580 mov hilo(CSYM(do_fork)), r18 // Where the real work gets done
581 br save_extra_state_tramp // Save state and go there
582#else
583 // fork almost works, enough to trick you into looking elsewhere :-(
584 addi -EINVAL, r0, r10
585 jmp [lp]
586#endif
587END(sys_fork_wrapper)
588
589L_ENTRY(sys_vfork_wrapper):
590 addi CLONE_VFORK | CLONE_VM | SIGCHLD, r0, r6 // Arg 0: flags
591 ld.w PTO+PT_GPR(GPR_SP)[sp], r7 // Arg 1: child SP (use parent's)
592 movea PTO, sp, r8 // Arg 2: parent context
593 mov r0, r9 // Arg 3/4/5: 0
594 st.w r0, 16[sp]
595 st.w r0, 20[sp]
596 mov hilo(CSYM(do_fork)), r18 // Where the real work gets done
597 br save_extra_state_tramp // Save state and go there
598END(sys_vfork_wrapper)
599
600L_ENTRY(sys_clone_wrapper):
601 ld.w PTO+PT_GPR(GPR_SP)[sp], r19// parent's stack pointer
602 cmp r7, r0 // See if child SP arg (arg 1) is 0.
603 cmov z, r19, r7, r7 // ... and use the parent's if so.
604 movea PTO, sp, r8 // Arg 2: parent context
605 mov r0, r9 // Arg 3/4/5: 0
606 st.w r0, 16[sp]
607 st.w r0, 20[sp]
608 mov hilo(CSYM(do_fork)), r18 // Where the real work gets done
609 br save_extra_state_tramp // Save state and go there
610END(sys_clone_wrapper)
611
612
613L_ENTRY(sys_execve_wrapper):
614 movea PTO, sp, r9 // add user context as 4th arg
615 jr CSYM(sys_execve) // Do real work (tail-call).
616END(sys_execve_wrapper)
617
618
619L_ENTRY(sys_sigsuspend_wrapper):
620 movea PTO, sp, r7 // add user context as 2nd arg
621 mov hilo(CSYM(sys_sigsuspend)), r18 // syscall function
622 jarl save_extra_state_tramp, lp // Save state and do it
623 br restore_extra_regs_and_ret_from_trap
624END(sys_sigsuspend_wrapper)
625L_ENTRY(sys_rt_sigsuspend_wrapper):
626 movea PTO, sp, r8 // add user context as 3rd arg
627 mov hilo(CSYM(sys_rt_sigsuspend)), r18 // syscall function
628 jarl save_extra_state_tramp, lp // Save state and do it
629 br restore_extra_regs_and_ret_from_trap
630END(sys_rt_sigsuspend_wrapper)
631
632L_ENTRY(sys_sigreturn_wrapper):
633 movea PTO, sp, r6 // add user context as 1st arg
634 mov hilo(CSYM(sys_sigreturn)), r18 // syscall function
635 jarl save_extra_state_tramp, lp // Save state and do it
636 br restore_extra_regs_and_ret_from_trap
637END(sys_sigreturn_wrapper)
638L_ENTRY(sys_rt_sigreturn_wrapper):
639 movea PTO, sp, r6 // add user context as 1st arg
640 mov hilo(CSYM(sys_rt_sigreturn)), r18// syscall function
641 jarl save_extra_state_tramp, lp // Save state and do it
642 br restore_extra_regs_and_ret_from_trap
643END(sys_rt_sigreturn_wrapper)
644
645
646/* Save any state not saved by SAVE_STATE(TRAP), and jump to r18.
647 Its main purpose is to share the rather lengthy code sequence that
648 SAVE_EXTRA_STATE expands into among the above wrapper functions. */
649L_ENTRY(save_extra_state_tramp):
650 SAVE_EXTRA_STATE(TRAP) // Save state not saved by entry.
651 jmp [r18] // Do the work the caller wants
652END(save_extra_state_tramp)
653
654
655/*
656 * Hardware maskable interrupts.
657 *
658 * The stack-pointer (r3) should have already been saved to the memory
659 * location ENTRY_SP (the reason for this is that the interrupt vectors may be
660 * beyond a 22-bit signed offset jump from the actual interrupt handler, and
661 * this allows them to save the stack-pointer and use that register to do an
662 * indirect jump).
663 */
664G_ENTRY(irq):
665 SAVE_STATE (IRQ, r0, ENTRY_SP) // Save registers.
666
667 stsr SR_ECR, r6 // Find out which interrupt it was.
668 movea PTO, sp, r7 // User regs are arg2
669
670 // All v850 implementations I know about encode their interrupts as
671 // multiples of 0x10, starting at 0x80 (after NMIs and software
672 // interrupts). Convert this number into a simple IRQ index for the
673 // rest of the kernel. We also clear the upper 16 bits, which hold
674 // NMI info, and don't appear to be cleared when a NMI returns.
675 shl 16, r6 // clear upper 16 bits
676 shr 20, r6 // shift back, and remove lower nibble
677 add -8, r6 // remove bias for irqs
678
679 // Call the high-level interrupt handling code.
680 jarl CSYM(handle_irq), lp
681
682 RETURN(IRQ)
683END(irq)
684
685
686/*
687 * Debug trap / illegal-instruction exception
688 *
689 * The stack-pointer (r3) should have already been saved to the memory
690 * location ENTRY_SP (the reason for this is that the interrupt vectors may be
691 * beyond a 22-bit signed offset jump from the actual interrupt handler, and
692 * this allows them to save the stack-pointer and use that register to do an
693 * indirect jump).
694 */
695G_ENTRY(dbtrap):
696 SAVE_STATE (DBTRAP, r0, ENTRY_SP)// Save registers.
697
698 /* First see if we came from kernel mode; if so, the dbtrap
699 instruction has a special meaning, to set the DIR (`debug
700 information register') register. This is because the DIR register
701 can _only_ be manipulated/read while in `debug mode,' and debug
702 mode is only active while we're inside the dbtrap handler. The
703 exact functionality is: { DIR = (DIR | r6) & ~r7; return DIR; }. */
704 ld.b PTO+PT_KERNEL_MODE[sp], r19
705 cmp r19, r0
706 bz 1f
707
708 stsr SR_DIR, r10
709 or r6, r10
710 not r7, r7
711 and r7, r10
712 ldsr r10, SR_DIR
713 stsr SR_DIR, r10 // Confirm the value we set
714 st.w r10, PTO+PT_GPR(10)[sp] // return it
715 br 3f
716
7171: ei // Enable interrupts.
718
719 /* The default signal type we raise. */
720 mov SIGTRAP, r6
721
722 /* See if it's a single-step trap. */
723 stsr SR_DBPSW, r19
724 andi 0x0800, r19, r19
725 bnz 2f
726
727 /* Look to see if the preceding instruction was a dbtrap or not,
728 to decide which signal we should use. */
729 stsr SR_DBPC, r19 // PC following trapping insn
730 ld.hu -2[r19], r19
731 ori 0xf840, r0, r20 // DBTRAP insn
732 cmp r19, r20 // Was this trap caused by DBTRAP?
733 cmov ne, SIGILL, r6, r6 // Choose signal appropriately
734
735 /* Raise the desired signal. */
7362: mov CURRENT_TASK, r7 // Arg 1: task
737 jarl CSYM(send_sig), lp // tail call
738
7393: RETURN(DBTRAP)
740END(dbtrap)
741
742
743/*
744 * Hardware non-maskable interrupts.
745 *
746 * The stack-pointer (r3) should have already been saved to the memory
747 * location ENTRY_SP (the reason for this is that the interrupt vectors may be
748 * beyond a 22-bit signed offset jump from the actual interrupt handler, and
749 * this allows them to save the stack-pointer and use that register to do an
750 * indirect jump).
751 */
752G_ENTRY(nmi):
753 SAVE_STATE (NMI, r0, NMI_ENTRY_SP); /* Save registers. */
754
755 stsr SR_ECR, r6; /* Find out which nmi it was. */
756 shr 20, r6; /* Extract NMI code in bits 20-24. */
757 movea PTO, sp, r7; /* User regs are arg2. */
758
759 /* Non-maskable interrupts always lie right after maskable interrupts.
760 Call the generic IRQ handler, with two arguments, the IRQ number,
761 and a pointer to the user registers, to handle the specifics.
762 (we subtract one because the first NMI has code 1). */
763 addi FIRST_NMI - 1, r6, r6
764 jarl CSYM(handle_irq), lp
765
766 RETURN(NMI)
767END(nmi)
768
769
770/*
771 * Trap with no handler
772 */
773L_ENTRY(bad_trap_wrapper):
774 mov r19, r6 // Arg 0: trap number
775 movea PTO, sp, r7 // Arg 1: user regs
776 jr CSYM(bad_trap) // tail call handler
777END(bad_trap_wrapper)
778
779
780/*
781 * Invoke the scheduler, called from the trap/irq kernel exit path.
782 *
783 * This basically just calls `schedule', but also arranges for extra
784 * registers to be saved for ptrace'd processes, so ptrace can modify them.
785 */
786L_ENTRY(call_scheduler):
787 ld.w TASK_PTRACE[CURRENT_TASK], r19 // See if task is ptrace'd
788 cmp r19, r0
789 bnz 1f // ... yes, do special stuff
790 jr CSYM(schedule) // ... no, just tail-call scheduler
791
792 // Save extra regs for ptrace'd task. We want to save anything
793 // that would otherwise only be `implicitly' saved by the normal
794 // compiler calling-convention.
7951: mov sp, ep // Setup EP for SAVE_CALL_SAVED_REGS
796 SAVE_CALL_SAVED_REGS // Save call-saved registers to stack
797 mov lp, r20 // Save LP in a callee-saved register
798
799 jarl CSYM(schedule), lp // Call scheduler
800
801 mov r20, lp
802 mov sp, ep // We can't rely on EP after return
803 RESTORE_CALL_SAVED_REGS // Restore (possibly modified) regs
804 jmp [lp] // Return to the return path
805END(call_scheduler)
806
807
808/*
809 * This is an out-of-line handler for two special cases during the kernel
810 * trap/irq exit sequence:
811 *
812 * (1) If r18 is non-zero then a signal needs to be handled, which is
813 * done, and then the caller returned to.
814 *
815 * (2) If r18 is zero then we're returning to a ptraced process, which
816 * has several special cases -- single-stepping and trap tracing, both
817 * of which require using the `dbret' instruction to exit the kernel
818 * instead of the normal `reti' (this is because the CPU does not correctly
819 * single-step after a reti). In this case, of course, this handler
820 * never returns to the caller.
821 *
822 * In either case, all registers should have been saved to the current
823 * state-save-frame on the stack, except for callee-saved registers.
824 *
825 * [These two different cases are combined merely to avoid bloating the
826 * macro-inlined code, not because they really make much sense together!]
827 */
828L_ENTRY(handle_signal_or_ptrace_return):
829 cmp r18, r0 // See if handling a signal
830 bz 1f // ... nope, go do ptrace return
831
832 // Handle a signal
833 mov lp, r20 // Save link-pointer
834 mov r10, r21 // Save return-values (for trap)
835 mov r11, r22
836
837 movea PTO, sp, r6 // Arg 1: struct pt_regs *regs
838 mov r0, r7 // Arg 2: sigset_t *oldset
839 jarl CSYM(do_signal), lp // Handle the signal
840 di // sig handling enables interrupts
841
842 mov r20, lp // Restore link-pointer
843 mov r21, r10 // Restore return-values (for trap)
844 mov r22, r11
845 ld.w TASK_PTRACE[CURRENT_TASK], r19 // check ptrace flags too
846 cmp r19, r0
847 bnz 1f // ... some set, so look more
8482: jmp [lp] // ... none set, so return normally
849
850 // ptrace return
8511: ld.w PTO+PT_PSW[sp], r19 // Look at user process's flags
852 andi 0x0800, r19, r19 // See if single-step flag is set
853 bz 2b // ... nope, return normally
854
855 // Return as if from a dbtrap insn
856 st.b r0, KM // Now officially in user state.
857 POP_STATE(DBTRAP) // Restore regs
858 st.w sp, KSP // Save the kernel stack pointer.
859 ld.w PT_GPR(GPR_SP)-PT_SIZE[sp], sp // Restore user stack pointer.
860 DBTRAP_RET // Return from the trap/interrupt.
861END(handle_signal_or_ptrace_return)
862
863
864/*
865 * This is where we switch between two threads. The arguments are:
866 * r6 -- pointer to the struct thread for the `current' process
867 * r7 -- pointer to the struct thread for the `new' process.
868 * when this function returns, it will return to the new thread.
869 */
870C_ENTRY(switch_thread):
871 // Return the previous task (r10 is not clobbered by restore below)
872 mov CURRENT_TASK, r10
873 // First, push the current processor state on the stack
874 PUSH_STATE(SWITCH)
875 // Now save the location of the kernel stack pointer for this thread;
876 // since we've pushed all other state on the stack, this is enough to
877 // restore it all later.
878 st.w sp, THREAD_KSP[r6]
879 // Now restore the stack pointer from the new process
880 ld.w THREAD_KSP[r7], sp
881 // ... and restore all state from that
882 POP_STATE(SWITCH)
883 // Update the current task pointer
884 GET_CURRENT_TASK(CURRENT_TASK)
885 // Now return into the new thread
886 jmp [lp]
887C_END(switch_thread)
888
889
890 .data
891
892 .align 4
893C_DATA(trap_table):
894 .long bad_trap_wrapper // trap 0, doesn't use trap table.
895 .long syscall_long // trap 1, `long' syscall.
896 .long bad_trap_wrapper
897 .long bad_trap_wrapper
898 .long bad_trap_wrapper
899 .long bad_trap_wrapper
900 .long bad_trap_wrapper
901 .long bad_trap_wrapper
902 .long bad_trap_wrapper
903 .long bad_trap_wrapper
904 .long bad_trap_wrapper
905 .long bad_trap_wrapper
906 .long bad_trap_wrapper
907 .long bad_trap_wrapper
908 .long bad_trap_wrapper
909 .long bad_trap_wrapper
910C_END(trap_table)
911
912
913 .section .rodata
914
915 .align 4
916C_DATA(sys_call_table):
917 .long CSYM(sys_restart_syscall) // 0
918 .long CSYM(sys_exit)
919 .long sys_fork_wrapper
920 .long CSYM(sys_read)
921 .long CSYM(sys_write)
922 .long CSYM(sys_open) // 5
923 .long CSYM(sys_close)
924 .long CSYM(sys_waitpid)
925 .long CSYM(sys_creat)
926 .long CSYM(sys_link)
927 .long CSYM(sys_unlink) // 10
928 .long sys_execve_wrapper
929 .long CSYM(sys_chdir)
930 .long CSYM(sys_time)
931 .long CSYM(sys_mknod)
932 .long CSYM(sys_chmod) // 15
933 .long CSYM(sys_chown)
934 .long CSYM(sys_ni_syscall) // was: break
935 .long CSYM(sys_ni_syscall) // was: oldstat (aka stat)
936 .long CSYM(sys_lseek)
937 .long CSYM(sys_getpid) // 20
938 .long CSYM(sys_mount)
939 .long CSYM(sys_oldumount)
940 .long CSYM(sys_setuid)
941 .long CSYM(sys_getuid)
942 .long CSYM(sys_stime) // 25
943 .long CSYM(sys_ptrace)
944 .long CSYM(sys_alarm)
945 .long CSYM(sys_ni_syscall) // was: oldfstat (aka fstat)
946 .long CSYM(sys_pause)
947 .long CSYM(sys_utime) // 30
948 .long CSYM(sys_ni_syscall) // was: stty
949 .long CSYM(sys_ni_syscall) // was: gtty
950 .long CSYM(sys_access)
951 .long CSYM(sys_nice)
952 .long CSYM(sys_ni_syscall) // 35, was: ftime
953 .long CSYM(sys_sync)
954 .long CSYM(sys_kill)
955 .long CSYM(sys_rename)
956 .long CSYM(sys_mkdir)
957 .long CSYM(sys_rmdir) // 40
958 .long CSYM(sys_dup)
959 .long CSYM(sys_pipe)
960 .long CSYM(sys_times)
961 .long CSYM(sys_ni_syscall) // was: prof
962 .long CSYM(sys_brk) // 45
963 .long CSYM(sys_setgid)
964 .long CSYM(sys_getgid)
965 .long CSYM(sys_signal)
966 .long CSYM(sys_geteuid)
967 .long CSYM(sys_getegid) // 50
968 .long CSYM(sys_acct)
969 .long CSYM(sys_umount) // recycled never used phys()
970 .long CSYM(sys_ni_syscall) // was: lock
971 .long CSYM(sys_ioctl)
972 .long CSYM(sys_fcntl) // 55
973 .long CSYM(sys_ni_syscall) // was: mpx
974 .long CSYM(sys_setpgid)
975 .long CSYM(sys_ni_syscall) // was: ulimit
976 .long CSYM(sys_ni_syscall)
977 .long CSYM(sys_umask) // 60
978 .long CSYM(sys_chroot)
979 .long CSYM(sys_ustat)
980 .long CSYM(sys_dup2)
981 .long CSYM(sys_getppid)
982 .long CSYM(sys_getpgrp) // 65
983 .long CSYM(sys_setsid)
984 .long CSYM(sys_sigaction)
985 .long CSYM(sys_sgetmask)
986 .long CSYM(sys_ssetmask)
987 .long CSYM(sys_setreuid) // 70
988 .long CSYM(sys_setregid)
989 .long sys_sigsuspend_wrapper
990 .long CSYM(sys_sigpending)
991 .long CSYM(sys_sethostname)
992 .long CSYM(sys_setrlimit) // 75
993 .long CSYM(sys_getrlimit)
994 .long CSYM(sys_getrusage)
995 .long CSYM(sys_gettimeofday)
996 .long CSYM(sys_settimeofday)
997 .long CSYM(sys_getgroups) // 80
998 .long CSYM(sys_setgroups)
999 .long CSYM(sys_select)
1000 .long CSYM(sys_symlink)
1001 .long CSYM(sys_ni_syscall) // was: oldlstat (aka lstat)
1002 .long CSYM(sys_readlink) // 85
1003 .long CSYM(sys_uselib)
1004 .long CSYM(sys_swapon)
1005 .long CSYM(sys_reboot)
1006 .long CSYM(old_readdir)
1007 .long CSYM(sys_mmap) // 90
1008 .long CSYM(sys_munmap)
1009 .long CSYM(sys_truncate)
1010 .long CSYM(sys_ftruncate)
1011 .long CSYM(sys_fchmod)
1012 .long CSYM(sys_fchown) // 95
1013 .long CSYM(sys_getpriority)
1014 .long CSYM(sys_setpriority)
1015 .long CSYM(sys_ni_syscall) // was: profil
1016 .long CSYM(sys_statfs)
1017 .long CSYM(sys_fstatfs) // 100
1018 .long CSYM(sys_ni_syscall) // i386: ioperm
1019 .long CSYM(sys_socketcall)
1020 .long CSYM(sys_syslog)
1021 .long CSYM(sys_setitimer)
1022 .long CSYM(sys_getitimer) // 105
1023 .long CSYM(sys_newstat)
1024 .long CSYM(sys_newlstat)
1025 .long CSYM(sys_newfstat)
1026 .long CSYM(sys_ni_syscall) // was: olduname (aka uname)
1027 .long CSYM(sys_ni_syscall) // 110, i386: iopl
1028 .long CSYM(sys_vhangup)
1029 .long CSYM(sys_ni_syscall) // was: idle
1030 .long CSYM(sys_ni_syscall) // i386: vm86old
1031 .long CSYM(sys_wait4)
1032 .long CSYM(sys_swapoff) // 115
1033 .long CSYM(sys_sysinfo)
1034 .long CSYM(sys_ipc)
1035 .long CSYM(sys_fsync)
1036 .long sys_sigreturn_wrapper
1037 .long sys_clone_wrapper // 120
1038 .long CSYM(sys_setdomainname)
1039 .long CSYM(sys_newuname)
1040 .long CSYM(sys_ni_syscall) // i386: modify_ldt, m68k: cacheflush
1041 .long CSYM(sys_adjtimex)
1042 .long CSYM(sys_ni_syscall) // 125 - sys_mprotect
1043 .long CSYM(sys_sigprocmask)
1044 .long CSYM(sys_ni_syscall) // sys_create_module
1045 .long CSYM(sys_init_module)
1046 .long CSYM(sys_delete_module)
1047 .long CSYM(sys_ni_syscall) // 130 - sys_get_kernel_syms
1048 .long CSYM(sys_quotactl)
1049 .long CSYM(sys_getpgid)
1050 .long CSYM(sys_fchdir)
1051 .long CSYM(sys_bdflush)
1052 .long CSYM(sys_sysfs) // 135
1053 .long CSYM(sys_personality)
1054 .long CSYM(sys_ni_syscall) // for afs_syscall
1055 .long CSYM(sys_setfsuid)
1056 .long CSYM(sys_setfsgid)
1057 .long CSYM(sys_llseek) // 140
1058 .long CSYM(sys_getdents)
1059 .long CSYM(sys_select) // for backward compat; remove someday
1060 .long CSYM(sys_flock)
1061 .long CSYM(sys_ni_syscall) // sys_msync
1062 .long CSYM(sys_readv) // 145
1063 .long CSYM(sys_writev)
1064 .long CSYM(sys_getsid)
1065 .long CSYM(sys_fdatasync)
1066 .long CSYM(sys_sysctl)
1067 .long CSYM(sys_ni_syscall) // 150 - sys_mlock
1068 .long CSYM(sys_ni_syscall) // sys_munlock
1069 .long CSYM(sys_ni_syscall) // sys_mlockall
1070 .long CSYM(sys_ni_syscall) // sys_munlockall
1071 .long CSYM(sys_sched_setparam)
1072 .long CSYM(sys_sched_getparam) // 155
1073 .long CSYM(sys_sched_setscheduler)
1074 .long CSYM(sys_sched_getscheduler)
1075 .long CSYM(sys_sched_yield)
1076 .long CSYM(sys_sched_get_priority_max)
1077 .long CSYM(sys_sched_get_priority_min) // 160
1078 .long CSYM(sys_sched_rr_get_interval)
1079 .long CSYM(sys_nanosleep)
1080 .long CSYM(sys_ni_syscall) // sys_mremap
1081 .long CSYM(sys_setresuid)
1082 .long CSYM(sys_getresuid) // 165
1083 .long CSYM(sys_ni_syscall) // for vm86
1084 .long CSYM(sys_ni_syscall) // sys_query_module
1085 .long CSYM(sys_poll)
1086 .long CSYM(sys_nfsservctl)
1087 .long CSYM(sys_setresgid) // 170
1088 .long CSYM(sys_getresgid)
1089 .long CSYM(sys_prctl)
1090 .long sys_rt_sigreturn_wrapper
1091 .long CSYM(sys_rt_sigaction)
1092 .long CSYM(sys_rt_sigprocmask) // 175
1093 .long CSYM(sys_rt_sigpending)
1094 .long CSYM(sys_rt_sigtimedwait)
1095 .long CSYM(sys_rt_sigqueueinfo)
1096 .long sys_rt_sigsuspend_wrapper
1097 .long CSYM(sys_pread64) // 180
1098 .long CSYM(sys_pwrite64)
1099 .long CSYM(sys_lchown)
1100 .long CSYM(sys_getcwd)
1101 .long CSYM(sys_capget)
1102 .long CSYM(sys_capset) // 185
1103 .long CSYM(sys_sigaltstack)
1104 .long CSYM(sys_sendfile)
1105 .long CSYM(sys_ni_syscall) // streams1
1106 .long CSYM(sys_ni_syscall) // streams2
1107 .long sys_vfork_wrapper // 190
1108 .long CSYM(sys_ni_syscall)
1109 .long CSYM(sys_mmap2)
1110 .long CSYM(sys_truncate64)
1111 .long CSYM(sys_ftruncate64)
1112 .long CSYM(sys_stat64) // 195
1113 .long CSYM(sys_lstat64)
1114 .long CSYM(sys_fstat64)
1115 .long CSYM(sys_fcntl64)
1116 .long CSYM(sys_getdents64)
1117 .long CSYM(sys_pivot_root) // 200
1118 .long CSYM(sys_gettid)
1119 .long CSYM(sys_tkill)
1120sys_call_table_end:
1121C_END(sys_call_table)
diff --git a/arch/v850/kernel/fpga85e2c.c b/arch/v850/kernel/fpga85e2c.c
deleted file mode 100644
index ab9cf16a85c8..000000000000
--- a/arch/v850/kernel/fpga85e2c.c
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * arch/v850/kernel/fpga85e2c.c -- Machine-dependent defs for
3 * FPGA implementation of V850E2/NA85E2C
4 *
5 * Copyright (C) 2002,03 NEC Electronics Corporation
6 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/bootmem.h>
21#include <linux/irq.h>
22#include <linux/bitops.h>
23
24#include <asm/atomic.h>
25#include <asm/page.h>
26#include <asm/machdep.h>
27
28#include "mach.h"
29
30extern void memcons_setup (void);
31
32
33#define REG_DUMP_ADDR 0x220000
34
35
36extern struct irqaction reg_snap_action; /* fwd decl */
37
38
39void __init mach_early_init (void)
40{
41 int i;
42 const u32 *src;
43 register u32 *dst asm ("ep");
44 extern u32 _intv_end, _intv_load_start;
45
46 /* Set bus sizes: CS0 32-bit, CS1 16-bit, CS7 8-bit,
47 everything else 32-bit. */
48 V850E2_BSC = 0x2AA6;
49 for (i = 2; i <= 6; i++)
50 CSDEV(i) = 0; /* 32 bit */
51
52 /* Ensure that the simulator halts on a panic, instead of going
53 into an infinite loop inside the panic function. */
54 panic_timeout = -1;
55
56 /* Move the interrupt vectors into their real location. Note that
57 any relocations there are relative to the real location, so we
58 don't have to fix anything up. We use a loop instead of calling
59 memcpy to keep this a leaf function (to avoid a function
60 prologue being generated). */
61 dst = 0x10; /* &_intv_start + 0x10. */
62 src = &_intv_load_start;
63 do {
64 u32 t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];
65 u32 t4 = src[4], t5 = src[5], t6 = src[6], t7 = src[7];
66 dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
67 dst[4] = t4; dst[5] = t5; dst[6] = t6; dst[7] = t7;
68 dst += 8;
69 src += 8;
70 } while (dst < &_intv_end);
71}
72
73void __init mach_setup (char **cmdline)
74{
75 memcons_setup ();
76
77	/* Set up NMI0 to copy the registers to a known memory location.
78	   The FPGA board has a button that produces NMI0 when pressed, so
79 this allows us to push the button, and then look at memory to see
80 what's in the registers (there's no other way to easily do so).
81 We have to use `setup_irq' instead of `request_irq' because it's
82 still too early to do memory allocation. */
83 setup_irq (IRQ_NMI (0), &reg_snap_action);
84}
85
86void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len)
87{
88 *ram_start = ERAM_ADDR;
89 *ram_len = ERAM_SIZE;
90}
91
92void __init mach_sched_init (struct irqaction *timer_action)
93{
94	/* Set up the timer interrupt.  The FPGA peripheral control
95 registers _only_ work with single-bit writes (set1/clr1)! */
96 __clear_bit (RPU_GTMC_CE_BIT, &RPU_GTMC);
97 __clear_bit (RPU_GTMC_CLK_BIT, &RPU_GTMC);
98 __set_bit (RPU_GTMC_CE_BIT, &RPU_GTMC);
99
100 /* We use the first RPU interrupt, which occurs every 8.192ms. */
101 setup_irq (IRQ_RPU (0), timer_action);
102}
103
104
105void mach_gettimeofday (struct timespec *tv)
106{
107 tv->tv_sec = 0;
108 tv->tv_nsec = 0;
109}
110
111void machine_halt (void) __attribute__ ((noreturn));
112void machine_halt (void)
113{
114 for (;;) {
115 DWC(0) = 0x7777;
116 DWC(1) = 0x7777;
117 ASC = 0xffff;
118 FLGREG(0) = 1; /* Halt immediately. */
119 asm ("di; halt; nop; nop; nop; nop; nop");
120 }
121}
122
123void machine_restart (char *__unused)
124{
125 machine_halt ();
126}
127
128void machine_power_off (void)
129{
130 machine_halt ();
131}
132
133
134/* Interrupts */
135
136struct v850e_intc_irq_init irq_inits[] = {
137 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
138 { "RPU", IRQ_RPU(0), IRQ_RPU_NUM, 1, 6 },
139 { 0 }
140};
141#define NUM_IRQ_INITS (ARRAY_SIZE(irq_inits) - 1)
142
143struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
144
145/* Initialize interrupts. */
146void __init mach_init_irqs (void)
147{
148 v850e_intc_init_irq_types (irq_inits, hw_itypes);
149}
150
151
152/* An interrupt handler that copies the registers to a known memory location,
153 for debugging purposes. */
154
155static void make_reg_snap (int irq, void *dummy, struct pt_regs *regs)
156{
157 (*(unsigned *)REG_DUMP_ADDR)++;
158 (*(struct pt_regs *)(REG_DUMP_ADDR + sizeof (unsigned))) = *regs;
159}
160
161static int reg_snap_dev_id;
162static struct irqaction reg_snap_action = {
163 .handler = make_reg_snap,
164 .mask = CPU_MASK_NONE,
165 .name = "reg_snap",
166 .dev_id = &reg_snap_dev_id,
167};
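
The vector-copy loop in mach_early_init() above deliberately avoids memcpy so the function stays a leaf with no stack prologue. A minimal host-buildable sketch of the same eight-words-at-a-time pattern follows; copy_words and the buffer sizes are illustrative only, and the count is assumed to be a multiple of eight, as the vector area is in the original.

#include <stdio.h>
#include <stdint.h>

/* Copy COUNT 32-bit words from SRC to DST eight at a time, mirroring the
 * unrolled loop in mach_early_init().  COUNT is assumed to be a multiple
 * of eight (true for the interrupt-vector area being copied there). */
static void copy_words(uint32_t *dst, const uint32_t *src, unsigned count)
{
	const uint32_t *end = src + count;

	do {
		uint32_t t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];
		uint32_t t4 = src[4], t5 = src[5], t6 = src[6], t7 = src[7];

		dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
		dst[4] = t4; dst[5] = t5; dst[6] = t6; dst[7] = t7;
		dst += 8;
		src += 8;
	} while (src < end);
}

int main(void)
{
	uint32_t from[16], to[16];
	unsigned i;

	for (i = 0; i < 16; i++)
		from[i] = i;
	copy_words(to, from, 16);
	printf("to[15] = %u\n", (unsigned)to[15]);
	return 0;
}
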
diff --git a/arch/v850/kernel/fpga85e2c.ld b/arch/v850/kernel/fpga85e2c.ld
deleted file mode 100644
index b5d4578ae411..000000000000
--- a/arch/v850/kernel/fpga85e2c.ld
+++ /dev/null
@@ -1,62 +0,0 @@
1/* Linker script for the FPGA implementation of the V850E2 NA85E2C cpu core
2 (CONFIG_V850E2_FPGA85E2C). */
3
4MEMORY {
5 /* Reset vector. */
6 RESET : ORIGIN = 0, LENGTH = 0x10
7 /* Interrupt vectors. */
8 INTV : ORIGIN = 0x10, LENGTH = 0x470
9	/* The `window' in RAM where we're allowed to load stuff. */
10 RAM_LOW : ORIGIN = 0x480, LENGTH = 0x0005FB80
11	/* Some more RAM above the window where we can put bss &c. */
12 RAM_HIGH : ORIGIN = 0x00060000, LENGTH = 0x000A0000
13 /* This is the area visible from the outside world (we can use
14 this only for uninitialized data). */
15 VISIBLE : ORIGIN = 0x00200000, LENGTH = 0x00060000
16}
17
18SECTIONS {
19 .reset : {
20 __kram_start = . ;
21 __intv_start = . ;
22 *(.intv.reset) /* Reset vector */
23 } > RESET
24
25 .ram_low : {
26 __r0_ram = . ; /* Must be near address 0. */
27 . = . + 32 ;
28
29 TEXT_CONTENTS
30 DATA_CONTENTS
31 ROOT_FS_CONTENTS
32 RAMK_INIT_CONTENTS_NO_END
33 INITRAMFS_CONTENTS
34 } > RAM_LOW
35
36 /* Where the interrupt vectors are initially loaded. */
37 __intv_load_start = . ;
38
39 .intv : {
40 *(.intv.common) /* Vectors common to all v850e proc. */
41 *(.intv.mach) /* Machine-specific int. vectors. */
42 __intv_end = . ;
43 } > INTV AT> RAM_LOW
44
45 .ram_high : {
46 /* This is here so that when we free init memory the
47 load-time copy of the interrupt vectors and any empty
48	   space at the end of the `RAM_LOW' area are freed too. */
49 . = ALIGN (4096);
50 __init_end = . ;
51
52 BSS_CONTENTS
53 __kram_end = . ;
54 BOOTMAP_CONTENTS
55 } > RAM_HIGH
56
57 .visible : {
58 _memcons_output = . ;
59 . = . + 0x8000 ;
60 _memcons_output_end = . ;
61 } > VISIBLE
62}
diff --git a/arch/v850/kernel/gbus_int.c b/arch/v850/kernel/gbus_int.c
deleted file mode 100644
index b2bcc251f65b..000000000000
--- a/arch/v850/kernel/gbus_int.c
+++ /dev/null
@@ -1,271 +0,0 @@
1/*
2 * arch/v850/kernel/gbus_int.c -- Midas labs GBUS interrupt support
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/types.h>
15#include <linux/init.h>
16#include <linux/irq.h>
17#include <linux/interrupt.h>
18#include <linux/signal.h>
19#include <linux/kernel.h>
20
21#include <asm/machdep.h>
22
23
24/* The number of shared GINT interrupts. */
25#define NUM_GINTS 4
26
27/* For each GINT interrupt, how many GBUS interrupts are using it. */
28static unsigned gint_num_active_irqs[NUM_GINTS] = { 0 };
29
30/* A table of GINTn interrupts we actually use.
31 Note that we don't use GINT0 because all the boards we support treat it
32 specially. */
33struct used_gint {
34 unsigned gint;
35 unsigned priority;
36} used_gint[] = {
37 { 1, GBUS_INT_PRIORITY_HIGH },
38 { 3, GBUS_INT_PRIORITY_LOW }
39};
40#define NUM_USED_GINTS ARRAY_SIZE(used_gint)
41
42/* A table of which GINT is used by each GBUS interrupt (they are
43 assigned based on priority). */
44static unsigned char gbus_int_gint[IRQ_GBUS_INT_NUM];
45
46
47/* Interrupt enabling/disabling. */
48
49/* Enable interrupt handling for interrupt IRQ. */
50void gbus_int_enable_irq (unsigned irq)
51{
52 unsigned gint = gbus_int_gint[irq - GBUS_INT_BASE_IRQ];
53 GBUS_INT_ENABLE (GBUS_INT_IRQ_WORD(irq), gint)
54 |= GBUS_INT_IRQ_MASK (irq);
55}
56
57/* Disable interrupt handling for interrupt IRQ. Note that any
58 interrupts received while disabled will be delivered once the
59 interrupt is enabled again, unless they are explicitly cleared using
60 `gbus_int_clear_pending_irq'. */
61void gbus_int_disable_irq (unsigned irq)
62{
63 unsigned gint = gbus_int_gint[irq - GBUS_INT_BASE_IRQ];
64 GBUS_INT_ENABLE (GBUS_INT_IRQ_WORD(irq), gint)
65 &= ~GBUS_INT_IRQ_MASK (irq);
66}
67
68/* Return true if interrupt handling for interrupt IRQ is enabled. */
69int gbus_int_irq_enabled (unsigned irq)
70{
71 unsigned gint = gbus_int_gint[irq - GBUS_INT_BASE_IRQ];
72 return (GBUS_INT_ENABLE (GBUS_INT_IRQ_WORD(irq), gint)
73 & GBUS_INT_IRQ_MASK(irq));
74}
75
76/* Disable all GBUS irqs. */
77void gbus_int_disable_irqs ()
78{
79 unsigned w, n;
80 for (w = 0; w < GBUS_INT_NUM_WORDS; w++)
81 for (n = 0; n < IRQ_GINT_NUM; n++)
82 GBUS_INT_ENABLE (w, n) = 0;
83}
84
85/* Clear any pending interrupts for IRQ. */
86void gbus_int_clear_pending_irq (unsigned irq)
87{
88 GBUS_INT_CLEAR (GBUS_INT_IRQ_WORD(irq)) = GBUS_INT_IRQ_MASK (irq);
89}
90
91/* Return true if interrupt IRQ is pending (but disabled). */
92int gbus_int_irq_pending (unsigned irq)
93{
94 return (GBUS_INT_STATUS (GBUS_INT_IRQ_WORD(irq))
95 & GBUS_INT_IRQ_MASK(irq));
96}
97
98
99/* Delegating interrupts. */
100
101/* Handle a shared GINT interrupt by passing to the appropriate GBUS
102 interrupt handler. */
103static irqreturn_t gbus_int_handle_irq (int irq, void *dev_id,
104 struct pt_regs *regs)
105{
106 unsigned w;
107 irqreturn_t rval = IRQ_NONE;
108 unsigned gint = irq - IRQ_GINT (0);
109
110 for (w = 0; w < GBUS_INT_NUM_WORDS; w++) {
111 unsigned status = GBUS_INT_STATUS (w);
112 unsigned enable = GBUS_INT_ENABLE (w, gint);
113
114 /* Only pay attention to enabled interrupts. */
115 status &= enable;
116 if (status) {
117 irq = IRQ_GBUS_INT (w * GBUS_INT_BITS_PER_WORD);
118 do {
119 /* There's an active interrupt in word
120 W, find out which one, and call its
121 handler. */
122
123 while (! (status & 0x1)) {
124 irq++;
125 status >>= 1;
126 }
127 status &= ~0x1;
128
129 /* Recursively call handle_irq to handle it. */
130 handle_irq (irq, regs);
131 rval = IRQ_HANDLED;
132 } while (status);
133 }
134 }
135
136 /* Toggle the `all enable' bit back and forth, which should cause
137 another edge transition if there are any other interrupts
138 still pending, and so result in another CPU interrupt. */
139 GBUS_INT_ENABLE (0, gint) &= ~0x1;
140 GBUS_INT_ENABLE (0, gint) |= 0x1;
141
142 return rval;
143}
144
145
146/* Initialize GBUS interrupt sources. */
147
148static void irq_nop (unsigned irq) { }
149
150static unsigned gbus_int_startup_irq (unsigned irq)
151{
152 unsigned gint = gbus_int_gint[irq - GBUS_INT_BASE_IRQ];
153
154 if (gint_num_active_irqs[gint] == 0) {
155 /* First enable the CPU interrupt. */
156 int rval =
157 request_irq (IRQ_GINT(gint), gbus_int_handle_irq,
158 IRQF_DISABLED,
159 "gbus_int_handler",
160 &gint_num_active_irqs[gint]);
161 if (rval != 0)
162 return rval;
163 }
164
165 gint_num_active_irqs[gint]++;
166
167 gbus_int_clear_pending_irq (irq);
168 gbus_int_enable_irq (irq);
169
170 return 0;
171}
172
173static void gbus_int_shutdown_irq (unsigned irq)
174{
175 unsigned gint = gbus_int_gint[irq - GBUS_INT_BASE_IRQ];
176
177 gbus_int_disable_irq (irq);
178
179 if (--gint_num_active_irqs[gint] == 0)
180 /* Disable the CPU interrupt. */
181 free_irq (IRQ_GINT(gint), &gint_num_active_irqs[gint]);
182}
183
184/* Initialize HW_IRQ_TYPES for INTC-controlled irqs described in array
185 INITS (which is terminated by an entry with the name field == 0). */
186void __init gbus_int_init_irq_types (struct gbus_int_irq_init *inits,
187 struct hw_interrupt_type *hw_irq_types)
188{
189 struct gbus_int_irq_init *init;
190 for (init = inits; init->name; init++) {
191 unsigned i;
192 struct hw_interrupt_type *hwit = hw_irq_types++;
193
194 hwit->typename = init->name;
195
196 hwit->startup = gbus_int_startup_irq;
197 hwit->shutdown = gbus_int_shutdown_irq;
198 hwit->enable = gbus_int_enable_irq;
199 hwit->disable = gbus_int_disable_irq;
200 hwit->ack = irq_nop;
201 hwit->end = irq_nop;
202
203 /* Initialize kernel IRQ infrastructure for this interrupt. */
204 init_irq_handlers(init->base, init->num, init->interval, hwit);
205
206 /* Set the interrupt priorities. */
207 for (i = 0; i < init->num; i++) {
208 unsigned j;
209 for (j = 0; j < NUM_USED_GINTS; j++)
210 if (used_gint[j].priority > init->priority)
211 break;
212 /* Wherever we stopped looking is one past the
213 GINT we want. */
214 gbus_int_gint[init->base + i * init->interval
215 - GBUS_INT_BASE_IRQ]
216 = used_gint[j > 0 ? j - 1 : 0].gint;
217 }
218 }
219}
220
221
222/* Initialize IRQS. */
223
224/* Chip interrupts (GINTn) shared among GBUS interrupts. */
225static struct hw_interrupt_type gint_hw_itypes[NUM_USED_GINTS];
226
227
228/* GBUS interrupts themselves. */
229
230struct gbus_int_irq_init gbus_irq_inits[] __initdata = {
231 /* First set defaults. */
232 { "GBUS_INT", IRQ_GBUS_INT(0), IRQ_GBUS_INT_NUM, 1, 6},
233 { 0 }
234};
235#define NUM_GBUS_IRQ_INITS (ARRAY_SIZE(gbus_irq_inits) - 1)
236
237static struct hw_interrupt_type gbus_hw_itypes[NUM_GBUS_IRQ_INITS];
238
239
240/* Initialize GBUS interrupts. */
241void __init gbus_int_init_irqs (void)
242{
243 unsigned i;
244
245 /* First initialize the shared gint interrupts. */
246 for (i = 0; i < NUM_USED_GINTS; i++) {
247 unsigned gint = used_gint[i].gint;
248 struct v850e_intc_irq_init gint_irq_init[2];
249
250 /* We initialize one GINT interrupt at a time. */
251 gint_irq_init[0].name = "GINT";
252 gint_irq_init[0].base = IRQ_GINT (gint);
253 gint_irq_init[0].num = 1;
254 gint_irq_init[0].interval = 1;
255 gint_irq_init[0].priority = used_gint[i].priority;
256
257 gint_irq_init[1].name = 0; /* Terminate the vector. */
258
259 v850e_intc_init_irq_types (gint_irq_init, gint_hw_itypes);
260 }
261
262 /* Then the GBUS interrupts. */
263 gbus_int_disable_irqs ();
264 gbus_int_init_irq_types (gbus_irq_inits, gbus_hw_itypes);
265 /* Turn on the `all enable' bits, which are ANDed with
266 individual interrupt enable bits; we only want to bother with
267 the latter. They are the first bit in the first word of each
268 interrupt-enable area. */
269 for (i = 0; i < NUM_USED_GINTS; i++)
270 GBUS_INT_ENABLE (0, used_gint[i].gint) = 0x1;
271}
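
The inner loop of gbus_int_handle_irq() above finds each pending, enabled source in a status word and hands it to handle_irq. A self-contained sketch of that bit-scan follows; dispatch(), scan_word() and the example bit pattern are made up for illustration, whereas the real code dispatches into the kernel's IRQ layer.

#include <stdio.h>

/* Hypothetical per-IRQ handler used only for this illustration. */
static void dispatch(unsigned irq)
{
	printf("handling irq %u\n", irq);
}

/* Scan one status word (already masked with its enable bits) and dispatch
 * every set bit, mirroring the inner loop of gbus_int_handle_irq(). */
static void scan_word(unsigned base_irq, unsigned status)
{
	unsigned irq = base_irq;

	while (status) {
		while (!(status & 0x1)) {	/* skip cleared bits */
			irq++;
			status >>= 1;
		}
		status &= ~0x1;			/* consume this bit */
		dispatch(irq);
	}
}

int main(void)
{
	scan_word(32, 0x00000805);	/* bits 0, 2 and 11 pending */
	return 0;
}
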
diff --git a/arch/v850/kernel/head.S b/arch/v850/kernel/head.S
deleted file mode 100644
index c490b937ef14..000000000000
--- a/arch/v850/kernel/head.S
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * arch/v850/kernel/head.S -- Lowest-level startup code
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <asm/clinkage.h>
15#include <asm/current.h>
16#include <asm/entry.h>
17#include <asm/thread_info.h>
18#include <asm/irq.h>
19
20
21/* Make a slightly more convenient alias for C_SYMBOL_NAME. */
22#define CSYM C_SYMBOL_NAME
23
24
25 .text
26
27 // Define `mach_early_init' as a weak symbol
28 .global CSYM(mach_early_init)
29 .weak CSYM(mach_early_init)
30
31C_ENTRY(start):
32 // Make sure interrupts are turned off, just in case
33 di
34
35#ifdef CONFIG_RESET_GUARD
36 // See if we got here via an unexpected reset
37 ld.w RESET_GUARD, r19 // Check current value of reset guard
38 mov RESET_GUARD_ACTIVE, r20
39 cmp r19, r20
40 bne 1f // Guard was not active
41
42 // If we get here, the reset guard was active. Load up some
43 // interesting values as arguments, and jump to the handler.
44 st.w r0, RESET_GUARD // Allow further resets to succeed
45 mov lp, r6 // Arg 0: return address
46 ld.b KM, r7 // Arg 1: kernel mode
47 mov sp, r9 // Arg 3: stack pointer
48 ld.w KSP, r19 // maybe switch to kernel stack
49 cmp r7, r0 // see if already in kernel mode
50 cmov z, r19, sp, sp // and switch to kernel stack if not
51 GET_CURRENT_TASK(r8) // Arg 2: task pointer
52 jr CSYM(unexpected_reset)
53
541: st.w r20, RESET_GUARD // Turn on reset guard
55#endif /* CONFIG_RESET_GUARD */
56
57	// Set up a temporary stack for doing pre-initialization function calls.
58 //
59 // We can't use the initial kernel stack, because (1) it may be
60 // located in memory we're not allowed to touch, and (2) since
61 // it's in the data segment, calling memcpy to initialize that
62 // area from ROM will overwrite memcpy's return address.
63 mov hilo(CSYM(_init_stack_end) - 4), sp
64
65 // See if there's a platform-specific early-initialization routine
66 // defined; it's a weak symbol, so it will have an address of zero if
67 // there's not.
68 mov hilo(CSYM(mach_early_init)), r6
69 cmp r6, r0
70 bz 3f
71
72 // There is one, so call it. If this function is written in C, it
73 // should be very careful -- the stack pointer is valid, but very
74 // little else is (e.g., bss is not zeroed yet, and initialized data
75 // hasn't been).
76 jarl 2f, lp // first figure out return address
772: add 3f - ., lp
78 jmp [r6] // do call
793:
80
81#ifdef CONFIG_ROM_KERNEL
82 // Copy the data area from ROM to RAM
83 mov hilo(CSYM(_rom_copy_dst_start)), r6
84 mov hilo(CSYM(_rom_copy_src_start)), r7
85 mov hilo(CSYM(_rom_copy_dst_end)), r8
86 sub r6, r8
87 jarl CSYM(memcpy), lp
88#endif
89
90 // Load the initial thread's stack, and current task pointer (in r16)
91 mov hilo(CSYM(init_thread_union)), r19
92 movea THREAD_SIZE, r19, sp
93 ld.w TI_TASK[r19], CURRENT_TASK
94
95#ifdef CONFIG_TIME_BOOTUP
96 /* This stuff must come after mach_early_init, because interrupts may
97	   not work until after it's been called. */
98 jarl CSYM(highres_timer_reset), lp
99 jarl CSYM(highres_timer_start), lp
100#endif
101
102 // Kernel stack pointer save location
103 st.w sp, KSP
104
105 // Assert that we're in `kernel mode'
106 mov 1, r19
107 st.w r19, KM
108
109#ifdef CONFIG_ZERO_BSS
110 // Zero bss area, since we can't rely upon any loader to do so
111 mov hilo(CSYM(_sbss)), r6
112 mov r0, r7
113 mov hilo(CSYM(_ebss)), r8
114 sub r6, r8
115 jarl CSYM(memset), lp
116#endif
117
118 // What happens if the main kernel function returns (it shouldn't)
119 mov hilo(CSYM(machine_halt)), lp
120
121 // Start the linux kernel. We use an indirect jump to get extra
122 // range, because on some platforms this initial startup code
123 // (and the associated platform-specific code in mach_early_init)
124 // are located far away from the main kernel, e.g. so that they
125 // can initialize RAM first and copy the kernel or something.
126 mov hilo(CSYM(start_kernel)), r12
127 jmp [r12]
128C_END(start)
diff --git a/arch/v850/kernel/highres_timer.c b/arch/v850/kernel/highres_timer.c
deleted file mode 100644
index b16ad1eaf966..000000000000
--- a/arch/v850/kernel/highres_timer.c
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 * arch/v850/kernel/highres_timer.c -- High resolution timing routines
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <asm/system.h>
15#include <asm/v850e_timer_d.h>
16#include <asm/highres_timer.h>
17
18#define HIGHRES_TIMER_USEC_SHIFT 12
19
20/* Pre-calculated constant used for converting ticks to real time
21 units. We initialize it to prevent it being put into BSS. */
22static u32 highres_timer_usec_prescale = 1;
23
24void highres_timer_slow_tick_irq (void) __attribute__ ((noreturn));
25void highres_timer_slow_tick_irq (void)
26{
27	/* This is an interrupt handler, so it must be very careful
28 not to trash any registers. At this point, the stack-pointer
29 (r3) has been saved in the chip ram location ENTRY_SP by the
30 interrupt vector, so we can use it as a scratch register; we
31 must also restore it before returning. */
32 asm ("ld.w %0[r0], sp;"
33 "add 1, sp;"
34 "st.w sp, %0[r0];"
35 "ld.w %1[r0], sp;" /* restore pre-irq stack-pointer */
36 "reti"
37 ::
38 "i" (HIGHRES_TIMER_SLOW_TICKS_ADDR),
39 "i" (ENTRY_SP_ADDR)
40 : "memory");
41}
42
43void highres_timer_reset (void)
44{
45 V850E_TIMER_D_TMD (HIGHRES_TIMER_TIMER_D_UNIT) = 0;
46 HIGHRES_TIMER_SLOW_TICKS = 0;
47}
48
49void highres_timer_start (void)
50{
51 u32 fast_tick_rate;
52
53 /* Start hardware timer. */
54 v850e_timer_d_configure (HIGHRES_TIMER_TIMER_D_UNIT,
55 HIGHRES_TIMER_SLOW_TICK_RATE);
56
57 fast_tick_rate =
58 (V850E_TIMER_D_BASE_FREQ
59 >> V850E_TIMER_D_DIVLOG2 (HIGHRES_TIMER_TIMER_D_UNIT));
60
61 /* The obvious way of calculating microseconds from fast ticks
62 is to do:
63
64 usec = fast_ticks * 10^6 / fast_tick_rate
65
66 However, divisions are much slower than multiplications, and
67 the above calculation can overflow, so we do this instead:
68
69 usec = fast_ticks * (10^6 * 2^12 / fast_tick_rate) / 2^12
70
71	   since we can pre-calculate ((10^6 * 2^12) / fast_tick_rate)
72 and use a shift for dividing by 2^12, this avoids division,
73 and is almost as accurate (it differs by about 2 microseconds
74	   at the extreme value of the fast-tick counter's range). */
75 highres_timer_usec_prescale = ((1000000 << HIGHRES_TIMER_USEC_SHIFT)
76 / fast_tick_rate);
77
78 /* Enable the interrupt (which is hardwired to this use), and
79 give it the highest priority. */
80 V850E_INTC_IC (IRQ_INTCMD (HIGHRES_TIMER_TIMER_D_UNIT)) = 0;
81}
82
83void highres_timer_stop (void)
84{
85 /* Stop the timer. */
86 V850E_TIMER_D_TMCD (HIGHRES_TIMER_TIMER_D_UNIT) =
87 V850E_TIMER_D_TMCD_CAE;
88 /* Disable its interrupt, just in case. */
89 v850e_intc_disable_irq (IRQ_INTCMD (HIGHRES_TIMER_TIMER_D_UNIT));
90}
91
92inline void highres_timer_read_ticks (u32 *slow_ticks, u32 *fast_ticks)
93{
94	unsigned long flags;
95 u32 fast_ticks_1, fast_ticks_2, _slow_ticks;
96
97 local_irq_save (flags);
98 fast_ticks_1 = V850E_TIMER_D_TMD (HIGHRES_TIMER_TIMER_D_UNIT);
99 _slow_ticks = HIGHRES_TIMER_SLOW_TICKS;
100 fast_ticks_2 = V850E_TIMER_D_TMD (HIGHRES_TIMER_TIMER_D_UNIT);
101 local_irq_restore (flags);
102
103 if (fast_ticks_2 < fast_ticks_1)
104 _slow_ticks++;
105
106 *slow_ticks = _slow_ticks;
107 *fast_ticks = fast_ticks_2;
108}
109
110inline void highres_timer_ticks_to_timeval (u32 slow_ticks, u32 fast_ticks,
111 struct timeval *tv)
112{
113 unsigned long sec, sec_rem, usec;
114
115 usec = ((fast_ticks * highres_timer_usec_prescale)
116 >> HIGHRES_TIMER_USEC_SHIFT);
117
118 sec = slow_ticks / HIGHRES_TIMER_SLOW_TICK_RATE;
119 sec_rem = slow_ticks % HIGHRES_TIMER_SLOW_TICK_RATE;
120
121 usec += sec_rem * (1000000 / HIGHRES_TIMER_SLOW_TICK_RATE);
122
123 tv->tv_sec = sec;
124 tv->tv_usec = usec;
125}
126
127void highres_timer_read (struct timeval *tv)
128{
129 u32 fast_ticks, slow_ticks;
130 highres_timer_read_ticks (&slow_ticks, &fast_ticks);
131 highres_timer_ticks_to_timeval (slow_ticks, fast_ticks, tv);
132}
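
The comment in highres_timer_start() above derives the fixed-point trick that avoids a division on every read. A standalone sketch of that arithmetic with a concrete rate follows; the fast-tick rate and tick count are assumed example values, not taken from any particular board.

#include <stdio.h>
#include <stdint.h>

#define USEC_SHIFT 12	/* same shift as HIGHRES_TIMER_USEC_SHIFT */

int main(void)
{
	/* Example fast-tick rate; the real value depends on the board's
	 * timer D base frequency and divider (assumption for illustration). */
	uint32_t fast_tick_rate = 4194304;	/* 4.194304 MHz */

	/* Pre-computed constant: (10^6 << 12) / rate, as in the driver. */
	uint32_t prescale = ((uint32_t)1000000 << USEC_SHIFT) / fast_tick_rate;

	uint32_t fast_ticks = 12345;

	/* Fixed-point multiply-and-shift instead of a per-call division. */
	uint32_t usec = (fast_ticks * prescale) >> USEC_SHIFT;

	/* Reference value computed with 64-bit division for comparison. */
	uint32_t exact = (uint32_t)(((uint64_t)fast_ticks * 1000000) /
				    fast_tick_rate);

	printf("prescaled: %u us, exact: %u us\n",
	       (unsigned)usec, (unsigned)exact);
	return 0;
}
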
diff --git a/arch/v850/kernel/init_task.c b/arch/v850/kernel/init_task.c
deleted file mode 100644
index 44b274dff33f..000000000000
--- a/arch/v850/kernel/init_task.c
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * arch/v850/kernel/init_task.c -- Initial task/thread structures
3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 */
11
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <linux/sched.h>
15#include <linux/init.h>
16#include <linux/init_task.h>
17#include <linux/fs.h>
18#include <linux/mqueue.h>
19
20#include <asm/uaccess.h>
21#include <asm/pgtable.h>
22
23static struct fs_struct init_fs = INIT_FS;
24static struct signal_struct init_signals = INIT_SIGNALS (init_signals);
25static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
26struct mm_struct init_mm = INIT_MM (init_mm);
27
28EXPORT_SYMBOL(init_mm);
29
30/*
31 * Initial task structure.
32 *
33 * All other task structs will be allocated on slabs in fork.c
34 */
35struct task_struct init_task = INIT_TASK (init_task);
36
37EXPORT_SYMBOL(init_task);
38
39/*
40 * Initial thread structure.
41 *
42 * We need to make sure that this is 8192-byte aligned due to the
43 * way process stacks are handled. This is done by having a special
44 * "init_task" linker map entry.
45 */
46union thread_union init_thread_union
47 __attribute__((__section__(".data.init_task"))) =
48 { INIT_THREAD_INFO(init_task) };
diff --git a/arch/v850/kernel/intv.S b/arch/v850/kernel/intv.S
deleted file mode 100644
index 671e4c6150dd..000000000000
--- a/arch/v850/kernel/intv.S
+++ /dev/null
@@ -1,87 +0,0 @@
1/*
2 * arch/v850/kernel/intv.S -- Interrupt vectors
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <asm/clinkage.h>
15#include <asm/irq.h>
16#include <asm/machdep.h>
17#include <asm/entry.h>
18
19#ifdef CONFIG_V850E_HIGHRES_TIMER
20#include <asm/highres_timer.h>
21#endif
22
23/* Jump to an interrupt/trap handler. These handlers (defined in entry.S)
24 expect the stack-pointer to be saved in ENTRY_SP, so we use sp to do an
25 indirect jump (which avoids problems when the handler is more than a signed
26 22-bit offset away). */
27#define JUMP_TO_HANDLER(name, sp_save_loc) \
28 st.w sp, sp_save_loc; \
29 mov hilo(name), sp; \
30 jmp [sp]
31
32
33 /* Reset vector. */
34 .section .intv.reset, "ax"
35 .org 0x0
36 mov hilo(C_SYMBOL_NAME(start)), r1;
37 jmp [r1]
38
39
40 /* Generic interrupt vectors. */
41 .section .intv.common, "ax"
42 .balign 0x10
43 JUMP_TO_HANDLER (nmi, NMI_ENTRY_SP) // 0x10 - NMI0
44 .balign 0x10
45 JUMP_TO_HANDLER (nmi, NMI_ENTRY_SP) // 0x20 - NMI1
46 .balign 0x10
47 JUMP_TO_HANDLER (nmi, NMI_ENTRY_SP) // 0x30 - NMI2
48
49 .balign 0x10
50 JUMP_TO_HANDLER (trap, ENTRY_SP) // 0x40 - TRAP0n
51 .balign 0x10
52 JUMP_TO_HANDLER (trap, ENTRY_SP) // 0x50 - TRAP1n
53
54 .balign 0x10
55 JUMP_TO_HANDLER (dbtrap, ENTRY_SP) // 0x60 - Illegal op / DBTRAP insn
56
57
58 /* Hardware interrupt vectors. */
59 .section .intv.mach, "ax"
60 .org 0x0
61
62#if defined (CONFIG_V850E_HIGHRES_TIMER) && defined (IRQ_INTCMD)
63
64 /* Interrupts before the highres timer interrupt. */
65 .rept IRQ_INTCMD (HIGHRES_TIMER_TIMER_D_UNIT)
66 .balign 0x10
67 JUMP_TO_HANDLER (irq, ENTRY_SP)
68 .endr
69
70 /* The highres timer interrupt. */
71 .balign 0x10
72 JUMP_TO_HANDLER (C_SYMBOL_NAME (highres_timer_slow_tick_irq), ENTRY_SP)
73
74 /* Interrupts after the highres timer interrupt. */
75 .rept NUM_CPU_IRQS - IRQ_INTCMD (HIGHRES_TIMER_TIMER_D_UNIT) - 1
76 .balign 0x10
77 JUMP_TO_HANDLER (irq, ENTRY_SP)
78 .endr
79
80#else /* No highres timer */
81
82 .rept NUM_CPU_IRQS
83 .balign 0x10
84 JUMP_TO_HANDLER (irq, ENTRY_SP)
85 .endr
86
87#endif /* Highres timer */
diff --git a/arch/v850/kernel/irq.c b/arch/v850/kernel/irq.c
deleted file mode 100644
index 858c45819aab..000000000000
--- a/arch/v850/kernel/irq.c
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * arch/v850/kernel/irq.c -- High-level interrupt handling
3 *
4 * Copyright (C) 2001,02,03,04,05 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03,04,05 Miles Bader <miles@gnu.org>
6 * Copyright (C) 1994-2000 Ralf Baechle
7 * Copyright (C) 1992 Linus Torvalds
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file COPYING in the main directory of this
11 * archive for more details.
12 *
13 * This file was derived from the mips version, arch/mips/kernel/irq.c
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/irq.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/kernel_stat.h>
22#include <linux/slab.h>
23#include <linux/mm.h>
24#include <linux/random.h>
25#include <linux/seq_file.h>
26
27#include <asm/system.h>
28
29/*
30 * 'what should we do if we get a hw irq event on an illegal vector'.
31 * each architecture has to answer this themselves, it doesn't deserve
32 * a generic callback i think.
33 */
34void ack_bad_irq(unsigned int irq)
35{
36 printk("received IRQ %d with unknown interrupt type\n", irq);
37}
38
39volatile unsigned long irq_err_count, spurious_count;
40
41/*
42 * Generic, controller-independent functions:
43 */
44
45int show_interrupts(struct seq_file *p, void *v)
46{
47 int irq = *(loff_t *) v;
48
49 if (irq == 0) {
50 int cpu;
51 seq_puts(p, " ");
52 for (cpu=0; cpu < 1 /*smp_num_cpus*/; cpu++)
53 seq_printf(p, "CPU%d ", cpu);
54 seq_putc(p, '\n');
55 }
56
57 if (irq < NR_IRQS) {
58 unsigned long flags;
59 struct irqaction *action;
60
61 spin_lock_irqsave(&irq_desc[irq].lock, flags);
62
63 action = irq_desc[irq].action;
64 if (action) {
65 int j;
66 int count = 0;
67 int num = -1;
68 const char *type_name = irq_desc[irq].chip->typename;
69
70 for (j = 0; j < NR_IRQS; j++)
71 if (irq_desc[j].chip->typename == type_name){
72 if (irq == j)
73 num = count;
74 count++;
75 }
76
77 seq_printf(p, "%3d: ",irq);
78 seq_printf(p, "%10u ", kstat_irqs(irq));
79 if (count > 1) {
80 int prec = (num >= 100 ? 3 : num >= 10 ? 2 : 1);
81 seq_printf(p, " %*s%d", 14 - prec,
82 type_name, num);
83 } else
84 seq_printf(p, " %14s", type_name);
85
86 seq_printf(p, " %s", action->name);
87 for (action=action->next; action; action = action->next)
88 seq_printf(p, ", %s", action->name);
89 seq_putc(p, '\n');
90 }
91
92 spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
93 } else if (irq == NR_IRQS)
94 seq_printf(p, "ERR: %10lu\n", irq_err_count);
95
96 return 0;
97}
98
99/* Handle interrupt IRQ.  REGS are the registers at the time of the
100 interrupt. */
101unsigned int handle_irq (int irq, struct pt_regs *regs)
102{
103 irq_enter();
104 __do_IRQ(irq, regs);
105 irq_exit();
106 return 1;
107}
108
109/* Initialize irq handling for IRQs.
110   BASE_IRQ, BASE_IRQ+INTERVAL, ..., BASE_IRQ+(NUM-1)*INTERVAL
111 to IRQ_TYPE. An IRQ_TYPE of 0 means to use a generic interrupt type. */
112void __init
113init_irq_handlers (int base_irq, int num, int interval,
114 struct hw_interrupt_type *irq_type)
115{
116 while (num-- > 0) {
117 irq_desc[base_irq].status = IRQ_DISABLED;
118 irq_desc[base_irq].action = NULL;
119 irq_desc[base_irq].depth = 1;
120 irq_desc[base_irq].chip = irq_type;
121 base_irq += interval;
122 }
123}
diff --git a/arch/v850/kernel/ma.c b/arch/v850/kernel/ma.c
deleted file mode 100644
index 143774de75e1..000000000000
--- a/arch/v850/kernel/ma.c
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
2 * arch/v850/kernel/ma.c -- V850E/MA series of cpu chips
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/mm.h>
17#include <linux/swap.h>
18#include <linux/bootmem.h>
19#include <linux/irq.h>
20
21#include <asm/atomic.h>
22#include <asm/page.h>
23#include <asm/machdep.h>
24#include <asm/v850e_timer_d.h>
25
26#include "mach.h"
27
28void __init mach_sched_init (struct irqaction *timer_action)
29{
30 /* Start hardware timer. */
31 v850e_timer_d_configure (0, HZ);
32 /* Install timer interrupt handler. */
33 setup_irq (IRQ_INTCMD(0), timer_action);
34}
35
36static struct v850e_intc_irq_init irq_inits[] = {
37 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
38 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 5 },
39 { "DMA", IRQ_INTDMA(0), IRQ_INTDMA_NUM, 1, 2 },
40 { "CSI", IRQ_INTCSI(0), IRQ_INTCSI_NUM, 4, 4 },
41 { "SER", IRQ_INTSER(0), IRQ_INTSER_NUM, 4, 3 },
42 { "SR", IRQ_INTSR(0), IRQ_INTSR_NUM, 4, 4 },
43 { "ST", IRQ_INTST(0), IRQ_INTST_NUM, 4, 5 },
44 { 0 }
45};
46#define NUM_IRQ_INITS (ARRAY_SIZE(irq_inits) - 1)
47
48static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
49
50/* Initialize MA chip interrupts. */
51void __init ma_init_irqs (void)
52{
53 v850e_intc_init_irq_types (irq_inits, hw_itypes);
54}
55
56/* Called before configuring an on-chip UART. */
57void ma_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud)
58{
59 /* We only know about the first two UART channels (though
60 specific chips may have more). */
61 if (chan < 2) {
62 unsigned bits = 0x3 << (chan * 3);
63 /* Specify that the relevant pins on the chip should do
64 serial I/O, not direct I/O. */
65 MA_PORT4_PMC |= bits;
66 /* Specify that we're using the UART, not the CSI device. */
67 MA_PORT4_PFC |= bits;
68 }
69}
diff --git a/arch/v850/kernel/mach.c b/arch/v850/kernel/mach.c
deleted file mode 100644
index b9db278d2b71..000000000000
--- a/arch/v850/kernel/mach.c
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * arch/v850/kernel/mach.c -- Defaults for some things defined by "mach.h"
3 *
4 * Copyright (C) 2001 NEC Corporation
5 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include "mach.h"
15
16/* Called with each timer tick, if non-zero. */
17void (*mach_tick)(void) = 0;
diff --git a/arch/v850/kernel/mach.h b/arch/v850/kernel/mach.h
deleted file mode 100644
index 9e0e4816ec56..000000000000
--- a/arch/v850/kernel/mach.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * arch/v850/kernel/mach.h -- Machine-dependent functions used by v850 port
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_MACH_H__
15#define __V850_MACH_H__
16
17#include <linux/kernel.h>
18#include <linux/time.h>
19#include <linux/spinlock.h>
20#include <linux/interrupt.h>
21#include <linux/fs.h>
22#include <linux/seq_file.h>
23
24#include <asm/ptrace.h>
25#include <asm/entry.h>
26#include <asm/clinkage.h>
27
28void mach_setup (char **cmdline);
29void mach_gettimeofday (struct timespec *tv);
30void mach_sched_init (struct irqaction *timer_action);
31void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len);
32void mach_init_irqs (void);
33
34/* If defined, is called very early in the kernel initialization. The
35 stack pointer is valid, but very little has been initialized (e.g.,
36   bss is not zeroed yet) when this is called, so care must be taken. */
37void mach_early_init (void);
38
39/* If defined, called after the bootmem allocator has been initialized,
40 to allow the platform-dependent code to reserve any areas of RAM that
41 the kernel shouldn't touch. */
42void mach_reserve_bootmem (void) __attribute__ ((__weak__));
43
44/* Called with each timer tick, if non-zero. */
45extern void (*mach_tick) (void);
46
47/* The following establishes aliases for various mach_ functions to the
48 name by which the rest of the kernel calls them. These statements
49 should only have an effect in the file that defines the actual functions. */
50#define MACH_ALIAS(to, from) \
51 asm (".global " macrology_stringify (C_SYMBOL_NAME (to)) ";" \
52 macrology_stringify (C_SYMBOL_NAME (to)) \
53 " = " macrology_stringify (C_SYMBOL_NAME (from)))
54/* e.g.: MACH_ALIAS (kernel_name, arch_spec_name); */
55
56#endif /* __V850_MACH_H__ */
diff --git a/arch/v850/kernel/me2.c b/arch/v850/kernel/me2.c
deleted file mode 100644
index 007115dc9ce0..000000000000
--- a/arch/v850/kernel/me2.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * arch/v850/kernel/me2.c -- V850E/ME2 chip-specific support
3 *
4 * Copyright (C) 2003 NEC Corporation
5 * Copyright (C) 2003 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/mm.h>
17#include <linux/swap.h>
18#include <linux/bootmem.h>
19#include <linux/irq.h>
20
21#include <asm/atomic.h>
22#include <asm/page.h>
23#include <asm/machdep.h>
24#include <asm/v850e_timer_d.h>
25
26#include "mach.h"
27
28void __init mach_sched_init (struct irqaction *timer_action)
29{
30 /* Start hardware timer. */
31 v850e_timer_d_configure (0, HZ);
32 /* Install timer interrupt handler. */
33 setup_irq (IRQ_INTCMD(0), timer_action);
34}
35
36static struct v850e_intc_irq_init irq_inits[] = {
37 { "IRQ", 0, NUM_CPU_IRQS, 1, 7 },
38 { "INTP", IRQ_INTP(0), IRQ_INTP_NUM, 1, 5 },
39 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 3 },
40 { "UBTIRE", IRQ_INTUBTIRE(0), IRQ_INTUBTIRE_NUM, 5, 4 },
41 { "UBTIR", IRQ_INTUBTIR(0), IRQ_INTUBTIR_NUM, 5, 4 },
42 { "UBTIT", IRQ_INTUBTIT(0), IRQ_INTUBTIT_NUM, 5, 4 },
43 { "UBTIF", IRQ_INTUBTIF(0), IRQ_INTUBTIF_NUM, 5, 4 },
44 { "UBTITO", IRQ_INTUBTITO(0), IRQ_INTUBTITO_NUM, 5, 4 },
45 { 0 }
46};
47#define NUM_IRQ_INITS (ARRAY_SIZE(irq_inits) - 1)
48
49static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
50
51/* Initialize V850E/ME2 chip interrupts. */
52void __init me2_init_irqs (void)
53{
54 v850e_intc_init_irq_types (irq_inits, hw_itypes);
55}
56
57/* Called before configuring an on-chip UART. */
58void me2_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud)
59{
60 if (chan == 0) {
61 /* Specify that the relevant pins on the chip should do
62 serial I/O, not direct I/O. */
63 ME2_PORT1_PMC |= 0xC;
64 /* Specify that we're using the UART, not the CSI device. */
65 ME2_PORT1_PFC |= 0xC;
66 } else if (chan == 1) {
67 /* Specify that the relevant pins on the chip should do
68 serial I/O, not direct I/O. */
69 ME2_PORT2_PMC |= 0x6;
70 /* Specify that we're using the UART, not the CSI device. */
71 ME2_PORT2_PFC |= 0x6;
72 }
73}
diff --git a/arch/v850/kernel/memcons.c b/arch/v850/kernel/memcons.c
deleted file mode 100644
index 92f514fdcc79..000000000000
--- a/arch/v850/kernel/memcons.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * arch/v850/kernel/memcons.c -- Console I/O to a memory buffer
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/console.h>
16#include <linux/tty.h>
17#include <linux/tty_driver.h>
18#include <linux/init.h>
19
20/* If this device is enabled, the linker map should define start and
21 end points for its buffer. */
22extern char memcons_output[], memcons_output_end;
23
24/* Current offset into the buffer. */
25static unsigned long memcons_offs = 0;
26
27/* Spinlock protecting memcons_offs. */
28static DEFINE_SPINLOCK(memcons_lock);
29
30
31static size_t write (const char *buf, size_t len)
32{
33 unsigned long flags;
34 char *point;
35
36	spin_lock_irqsave (&memcons_lock, flags);
37
38 point = memcons_output + memcons_offs;
39 if (point + len >= &memcons_output_end) {
40 len = &memcons_output_end - point;
41 memcons_offs = 0;
42 } else
43 memcons_offs += len;
44
45	spin_unlock_irqrestore (&memcons_lock, flags);
46
47 memcpy (point, buf, len);
48
49 return len;
50}
51
52
53/* Low-level console. */
54
55static void memcons_write (struct console *co, const char *buf, unsigned len)
56{
57 while (len > 0)
58 len -= write (buf, len);
59}
60
61static struct tty_driver *tty_driver;
62
63static struct tty_driver *memcons_device (struct console *co, int *index)
64{
65 *index = co->index;
66 return tty_driver;
67}
68
69static struct console memcons =
70{
71 .name = "memcons",
72 .write = memcons_write,
73 .device = memcons_device,
74 .flags = CON_PRINTBUFFER,
75 .index = -1,
76};
77
78void memcons_setup (void)
79{
80 register_console (&memcons);
81 printk (KERN_INFO "Console: static memory buffer (memcons)\n");
82}
83
84/* Higher level TTY interface. */
85
86int memcons_tty_open (struct tty_struct *tty, struct file *filp)
87{
88 return 0;
89}
90
91int memcons_tty_write (struct tty_struct *tty, const unsigned char *buf, int len)
92{
93 return write (buf, len);
94}
95
96int memcons_tty_write_room (struct tty_struct *tty)
97{
98 return &memcons_output_end - (memcons_output + memcons_offs);
99}
100
101int memcons_tty_chars_in_buffer (struct tty_struct *tty)
102{
103 /* We have no buffer. */
104 return 0;
105}
106
107static const struct tty_operations ops = {
108 .open = memcons_tty_open,
109 .write = memcons_tty_write,
110 .write_room = memcons_tty_write_room,
111 .chars_in_buffer = memcons_tty_chars_in_buffer,
112};
113
114int __init memcons_tty_init (void)
115{
116 int err;
117 struct tty_driver *driver = alloc_tty_driver(1);
118 if (!driver)
119 return -ENOMEM;
120
121 driver->name = "memcons";
122 driver->major = TTY_MAJOR;
123 driver->minor_start = 64;
124 driver->type = TTY_DRIVER_TYPE_SYSCONS;
125 driver->init_termios = tty_std_termios;
126 tty_set_operations(driver, &ops);
127 err = tty_register_driver(driver);
128 if (err) {
129 put_tty_driver(driver);
130 return err;
131 }
132 tty_driver = driver;
133 return 0;
134}
135__initcall (memcons_tty_init);
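
write() above never splits a request across the end of the buffer; it truncates to the space left and resets the offset, relying on the caller (the loop in memcons_write()) to resubmit the remainder. A small user-space sketch of that contract follows; mem_write, the 16-byte buffer and the message are illustrative only.

#include <stdio.h>
#include <string.h>

/* Stand-in for the linker-provided memcons buffer (sizes are illustrative). */
static char buf[16];
static unsigned long offs;

/* Copy as much of LEN as fits before the end of the buffer; on reaching the
 * end, reset the offset so the next call starts over at the beginning. */
static size_t mem_write(const char *src, size_t len)
{
	char *point = buf + offs;

	if (offs + len >= sizeof buf) {
		len = sizeof buf - offs;
		offs = 0;
	} else {
		offs += len;
	}
	memcpy(point, src, len);
	return len;
}

int main(void)
{
	const char *msg = "hello, memcons world";
	size_t len = strlen(msg);

	while (len > 0) {		/* same retry loop as memcons_write() */
		size_t n = mem_write(msg, len);
		msg += n;
		len -= n;
	}
	printf("final offset: %lu\n", offs);
	return 0;
}
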
diff --git a/arch/v850/kernel/module.c b/arch/v850/kernel/module.c
deleted file mode 100644
index 64aeb3e37c52..000000000000
--- a/arch/v850/kernel/module.c
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * arch/v850/kernel/module.c -- Architecture-specific module functions
3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 * Copyright (C) 2001,03 Rusty Russell
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 *
14 * Derived in part from arch/ppc/kernel/module.c
15 */
16
17#include <linux/kernel.h>
18#include <linux/vmalloc.h>
19#include <linux/moduleloader.h>
20#include <linux/elf.h>
21
22#if 0
23#define DEBUGP printk
24#else
25#define DEBUGP(fmt , ...)
26#endif
27
28void *module_alloc (unsigned long size)
29{
30 return size == 0 ? 0 : vmalloc (size);
31}
32
33void module_free (struct module *mod, void *module_region)
34{
35 vfree (module_region);
36 /* FIXME: If module_region == mod->init_region, trim exception
37 table entries. */
38}
39
40int module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
41 struct module *mod)
42{
43 return 0;
44}
45
46/* Count how many different relocations (different symbol, different
47 addend) */
48static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
49{
50 unsigned int i, j, ret = 0;
51
52 /* Sure, this is order(n^2), but it's usually short, and not
53 time critical */
54 for (i = 0; i < num; i++) {
55 for (j = 0; j < i; j++) {
56 /* If this addend appeared before, it's
57 already been counted */
58 if (ELF32_R_SYM(rela[i].r_info)
59 == ELF32_R_SYM(rela[j].r_info)
60 && rela[i].r_addend == rela[j].r_addend)
61 break;
62 }
63 if (j == i) ret++;
64 }
65 return ret;
66}
67
68/* Get the potential trampolines size required of the init and
69 non-init sections */
70static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
71 const Elf32_Shdr *sechdrs,
72 const char *secstrings,
73 int is_init)
74{
75 unsigned long ret = 0;
76 unsigned i;
77
78 /* Everything marked ALLOC (this includes the exported
79 symbols) */
80 for (i = 1; i < hdr->e_shnum; i++) {
81 /* If it's called *.init*, and we're not init, we're
82 not interested */
83 if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
84 != is_init)
85 continue;
86
87 if (sechdrs[i].sh_type == SHT_RELA) {
88 DEBUGP("Found relocations in section %u\n", i);
89 DEBUGP("Ptr: %p. Number: %u\n",
90 (void *)hdr + sechdrs[i].sh_offset,
91 sechdrs[i].sh_size / sizeof(Elf32_Rela));
92 ret += count_relocs((void *)hdr
93 + sechdrs[i].sh_offset,
94 sechdrs[i].sh_size
95 / sizeof(Elf32_Rela))
96 * sizeof(struct v850_plt_entry);
97 }
98 }
99
100 return ret;
101}
102
103int module_frob_arch_sections(Elf32_Ehdr *hdr,
104 Elf32_Shdr *sechdrs,
105 char *secstrings,
106 struct module *me)
107{
108 unsigned int i;
109
110 /* Find .plt and .pltinit sections */
111 for (i = 0; i < hdr->e_shnum; i++) {
112 if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
113 me->arch.init_plt_section = i;
114 else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
115 me->arch.core_plt_section = i;
116 }
117 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
118		printk("Module doesn't contain .plt or .init.plt sections.\n");
119 return -ENOEXEC;
120 }
121
122 /* Override their sizes */
123 sechdrs[me->arch.core_plt_section].sh_size
124 = get_plt_size(hdr, sechdrs, secstrings, 0);
125 sechdrs[me->arch.init_plt_section].sh_size
126 = get_plt_size(hdr, sechdrs, secstrings, 1);
127 return 0;
128}
129
130int apply_relocate (Elf32_Shdr *sechdrs, const char *strtab,
131 unsigned int symindex, unsigned int relsec,
132 struct module *mod)
133{
134 printk ("Barf\n");
135 return -ENOEXEC;
136}
137
138/* Set up a trampoline in the PLT to bounce us to the distant function */
139static uint32_t do_plt_call (void *location, Elf32_Addr val,
140 Elf32_Shdr *sechdrs, struct module *mod)
141{
142 struct v850_plt_entry *entry;
143 /* Instructions used to do the indirect jump. */
144 uint32_t tramp[2];
145
146 /* We have to trash a register, so we assume that any control
147 transfer more than 21-bits away must be a function call
148 (so we can use a call-clobbered register). */
149 tramp[0] = 0x0621 + ((val & 0xffff) << 16); /* mov sym, r1 ... */
150 tramp[1] = ((val >> 16) & 0xffff) + 0x610000; /* ...; jmp r1 */
151
152 /* Init, or core PLT? */
153 if (location >= mod->module_core
154 && location < mod->module_core + mod->core_size)
155 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
156 else
157 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
158
159 /* Find this entry, or if that fails, the next avail. entry */
160 while (entry->tramp[0])
161 if (entry->tramp[0] == tramp[0] && entry->tramp[1] == tramp[1])
162 return (uint32_t)entry;
163 else
164 entry++;
165
166 entry->tramp[0] = tramp[0];
167 entry->tramp[1] = tramp[1];
168
169 return (uint32_t)entry;
170}
171
172int apply_relocate_add (Elf32_Shdr *sechdrs, const char *strtab,
173 unsigned int symindex, unsigned int relsec,
174 struct module *mod)
175{
176 unsigned int i;
177 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
178
179 DEBUGP ("Applying relocate section %u to %u\n", relsec,
180 sechdrs[relsec].sh_info);
181
182 for (i = 0; i < sechdrs[relsec].sh_size / sizeof (*rela); i++) {
183 /* This is where to make the change */
184 uint32_t *loc
185 = ((void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
186 + rela[i].r_offset);
187 /* This is the symbol it is referring to. Note that all
188 undefined symbols have been resolved. */
189 Elf32_Sym *sym
190 = ((Elf32_Sym *)sechdrs[symindex].sh_addr
191 + ELF32_R_SYM (rela[i].r_info));
192 uint32_t val = sym->st_value + rela[i].r_addend;
193
194 switch (ELF32_R_TYPE (rela[i].r_info)) {
195 case R_V850_32:
196 /* We write two shorts instead of a long because even
197 32-bit insns only need half-word alignment, but
198 32-bit data writes need to be long-word aligned. */
199 val += ((uint16_t *)loc)[0];
200 val += ((uint16_t *)loc)[1] << 16;
201 ((uint16_t *)loc)[0] = val & 0xffff;
202 ((uint16_t *)loc)[1] = (val >> 16) & 0xffff;
203 break;
204
205 case R_V850_22_PCREL:
206 /* Maybe jump indirectly via a PLT table entry. */
207 if ((int32_t)(val - (uint32_t)loc) > 0x1fffff
208 || (int32_t)(val - (uint32_t)loc) < -0x200000)
209 val = do_plt_call (loc, val, sechdrs, mod);
210
211 val -= (uint32_t)loc;
212
213 /* We write two shorts instead of a long because
214 even 32-bit insns only need half-word alignment,
215 but 32-bit data writes need to be long-word
216 aligned. */
217 ((uint16_t *)loc)[0] =
218 (*(uint16_t *)loc & 0xffc0) /* opcode + reg */
219 | ((val >> 16) & 0xffc03f); /* offs high */
220 ((uint16_t *)loc)[1] =
221 (val & 0xffff); /* offs low */
222 break;
223
224 default:
225 printk (KERN_ERR "module %s: Unknown reloc: %u\n",
226 mod->name, ELF32_R_TYPE (rela[i].r_info));
227 return -ENOEXEC;
228 }
229 }
230
231 return 0;
232}
233
234void
235module_arch_cleanup(struct module *mod)
236{
237}
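
do_plt_call() above synthesizes a two-word v850 trampoline, `mov sym, r1; jmp [r1]`, for targets beyond the ±21-bit reach of a PC-relative call. A host-side sketch of how the 32-bit target is packed into (and recoverable from) those two words follows, using the opcode constants from the function; the target address itself is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = 0x00245678;		/* example target address */
	uint32_t tramp[2];

	/* Pack the target the same way do_plt_call() does. */
	tramp[0] = 0x0621 + ((val & 0xffff) << 16);	/* mov sym, r1 ... */
	tramp[1] = ((val >> 16) & 0xffff) + 0x610000;	/* ...; jmp [r1]   */

	/* Pull the address back out to show the split round-trips cleanly. */
	uint32_t lo = tramp[0] >> 16;			/* low half of val  */
	uint32_t hi = tramp[1] & 0xffff;		/* high half of val */
	uint32_t back = (hi << 16) | lo;

	printf("tramp[0]=0x%08x tramp[1]=0x%08x target=0x%08x\n",
	       (unsigned)tramp[0], (unsigned)tramp[1], (unsigned)back);
	return 0;
}
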
diff --git a/arch/v850/kernel/process.c b/arch/v850/kernel/process.c
deleted file mode 100644
index e4a4b8e7d5a3..000000000000
--- a/arch/v850/kernel/process.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/*
2 * arch/v850/kernel/process.c -- Arch-dependent process handling
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/stddef.h>
20#include <linux/unistd.h>
21#include <linux/ptrace.h>
22#include <linux/slab.h>
23#include <linux/user.h>
24#include <linux/a.out.h>
25#include <linux/reboot.h>
26
27#include <asm/uaccess.h>
28#include <asm/system.h>
29#include <asm/pgtable.h>
30
31void (*pm_power_off)(void) = NULL;
32EXPORT_SYMBOL(pm_power_off);
33
34extern void ret_from_fork (void);
35
36
37/* The idle loop. */
38static void default_idle (void)
39{
40 while (! need_resched ())
41 asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
42}
43
44void (*idle)(void) = default_idle;
45
46/*
47 * The idle thread. There's no useful work to be
48 * done, so just try to conserve power and have a
49 * low exit latency (ie sit in a loop waiting for
50 * somebody to say that they'd like to reschedule)
51 */
52void cpu_idle (void)
53{
54 /* endless idle loop with no priority at all */
55 while (1) {
56 while (!need_resched())
57 (*idle) ();
58
59 preempt_enable_no_resched();
60 schedule();
61 preempt_disable();
62 }
63}
64
65/*
66 * This is the mechanism for creating a new kernel thread.
67 *
68 * NOTE! Only a kernel-only process (ie the swapper or direct descendants who
69 * haven't done an "execve()") should use this: it will work within a system
70 * call from a "real" process, but the process memory space will not be free'd
71 * until both the parent and the child have exited.
72 */
73int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
74{
75 register mm_segment_t fs = get_fs ();
76 register unsigned long syscall asm (SYSCALL_NUM);
77 register unsigned long arg0 asm (SYSCALL_ARG0);
78 register unsigned long ret asm (SYSCALL_RET);
79
80 set_fs (KERNEL_DS);
81
82 /* Clone this thread. Note that we don't pass the clone syscall's
83 second argument -- it's ignored for calls from kernel mode (the
84 child's SP is always set to the top of the kernel stack). */
85 arg0 = flags | CLONE_VM;
86 syscall = __NR_clone;
87 asm volatile ("trap " SYSCALL_SHORT_TRAP
88 : "=r" (ret), "=r" (syscall)
89 : "1" (syscall), "r" (arg0)
90 : SYSCALL_SHORT_CLOBBERS);
91
92 if (ret == 0) {
93 /* In child thread, call FN and exit. */
94 arg0 = (*fn) (arg);
95 syscall = __NR_exit;
96 asm volatile ("trap " SYSCALL_SHORT_TRAP
97 : "=r" (ret), "=r" (syscall)
98 : "1" (syscall), "r" (arg0)
99 : SYSCALL_SHORT_CLOBBERS);
100 }
101
102 /* In parent. */
103 set_fs (fs);
104
105 return ret;
106}
107
108void flush_thread (void)
109{
110 set_fs (USER_DS);
111}
112
113int copy_thread (int nr, unsigned long clone_flags,
114 unsigned long stack_start, unsigned long stack_size,
115 struct task_struct *p, struct pt_regs *regs)
116{
117 /* Start pushing stuff from the top of the child's kernel stack. */
118 unsigned long orig_ksp = task_tos(p);
119 unsigned long ksp = orig_ksp;
120	/* We push two `state save' stack frames (see entry.S) on the new
121 kernel stack:
122 1) The innermost one is what switch_thread would have
123 pushed, and is used when we context switch to the child
124 thread for the first time. It's set up to return to
125 ret_from_fork in entry.S.
126 2) The outermost one (nearest the top) is what a syscall
127 trap would have pushed, and is set up to return to the
128 same location as the parent thread, but with a return
129 value of 0. */
130 struct pt_regs *child_switch_regs, *child_trap_regs;
131
132 /* Trap frame. */
133 ksp -= STATE_SAVE_SIZE;
134 child_trap_regs = (struct pt_regs *)(ksp + STATE_SAVE_PT_OFFSET);
135 /* Switch frame. */
136 ksp -= STATE_SAVE_SIZE;
137 child_switch_regs = (struct pt_regs *)(ksp + STATE_SAVE_PT_OFFSET);
138
139 /* First copy parent's register state to child. */
140 *child_switch_regs = *regs;
141 *child_trap_regs = *regs;
142
143 /* switch_thread returns to the restored value of the lp
144 register (r31), so we make that the place where we want to
145 jump when the child thread begins running. */
146 child_switch_regs->gpr[GPR_LP] = (v850_reg_t)ret_from_fork;
147
148 if (regs->kernel_mode)
149 /* Since we're returning to kernel-mode, make sure the child's
150 stored kernel stack pointer agrees with what the actual
151 stack pointer will be at that point (the trap return code
152 always restores the SP, even when returning to
153 kernel-mode). */
154 child_trap_regs->gpr[GPR_SP] = orig_ksp;
155 else
156 /* Set the child's user-mode stack-pointer (the name
157 `stack_start' is a misnomer, it's just the initial SP
158 value). */
159 child_trap_regs->gpr[GPR_SP] = stack_start;
160
161 /* Thread state for the child (everything else is on the stack). */
162 p->thread.ksp = ksp;
163
164 return 0;
165}
166
167/*
168 * sys_execve() executes a new program.
169 */
170int sys_execve (char *name, char **argv, char **envp, struct pt_regs *regs)
171{
172 char *filename = getname (name);
173 int error = PTR_ERR (filename);
174
175 if (! IS_ERR (filename)) {
176 error = do_execve (filename, argv, envp, regs);
177 putname (filename);
178 }
179
180 return error;
181}
182
183
184/*
185 * These bracket the sleeping functions..
186 */
187#define first_sched ((unsigned long)__sched_text_start)
188#define last_sched ((unsigned long)__sched_text_end)
189
190unsigned long get_wchan (struct task_struct *p)
191{
192#if 0 /* Barf. Figure out the stack-layout later. XXX */
193 unsigned long fp, pc;
194 int count = 0;
195
196 if (!p || p == current || p->state == TASK_RUNNING)
197 return 0;
198
199 pc = thread_saved_pc (p);
200
201 /* This quite disgusting function walks up the stack, following
202	   saved return addresses, until it finds something that's out of bounds
203 (as defined by `first_sched' and `last_sched'). It then
204 returns the last PC that was in-bounds. */
205 do {
206 if (fp < stack_page + sizeof (struct task_struct) ||
207 fp >= 8184+stack_page)
208 return 0;
209 pc = ((unsigned long *)fp)[1];
210 if (pc < first_sched || pc >= last_sched)
211 return pc;
212 fp = *(unsigned long *) fp;
213 } while (count++ < 16);
214#endif
215
216 return 0;
217}
diff --git a/arch/v850/kernel/procfs.c b/arch/v850/kernel/procfs.c
deleted file mode 100644
index e433cde789b4..000000000000
--- a/arch/v850/kernel/procfs.c
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * arch/v850/kernel/procfs.c -- Introspection functions for /proc filesystem
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include "mach.h"
15
16static int cpuinfo_print (struct seq_file *m, void *v)
17{
18 extern unsigned long loops_per_jiffy;
19
20 seq_printf (m, "CPU-Family: v850\nCPU-Arch: %s\n", CPU_ARCH);
21
22#ifdef CPU_MODEL_LONG
23 seq_printf (m, "CPU-Model: %s (%s)\n", CPU_MODEL, CPU_MODEL_LONG);
24#else
25 seq_printf (m, "CPU-Model: %s\n", CPU_MODEL);
26#endif
27
28#ifdef CPU_CLOCK_FREQ
29 seq_printf (m, "CPU-Clock: %ld (%ld MHz)\n",
30 (long)CPU_CLOCK_FREQ,
31 (long)CPU_CLOCK_FREQ / 1000000);
32#endif
33
34 seq_printf (m, "BogoMips: %lu.%02lu\n",
35 loops_per_jiffy/(500000/HZ),
36 (loops_per_jiffy/(5000/HZ)) % 100);
37
38#ifdef PLATFORM_LONG
39 seq_printf (m, "Platform: %s (%s)\n", PLATFORM, PLATFORM_LONG);
40#elif defined (PLATFORM)
41 seq_printf (m, "Platform: %s\n", PLATFORM);
42#endif
43
44 return 0;
45}
46
47static void *cpuinfo_start (struct seq_file *m, loff_t *pos)
48{
49 return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL;
50}
51
52static void *cpuinfo_next (struct seq_file *m, void *v, loff_t *pos)
53{
54 ++*pos;
55 return cpuinfo_start (m, pos);
56}
57
58static void cpuinfo_stop (struct seq_file *m, void *v)
59{
60}
61
62const struct seq_operations cpuinfo_op = {
63 .start = cpuinfo_start,
64 .next = cpuinfo_next,
65 .stop = cpuinfo_stop,
66 .show = cpuinfo_print
67};
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
deleted file mode 100644
index a458ac941b25..000000000000
--- a/arch/v850/kernel/ptrace.c
+++ /dev/null
@@ -1,235 +0,0 @@
1/*
2 * arch/v850/kernel/ptrace.c -- `ptrace' system call
3 *
4 * Copyright (C) 2002,03,04 NEC Electronics Corporation
5 * Copyright (C) 2002,03,04 Miles Bader <miles@gnu.org>
6 *
7 * Derived from arch/mips/kernel/ptrace.c:
8 *
9 * Copyright (C) 1992 Ross Biro
10 * Copyright (C) Linus Torvalds
11 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
12 * Copyright (C) 1996 David S. Miller
13 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
14 * Copyright (C) 1999 MIPS Technologies, Inc.
15 *
16 * This file is subject to the terms and conditions of the GNU General
17 * Public License. See the file COPYING in the main directory of this
18 * archive for more details.
19 */
20
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/sched.h>
24#include <linux/ptrace.h>
25#include <linux/signal.h>
26
27#include <asm/errno.h>
28#include <asm/ptrace.h>
29#include <asm/processor.h>
30#include <asm/uaccess.h>
31
32/* Returns the address where the register at REG_OFFS in P is stashed away. */
33static v850_reg_t *reg_save_addr (unsigned reg_offs, struct task_struct *t)
34{
35 struct pt_regs *regs;
36
37 /* Three basic cases:
38
39	   (1) A register normally saved before calling the scheduler is
40 available in the kernel entry pt_regs structure at the top
41 of the kernel stack. The kernel trap/irq exit path takes
42 care to save/restore almost all registers for ptrace'd
43 processes.
44
45 (2) A call-clobbered register, where the process P entered the
46 kernel via [syscall] trap, is not stored anywhere; that's
47 OK, because such registers are not expected to be preserved
48 when the trap returns anyway (so we don't actually bother to
49 test for this case).
50
51 (3) A few registers not used at all by the kernel, and so
52 normally never saved except by context-switches, are in the
53 context switch state. */
54
55 if (reg_offs == PT_CTPC || reg_offs == PT_CTPSW || reg_offs == PT_CTBP)
56 /* Register saved during context switch. */
57 regs = thread_saved_regs (t);
58 else
59 /* Register saved during kernel entry (or not available). */
60 regs = task_pt_regs (t);
61
62 return (v850_reg_t *)((char *)regs + reg_offs);
63}
64
65/* Set the bits SET and clear the bits CLEAR in the v850e DIR
66 (`debug information register'). Returns the new value of DIR. */
67static inline v850_reg_t set_dir (v850_reg_t set, v850_reg_t clear)
68{
69 register v850_reg_t rval asm ("r10");
70 register v850_reg_t arg0 asm ("r6") = set;
71 register v850_reg_t arg1 asm ("r7") = clear;
72
73 /* The dbtrap handler has exactly this functionality when called
74 from kernel mode. 0xf840 is a `dbtrap' insn. */
75 asm (".short 0xf840" : "=r" (rval) : "r" (arg0), "r" (arg1));
76
77 return rval;
78}
79
80/* Makes sure hardware single-stepping is (globally) enabled.
81 Returns true if successful. */
82static inline int enable_single_stepping (void)
83{
84 static int enabled = 0; /* Remember whether we already did it. */
85 if (! enabled) {
86 /* Turn on the SE (`single-step enable') bit, 0x100, in the
87 DIR (`debug information register'). This may fail if a
88 processor doesn't support it or something. We also try
89 to clear bit 0x40 (`INI'), which is necessary to use the
90 debug stuff on the v850e2; on the v850e, clearing 0x40
91 shouldn't cause any problem. */
92 v850_reg_t dir = set_dir (0x100, 0x40);
93 /* Make sure it really got set. */
94 if (dir & 0x100)
95 enabled = 1;
96 }
97 return enabled;
98}
99
100/* Try to set CHILD's single-step flag to VAL. Returns true if successful. */
101static int set_single_step (struct task_struct *t, int val)
102{
103 v850_reg_t *psw_addr = reg_save_addr(PT_PSW, t);
104 if (val) {
105 /* Make sure single-stepping is enabled. */
106 if (! enable_single_stepping ())
107 return 0;
108 /* Set T's single-step flag. */
109 *psw_addr |= 0x800;
110 } else
111 *psw_addr &= ~0x800;
112 return 1;
113}
114
115long arch_ptrace(struct task_struct *child, long request, long addr, long data)
116{
117 int rval;
118
119 switch (request) {
120 unsigned long val;
121
122 case PTRACE_PEEKTEXT: /* read word at location addr. */
123 case PTRACE_PEEKDATA:
124 rval = generic_ptrace_peekdata(child, addr, data);
125 goto out;
126
127 case PTRACE_POKETEXT: /* write the word at location addr. */
128 case PTRACE_POKEDATA:
129 rval = generic_ptrace_pokedata(child, addr, data);
130 goto out;
131
132 /* Read/write the word at location ADDR in the registers. */
133 case PTRACE_PEEKUSR:
134 case PTRACE_POKEUSR:
135 rval = 0;
136 if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) {
137 /* Special requests that don't actually correspond
138 to offsets in struct pt_regs. */
139 if (addr == PT_TEXT_ADDR)
140 val = child->mm->start_code;
141 else if (addr == PT_DATA_ADDR)
142 val = child->mm->start_data;
143 else if (addr == PT_TEXT_LEN)
144 val = child->mm->end_code
145 - child->mm->start_code;
146 else
147 rval = -EIO;
148 } else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
149 v850_reg_t *reg_addr = reg_save_addr(addr, child);
150 if (request == PTRACE_PEEKUSR)
151 val = *reg_addr;
152 else
153 *reg_addr = data;
154 } else
155 rval = -EIO;
156
157 if (rval == 0 && request == PTRACE_PEEKUSR)
158 rval = put_user (val, (unsigned long *)data);
159 goto out;
160
161 /* Continue and stop at next (return from) syscall */
162 case PTRACE_SYSCALL:
163 /* Restart after a signal. */
164 case PTRACE_CONT:
165 /* Execute a single instruction. */
166 case PTRACE_SINGLESTEP:
167 rval = -EIO;
168 if (!valid_signal(data))
169 break;
170
171 /* Turn CHILD's single-step flag on or off. */
172 if (! set_single_step (child, request == PTRACE_SINGLESTEP))
173 break;
174
175 if (request == PTRACE_SYSCALL)
176 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
177 else
178 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
179
180 child->exit_code = data;
181 wake_up_process(child);
182 rval = 0;
183 break;
184
185 /*
186 * make the child exit. Best I can do is send it a sigkill.
187 * perhaps it should be put in the status that it wants to
188 * exit.
189 */
190 case PTRACE_KILL:
191 rval = 0;
192 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
193 break;
194 child->exit_code = SIGKILL;
195 wake_up_process(child);
196 break;
197
198 case PTRACE_DETACH: /* detach a process that was attached. */
199 set_single_step (child, 0); /* Clear single-step flag */
200 rval = ptrace_detach(child, data);
201 break;
202
203 default:
204 rval = -EIO;
205 goto out;
206 }
207 out:
208 return rval;
209}
210
211asmlinkage void syscall_trace(void)
212{
213 if (!test_thread_flag(TIF_SYSCALL_TRACE))
214 return;
215 if (!(current->ptrace & PT_PTRACED))
216 return;
217 /* The 0x80 provides a way for the tracing parent to distinguish
218 between a syscall stop and SIGTRAP delivery */
219 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
220 ? 0x80 : 0));
221 /*
222 * this isn't the same as continuing with a signal, but it will do
223 * for normal use. strace only continues with a signal if the
224 * stopping signal is not SIGTRAP. -brl
225 */
226 if (current->exit_code) {
227 send_sig(current->exit_code, current, 1);
228 current->exit_code = 0;
229 }
230}
231
232void ptrace_disable (struct task_struct *child)
233{
234 /* nothing to do */
235}
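
For the PTRACE_PEEKUSR case handled above, a hypothetical tracer-side sketch of reading one saved register by its pt_regs offset; REG_OFFS and the /bin/true child are placeholders, and userspace headers spell the request PTRACE_PEEKUSER:

/* Minimal tracer sketch; REG_OFFS must be a word-aligned PT_* offset. */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define REG_OFFS 0	/* placeholder register offset */

int main (void)
{
	pid_t pid = fork ();

	if (pid == 0) {
		ptrace (PTRACE_TRACEME, 0, NULL, NULL);	/* let the parent trace us */
		execl ("/bin/true", "true", (char *)NULL);
		_exit (1);
	}

	waitpid (pid, NULL, 0);		/* child stops with SIGTRAP on exec */
	printf ("reg at offset %d = %#lx\n", REG_OFFS,
		(unsigned long)ptrace (PTRACE_PEEKUSER, pid,
				       (void *)(long)REG_OFFS, NULL));
	ptrace (PTRACE_CONT, pid, NULL, NULL);
	waitpid (pid, NULL, 0);
	return 0;
}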
diff --git a/arch/v850/kernel/rte_cb.c b/arch/v850/kernel/rte_cb.c
deleted file mode 100644
index 43018e1edebd..000000000000
--- a/arch/v850/kernel/rte_cb.c
+++ /dev/null
@@ -1,193 +0,0 @@
1/*
2 * include/asm-v850/rte_cb.c -- Midas lab RTE-CB series of evaluation boards
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/init.h>
15#include <linux/irq.h>
16#include <linux/fs.h>
17#include <linux/module.h>
18#include <linux/kernel.h>
19
20#include <asm/machdep.h>
21#include <asm/v850e_uart.h>
22
23#include "mach.h"
24
25static void led_tick (void);
26
27/* LED access routines. */
28extern unsigned read_leds (int pos, char *buf, int len);
29extern unsigned write_leds (int pos, const char *buf, int len);
30
31#ifdef CONFIG_RTE_CB_MULTI
32extern void multi_init (void);
33#endif
34
35
36void __init rte_cb_early_init (void)
37{
38 v850e_intc_disable_irqs ();
39
40#ifdef CONFIG_RTE_CB_MULTI
41 multi_init ();
42#endif
43}
44
45void __init mach_setup (char **cmdline)
46{
47#ifdef CONFIG_RTE_MB_A_PCI
48 /* Probe for Mother-A, and print a message if we find it. */
49 *(volatile unsigned long *)MB_A_SRAM_ADDR = 0xDEADBEEF;
50 if (*(volatile unsigned long *)MB_A_SRAM_ADDR == 0xDEADBEEF) {
51 *(volatile unsigned long *)MB_A_SRAM_ADDR = 0x12345678;
52 if (*(volatile unsigned long *)MB_A_SRAM_ADDR == 0x12345678)
53 printk (KERN_INFO
54 " NEC SolutionGear/Midas lab"
55 " RTE-MOTHER-A motherboard\n");
56 }
57#endif /* CONFIG_RTE_MB_A_PCI */
58
59 mach_tick = led_tick;
60}
61
62void machine_restart (char *__unused)
63{
64#ifdef CONFIG_RESET_GUARD
65 disable_reset_guard ();
66#endif
67 asm ("jmp r0"); /* Jump to the reset vector. */
68}
69
70/* This says `HALt.' in LEDese. */
71static unsigned char halt_leds_msg[] = { 0x76, 0x77, 0x38, 0xF8 };
72
73void machine_halt (void)
74{
75#ifdef CONFIG_RESET_GUARD
76 disable_reset_guard ();
77#endif
78
79 /* Ignore all interrupts. */
80 local_irq_disable ();
81
82 /* Write a little message. */
83 write_leds (0, halt_leds_msg, sizeof halt_leds_msg);
84
85 /* Really halt. */
86 for (;;)
87 asm ("halt; nop; nop; nop; nop; nop");
88}
89
90void machine_power_off (void)
91{
92 machine_halt ();
93}
94
95
96/* Animated LED display for timer tick. */
97
98#define TICK_UPD_FREQ 6
99static int tick_frames[][10] = {
100 { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, -1 },
101 { 0x63, 0x5c, -1 },
102 { 0x5c, 0x00, -1 },
103 { 0x63, 0x00, -1 },
104 { -1 }
105};
106
107static void led_tick ()
108{
109 static unsigned counter = 0;
110
111 if (++counter == (HZ / TICK_UPD_FREQ)) {
112 /* Which frame we're currently displaying for each digit. */
113 static unsigned frame_nums[LED_NUM_DIGITS] = { 0 };
114 /* Display image. */
115 static unsigned char image[LED_NUM_DIGITS] = { 0 };
116 unsigned char prev_image[LED_NUM_DIGITS];
117 int write_to_leds = 1; /* true if we should actually display */
118 int digit;
119
120		/* We check to see if the physical LEDs contain what we last
121 wrote to them; if not, we suppress display (this is so that
122 users can write to the LEDs, and not have their output
123 overwritten). As a special case, we start writing again if
124 all the LEDs are blank, or our display image is all zeros
125 (indicating that this is the initial update, when the actual
126 LEDs might contain random data). */
127 read_leds (0, prev_image, LED_NUM_DIGITS);
128 for (digit = 0; digit < LED_NUM_DIGITS; digit++)
129 if (image[digit] != prev_image[digit]
130 && image[digit] && prev_image[digit])
131 {
132 write_to_leds = 0;
133 break;
134 }
135
136 /* Update display image. */
137 for (digit = 0;
138 digit < LED_NUM_DIGITS && tick_frames[digit][0] >= 0;
139 digit++)
140 {
141 int frame = tick_frames[digit][frame_nums[digit]];
142 if (frame < 0) {
143 image[digit] = tick_frames[digit][0];
144 frame_nums[digit] = 1;
145 } else {
146 image[digit] = frame;
147 frame_nums[digit]++;
148 break;
149 }
150 }
151
152 if (write_to_leds)
153 /* Write the display image to the physical LEDs. */
154 write_leds (0, image, LED_NUM_DIGITS);
155
156 counter = 0;
157 }
158}
159
160
161/* Mother-A interrupts. */
162
163#ifdef CONFIG_RTE_GBUS_INT
164
165#define L GBUS_INT_PRIORITY_LOW
166#define M GBUS_INT_PRIORITY_MEDIUM
167#define H GBUS_INT_PRIORITY_HIGH
168
169static struct gbus_int_irq_init gbus_irq_inits[] = {
170#ifdef CONFIG_RTE_MB_A_PCI
171 { "MB_A_LAN", IRQ_MB_A_LAN, 1, 1, L },
172 { "MB_A_PCI1", IRQ_MB_A_PCI1(0), IRQ_MB_A_PCI1_NUM, 1, L },
173 { "MB_A_PCI2", IRQ_MB_A_PCI2(0), IRQ_MB_A_PCI2_NUM, 1, L },
174 { "MB_A_EXT", IRQ_MB_A_EXT(0), IRQ_MB_A_EXT_NUM, 1, L },
175 { "MB_A_USB_OC",IRQ_MB_A_USB_OC(0), IRQ_MB_A_USB_OC_NUM, 1, L },
176 { "MB_A_PCMCIA_OC",IRQ_MB_A_PCMCIA_OC, 1, 1, L },
177#endif
178 { 0 }
179};
180#define NUM_GBUS_IRQ_INITS (ARRAY_SIZE(gbus_irq_inits) - 1)
181
182static struct hw_interrupt_type gbus_hw_itypes[NUM_GBUS_IRQ_INITS];
183
184#endif /* CONFIG_RTE_GBUS_INT */
185
186
187void __init rte_cb_init_irqs (void)
188{
189#ifdef CONFIG_RTE_GBUS_INT
190 gbus_int_init_irqs ();
191 gbus_int_init_irq_types (gbus_irq_inits, gbus_hw_itypes);
192#endif /* CONFIG_RTE_GBUS_INT */
193}
diff --git a/arch/v850/kernel/rte_cb_leds.c b/arch/v850/kernel/rte_cb_leds.c
deleted file mode 100644
index aa47ab1dcd87..000000000000
--- a/arch/v850/kernel/rte_cb_leds.c
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * include/asm-v850/rte_cb_leds.c -- Midas lab RTE-CB board LED device support
3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/init.h>
15#include <linux/spinlock.h>
16#include <linux/fs.h>
17#include <linux/miscdevice.h>
18
19#include <asm/uaccess.h>
20
21#define LEDS_MINOR 169 /* Minor device number, using misc major. */
22
23/* The actual LED hardware is write-only, so we hold the contents here too. */
24static unsigned char leds_image[LED_NUM_DIGITS] = { 0 };
25
26/* Spinlock protecting the above leds. */
27static DEFINE_SPINLOCK(leds_lock);
28
29/* Common body of LED read/write functions, checks POS and LEN for
30   correctness, declares a variable using IMG_DECL, initialized to point at
31   the POS position in the LED image buffer, and iterates COPY_EXPR
32 until BUF is equal to the last buffer position; finally, sets LEN to be
33 the amount actually copied. IMG should be a variable declaration
34 (without an initializer or a terminating semicolon); POS, BUF, and LEN
35 should all be simple variables. */
36#define DO_LED_COPY(img_decl, pos, buf, len, copy_expr) \
37do { \
38 if (pos > LED_NUM_DIGITS) \
39 len = 0; \
40 else { \
41 if (pos + len > LED_NUM_DIGITS) \
42 len = LED_NUM_DIGITS - pos; \
43 \
44 if (len > 0) { \
45 unsigned long _flags; \
46 const char *_end = buf + len; \
47 img_decl = &leds_image[pos]; \
48 \
49 spin_lock_irqsave (leds_lock, _flags); \
50 do \
51 (copy_expr); \
52 while (buf != _end); \
53 spin_unlock_irqrestore (leds_lock, _flags); \
54 } \
55 } \
56} while (0)
57
58/* Read LEN bytes from LEDs at position POS, into BUF.
59 Returns actual amount read. */
60unsigned read_leds (unsigned pos, char *buf, unsigned len)
61{
62 DO_LED_COPY (const char *img, pos, buf, len, *buf++ = *img++);
63 return len;
64}
65
66/* Write LEN bytes to LEDs at position POS, from BUF.
67 Returns actual amount written. */
68unsigned write_leds (unsigned pos, const char *buf, unsigned len)
69{
70 /* We write the actual LED values backwards, because
71 increasing memory addresses reflect LEDs right-to-left. */
72 volatile char *led = &LED (LED_NUM_DIGITS - pos - 1);
73 /* We invert the value written to the hardware, because 1 = off,
74 and 0 = on. */
75 DO_LED_COPY (char *img, pos, buf, len,
76 *led-- = 0xFF ^ (*img++ = *buf++));
77 return len;
78}
79
80
81/* Device functions. */
82
83static ssize_t leds_dev_read (struct file *file, char *buf, size_t len,
84 loff_t *pos)
85{
86 char temp_buf[LED_NUM_DIGITS];
87 len = read_leds (*pos, temp_buf, len);
88 if (copy_to_user (buf, temp_buf, len))
89 return -EFAULT;
90 *pos += len;
91 return len;
92}
93
94static ssize_t leds_dev_write (struct file *file, const char *buf, size_t len,
95 loff_t *pos)
96{
97 char temp_buf[LED_NUM_DIGITS];
98 if (copy_from_user (temp_buf, buf, min_t(size_t, len, LED_NUM_DIGITS)))
99 return -EFAULT;
100 len = write_leds (*pos, temp_buf, len);
101 *pos += len;
102 return len;
103}
104
105static loff_t leds_dev_lseek (struct file *file, loff_t offs, int whence)
106{
107 if (whence == 1)
108 offs += file->f_pos; /* relative */
109 else if (whence == 2)
110 offs += LED_NUM_DIGITS; /* end-relative */
111
112 if (offs < 0 || offs > LED_NUM_DIGITS)
113 return -EINVAL;
114
115 file->f_pos = offs;
116
117	return offs;
118}
119
120static const struct file_operations leds_fops = {
121 .read = leds_dev_read,
122 .write = leds_dev_write,
123 .llseek = leds_dev_lseek
124};
125
126static struct miscdevice leds_miscdev = {
127 .name = "leds",
128 .minor = LEDS_MINOR,
129 .fops = &leds_fops
130};
131
132int __init leds_dev_init (void)
133{
134 return misc_register (&leds_miscdev);
135}
136
137__initcall (leds_dev_init);
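
A hypothetical userspace sketch of driving the misc device above, assuming a /dev/leds node has been created for misc minor 169 and four LED digits; the bytes are raw 7-segment patterns, the same `HALt' message used by halt_leds_msg in rte_cb.c:

#include <fcntl.h>
#include <unistd.h>

int main (void)
{
	static const unsigned char msg[4] = { 0x76, 0x77, 0x38, 0xF8 };
	int fd = open ("/dev/leds", O_WRONLY);

	if (fd < 0)
		return 1;
	write (fd, msg, sizeof msg);	/* writes digits 0..3 (file offset 0) */
	close (fd);
	return 0;
}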
diff --git a/arch/v850/kernel/rte_cb_multi.c b/arch/v850/kernel/rte_cb_multi.c
deleted file mode 100644
index 963d55ab34cc..000000000000
--- a/arch/v850/kernel/rte_cb_multi.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * include/asm-v850/rte_multi.c -- Support for Multi debugger monitor ROM
3 * on Midas lab RTE-CB series of evaluation boards
4 *
5 * Copyright (C) 2001,02,03 NEC Electronics Corporation
6 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#include <linux/init.h>
16
17#include <asm/machdep.h>
18
19#define IRQ_ADDR(irq) (0x80 + (irq) * 0x10)
20
21/* A table of which interrupt vectors to install, since blindly
22 installing all of them makes the debugger stop working. This is a
23 list of offsets in the interrupt vector area; each entry means to
24 copy that particular 16-byte vector. An entry less than zero ends
25 the table. */
26static long multi_intv_install_table[] = {
27 /* Trap vectors */
28 0x40, 0x50,
29
30#ifdef CONFIG_RTE_CB_MULTI_DBTRAP
31 /* Illegal insn / dbtrap. These are used by multi, so only handle
32 them if configured to do so. */
33 0x60,
34#endif
35
36 /* GINT1 - GINT3 (note, not GINT0!) */
37 IRQ_ADDR (IRQ_GINT(1)),
38 IRQ_ADDR (IRQ_GINT(2)),
39 IRQ_ADDR (IRQ_GINT(3)),
40
41 /* Timer D interrupts (up to 4 timers) */
42 IRQ_ADDR (IRQ_INTCMD(0)),
43#if IRQ_INTCMD_NUM > 1
44 IRQ_ADDR (IRQ_INTCMD(1)),
45#if IRQ_INTCMD_NUM > 2
46 IRQ_ADDR (IRQ_INTCMD(2)),
47#if IRQ_INTCMD_NUM > 3
48 IRQ_ADDR (IRQ_INTCMD(3)),
49#endif
50#endif
51#endif
52
53 /* UART interrupts (up to 3 channels) */
54 IRQ_ADDR (IRQ_INTSER (0)), /* err */
55 IRQ_ADDR (IRQ_INTSR (0)), /* rx */
56 IRQ_ADDR (IRQ_INTST (0)), /* tx */
57#if IRQ_INTSR_NUM > 1
58 IRQ_ADDR (IRQ_INTSER (1)), /* err */
59 IRQ_ADDR (IRQ_INTSR (1)), /* rx */
60 IRQ_ADDR (IRQ_INTST (1)), /* tx */
61#if IRQ_INTSR_NUM > 2
62 IRQ_ADDR (IRQ_INTSER (2)), /* err */
63 IRQ_ADDR (IRQ_INTSR (2)), /* rx */
64 IRQ_ADDR (IRQ_INTST (2)), /* tx */
65#endif
66#endif
67
68 -1
69};
70
71/* Early initialization for kernel using Multi debugger ROM monitor. */
72void __init multi_init (void)
73{
74 /* We're using the Multi debugger monitor, so we have to install
75 the interrupt vectors. The monitor doesn't allow them to be
76 initially downloaded into their final destination because
77 it's in the monitor's scratch-RAM area. Unfortunately, Multi
78 also doesn't deal correctly with ELF sections where the LMA
79 and VMA differ -- it just ignores the LMA -- so we can't use
80 that feature to work around the problem. What we do instead
81 is just put the interrupt vectors into a normal section, and
82 do the necessary copying and relocation here. Since the
83 interrupt vector basically only contains `jr' instructions
84 and no-ops, it's not that hard. */
85 extern unsigned long _intv_load_start, _intv_start;
86 register unsigned long *src = &_intv_load_start;
87 register unsigned long *dst = (unsigned long *)INTV_BASE;
88 register unsigned long jr_fixup = (char *)&_intv_start - (char *)dst;
89 register long *ii;
90
91 /* Copy interrupt vectors as instructed by multi_intv_install_table. */
92 for (ii = multi_intv_install_table; *ii >= 0; ii++) {
93 /* Copy 16-byte interrupt vector at offset *ii. */
94 int boffs;
95 for (boffs = 0; boffs < 0x10; boffs += sizeof *src) {
96 /* Copy a single word, fixing up the jump offs
97 if it's a `jr' instruction. */
98 int woffs = (*ii + boffs) / sizeof *src;
99 unsigned long word = src[woffs];
100
101 if ((word & 0xFC0) == 0x780) {
102 /* A `jr' insn, fix up its offset (and yes, the
103 weird half-word swapping is intentional). */
104 unsigned short hi = word & 0xFFFF;
105 unsigned short lo = word >> 16;
106 unsigned long udisp22
107 = lo + ((hi & 0x3F) << 16);
108 long disp22 = (long)(udisp22 << 10) >> 10;
109
110 disp22 += jr_fixup;
111
112 hi = ((disp22 >> 16) & 0x3F) | 0x780;
113 lo = disp22 & 0xFFFF;
114
115 word = hi + (lo << 16);
116 }
117
118 dst[woffs] = word;
119 }
120 }
121}
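
The `jr' fixup above stores a 22-bit signed displacement across two swapped half-words, and the `(long)(udisp22 << 10) >> 10' expression is just a sign extension of bit 21.  A small sketch of the decode step in isolation, assuming 32-bit long as on the v850:

/* Extract the signed 22-bit displacement from a `jr' instruction word,
   as in the fixup loop above.  Bit 21 (0x200000) is the sign bit, so
   shifting up to bit 31 and back down arithmetically sign-extends it;
   e.g. 0x3FFFFF decodes to -1. */
static long jr_disp22 (unsigned long word)
{
	unsigned short hi = word & 0xFFFF;	/* first half-word: high bits */
	unsigned short lo = word >> 16;		/* second half-word: low bits */
	unsigned long udisp22 = lo + ((hi & 0x3F) << 16);

	return (long)(udisp22 << 10) >> 10;
}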
diff --git a/arch/v850/kernel/rte_ma1_cb-rom.ld b/arch/v850/kernel/rte_ma1_cb-rom.ld
deleted file mode 100644
index 87b618f8253b..000000000000
--- a/arch/v850/kernel/rte_ma1_cb-rom.ld
+++ /dev/null
@@ -1,14 +0,0 @@
1/* Linker script for the Midas labs RTE-V850E/MA1-CB evaluation board
2 (CONFIG_RTE_CB_MA1), with kernel in ROM. */
3
4MEMORY {
5 ROM : ORIGIN = 0x00000000, LENGTH = 0x00100000
6 /* 1MB of SRAM. This memory is mirrored 4 times. */
7 SRAM : ORIGIN = SRAM_ADDR, LENGTH = SRAM_SIZE
8 /* 32MB of SDRAM. */
9 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
10}
11
12SECTIONS {
13 ROMK_SECTIONS(ROM, SRAM)
14}
diff --git a/arch/v850/kernel/rte_ma1_cb.c b/arch/v850/kernel/rte_ma1_cb.c
deleted file mode 100644
index 08abf3d5f8df..000000000000
--- a/arch/v850/kernel/rte_ma1_cb.c
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * arch/v850/kernel/rte_ma1_cb.c -- Midas labs RTE-V850E/MA1-CB board
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/bootmem.h>
17
18#include <asm/atomic.h>
19#include <asm/page.h>
20#include <asm/ma1.h>
21#include <asm/rte_ma1_cb.h>
22#include <asm/v850e_timer_c.h>
23
24#include "mach.h"
25
26
27/* SRAM and SDRAM are almost contiguous (with a small hole in between;
28 see mach_reserve_bootmem for details), so just use both as one big area. */
29#define RAM_START SRAM_ADDR
30#define RAM_END (SDRAM_ADDR + SDRAM_SIZE)
31
32
33void __init mach_early_init (void)
34{
35 rte_cb_early_init ();
36}
37
38void __init mach_get_physical_ram (unsigned long *ram_start,
39 unsigned long *ram_len)
40{
41 *ram_start = RAM_START;
42 *ram_len = RAM_END - RAM_START;
43}
44
45void __init mach_reserve_bootmem ()
46{
47#ifdef CONFIG_RTE_CB_MULTI
48 /* Prevent the kernel from touching the monitor's scratch RAM. */
49 reserve_bootmem(MON_SCRATCH_ADDR, MON_SCRATCH_SIZE,
50 BOOTMEM_DEFAULT);
51#endif
52
53 /* The space between SRAM and SDRAM is filled with duplicate
54 images of SRAM. Prevent the kernel from using them. */
55 reserve_bootmem (SRAM_ADDR + SRAM_SIZE,
56 SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE),
57 BOOTMEM_DEFAULT);
58}
59
60void mach_gettimeofday (struct timespec *tv)
61{
62 tv->tv_sec = 0;
63 tv->tv_nsec = 0;
64}
65
66/* Called before configuring an on-chip UART. */
67void rte_ma1_cb_uart_pre_configure (unsigned chan,
68 unsigned cflags, unsigned baud)
69{
70 /* The RTE-MA1-CB connects some general-purpose I/O pins on the
71 CPU to the RTS/CTS lines of UART 0's serial connection.
72 I/O pins P42 and P43 are RTS and CTS respectively. */
73 if (chan == 0) {
74 /* Put P42 & P43 in I/O port mode. */
75 MA_PORT4_PMC &= ~0xC;
76 /* Make P42 an output, and P43 an input. */
77 MA_PORT4_PM = (MA_PORT4_PM & ~0xC) | 0x8;
78 }
79
80 /* Do pre-configuration for the actual UART. */
81 ma_uart_pre_configure (chan, cflags, baud);
82}
83
84void __init mach_init_irqs (void)
85{
86 unsigned tc;
87
88 /* Initialize interrupts. */
89 ma_init_irqs ();
90 rte_cb_init_irqs ();
91
92	/* Use falling-edge sensitivity for interrupts. */
93 V850E_TIMER_C_SESC (0) &= ~0xC;
94 V850E_TIMER_C_SESC (1) &= ~0xF;
95
96 /* INTP000-INTP011 are shared with `Timer C', so we have to set
97 up Timer C to pass them through as raw interrupts. */
98 for (tc = 0; tc < 2; tc++)
99 /* Turn on the timer. */
100 V850E_TIMER_C_TMCC0 (tc) |= V850E_TIMER_C_TMCC0_CAE;
101
102 /* Make sure the relevant port0/port1 pins are assigned
103	   interrupt duty.  We use INTP001-INTP011 (don't screw with
104 INTP000 because the monitor uses it). */
105 MA_PORT0_PMC |= 0x4; /* P02 (INTP001) in IRQ mode. */
106 MA_PORT1_PMC |= 0x6; /* P11 (INTP010) & P12 (INTP011) in IRQ mode.*/
107}
diff --git a/arch/v850/kernel/rte_ma1_cb.ld b/arch/v850/kernel/rte_ma1_cb.ld
deleted file mode 100644
index c8e16d16be41..000000000000
--- a/arch/v850/kernel/rte_ma1_cb.ld
+++ /dev/null
@@ -1,57 +0,0 @@
1/* Linker script for the Midas labs RTE-V850E/MA1-CB evaluation board
2 (CONFIG_RTE_CB_MA1), with kernel in SDRAM, under Multi debugger. */
3
4MEMORY {
5 /* 1MB of SRAM; we can't use the last 32KB, because it's used by
6 the monitor scratch-RAM. This memory is mirrored 4 times. */
7 SRAM : ORIGIN = SRAM_ADDR, LENGTH = (SRAM_SIZE - MON_SCRATCH_SIZE)
8 /* Monitor scratch RAM; only the interrupt vectors should go here. */
9 MRAM : ORIGIN = MON_SCRATCH_ADDR, LENGTH = MON_SCRATCH_SIZE
10 /* 32MB of SDRAM. */
11 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
12}
13
14#ifdef CONFIG_RTE_CB_MA1_KSRAM
15# define KRAM SRAM
16#else
17# define KRAM SDRAM
18#endif
19
20SECTIONS {
21 /* We can't use RAMK_KRAM_CONTENTS because that puts the whole
22 kernel in a single ELF segment, and the Multi debugger (which
23 we use to load the kernel) appears to have bizarre problems
24 dealing with it. */
25
26 .text : {
27 __kram_start = . ;
28 TEXT_CONTENTS
29 } > KRAM
30
31 .data : {
32 DATA_CONTENTS
33 BSS_CONTENTS
34 RAMK_INIT_CONTENTS
35 __kram_end = . ;
36 BOOTMAP_CONTENTS
37
38 /* The address at which the interrupt vectors are initially
39 loaded by the loader. We can't load the interrupt vectors
40 directly into their target location, because the monitor
41 ROM for the GHS Multi debugger barfs if we try.
42 Unfortunately, Multi also doesn't deal correctly with ELF
43 sections where the LMA and VMA differ (it just ignores the
44 LMA), so we can't use that feature to work around the
45 problem! What we do instead is just put the interrupt
46 vectors into a normal section, and have the
47 `mach_early_init' function for Midas boards do the
48 necessary copying and relocation at runtime (this section
49 basically only contains `jr' instructions, so it's not
50 that hard). */
51 . = ALIGN (0x10) ;
52 __intv_load_start = . ;
53 INTV_CONTENTS
54 } > KRAM
55
56 .root ALIGN (4096) : { ROOT_FS_CONTENTS } > SDRAM
57}
diff --git a/arch/v850/kernel/rte_mb_a_pci.c b/arch/v850/kernel/rte_mb_a_pci.c
deleted file mode 100644
index 687e367d8b64..000000000000
--- a/arch/v850/kernel/rte_mb_a_pci.c
+++ /dev/null
@@ -1,819 +0,0 @@
1/*
2 * arch/v850/kernel/mb_a_pci.c -- PCI support for Midas lab RTE-MOTHER-A board
3 *
4 * Copyright (C) 2001,02,03,05 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03,05 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/pci.h>
20
21#include <asm/machdep.h>
22
23/* __nomods_init is like __devinit, but is a no-op when modules are enabled.
24 This is used by some routines that can be called either during boot
25 or by a module. */
26#ifdef CONFIG_MODULES
27#define __nomods_init /*nothing*/
28#else
29#define __nomods_init __devinit
30#endif
31
32/* PCI devices on the Mother-A board can only do DMA to/from the MB SRAM
33 (the RTE-V850E/MA1-CB cpu board doesn't support PCI access to
34 CPU-board memory), and since linux DMA buffers are allocated in
35 normal kernel memory, we basically have to copy DMA blocks around
36 (this is like a `bounce buffer'). When a DMA block is `mapped', we
37 allocate an identically sized block in MB SRAM, and if we're doing
38 output to the device, copy the CPU-memory block to the MB-SRAM block.
39 When an active block is `unmapped', we will copy the block back to
40 CPU memory if necessary, and then deallocate the MB SRAM block.
41 Ack. */
42
43/* Where the motherboard SRAM is in the PCI-bus address space (the
44 first 512K of it is also mapped at PCI address 0). */
45#define PCI_MB_SRAM_ADDR 0x800000
46
47/* Convert CPU-view MB SRAM address to/from PCI-view addresses of the
48 same memory. */
49#define MB_SRAM_TO_PCI(mb_sram_addr) \
50 ((dma_addr_t)mb_sram_addr - MB_A_SRAM_ADDR + PCI_MB_SRAM_ADDR)
51#define PCI_TO_MB_SRAM(pci_addr) \
52 (void *)(pci_addr - PCI_MB_SRAM_ADDR + MB_A_SRAM_ADDR)
53
54static void pcibios_assign_resources (void);
55
56struct mb_pci_dev_irq {
57 unsigned dev; /* PCI device number */
58 unsigned irq_base; /* First IRQ */
59 unsigned query_pin; /* True if we should read the device's
60 Interrupt Pin info, and allocate
61 interrupt IRQ_BASE + PIN. */
62};
63
64/* PCI interrupts are mapped statically to GBUS interrupts. */
65static struct mb_pci_dev_irq mb_pci_dev_irqs[] = {
66 /* Motherboard SB82558 ethernet controller */
67 { 10, IRQ_MB_A_LAN, 0 },
68 /* PCI slot 1 */
69 { 8, IRQ_MB_A_PCI1(0), 1 },
70 /* PCI slot 2 */
71 { 9, IRQ_MB_A_PCI2(0), 1 }
72};
73#define NUM_MB_PCI_DEV_IRQS ARRAY_SIZE(mb_pci_dev_irqs)
74
75
76/* PCI configuration primitives. */
77
78#define CONFIG_DMCFGA(bus, devfn, offs) \
79 (0x80000000 \
80 | ((offs) & ~0x3) \
81 | ((devfn) << 8) \
82 | ((bus)->number << 16))
83
84static int
85mb_pci_read (struct pci_bus *bus, unsigned devfn, int offs, int size, u32 *rval)
86{
87 u32 addr;
88	unsigned long flags;
89
90 local_irq_save (flags);
91
92 MB_A_PCI_PCICR = 0x7;
93 MB_A_PCI_DMCFGA = CONFIG_DMCFGA (bus, devfn, offs);
94
95 addr = MB_A_PCI_IO_ADDR + (offs & 0x3);
96
97 switch (size) {
98 case 1: *rval = *(volatile u8 *)addr; break;
99 case 2: *rval = *(volatile u16 *)addr; break;
100 case 4: *rval = *(volatile u32 *)addr; break;
101 }
102
103 if (MB_A_PCI_PCISR & 0x2000) {
104 MB_A_PCI_PCISR = 0x2000;
105 *rval = ~0;
106 }
107
108 MB_A_PCI_DMCFGA = 0;
109
110 local_irq_restore (flags);
111
112 return PCIBIOS_SUCCESSFUL;
113}
114
115static int
116mb_pci_write (struct pci_bus *bus, unsigned devfn, int offs, int size, u32 val)
117{
118 u32 addr;
119	unsigned long flags;
120
121 local_irq_save (flags);
122
123 MB_A_PCI_PCICR = 0x7;
124 MB_A_PCI_DMCFGA = CONFIG_DMCFGA (bus, devfn, offs);
125
126 addr = MB_A_PCI_IO_ADDR + (offs & 0x3);
127
128 switch (size) {
129 case 1: *(volatile u8 *)addr = val; break;
130 case 2: *(volatile u16 *)addr = val; break;
131 case 4: *(volatile u32 *)addr = val; break;
132 }
133
134 if (MB_A_PCI_PCISR & 0x2000)
135 MB_A_PCI_PCISR = 0x2000;
136
137 MB_A_PCI_DMCFGA = 0;
138
139 local_irq_restore (flags);
140
141 return PCIBIOS_SUCCESSFUL;
142}
143
144static struct pci_ops mb_pci_config_ops = {
145 .read = mb_pci_read,
146 .write = mb_pci_write,
147};
148
149
150/* PCI Initialization. */
151
152static struct pci_bus *mb_pci_bus = 0;
153
154/* Do initial PCI setup. */
155static int __devinit pcibios_init (void)
156{
157 u32 id = MB_A_PCI_PCIHIDR;
158 u16 vendor = id & 0xFFFF;
159 u16 device = (id >> 16) & 0xFFFF;
160
161 if (vendor == PCI_VENDOR_ID_PLX && device == PCI_DEVICE_ID_PLX_9080) {
162 printk (KERN_INFO
163 "PCI: PLX Technology PCI9080 HOST/PCI bridge\n");
164
165 MB_A_PCI_PCICR = 0x147;
166
167 MB_A_PCI_PCIBAR0 = 0x007FFF00;
168 MB_A_PCI_PCIBAR1 = 0x0000FF00;
169 MB_A_PCI_PCIBAR2 = 0x00800000;
170
171 MB_A_PCI_PCILTR = 0x20;
172
173 MB_A_PCI_PCIPBAM |= 0x3;
174
175 MB_A_PCI_PCISR = ~0; /* Clear errors. */
176
177 /* Reprogram the motherboard's IO/config address space,
178 as we don't support the GCS7 address space that the
179 default uses. */
180
181 /* Significant address bits used for decoding PCI GCS5 space
182 accesses. */
183 MB_A_PCI_DMRR = ~(MB_A_PCI_MEM_SIZE - 1);
184
185 /* I don't understand this, but the SolutionGear example code
186 uses such an offset, and it doesn't work without it. XXX */
187#if GCS5_SIZE == 0x00800000
188#define GCS5_CFG_OFFS 0x00800000
189#else
190#define GCS5_CFG_OFFS 0
191#endif
192
193 /* Address bit values for matching. Note that we have to give
194 the address from the motherboard's point of view, which is
195 different than the CPU's. */
196 /* PCI memory space. */
197 MB_A_PCI_DMLBAM = GCS5_CFG_OFFS + 0x0;
198 /* PCI I/O space. */
199 MB_A_PCI_DMLBAI =
200 GCS5_CFG_OFFS + (MB_A_PCI_IO_ADDR - GCS5_ADDR);
201
202 mb_pci_bus = pci_scan_bus (0, &mb_pci_config_ops, 0);
203
204 pcibios_assign_resources ();
205 } else
206 printk (KERN_ERR "PCI: HOST/PCI bridge not found\n");
207
208 return 0;
209}
210
211subsys_initcall (pcibios_init);
212
213char __devinit *pcibios_setup (char *option)
214{
215 /* Don't handle any options. */
216 return option;
217}
218
219
220int __nomods_init pcibios_enable_device (struct pci_dev *dev, int mask)
221{
222 u16 cmd, old_cmd;
223 int idx;
224 struct resource *r;
225
226 pci_read_config_word(dev, PCI_COMMAND, &cmd);
227 old_cmd = cmd;
228 for (idx = 0; idx < 6; idx++) {
229 r = &dev->resource[idx];
230 if (!r->start && r->end) {
231 printk(KERN_ERR "PCI: Device %s not available because "
232 "of resource collisions\n", pci_name(dev));
233 return -EINVAL;
234 }
235 if (r->flags & IORESOURCE_IO)
236 cmd |= PCI_COMMAND_IO;
237 if (r->flags & IORESOURCE_MEM)
238 cmd |= PCI_COMMAND_MEMORY;
239 }
240 if (cmd != old_cmd) {
241 printk("PCI: Enabling device %s (%04x -> %04x)\n",
242 pci_name(dev), old_cmd, cmd);
243 pci_write_config_word(dev, PCI_COMMAND, cmd);
244 }
245 return 0;
246}
247
248
249/* Resource allocation. */
250static void __devinit pcibios_assign_resources (void)
251{
252 struct pci_dev *dev = NULL;
253 struct resource *r;
254
255 for_each_pci_dev(dev) {
256 unsigned di_num;
257 unsigned class = dev->class >> 8;
258
259 if (class && class != PCI_CLASS_BRIDGE_HOST) {
260 unsigned r_num;
261 for(r_num = 0; r_num < 6; r_num++) {
262 r = &dev->resource[r_num];
263 if (!r->start && r->end)
264 pci_assign_resource (dev, r_num);
265 }
266 }
267
268 /* Assign interrupts. */
269 for (di_num = 0; di_num < NUM_MB_PCI_DEV_IRQS; di_num++) {
270 struct mb_pci_dev_irq *di = &mb_pci_dev_irqs[di_num];
271
272 if (di->dev == PCI_SLOT (dev->devfn)) {
273 unsigned irq = di->irq_base;
274
275 if (di->query_pin) {
276 /* Find out which interrupt pin
277 this device uses (each PCI
278 slot has 4). */
279 u8 irq_pin;
280
281 pci_read_config_byte (dev,
282 PCI_INTERRUPT_PIN,
283 &irq_pin);
284
285 if (irq_pin == 0)
286 /* Doesn't use interrupts. */
287 continue;
288 else
289 irq += irq_pin - 1;
290 }
291
292 pcibios_update_irq (dev, irq);
293 }
294 }
295 }
296}
297
298void __devinit pcibios_update_irq (struct pci_dev *dev, int irq)
299{
300 dev->irq = irq;
301 pci_write_config_byte (dev, PCI_INTERRUPT_LINE, irq);
302}
303
304void __devinit
305pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
306 struct resource *res)
307{
308 unsigned long offset = 0;
309
310 if (res->flags & IORESOURCE_IO) {
311 offset = MB_A_PCI_IO_ADDR;
312 } else if (res->flags & IORESOURCE_MEM) {
313 offset = MB_A_PCI_MEM_ADDR;
314 }
315
316 region->start = res->start - offset;
317 region->end = res->end - offset;
318}
319
320
321/* Stubs for things we don't use. */
322
323/* Called after each bus is probed, but before its children are examined. */
324void pcibios_fixup_bus(struct pci_bus *b)
325{
326}
327
328void
329pcibios_align_resource (void *data, struct resource *res,
330 resource_size_t size, resource_size_t align)
331{
332}
333
334void pcibios_set_master (struct pci_dev *dev)
335{
336}
337
338
339/* Mother-A SRAM memory allocation. This is a simple first-fit allocator. */
340
341/* A memory free-list node. */
342struct mb_sram_free_area {
343 void *mem;
344 unsigned long size;
345 struct mb_sram_free_area *next;
346};
347
348/* The tail of the free-list, which starts out containing all the SRAM. */
349static struct mb_sram_free_area mb_sram_free_tail = {
350 (void *)MB_A_SRAM_ADDR, MB_A_SRAM_SIZE, 0
351};
352
353/* The free-list. */
354static struct mb_sram_free_area *mb_sram_free_areas = &mb_sram_free_tail;
355
356/* The free-list of free free-list nodes. (:-) */
357static struct mb_sram_free_area *mb_sram_free_free_areas = 0;
358
359/* Spinlock protecting the above globals. */
360static DEFINE_SPINLOCK(mb_sram_lock);
361
362/* Allocate a memory block at least SIZE bytes long in the Mother-A SRAM
363 space. */
364static void *alloc_mb_sram (size_t size)
365{
366 struct mb_sram_free_area *prev, *fa;
367 unsigned long flags;
368 void *mem = 0;
369
370 spin_lock_irqsave (mb_sram_lock, flags);
371
372 /* Look for a free area that can contain SIZE bytes. */
373 for (prev = 0, fa = mb_sram_free_areas; fa; prev = fa, fa = fa->next)
374 if (fa->size >= size) {
375 /* Found one! */
376 mem = fa->mem;
377
378 if (fa->size == size) {
379 /* In fact, it fits exactly, so remove
380 this node from the free-list. */
381 if (prev)
382 prev->next = fa->next;
383 else
384 mb_sram_free_areas = fa->next;
385 /* Put it on the free-list-entry-free-list. */
386 fa->next = mb_sram_free_free_areas;
387 mb_sram_free_free_areas = fa;
388 } else {
389 /* FA is bigger than SIZE, so just
390 reduce its size to account for this
391 allocation. */
392 fa->mem += size;
393 fa->size -= size;
394 }
395
396 break;
397 }
398
399 spin_unlock_irqrestore (mb_sram_lock, flags);
400
401 return mem;
402}
403
404/* Return the memory area MEM of size SIZE to the MB SRAM free pool. */
405static void free_mb_sram (void *mem, size_t size)
406{
407 struct mb_sram_free_area *prev, *fa, *new_fa;
408 unsigned long flags;
409 void *end = mem + size;
410
411 spin_lock_irqsave (mb_sram_lock, flags);
412
413 retry:
414 /* Find an adjacent free-list entry. */
415 for (prev = 0, fa = mb_sram_free_areas; fa; prev = fa, fa = fa->next)
416 if (fa->mem == end) {
417 /* FA is just after MEM, grow down to encompass it. */
418 fa->mem = mem;
419 fa->size += size;
420 goto done;
421 } else if (fa->mem + fa->size == mem) {
422 struct mb_sram_free_area *next_fa = fa->next;
423
424 /* FA is just before MEM, expand to encompass it. */
425 fa->size += size;
426
427 /* See if FA can now be merged with its successor. */
428 if (next_fa && fa->mem + fa->size == next_fa->mem) {
429 /* Yup; merge NEXT_FA's info into FA. */
430 fa->size += next_fa->size;
431 fa->next = next_fa->next;
432 /* Free NEXT_FA. */
433 next_fa->next = mb_sram_free_free_areas;
434 mb_sram_free_free_areas = next_fa;
435 }
436 goto done;
437 } else if (fa->mem > mem)
438 /* We've reached the right spot in the free-list
439 without finding an adjacent free-area, so add
440 a new free area to hold mem. */
441 break;
442
443 /* Make a new free-list entry. */
444
445 /* First, get a free-list entry. */
446 if (! mb_sram_free_free_areas) {
447 /* There are none, so make some. */
448 void *block;
449 size_t block_size = sizeof (struct mb_sram_free_area) * 8;
450
451 /* Don't hold the lock while calling kmalloc (I'm not
452 sure whether it would be a problem, since we use
453 GFP_ATOMIC, but it makes me nervous). */
454 spin_unlock_irqrestore (mb_sram_lock, flags);
455
456 block = kmalloc (block_size, GFP_ATOMIC);
457 if (! block)
458 panic ("free_mb_sram: can't allocate free-list entry");
459
460 /* Now get the lock back. */
461 spin_lock_irqsave (mb_sram_lock, flags);
462
463 /* Add the new free free-list entries. */
464 while (block_size > 0) {
465 struct mb_sram_free_area *nfa = block;
466 nfa->next = mb_sram_free_free_areas;
467 mb_sram_free_free_areas = nfa;
468 block += sizeof *nfa;
469 block_size -= sizeof *nfa;
470 }
471
472 /* Since we dropped the lock to call kmalloc, the
473 free-list could have changed, so retry from the
474 beginning. */
475 goto retry;
476 }
477
478 /* Remove NEW_FA from the free-list of free-list entries. */
479 new_fa = mb_sram_free_free_areas;
480 mb_sram_free_free_areas = new_fa->next;
481
482 /* NEW_FA initially holds only MEM. */
483 new_fa->mem = mem;
484 new_fa->size = size;
485
486 /* Insert NEW_FA in the free-list between PREV and FA. */
487 new_fa->next = fa;
488 if (prev)
489 prev->next = new_fa;
490 else
491 mb_sram_free_areas = new_fa;
492
493 done:
494 spin_unlock_irqrestore (mb_sram_lock, flags);
495}
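
/* alloc_mb_sram and free_mb_sram above implement a plain first-fit policy:
   allocation carves space out of the first free area that is large enough,
   and freeing merges the block back with any adjacent free areas, keeping
   the list sorted by address.  The following is a compact stand-alone
   sketch of the same policy (malloc-backed list nodes, no locking, no
   free-free-list), assuming a caller-supplied arena rather than MB SRAM. */

#include <stdlib.h>

struct ff_area {
	char *mem;
	size_t size;
	struct ff_area *next;
};

static struct ff_area *ff_list;

void ff_init (char *arena, size_t size)
{
	ff_list = malloc (sizeof *ff_list);
	ff_list->mem = arena;
	ff_list->size = size;
	ff_list->next = NULL;
}

void *ff_alloc (size_t size)
{
	struct ff_area **link, *fa;

	for (link = &ff_list; (fa = *link); link = &fa->next)
		if (fa->size >= size) {
			void *mem = fa->mem;
			if (fa->size == size) {	/* exact fit: unlink the node */
				*link = fa->next;
				free (fa);
			} else {		/* shrink the area from the front */
				fa->mem += size;
				fa->size -= size;
			}
			return mem;
		}
	return NULL;				/* nothing big enough */
}

void ff_free (void *mem, size_t size)
{
	char *cmem = mem;
	struct ff_area **link, *fa, *nfa;

	for (link = &ff_list; (fa = *link); link = &fa->next) {
		if (fa->mem == cmem + size) {		/* merge with the area just after */
			fa->mem = cmem;
			fa->size += size;
			return;
		}
		if (fa->mem + fa->size == cmem) {	/* merge with the area just before */
			fa->size += size;
			nfa = fa->next;			/* ...and maybe with its successor */
			if (nfa && fa->mem + fa->size == nfa->mem) {
				fa->size += nfa->size;
				fa->next = nfa->next;
				free (nfa);
			}
			return;
		}
		if (fa->mem > cmem)			/* passed the insertion point */
			break;
	}

	nfa = malloc (sizeof *nfa);		/* no adjacent area: insert a new node */
	nfa->mem = cmem;
	nfa->size = size;
	nfa->next = fa;
	*link = nfa;
}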
496
497
498/* Maintenance of CPU -> Mother-A DMA mappings. */
499
500struct dma_mapping {
501 void *cpu_addr;
502 void *mb_sram_addr;
503 size_t size;
504 struct dma_mapping *next;
505};
506
507/* A list of mappings from CPU addresses to MB SRAM addresses for active
508 DMA blocks (that have been `granted' to the PCI device). */
509static struct dma_mapping *active_dma_mappings = 0;
510
511/* A list of free mapping objects. */
512static struct dma_mapping *free_dma_mappings = 0;
513
514/* Spinlock protecting the above globals. */
515static DEFINE_SPINLOCK(dma_mappings_lock);
516
517static struct dma_mapping *new_dma_mapping (size_t size)
518{
519 unsigned long flags;
520 struct dma_mapping *mapping;
521 void *mb_sram_block = alloc_mb_sram (size);
522
523 if (! mb_sram_block)
524 return 0;
525
526 spin_lock_irqsave (dma_mappings_lock, flags);
527
528 if (! free_dma_mappings) {
529 /* We're out of mapping structures, make more. */
530 void *mblock;
531 size_t mblock_size = sizeof (struct dma_mapping) * 8;
532
533 /* Don't hold the lock while calling kmalloc (I'm not
534 sure whether it would be a problem, since we use
535 GFP_ATOMIC, but it makes me nervous). */
536 spin_unlock_irqrestore (dma_mappings_lock, flags);
537
538 mblock = kmalloc (mblock_size, GFP_ATOMIC);
539 if (! mblock) {
540 free_mb_sram (mb_sram_block, size);
541 return 0;
542 }
543
544 /* Get the lock back. */
545 spin_lock_irqsave (dma_mappings_lock, flags);
546
547 /* Add the new mapping structures to the free-list. */
548 while (mblock_size > 0) {
549 struct dma_mapping *fm = mblock;
550 fm->next = free_dma_mappings;
551 free_dma_mappings = fm;
552 mblock += sizeof *fm;
553 mblock_size -= sizeof *fm;
554 }
555 }
556
557 /* Get a mapping struct from the freelist. */
558 mapping = free_dma_mappings;
559 free_dma_mappings = mapping->next;
560
561 /* Initialize the mapping. Other fields should be filled in by
562 caller. */
563 mapping->mb_sram_addr = mb_sram_block;
564 mapping->size = size;
565
566 /* Add it to the list of active mappings. */
567 mapping->next = active_dma_mappings;
568 active_dma_mappings = mapping;
569
570 spin_unlock_irqrestore (dma_mappings_lock, flags);
571
572 return mapping;
573}
574
575static struct dma_mapping *find_dma_mapping (void *mb_sram_addr)
576{
577 unsigned long flags;
578 struct dma_mapping *mapping;
579
580 spin_lock_irqsave (dma_mappings_lock, flags);
581
582 for (mapping = active_dma_mappings; mapping; mapping = mapping->next)
583 if (mapping->mb_sram_addr == mb_sram_addr) {
584 spin_unlock_irqrestore (dma_mappings_lock, flags);
585 return mapping;
586 }
587
588 panic ("find_dma_mapping: unmapped PCI DMA addr 0x%x",
589 MB_SRAM_TO_PCI (mb_sram_addr));
590}
591
592static struct dma_mapping *deactivate_dma_mapping (void *mb_sram_addr)
593{
594 unsigned long flags;
595 struct dma_mapping *mapping, *prev;
596
597 spin_lock_irqsave (dma_mappings_lock, flags);
598
599 for (prev = 0, mapping = active_dma_mappings;
600 mapping;
601 prev = mapping, mapping = mapping->next)
602 {
603 if (mapping->mb_sram_addr == mb_sram_addr) {
604 /* This is the MAPPING; deactivate it. */
605 if (prev)
606 prev->next = mapping->next;
607 else
608 active_dma_mappings = mapping->next;
609
610 spin_unlock_irqrestore (dma_mappings_lock, flags);
611
612 return mapping;
613 }
614 }
615
616 panic ("deactivate_dma_mapping: unmapped PCI DMA addr 0x%x",
617 MB_SRAM_TO_PCI (mb_sram_addr));
618}
619
620/* Return MAPPING to the freelist. */
621static inline void
622free_dma_mapping (struct dma_mapping *mapping)
623{
624 unsigned long flags;
625
626 free_mb_sram (mapping->mb_sram_addr, mapping->size);
627
628 spin_lock_irqsave (dma_mappings_lock, flags);
629
630 mapping->next = free_dma_mappings;
631 free_dma_mappings = mapping;
632
633 spin_unlock_irqrestore (dma_mappings_lock, flags);
634}
635
636
637/* Single PCI DMA mappings. */
638
639/* `Grant' to PDEV the memory block at CPU_ADDR, for doing DMA. The
640   32-bit PCI bus mastering address to use is returned.  The device owns
641 this memory until either pci_unmap_single or pci_dma_sync_single is
642 performed. */
643dma_addr_t
644pci_map_single (struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
645{
646 struct dma_mapping *mapping = new_dma_mapping (size);
647
648 if (! mapping)
649 return 0;
650
651 mapping->cpu_addr = cpu_addr;
652
653 if (dir == PCI_DMA_BIDIRECTIONAL || dir == PCI_DMA_TODEVICE)
654 memcpy (mapping->mb_sram_addr, cpu_addr, size);
655
656 return MB_SRAM_TO_PCI (mapping->mb_sram_addr);
657}
658
659/* Return to the CPU the PCI DMA memory block previously `granted' to
660 PDEV, at DMA_ADDR. */
661void pci_unmap_single (struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
662 int dir)
663{
664 void *mb_sram_addr = PCI_TO_MB_SRAM (dma_addr);
665 struct dma_mapping *mapping = deactivate_dma_mapping (mb_sram_addr);
666
667 if (size != mapping->size)
668 panic ("pci_unmap_single: size (%d) doesn't match"
669 " size of mapping at PCI DMA addr 0x%x (%d)\n",
670 size, dma_addr, mapping->size);
671
672 /* Copy back the DMA'd contents if necessary. */
673 if (dir == PCI_DMA_BIDIRECTIONAL || dir == PCI_DMA_FROMDEVICE)
674 memcpy (mapping->cpu_addr, mb_sram_addr, size);
675
676 /* Return mapping to the freelist. */
677 free_dma_mapping (mapping);
678}
679
680/* Make physical memory consistent for a single streaming mode DMA
681 translation after a transfer.
682
683 If you perform a pci_map_single() but wish to interrogate the
684   buffer using the CPU, yet do not wish to tear down the PCI DMA
685 mapping, you must call this function before doing so. At the next
686 point you give the PCI dma address back to the card, you must first
687 perform a pci_dma_sync_for_device, and then the device again owns
688 the buffer. */
689void
690pci_dma_sync_single_for_cpu (struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
691 int dir)
692{
693 void *mb_sram_addr = PCI_TO_MB_SRAM (dma_addr);
694 struct dma_mapping *mapping = find_dma_mapping (mb_sram_addr);
695
696 /* Synchronize the DMA buffer with the CPU buffer if necessary. */
697 if (dir == PCI_DMA_FROMDEVICE)
698 memcpy (mapping->cpu_addr, mb_sram_addr, size);
699 else if (dir == PCI_DMA_TODEVICE)
700 ; /* nothing to do */
701 else
702 panic("pci_dma_sync_single: unsupported sync dir: %d", dir);
703}
704
705void
706pci_dma_sync_single_for_device (struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
707 int dir)
708{
709 void *mb_sram_addr = PCI_TO_MB_SRAM (dma_addr);
710 struct dma_mapping *mapping = find_dma_mapping (mb_sram_addr);
711
712 /* Synchronize the DMA buffer with the CPU buffer if necessary. */
713 if (dir == PCI_DMA_FROMDEVICE)
714 ; /* nothing to do */
715 else if (dir == PCI_DMA_TODEVICE)
716 memcpy (mb_sram_addr, mapping->cpu_addr, size);
717 else
718 panic("pci_dma_sync_single: unsupported sync dir: %d", dir);
719}
720
721
722/* Scatter-gather PCI DMA mappings. */
723
724/* Do multiple DMA mappings at once. */
725int
726pci_map_sg (struct pci_dev *pdev, struct scatterlist *sg, int sg_len, int dir)
727{
728 BUG ();
729 return 0;
730}
731
732/* Unmap multiple DMA mappings at once. */
733void
734pci_unmap_sg (struct pci_dev *pdev, struct scatterlist *sg, int sg_len,int dir)
735{
736 BUG ();
737}
738
739/* Make physical memory consistent for a set of streaming mode DMA
740 translations after a transfer. The same as pci_dma_sync_single_* but
741 for a scatter-gather list, same rules and usage. */
742
743void
744pci_dma_sync_sg_for_cpu (struct pci_dev *dev,
745 struct scatterlist *sg, int sg_len,
746 int dir)
747{
748 BUG ();
749}
750
751void
752pci_dma_sync_sg_for_device (struct pci_dev *dev,
753 struct scatterlist *sg, int sg_len,
754 int dir)
755{
756 BUG ();
757}
758
759
760/* PCI mem mapping. */
761
762/* Allocate and map kernel buffer using consistent mode DMA for PCI
763 device. Returns non-NULL cpu-view pointer to the buffer if
764 successful and sets *DMA_ADDR to the pci side dma address as well,
765 else DMA_ADDR is undefined. */
766void *
767pci_alloc_consistent (struct pci_dev *pdev, size_t size, dma_addr_t *dma_addr)
768{
769 void *mb_sram_mem = alloc_mb_sram (size);
770 if (mb_sram_mem)
771 *dma_addr = MB_SRAM_TO_PCI (mb_sram_mem);
772 return mb_sram_mem;
773}
774
775/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
776 be values that were returned from pci_alloc_consistent. SIZE must be
777   the same as what was passed into pci_alloc_consistent.  References to
778 the memory and mappings associated with CPU_ADDR or DMA_ADDR past
779 this call are illegal. */
780void
781pci_free_consistent (struct pci_dev *pdev, size_t size, void *cpu_addr,
782 dma_addr_t dma_addr)
783{
784 void *mb_sram_mem = PCI_TO_MB_SRAM (dma_addr);
785 free_mb_sram (mb_sram_mem, size);
786}
787
788
789/* iomap/iounmap */
790
791void __iomem *pci_iomap (struct pci_dev *dev, int bar, unsigned long max)
792{
793 resource_size_t start = pci_resource_start (dev, bar);
794 resource_size_t len = pci_resource_len (dev, bar);
795
796 if (!start || len == 0)
797 return 0;
798
799 /* None of the ioremap functions actually do anything, other than
800 re-casting their argument, so don't bother differentiating them. */
801 return ioremap (start, len);
802}
803
804void pci_iounmap (struct pci_dev *dev, void __iomem *addr)
805{
806 /* nothing */
807}
808
809
810/* symbol exports (for modules) */
811
812EXPORT_SYMBOL (pci_map_single);
813EXPORT_SYMBOL (pci_unmap_single);
814EXPORT_SYMBOL (pci_alloc_consistent);
815EXPORT_SYMBOL (pci_free_consistent);
816EXPORT_SYMBOL (pci_dma_sync_single_for_cpu);
817EXPORT_SYMBOL (pci_dma_sync_single_for_device);
818EXPORT_SYMBOL (pci_iomap);
819EXPORT_SYMBOL (pci_iounmap);
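
The exported mapping routines above implement the usual streaming-DMA ownership rules on top of the MB-SRAM bounce buffers: mapping hands a (possibly copied) block to the device, unmapping copies it back and releases the SRAM.  A hypothetical driver-side sketch of the call sequence for a device-to-memory transfer; start_rx_dma and wait_rx_done are placeholders, not real functions:

#include <linux/pci.h>

extern void start_rx_dma (struct pci_dev *pdev, dma_addr_t bus, size_t len);
extern void wait_rx_done (struct pci_dev *pdev);

static int rx_one_buffer (struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus = pci_map_single (pdev, buf, len, PCI_DMA_FROMDEVICE);

	if (! bus)
		return -ENOMEM;		/* no MB SRAM left for a bounce buffer */

	start_rx_dma (pdev, bus, len);	/* the device now owns the MB-SRAM copy */
	wait_rx_done (pdev);

	/* Copies the DMA'd data back into buf and frees the MB-SRAM block. */
	pci_unmap_single (pdev, bus, len, PCI_DMA_FROMDEVICE);
	return 0;
}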
diff --git a/arch/v850/kernel/rte_me2_cb.c b/arch/v850/kernel/rte_me2_cb.c
deleted file mode 100644
index 46803d48dffe..000000000000
--- a/arch/v850/kernel/rte_me2_cb.c
+++ /dev/null
@@ -1,298 +0,0 @@
1/*
2 * arch/v850/kernel/rte_me2_cb.c -- Midas labs RTE-V850E/ME2-CB board
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/bootmem.h>
17#include <linux/irq.h>
18#include <linux/fs.h>
19#include <linux/major.h>
20#include <linux/sched.h>
21#include <linux/delay.h>
22
23#include <asm/atomic.h>
24#include <asm/page.h>
25#include <asm/me2.h>
26#include <asm/rte_me2_cb.h>
27#include <asm/machdep.h>
28#include <asm/v850e_intc.h>
29#include <asm/v850e_cache.h>
30#include <asm/irq.h>
31
32#include "mach.h"
33
34extern unsigned long *_intv_start;
35extern unsigned long *_intv_end;
36
37/* LED access routines. */
38extern unsigned read_leds (int pos, char *buf, int len);
39extern unsigned write_leds (int pos, const char *buf, int len);
40
41
42/* SDRAM is almost contiguous (with a small hole in between;
43   see mach_reserve_bootmem for details), so just use it as one big area. */
44#define RAM_START SDRAM_ADDR
45#define RAM_END (SDRAM_ADDR + SDRAM_SIZE)
46
47
48void __init mach_get_physical_ram (unsigned long *ram_start,
49 unsigned long *ram_len)
50{
51 *ram_start = RAM_START;
52 *ram_len = RAM_END - RAM_START;
53}
54
55void mach_gettimeofday (struct timespec *tv)
56{
57 tv->tv_sec = 0;
58 tv->tv_nsec = 0;
59}
60
61/* Called before configuring an on-chip UART. */
62void rte_me2_cb_uart_pre_configure (unsigned chan,
63 unsigned cflags, unsigned baud)
64{
65 /* The RTE-V850E/ME2-CB connects some general-purpose I/O
66 pins on the CPU to the RTS/CTS lines of UARTB channel 0's
67 serial connection.
68 I/O pins P21 and P22 are RTS and CTS respectively. */
69 if (chan == 0) {
70 /* Put P21 & P22 in I/O port mode. */
71 ME2_PORT2_PMC &= ~0x6;
72 /* Make P21 and output, and P22 an input. */
73 ME2_PORT2_PM = (ME2_PORT2_PM & ~0xC) | 0x4;
74 }
75
76 me2_uart_pre_configure (chan, cflags, baud);
77}
78
79void __init mach_init_irqs (void)
80{
81 /* Initialize interrupts. */
82 me2_init_irqs ();
83 rte_me2_cb_init_irqs ();
84}
85
86#ifdef CONFIG_ROM_KERNEL
87/* Initialization for kernel in ROM. */
88static inline void rom_kernel_init (void)
89{
90 /* If the kernel is in ROM, we have to copy any initialized data
91 from ROM into RAM. */
92 extern unsigned long _data_load_start, _sdata, _edata;
93 register unsigned long *src = &_data_load_start;
94 register unsigned long *dst = &_sdata, *end = &_edata;
95
96 while (dst != end)
97 *dst++ = *src++;
98}
99#endif /* CONFIG_ROM_KERNEL */
100
101static void install_interrupt_vectors (void)
102{
103 unsigned long *p1, *p2;
104
105 ME2_IRAMM = 0x03; /* V850E/ME2 iRAM write mode */
106
107 /* vector copy to iRAM */
108 p1 = (unsigned long *)0; /* v85x vector start */
109 p2 = (unsigned long *)&_intv_start;
110 while (p2 < (unsigned long *)&_intv_end)
111 *p1++ = *p2++;
112
113 ME2_IRAMM = 0x00; /* V850E/ME2 iRAM read mode */
114}
115
116/* CompactFlash */
117
118static void cf_power_on (void)
119{
120 /* CF card detected? */
121 if (CB_CF_STS0 & 0x0030)
122 return;
123
124	CB_CF_REG0 = 0x0002;	/* reset on */
125 mdelay (10);
126 CB_CF_REG0 = 0x0003; /* power on */
127 mdelay (10);
128 CB_CF_REG0 = 0x0001; /* reset off */
129 mdelay (10);
130}
131
132static void cf_power_off (void)
133{
134 CB_CF_REG0 = 0x0003; /* power on */
135 mdelay (10);
136	CB_CF_REG0 = 0x0002;	/* reset on */
137 mdelay (10);
138}
139
140void __init mach_early_init (void)
141{
142 install_interrupt_vectors ();
143
144 /* CS1 SDRAM instruction cache enable */
145 v850e_cache_enable (0x04, 0x03, 0);
146
147 rte_cb_early_init ();
148
149 /* CompactFlash power on */
150 cf_power_on ();
151
152#if defined (CONFIG_ROM_KERNEL)
153 rom_kernel_init ();
154#endif
155}
156
157
158/* RTE-V850E/ME2-CB Programmable Interrupt Controller. */
159
160static struct cb_pic_irq_init cb_pic_irq_inits[] = {
161 { "CB_EXTTM0", IRQ_CB_EXTTM0, 1, 1, 6 },
162 { "CB_EXTSIO", IRQ_CB_EXTSIO, 1, 1, 6 },
163 { "CB_TOVER", IRQ_CB_TOVER, 1, 1, 6 },
164 { "CB_GINT0", IRQ_CB_GINT0, 1, 1, 6 },
165 { "CB_USB", IRQ_CB_USB, 1, 1, 6 },
166 { "CB_LANC", IRQ_CB_LANC, 1, 1, 6 },
167 { "CB_USB_VBUS_ON", IRQ_CB_USB_VBUS_ON, 1, 1, 6 },
168 { "CB_USB_VBUS_OFF", IRQ_CB_USB_VBUS_OFF, 1, 1, 6 },
169 { "CB_EXTTM1", IRQ_CB_EXTTM1, 1, 1, 6 },
170 { "CB_EXTTM2", IRQ_CB_EXTTM2, 1, 1, 6 },
171 { 0 }
172};
173#define NUM_CB_PIC_IRQ_INITS (ARRAY_SIZE(cb_pic_irq_inits) - 1)
174
175static struct hw_interrupt_type cb_pic_hw_itypes[NUM_CB_PIC_IRQ_INITS];
176static unsigned char cb_pic_active_irqs = 0;
177
178void __init rte_me2_cb_init_irqs (void)
179{
180 cb_pic_init_irq_types (cb_pic_irq_inits, cb_pic_hw_itypes);
181
182	/* Initialize on-board PIC1 (not PIC0) enable */
183 CB_PIC_INT0M = 0x0000;
184 CB_PIC_INT1M = 0x0000;
185 CB_PIC_INTR = 0x0000;
186 CB_PIC_INTEN |= CB_PIC_INT1EN;
187
188 ME2_PORT2_PMC |= 0x08; /* INTP23/SCK1 mode */
189 ME2_PORT2_PFC &= ~0x08; /* INTP23 mode */
190 ME2_INTR(2) &= ~0x08; /* INTP23 falling-edge detect */
191 ME2_INTF(2) &= ~0x08; /* " */
192
193 rte_cb_init_irqs (); /* gbus &c */
194}
195
196
197/* Enable interrupt handling for interrupt IRQ. */
198void cb_pic_enable_irq (unsigned irq)
199{
200 CB_PIC_INT1M |= 1 << (irq - CB_PIC_BASE_IRQ);
201}
202
203void cb_pic_disable_irq (unsigned irq)
204{
205 CB_PIC_INT1M &= ~(1 << (irq - CB_PIC_BASE_IRQ));
206}
207
208void cb_pic_shutdown_irq (unsigned irq)
209{
210 cb_pic_disable_irq (irq);
211
212 if (--cb_pic_active_irqs == 0)
213 free_irq (IRQ_CB_PIC, 0);
214
215 CB_PIC_INT1M &= ~(1 << (irq - CB_PIC_BASE_IRQ));
216}
217
218static irqreturn_t cb_pic_handle_irq (int irq, void *dev_id,
219 struct pt_regs *regs)
220{
221 irqreturn_t rval = IRQ_NONE;
222 unsigned status = CB_PIC_INTR;
223 unsigned enable = CB_PIC_INT1M;
224
225 /* Only pay attention to enabled interrupts. */
226 status &= enable;
227
228 CB_PIC_INTEN &= ~CB_PIC_INT1EN;
229
230 if (status) {
231 unsigned mask = 1;
232
233 irq = CB_PIC_BASE_IRQ;
234 do {
235 /* There's an active interrupt, find out which one,
236 and call its handler. */
237 while (! (status & mask)) {
238 irq++;
239 mask <<= 1;
240 }
241 status &= ~mask;
242
243 CB_PIC_INTR = mask;
244
245 /* Recursively call handle_irq to handle it. */
246 handle_irq (irq, regs);
247 rval = IRQ_HANDLED;
248 } while (status);
249 }
250
251 CB_PIC_INTEN |= CB_PIC_INT1EN;
252
253 return rval;
254}
255
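
The loop in cb_pic_handle_irq above masks the pending status with the enable mask and then walks the set bits from the lowest one up, acknowledging and dispatching each source in turn. A minimal stand-alone sketch of the same scan in plain C, with invented register values, an invented base IRQ number, and a stand-in dispatch() in place of handle_irq:

#include <stdio.h>

static void dispatch (unsigned irq)
{
	printf ("dispatch IRQ %u\n", irq);
}

int main (void)
{
	unsigned status = 0x0146;	/* pretend pending bits */
	unsigned enable = 0x00c6;	/* pretend enable mask */
	unsigned base_irq = 64;		/* pretend CB_PIC_BASE_IRQ */

	/* Only pay attention to enabled interrupts. */
	status &= enable;

	if (status) {
		unsigned mask = 1;
		unsigned irq = base_irq;

		do {
			/* Find the lowest pending bit... */
			while (! (status & mask)) {
				irq++;
				mask <<= 1;
			}
			/* ...clear it and dispatch that IRQ. */
			status &= ~mask;
			dispatch (irq);
		} while (status);
	}
	return 0;
}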
256
257static void irq_nop (unsigned irq) { }
258
259static unsigned cb_pic_startup_irq (unsigned irq)
260{
261 int rval;
262
263 if (cb_pic_active_irqs == 0) {
264 rval = request_irq (IRQ_CB_PIC, cb_pic_handle_irq,
265 IRQF_DISABLED, "cb_pic_handler", 0);
266 if (rval != 0)
267 return rval;
268 }
269
270 cb_pic_active_irqs++;
271
272 cb_pic_enable_irq (irq);
273
274 return 0;
275}
276
277/* Initialize HW_IRQ_TYPES for INTC-controlled irqs described in array
278 INITS (which is terminated by an entry with the name field == 0). */
279void __init cb_pic_init_irq_types (struct cb_pic_irq_init *inits,
280 struct hw_interrupt_type *hw_irq_types)
281{
282 struct cb_pic_irq_init *init;
283 for (init = inits; init->name; init++) {
284 struct hw_interrupt_type *hwit = hw_irq_types++;
285
286 hwit->typename = init->name;
287
288 hwit->startup = cb_pic_startup_irq;
289 hwit->shutdown = cb_pic_shutdown_irq;
290 hwit->enable = cb_pic_enable_irq;
291 hwit->disable = cb_pic_disable_irq;
292 hwit->ack = irq_nop;
293 hwit->end = irq_nop;
294
295 /* Initialize kernel IRQ infrastructure for this interrupt. */
296 init_irq_handlers(init->base, init->num, init->interval, hwit);
297 }
298}
diff --git a/arch/v850/kernel/rte_me2_cb.ld b/arch/v850/kernel/rte_me2_cb.ld
deleted file mode 100644
index cf0766065ec6..000000000000
--- a/arch/v850/kernel/rte_me2_cb.ld
+++ /dev/null
@@ -1,30 +0,0 @@
1/* Linker script for the Midas labs RTE-V850E/ME2-CB evaluation board
2 (CONFIG_RTE_CB_ME2), with kernel in SDRAM. */
3
4MEMORY {
5 /* 128Kbyte of IRAM */
6 IRAM : ORIGIN = 0x00000000, LENGTH = 0x00020000
7
8 /* 32MB of SDRAM. */
9 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
10}
11
12#define KRAM SDRAM
13
14SECTIONS {
15 .text : {
16 __kram_start = . ;
17 TEXT_CONTENTS
18 INTV_CONTENTS /* copy to iRAM (0x0-0x620) */
19 } > KRAM
20
21 .data : {
22 DATA_CONTENTS
23 BSS_CONTENTS
24 RAMK_INIT_CONTENTS
25 __kram_end = . ;
26 BOOTMAP_CONTENTS
27 } > KRAM
28
29 .root ALIGN (4096) : { ROOT_FS_CONTENTS } > SDRAM
30}
diff --git a/arch/v850/kernel/rte_nb85e_cb-multi.ld b/arch/v850/kernel/rte_nb85e_cb-multi.ld
deleted file mode 100644
index de347b4fffac..000000000000
--- a/arch/v850/kernel/rte_nb85e_cb-multi.ld
+++ /dev/null
@@ -1,57 +0,0 @@
1/* Linker script for the Midas labs RTE-NB85E-CB evaluation board
2   (CONFIG_RTE_CB_NB85E), with the Multi debugger ROM monitor. */
3
4MEMORY {
5 /* 1MB of SRAM; we can't use the last 96KB, because it's used by
6 the monitor scratch-RAM. This memory is mirrored 4 times. */
7 SRAM : ORIGIN = SRAM_ADDR, LENGTH = (SRAM_SIZE - MON_SCRATCH_SIZE)
8 /* Monitor scratch RAM; only the interrupt vectors should go here. */
9 MRAM : ORIGIN = MON_SCRATCH_ADDR, LENGTH = MON_SCRATCH_SIZE
10 /* 16MB of SDRAM. */
11 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
12}
13
14#ifdef CONFIG_RTE_CB_NB85E_KSRAM
15# define KRAM SRAM
16#else
17# define KRAM SDRAM
18#endif
19
20SECTIONS {
21 /* We can't use RAMK_KRAM_CONTENTS because that puts the whole
22 kernel in a single ELF segment, and the Multi debugger (which
23 we use to load the kernel) appears to have bizarre problems
24 dealing with it. */
25
26 .text : {
27 __kram_start = . ;
28 TEXT_CONTENTS
29 } > KRAM
30
31 .data : {
32 DATA_CONTENTS
33 BSS_CONTENTS
34 RAMK_INIT_CONTENTS
35 __kram_end = . ;
36 BOOTMAP_CONTENTS
37
38 /* The address at which the interrupt vectors are initially
39 loaded by the loader. We can't load the interrupt vectors
40 directly into their target location, because the monitor
41 ROM for the GHS Multi debugger barfs if we try.
42 Unfortunately, Multi also doesn't deal correctly with ELF
43 sections where the LMA and VMA differ (it just ignores the
44 LMA), so we can't use that feature to work around the
45 problem! What we do instead is just put the interrupt
46 vectors into a normal section, and have the
47 `mach_early_init' function for Midas boards do the
48 necessary copying and relocation at runtime (this section
49 basically only contains `jr' instructions, so it's not
50 that hard). */
51 . = ALIGN (0x10) ;
52 __intv_load_start = . ;
53 INTV_CONTENTS
54 } > KRAM
55
56 .root ALIGN (4096) : { ROOT_FS_CONTENTS } > SDRAM
57}
diff --git a/arch/v850/kernel/rte_nb85e_cb.c b/arch/v850/kernel/rte_nb85e_cb.c
deleted file mode 100644
index b4a045da5d70..000000000000
--- a/arch/v850/kernel/rte_nb85e_cb.c
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * arch/v850/kernel/rte_nb85e_cb.c -- Midas labs RTE-V850E/NB85E-CB board
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/mm.h>
17#include <linux/swap.h>
18#include <linux/bootmem.h>
19#include <linux/irq.h>
20
21#include <asm/atomic.h>
22#include <asm/page.h>
23#include <asm/v850e.h>
24#include <asm/rte_nb85e_cb.h>
25
26#include "mach.h"
27
28void __init mach_early_init (void)
29{
30 /* Configure caching; some possible settings:
31
32 BHC = 0x0000, DCC = 0x0000 -- all caching disabled
33 BHC = 0x0040, DCC = 0x0000 -- SDRAM: icache only
34 BHC = 0x0080, DCC = 0x0C00 -- SDRAM: write-back dcache only
35 BHC = 0x00C0, DCC = 0x0C00 -- SDRAM: icache + write-back dcache
36 BHC = 0x00C0, DCC = 0x0800 -- SDRAM: icache + write-thru dcache
37
38	   We can only cache SDRAM (we can't cache SRAM because it's in
39 the same memory region as the on-chip RAM and I/O space).
40
41 Unfortunately, the dcache seems to be buggy, so we only use the
42 icache for now. */
43 v850e_cache_enable (0x0040 /*BHC*/, 0x0003 /*ICC*/, 0x0000 /*DCC*/);
44
45 rte_cb_early_init ();
46}
47
48void __init mach_get_physical_ram (unsigned long *ram_start,
49 unsigned long *ram_len)
50{
51 /* We just use SDRAM here. */
52 *ram_start = SDRAM_ADDR;
53 *ram_len = SDRAM_SIZE;
54}
55
56void mach_gettimeofday (struct timespec *tv)
57{
58 tv->tv_sec = 0;
59 tv->tv_nsec = 0;
60}
61
62/* Called before configuring an on-chip UART. */
63void rte_nb85e_cb_uart_pre_configure (unsigned chan,
64 unsigned cflags, unsigned baud)
65{
66 /* The RTE-NB85E-CB connects some general-purpose I/O pins on the
67	   CPU to the RTS/CTS lines of the UART's serial connection, as follows:
68 P00 = CTS (in), P01 = DSR (in), P02 = RTS (out), P03 = DTR (out). */
69
70 TEG_PORT0_PM = 0x03; /* P00 and P01 inputs, P02 and P03 outputs */
71 TEG_PORT0_IO = 0x03; /* Accept input */
72
73 /* Do pre-configuration for the actual UART. */
74 teg_uart_pre_configure (chan, cflags, baud);
75}
76
77void __init mach_init_irqs (void)
78{
79 teg_init_irqs ();
80 rte_cb_init_irqs ();
81}
diff --git a/arch/v850/kernel/rte_nb85e_cb.ld b/arch/v850/kernel/rte_nb85e_cb.ld
deleted file mode 100644
index b672f484f085..000000000000
--- a/arch/v850/kernel/rte_nb85e_cb.ld
+++ /dev/null
@@ -1,22 +0,0 @@
1/* Linker script for the Midas labs RTE-NB85E-CB evaluation board
2 (CONFIG_RTE_CB_NB85E). */
3
4MEMORY {
5 LOW : ORIGIN = 0x0, LENGTH = 0x00100000
6	/* 1MB of SRAM. This memory is mirrored 4 times. */
7 SRAM : ORIGIN = SRAM_ADDR, LENGTH = SRAM_SIZE
8 /* 16MB of SDRAM. */
9 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
10}
11
12#ifdef CONFIG_RTE_CB_NB85E_KSRAM
13# define KRAM SRAM
14#else
15# define KRAM SDRAM
16#endif
17
18SECTIONS {
19 .intv : { INTV_CONTENTS } > LOW
20 .sram : { RAMK_KRAM_CONTENTS } > KRAM
21 .root : { ROOT_FS_CONTENTS } > SDRAM
22}
diff --git a/arch/v850/kernel/setup.c b/arch/v850/kernel/setup.c
deleted file mode 100644
index a0a8456a8430..000000000000
--- a/arch/v850/kernel/setup.c
+++ /dev/null
@@ -1,330 +0,0 @@
1/*
2 * arch/v850/kernel/setup.c -- Arch-dependent initialization functions
3 *
4 * Copyright (C) 2001,02,03,05,06 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03,05,06 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/mm.h>
15#include <linux/bootmem.h>
16#include <linux/swap.h> /* we don't have swap, but for nr_free_pages */
17#include <linux/irq.h>
18#include <linux/reboot.h>
19#include <linux/personality.h>
20#include <linux/major.h>
21#include <linux/root_dev.h>
22#include <linux/mtd/mtd.h>
23#include <linux/init.h>
24
25#include <asm/irq.h>
26#include <asm/setup.h>
27
28#include "mach.h"
29
30/* These symbols are all defined in the linker map to delineate various
31 statically allocated regions of memory. */
32
33extern char _intv_start, _intv_end;
34/* `kram' is only used if the kernel uses part of normal user RAM. */
35extern char _kram_start __attribute__ ((__weak__));
36extern char _kram_end __attribute__ ((__weak__));
37extern char _init_start, _init_end;
38extern char _bootmap;
39extern char _stext, _etext, _sdata, _edata, _sbss, _ebss;
40/* Many platforms use an embedded root image. */
41extern char _root_fs_image_start __attribute__ ((__weak__));
42extern char _root_fs_image_end __attribute__ ((__weak__));
43
44
45char __initdata command_line[COMMAND_LINE_SIZE];
46
47/* Memory not used by the kernel. */
48static unsigned long total_ram_pages;
49
50/* System RAM. */
51static unsigned long ram_start = 0, ram_len = 0;
52
53
54#define ADDR_TO_PAGE_UP(x) ((((unsigned long)x) + PAGE_SIZE-1) >> PAGE_SHIFT)
55#define ADDR_TO_PAGE(x) (((unsigned long)x) >> PAGE_SHIFT)
56#define PAGE_TO_ADDR(x) (((unsigned long)x) << PAGE_SHIFT)
57
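
The three macros above convert between byte addresses and page numbers: ADDR_TO_PAGE_UP adds PAGE_SIZE-1 before shifting so a partial page rounds up, while ADDR_TO_PAGE truncates. A quick stand-alone check, assuming a 4KB page (PAGE_SHIFT of 12) and an arbitrary example address:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define ADDR_TO_PAGE_UP(x)  ((((unsigned long)x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define ADDR_TO_PAGE(x)     (((unsigned long)x) >> PAGE_SHIFT)
#define PAGE_TO_ADDR(x)     (((unsigned long)x) << PAGE_SHIFT)

int main (void)
{
	unsigned long addr = 0x1234;	/* arbitrary example address */

	printf ("down: page %lu\n", ADDR_TO_PAGE (addr));
	printf ("up:   page %lu\n", ADDR_TO_PAGE_UP (addr));
	printf ("back: addr 0x%lx\n", PAGE_TO_ADDR (ADDR_TO_PAGE_UP (addr)));
	return 0;
}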
58static void init_mem_alloc (unsigned long ram_start, unsigned long ram_len);
59
60void set_mem_root (void *addr, size_t len, char *cmd_line);
61
62
63void __init setup_arch (char **cmdline)
64{
65 /* Keep a copy of command line */
66 *cmdline = command_line;
67 memcpy (boot_command_line, command_line, COMMAND_LINE_SIZE);
68 boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
69
70 console_verbose ();
71
72 init_mm.start_code = (unsigned long) &_stext;
73 init_mm.end_code = (unsigned long) &_etext;
74 init_mm.end_data = (unsigned long) &_edata;
75 init_mm.brk = (unsigned long) &_kram_end;
76
77 /* Find out what mem this machine has. */
78 mach_get_physical_ram (&ram_start, &ram_len);
79 /* ... and tell the kernel about it. */
80 init_mem_alloc (ram_start, ram_len);
81
82 printk (KERN_INFO "CPU: %s\nPlatform: %s\n",
83 CPU_MODEL_LONG, PLATFORM_LONG);
84
85 /* do machine-specific setups. */
86 mach_setup (cmdline);
87
88#ifdef CONFIG_MTD
89 if (!ROOT_DEV && &_root_fs_image_end > &_root_fs_image_start)
90 set_mem_root (&_root_fs_image_start,
91 &_root_fs_image_end - &_root_fs_image_start,
92 *cmdline);
93#endif
94}
95
96void __init trap_init (void)
97{
98}
99
100#ifdef CONFIG_MTD
101
102/* From drivers/mtd/devices/slram.c */
103#define SLRAM_BLK_SZ 0x4000
104
105/* Set the root filesystem to be the given memory region.
106 Some parameter may be appended to CMD_LINE. */
107void set_mem_root (void *addr, size_t len, char *cmd_line)
108{
109 /* Some sort of idiocy in MTD means we must supply a length that's
110 a multiple of SLRAM_BLK_SZ. We just round up the real length,
111 as the file system shouldn't attempt to access anything beyond
112 the end of the image anyway. */
113 len = (((len - 1) + SLRAM_BLK_SZ) / SLRAM_BLK_SZ) * SLRAM_BLK_SZ;
114
115 /* The only way to pass info to the MTD slram driver is via
116 the command line. */
117 if (*cmd_line) {
118 cmd_line += strlen (cmd_line);
119 *cmd_line++ = ' ';
120 }
121 sprintf (cmd_line, "slram=root,0x%x,+0x%x", (u32)addr, (u32)len);
122
123 ROOT_DEV = MKDEV (MTD_BLOCK_MAJOR, 0);
124}
125#endif
126
127
128static void irq_nop (unsigned irq) { }
129static unsigned irq_zero (unsigned irq) { return 0; }
130
131static void nmi_end (unsigned irq)
132{
133 if (irq != IRQ_NMI (0)) {
134 printk (KERN_CRIT "NMI %d is unrecoverable; restarting...",
135 irq - IRQ_NMI (0));
136 machine_restart (0);
137 }
138}
139
140static struct hw_interrupt_type nmi_irq_type = {
141 .typename = "NMI",
142 .startup = irq_zero, /* startup */
143 .shutdown = irq_nop, /* shutdown */
144 .enable = irq_nop, /* enable */
145 .disable = irq_nop, /* disable */
146 .ack = irq_nop, /* ack */
147 .end = nmi_end, /* end */
148};
149
150void __init init_IRQ (void)
151{
152 init_irq_handlers (0, NUM_MACH_IRQS, 1, 0);
153 init_irq_handlers (IRQ_NMI (0), NUM_NMIS, 1, &nmi_irq_type);
154 mach_init_irqs ();
155}
156
157
158void __init mem_init (void)
159{
160 max_mapnr = MAP_NR (ram_start + ram_len);
161
162 num_physpages = ADDR_TO_PAGE (ram_len);
163
164 total_ram_pages = free_all_bootmem ();
165
166 printk (KERN_INFO
167 "Memory: %luK/%luK available"
168 " (%luK kernel code, %luK data)\n",
169 PAGE_TO_ADDR (nr_free_pages()) / 1024,
170 ram_len / 1024,
171 ((unsigned long)&_etext - (unsigned long)&_stext) / 1024,
172 ((unsigned long)&_ebss - (unsigned long)&_sdata) / 1024);
173}
174
175void free_initmem (void)
176{
177 unsigned long ram_end = ram_start + ram_len;
178 unsigned long start = PAGE_ALIGN ((unsigned long)(&_init_start));
179
180 if (start >= ram_start && start < ram_end) {
181 unsigned long addr;
182 unsigned long end = PAGE_ALIGN ((unsigned long)(&_init_end));
183
184 if (end > ram_end)
185 end = ram_end;
186
187 printk("Freeing unused kernel memory: %ldK freed\n",
188 (end - start) / 1024);
189
190 for (addr = start; addr < end; addr += PAGE_SIZE) {
191 struct page *page = virt_to_page (addr);
192 ClearPageReserved (page);
193 init_page_count (page);
194 __free_page (page);
195 total_ram_pages++;
196 }
197 }
198}
199
200
201/* Initialize the `bootmem allocator'. RAM_START and RAM_LEN identify
202 what RAM may be used. */
203static void __init
204init_bootmem_alloc (unsigned long ram_start, unsigned long ram_len)
205{
206 /* The part of the kernel that's in the same managed RAM space
207 used for general allocation. */
208 unsigned long kram_start = (unsigned long)&_kram_start;
209 unsigned long kram_end = (unsigned long)&_kram_end;
210 /* End of the managed RAM space. */
211 unsigned long ram_end = ram_start + ram_len;
212 /* Address range of the interrupt vector table. */
213 unsigned long intv_start = (unsigned long)&_intv_start;
214 unsigned long intv_end = (unsigned long)&_intv_end;
215 /* True if the interrupt vectors are in the managed RAM area. */
216 int intv_in_ram = (intv_end > ram_start && intv_start < ram_end);
217 /* True if the interrupt vectors are inside the kernel's RAM. */
218 int intv_in_kram = (intv_end > kram_start && intv_start < kram_end);
219 /* A pointer to an optional function that reserves platform-specific
220 memory regions. We declare the pointer `volatile' to avoid gcc
221 turning the call into a static call (the problem is that since
222 it's a weak symbol, a static call may end up trying to reference
223 the location 0x0, which is not always reachable). */
224 void (*volatile mrb) (void) = mach_reserve_bootmem;
225 /* The bootmem allocator's allocation bitmap. */
226 unsigned long bootmap = (unsigned long)&_bootmap;
227 unsigned long bootmap_len;
228
229 /* Round bootmap location up to next page. */
230 bootmap = PAGE_TO_ADDR (ADDR_TO_PAGE_UP (bootmap));
231
232 /* Initialize bootmem allocator. */
233 bootmap_len = init_bootmem_node (NODE_DATA (0),
234 ADDR_TO_PAGE (bootmap),
235 ADDR_TO_PAGE (PAGE_OFFSET),
236 ADDR_TO_PAGE (ram_end));
237
238 /* Now make the RAM actually allocatable (it starts out `reserved'). */
239 free_bootmem (ram_start, ram_len);
240
241 if (kram_end > kram_start)
242 /* Reserve the RAM part of the kernel's address space, so it
243 doesn't get allocated. */
244 reserve_bootmem(kram_start, kram_end - kram_start,
245 BOOTMEM_DEFAULT);
246
247 if (intv_in_ram && !intv_in_kram)
248 /* Reserve the interrupt vector space. */
249 reserve_bootmem(intv_start, intv_end - intv_start,
250 BOOTMEM_DEFAULT);
251
252 if (bootmap >= ram_start && bootmap < ram_end)
253 /* Reserve the bootmap space. */
254 reserve_bootmem(bootmap, bootmap_len,
255 BOOTMEM_DEFAULT);
256
257 /* Reserve the memory used by the root filesystem image if it's
258 in RAM. */
259 if (&_root_fs_image_end > &_root_fs_image_start
260 && (unsigned long)&_root_fs_image_start >= ram_start
261 && (unsigned long)&_root_fs_image_start < ram_end)
262 reserve_bootmem ((unsigned long)&_root_fs_image_start,
263 &_root_fs_image_end - &_root_fs_image_start,
264 BOOTMEM_DEFAULT);
265
266 /* Let the platform-dependent code reserve some too. */
267 if (mrb)
268 (*mrb) ();
269}
270
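
The long comment in init_bootmem_alloc above explains why mach_reserve_bootmem is called through a volatile function pointer: the symbol is weak, so it may resolve to address 0, and the call must stay an indirect, guarded one instead of being folded into a direct call. A stand-alone sketch of the same pattern, assuming GCC/ELF weak-symbol behaviour and using a hypothetical hook() in place of mach_reserve_bootmem:

#include <stdio.h>

/* Weak declaration with no definition anywhere in this program,
   so its address resolves to 0 at link time. */
void hook (void) __attribute__ ((weak));

int main (void)
{
	/* Take the address through a volatile pointer so the compiler
	   can't turn this into a direct call to address 0. */
	void (*volatile fn) (void) = hook;

	if (fn)
		(*fn) ();
	else
		printf ("hook not provided, skipping\n");
	return 0;
}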
271/* Tell the kernel about what RAM it may use for memory allocation. */
272static void __init
273init_mem_alloc (unsigned long ram_start, unsigned long ram_len)
274{
275 unsigned i;
276 unsigned long zones_size[MAX_NR_ZONES];
277
278 init_bootmem_alloc (ram_start, ram_len);
279
280 for (i = 0; i < MAX_NR_ZONES; i++)
281 zones_size[i] = 0;
282
283 /* We stuff all the memory into one area, which includes the
284 initial gap from PAGE_OFFSET to ram_start. */
285 zones_size[ZONE_DMA]
286 = ADDR_TO_PAGE (ram_len + (ram_start - PAGE_OFFSET));
287
288 /* The allocator is very picky about the address of the first
289 allocatable page -- it must be at least as aligned as the
290 maximum allocation -- so try to detect cases where it will get
291 confused and signal them at compile time (this is a common
292	   problem when porting to a new platform). There is a
293 similar runtime check in free_area_init_core. */
294#if ((PAGE_OFFSET >> PAGE_SHIFT) & ((1UL << (MAX_ORDER - 1)) - 1))
295#error MAX_ORDER is too large for given PAGE_OFFSET (use CONFIG_FORCE_MAX_ZONEORDER to change it)
296#endif
297 NODE_DATA(0)->node_mem_map = NULL;
298 free_area_init_node (0, NODE_DATA(0), zones_size,
299 ADDR_TO_PAGE (PAGE_OFFSET), 0);
300}
301
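
The #if/#error check in init_mem_alloc above verifies at compile time that the first page frame (PAGE_OFFSET >> PAGE_SHIFT) is aligned to the largest buddy-allocator block of 1 << (MAX_ORDER - 1) pages. The same test as a runnable sketch, with an assumed 4KB page, a typical MAX_ORDER of 11, and invented candidate offsets:

#include <stdio.h>

#define PAGE_SHIFT 12
#define MAX_ORDER  11		/* a typical default; illustrative only */

static int page_offset_ok (unsigned long page_offset)
{
	unsigned long pfn = page_offset >> PAGE_SHIFT;
	/* The first allocatable pfn must be a multiple of the
	   largest buddy block, 1 << (MAX_ORDER - 1) pages. */
	return (pfn & ((1UL << (MAX_ORDER - 1)) - 1)) == 0;
}

int main (void)
{
	unsigned long candidates[] = { 0x00000000, 0x00200000, 0x00400000 };
	unsigned i;

	for (i = 0; i < sizeof candidates / sizeof candidates[0]; i++)
		printf ("PAGE_OFFSET 0x%08lx: %s\n", candidates[i],
			page_offset_ok (candidates[i]) ? "ok" : "misaligned");
	return 0;
}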
302
303
304/* Taken from m68knommu */
305void show_mem(void)
306{
307 unsigned long i;
308 int free = 0, total = 0, reserved = 0, shared = 0;
309 int cached = 0;
310
311 printk(KERN_INFO "\nMem-info:\n");
312 show_free_areas();
313 i = max_mapnr;
314 while (i-- > 0) {
315 total++;
316 if (PageReserved(mem_map+i))
317 reserved++;
318 else if (PageSwapCache(mem_map+i))
319 cached++;
320 else if (!page_count(mem_map+i))
321 free++;
322 else
323 shared += page_count(mem_map+i) - 1;
324 }
325 printk(KERN_INFO "%d pages of RAM\n",total);
326 printk(KERN_INFO "%d free pages\n",free);
327 printk(KERN_INFO "%d reserved pages\n",reserved);
328 printk(KERN_INFO "%d pages shared\n",shared);
329 printk(KERN_INFO "%d pages swap cached\n",cached);
330}
diff --git a/arch/v850/kernel/signal.c b/arch/v850/kernel/signal.c
deleted file mode 100644
index bf166e7e762c..000000000000
--- a/arch/v850/kernel/signal.c
+++ /dev/null
@@ -1,523 +0,0 @@
1/*
2 * arch/v850/kernel/signal.c -- Signal handling
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 * Copyright (C) 1999,2000,2002 Niibe Yutaka & Kaz Kojima
7 * Copyright (C) 1991,1992 Linus Torvalds
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file COPYING in the main directory of this
11 * archive for more details.
12 *
13 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
14 *
15 * This file was derived from the sh version, arch/sh/kernel/signal.c
16 */
17
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/kernel.h>
21#include <linux/signal.h>
22#include <linux/errno.h>
23#include <linux/wait.h>
24#include <linux/ptrace.h>
25#include <linux/unistd.h>
26#include <linux/stddef.h>
27#include <linux/personality.h>
28#include <linux/tty.h>
29
30#include <asm/ucontext.h>
31#include <asm/uaccess.h>
32#include <asm/pgtable.h>
33#include <asm/pgalloc.h>
34#include <asm/thread_info.h>
35#include <asm/cacheflush.h>
36
37#define DEBUG_SIG 0
38
39#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
40
41asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
42
43/*
44 * Atomically swap in the new signal mask, and wait for a signal.
45 */
46asmlinkage int
47sys_sigsuspend(old_sigset_t mask, struct pt_regs *regs)
48{
49 sigset_t saveset;
50
51 mask &= _BLOCKABLE;
52 spin_lock_irq(&current->sighand->siglock);
53 saveset = current->blocked;
54 siginitset(&current->blocked, mask);
55 recalc_sigpending();
56 spin_unlock_irq(&current->sighand->siglock);
57
58 regs->gpr[GPR_RVAL] = -EINTR;
59 while (1) {
60 current->state = TASK_INTERRUPTIBLE;
61 schedule();
62 if (do_signal(regs, &saveset))
63 return -EINTR;
64 }
65}
66
67asmlinkage int
68sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
69 struct pt_regs *regs)
70{
71 sigset_t saveset, newset;
72
73 /* XXX: Don't preclude handling different sized sigset_t's. */
74 if (sigsetsize != sizeof(sigset_t))
75 return -EINVAL;
76
77 if (copy_from_user(&newset, unewset, sizeof(newset)))
78 return -EFAULT;
79 sigdelsetmask(&newset, ~_BLOCKABLE);
80 spin_lock_irq(&current->sighand->siglock);
81 saveset = current->blocked;
82 current->blocked = newset;
83 recalc_sigpending();
84 spin_unlock_irq(&current->sighand->siglock);
85
86 regs->gpr[GPR_RVAL] = -EINTR;
87 while (1) {
88 current->state = TASK_INTERRUPTIBLE;
89 schedule();
90 if (do_signal(regs, &saveset))
91 return -EINTR;
92 }
93}
94
95asmlinkage int
96sys_sigaction(int sig, const struct old_sigaction *act,
97 struct old_sigaction *oact)
98{
99 struct k_sigaction new_ka, old_ka;
100 int ret;
101
102 if (act) {
103 old_sigset_t mask;
104 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
105 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
106 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
107 return -EFAULT;
108 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
109 __get_user(mask, &act->sa_mask);
110 siginitset(&new_ka.sa.sa_mask, mask);
111 }
112
113 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
114
115 if (!ret && oact) {
116 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
117 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
118 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
119 return -EFAULT;
120 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
121 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
122 }
123
124 return ret;
125}
126
127asmlinkage int
128sys_sigaltstack(const stack_t *uss, stack_t *uoss,
129 struct pt_regs *regs)
130{
131 return do_sigaltstack(uss, uoss, regs->gpr[GPR_SP]);
132}
133
134
135/*
136 * Do a signal return; undo the signal stack.
137 */
138
139struct sigframe
140{
141 struct sigcontext sc;
142 unsigned long extramask[_NSIG_WORDS-1];
143 unsigned long tramp[2]; /* signal trampoline */
144};
145
146struct rt_sigframe
147{
148 struct siginfo info;
149 struct ucontext uc;
150 unsigned long tramp[2]; /* signal trampoline */
151};
152
153static int
154restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *rval_p)
155{
156 unsigned int err = 0;
157
158#define COPY(x) err |= __get_user(regs->x, &sc->regs.x)
159 COPY(gpr[0]); COPY(gpr[1]); COPY(gpr[2]); COPY(gpr[3]);
160 COPY(gpr[4]); COPY(gpr[5]); COPY(gpr[6]); COPY(gpr[7]);
161 COPY(gpr[8]); COPY(gpr[9]); COPY(gpr[10]); COPY(gpr[11]);
162 COPY(gpr[12]); COPY(gpr[13]); COPY(gpr[14]); COPY(gpr[15]);
163 COPY(gpr[16]); COPY(gpr[17]); COPY(gpr[18]); COPY(gpr[19]);
164 COPY(gpr[20]); COPY(gpr[21]); COPY(gpr[22]); COPY(gpr[23]);
165 COPY(gpr[24]); COPY(gpr[25]); COPY(gpr[26]); COPY(gpr[27]);
166 COPY(gpr[28]); COPY(gpr[29]); COPY(gpr[30]); COPY(gpr[31]);
167 COPY(pc); COPY(psw);
168 COPY(ctpc); COPY(ctpsw); COPY(ctbp);
169#undef COPY
170
171 return err;
172}
173
174asmlinkage int sys_sigreturn(struct pt_regs *regs)
175{
176 struct sigframe *frame = (struct sigframe *)regs->gpr[GPR_SP];
177 sigset_t set;
178 int rval;
179
180 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
181 goto badframe;
182
183 if (__get_user(set.sig[0], &frame->sc.oldmask)
184 || (_NSIG_WORDS > 1
185 && __copy_from_user(&set.sig[1], &frame->extramask,
186 sizeof(frame->extramask))))
187 goto badframe;
188
189 sigdelsetmask(&set, ~_BLOCKABLE);
190 spin_lock_irq(&current->sighand->siglock);
191 current->blocked = set;
192 recalc_sigpending();
193 spin_unlock_irq(&current->sighand->siglock);
194
195 if (restore_sigcontext(regs, &frame->sc, &rval))
196 goto badframe;
197 return rval;
198
199badframe:
200 force_sig(SIGSEGV, current);
201 return 0;
202}
203
204asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
205{
206 struct rt_sigframe *frame = (struct rt_sigframe *)regs->gpr[GPR_SP];
207 sigset_t set;
208 stack_t st;
209 int rval;
210
211 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
212 goto badframe;
213
214 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
215 goto badframe;
216
217 sigdelsetmask(&set, ~_BLOCKABLE);
218 spin_lock_irq(&current->sighand->siglock);
219 current->blocked = set;
220 recalc_sigpending();
221 spin_unlock_irq(&current->sighand->siglock);
222
223 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
224 goto badframe;
225
226 if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
227 goto badframe;
228 /* It is more difficult to avoid calling this function than to
229 call it and ignore errors. */
230 do_sigaltstack(&st, NULL, regs->gpr[GPR_SP]);
231
232 return rval;
233
234badframe:
235 force_sig(SIGSEGV, current);
236 return 0;
237}
238
239/*
240 * Set up a signal frame.
241 */
242
243static int
244setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
245 unsigned long mask)
246{
247 int err = 0;
248
249#define COPY(x) err |= __put_user(regs->x, &sc->regs.x)
250 COPY(gpr[0]); COPY(gpr[1]); COPY(gpr[2]); COPY(gpr[3]);
251 COPY(gpr[4]); COPY(gpr[5]); COPY(gpr[6]); COPY(gpr[7]);
252 COPY(gpr[8]); COPY(gpr[9]); COPY(gpr[10]); COPY(gpr[11]);
253 COPY(gpr[12]); COPY(gpr[13]); COPY(gpr[14]); COPY(gpr[15]);
254 COPY(gpr[16]); COPY(gpr[17]); COPY(gpr[18]); COPY(gpr[19]);
255 COPY(gpr[20]); COPY(gpr[21]); COPY(gpr[22]); COPY(gpr[23]);
256 COPY(gpr[24]); COPY(gpr[25]); COPY(gpr[26]); COPY(gpr[27]);
257 COPY(gpr[28]); COPY(gpr[29]); COPY(gpr[30]); COPY(gpr[31]);
258 COPY(pc); COPY(psw);
259 COPY(ctpc); COPY(ctpsw); COPY(ctbp);
260#undef COPY
261
262 err |= __put_user(mask, &sc->oldmask);
263
264 return err;
265}
266
267/*
268 * Determine which stack to use..
269 */
270static inline void *
271get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
272{
273 /* Default to using normal stack */
274 unsigned long sp = regs->gpr[GPR_SP];
275
276 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
277 sp = current->sas_ss_sp + current->sas_ss_size;
278
279 return (void *)((sp - frame_size) & -8UL);
280}
281
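
get_sigframe above places the frame below the chosen stack pointer and clears the low three bits with `& -8UL' to keep it 8-byte aligned. A tiny stand-alone illustration with made-up stack-pointer and frame-size values:

#include <stdio.h>

int main (void)
{
	unsigned long sp = 0xbffff3edUL;	/* made-up user stack pointer */
	unsigned long frame_size = 0x94;	/* made-up frame size */

	printf ("unaligned frame: 0x%lx\n", sp - frame_size);
	printf ("aligned frame:   0x%lx\n", (sp - frame_size) & -8UL);
	return 0;
}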
282static void setup_frame(int sig, struct k_sigaction *ka,
283 sigset_t *set, struct pt_regs *regs)
284{
285 struct sigframe *frame;
286 int err = 0;
287 int signal;
288
289 frame = get_sigframe(ka, regs, sizeof(*frame));
290
291 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
292 goto give_sigsegv;
293
294 signal = current_thread_info()->exec_domain
295 && current_thread_info()->exec_domain->signal_invmap
296 && sig < 32
297 ? current_thread_info()->exec_domain->signal_invmap[sig]
298 : sig;
299
300 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
301
302 if (_NSIG_WORDS > 1) {
303 err |= __copy_to_user(frame->extramask, &set->sig[1],
304 sizeof(frame->extramask));
305 }
306
307 /* Set up to return from userspace. If provided, use a stub
308 already in userspace. */
309 if (ka->sa.sa_flags & SA_RESTORER) {
310 regs->gpr[GPR_LP] = (unsigned long) ka->sa.sa_restorer;
311 } else {
312 /* Note, these encodings are _little endian_! */
313
314 /* addi __NR_sigreturn, r0, r12 */
315 err |= __put_user(0x6600 | (__NR_sigreturn << 16),
316 frame->tramp + 0);
317 /* trap 0 */
318 err |= __put_user(0x010007e0,
319 frame->tramp + 1);
320
321 regs->gpr[GPR_LP] = (unsigned long)frame->tramp;
322
323 flush_cache_sigtramp (regs->gpr[GPR_LP]);
324 }
325
326 if (err)
327 goto give_sigsegv;
328
329 /* Set up registers for signal handler. */
330 regs->pc = (v850_reg_t) ka->sa.sa_handler;
331 regs->gpr[GPR_SP] = (v850_reg_t)frame;
332 /* Signal handler args: */
333 regs->gpr[GPR_ARG0] = signal; /* arg 0: signum */
334 regs->gpr[GPR_ARG1] = (v850_reg_t)&frame->sc;/* arg 1: sigcontext */
335
336 set_fs(USER_DS);
337
338#if DEBUG_SIG
339 printk("SIG deliver (%s:%d): sp=%p pc=%08lx ra=%08lx\n",
340	       current->comm, current->pid, frame, regs->pc, regs->gpr[GPR_LP]);
341#endif
342
343 return;
344
345give_sigsegv:
346 force_sigsegv(sig, current);
347}
348
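
setup_frame above builds a two-word signal-return trampoline when no SA_RESTORER stub is supplied: the first word is 0x6600 OR'd with __NR_sigreturn shifted into the upper half-word (the `addi __NR_sigreturn, r0, r12' named in the comment), the second is the fixed `trap 0' word, and the comment stresses that the encoding is little endian. A user-space sketch of how those words are assembled; the syscall number used here (119) is only an example value:

#include <stdio.h>

int main (void)
{
	unsigned int nr_sigreturn = 119;	/* example number only */
	unsigned int addi = 0x6600 | (nr_sigreturn << 16);
	unsigned int trap = 0x010007e0;		/* `trap 0', from the code above */
	unsigned char *p = (unsigned char *)&addi;

	printf ("addi word: 0x%08x\n", addi);
	printf ("trap word: 0x%08x\n", trap);
	/* On a little-endian host the bytes come out in the order a
	   little-endian CPU would fetch them (00 66 77 00 here). */
	printf ("addi bytes: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}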
349static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
350 sigset_t *set, struct pt_regs *regs)
351{
352 struct rt_sigframe *frame;
353 int err = 0;
354 int signal;
355
356 frame = get_sigframe(ka, regs, sizeof(*frame));
357
358 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
359 goto give_sigsegv;
360
361 signal = current_thread_info()->exec_domain
362 && current_thread_info()->exec_domain->signal_invmap
363 && sig < 32
364 ? current_thread_info()->exec_domain->signal_invmap[sig]
365 : sig;
366
367 err |= copy_siginfo_to_user(&frame->info, info);
368
369 /* Create the ucontext. */
370 err |= __put_user(0, &frame->uc.uc_flags);
371 err |= __put_user(0, &frame->uc.uc_link);
372 err |= __put_user((void *)current->sas_ss_sp,
373 &frame->uc.uc_stack.ss_sp);
374 err |= __put_user(sas_ss_flags(regs->gpr[GPR_SP]),
375 &frame->uc.uc_stack.ss_flags);
376 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
377 err |= setup_sigcontext(&frame->uc.uc_mcontext,
378 regs, set->sig[0]);
379 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
380
381 /* Set up to return from userspace. If provided, use a stub
382 already in userspace. */
383 if (ka->sa.sa_flags & SA_RESTORER) {
384 regs->gpr[GPR_LP] = (unsigned long) ka->sa.sa_restorer;
385 } else {
386 /* Note, these encodings are _little endian_! */
387
388 /* addi __NR_sigreturn, r0, r12 */
389 err |= __put_user(0x6600 | (__NR_sigreturn << 16),
390 frame->tramp + 0);
391 /* trap 0 */
392 err |= __put_user(0x010007e0,
393 frame->tramp + 1);
394
395 regs->gpr[GPR_LP] = (unsigned long)frame->tramp;
396
397 flush_cache_sigtramp (regs->gpr[GPR_LP]);
398 }
399
400 if (err)
401 goto give_sigsegv;
402
403 /* Set up registers for signal handler. */
404 regs->pc = (v850_reg_t) ka->sa.sa_handler;
405 regs->gpr[GPR_SP] = (v850_reg_t)frame;
406 /* Signal handler args: */
407 regs->gpr[GPR_ARG0] = signal; /* arg 0: signum */
408 regs->gpr[GPR_ARG1] = (v850_reg_t)&frame->info; /* arg 1: siginfo */
409 regs->gpr[GPR_ARG2] = (v850_reg_t)&frame->uc; /* arg 2: ucontext */
410
411 set_fs(USER_DS);
412
413#if DEBUG_SIG
414 printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
415	       current->comm, current->pid, frame, regs->pc, regs->gpr[GPR_LP]);
416#endif
417
418 return;
419
420give_sigsegv:
421 force_sigsegv(sig, current);
422}
423
424/*
425 * OK, we're invoking a handler
426 */
427
428static void
429handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
430 sigset_t *oldset, struct pt_regs * regs)
431{
432 /* Are we from a system call? */
433 if (PT_REGS_SYSCALL (regs)) {
434 /* If so, check system call restarting.. */
435 switch (regs->gpr[GPR_RVAL]) {
436 case -ERESTART_RESTARTBLOCK:
437 current_thread_info()->restart_block.fn =
438 do_no_restart_syscall;
439 /* fall through */
440 case -ERESTARTNOHAND:
441 regs->gpr[GPR_RVAL] = -EINTR;
442 break;
443
444 case -ERESTARTSYS:
445 if (!(ka->sa.sa_flags & SA_RESTART)) {
446 regs->gpr[GPR_RVAL] = -EINTR;
447 break;
448 }
449 /* fallthrough */
450 case -ERESTARTNOINTR:
451 regs->gpr[12] = PT_REGS_SYSCALL (regs);
452 regs->pc -= 4; /* Size of `trap 0' insn. */
453 }
454
455 PT_REGS_SET_SYSCALL (regs, 0);
456 }
457
458 /* Set up the stack frame */
459 if (ka->sa.sa_flags & SA_SIGINFO)
460 setup_rt_frame(sig, ka, info, oldset, regs);
461 else
462 setup_frame(sig, ka, oldset, regs);
463
464 spin_lock_irq(&current->sighand->siglock);
465 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
466 if (!(ka->sa.sa_flags & SA_NODEFER))
467 sigaddset(&current->blocked,sig);
468 recalc_sigpending();
469 spin_unlock_irq(&current->sighand->siglock);
470}
471
472/*
473 * Note that 'init' is a special process: it doesn't get signals it doesn't
474 * want to handle. Thus you cannot kill init even with a SIGKILL even by
475 * mistake.
476 *
477 * Note that we go through the signals twice: once to check the signals that
478 * the kernel can handle, and then we build all the user-level signal handling
479 * stack-frames in one go after that.
480 */
481int do_signal(struct pt_regs *regs, sigset_t *oldset)
482{
483 siginfo_t info;
484 int signr;
485 struct k_sigaction ka;
486
487 /*
488 * We want the common case to go fast, which
489 * is why we may in certain cases get here from
490 * kernel mode. Just return without doing anything
491 * if so.
492 */
493 if (!user_mode(regs))
494 return 1;
495
496 if (!oldset)
497 oldset = &current->blocked;
498
499 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
500 if (signr > 0) {
501 /* Whee! Actually deliver the signal. */
502 handle_signal(signr, &info, &ka, oldset, regs);
503 return 1;
504 }
505
506 /* Did we come from a system call? */
507 if (PT_REGS_SYSCALL (regs)) {
508 int rval = (int)regs->gpr[GPR_RVAL];
509 /* Restart the system call - no handlers present */
510 if (rval == -ERESTARTNOHAND
511 || rval == -ERESTARTSYS
512 || rval == -ERESTARTNOINTR)
513 {
514 regs->gpr[12] = PT_REGS_SYSCALL (regs);
515 regs->pc -= 4; /* Size of `trap 0' insn. */
516 }
517 else if (rval == -ERESTART_RESTARTBLOCK) {
518 regs->gpr[12] = __NR_restart_syscall;
519 regs->pc -= 4; /* Size of `trap 0' insn. */
520 }
521 }
522 return 0;
523}
diff --git a/arch/v850/kernel/sim.c b/arch/v850/kernel/sim.c
deleted file mode 100644
index 467b4aa0acdd..000000000000
--- a/arch/v850/kernel/sim.c
+++ /dev/null
@@ -1,172 +0,0 @@
1/*
2 * arch/v850/kernel/sim.c -- Machine-specific stuff for GDB v850e simulator
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/swap.h>
19#include <linux/bootmem.h>
20#include <linux/irq.h>
21
22#include <asm/atomic.h>
23#include <asm/page.h>
24#include <asm/machdep.h>
25#include <asm/simsyscall.h>
26
27#include "mach.h"
28
29/* The name of a file containing the root filesystem. */
30#define ROOT_FS "rootfs.image"
31
32extern void simcons_setup (void);
33extern void simcons_poll_ttys (void);
34extern void set_mem_root (void *addr, size_t len, char *cmd_line);
35
36static int read_file (const char *name,
37 unsigned long *addr, unsigned long *len,
38 const char **err);
39
40void __init mach_setup (char **cmdline)
41{
42 const char *err;
43 unsigned long root_dev_addr, root_dev_len;
44
45 simcons_setup ();
46
47 printk (KERN_INFO "Reading root filesystem: %s", ROOT_FS);
48
49 if (read_file (ROOT_FS, &root_dev_addr, &root_dev_len, &err)) {
50 printk (" (size %luK)\n", root_dev_len / 1024);
51 set_mem_root ((void *)root_dev_addr, (size_t)root_dev_len,
52 *cmdline);
53 } else
54 printk ("...%s failed!\n", err);
55}
56
57void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len)
58{
59 *ram_start = RAM_ADDR;
60 *ram_len = RAM_SIZE;
61}
62
63void __init mach_sched_init (struct irqaction *timer_action)
64{
65 /* ...do magic timer initialization?... */
66 mach_tick = simcons_poll_ttys;
67 setup_irq (0, timer_action);
68}
69
70
71static void irq_nop (unsigned irq) { }
72static unsigned irq_zero (unsigned irq) { return 0; }
73
74static struct hw_interrupt_type sim_irq_type = {
75 .typename = "IRQ",
76 .startup = irq_zero, /* startup */
77 .shutdown = irq_nop, /* shutdown */
78 .enable = irq_nop, /* enable */
79 .disable = irq_nop, /* disable */
80 .ack = irq_nop, /* ack */
81 .end = irq_nop, /* end */
82};
83
84void __init mach_init_irqs (void)
85{
86 init_irq_handlers (0, NUM_MACH_IRQS, 1, &sim_irq_type);
87}
88
89
90void mach_gettimeofday (struct timespec *tv)
91{
92 long timeval[2], timezone[2];
93 int rval = V850_SIM_SYSCALL (gettimeofday, timeval, timezone);
94 if (rval == 0) {
95 tv->tv_sec = timeval[0];
96 tv->tv_nsec = timeval[1] * 1000;
97 }
98}
99
100void machine_restart (char *__unused)
101{
102 V850_SIM_SYSCALL (write, 1, "RESTART\n", 8);
103 V850_SIM_SYSCALL (exit, 0);
104}
105
106void machine_halt (void)
107{
108 V850_SIM_SYSCALL (write, 1, "HALT\n", 5);
109 V850_SIM_SYSCALL (exit, 0);
110}
111
112void machine_power_off (void)
113{
114 V850_SIM_SYSCALL (write, 1, "POWER OFF\n", 10);
115 V850_SIM_SYSCALL (exit, 0);
116}
117
118
119/* Load data from a file called NAME into ram. The address and length
120 of the data image are returned in ADDR and LEN. */
121static int __init
122read_file (const char *name,
123 unsigned long *addr, unsigned long *len,
124 const char **err)
125{
126 int rval, fd;
127 unsigned long cur, left;
128 /* Note this is not a normal stat buffer, it's an ad-hoc
129 structure defined by the simulator. */
130 unsigned long stat_buf[10];
131
132 /* Stat the file to find out the length. */
133 rval = V850_SIM_SYSCALL (stat, name, stat_buf);
134 if (rval < 0) {
135 if (err) *err = "stat";
136 return 0;
137 }
138 *len = stat_buf[4];
139
140 /* Open the file; `0' is O_RDONLY. */
141 fd = V850_SIM_SYSCALL (open, name, 0);
142 if (fd < 0) {
143 if (err) *err = "open";
144 return 0;
145 }
146
147 *addr = (unsigned long)alloc_bootmem(*len);
148 if (! *addr) {
149 V850_SIM_SYSCALL (close, fd);
150 if (err) *err = "alloc_bootmem";
151 return 0;
152 }
153
154 cur = *addr;
155 left = *len;
156 while (left > 0) {
157 int chunk = V850_SIM_SYSCALL (read, fd, cur, left);
158 if (chunk <= 0)
159 break;
160 cur += chunk;
161 left -= chunk;
162 }
163 V850_SIM_SYSCALL (close, fd);
164 if (left > 0) {
165 /* Some read failed. */
166 free_bootmem (*addr, *len);
167 if (err) *err = "read";
168 return 0;
169 }
170
171 return 1;
172}
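
read_file above loops on the simulator's read syscall because a single call may return fewer bytes than requested. The same short-read pattern as a plain POSIX sketch (the file name is taken from ROOT_FS above; error handling is minimal):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main (void)
{
	int fd = open ("rootfs.image", O_RDONLY);
	char buf[4096];
	unsigned long total = 0;

	if (fd < 0) {
		perror ("open");
		return 1;
	}
	for (;;) {
		ssize_t chunk = read (fd, buf, sizeof buf);
		if (chunk <= 0)		/* EOF or error ends the loop */
			break;
		total += chunk;
	}
	close (fd);
	printf ("read %lu bytes\n", total);
	return 0;
}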
diff --git a/arch/v850/kernel/sim.ld b/arch/v850/kernel/sim.ld
deleted file mode 100644
index 101885f3c9f0..000000000000
--- a/arch/v850/kernel/sim.ld
+++ /dev/null
@@ -1,13 +0,0 @@
1/* Linker script for the gdb v850e simulator (CONFIG_V850E_SIM). */
2
3MEMORY {
4 /* Interrupt vectors. */
5 INTV : ORIGIN = 0x0, LENGTH = 0xe0
6 /* Main RAM. */
7 RAM : ORIGIN = RAM_ADDR, LENGTH = RAM_SIZE
8}
9
10SECTIONS {
11 .intv : { INTV_CONTENTS } > INTV
12 .ram : { RAMK_KRAM_CONTENTS } > RAM
13}
diff --git a/arch/v850/kernel/sim85e2.c b/arch/v850/kernel/sim85e2.c
deleted file mode 100644
index 566dde5e6070..000000000000
--- a/arch/v850/kernel/sim85e2.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * arch/v850/kernel/sim85e2.c -- Machine-specific stuff for
3 * V850E2 RTL simulator
4 *
5 * Copyright (C) 2002,03 NEC Electronics Corporation
6 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/bootmem.h>
21#include <linux/irq.h>
22
23#include <asm/atomic.h>
24#include <asm/page.h>
25#include <asm/machdep.h>
26
27#include "mach.h"
28
29
30/* There are 4 possible areas we can use:
31
32 IRAM (1MB) is fast for instruction fetches, but slow for data
33 DRAM (1020KB) is fast for data, but slow for instructions
34 ERAM is cached, so should be fast for both insns and data
35 SDRAM is external DRAM, similar to ERAM
36*/
37
38#define INIT_MEMC_FOR_SDRAM
39#define USE_SDRAM_AREA
40#define KERNEL_IN_SDRAM_AREA
41
42#define DCACHE_MODE V850E2_CACHE_BTSC_DCM_WT
43/*#define DCACHE_MODE V850E2_CACHE_BTSC_DCM_WB_ALLOC*/
44
45#ifdef USE_SDRAM_AREA
46#define RAM_START SDRAM_ADDR
47#define RAM_END (SDRAM_ADDR + SDRAM_SIZE)
48#else
49/* When we use DRAM, we need to account for the fact that the end of it is
50 used for R0_RAM. */
51#define RAM_START DRAM_ADDR
52#define RAM_END R0_RAM_ADDR
53#endif
54
55
56extern void memcons_setup (void);
57
58
59#ifdef KERNEL_IN_SDRAM_AREA
60#define EARLY_INIT_SECTION_ATTR __attribute__ ((section (".early.text")))
61#else
62#define EARLY_INIT_SECTION_ATTR __init
63#endif
64
65void EARLY_INIT_SECTION_ATTR mach_early_init (void)
66{
67 /* The sim85e2 simulator tracks `undefined' values, so to make
68 debugging easier, we begin by zeroing out all otherwise
69 undefined registers. This is not strictly necessary.
70
71 The registers we zero are:
72 Every GPR except:
73 stack-pointer (r3)
74 task-pointer (r16)
75 our return addr (r31)
76 Every system register (SPR) that we know about except for
77 the PSW (SPR 5), which we zero except for the
78 disable-interrupts bit.
79 */
80
81 /* GPRs */
82 asm volatile (" mov r0, r1 ; mov r0, r2 ");
83 asm volatile ("mov r0, r4 ; mov r0, r5 ; mov r0, r6 ; mov r0, r7 ");
84 asm volatile ("mov r0, r8 ; mov r0, r9 ; mov r0, r10; mov r0, r11");
85 asm volatile ("mov r0, r12; mov r0, r13; mov r0, r14; mov r0, r15");
86 asm volatile (" mov r0, r17; mov r0, r18; mov r0, r19");
87 asm volatile ("mov r0, r20; mov r0, r21; mov r0, r22; mov r0, r23");
88 asm volatile ("mov r0, r24; mov r0, r25; mov r0, r26; mov r0, r27");
89 asm volatile ("mov r0, r28; mov r0, r29; mov r0, r30");
90
91 /* SPRs */
92 asm volatile ("ldsr r0, 0; ldsr r0, 1; ldsr r0, 2; ldsr r0, 3");
93 asm volatile ("ldsr r0, 4");
94 asm volatile ("addi 0x20, r0, r1; ldsr r1, 5"); /* PSW */
95 asm volatile ("ldsr r0, 16; ldsr r0, 17; ldsr r0, 18; ldsr r0, 19");
96 asm volatile ("ldsr r0, 20");
97
98
99#ifdef INIT_MEMC_FOR_SDRAM
100 /* Settings for SDRAM controller. */
101 V850E2_VSWC = 0x0042;
102 V850E2_BSC = 0x9286;
103 V850E2_BCT(0) = 0xb000; /* was: 0 */
104 V850E2_BCT(1) = 0x000b;
105 V850E2_ASC = 0;
106 V850E2_LBS = 0xa9aa; /* was: 0xaaaa */
107 V850E2_LBC(0) = 0;
108 V850E2_LBC(1) = 0; /* was: 0x3 */
109 V850E2_BCC = 0;
110 V850E2_RFS(4) = 0x800a; /* was: 0xf109 */
111 V850E2_SCR(4) = 0x2091; /* was: 0x20a1 */
112 V850E2_RFS(3) = 0x800c;
113 V850E2_SCR(3) = 0x20a1;
114 V850E2_DWC(0) = 0;
115 V850E2_DWC(1) = 0;
116#endif
117
118#if 0
119#ifdef CONFIG_V850E2_SIM85E2S
120 /* Turn on the caches. */
121 V850E2_CACHE_BTSC = V850E2_CACHE_BTSC_ICM | DCACHE_MODE;
122 V850E2_BHC = 0x1010;
123#elif CONFIG_V850E2_SIM85E2C
124 V850E2_CACHE_BTSC |= (V850E2_CACHE_BTSC_ICM | V850E2_CACHE_BTSC_DCM0);
125 V850E2_BUSM_BHC = 0xFFFF;
126#endif
127#else
128 V850E2_BHC = 0;
129#endif
130
131 /* Don't stop the simulator at `halt' instructions. */
132 SIM85E2_NOTHAL = 1;
133
134 /* Ensure that the simulator halts on a panic, instead of going
135 into an infinite loop inside the panic function. */
136 panic_timeout = -1;
137}
138
139void __init mach_setup (char **cmdline)
140{
141 memcons_setup ();
142}
143
144void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len)
145{
146 *ram_start = RAM_START;
147 *ram_len = RAM_END - RAM_START;
148}
149
150void __init mach_sched_init (struct irqaction *timer_action)
151{
152 /* The simulator actually cycles through all interrupts
153 periodically. We just pay attention to IRQ0, which gives us
154 1/64 the rate of the periodic interrupts. */
155 setup_irq (0, timer_action);
156}
157
158void mach_gettimeofday (struct timespec *tv)
159{
160 tv->tv_sec = 0;
161 tv->tv_nsec = 0;
162}
163
164/* Interrupts */
165
166struct v850e_intc_irq_init irq_inits[] = {
167 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
168 { 0 }
169};
170struct hw_interrupt_type hw_itypes[1];
171
172/* Initialize interrupts. */
173void __init mach_init_irqs (void)
174{
175 v850e_intc_init_irq_types (irq_inits, hw_itypes);
176}
177
178
179void machine_halt (void) __attribute__ ((noreturn));
180void machine_halt (void)
181{
182 SIM85E2_SIMFIN = 0; /* Halt immediately. */
183 for (;;) {}
184}
185
186void machine_restart (char *__unused)
187{
188 machine_halt ();
189}
190
191void machine_power_off (void)
192{
193 machine_halt ();
194}
195
diff --git a/arch/v850/kernel/sim85e2.ld b/arch/v850/kernel/sim85e2.ld
deleted file mode 100644
index 7470fd2ffb5b..000000000000
--- a/arch/v850/kernel/sim85e2.ld
+++ /dev/null
@@ -1,36 +0,0 @@
1/* Linker script for the sim85e2c simulator, which is a verilog simulation of
2 the V850E2 NA85E2C cpu core (CONFIG_V850E2_SIM85E2C). */
3
4MEMORY {
5 /* 1MB of `instruction RAM', starting at 0.
6 Instruction fetches are much faster from IRAM than from DRAM. */
7 IRAM : ORIGIN = IRAM_ADDR, LENGTH = IRAM_SIZE
8
9 /* 1MB of `data RAM', below and contiguous with the I/O space.
10 Data fetches are much faster from DRAM than from IRAM. */
11 DRAM : ORIGIN = DRAM_ADDR, LENGTH = DRAM_SIZE
12
13 /* `external ram' (CS1 area), comes after IRAM. */
14 ERAM : ORIGIN = ERAM_ADDR, LENGTH = ERAM_SIZE
15
16 /* Dynamic RAM; uses memory controller. */
17 SDRAM : ORIGIN = SDRAM_ADDR, LENGTH = SDRAM_SIZE
18}
19
20SECTIONS {
21 .iram : {
22 INTV_CONTENTS
23 *arch/v850/kernel/head.o
24 *(.early.text)
25 } > IRAM
26 .dram : {
27 _memcons_output = . ;
28 . = . + 0x8000 ;
29 _memcons_output_end = . ;
30 } > DRAM
31 .sdram : {
32 /* We stick console output into a buffer here. */
33 RAMK_KRAM_CONTENTS
34 ROOT_FS_CONTENTS
35 } > SDRAM
36}
diff --git a/arch/v850/kernel/simcons.c b/arch/v850/kernel/simcons.c
deleted file mode 100644
index 9973596ae304..000000000000
--- a/arch/v850/kernel/simcons.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * arch/v850/kernel/simcons.c -- Console I/O for GDB v850e simulator
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/console.h>
16#include <linux/tty.h>
17#include <linux/tty_flip.h>
18#include <linux/tty_driver.h>
19#include <linux/init.h>
20
21#include <asm/poll.h>
22#include <asm/string.h>
23#include <asm/simsyscall.h>
24
25
26/* Low-level console. */
27
28static void simcons_write (struct console *co, const char *buf, unsigned len)
29{
30 V850_SIM_SYSCALL (write, 1, buf, len);
31}
32
33static int simcons_read (struct console *co, char *buf, unsigned len)
34{
35 return V850_SIM_SYSCALL (read, 0, buf, len);
36}
37
38static struct tty_driver *tty_driver;
39static struct tty_driver *simcons_device (struct console *c, int *index)
40{
41 *index = c->index;
42 return tty_driver;
43}
44
45static struct console simcons =
46{
47 .name = "simcons",
48 .write = simcons_write,
49 .read = simcons_read,
50 .device = simcons_device,
51 .flags = CON_PRINTBUFFER,
52 .index = -1,
53};
54
55/* Higher level TTY interface. */
56
57int simcons_tty_open (struct tty_struct *tty, struct file *filp)
58{
59 return 0;
60}
61
62int simcons_tty_write (struct tty_struct *tty,
63 const unsigned char *buf, int count)
64{
65 return V850_SIM_SYSCALL (write, 1, buf, count);
66}
67
68int simcons_tty_write_room (struct tty_struct *tty)
69{
70 /* Completely arbitrary. */
71 return 0x100000;
72}
73
74int simcons_tty_chars_in_buffer (struct tty_struct *tty)
75{
76 /* We have no buffer. */
77 return 0;
78}
79
80static const struct tty_operations ops = {
81 .open = simcons_tty_open,
82 .write = simcons_tty_write,
83 .write_room = simcons_tty_write_room,
84 .chars_in_buffer = simcons_tty_chars_in_buffer,
85};
86
87int __init simcons_tty_init (void)
88{
89 struct tty_driver *driver = alloc_tty_driver(1);
90 int err;
91 if (!driver)
92 return -ENOMEM;
93 driver->name = "simcons";
94 driver->major = TTY_MAJOR;
95 driver->minor_start = 64;
96 driver->type = TTY_DRIVER_TYPE_SYSCONS;
97 driver->init_termios = tty_std_termios;
98 tty_set_operations(driver, &ops);
99 err = tty_register_driver(driver);
100 if (err) {
101 put_tty_driver(driver);
102 return err;
103 }
104 tty_driver = driver;
105 return 0;
106}
107/* We use `late_initcall' instead of just `__initcall' as a workaround for
108 the fact that (1) simcons_tty_init can't be called before tty_init,
109 (2) tty_init is called via `module_init', (3) if statically linked,
110 module_init == device_init, and (4) there's no ordering of init lists.
111 We can do this easily because simcons is always statically linked, but
112 other tty drivers that depend on tty_init and which must use
113 `module_init' to declare their init routines are likely to be broken. */
114late_initcall(simcons_tty_init);
115
116/* Poll for input on the console, and if there's any, deliver it to the
117 tty driver. */
118void simcons_poll_tty (struct tty_struct *tty)
119{
120 char buf[32]; /* Not the nicest way to do it but I need it correct first */
121 int flip = 0, send_break = 0;
122 struct pollfd pfd;
123 pfd.fd = 0;
124 pfd.events = POLLIN;
125
126 if (V850_SIM_SYSCALL (poll, &pfd, 1, 0) > 0) {
127 if (pfd.revents & POLLIN) {
128 /* Real block hardware knows the transfer size before
129 transfer so the new tty buffering doesn't try to handle
130			   this rather weird, simulator-specific case well. */
131 int rd = V850_SIM_SYSCALL (read, 0, buf, 32);
132 if (rd > 0) {
133 tty_insert_flip_string(tty, buf, rd);
134 flip = 1;
135 } else
136 send_break = 1;
137 } else if (pfd.revents & POLLERR)
138 send_break = 1;
139 }
140
141 if (send_break) {
142 tty_insert_flip_char (tty, 0, TTY_BREAK);
143 flip = 1;
144 }
145
146 if (flip)
147 tty_schedule_flip (tty);
148}
149
150void simcons_poll_ttys (void)
151{
152 if (tty_driver && tty_driver->ttys[0])
153 simcons_poll_tty (tty_driver->ttys[0]);
154}
155
156void simcons_setup (void)
157{
158 V850_SIM_SYSCALL (make_raw, 0);
159 register_console (&simcons);
160 printk (KERN_INFO "Console: GDB V850E simulator stdio\n");
161}
diff --git a/arch/v850/kernel/syscalls.c b/arch/v850/kernel/syscalls.c
deleted file mode 100644
index 1a83daf8e24f..000000000000
--- a/arch/v850/kernel/syscalls.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * arch/v850/kernel/syscalls.c -- Various system-call definitions not
3 * defined in machine-independent code
4 *
5 * Copyright (C) 2001,02 NEC Corporation
6 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * This file was derived from the ppc version, arch/ppc/kernel/syscalls.c
13 * ... which was derived from "arch/i386/kernel/sys_i386.c" by Gary Thomas;
14 * modified by Cort Dougan (cort@cs.nmt.edu)
15 * and Paul Mackerras (paulus@cs.anu.edu.au).
16 */
17
18#include <linux/errno.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/syscalls.h>
22#include <linux/sem.h>
23#include <linux/msg.h>
24#include <linux/shm.h>
25#include <linux/stat.h>
26#include <linux/mman.h>
27#include <linux/sys.h>
28#include <linux/ipc.h>
29#include <linux/utsname.h>
30#include <linux/file.h>
31
32#include <asm/uaccess.h>
33#include <asm/unistd.h>
34
35/*
36 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
37 *
38 * This is really horribly ugly.
39 */
40int
41sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
42{
43 int version, ret;
44
45 version = call >> 16; /* hack for backward compatibility */
46 call &= 0xffff;
47
48 ret = -EINVAL;
49 switch (call) {
50 case SEMOP:
51 ret = sys_semop (first, (struct sembuf *)ptr, second);
52 break;
53 case SEMGET:
54 ret = sys_semget (first, second, third);
55 break;
56 case SEMCTL:
57 {
58 union semun fourth;
59
60 if (!ptr)
61 break;
62 if ((ret = access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
63 || (ret = get_user(fourth.__pad, (void **)ptr)))
64 break;
65 ret = sys_semctl (first, second, third, fourth);
66 break;
67 }
68 case MSGSND:
69 ret = sys_msgsnd (first, (struct msgbuf *) ptr, second, third);
70 break;
71 case MSGRCV:
72 switch (version) {
73 case 0: {
74 struct ipc_kludge tmp;
75
76 if (!ptr)
77 break;
78 if ((ret = access_ok(VERIFY_READ, ptr, sizeof(tmp)) ? 0 : -EFAULT)
79 || (ret = copy_from_user(&tmp,
80 (struct ipc_kludge *) ptr,
81 sizeof (tmp))))
82 break;
83 ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
84 third);
85 break;
86 }
87 default:
88 ret = sys_msgrcv (first, (struct msgbuf *) ptr,
89 second, fifth, third);
90 break;
91 }
92 break;
93 case MSGGET:
94 ret = sys_msgget ((key_t) first, second);
95 break;
96 case MSGCTL:
97 ret = sys_msgctl (first, second, (struct msqid_ds *) ptr);
98 break;
99 case SHMAT:
100 switch (version) {
101 default: {
102 ulong raddr;
103
104 if ((ret = access_ok(VERIFY_WRITE, (ulong*) third,
105 sizeof(ulong)) ? 0 : -EFAULT))
106 break;
107 ret = do_shmat (first, (char *) ptr, second, &raddr);
108 if (ret)
109 break;
110 ret = put_user (raddr, (ulong *) third);
111 break;
112 }
113 case 1: /* iBCS2 emulator entry point */
114 if (!segment_eq(get_fs(), get_ds()))
115 break;
116 ret = do_shmat (first, (char *) ptr, second,
117 (ulong *) third);
118 break;
119 }
120 break;
121 case SHMDT:
122 ret = sys_shmdt ((char *)ptr);
123 break;
124 case SHMGET:
125 ret = sys_shmget (first, second, third);
126 break;
127 case SHMCTL:
128 ret = sys_shmctl (first, second, (struct shmid_ds *) ptr);
129 break;
130 }
131
132 return ret;
133}
134
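
sys_ipc above splits its first argument into a 16-bit operation code and a version number kept in the upper 16 bits for backward compatibility. A tiny sketch of that packing and unpacking; the operation values below (1 and 21) are meant only as examples of SEMOP and SHMAT, not as a complete table:

#include <stdio.h>

static void unpack (unsigned int call)
{
	unsigned int version = call >> 16;	/* backward-compat version */
	unsigned int op = call & 0xffff;	/* the real IPC operation */

	printf ("call 0x%08x -> version %u, op %u\n", call, version, op);
}

int main (void)
{
	unpack (1);			/* e.g. SEMOP, plain version 0 */
	unpack ((1u << 16) | 21);	/* e.g. SHMAT via a versioned entry */
	return 0;
}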
135static inline unsigned long
136do_mmap2 (unsigned long addr, size_t len,
137 unsigned long prot, unsigned long flags,
138 unsigned long fd, unsigned long pgoff)
139{
140 struct file * file = NULL;
141 int ret = -EBADF;
142
143 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
144 if (! (flags & MAP_ANONYMOUS)) {
145 if (!(file = fget (fd)))
146 goto out;
147 }
148
149 down_write (&current->mm->mmap_sem);
150 ret = do_mmap_pgoff (file, addr, len, prot, flags, pgoff);
151 up_write (&current->mm->mmap_sem);
152 if (file)
153 fput (file);
154out:
155 return ret;
156}
157
158unsigned long sys_mmap2 (unsigned long addr, size_t len,
159 unsigned long prot, unsigned long flags,
160 unsigned long fd, unsigned long pgoff)
161{
162 return do_mmap2 (addr, len, prot, flags, fd, pgoff);
163}
164
165unsigned long sys_mmap (unsigned long addr, size_t len,
166 unsigned long prot, unsigned long flags,
167 unsigned long fd, off_t offset)
168{
169 int err = -EINVAL;
170
171 if (offset & ~PAGE_MASK)
172 goto out;
173
174 err = do_mmap2 (addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
175out:
176 return err;
177}
178
179/*
180 * Do a system call from kernel instead of calling sys_execve so we
181 * end up with proper pt_regs.
182 */
183int kernel_execve(const char *filename, char *const argv[], char *const envp[])
184{
185 register char *__a __asm__ ("r6") = filename;
186 register void *__b __asm__ ("r7") = argv;
187 register void *__c __asm__ ("r8") = envp;
188 register unsigned long __syscall __asm__ ("r12") = __NR_execve;
189 register unsigned long __ret __asm__ ("r10");
190 __asm__ __volatile__ ("trap 0"
191 : "=r" (__ret), "=r" (__syscall)
192 : "1" (__syscall), "r" (__a), "r" (__b), "r" (__c)
193 : "r1", "r5", "r11", "r13", "r14",
194 "r15", "r16", "r17", "r18", "r19");
195 return __ret;
196}
diff --git a/arch/v850/kernel/teg.c b/arch/v850/kernel/teg.c
deleted file mode 100644
index 699248f92aae..000000000000
--- a/arch/v850/kernel/teg.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * arch/v850/kernel/teg.c -- NB85E-TEG cpu chip
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/mm.h>
17#include <linux/swap.h>
18#include <linux/bootmem.h>
19#include <linux/irq.h>
20
21#include <asm/atomic.h>
22#include <asm/page.h>
23#include <asm/machdep.h>
24#include <asm/v850e_timer_d.h>
25
26#include "mach.h"
27
28void __init mach_sched_init (struct irqaction *timer_action)
29{
30 /* Select timer interrupt instead of external pin. */
31 TEG_ISS |= 0x1;
32 /* Start hardware timer. */
33 v850e_timer_d_configure (0, HZ);
34 /* Install timer interrupt handler. */
35 setup_irq (IRQ_INTCMD(0), timer_action);
36}
37
38static struct v850e_intc_irq_init irq_inits[] = {
39 { "IRQ", 0, NUM_CPU_IRQS, 1, 7 },
40 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 5 },
41 { "SER", IRQ_INTSER(0), IRQ_INTSER_NUM, 1, 3 },
42 { "SR", IRQ_INTSR(0), IRQ_INTSR_NUM, 1, 4 },
43 { "ST", IRQ_INTST(0), IRQ_INTST_NUM, 1, 5 },
44 { 0 }
45};
46#define NUM_IRQ_INITS (ARRAY_SIZE(irq_inits) - 1)
47
48static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
49
50/* Initialize MA chip interrupts. */
51void __init teg_init_irqs (void)
52{
53 v850e_intc_init_irq_types (irq_inits, hw_itypes);
54}
55
56/* Called before configuring an on-chip UART. */
57void teg_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud)
58{
59 /* Enable UART I/O pins instead of external interrupt pins, and
60 UART interrupts instead of external pin interrupts. */
61 TEG_ISS |= 0x4E;
62}
diff --git a/arch/v850/kernel/time.c b/arch/v850/kernel/time.c
deleted file mode 100644
index d810c93fe665..000000000000
--- a/arch/v850/kernel/time.c
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * linux/arch/v850/kernel/time.c -- Arch-dependent timer functions
3 *
4 * Copyright (C) 1991, 1992, 1995, 2001, 2002 Linus Torvalds
5 *
6 * This file contains the v850-specific time handling details.
7 * Most of the stuff is located in the machine specific files.
8 *
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
10 * "A Kernel Model for Precision Timekeeping" by Dave Mills
11 */
12
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/interrupt.h>
20#include <linux/time.h>
21#include <linux/timex.h>
22#include <linux/profile.h>
23
24#include <asm/io.h>
25
26#include "mach.h"
27
28#define TICK_SIZE (tick_nsec / 1000)
29
30/*
31 * timer_interrupt() needs to keep up the real-time clock,
32 * as well as call the "do_timer()" routine every clocktick
33 */
34static irqreturn_t timer_interrupt (int irq, void *dummy, struct pt_regs *regs)
35{
36#if 0
37 /* last time the cmos clock got updated */
38 static long last_rtc_update=0;
39#endif
40
41 /* may need to kick the hardware timer */
42 if (mach_tick)
43 mach_tick ();
44
45 do_timer (1);
46#ifndef CONFIG_SMP
47 update_process_times(user_mode(regs));
48#endif
49 profile_tick(CPU_PROFILING, regs);
50#if 0
51 /*
52 * If we have an externally synchronized Linux clock, then update
53 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
54 * called as close as possible to 500 ms before the new second starts.
55 */
56 if (ntp_synced() &&
57 xtime.tv_sec > last_rtc_update + 660 &&
58 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
59 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
60 if (set_rtc_mmss (xtime.tv_sec) == 0)
61 last_rtc_update = xtime.tv_sec;
62 else
63 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
64 }
65#ifdef CONFIG_HEARTBEAT
66 /* use power LED as a heartbeat instead -- much more useful
67 for debugging -- based on the version for PReP by Cort */
68 /* acts like an actual heart beat -- ie thump-thump-pause... */
69 if (mach_heartbeat) {
70 static unsigned cnt = 0, period = 0, dist = 0;
71
72 if (cnt == 0 || cnt == dist)
73 mach_heartbeat ( 1 );
74 else if (cnt == 7 || cnt == dist+7)
75 mach_heartbeat ( 0 );
76
77 if (++cnt > period) {
78 cnt = 0;
79 /* The hyperbolic function below modifies the heartbeat period
80 * length in dependency of the current (5min) load. It goes
81 * through the points f(0)=126, f(1)=86, f(5)=51,
82 * f(inf)->30. */
83 period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
84 dist = period / 4;
85 }
86 }
87#endif /* CONFIG_HEARTBEAT */
88#endif /* 0 */
89
90 return IRQ_HANDLED;
91}
92
93static int timer_dev_id;
94static struct irqaction timer_irqaction = {
95 .handler = timer_interrupt,
96 .flags = IRQF_DISABLED,
97 .mask = CPU_MASK_NONE,
98 .name = "timer",
99 .dev_id = &timer_dev_id,
100};
101
102void time_init (void)
103{
104 mach_gettimeofday (&xtime);
105 mach_sched_init (&timer_irqaction);
106}
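Aside (commentary, not part of the patch): the disabled CONFIG_HEARTBEAT block above derives the blink period from the 5-minute load average as period = 672/(5*load + 7) + 30. A quick sketch checking the values quoted in its comment; FSHIFT = 11 is an assumption matching the kernel's fixed-point avenrun scaling:

/* Illustrative check of the heartbeat period formula quoted above. */
#include <stdio.h>

#define FSHIFT 11	/* assumed: kernel fixed-point shift for avenrun */

static unsigned period(unsigned long avenrun0)	/* avenrun0 = load << FSHIFT */
{
	return ((672 << FSHIFT) / (5 * avenrun0 + (7 << FSHIFT))) + 30;
}

int main(void)
{
	printf("%u %u %u\n",
	       period(0 << FSHIFT),	/* 126 */
	       period(1 << FSHIFT),	/* 86 */
	       period(5 << FSHIFT));	/* 51 */
	return 0;
}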
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c
deleted file mode 100644
index 8d386a5dbc4a..000000000000
--- a/arch/v850/kernel/v850_ksyms.c
+++ /dev/null
@@ -1,51 +0,0 @@
1#include <linux/module.h>
2#include <linux/linkage.h>
3#include <linux/sched.h>
4#include <linux/string.h>
5#include <linux/mm.h>
6#include <linux/user.h>
7#include <linux/elfcore.h>
8#include <linux/in6.h>
9#include <linux/interrupt.h>
10
11#include <asm/pgalloc.h>
12#include <asm/irq.h>
13#include <asm/io.h>
14#include <asm/checksum.h>
15#include <asm/current.h>
16
17
18extern void *trap_table;
19EXPORT_SYMBOL (trap_table);
20
21/* platform dependent support */
22EXPORT_SYMBOL (kernel_thread);
23EXPORT_SYMBOL (__bug);
24
25/* Networking helper routines. */
26EXPORT_SYMBOL (csum_partial_copy_nocheck);
27EXPORT_SYMBOL (csum_partial_copy_from_user);
28EXPORT_SYMBOL (ip_compute_csum);
29EXPORT_SYMBOL (ip_fast_csum);
30
31/* string / mem functions */
32EXPORT_SYMBOL (memset);
33EXPORT_SYMBOL (memcpy);
34EXPORT_SYMBOL (memmove);
35
36/*
37 * libgcc functions - functions that are used internally by the
38 * compiler... (prototypes are not correct though, but that
39 * doesn't really matter since they're not versioned).
40 */
41extern void __ashldi3 (void);
42extern void __ashrdi3 (void);
43extern void __lshrdi3 (void);
44extern void __muldi3 (void);
45extern void __negdi2 (void);
46
47EXPORT_SYMBOL (__ashldi3);
48EXPORT_SYMBOL (__ashrdi3);
49EXPORT_SYMBOL (__lshrdi3);
50EXPORT_SYMBOL (__muldi3);
51EXPORT_SYMBOL (__negdi2);
diff --git a/arch/v850/kernel/v850e2_cache.c b/arch/v850/kernel/v850e2_cache.c
deleted file mode 100644
index 4570312c689c..000000000000
--- a/arch/v850/kernel/v850e2_cache.c
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * arch/v850/kernel/v850e2_cache.c -- Cache control for V850E2 cache
3 * memories
4 *
5 * Copyright (C) 2003 NEC Electronics Corporation
6 * Copyright (C) 2003 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#include <linux/mm.h>
16
17#include <asm/v850e2_cache.h>
18
19/* Cache operations we can do. The encoding corresponds directly to the
20 value we need to write into the COPR register. */
21enum cache_op {
22 OP_SYNC_IF_DIRTY = V850E2_CACHE_COPR_CFC(0), /* 000 */
23 OP_SYNC_IF_VALID = V850E2_CACHE_COPR_CFC(1), /* 001 */
24 OP_SYNC_IF_VALID_AND_CLEAR = V850E2_CACHE_COPR_CFC(3), /* 011 */
25 OP_WAY_CLEAR = V850E2_CACHE_COPR_CFC(4), /* 100 */
26 OP_FILL = V850E2_CACHE_COPR_CFC(5), /* 101 */
27 OP_CLEAR = V850E2_CACHE_COPR_CFC(6), /* 110 */
28 OP_CREATE_DIRTY = V850E2_CACHE_COPR_CFC(7) /* 111 */
29};
30
31/* Which cache to use. This encoding also corresponds directly to the
32 value we need to write into the COPR register. */
33enum cache {
34 ICACHE = 0,
35 DCACHE = V850E2_CACHE_COPR_LBSL
36};
37
38/* Returns ADDR rounded down to the beginning of its cache-line. */
39#define CACHE_LINE_ADDR(addr) \
40 ((addr) & ~(V850E2_CACHE_LINE_SIZE - 1))
41/* Returns END_ADDR rounded up to the `limit' of its cache-line. */
42#define CACHE_LINE_END_ADDR(end_addr) \
43 CACHE_LINE_ADDR(end_addr + (V850E2_CACHE_LINE_SIZE - 1))
44
45
46/* Low-level cache ops. */
47
48/* Apply cache-op OP to all entries in CACHE. */
49static inline void cache_op_all (enum cache_op op, enum cache cache)
50{
51 int cmd = op | cache | V850E2_CACHE_COPR_WSLE | V850E2_CACHE_COPR_STRT;
52
53 if (op != OP_WAY_CLEAR) {
54 /* The WAY_CLEAR operation does the whole way, but other
55 ops take begin-index and count params; we just indicate
56 the entire cache. */
57 V850E2_CACHE_CADL = 0;
58 V850E2_CACHE_CADH = 0;
59 V850E2_CACHE_CCNT = V850E2_CACHE_WAY_SIZE - 1;
60 }
61
62 V850E2_CACHE_COPR = cmd | V850E2_CACHE_COPR_WSL(0); /* way 0 */
63 V850E2_CACHE_COPR = cmd | V850E2_CACHE_COPR_WSL(1); /* way 1 */
64 V850E2_CACHE_COPR = cmd | V850E2_CACHE_COPR_WSL(2); /* way 2 */
65 V850E2_CACHE_COPR = cmd | V850E2_CACHE_COPR_WSL(3); /* way 3 */
66}
67
68/* Apply cache-op OP to all entries in CACHE covering addresses ADDR
69 through ADDR+LEN. */
70static inline void cache_op_range (enum cache_op op, u32 addr, u32 len,
71 enum cache cache)
72{
73 u32 start = CACHE_LINE_ADDR (addr);
74 u32 end = CACHE_LINE_END_ADDR (addr + len);
75 u32 num_lines = (end - start) >> V850E2_CACHE_LINE_SIZE_BITS;
76
77 V850E2_CACHE_CADL = start & 0xFFFF;
78 V850E2_CACHE_CADH = start >> 16;
79 V850E2_CACHE_CCNT = num_lines - 1;
80
81 V850E2_CACHE_COPR = op | cache | V850E2_CACHE_COPR_STRT;
82}
83
84
85/* High-level ops. */
86
87static void cache_exec_after_store_all (void)
88{
89 cache_op_all (OP_SYNC_IF_DIRTY, DCACHE);
90 cache_op_all (OP_WAY_CLEAR, ICACHE);
91}
92
93static void cache_exec_after_store_range (u32 start, u32 len)
94{
95 cache_op_range (OP_SYNC_IF_DIRTY, start, len, DCACHE);
96 cache_op_range (OP_CLEAR, start, len, ICACHE);
97}
98
99
100/* Exported functions. */
101
102void flush_icache (void)
103{
104 cache_exec_after_store_all ();
105}
106
107void flush_icache_range (unsigned long start, unsigned long end)
108{
109 cache_exec_after_store_range (start, end - start);
110}
111
112void flush_icache_page (struct vm_area_struct *vma, struct page *page)
113{
114 cache_exec_after_store_range (page_to_virt (page), PAGE_SIZE);
115}
116
117void flush_icache_user_range (struct vm_area_struct *vma, struct page *page,
118 unsigned long addr, int len)
119{
120 cache_exec_after_store_range (addr, len);
121}
122
123void flush_cache_sigtramp (unsigned long addr)
124{
125 /* For the exact size, see signal.c, but 16 bytes should be enough. */
126 cache_exec_after_store_range (addr, 16);
127}
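For reference (commentary, not part of the patch): cache_op_range() above rounds the requested range out to whole cache lines before programming CADL/CADH/CCNT. The same rounding arithmetic as a standalone sketch; the 16-byte line size is an assumed example value:

/* Illustrative only -- reproduces the line rounding used by cache_op_range. */
#include <stdio.h>

#define LINE_SIZE_BITS	4
#define LINE_SIZE	(1u << LINE_SIZE_BITS)	/* assumed 16-byte lines */

#define LINE_ADDR(a)	 ((a) & ~(LINE_SIZE - 1))		/* round down to line start */
#define LINE_END_ADDR(a) LINE_ADDR((a) + (LINE_SIZE - 1))	/* round up */

int main(void)
{
	unsigned addr = 0x1005, len = 40;
	unsigned start = LINE_ADDR(addr);
	unsigned end = LINE_END_ADDR(addr + len);
	unsigned lines = (end - start) >> LINE_SIZE_BITS;

	/* bytes 0x1005..0x102c touch lines 0x1000, 0x1010, 0x1020 -> 3 lines */
	printf("start=%#x end=%#x lines=%u\n", start, end, lines);
	return 0;
}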
diff --git a/arch/v850/kernel/v850e_cache.c b/arch/v850/kernel/v850e_cache.c
deleted file mode 100644
index ea3e51cfb259..000000000000
--- a/arch/v850/kernel/v850e_cache.c
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * arch/v850/kernel/v850e_cache.c -- Cache control for V850E cache memories
3 *
4 * Copyright (C) 2003 NEC Electronics Corporation
5 * Copyright (C) 2003 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14/* This file implements cache control for the rather simple cache used on
15 some V850E CPUs, specifically the NB85E/TEG CPU-core and the V850E/ME2
16 CPU. V850E2 processors have their own (better) cache
17 implementation. */
18
19#include <asm/entry.h>
20#include <asm/cacheflush.h>
21#include <asm/v850e_cache.h>
22
23#define WAIT_UNTIL_CLEAR(value) while (value) {}
24
25/* Set caching params via the BHC and DCC registers. */
26void v850e_cache_enable (u16 bhc, u16 icc, u16 dcc)
27{
28 unsigned long *r0_ram = (unsigned long *)R0_RAM_ADDR;
29 register u16 bhc_val asm ("r6") = bhc;
30
31 /* Read the instruction cache control register (ICC) and confirm
32 that bits 0 and 1 (TCLR0, TCLR1) are all cleared. */
33 WAIT_UNTIL_CLEAR (V850E_CACHE_ICC & 0x3);
34 V850E_CACHE_ICC = icc;
35
36#ifdef V850E_CACHE_DCC
37 /* Configure data-cache. */
38 V850E_CACHE_DCC = dcc;
39#endif /* V850E_CACHE_DCC */
40
41 /* Configure caching for various memory regions by writing the BHC
42 register. The documentation says that an instruction _cannot_
43 enable/disable caching for the memory region in which the
44 instruction itself exists; to work around this, we store
45 appropriate instructions into the on-chip RAM area (which is never
46 cached), and briefly jump there to do the work. */
47#ifdef V850E_CACHE_WRITE_IBS
48 *r0_ram++ = 0xf0720760; /* st.h r0, 0xfffff072[r0] */
49#endif
50 *r0_ram++ = 0xf06a3760; /* st.h r6, 0xfffff06a[r0] */
51 *r0_ram = 0x5640006b; /* jmp [r11] */
52
53 asm ("mov hilo(1f), r11; jmp [%1]; 1:;"
54 :: "r" (bhc_val), "r" (R0_RAM_ADDR) : "r11");
55}
56
57static void clear_icache (void)
58{
59 /* 1. Read the instruction cache control register (ICC) and confirm
60 that bits 0 and 1 (TCLR0, TCLR1) are all cleared. */
61 WAIT_UNTIL_CLEAR (V850E_CACHE_ICC & 0x3);
62
63 /* 2. Read the ICC register and confirm that bit 12 (LOCK0) is
64 cleared. Bit 13 of the ICC register is always cleared. */
65 WAIT_UNTIL_CLEAR (V850E_CACHE_ICC & 0x1000);
66
67 /* 3. Set the TCLR0 and TCLR1 bits of the ICC register as follows,
68 when clearing way 0 and way 1 at the same time:
69 (a) Set the TCLR0 and TCLR1 bits.
70 (b) Read the TCLR0 and TCLR1 bits to confirm that these bits
71 are cleared.
72 (c) Perform (a) and (b) above again. */
73 V850E_CACHE_ICC |= 0x3;
74 WAIT_UNTIL_CLEAR (V850E_CACHE_ICC & 0x3);
75
76#ifdef V850E_CACHE_REPEAT_ICC_WRITE
77 /* Do it again. */
78 V850E_CACHE_ICC |= 0x3;
79 WAIT_UNTIL_CLEAR (V850E_CACHE_ICC & 0x3);
80#endif
81}
82
83#ifdef V850E_CACHE_DCC
84/* Flush or clear (or both) the data cache, depending on the value of FLAGS;
85 the procedure is the same for both, just the control bits used differ (and
86 both may be performed simultaneously). */
87static void dcache_op (unsigned short flags)
88{
89 /* 1. Read the data cache control register (DCC) and confirm that bits
90 0, 1, 4, and 5 (DC00, DC01, DC04, DC05) are all cleared. */
91 WAIT_UNTIL_CLEAR (V850E_CACHE_DCC & 0x33);
92
93 /* 2. Clear DCC register bit 12 (DC12), bit 13 (DC13), or both
94 depending on the way for which tags are to be cleared. */
95 V850E_CACHE_DCC &= ~0xC000;
96
97 /* 3. Set DCC register bit 0 (DC00), bit 1 (DC01) or both depending on
98 the way for which tags are to be cleared.
99 ...
100 Set DCC register bit 4 (DC04), bit 5 (DC05), or both depending
101 on the way to be data flushed. */
102 V850E_CACHE_DCC |= flags;
103
104 /* 4. Read DCC register bit DC00, DC01 [DC04, DC05], or both depending
105 on the way for which tags were cleared [flushed] and confirm
106 that that bit is cleared. */
107 WAIT_UNTIL_CLEAR (V850E_CACHE_DCC & flags);
108}
109#endif /* V850E_CACHE_DCC */
110
111/* Flushes the contents of the dcache to memory. */
112static inline void flush_dcache (void)
113{
114#ifdef V850E_CACHE_DCC
115 /* We only need to do something if in write-back mode. */
116 if (V850E_CACHE_DCC & 0x0400)
117 dcache_op (0x30);
118#endif /* V850E_CACHE_DCC */
119}
120
121/* Flushes the contents of the dcache to memory, and then clears it. */
122static inline void clear_dcache (void)
123{
124#ifdef V850E_CACHE_DCC
125 /* We only need to do something if the dcache is enabled. */
126 if (V850E_CACHE_DCC & 0x0C00)
127 dcache_op (0x33);
128#endif /* V850E_CACHE_DCC */
129}
130
131/* Clears the dcache without flushing to memory first. */
132static inline void clear_dcache_no_flush (void)
133{
134#ifdef V850E_CACHE_DCC
135 /* We only need to do something if the dcache is enabled. */
136 if (V850E_CACHE_DCC & 0x0C00)
137 dcache_op (0x3);
138#endif /* V850E_CACHE_DCC */
139}
140
141static inline void cache_exec_after_store (void)
142{
143 flush_dcache ();
144 clear_icache ();
145}
146
147
148/* Exported functions. */
149
150void flush_icache (void)
151{
152 cache_exec_after_store ();
153}
154
155void flush_icache_range (unsigned long start, unsigned long end)
156{
157 cache_exec_after_store ();
158}
159
160void flush_icache_page (struct vm_area_struct *vma, struct page *page)
161{
162 cache_exec_after_store ();
163}
164
165void flush_icache_user_range (struct vm_area_struct *vma, struct page *page,
166 unsigned long adr, int len)
167{
168 cache_exec_after_store ();
169}
170
171void flush_cache_sigtramp (unsigned long addr)
172{
173 cache_exec_after_store ();
174}
diff --git a/arch/v850/kernel/v850e_intc.c b/arch/v850/kernel/v850e_intc.c
deleted file mode 100644
index 8d39a52ee6d1..000000000000
--- a/arch/v850/kernel/v850e_intc.c
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * arch/v850/kernel/v850e_intc.c -- V850E interrupt controller (INTC)
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/irq.h>
17
18#include <asm/v850e_intc.h>
19
20static void irq_nop (unsigned irq) { }
21
22static unsigned v850e_intc_irq_startup (unsigned irq)
23{
24 v850e_intc_clear_pending_irq (irq);
25 v850e_intc_enable_irq (irq);
26 return 0;
27}
28
29static void v850e_intc_end_irq (unsigned irq)
30{
31 unsigned long psw, temp;
32
33 /* Clear the highest-level bit in the In-service priority register
34 (ISPR), to allow this interrupt (or another of the same or
35 lesser priority) to happen again.
36
37 The `reti' instruction normally does this automatically when the
38 PSW bits EP and NP are zero, but we can't always rely on reti
39 being used consistently to return after an interrupt (another
40 process can be scheduled, for instance, which can delay the
41 associated reti for a long time, or this process may be being
42 single-stepped, which uses the `dbret' instruction to return
43 from the kernel).
44
45 We also set the PSW EP bit, which prevents reti from also
46 trying to modify the ISPR itself. */
47
48 /* Get PSW and disable interrupts. */
49 asm volatile ("stsr psw, %0; di" : "=r" (psw));
50 /* We don't want to do anything for NMIs (they don't use the ISPR). */
51 if (! (psw & 0xC0)) {
52 /* Transition to `trap' state, so that an eventual real
53 reti instruction won't modify the ISPR. */
54 psw |= 0x40;
55 /* Fake an interrupt return, which automatically clears the
56 appropriate bit in the ISPR. */
57 asm volatile ("mov hilo(1f), %0;"
58 "ldsr %0, eipc; ldsr %1, eipsw;"
59 "reti;"
60 "1:"
61 : "=&r" (temp) : "r" (psw));
62 }
63}
64
65/* Initialize HW_IRQ_TYPES for INTC-controlled irqs described in array
66 INITS (which is terminated by an entry with the name field == 0). */
67void __init v850e_intc_init_irq_types (struct v850e_intc_irq_init *inits,
68 struct hw_interrupt_type *hw_irq_types)
69{
70 struct v850e_intc_irq_init *init;
71 for (init = inits; init->name; init++) {
72 unsigned i;
73 struct hw_interrupt_type *hwit = hw_irq_types++;
74
75 hwit->typename = init->name;
76
77 hwit->startup = v850e_intc_irq_startup;
78 hwit->shutdown = v850e_intc_disable_irq;
79 hwit->enable = v850e_intc_enable_irq;
80 hwit->disable = v850e_intc_disable_irq;
81 hwit->ack = irq_nop;
82 hwit->end = v850e_intc_end_irq;
83
84 /* Initialize kernel IRQ infrastructure for this interrupt. */
85 init_irq_handlers(init->base, init->num, init->interval, hwit);
86
87 /* Set the interrupt priorities. */
88 for (i = 0; i < init->num; i++) {
89 unsigned irq = init->base + i * init->interval;
90
91 /* If the interrupt is currently enabled (all
92 interrupts are initially disabled), then
93 assume whoever enabled it has set things up
94 properly, and avoid messing with it. */
95 if (! v850e_intc_irq_enabled (irq))
96 /* This write also (1) disables the
97 interrupt, and (2) clears any pending
98 interrupts. */
99 V850E_INTC_IC (irq)
100 = (V850E_INTC_IC_PR (init->priority)
101 | V850E_INTC_IC_MK);
102 }
103 }
104}
diff --git a/arch/v850/kernel/v850e_timer_d.c b/arch/v850/kernel/v850e_timer_d.c
deleted file mode 100644
index d2a4ece2574c..000000000000
--- a/arch/v850/kernel/v850e_timer_d.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * include/asm-v850/v850e_timer_d.c -- `Timer D' component often used
3 * with V850E CPUs
4 *
5 * Copyright (C) 2001,02,03 NEC Electronics Corporation
6 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#include <linux/kernel.h>
16
17#include <asm/v850e_utils.h>
18#include <asm/v850e_timer_d.h>
19
20/* Start interval timer TIMER (0-3). The timer will issue the
21 corresponding INTCMD interrupt RATE times per second.
22 This function does not enable the interrupt. */
23void v850e_timer_d_configure (unsigned timer, unsigned rate)
24{
25 unsigned divlog2, count;
26
27 /* Calculate params for timer. */
28 if (! calc_counter_params (
29 V850E_TIMER_D_BASE_FREQ, rate,
30 V850E_TIMER_D_TMCD_CS_MIN, V850E_TIMER_D_TMCD_CS_MAX, 16,
31 &divlog2, &count))
32 printk (KERN_WARNING
33 "Cannot find interval timer %d setting suitable"
34 " for rate of %dHz.\n"
35 "Using rate of %dHz instead.\n",
36 timer, rate,
37 (V850E_TIMER_D_BASE_FREQ >> divlog2) >> 16);
38
39 /* Do the actual hardware timer initialization: */
40
41 /* Enable timer. */
42 V850E_TIMER_D_TMCD(timer) = V850E_TIMER_D_TMCD_CAE;
43 /* Set clock divider. */
44 V850E_TIMER_D_TMCD(timer)
45 = V850E_TIMER_D_TMCD_CAE
46 | V850E_TIMER_D_TMCD_CS(divlog2);
47 /* Set timer compare register. */
48 V850E_TIMER_D_CMD(timer) = count;
49 /* Start counting. */
50 V850E_TIMER_D_TMCD(timer)
51 = V850E_TIMER_D_TMCD_CAE
52 | V850E_TIMER_D_TMCD_CS(divlog2)
53 | V850E_TIMER_D_TMCD_CE;
54}
diff --git a/arch/v850/kernel/v850e_utils.c b/arch/v850/kernel/v850e_utils.c
deleted file mode 100644
index e6807ef8dee6..000000000000
--- a/arch/v850/kernel/v850e_utils.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * include/asm-v850/v850e_utils.h -- Utility functions associated with
3 * V850E CPUs
4 *
5 * Copyright (C) 2001,02,03 NEC Electronics Corporation
6 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#include <asm/v850e_utils.h>
16
17/* Calculate counter clock-divider and count values to attain the
18 desired frequency RATE from the base frequency BASE_FREQ. The
19 counter is expected to have a clock-divider, which can divide the
20 system cpu clock by a power of two value from MIN_DIVLOG2 to
 21 MAX_DIVLOG2, and a word-size of COUNTER_SIZE bits (the counter
22 counts up and resets whenever it's equal to the compare register,
23 generating an interrupt or whatever when it does so). The returned
24 values are: *DIVLOG2 -- log2 of the desired clock divider and *COUNT
25 -- the counter compare value to use. Returns true if it was possible
26 to find a reasonable value, otherwise false (and the other return
27 values will be set to be as good as possible). */
28int calc_counter_params (unsigned long base_freq,
29 unsigned long rate,
30 unsigned min_divlog2, unsigned max_divlog2,
31 unsigned counter_size,
32 unsigned *divlog2, unsigned *count)
33{
34 unsigned _divlog2;
35 int ok = 0;
36
37 /* Find the lowest clock divider setting that can represent RATE. */
38 for (_divlog2 = min_divlog2; _divlog2 <= max_divlog2; _divlog2++) {
39 /* Minimum interrupt rate possible using this divider. */
40 unsigned min_int_rate
41 = (base_freq >> _divlog2) >> counter_size;
42
43 if (min_int_rate <= rate) {
44 /* This setting is the highest resolution
 45 setting that's slow enough to attain
46 RATE interrupts per second, so use it. */
47 ok = 1;
48 break;
49 }
50 }
51
52 if (_divlog2 > max_divlog2)
53 /* Can't find correct setting. */
54 _divlog2 = max_divlog2;
55
56 if (divlog2)
57 *divlog2 = _divlog2;
58 if (count)
59 *count = ((base_freq >> _divlog2) + rate/2) / rate;
60
61 return ok;
62}
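For reference (commentary, not part of the patch): calc_counter_params() above picks the smallest power-of-two divider whose minimum reachable rate still covers the requested rate, then rounds the compare value to the nearest count. A self-contained sketch with made-up figures (20 MHz base clock, 100 Hz tick, 16-bit counter, dividers 2^0..2^7):

/* Illustrative restatement of the divider/count selection above;
 * the 20 MHz / 100 Hz / 16-bit numbers are assumptions for the example. */
#include <stdio.h>

int main(void)
{
	unsigned long base_freq = 20000000, rate = 100;
	unsigned counter_size = 16, min_div = 0, max_div = 7;
	unsigned divlog2;
	unsigned long count;

	for (divlog2 = min_div; divlog2 <= max_div; divlog2++) {
		/* lowest rate this divider can still represent */
		unsigned long min_rate = (base_freq >> divlog2) >> counter_size;
		if (min_rate <= rate)
			break;
	}
	if (divlog2 > max_div)
		divlog2 = max_div;	/* best effort, as in the original */

	count = ((base_freq >> divlog2) + rate / 2) / rate;
	printf("divlog2=%u count=%lu\n", divlog2, count);	/* divlog2=2 count=50000 */
	return 0;
}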
diff --git a/arch/v850/kernel/vmlinux.lds.S b/arch/v850/kernel/vmlinux.lds.S
deleted file mode 100644
index d08cd1d27f27..000000000000
--- a/arch/v850/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,306 +0,0 @@
1/*
2 * arch/v850/vmlinux.lds.S -- kernel linker script for v850 platforms
3 *
4 * Copyright (C) 2002,03,04,05 NEC Electronics Corporation
5 * Copyright (C) 2002,03,04,05 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14
15#define VMLINUX_SYMBOL(_sym_) _##_sym_
16#include <asm-generic/vmlinux.lds.h>
17
18/* For most platforms, this will define useful things like RAM addr/size. */
19#include <asm/machdep.h>
20
21
22/* The following macros contain the usual definitions for various data areas.
23 The prefix `RAMK_' is used to indicate macros suitable for kernels loaded
24 into RAM, and similarly `ROMK_' for ROM-resident kernels. Note that all
25 symbols are prefixed with an extra `_' for compatibility with the v850
26 toolchain. */
27
28
29/* Interrupt vectors. */
30#define INTV_CONTENTS \
31 . = ALIGN (0x10) ; \
32 __intv_start = . ; \
33 *(.intv.reset) /* Reset vector */ \
34 . = __intv_start + 0x10 ; \
35 *(.intv.common) /* Vectors common to all v850e proc */\
36 . = __intv_start + 0x80 ; \
37 *(.intv.mach) /* Machine-specific int. vectors. */ \
38 __intv_end = . ;
39
40#define RODATA_CONTENTS \
41 . = ALIGN (16) ; \
42 *(.rodata) *(.rodata.*) \
43 *(__vermagic) /* Kernel version magic */ \
44 *(.rodata1) \
45 /* PCI quirks */ \
46 ___start_pci_fixups_early = . ; \
47 *(.pci_fixup_early) \
48 ___end_pci_fixups_early = . ; \
49 ___start_pci_fixups_header = . ; \
50 *(.pci_fixup_header) \
51 ___end_pci_fixups_header = . ; \
52 ___start_pci_fixups_final = . ; \
53 *(.pci_fixup_final) \
54 ___end_pci_fixups_final = . ; \
55 ___start_pci_fixups_enable = . ; \
56 *(.pci_fixup_enable) \
57 ___end_pci_fixups_enable = . ; \
58 /* Kernel symbol table: Normal symbols */ \
59 ___start___ksymtab = .; \
60 *(__ksymtab) \
61 ___stop___ksymtab = .; \
62 /* Kernel symbol table: GPL-only symbols */ \
63 ___start___ksymtab_gpl = .; \
64 *(__ksymtab_gpl) \
65 ___stop___ksymtab_gpl = .; \
66 /* Kernel symbol table: GPL-future symbols */ \
67 ___start___ksymtab_gpl_future = .; \
68 *(__ksymtab_gpl_future) \
69 ___stop___ksymtab_gpl_future = .; \
70 /* Kernel symbol table: strings */ \
71 *(__ksymtab_strings) \
72 /* Kernel symbol table: Normal symbols */ \
73 ___start___kcrctab = .; \
74 *(__kcrctab) \
75 ___stop___kcrctab = .; \
76 /* Kernel symbol table: GPL-only symbols */ \
77 ___start___kcrctab_gpl = .; \
78 *(__kcrctab_gpl) \
79 ___stop___kcrctab_gpl = .; \
80 /* Kernel symbol table: GPL-future symbols */ \
81 ___start___kcrctab_gpl_future = .; \
82 *(__kcrctab_gpl_future) \
83 ___stop___kcrctab_gpl_future = .; \
84 /* Built-in module parameters */ \
85 . = ALIGN (4) ; \
86 ___start___param = .; \
87 *(__param) \
88 ___stop___param = .;
89
90
91/* Kernel text segment, and some constant data areas. */
92#define TEXT_CONTENTS \
93 _text = .; \
94 __stext = . ; \
95 TEXT_TEXT \
96 SCHED_TEXT \
97 *(.exit.text) /* 2.5 convention */ \
98 *(.text.exit) /* 2.4 convention */ \
99 *(.text.lock) \
100 *(.exitcall.exit) \
101 __real_etext = . ; /* There may be data after here. */ \
102 RODATA_CONTENTS \
103 . = ALIGN (4) ; \
104 *(.call_table_data) \
105 *(.call_table_text) \
106 . = ALIGN (16) ; /* Exception table. */ \
107 ___start___ex_table = . ; \
108 *(__ex_table) \
109 ___stop___ex_table = . ; \
110 . = ALIGN (4) ; \
111 __etext = . ;
112
113/* Kernel data segment. */
114#define DATA_CONTENTS \
115 __sdata = . ; \
116 DATA_DATA \
117 EXIT_DATA /* 2.5 convention */ \
118 *(.data.exit) /* 2.4 convention */ \
119 . = ALIGN (16) ; \
120 *(.data.cacheline_aligned) \
121 . = ALIGN (0x2000) ; \
122 *(.data.init_task) \
123 . = ALIGN (0x2000) ; \
124 __edata = . ;
125
126/* Kernel BSS segment. */
127#define BSS_CONTENTS \
128 __sbss = . ; \
129 *(.bss) \
130 *(COMMON) \
131 . = ALIGN (4) ; \
132 __init_stack_end = . ; \
133 __ebss = . ;
134
135/* `initcall' tables. */
136#define INITCALL_CONTENTS \
137 . = ALIGN (16) ; \
138 ___setup_start = . ; \
139 *(.init.setup) /* 2.5 convention */ \
140 *(.setup.init) /* 2.4 convention */ \
141 ___setup_end = . ; \
142 ___initcall_start = . ; \
143 *(.initcall.init) \
144 INITCALLS \
145 . = ALIGN (4) ; \
146 ___initcall_end = . ; \
147 ___con_initcall_start = .; \
148 *(.con_initcall.init) \
149 ___con_initcall_end = .;
150
151/* Contents of `init' section for a kernel that's loaded into RAM. */
152#define RAMK_INIT_CONTENTS \
153 RAMK_INIT_CONTENTS_NO_END \
154 __init_end = . ;
155/* Same as RAMK_INIT_CONTENTS, but doesn't define the `__init_end' symbol. */
156#define RAMK_INIT_CONTENTS_NO_END \
157 . = ALIGN (4096) ; \
158 __init_start = . ; \
159 __sinittext = .; \
160 INIT_TEXT /* 2.5 convention */ \
161 __einittext = .; \
162 INIT_DATA \
163 *(.text.init) /* 2.4 convention */ \
164 *(.data.init) \
165 INITCALL_CONTENTS \
166 INITRAMFS_CONTENTS
167
168/* The contents of `init' section for a ROM-resident kernel which
169 should go into RAM. */
170#define ROMK_INIT_RAM_CONTENTS \
171 . = ALIGN (4096) ; \
172 __init_start = . ; \
173 INIT_DATA /* 2.5 convention */ \
174 *(.data.init) /* 2.4 convention */ \
175 __init_end = . ; \
176 . = ALIGN (4096) ;
177
178/* The contents of `init' section for a ROM-resident kernel which
179 should go into ROM. */
180#define ROMK_INIT_ROM_CONTENTS \
181 _sinittext = .; \
182 INIT_TEXT /* 2.5 convention */ \
183 _einittext = .; \
184 *(.text.init) /* 2.4 convention */ \
185 INITCALL_CONTENTS \
186 INITRAMFS_CONTENTS
187
188/* A root filesystem image, for kernels with an embedded root filesystem. */
189#define ROOT_FS_CONTENTS \
190 __root_fs_image_start = . ; \
191 *(.root) \
192 __root_fs_image_end = . ;
193
194#ifdef CONFIG_BLK_DEV_INITRD
195/* The initramfs archive. */
196#define INITRAMFS_CONTENTS \
197 . = ALIGN (4) ; \
198 ___initramfs_start = . ; \
199 *(.init.ramfs) \
200 ___initramfs_end = . ;
201#endif
202
203/* Where the initial bootmap (bitmap for the boot-time memory allocator)
 204 should be placed. */
205#define BOOTMAP_CONTENTS \
206 . = ALIGN (4096) ; \
207 __bootmap = . ; \
208 . = . + 4096 ; /* enough for 128MB. */
209
210/* The contents of a `typical' kram area for a kernel in RAM. */
211#define RAMK_KRAM_CONTENTS \
212 __kram_start = . ; \
213 TEXT_CONTENTS \
214 DATA_CONTENTS \
215 BSS_CONTENTS \
216 RAMK_INIT_CONTENTS \
217 __kram_end = . ; \
218 BOOTMAP_CONTENTS
219
220
221/* Define output sections normally used for a ROM-resident kernel.
222 ROM and RAM should be appropriate memory areas to use for kernel
223 ROM and RAM data. This assumes that ROM starts at 0 (and thus can
224 hold the interrupt vectors). */
225#define ROMK_SECTIONS(ROM, RAM) \
226 .rom : { \
227 INTV_CONTENTS \
228 TEXT_CONTENTS \
229 ROMK_INIT_ROM_CONTENTS \
230 ROOT_FS_CONTENTS \
231 } > ROM \
232 \
233 __rom_copy_src_start = . ; \
234 \
235 .data : { \
236 __kram_start = . ; \
237 __rom_copy_dst_start = . ; \
238 DATA_CONTENTS \
239 ROMK_INIT_RAM_CONTENTS \
240 __rom_copy_dst_end = . ; \
241 } > RAM AT> ROM \
242 \
243 .bss ALIGN (4) : { \
244 BSS_CONTENTS \
245 __kram_end = . ; \
246 BOOTMAP_CONTENTS \
247 } > RAM
248
249
250/* The 32-bit variable `jiffies' is just the lower 32-bits of `jiffies_64'. */
251_jiffies = _jiffies_64 ;
252
253
254/* Include an appropriate platform-dependent linker-script (which
255 usually should use the above macros to do most of the work). */
256
257#ifdef CONFIG_V850E_SIM
258# include "sim.ld"
259#endif
260
261#ifdef CONFIG_V850E2_SIM85E2
262# include "sim85e2.ld"
263#endif
264
265#ifdef CONFIG_V850E2_FPGA85E2C
266# include "fpga85e2c.ld"
267#endif
268
269#ifdef CONFIG_V850E2_ANNA
270# ifdef CONFIG_ROM_KERNEL
271# include "anna-rom.ld"
272# else
273# include "anna.ld"
274# endif
275#endif
276
277#ifdef CONFIG_V850E_AS85EP1
278# ifdef CONFIG_ROM_KERNEL
279# include "as85ep1-rom.ld"
280# else
281# include "as85ep1.ld"
282# endif
283#endif
284
285#ifdef CONFIG_RTE_CB_MA1
286# ifdef CONFIG_ROM_KERNEL
287# include "rte_ma1_cb-rom.ld"
288# else
289# include "rte_ma1_cb.ld"
290# endif
291#endif
292
293#ifdef CONFIG_RTE_CB_NB85E
294# ifdef CONFIG_ROM_KERNEL
295# include "rte_nb85e_cb-rom.ld"
296# elif defined(CONFIG_RTE_CB_MULTI)
297# include "rte_nb85e_cb-multi.ld"
298# else
299# include "rte_nb85e_cb.ld"
300# endif
301#endif
302
303#ifdef CONFIG_RTE_CB_ME2
304# include "rte_me2_cb.ld"
305#endif
306
diff --git a/arch/v850/lib/Makefile b/arch/v850/lib/Makefile
deleted file mode 100644
index 1c78b728a117..000000000000
--- a/arch/v850/lib/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1#
2# arch/v850/lib/Makefile
3#
4
5lib-y = ashrdi3.o ashldi3.o lshrdi3.o muldi3.o negdi2.o \
6 checksum.o memcpy.o memset.o
diff --git a/arch/v850/lib/ashldi3.c b/arch/v850/lib/ashldi3.c
deleted file mode 100644
index 9e792d53f0e4..000000000000
--- a/arch/v850/lib/ashldi3.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/* ashldi3.c extracted from gcc-2.95.2/libgcc2.c which is: */
2/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
3
4This file is part of GNU CC.
5
6GNU CC is free software; you can redistribute it and/or modify
7it under the terms of the GNU General Public License as published by
8the Free Software Foundation; either version 2, or (at your option)
9any later version.
10
11GNU CC is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
17along with GNU CC; see the file COPYING. If not, write to
18the Free Software Foundation, 59 Temple Place - Suite 330,
19Boston, MA 02111-1307, USA. */
20
21#define BITS_PER_UNIT 8
22
23typedef int SItype __attribute__ ((mode (SI)));
24typedef unsigned int USItype __attribute__ ((mode (SI)));
25typedef int DItype __attribute__ ((mode (DI)));
26typedef int word_type __attribute__ ((mode (__word__)));
27
28struct DIstruct {SItype high, low;};
29
30typedef union
31{
32 struct DIstruct s;
33 DItype ll;
34} DIunion;
35
36DItype
37__ashldi3 (DItype u, word_type b)
38{
39 DIunion w;
40 word_type bm;
41 DIunion uu;
42
43 if (b == 0)
44 return u;
45
46 uu.ll = u;
47
48 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
49 if (bm <= 0)
50 {
51 w.s.low = 0;
52 w.s.high = (USItype)uu.s.low << -bm;
53 }
54 else
55 {
56 USItype carries = (USItype)uu.s.low >> bm;
57 w.s.low = (USItype)uu.s.low << b;
58 w.s.high = ((USItype)uu.s.high << b) | carries;
59 }
60
61 return w.ll;
62}
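For reference (commentary, not part of the patch): __ashldi3 above builds a 64-bit left shift from two 32-bit word shifts, carrying the bits that cross the word boundary; shifts of 32 or more take the other branch. A host-side check of the common case against a native 64-bit shift:

/* Illustrative check of the two-word left-shift decomposition above,
 * restricted to 0 < b < 32 (the 'else' branch of __ashldi3). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t shl64(uint32_t lo, uint32_t hi, unsigned b)
{
	uint32_t carries = lo >> (32 - b);	/* bits moving into the high word */
	uint32_t new_lo = lo << b;
	uint32_t new_hi = (hi << b) | carries;
	return ((uint64_t)new_hi << 32) | new_lo;
}

int main(void)
{
	uint64_t u = 0x0123456789abcdefULL;
	unsigned b;

	for (b = 1; b < 32; b++)
		assert(shl64((uint32_t)u, (uint32_t)(u >> 32), b) == u << b);
	printf("ok\n");
	return 0;
}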
diff --git a/arch/v850/lib/ashrdi3.c b/arch/v850/lib/ashrdi3.c
deleted file mode 100644
index 78efb65e315a..000000000000
--- a/arch/v850/lib/ashrdi3.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/* ashrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
2/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
3
4This file is part of GNU CC.
5
6GNU CC is free software; you can redistribute it and/or modify
7it under the terms of the GNU General Public License as published by
8the Free Software Foundation; either version 2, or (at your option)
9any later version.
10
11GNU CC is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
17along with GNU CC; see the file COPYING. If not, write to
18the Free Software Foundation, 59 Temple Place - Suite 330,
19Boston, MA 02111-1307, USA. */
20
21#define BITS_PER_UNIT 8
22
23typedef int SItype __attribute__ ((mode (SI)));
24typedef unsigned int USItype __attribute__ ((mode (SI)));
25typedef int DItype __attribute__ ((mode (DI)));
26typedef int word_type __attribute__ ((mode (__word__)));
27
28struct DIstruct {SItype high, low;};
29
30typedef union
31{
32 struct DIstruct s;
33 DItype ll;
34} DIunion;
35
36DItype
37__ashrdi3 (DItype u, word_type b)
38{
39 DIunion w;
40 word_type bm;
41 DIunion uu;
42
43 if (b == 0)
44 return u;
45
46 uu.ll = u;
47
48 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
49 if (bm <= 0)
50 {
51 /* w.s.high = 1..1 or 0..0 */
52 w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
53 w.s.low = uu.s.high >> -bm;
54 }
55 else
56 {
57 USItype carries = (USItype)uu.s.high << bm;
58 w.s.high = uu.s.high >> b;
59 w.s.low = ((USItype)uu.s.low >> b) | carries;
60 }
61
62 return w.ll;
63}
diff --git a/arch/v850/lib/checksum.c b/arch/v850/lib/checksum.c
deleted file mode 100644
index 042158dfe17a..000000000000
--- a/arch/v850/lib/checksum.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * MIPS specific IP/TCP/UDP checksumming routines
7 *
8 * Authors: Ralf Baechle, <ralf@waldorf-gmbh.de>
9 * Lots of code moved from tcp.c and ip.c; see those files
10 * for more names.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * $Id: checksum.c,v 1.1 2002/09/28 14:58:40 gerg Exp $
18 */
19#include <net/checksum.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23#include <asm/string.h>
24#include <asm/uaccess.h>
25
26static inline unsigned short from32to16 (unsigned long sum)
27{
28 unsigned int result;
29 /*
30 %0 %1
31 hsw %1, %0 H L L H
32 add %1, %0 H L H+L+C H+L
33 */
34 asm ("hsw %1, %0; add %1, %0" : "=&r" (result) : "r" (sum));
35 return result >> 16;
36}
37
38static inline unsigned int do_csum(const unsigned char * buff, int len)
39{
40 int odd, count;
41 unsigned int result = 0;
42
43 if (len <= 0)
44 goto out;
45 odd = 1 & (unsigned long) buff;
46 if (odd) {
47 result = be16_to_cpu(*buff);
48 len--;
49 buff++;
50 }
51 count = len >> 1; /* nr of 16-bit words.. */
52 if (count) {
53 if (2 & (unsigned long) buff) {
54 result += *(unsigned short *) buff;
55 count--;
56 len -= 2;
57 buff += 2;
58 }
59 count >>= 1; /* nr of 32-bit words.. */
60 if (count) {
61 unsigned int carry = 0;
62 do {
63 unsigned int w = *(unsigned int *) buff;
64 count--;
65 buff += 4;
66 result += carry;
67 result += w;
68 carry = (w > result);
69 } while (count);
70 result += carry;
71 result = (result & 0xffff) + (result >> 16);
72 }
73 if (len & 2) {
74 result += *(unsigned short *) buff;
75 buff += 2;
76 }
77 }
78 if (len & 1)
79 result += le16_to_cpu(*buff);
80 result = from32to16(result);
81 if (odd)
82 result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
83out:
84 return result;
85}
86
87/*
88 * This is a version of ip_compute_csum() optimized for IP headers,
89 * which always checksum on 4 octet boundaries.
90 */
91__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
92{
93 return (__force __sum16)~do_csum(iph,ihl*4);
94}
95
96/*
97 * this routine is used for miscellaneous IP-like checksums, mainly
98 * in icmp.c
99 */
100__sum16 ip_compute_csum(const void *buff, int len)
101{
102 return (__force __sum16)~do_csum(buff,len);
103}
104
105/*
106 * computes a partial checksum, e.g. for TCP/UDP fragments
107 */
108__wsum csum_partial(const void *buff, int len, __wsum sum)
109{
110 unsigned int result = do_csum(buff, len);
111
112 /* add in old sum, and carry.. */
113 result += (__force u32)sum;
114 if ((__force u32)sum > result)
115 result += 1;
116 return (__force __wsum)result;
117}
118
119EXPORT_SYMBOL(csum_partial);
120
121/*
122 * copy while checksumming, otherwise like csum_partial
123 */
124__wsum csum_partial_copy_nocheck(const void *src, void *dst,
125 int len, __wsum sum)
126{
127 /*
128 * It's 2:30 am and I don't feel like doing it real ...
129 * This is lots slower than the real thing (tm)
130 */
131 sum = csum_partial(src, len, sum);
132 memcpy(dst, src, len);
133
134 return sum;
135}
136
137/*
138 * Copy from userspace and compute checksum. If we catch an exception
139 * then zero the rest of the buffer.
140 */
141__wsum csum_partial_copy_from_user (const void *src,
142 void *dst,
143 int len, __wsum sum,
144 int *err_ptr)
145{
146 int missing;
147
148 missing = copy_from_user(dst, src, len);
149 if (missing) {
150 memset(dst + len - missing, 0, missing);
151 *err_ptr = -EFAULT;
152 }
153
154 return csum_partial(dst, len, sum);
155}
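For reference (commentary, not part of the patch): do_csum() above accumulates 32-bit partial sums and then folds them to 16 bits with end-around carry; from32to16() does that fold with a halfword-swap-and-add. The equivalent fold in plain C:

/* Illustrative C version of the 32->16 bit ones'-complement fold. */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold32to16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add high half into low half */
	sum += sum >> 16;			/* absorb a possible carry */
	return (uint16_t)sum;
}

int main(void)
{
	/* 0x0001fffe -> 0x0001 + 0xfffe = 0xffff */
	printf("%#x\n", (unsigned)fold32to16(0x0001fffeu));
	return 0;
}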
diff --git a/arch/v850/lib/lshrdi3.c b/arch/v850/lib/lshrdi3.c
deleted file mode 100644
index 93b1cb6fdee8..000000000000
--- a/arch/v850/lib/lshrdi3.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/* lshrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
2/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
3
4This file is part of GNU CC.
5
6GNU CC is free software; you can redistribute it and/or modify
7it under the terms of the GNU General Public License as published by
8the Free Software Foundation; either version 2, or (at your option)
9any later version.
10
11GNU CC is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
17along with GNU CC; see the file COPYING. If not, write to
18the Free Software Foundation, 59 Temple Place - Suite 330,
19Boston, MA 02111-1307, USA. */
20
21#define BITS_PER_UNIT 8
22
23typedef int SItype __attribute__ ((mode (SI)));
24typedef unsigned int USItype __attribute__ ((mode (SI)));
25typedef int DItype __attribute__ ((mode (DI)));
26typedef int word_type __attribute__ ((mode (__word__)));
27
28struct DIstruct {SItype high, low;};
29
30typedef union
31{
32 struct DIstruct s;
33 DItype ll;
34} DIunion;
35
36DItype
37__lshrdi3 (DItype u, word_type b)
38{
39 DIunion w;
40 word_type bm;
41 DIunion uu;
42
43 if (b == 0)
44 return u;
45
46 uu.ll = u;
47
48 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
49 if (bm <= 0)
50 {
51 w.s.high = 0;
52 w.s.low = (USItype)uu.s.high >> -bm;
53 }
54 else
55 {
56 USItype carries = (USItype)uu.s.high << bm;
57 w.s.high = (USItype)uu.s.high >> b;
58 w.s.low = ((USItype)uu.s.low >> b) | carries;
59 }
60
61 return w.ll;
62}
diff --git a/arch/v850/lib/memcpy.c b/arch/v850/lib/memcpy.c
deleted file mode 100644
index 492847b3e612..000000000000
--- a/arch/v850/lib/memcpy.c
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * arch/v850/lib/memcpy.c -- Memory copying
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/types.h>
15#include <asm/string.h>
16
17#define CHUNK_SIZE 32 /* bytes */
18#define CHUNK_ALIGNED(addr) (((unsigned long)addr & 0x3) == 0)
19
20/* Note that this macro uses 8 call-clobbered registers (not including
21 R1), which are few enough so that the following functions don't need
22 to spill anything to memory. It also uses R1, which is nominally
23 reserved for the assembler, but here it should be OK. */
24#define COPY_CHUNK(src, dst) \
25 asm ("mov %0, ep;" \
26 "sld.w 0[ep], r1; sld.w 4[ep], r12;" \
27 "sld.w 8[ep], r13; sld.w 12[ep], r14;" \
28 "sld.w 16[ep], r15; sld.w 20[ep], r17;" \
29 "sld.w 24[ep], r18; sld.w 28[ep], r19;" \
30 "mov %1, ep;" \
31 "sst.w r1, 0[ep]; sst.w r12, 4[ep];" \
32 "sst.w r13, 8[ep]; sst.w r14, 12[ep];" \
33 "sst.w r15, 16[ep]; sst.w r17, 20[ep];" \
34 "sst.w r18, 24[ep]; sst.w r19, 28[ep]" \
35 :: "r" (src), "r" (dst) \
36 : "r1", "r12", "r13", "r14", "r15", \
37 "r17", "r18", "r19", "ep", "memory");
38
39void *memcpy (void *dst, const void *src, __kernel_size_t size)
40{
41 char *_dst = dst;
42 const char *_src = src;
43
44 if (size >= CHUNK_SIZE && CHUNK_ALIGNED(_src) && CHUNK_ALIGNED(_dst)) {
45 /* Copy large blocks efficiently. */
46 unsigned count;
47 for (count = size / CHUNK_SIZE; count; count--) {
48 COPY_CHUNK (_src, _dst);
49 _src += CHUNK_SIZE;
50 _dst += CHUNK_SIZE;
51 }
52 size %= CHUNK_SIZE;
53 }
54
55 if (size > 0)
56 do
57 *_dst++ = *_src++;
58 while (--size);
59
60 return dst;
61}
62
63void *memmove (void *dst, const void *src, __kernel_size_t size)
64{
65 if ((unsigned long)dst < (unsigned long)src
66 || (unsigned long)src + size < (unsigned long)dst)
67 return memcpy (dst, src, size);
68 else {
69 char *_dst = dst + size;
70 const char *_src = src + size;
71
72 if (size >= CHUNK_SIZE
73 && CHUNK_ALIGNED (_src) && CHUNK_ALIGNED (_dst))
74 {
75 /* Copy large blocks efficiently. */
76 unsigned count;
77 for (count = size / CHUNK_SIZE; count; count--) {
78 _src -= CHUNK_SIZE;
79 _dst -= CHUNK_SIZE;
80 COPY_CHUNK (_src, _dst);
81 }
82 size %= CHUNK_SIZE;
83 }
84
85 if (size > 0)
86 do
87 *--_dst = *--_src;
88 while (--size);
89
90 return _dst;
91 }
92}
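For reference (commentary, not part of the patch): memmove() above copies forward when the destination starts below the source or the regions do not overlap, and backward otherwise, so overlapping moves never clobber bytes before reading them. A tiny host-side illustration of why the backward case matters:

/* Illustrative demo of the copy-direction rule used by memmove above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "abcdef";

	/* dst (buf + 2) overlaps src (buf) and sits above it, so a correct
	 * memmove must copy backward; plain memcpy would be undefined here. */
	memmove(buf + 2, buf, 6);
	printf("%s\n", buf);	/* prints "ababcdef" */
	return 0;
}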
diff --git a/arch/v850/lib/memset.c b/arch/v850/lib/memset.c
deleted file mode 100644
index d1b2ad821b15..000000000000
--- a/arch/v850/lib/memset.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * arch/v850/lib/memset.c -- Memory initialization
3 *
4 * Copyright (C) 2001,02,04 NEC Corporation
5 * Copyright (C) 2001,02,04 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#include <linux/types.h>
15
16void *memset (void *dst, int val, __kernel_size_t count)
17{
18 if (count) {
19 register unsigned loop;
20 register void *ptr asm ("ep") = dst;
21
22 /* replicate VAL into a long. */
23 val &= 0xff;
24 val |= val << 8;
25 val |= val << 16;
26
27 /* copy initial unaligned bytes. */
28 if ((long)ptr & 1) {
29 *(char *)ptr = val;
30 ptr = (void *)((char *)ptr + 1);
31 count--;
32 }
33 if (count > 2 && ((long)ptr & 2)) {
34 *(short *)ptr = val;
35 ptr = (void *)((short *)ptr + 1);
36 count -= 2;
37 }
38
39 /* 32-byte copying loop. */
40 for (loop = count / 32; loop; loop--) {
41 asm ("sst.w %0, 0[ep]; sst.w %0, 4[ep];"
42 "sst.w %0, 8[ep]; sst.w %0, 12[ep];"
43 "sst.w %0, 16[ep]; sst.w %0, 20[ep];"
44 "sst.w %0, 24[ep]; sst.w %0, 28[ep]"
45 :: "r" (val) : "memory");
46 ptr += 32;
47 }
48 count %= 32;
49
50 /* long copying loop. */
51 for (loop = count / 4; loop; loop--) {
52 *(long *)ptr = val;
53 ptr = (void *)((long *)ptr + 1);
54 }
55 count %= 4;
56
57 /* finish up with any trailing bytes. */
58 if (count & 2) {
59 *(short *)ptr = val;
60 ptr = (void *)((short *)ptr + 1);
61 }
62 if (count & 1) {
63 *(char *)ptr = val;
64 }
65 }
66
67 return dst;
68}
diff --git a/arch/v850/lib/muldi3.c b/arch/v850/lib/muldi3.c
deleted file mode 100644
index 277ca25c82c8..000000000000
--- a/arch/v850/lib/muldi3.c
+++ /dev/null
@@ -1,61 +0,0 @@
1/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
2 gcc-2.7.2.3/longlong.h which is: */
3/* Copyright (C) 1989, 1992, 1993, 1994, 1995, 2001 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22#define umul_ppmm(w1, w0, u, v) \
23 __asm__ ("mulu %3, %0, %1" \
24 : "=r" ((USItype)(w0)), \
25 "=r" ((USItype)(w1)) \
26 : "%0" ((USItype)(u)), \
27 "r" ((USItype)(v)))
28
29#define __umulsidi3(u, v) \
30 ({DIunion __w; \
31 umul_ppmm (__w.s.high, __w.s.low, u, v); \
32 __w.ll; })
33
34typedef int SItype __attribute__ ((mode (SI)));
35typedef unsigned int USItype __attribute__ ((mode (SI)));
36typedef int DItype __attribute__ ((mode (DI)));
37typedef int word_type __attribute__ ((mode (__word__)));
38
39struct DIstruct {SItype high, low;};
40
41typedef union
42{
43 struct DIstruct s;
44 DItype ll;
45} DIunion;
46
47DItype
48__muldi3 (DItype u, DItype v)
49{
50 DIunion w;
51 DIunion uu, vv;
52
53 uu.ll = u,
54 vv.ll = v;
55
56 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
57 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
58 + (USItype) uu.s.high * (USItype) vv.s.low);
59
60 return w.ll;
61}
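For reference (commentary, not part of the patch): __muldi3 above forms the 64-bit product as lo*lo plus the two cross products folded into the high word; the hi*hi term only affects bits above 63 and is dropped. A host-side check of that identity:

/* Illustrative check of the 64-bit multiply decomposition used above;
 * native uint64_t arithmetic is the reference. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mul64(uint64_t u, uint64_t v)
{
	uint32_t ul = (uint32_t)u, uh = (uint32_t)(u >> 32);
	uint32_t vl = (uint32_t)v, vh = (uint32_t)(v >> 32);
	uint64_t w = (uint64_t)ul * vl;				/* low x low, full width */

	w += ((uint64_t)ul * vh + (uint64_t)uh * vl) << 32;	/* cross terms */
	return w;						/* uh*vh falls off the top */
}

int main(void)
{
	assert(mul64(0x123456789ULL, 0xfedcba987ULL) ==
	       0x123456789ULL * 0xfedcba987ULL);
	printf("ok\n");
	return 0;
}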
diff --git a/arch/v850/lib/negdi2.c b/arch/v850/lib/negdi2.c
deleted file mode 100644
index 571e04fc619a..000000000000
--- a/arch/v850/lib/negdi2.c
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * arch/v850/lib/negdi2.c -- 64-bit negation
3 *
4 * Copyright (C) 2001 NEC Corporation
5 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14typedef int DItype __attribute__ ((mode (DI)));
15
16DItype __negdi2 (DItype x)
17{
18 __asm__ __volatile__
19 ("not r6, r10;"
20 "add 1, r10;"
21 "setf c, r6;"
22 "not r7, r11;"
23 "add r6, r11"
24 ::: "r6", "r7", "r10", "r11");
25}
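For reference (commentary, not part of the patch): __negdi2 above negates a 64-bit value held as two 32-bit words: negate the low word, capture the carry, then invert the high word and add that carry. The same computation in portable C:

/* Illustrative C version of the two-word negation done by __negdi2 above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void neg64(uint32_t *lo, uint32_t *hi)
{
	uint32_t nlo = ~*lo + 1;	/* negate low word */
	uint32_t carry = (nlo == 0);	/* carry out only when the low word was 0 */

	*hi = ~*hi + carry;		/* invert high word, add the carry */
	*lo = nlo;
}

int main(void)
{
	uint64_t x = 0x100000000ULL;	/* low word zero: exercises the carry */
	uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);

	neg64(&lo, &hi);
	assert((((uint64_t)hi << 32) | lo) == (0ULL - x));
	printf("ok\n");
	return 0;
}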
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 03980cb04291..e3cba0b45600 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,12 +21,15 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
+	select ARCH_WANT_OPTIONAL_GPIOLIB if !X86_RDC321X
 	select HAVE_KRETPROBES
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index bc5553b496f7..9fea73706479 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -182,8 +182,6 @@ static unsigned outcnt;
 static int fill_inbuf(void);
 static void flush_window(void);
 static void error(char *m);
-static void gzip_mark(void **);
-static void gzip_release(void **);
 
 /*
  * This is set up by the setup-routine at boot-time
@@ -196,9 +194,6 @@ extern int input_len;
 
 static long bytes_out;
 
-static void *malloc(int size);
-static void free(void *where);
-
 static void *memset(void *s, int c, unsigned n);
 static void *memcpy(void *dest, const void *src, unsigned n);
 
@@ -220,40 +215,6 @@ static int lines, cols;
 
 #include "../../../../lib/inflate.c"
 
-static void *malloc(int size)
-{
-	void *p;
-
-	if (size < 0)
-		error("Malloc error");
-	if (free_mem_ptr <= 0)
-		error("Memory error");
-
-	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
-
-	p = (void *)free_mem_ptr;
-	free_mem_ptr += size;
-
-	if (free_mem_ptr >= free_mem_end_ptr)
-		error("Out of memory");
-
-	return p;
-}
-
-static void free(void *where)
-{	/* Don't care */
-}
-
-static void gzip_mark(void **ptr)
-{
-	*ptr = (void *) free_mem_ptr;
-}
-
-static void gzip_release(void **ptr)
-{
-	free_mem_ptr = (memptr) *ptr;
-}
-
 static void scroll(void)
 {
 	int i;
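Note (commentary, not part of the patch): the malloc()/free() pair removed above was a simple bump allocator over the [free_mem_ptr, free_mem_end_ptr) scratch window, with free() as a no-op. A standalone sketch of that pattern, with a static buffer standing in for the decompressor's heap window:

/* Illustrative bump allocator in the style of the removed boot-time malloc;
 * heap[] stands in for the free_mem_ptr..free_mem_end_ptr window. */
#include <stddef.h>
#include <stdio.h>

static unsigned char heap[4096];
static size_t heap_off;

static void *bump_malloc(size_t size)
{
	void *p;

	heap_off = (heap_off + 3) & ~(size_t)3;	/* 4-byte align, as above */
	if (size > sizeof(heap) - heap_off)
		return NULL;			/* "Out of memory" */
	p = heap + heap_off;
	heap_off += size;
	return p;
}

static void bump_free(void *p)
{
	(void)p;	/* don't care, exactly like the removed free() */
}

int main(void)
{
	void *a = bump_malloc(10);
	void *b = bump_malloc(10);

	printf("%p %p\n", a, b);	/* b starts 12 bytes past a (aligned) */
	bump_free(a);
	return 0;
}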
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 23d146ce676b..e4bd1793a5e4 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -15,6 +15,16 @@
 #include <asm/irqflags.h>
 #include <linux/linkage.h>
 
+/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+#include <linux/elf-em.h>
+#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
+#define __AUDIT_ARCH_LE 0x40000000
+
+#ifndef CONFIG_AUDITSYSCALL
+#define sysexit_audit int_ret_from_sys_call
+#define sysretl_audit int_ret_from_sys_call
+#endif
+
 #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
 
 	.macro IA32_ARG_FIXUP noebp=0
@@ -148,13 +158,15 @@ ENTRY(ia32_sysenter_target)
 	ja ia32_badsys
 sysenter_do_call:
 	IA32_ARG_FIXUP 1
+sysenter_dispatch:
 	call *ia32_sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
 	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
-	jnz int_ret_from_sys_call
+	jnz sysexit_audit
+sysexit_from_sys_call:
 	andl $~TS_COMPAT,TI_status(%r10)
 	/* clear IF, that popfq doesn't enable interrupts early */
 	andl $~0x200,EFLAGS-R11(%rsp)
@@ -170,9 +182,63 @@ sysenter_do_call:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS_SYSEXIT32
 
-sysenter_tracesys:
+#ifdef CONFIG_AUDITSYSCALL
+	.macro auditsys_entry_common
+	movl %esi,%r9d			/* 6th arg: 4th syscall arg */
+	movl %edx,%r8d			/* 5th arg: 3rd syscall arg */
+	/* (already in %ecx)		   4th arg: 2nd syscall arg */
+	movl %ebx,%edx			/* 3rd arg: 1st syscall arg */
+	movl %eax,%esi			/* 2nd arg: syscall number */
+	movl $AUDIT_ARCH_I386,%edi	/* 1st arg: audit arch */
+	call audit_syscall_entry
+	movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall number */
+	cmpl $(IA32_NR_syscalls-1),%eax
+	ja ia32_badsys
+	movl %ebx,%edi			/* reload 1st syscall arg */
+	movl RCX-ARGOFFSET(%rsp),%esi	/* reload 2nd syscall arg */
+	movl RDX-ARGOFFSET(%rsp),%edx	/* reload 3rd syscall arg */
+	movl RSI-ARGOFFSET(%rsp),%ecx	/* reload 4th syscall arg */
+	movl RDI-ARGOFFSET(%rsp),%r8d	/* reload 5th syscall arg */
+	.endm
+
+	.macro auditsys_exit exit
+	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+	jnz int_ret_from_sys_call
+	TRACE_IRQS_ON
+	sti
+	movl %eax,%esi		/* second arg, syscall return value */
+	cmpl $0,%eax		/* is it < 0? */
+	setl %al		/* 1 if so, 0 if not */
+	movzbl %al,%edi		/* zero-extend that into %edi */
+	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
+	call audit_syscall_exit
+	GET_THREAD_INFO(%r10)
+	movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall return value */
+	movl RBP-ARGOFFSET(%rsp),%ebp	/* reload user register value */
+	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+	cli
+	TRACE_IRQS_OFF
+	testl %edi,TI_flags(%r10)
+	jnz int_with_check
+	jmp \exit
+	.endm
+
+sysenter_auditsys:
 	CFI_RESTORE_STATE
+	auditsys_entry_common
+	movl %ebp,%r9d		/* reload 6th syscall arg */
+	jmp sysenter_dispatch
+
+sysexit_audit:
+	auditsys_exit sysexit_from_sys_call
+#endif
+
+sysenter_tracesys:
 	xchgl %r9d,%ebp
+#ifdef CONFIG_AUDITSYSCALL
+	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+	jz sysenter_auditsys
+#endif
 	SAVE_REST
 	CLEAR_RREGS
 	movq %r9,R9(%rsp)
@@ -252,13 +318,15 @@ cstar_do_call:
252 cmpl $IA32_NR_syscalls-1,%eax 318 cmpl $IA32_NR_syscalls-1,%eax
253 ja ia32_badsys 319 ja ia32_badsys
254 IA32_ARG_FIXUP 1 320 IA32_ARG_FIXUP 1
321cstar_dispatch:
255 call *ia32_sys_call_table(,%rax,8) 322 call *ia32_sys_call_table(,%rax,8)
256 movq %rax,RAX-ARGOFFSET(%rsp) 323 movq %rax,RAX-ARGOFFSET(%rsp)
257 GET_THREAD_INFO(%r10) 324 GET_THREAD_INFO(%r10)
258 DISABLE_INTERRUPTS(CLBR_NONE) 325 DISABLE_INTERRUPTS(CLBR_NONE)
259 TRACE_IRQS_OFF 326 TRACE_IRQS_OFF
260 testl $_TIF_ALLWORK_MASK,TI_flags(%r10) 327 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
261 jnz int_ret_from_sys_call 328 jnz sysretl_audit
329sysretl_from_sys_call:
262 andl $~TS_COMPAT,TI_status(%r10) 330 andl $~TS_COMPAT,TI_status(%r10)
263 RESTORE_ARGS 1,-ARG_SKIP,1,1,1 331 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
264 movl RIP-ARGOFFSET(%rsp),%ecx 332 movl RIP-ARGOFFSET(%rsp),%ecx
@@ -270,8 +338,23 @@ cstar_do_call:
270 CFI_RESTORE rsp 338 CFI_RESTORE rsp
271 USERGS_SYSRET32 339 USERGS_SYSRET32
272 340
273cstar_tracesys: 341#ifdef CONFIG_AUDITSYSCALL
342cstar_auditsys:
274 CFI_RESTORE_STATE 343 CFI_RESTORE_STATE
344 movl %r9d,R9-ARGOFFSET(%rsp) /* register to be clobbered by call */
345 auditsys_entry_common
346 movl R9-ARGOFFSET(%rsp),%r9d /* reload 6th syscall arg */
347 jmp cstar_dispatch
348
349sysretl_audit:
350 auditsys_exit sysretl_from_sys_call
351#endif
352
353cstar_tracesys:
354#ifdef CONFIG_AUDITSYSCALL
355 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
356 jz cstar_auditsys
357#endif
275 xchgl %r9d,%ebp 358 xchgl %r9d,%ebp
276 SAVE_REST 359 SAVE_REST
277 CLEAR_RREGS 360 CLEAR_RREGS
@@ -743,4 +826,10 @@ ia32_sys_call_table:
743 .quad sys32_fallocate 826 .quad sys32_fallocate
744 .quad compat_sys_timerfd_settime /* 325 */ 827 .quad compat_sys_timerfd_settime /* 325 */
745 .quad compat_sys_timerfd_gettime 828 .quad compat_sys_timerfd_gettime
829 .quad compat_sys_signalfd4
830 .quad sys_eventfd2
831 .quad sys_epoll_create1
832 .quad sys_dup3 /* 330 */
833 .quad sys_pipe2
834 .quad sys_inotify_init1
746ia32_syscall_end: 835ia32_syscall_end:
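The sysenter_auditsys/sysexit_audit paths added above only rearrange registers: on entry the compat syscall number and its first four arguments are moved into the argument registers expected by audit_syscall_entry(), and on exit the syscall return value is folded into an AUDITSC_SUCCESS/AUDITSC_FAILURE code for audit_syscall_exit(). A minimal C sketch of that shuffling, using hypothetical wrapper names (ia32_audit_entry/ia32_audit_exit are not part of the patch; the asm macros above are the real implementation):

        static void ia32_audit_entry(struct pt_regs *regs)
        {
                /* arch, syscall number, then the first four i386 syscall arguments */
                audit_syscall_entry(AUDIT_ARCH_I386, regs->ax,
                                    regs->bx, regs->cx, regs->dx, regs->si);
        }

        static void ia32_audit_exit(struct pt_regs *regs)
        {
                long ret = regs->ax;
                /* 0 -> 1 (AUDITSC_SUCCESS), 1 -> 2 (AUDITSC_FAILURE), as in the asm */
                audit_syscall_exit(ret < 0 ? AUDITSC_FAILURE : AUDITSC_SUCCESS, ret);
        }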
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index f00afdf61e67..d3c64088b981 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -238,7 +238,7 @@ asmlinkage long sys32_pipe(int __user *fd)
238 int retval; 238 int retval;
239 int fds[2]; 239 int fds[2];
240 240
241 retval = do_pipe(fds); 241 retval = do_pipe_flags(fds, 0);
242 if (retval) 242 if (retval)
243 goto out; 243 goto out;
244 if (copy_to_user(fd, fds, sizeof(fds))) 244 if (copy_to_user(fd, fds, sizeof(fds)))
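do_pipe_flags() is the flags-aware successor to do_pipe(): passing 0 keeps the old behaviour, while the new sys_pipe2 entry added to the ia32 syscall table above forwards its flags (O_CLOEXEC, O_NONBLOCK) through the same helper. The equivalence relied on here, in one line:

        retval = do_pipe_flags(fds, 0);        /* same behaviour as the old do_pipe(fds) */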
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index c2502eb9aa83..9220cf46aa10 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
73 struct cpuinfo_x86 *c = &cpu_data(cpu); 73 struct cpuinfo_x86 *c = &cpu_data(cpu);
74 74
75 cpumask_t saved_mask; 75 cpumask_t saved_mask;
76 cpumask_of_cpu_ptr(new_mask, cpu);
76 int retval; 77 int retval;
77 unsigned int eax, ebx, ecx, edx; 78 unsigned int eax, ebx, ecx, edx;
78 unsigned int edx_part; 79 unsigned int edx_part;
@@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
91 92
92 /* Make sure we are running on right CPU */ 93 /* Make sure we are running on right CPU */
93 saved_mask = current->cpus_allowed; 94 saved_mask = current->cpus_allowed;
94 retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 95 retval = set_cpus_allowed_ptr(current, new_mask);
95 if (retval) 96 if (retval)
96 return -1; 97 return -1;
97 98
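This hunk introduces the cpumask_of_cpu_ptr() idiom that recurs through the rest of the series: instead of copying a full cpumask_t onto the stack via cpumask_of_cpu(cpu), the caller takes a pointer to a single-CPU mask and hands that pointer to set_cpus_allowed_ptr(). The surrounding pattern, reduced to its essentials:

        cpumask_t saved_mask = current->cpus_allowed;
        cpumask_of_cpu_ptr(new_mask, cpu);      /* pointer to a mask containing only 'cpu' */

        if (set_cpus_allowed_ptr(current, new_mask))    /* migrate onto the target CPU */
                return -1;
        /* ... per-CPU work: CPUID, MSR access, ... */
        set_cpus_allowed_ptr(current, &saved_mask);     /* restore the original affinity */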
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index a3ddad18aaa3..fa2161d5003b 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -150,6 +150,10 @@ static int __init acpi_sleep_setup(char *str)
150 acpi_realmode_flags |= 2; 150 acpi_realmode_flags |= 2;
151 if (strncmp(str, "s3_beep", 7) == 0) 151 if (strncmp(str, "s3_beep", 7) == 0)
152 acpi_realmode_flags |= 4; 152 acpi_realmode_flags |= 4;
153#ifdef CONFIG_HIBERNATION
154 if (strncmp(str, "s4_nohwsig", 10) == 0)
155 acpi_no_s4_hw_signature();
156#endif
153 if (strncmp(str, "old_ordering", 12) == 0) 157 if (strncmp(str, "old_ordering", 12) == 0)
154 acpi_old_suspend_ordering(); 158 acpi_old_suspend_ordering();
155 str = strchr(str, ','); 159 str = strchr(str, ',');
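The new s4_nohwsig token joins the existing comma-separated options of the acpi_sleep= boot parameter and is only recognised on kernels built with CONFIG_HIBERNATION, per the #ifdef above. A sketch of how it would be combined with another option on the command line:

        acpi_sleep=s4_nohwsig,old_ordering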
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index bf9b441331e9..9ee24e6bc4b0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -219,7 +219,6 @@
219#include <linux/time.h> 219#include <linux/time.h>
220#include <linux/sched.h> 220#include <linux/sched.h>
221#include <linux/pm.h> 221#include <linux/pm.h>
222#include <linux/pm_legacy.h>
223#include <linux/capability.h> 222#include <linux/capability.h>
224#include <linux/device.h> 223#include <linux/device.h>
225#include <linux/kernel.h> 224#include <linux/kernel.h>
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa..ff2fff56f0a8 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd)
200static void drv_write(struct drv_cmd *cmd) 200static void drv_write(struct drv_cmd *cmd)
201{ 201{
202 cpumask_t saved_mask = current->cpus_allowed; 202 cpumask_t saved_mask = current->cpus_allowed;
203 cpumask_of_cpu_ptr_declare(cpu_mask);
203 unsigned int i; 204 unsigned int i;
204 205
205 for_each_cpu_mask(i, cmd->mask) { 206 for_each_cpu_mask_nr(i, cmd->mask) {
206 set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); 207 cpumask_of_cpu_ptr_next(cpu_mask, i);
208 set_cpus_allowed_ptr(current, cpu_mask);
207 do_drv_write(cmd); 209 do_drv_write(cmd);
208 } 210 }
209 211
@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu)
267 } aperf_cur, mperf_cur; 269 } aperf_cur, mperf_cur;
268 270
269 cpumask_t saved_mask; 271 cpumask_t saved_mask;
272 cpumask_of_cpu_ptr(cpu_mask, cpu);
270 unsigned int perf_percent; 273 unsigned int perf_percent;
271 unsigned int retval; 274 unsigned int retval;
272 275
273 saved_mask = current->cpus_allowed; 276 saved_mask = current->cpus_allowed;
274 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 277 set_cpus_allowed_ptr(current, cpu_mask);
275 if (get_cpu() != cpu) { 278 if (get_cpu() != cpu) {
276 /* We were not able to run on requested processor */ 279 /* We were not able to run on requested processor */
277 put_cpu(); 280 put_cpu();
@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
337 340
338static unsigned int get_cur_freq_on_cpu(unsigned int cpu) 341static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
339{ 342{
343 cpumask_of_cpu_ptr(cpu_mask, cpu);
340 struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); 344 struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
341 unsigned int freq; 345 unsigned int freq;
342 unsigned int cached_freq; 346 unsigned int cached_freq;
@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
349 } 353 }
350 354
351 cached_freq = data->freq_table[data->acpi_data->state].frequency; 355 cached_freq = data->freq_table[data->acpi_data->state].frequency;
352 freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); 356 freq = extract_freq(get_cur_val(cpu_mask), data);
353 if (freq != cached_freq) { 357 if (freq != cached_freq) {
354 /* 358 /*
355 * The dreaded BIOS frequency change behind our back. 359 * The dreaded BIOS frequency change behind our back.
@@ -451,7 +455,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
451 455
452 freqs.old = perf->states[perf->state].core_frequency * 1000; 456 freqs.old = perf->states[perf->state].core_frequency * 1000;
453 freqs.new = data->freq_table[next_state].frequency; 457 freqs.new = data->freq_table[next_state].frequency;
454 for_each_cpu_mask(i, cmd.mask) { 458 for_each_cpu_mask_nr(i, cmd.mask) {
455 freqs.cpu = i; 459 freqs.cpu = i;
456 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 460 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
457 } 461 }
@@ -466,7 +470,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
466 } 470 }
467 } 471 }
468 472
469 for_each_cpu_mask(i, cmd.mask) { 473 for_each_cpu_mask_nr(i, cmd.mask) {
470 freqs.cpu = i; 474 freqs.cpu = i;
471 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 475 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
472 } 476 }
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc..f1685fb91fbd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
122 return 0; 122 return 0;
123 123
124 /* notifiers */ 124 /* notifiers */
125 for_each_cpu_mask(i, policy->cpus) { 125 for_each_cpu_mask_nr(i, policy->cpus) {
126 freqs.cpu = i; 126 freqs.cpu = i;
127 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 127 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
128 } 128 }
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
130 /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software 130 /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
131 * Developer's Manual, Volume 3 131 * Developer's Manual, Volume 3
132 */ 132 */
133 for_each_cpu_mask(i, policy->cpus) 133 for_each_cpu_mask_nr(i, policy->cpus)
134 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); 134 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
135 135
136 /* notifiers */ 136 /* notifiers */
137 for_each_cpu_mask(i, policy->cpus) { 137 for_each_cpu_mask_nr(i, policy->cpus) {
138 freqs.cpu = i; 138 freqs.cpu = i;
139 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 139 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
140 } 140 }
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 206791eb46e3..53c7b6936973 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
479static int check_supported_cpu(unsigned int cpu) 479static int check_supported_cpu(unsigned int cpu)
480{ 480{
481 cpumask_t oldmask; 481 cpumask_t oldmask;
482 cpumask_of_cpu_ptr(cpu_mask, cpu);
482 u32 eax, ebx, ecx, edx; 483 u32 eax, ebx, ecx, edx;
483 unsigned int rc = 0; 484 unsigned int rc = 0;
484 485
485 oldmask = current->cpus_allowed; 486 oldmask = current->cpus_allowed;
486 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 487 set_cpus_allowed_ptr(current, cpu_mask);
487 488
488 if (smp_processor_id() != cpu) { 489 if (smp_processor_id() != cpu) {
489 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); 490 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -966,7 +967,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
966 freqs.old = find_khz_freq_from_fid(data->currfid); 967 freqs.old = find_khz_freq_from_fid(data->currfid);
967 freqs.new = find_khz_freq_from_fid(fid); 968 freqs.new = find_khz_freq_from_fid(fid);
968 969
969 for_each_cpu_mask(i, *(data->available_cores)) { 970 for_each_cpu_mask_nr(i, *(data->available_cores)) {
970 freqs.cpu = i; 971 freqs.cpu = i;
971 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 972 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
972 } 973 }
@@ -974,7 +975,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
974 res = transition_fid_vid(data, fid, vid); 975 res = transition_fid_vid(data, fid, vid);
975 freqs.new = find_khz_freq_from_fid(data->currfid); 976 freqs.new = find_khz_freq_from_fid(data->currfid);
976 977
977 for_each_cpu_mask(i, *(data->available_cores)) { 978 for_each_cpu_mask_nr(i, *(data->available_cores)) {
978 freqs.cpu = i; 979 freqs.cpu = i;
979 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 980 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
980 } 981 }
@@ -997,7 +998,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
997 freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); 998 freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
998 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); 999 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
999 1000
1000 for_each_cpu_mask(i, *(data->available_cores)) { 1001 for_each_cpu_mask_nr(i, *(data->available_cores)) {
1001 freqs.cpu = i; 1002 freqs.cpu = i;
1002 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 1003 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1003 } 1004 }
@@ -1005,7 +1006,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
1005 res = transition_pstate(data, pstate); 1006 res = transition_pstate(data, pstate);
1006 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); 1007 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
1007 1008
1008 for_each_cpu_mask(i, *(data->available_cores)) { 1009 for_each_cpu_mask_nr(i, *(data->available_cores)) {
1009 freqs.cpu = i; 1010 freqs.cpu = i;
1010 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 1011 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1011 } 1012 }
@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
1016static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) 1017static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
1017{ 1018{
1018 cpumask_t oldmask; 1019 cpumask_t oldmask;
1020 cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
1019 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1021 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1020 u32 checkfid; 1022 u32 checkfid;
1021 u32 checkvid; 1023 u32 checkvid;
@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
1030 1032
1031 /* only run on specific CPU from here on */ 1033 /* only run on specific CPU from here on */
1032 oldmask = current->cpus_allowed; 1034 oldmask = current->cpus_allowed;
1033 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); 1035 set_cpus_allowed_ptr(current, cpu_mask);
1034 1036
1035 if (smp_processor_id() != pol->cpu) { 1037 if (smp_processor_id() != pol->cpu) {
1036 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); 1038 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1105{ 1107{
1106 struct powernow_k8_data *data; 1108 struct powernow_k8_data *data;
1107 cpumask_t oldmask; 1109 cpumask_t oldmask;
1110 cpumask_of_cpu_ptr_declare(newmask);
1108 int rc; 1111 int rc;
1109 1112
1110 if (!cpu_online(pol->cpu)) 1113 if (!cpu_online(pol->cpu))
@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1156 1159
1157 /* only run on specific CPU from here on */ 1160 /* only run on specific CPU from here on */
1158 oldmask = current->cpus_allowed; 1161 oldmask = current->cpus_allowed;
1159 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); 1162 cpumask_of_cpu_ptr_next(newmask, pol->cpu);
1163 set_cpus_allowed_ptr(current, newmask);
1160 1164
1161 if (smp_processor_id() != pol->cpu) { 1165 if (smp_processor_id() != pol->cpu) {
1162 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); 1166 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1178 set_cpus_allowed_ptr(current, &oldmask); 1182 set_cpus_allowed_ptr(current, &oldmask);
1179 1183
1180 if (cpu_family == CPU_HW_PSTATE) 1184 if (cpu_family == CPU_HW_PSTATE)
1181 pol->cpus = cpumask_of_cpu(pol->cpu); 1185 pol->cpus = *newmask;
1182 else 1186 else
1183 pol->cpus = per_cpu(cpu_core_map, pol->cpu); 1187 pol->cpus = per_cpu(cpu_core_map, pol->cpu);
1184 data->available_cores = &(pol->cpus); 1188 data->available_cores = &(pol->cpus);
@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
1244{ 1248{
1245 struct powernow_k8_data *data; 1249 struct powernow_k8_data *data;
1246 cpumask_t oldmask = current->cpus_allowed; 1250 cpumask_t oldmask = current->cpus_allowed;
1251 cpumask_of_cpu_ptr(newmask, cpu);
1247 unsigned int khz = 0; 1252 unsigned int khz = 0;
1248 unsigned int first; 1253 unsigned int first;
1249 1254
@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
1253 if (!data) 1258 if (!data)
1254 return -EINVAL; 1259 return -EINVAL;
1255 1260
1256 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 1261 set_cpus_allowed_ptr(current, newmask);
1257 if (smp_processor_id() != cpu) { 1262 if (smp_processor_id() != cpu) {
1258 printk(KERN_ERR PFX 1263 printk(KERN_ERR PFX
1259 "limiting to CPU %d failed in powernowk8_get\n", cpu); 1264 "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67e..ca2ac13b7af2 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -28,7 +28,8 @@
28#define PFX "speedstep-centrino: " 28#define PFX "speedstep-centrino: "
29#define MAINTAINER "cpufreq@lists.linux.org.uk" 29#define MAINTAINER "cpufreq@lists.linux.org.uk"
30 30
31#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) 31#define dprintk(msg...) \
32 cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
32 33
33#define INTEL_MSR_RANGE (0xffff) 34#define INTEL_MSR_RANGE (0xffff)
34 35
@@ -66,11 +67,12 @@ struct cpu_model
66 67
67 struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */ 68 struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
68}; 69};
69static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x); 70static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
71 const struct cpu_id *x);
70 72
71/* Operating points for current CPU */ 73/* Operating points for current CPU */
72static struct cpu_model *centrino_model[NR_CPUS]; 74static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
73static const struct cpu_id *centrino_cpu[NR_CPUS]; 75static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
74 76
75static struct cpufreq_driver centrino_driver; 77static struct cpufreq_driver centrino_driver;
76 78
@@ -255,7 +257,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
255 return -ENOENT; 257 return -ENOENT;
256 } 258 }
257 259
258 centrino_model[policy->cpu] = model; 260 per_cpu(centrino_model, policy->cpu) = model;
259 261
260 dprintk("found \"%s\": max frequency: %dkHz\n", 262 dprintk("found \"%s\": max frequency: %dkHz\n",
261 model->model_name, model->max_freq); 263 model->model_name, model->max_freq);
@@ -264,10 +266,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
264} 266}
265 267
266#else 268#else
267static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; } 269static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
270{
271 return -ENODEV;
272}
268#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */ 273#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
269 274
270static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x) 275static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
276 const struct cpu_id *x)
271{ 277{
272 if ((c->x86 == x->x86) && 278 if ((c->x86 == x->x86) &&
273 (c->x86_model == x->x86_model) && 279 (c->x86_model == x->x86_model) &&
@@ -286,23 +292,28 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
286 * for centrino, as some DSDTs are buggy. 292 * for centrino, as some DSDTs are buggy.
287 * Ideally, this can be done using the acpi_data structure. 293 * Ideally, this can be done using the acpi_data structure.
288 */ 294 */
289 if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) || 295 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
290 (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) || 296 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
291 (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) { 297 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
292 msr = (msr >> 8) & 0xff; 298 msr = (msr >> 8) & 0xff;
293 return msr * 100000; 299 return msr * 100000;
294 } 300 }
295 301
296 if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points)) 302 if ((!per_cpu(centrino_model, cpu)) ||
303 (!per_cpu(centrino_model, cpu)->op_points))
297 return 0; 304 return 0;
298 305
299 msr &= 0xffff; 306 msr &= 0xffff;
300 for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) { 307 for (i = 0;
301 if (msr == centrino_model[cpu]->op_points[i].index) 308 per_cpu(centrino_model, cpu)->op_points[i].frequency
302 return centrino_model[cpu]->op_points[i].frequency; 309 != CPUFREQ_TABLE_END;
310 i++) {
311 if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
312 return per_cpu(centrino_model, cpu)->
313 op_points[i].frequency;
303 } 314 }
304 if (failsafe) 315 if (failsafe)
305 return centrino_model[cpu]->op_points[i-1].frequency; 316 return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
306 else 317 else
307 return 0; 318 return 0;
308} 319}
@@ -313,9 +324,10 @@ static unsigned int get_cur_freq(unsigned int cpu)
313 unsigned l, h; 324 unsigned l, h;
314 unsigned clock_freq; 325 unsigned clock_freq;
315 cpumask_t saved_mask; 326 cpumask_t saved_mask;
327 cpumask_of_cpu_ptr(new_mask, cpu);
316 328
317 saved_mask = current->cpus_allowed; 329 saved_mask = current->cpus_allowed;
318 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 330 set_cpus_allowed_ptr(current, new_mask);
319 if (smp_processor_id() != cpu) 331 if (smp_processor_id() != cpu)
320 return 0; 332 return 0;
321 333
@@ -347,7 +359,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
347 int i; 359 int i;
348 360
349 /* Only Intel makes Enhanced Speedstep-capable CPUs */ 361 /* Only Intel makes Enhanced Speedstep-capable CPUs */
350 if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST)) 362 if (cpu->x86_vendor != X86_VENDOR_INTEL ||
363 !cpu_has(cpu, X86_FEATURE_EST))
351 return -ENODEV; 364 return -ENODEV;
352 365
353 if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) 366 if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
@@ -361,9 +374,9 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
361 break; 374 break;
362 375
363 if (i != N_IDS) 376 if (i != N_IDS)
364 centrino_cpu[policy->cpu] = &cpu_ids[i]; 377 per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
365 378
366 if (!centrino_cpu[policy->cpu]) { 379 if (!per_cpu(centrino_cpu, policy->cpu)) {
367 dprintk("found unsupported CPU with " 380 dprintk("found unsupported CPU with "
368 "Enhanced SpeedStep: send /proc/cpuinfo to " 381 "Enhanced SpeedStep: send /proc/cpuinfo to "
369 MAINTAINER "\n"); 382 MAINTAINER "\n");
@@ -386,23 +399,26 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
386 /* check to see if it stuck */ 399 /* check to see if it stuck */
387 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 400 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
388 if (!(l & (1<<16))) { 401 if (!(l & (1<<16))) {
389 printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n"); 402 printk(KERN_INFO PFX
403 "couldn't enable Enhanced SpeedStep\n");
390 return -ENODEV; 404 return -ENODEV;
391 } 405 }
392 } 406 }
393 407
394 freq = get_cur_freq(policy->cpu); 408 freq = get_cur_freq(policy->cpu);
395 409 policy->cpuinfo.transition_latency = 10000;
396 policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */ 410 /* 10uS transition latency */
397 policy->cur = freq; 411 policy->cur = freq;
398 412
399 dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); 413 dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);
400 414
401 ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points); 415 ret = cpufreq_frequency_table_cpuinfo(policy,
416 per_cpu(centrino_model, policy->cpu)->op_points);
402 if (ret) 417 if (ret)
403 return (ret); 418 return (ret);
404 419
405 cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu); 420 cpufreq_frequency_table_get_attr(
421 per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
406 422
407 return 0; 423 return 0;
408} 424}
@@ -411,12 +427,12 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
411{ 427{
412 unsigned int cpu = policy->cpu; 428 unsigned int cpu = policy->cpu;
413 429
414 if (!centrino_model[cpu]) 430 if (!per_cpu(centrino_model, cpu))
415 return -ENODEV; 431 return -ENODEV;
416 432
417 cpufreq_frequency_table_put_attr(cpu); 433 cpufreq_frequency_table_put_attr(cpu);
418 434
419 centrino_model[cpu] = NULL; 435 per_cpu(centrino_model, cpu) = NULL;
420 436
421 return 0; 437 return 0;
422} 438}
@@ -430,17 +446,26 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
430 */ 446 */
431static int centrino_verify (struct cpufreq_policy *policy) 447static int centrino_verify (struct cpufreq_policy *policy)
432{ 448{
433 return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points); 449 return cpufreq_frequency_table_verify(policy,
450 per_cpu(centrino_model, policy->cpu)->op_points);
434} 451}
435 452
436/** 453/**
437 * centrino_setpolicy - set a new CPUFreq policy 454 * centrino_setpolicy - set a new CPUFreq policy
438 * @policy: new policy 455 * @policy: new policy
439 * @target_freq: the target frequency 456 * @target_freq: the target frequency
440 * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) 457 * @relation: how that frequency relates to achieved frequency
458 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
441 * 459 *
442 * Sets a new CPUFreq policy. 460 * Sets a new CPUFreq policy.
443 */ 461 */
462struct allmasks {
463 cpumask_t online_policy_cpus;
464 cpumask_t saved_mask;
465 cpumask_t set_mask;
466 cpumask_t covered_cpus;
467};
468
444static int centrino_target (struct cpufreq_policy *policy, 469static int centrino_target (struct cpufreq_policy *policy,
445 unsigned int target_freq, 470 unsigned int target_freq,
446 unsigned int relation) 471 unsigned int relation)
@@ -448,48 +473,55 @@ static int centrino_target (struct cpufreq_policy *policy,
448 unsigned int newstate = 0; 473 unsigned int newstate = 0;
449 unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; 474 unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
450 struct cpufreq_freqs freqs; 475 struct cpufreq_freqs freqs;
451 cpumask_t online_policy_cpus;
452 cpumask_t saved_mask;
453 cpumask_t set_mask;
454 cpumask_t covered_cpus;
455 int retval = 0; 476 int retval = 0;
456 unsigned int j, k, first_cpu, tmp; 477 unsigned int j, k, first_cpu, tmp;
457 478 CPUMASK_ALLOC(allmasks);
458 if (unlikely(centrino_model[cpu] == NULL)) 479 CPUMASK_PTR(online_policy_cpus, allmasks);
459 return -ENODEV; 480 CPUMASK_PTR(saved_mask, allmasks);
481 CPUMASK_PTR(set_mask, allmasks);
482 CPUMASK_PTR(covered_cpus, allmasks);
483
484 if (unlikely(allmasks == NULL))
485 return -ENOMEM;
486
487 if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
488 retval = -ENODEV;
489 goto out;
490 }
460 491
461 if (unlikely(cpufreq_frequency_table_target(policy, 492 if (unlikely(cpufreq_frequency_table_target(policy,
462 centrino_model[cpu]->op_points, 493 per_cpu(centrino_model, cpu)->op_points,
463 target_freq, 494 target_freq,
464 relation, 495 relation,
465 &newstate))) { 496 &newstate))) {
466 return -EINVAL; 497 retval = -EINVAL;
498 goto out;
467 } 499 }
468 500
469#ifdef CONFIG_HOTPLUG_CPU 501#ifdef CONFIG_HOTPLUG_CPU
470 /* cpufreq holds the hotplug lock, so we are safe from here on */ 502 /* cpufreq holds the hotplug lock, so we are safe from here on */
471 cpus_and(online_policy_cpus, cpu_online_map, policy->cpus); 503 cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
472#else 504#else
473 online_policy_cpus = policy->cpus; 505 *online_policy_cpus = policy->cpus;
474#endif 506#endif
475 507
476 saved_mask = current->cpus_allowed; 508 *saved_mask = current->cpus_allowed;
477 first_cpu = 1; 509 first_cpu = 1;
478 cpus_clear(covered_cpus); 510 cpus_clear(*covered_cpus);
479 for_each_cpu_mask(j, online_policy_cpus) { 511 for_each_cpu_mask_nr(j, *online_policy_cpus) {
480 /* 512 /*
481 * Support for SMP systems. 513 * Support for SMP systems.
482 * Make sure we are running on CPU that wants to change freq 514 * Make sure we are running on CPU that wants to change freq
483 */ 515 */
484 cpus_clear(set_mask); 516 cpus_clear(*set_mask);
485 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 517 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
486 cpus_or(set_mask, set_mask, online_policy_cpus); 518 cpus_or(*set_mask, *set_mask, *online_policy_cpus);
487 else 519 else
488 cpu_set(j, set_mask); 520 cpu_set(j, *set_mask);
489 521
490 set_cpus_allowed_ptr(current, &set_mask); 522 set_cpus_allowed_ptr(current, set_mask);
491 preempt_disable(); 523 preempt_disable();
492 if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) { 524 if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
493 dprintk("couldn't limit to CPUs in this domain\n"); 525 dprintk("couldn't limit to CPUs in this domain\n");
494 retval = -EAGAIN; 526 retval = -EAGAIN;
495 if (first_cpu) { 527 if (first_cpu) {
@@ -500,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy,
500 break; 532 break;
501 } 533 }
502 534
503 msr = centrino_model[cpu]->op_points[newstate].index; 535 msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
504 536
505 if (first_cpu) { 537 if (first_cpu) {
506 rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); 538 rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -517,7 +549,7 @@ static int centrino_target (struct cpufreq_policy *policy,
517 dprintk("target=%dkHz old=%d new=%d msr=%04x\n", 549 dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
518 target_freq, freqs.old, freqs.new, msr); 550 target_freq, freqs.old, freqs.new, msr);
519 551
520 for_each_cpu_mask(k, online_policy_cpus) { 552 for_each_cpu_mask_nr(k, *online_policy_cpus) {
521 freqs.cpu = k; 553 freqs.cpu = k;
522 cpufreq_notify_transition(&freqs, 554 cpufreq_notify_transition(&freqs,
523 CPUFREQ_PRECHANGE); 555 CPUFREQ_PRECHANGE);
@@ -536,11 +568,11 @@ static int centrino_target (struct cpufreq_policy *policy,
536 break; 568 break;
537 } 569 }
538 570
539 cpu_set(j, covered_cpus); 571 cpu_set(j, *covered_cpus);
540 preempt_enable(); 572 preempt_enable();
541 } 573 }
542 574
543 for_each_cpu_mask(k, online_policy_cpus) { 575 for_each_cpu_mask_nr(k, *online_policy_cpus) {
544 freqs.cpu = k; 576 freqs.cpu = k;
545 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 577 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
546 } 578 }
@@ -553,10 +585,12 @@ static int centrino_target (struct cpufreq_policy *policy,
553 * Best effort undo.. 585 * Best effort undo..
554 */ 586 */
555 587
556 if (!cpus_empty(covered_cpus)) { 588 if (!cpus_empty(*covered_cpus)) {
557 for_each_cpu_mask(j, covered_cpus) { 589 cpumask_of_cpu_ptr_declare(new_mask);
558 set_cpus_allowed_ptr(current, 590
559 &cpumask_of_cpu(j)); 591 for_each_cpu_mask_nr(j, *covered_cpus) {
592 cpumask_of_cpu_ptr_next(new_mask, j);
593 set_cpus_allowed_ptr(current, new_mask);
560 wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); 594 wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
561 } 595 }
562 } 596 }
@@ -564,19 +598,22 @@ static int centrino_target (struct cpufreq_policy *policy,
564 tmp = freqs.new; 598 tmp = freqs.new;
565 freqs.new = freqs.old; 599 freqs.new = freqs.old;
566 freqs.old = tmp; 600 freqs.old = tmp;
567 for_each_cpu_mask(j, online_policy_cpus) { 601 for_each_cpu_mask_nr(j, *online_policy_cpus) {
568 freqs.cpu = j; 602 freqs.cpu = j;
569 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 603 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
570 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 604 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
571 } 605 }
572 } 606 }
573 set_cpus_allowed_ptr(current, &saved_mask); 607 set_cpus_allowed_ptr(current, saved_mask);
574 return 0; 608 retval = 0;
609 goto out;
575 610
576migrate_end: 611migrate_end:
577 preempt_enable(); 612 preempt_enable();
578 set_cpus_allowed_ptr(current, &saved_mask); 613 set_cpus_allowed_ptr(current, saved_mask);
579 return 0; 614out:
615 CPUMASK_FREE(allmasks);
616 return retval;
580} 617}
581 618
582static struct freq_attr* centrino_attr[] = { 619static struct freq_attr* centrino_attr[] = {
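centrino_target() previously kept four cpumask_t variables on the stack; the rewrite above moves them into one struct allmasks obtained via CPUMASK_ALLOC() and accessed through CPUMASK_PTR() pointers, which is why every former local is now dereferenced (*online_policy_cpus, *set_mask, and so on) and why the function gains a common out: exit that calls CPUMASK_FREE(). Stripped of the frequency-switching details, the allocation pattern looks roughly like this:

        CPUMASK_ALLOC(allmasks);                        /* struct allmasks *allmasks */
        CPUMASK_PTR(online_policy_cpus, allmasks);      /* roughly &allmasks->online_policy_cpus */
        CPUMASK_PTR(saved_mask, allmasks);

        if (unlikely(allmasks == NULL))
                return -ENOMEM;

        cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
        *saved_mask = current->cpus_allowed;
        /* ... migrate to each policy CPU and program the performance MSR ... */

        CPUMASK_FREE(allmasks);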
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdf..2f3728dc24f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
244 244
245static unsigned int speedstep_get(unsigned int cpu) 245static unsigned int speedstep_get(unsigned int cpu)
246{ 246{
247 return _speedstep_get(&cpumask_of_cpu(cpu)); 247 cpumask_of_cpu_ptr(newmask, cpu);
248 return _speedstep_get(newmask);
248} 249}
249 250
250/** 251/**
@@ -279,7 +280,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
279 280
280 cpus_allowed = current->cpus_allowed; 281 cpus_allowed = current->cpus_allowed;
281 282
282 for_each_cpu_mask(i, policy->cpus) { 283 for_each_cpu_mask_nr(i, policy->cpus) {
283 freqs.cpu = i; 284 freqs.cpu = i;
284 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 285 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
285 } 286 }
@@ -292,7 +293,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
292 /* allow to be run on all CPUs */ 293 /* allow to be run on all CPUs */
293 set_cpus_allowed_ptr(current, &cpus_allowed); 294 set_cpus_allowed_ptr(current, &cpus_allowed);
294 295
295 for_each_cpu_mask(i, policy->cpus) { 296 for_each_cpu_mask_nr(i, policy->cpus) {
296 freqs.cpu = i; 297 freqs.cpu = i;
297 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 298 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
298 } 299 }
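The repeated for_each_cpu_mask() to for_each_cpu_mask_nr() conversions in these and the later hunks change only the upper bound of the scan: the _nr variant stops at nr_cpu_ids, the highest CPU id actually possible on the running system, instead of walking all NR_CPUS bit positions. A small sketch of the difference, assuming a hypothetical do_work() callback:

        unsigned int i;

        for_each_cpu_mask(i, mask)      /* examines bits 0 .. NR_CPUS-1 */
                do_work(i);

        for_each_cpu_mask_nr(i, mask)   /* examines only bits 0 .. nr_cpu_ids-1 */
                do_work(i);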
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index ff517f0b8cc4..650d40f7912b 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
489 int sibling; 489 int sibling;
490 490
491 this_leaf = CPUID4_INFO_IDX(cpu, index); 491 this_leaf = CPUID4_INFO_IDX(cpu, index);
492 for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) { 492 for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
493 sibling_leaf = CPUID4_INFO_IDX(sibling, index); 493 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
494 cpu_clear(cpu, sibling_leaf->shared_cpu_map); 494 cpu_clear(cpu, sibling_leaf->shared_cpu_map);
495 } 495 }
@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
516 unsigned long j; 516 unsigned long j;
517 int retval; 517 int retval;
518 cpumask_t oldmask; 518 cpumask_t oldmask;
519 cpumask_of_cpu_ptr(newmask, cpu);
519 520
520 if (num_cache_leaves == 0) 521 if (num_cache_leaves == 0)
521 return -ENOENT; 522 return -ENOENT;
@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
526 return -ENOMEM; 527 return -ENOMEM;
527 528
528 oldmask = current->cpus_allowed; 529 oldmask = current->cpus_allowed;
529 retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 530 retval = set_cpus_allowed_ptr(current, newmask);
530 if (retval) 531 if (retval)
531 goto out; 532 goto out;
532 533
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 9ab65be82427..65a339678ece 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -580,7 +580,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
580 char __user *buf = ubuf; 580 char __user *buf = ubuf;
581 int i, err; 581 int i, err;
582 582
583 cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL); 583 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
584 if (!cpu_tsc) 584 if (!cpu_tsc)
585 return -ENOMEM; 585 return -ENOMEM;
586 586
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e1193..88736cadbaa6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
527 if (err) 527 if (err)
528 goto out_free; 528 goto out_free;
529 529
530 for_each_cpu_mask(i, b->cpus) { 530 for_each_cpu_mask_nr(i, b->cpus) {
531 if (i == cpu) 531 if (i == cpu)
532 continue; 532 continue;
533 533
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
617#endif 617#endif
618 618
619 /* remove all sibling symlinks before unregistering */ 619 /* remove all sibling symlinks before unregistering */
620 for_each_cpu_mask(i, b->cpus) { 620 for_each_cpu_mask_nr(i, b->cpus) {
621 if (i == cpu) 621 if (i == cpu)
622 continue; 622 continue;
623 623
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 0d0d9057e7c0..a26c480b9491 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -160,7 +160,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
160{ 160{
161 if (*pos == 0) /* just in case, cpu 0 is not the first */ 161 if (*pos == 0) /* just in case, cpu 0 is not the first */
162 *pos = first_cpu(cpu_online_map); 162 *pos = first_cpu(cpu_online_map);
163 if ((*pos) < NR_CPUS && cpu_online(*pos)) 163 if ((*pos) < nr_cpu_ids && cpu_online(*pos))
164 return &cpu_data(*pos); 164 return &cpu_data(*pos);
165 return NULL; 165 return NULL;
166} 166}
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index cdfd94cc6b14..109792bc7cfa 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -54,6 +54,16 @@
54#include <asm/ftrace.h> 54#include <asm/ftrace.h>
55#include <asm/irq_vectors.h> 55#include <asm/irq_vectors.h>
56 56
57/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
58#include <linux/elf-em.h>
59#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
60#define __AUDIT_ARCH_LE 0x40000000
61
62#ifndef CONFIG_AUDITSYSCALL
63#define sysenter_audit syscall_trace_entry
64#define sysexit_audit syscall_exit_work
65#endif
66
57/* 67/*
58 * We use macros for low-level operations which need to be overridden 68 * We use macros for low-level operations which need to be overridden
59 * for paravirtualization. The following will never clobber any registers: 69 * for paravirtualization. The following will never clobber any registers:
@@ -333,7 +343,8 @@ sysenter_past_esp:
333 343
334 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ 344 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
335 testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) 345 testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
336 jnz syscall_trace_entry 346 jnz sysenter_audit
347sysenter_do_call:
337 cmpl $(nr_syscalls), %eax 348 cmpl $(nr_syscalls), %eax
338 jae syscall_badsys 349 jae syscall_badsys
339 call *sys_call_table(,%eax,4) 350 call *sys_call_table(,%eax,4)
@@ -343,7 +354,8 @@ sysenter_past_esp:
343 TRACE_IRQS_OFF 354 TRACE_IRQS_OFF
344 movl TI_flags(%ebp), %ecx 355 movl TI_flags(%ebp), %ecx
345 testw $_TIF_ALLWORK_MASK, %cx 356 testw $_TIF_ALLWORK_MASK, %cx
346 jne syscall_exit_work 357 jne sysexit_audit
358sysenter_exit:
347/* if something modifies registers it must also disable sysexit */ 359/* if something modifies registers it must also disable sysexit */
348 movl PT_EIP(%esp), %edx 360 movl PT_EIP(%esp), %edx
349 movl PT_OLDESP(%esp), %ecx 361 movl PT_OLDESP(%esp), %ecx
@@ -351,6 +363,45 @@ sysenter_past_esp:
351 TRACE_IRQS_ON 363 TRACE_IRQS_ON
3521: mov PT_FS(%esp), %fs 3641: mov PT_FS(%esp), %fs
353 ENABLE_INTERRUPTS_SYSEXIT 365 ENABLE_INTERRUPTS_SYSEXIT
366
367#ifdef CONFIG_AUDITSYSCALL
368sysenter_audit:
369 testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
370 jnz syscall_trace_entry
371 addl $4,%esp
372 CFI_ADJUST_CFA_OFFSET -4
373 /* %esi already in 8(%esp) 6th arg: 4th syscall arg */
374 /* %edx already in 4(%esp) 5th arg: 3rd syscall arg */
375 /* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */
376 movl %ebx,%ecx /* 3rd arg: 1st syscall arg */
377 movl %eax,%edx /* 2nd arg: syscall number */
378 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
379 call audit_syscall_entry
380 pushl %ebx
381 CFI_ADJUST_CFA_OFFSET 4
382 movl PT_EAX(%esp),%eax /* reload syscall number */
383 jmp sysenter_do_call
384
385sysexit_audit:
386 testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
387 jne syscall_exit_work
388 TRACE_IRQS_ON
389 ENABLE_INTERRUPTS(CLBR_ANY)
390 movl %eax,%edx /* second arg, syscall return value */
391 cmpl $0,%eax /* is it < 0? */
392 setl %al /* 1 if so, 0 if not */
393 movzbl %al,%eax /* zero-extend that */
394 inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
395 call audit_syscall_exit
396 DISABLE_INTERRUPTS(CLBR_ANY)
397 TRACE_IRQS_OFF
398 movl TI_flags(%ebp), %ecx
399 testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
400 jne syscall_exit_work
401 movl PT_EAX(%esp),%eax /* reload syscall return value */
402 jmp sysenter_exit
403#endif
404
354 CFI_ENDPROC 405 CFI_ENDPROC
355.pushsection .fixup,"ax" 406.pushsection .fixup,"ax"
3562: movl $0,PT_FS(%esp) 4072: movl $0,PT_FS(%esp)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8410e26f4183..89434d439605 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -53,6 +53,12 @@
53#include <asm/paravirt.h> 53#include <asm/paravirt.h>
54#include <asm/ftrace.h> 54#include <asm/ftrace.h>
55 55
56/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57#include <linux/elf-em.h>
58#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59#define __AUDIT_ARCH_64BIT 0x80000000
60#define __AUDIT_ARCH_LE 0x40000000
61
56 .code64 62 .code64
57 63
58#ifdef CONFIG_FTRACE 64#ifdef CONFIG_FTRACE
@@ -351,6 +357,7 @@ ENTRY(system_call_after_swapgs)
351 GET_THREAD_INFO(%rcx) 357 GET_THREAD_INFO(%rcx)
352 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx) 358 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
353 jnz tracesys 359 jnz tracesys
360system_call_fastpath:
354 cmpq $__NR_syscall_max,%rax 361 cmpq $__NR_syscall_max,%rax
355 ja badsys 362 ja badsys
356 movq %r10,%rcx 363 movq %r10,%rcx
@@ -402,16 +409,16 @@ sysret_careful:
402sysret_signal: 409sysret_signal:
403 TRACE_IRQS_ON 410 TRACE_IRQS_ON
404 ENABLE_INTERRUPTS(CLBR_NONE) 411 ENABLE_INTERRUPTS(CLBR_NONE)
405 testl $_TIF_DO_NOTIFY_MASK,%edx 412#ifdef CONFIG_AUDITSYSCALL
406 jz 1f 413 bt $TIF_SYSCALL_AUDIT,%edx
407 414 jc sysret_audit
408 /* Really a signal */ 415#endif
409 /* edx: work flags (arg3) */ 416 /* edx: work flags (arg3) */
410 leaq do_notify_resume(%rip),%rax 417 leaq do_notify_resume(%rip),%rax
411 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 418 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
412 xorl %esi,%esi # oldset -> arg2 419 xorl %esi,%esi # oldset -> arg2
413 call ptregscall_common 420 call ptregscall_common
4141: movl $_TIF_WORK_MASK,%edi 421 movl $_TIF_WORK_MASK,%edi
415 /* Use IRET because user could have changed frame. This 422 /* Use IRET because user could have changed frame. This
416 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ 423 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
417 DISABLE_INTERRUPTS(CLBR_NONE) 424 DISABLE_INTERRUPTS(CLBR_NONE)
@@ -422,8 +429,45 @@ badsys:
422 movq $-ENOSYS,RAX-ARGOFFSET(%rsp) 429 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
423 jmp ret_from_sys_call 430 jmp ret_from_sys_call
424 431
432#ifdef CONFIG_AUDITSYSCALL
433 /*
434 * Fast path for syscall audit without full syscall trace.
435 * We just call audit_syscall_entry() directly, and then
436 * jump back to the normal fast path.
437 */
438auditsys:
439 movq %r10,%r9 /* 6th arg: 4th syscall arg */
440 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
441 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
442 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
443 movq %rax,%rsi /* 2nd arg: syscall number */
444 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
445 call audit_syscall_entry
446 LOAD_ARGS 0 /* reload call-clobbered registers */
447 jmp system_call_fastpath
448
449 /*
450 * Return fast path for syscall audit. Call audit_syscall_exit()
451 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
452 * masked off.
453 */
454sysret_audit:
455 movq %rax,%rsi /* second arg, syscall return value */
456 cmpq $0,%rax /* is it < 0? */
457 setl %al /* 1 if so, 0 if not */
458 movzbl %al,%edi /* zero-extend that into %edi */
459 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
460 call audit_syscall_exit
461 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
462 jmp sysret_check
463#endif /* CONFIG_AUDITSYSCALL */
464
425 /* Do syscall tracing */ 465 /* Do syscall tracing */
426tracesys: 466tracesys:
467#ifdef CONFIG_AUDITSYSCALL
468 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
469 jz auditsys
470#endif
427 SAVE_REST 471 SAVE_REST
428 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ 472 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
429 FIXUP_TOP_OF_STACK %rdi 473 FIXUP_TOP_OF_STACK %rdi
@@ -448,6 +492,7 @@ tracesys:
448 * Has correct top of stack, but partial stack frame. 492 * Has correct top of stack, but partial stack frame.
449 */ 493 */
450 .globl int_ret_from_sys_call 494 .globl int_ret_from_sys_call
495 .globl int_with_check
451int_ret_from_sys_call: 496int_ret_from_sys_call:
452 DISABLE_INTERRUPTS(CLBR_NONE) 497 DISABLE_INTERRUPTS(CLBR_NONE)
453 TRACE_IRQS_OFF 498 TRACE_IRQS_OFF
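Both entry_32.S and entry_64.S gate the new fast path on the same thread-info test: if any syscall-entry work bit other than TIF_SYSCALL_AUDIT is set, the old syscall_trace_entry/tracesys slow path is still taken; if auditing is the only pending work, control branches straight to the direct audit_syscall_entry() call. The testl instructions in sysenter_audit and tracesys above amount to the following C predicate (the helper name is made up for illustration):

        /* true when the audit-only fast path may be used for this task */
        static inline bool audit_fastpath_ok(unsigned long ti_flags)
        {
                return (ti_flags & (_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT)) == 0;
        }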
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 1a9c68845ee8..786548a62d38 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -168,7 +168,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
168 * May as well be the first. 168 * May as well be the first.
169 */ 169 */
170 cpu = first_cpu(cpumask); 170 cpu = first_cpu(cpumask);
171 if ((unsigned)cpu < NR_CPUS) 171 if ((unsigned)cpu < nr_cpu_ids)
172 return per_cpu(x86_cpu_to_apicid, cpu); 172 return per_cpu(x86_cpu_to_apicid, cpu);
173 else 173 else
174 return BAD_APICID; 174 return BAD_APICID;
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 3c3929340692..2cfcbded888a 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -98,7 +98,7 @@ static void uv_send_IPI_mask(cpumask_t mask, int vector)
98{ 98{
99 unsigned int cpu; 99 unsigned int cpu;
100 100
101 for (cpu = 0; cpu < NR_CPUS; ++cpu) 101 for_each_possible_cpu(cpu)
102 if (cpu_isset(cpu, mask)) 102 if (cpu_isset(cpu, mask))
103 uv_send_IPI_one(cpu, vector); 103 uv_send_IPI_one(cpu, vector);
104} 104}
@@ -132,7 +132,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
132 * May as well be the first. 132 * May as well be the first.
133 */ 133 */
134 cpu = first_cpu(cpumask); 134 cpu = first_cpu(cpumask);
135 if ((unsigned)cpu < NR_CPUS) 135 if ((unsigned)cpu < nr_cpu_ids)
136 return per_cpu(x86_cpu_to_apicid, cpu); 136 return per_cpu(x86_cpu_to_apicid, cpu);
137 else 137 else
138 return BAD_APICID; 138 return BAD_APICID;
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 0ea6a19bfdfe..ad2b15a1334d 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -468,7 +468,7 @@ void hpet_disable(void)
468#define RTC_NUM_INTS 1 468#define RTC_NUM_INTS 1
469 469
470static unsigned long hpet_rtc_flags; 470static unsigned long hpet_rtc_flags;
471static unsigned long hpet_prev_update_sec; 471static int hpet_prev_update_sec;
472static struct rtc_time hpet_alarm_time; 472static struct rtc_time hpet_alarm_time;
473static unsigned long hpet_pie_count; 473static unsigned long hpet_pie_count;
474static unsigned long hpet_t1_cmp; 474static unsigned long hpet_t1_cmp;
@@ -575,6 +575,9 @@ int hpet_set_rtc_irq_bit(unsigned long bit_mask)
575 575
576 hpet_rtc_flags |= bit_mask; 576 hpet_rtc_flags |= bit_mask;
577 577
578 if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
579 hpet_prev_update_sec = -1;
580
578 if (!oldbits) 581 if (!oldbits)
579 hpet_rtc_timer_init(); 582 hpet_rtc_timer_init();
580 583
@@ -652,7 +655,7 @@ static void hpet_rtc_timer_reinit(void)
652 if (hpet_rtc_flags & RTC_PIE) 655 if (hpet_rtc_flags & RTC_PIE)
653 hpet_pie_count += lost_ints; 656 hpet_pie_count += lost_ints;
654 if (printk_ratelimit()) 657 if (printk_ratelimit())
655 printk(KERN_WARNING "rtc: lost %d interrupts\n", 658 printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
656 lost_ints); 659 lost_ints);
657 } 660 }
658} 661}
@@ -670,7 +673,8 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
670 673
671 if (hpet_rtc_flags & RTC_UIE && 674 if (hpet_rtc_flags & RTC_UIE &&
672 curr_time.tm_sec != hpet_prev_update_sec) { 675 curr_time.tm_sec != hpet_prev_update_sec) {
673 rtc_int_flag = RTC_UF; 676 if (hpet_prev_update_sec >= 0)
677 rtc_int_flag = RTC_UF;
674 hpet_prev_update_sec = curr_time.tm_sec; 678 hpet_prev_update_sec = curr_time.tm_sec;
675 } 679 }
676 680
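Changing hpet_prev_update_sec from unsigned long to int lets -1 act as a "no second recorded yet" marker: it is reset to -1 whenever RTC_UIE is newly enabled, and the interrupt handler then records the current second but suppresses the RTC_UF report for that first tick, so an update interrupt is not raised from a stale second value. The guarded handler logic, condensed:

        if ((hpet_rtc_flags & RTC_UIE) && curr_time.tm_sec != hpet_prev_update_sec) {
                if (hpet_prev_update_sec >= 0)  /* skip the first tick after enabling UIE */
                        rtc_int_flag = RTC_UF;
                hpet_prev_update_sec = curr_time.tm_sec;
        }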
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 64a46affd858..8269434d1707 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -732,7 +732,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
732 return 0; 732 return 0;
733 } 733 }
734 734
735 for_each_cpu_mask(cpu, mask) { 735 for_each_cpu_mask_nr(cpu, mask) {
736 cpumask_t domain, new_mask; 736 cpumask_t domain, new_mask;
737 int new_cpu; 737 int new_cpu;
738 int vector, offset; 738 int vector, offset;
@@ -753,7 +753,7 @@ next:
753 continue; 753 continue;
754 if (vector == IA32_SYSCALL_VECTOR) 754 if (vector == IA32_SYSCALL_VECTOR)
755 goto next; 755 goto next;
756 for_each_cpu_mask(new_cpu, new_mask) 756 for_each_cpu_mask_nr(new_cpu, new_mask)
757 if (per_cpu(vector_irq, new_cpu)[vector] != -1) 757 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
758 goto next; 758 goto next;
759 /* Found one! */ 759 /* Found one! */
@@ -763,7 +763,7 @@ next:
763 cfg->move_in_progress = 1; 763 cfg->move_in_progress = 1;
764 cfg->old_domain = cfg->domain; 764 cfg->old_domain = cfg->domain;
765 } 765 }
766 for_each_cpu_mask(new_cpu, new_mask) 766 for_each_cpu_mask_nr(new_cpu, new_mask)
767 per_cpu(vector_irq, new_cpu)[vector] = irq; 767 per_cpu(vector_irq, new_cpu)[vector] = irq;
768 cfg->vector = vector; 768 cfg->vector = vector;
769 cfg->domain = domain; 769 cfg->domain = domain;
@@ -795,7 +795,7 @@ static void __clear_irq_vector(int irq)
795 795
796 vector = cfg->vector; 796 vector = cfg->vector;
797 cpus_and(mask, cfg->domain, cpu_online_map); 797 cpus_and(mask, cfg->domain, cpu_online_map);
798 for_each_cpu_mask(cpu, mask) 798 for_each_cpu_mask_nr(cpu, mask)
799 per_cpu(vector_irq, cpu)[vector] = -1; 799 per_cpu(vector_irq, cpu)[vector] = -1;
800 800
801 cfg->vector = 0; 801 cfg->vector = 0;
@@ -1373,12 +1373,10 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
1373static int ioapic_retrigger_irq(unsigned int irq) 1373static int ioapic_retrigger_irq(unsigned int irq)
1374{ 1374{
1375 struct irq_cfg *cfg = &irq_cfg[irq]; 1375 struct irq_cfg *cfg = &irq_cfg[irq];
1376 cpumask_t mask;
1377 unsigned long flags; 1376 unsigned long flags;
1378 1377
1379 spin_lock_irqsave(&vector_lock, flags); 1378 spin_lock_irqsave(&vector_lock, flags);
1380 mask = cpumask_of_cpu(first_cpu(cfg->domain)); 1379 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
1381 send_IPI_mask(mask, cfg->vector);
1382 spin_unlock_irqrestore(&vector_lock, flags); 1380 spin_unlock_irqrestore(&vector_lock, flags);
1383 1381
1384 return 1; 1382 return 1;
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 0373e88de95a..1f26fd9ec4f4 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -43,10 +43,11 @@
43 43
44#define BUILD_IRQ(nr) \ 44#define BUILD_IRQ(nr) \
45 asmlinkage void IRQ_NAME(nr); \ 45 asmlinkage void IRQ_NAME(nr); \
46 asm("\n.p2align\n" \ 46 asm("\n.text\n.p2align\n" \
47 "IRQ" #nr "_interrupt:\n\t" \ 47 "IRQ" #nr "_interrupt:\n\t" \
48 "push $~(" #nr ") ; " \ 48 "push $~(" #nr ") ; " \
49 "jmp common_interrupt"); 49 "jmp common_interrupt\n" \
50 ".previous");
50 51
51#define BI(x,y) \ 52#define BI(x,y) \
52 BUILD_IRQ(x##y) 53 BUILD_IRQ(x##y)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 43c019f85f0d..6c27679ec6aa 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -431,7 +431,6 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
431 regs->ip = (unsigned long)p->ainsn.insn; 431 regs->ip = (unsigned long)p->ainsn.insn;
432} 432}
433 433
434/* Called with kretprobe_lock held */
435void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 434void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
436 struct pt_regs *regs) 435 struct pt_regs *regs)
437{ 436{
@@ -682,8 +681,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
682 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 681 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
683 682
684 INIT_HLIST_HEAD(&empty_rp); 683 INIT_HLIST_HEAD(&empty_rp);
685 spin_lock_irqsave(&kretprobe_lock, flags); 684 kretprobe_hash_lock(current, &head, &flags);
686 head = kretprobe_inst_table_head(current);
687 /* fixup registers */ 685 /* fixup registers */
688#ifdef CONFIG_X86_64 686#ifdef CONFIG_X86_64
689 regs->cs = __KERNEL_CS; 687 regs->cs = __KERNEL_CS;
@@ -732,7 +730,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
732 730
733 kretprobe_assert(ri, orig_ret_address, trampoline_address); 731 kretprobe_assert(ri, orig_ret_address, trampoline_address);
734 732
735 spin_unlock_irqrestore(&kretprobe_lock, flags); 733 kretprobe_hash_unlock(current, &flags);
736 734
737 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 735 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
738 hlist_del(&ri->hlist); 736 hlist_del(&ri->hlist);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index a8449571858a..3fee2aa50f3f 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -62,12 +62,12 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
62 62
63 if (reload) { 63 if (reload) {
64#ifdef CONFIG_SMP 64#ifdef CONFIG_SMP
65 cpumask_t mask; 65 cpumask_of_cpu_ptr_declare(mask);
66 66
67 preempt_disable(); 67 preempt_disable();
68 load_LDT(pc); 68 load_LDT(pc);
69 mask = cpumask_of_cpu(smp_processor_id()); 69 cpumask_of_cpu_ptr_next(mask, smp_processor_id());
70 if (!cpus_equal(current->mm->cpu_vm_mask, mask)) 70 if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
71 smp_call_function(flush_ldt, current->mm, 1); 71 smp_call_function(flush_ldt, current->mm, 1);
72 preempt_enable(); 72 preempt_enable();
73#else 73#else
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index fc4790638b69..6994c751590e 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -388,6 +388,7 @@ static int do_microcode_update (void)
388 void *new_mc = NULL; 388 void *new_mc = NULL;
389 int cpu; 389 int cpu;
390 cpumask_t old; 390 cpumask_t old;
391 cpumask_of_cpu_ptr_declare(newmask);
391 392
392 old = current->cpus_allowed; 393 old = current->cpus_allowed;
393 394
@@ -404,7 +405,8 @@ static int do_microcode_update (void)
404 405
405 if (!uci->valid) 406 if (!uci->valid)
406 continue; 407 continue;
407 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 408 cpumask_of_cpu_ptr_next(newmask, cpu);
409 set_cpus_allowed_ptr(current, newmask);
408 error = get_maching_microcode(new_mc, cpu); 410 error = get_maching_microcode(new_mc, cpu);
409 if (error < 0) 411 if (error < 0)
410 goto out; 412 goto out;
@@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu)
574 struct cpuinfo_x86 *c = &cpu_data(cpu); 576 struct cpuinfo_x86 *c = &cpu_data(cpu);
575 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 577 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
576 cpumask_t old; 578 cpumask_t old;
579 cpumask_of_cpu_ptr(newmask, cpu);
577 unsigned int val[2]; 580 unsigned int val[2];
578 int err = 0; 581 int err = 0;
579 582
@@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu)
582 return 0; 585 return 0;
583 586
584 old = current->cpus_allowed; 587 old = current->cpus_allowed;
585 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 588 set_cpus_allowed_ptr(current, newmask);
586 589
587 /* Check if the microcode we have in memory matches the CPU */ 590 /* Check if the microcode we have in memory matches the CPU */
588 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || 591 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu)
620static void microcode_init_cpu(int cpu, int resume) 623static void microcode_init_cpu(int cpu, int resume)
621{ 624{
622 cpumask_t old; 625 cpumask_t old;
626 cpumask_of_cpu_ptr(newmask, cpu);
623 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 627 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
624 628
625 old = current->cpus_allowed; 629 old = current->cpus_allowed;
626 630
627 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 631 set_cpus_allowed_ptr(current, newmask);
628 mutex_lock(&microcode_mutex); 632 mutex_lock(&microcode_mutex);
629 collect_cpu_info(cpu); 633 collect_cpu_info(cpu);
630 if (uci->valid && system_state == SYSTEM_RUNNING && !resume) 634 if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -658,11 +662,12 @@ static ssize_t reload_store(struct sys_device *dev,
658 return -EINVAL; 662 return -EINVAL;
659 if (val == 1) { 663 if (val == 1) {
660 cpumask_t old; 664 cpumask_t old;
665 cpumask_of_cpu_ptr(newmask, cpu);
661 666
662 old = current->cpus_allowed; 667 old = current->cpus_allowed;
663 668
664 get_online_cpus(); 669 get_online_cpus();
665 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 670 set_cpus_allowed_ptr(current, newmask);
666 671
667 mutex_lock(&microcode_mutex); 672 mutex_lock(&microcode_mutex);
668 if (uci->valid) 673 if (uci->valid)
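
Both the ldt.c and microcode.c hunks above convert open-coded cpumask_of_cpu() temporaries to the pointer-based helpers (cpumask_of_cpu_ptr_declare, cpumask_of_cpu_ptr, cpumask_of_cpu_ptr_next), which avoid placing a full cpumask_t on the stack. A hedged sketch of the resulting "pin to a CPU, do the work, restore affinity" pattern; run_on_cpu() is a made-up name used only for illustration.

static int run_on_cpu(int cpu)
{
	cpumask_t saved_mask = current->cpus_allowed;
	cpumask_of_cpu_ptr(newmask, cpu);	/* declares and points at 'cpu' */

	set_cpus_allowed_ptr(current, newmask);	/* migrate onto 'cpu' */

	/* ... per-CPU work: MSR access, microcode load, etc. ... */

	set_cpus_allowed_ptr(current, &saved_mask);	/* restore old affinity */
	return 0;
}
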
diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module_64.c
index 0e867676b5a5..6ba87830d4b1 100644
--- a/arch/x86/kernel/module_64.c
+++ b/arch/x86/kernel/module_64.c
@@ -22,6 +22,7 @@
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/mm.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/bug.h> 27#include <linux/bug.h>
27 28
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 151f2d171f7c..19e7fc7c2c4f 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -29,6 +29,7 @@
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/crash_dump.h>
32#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
33#include <linux/bitops.h> 34#include <linux/bitops.h>
34#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
@@ -167,6 +168,8 @@ static void calgary_dump_error_regs(struct iommu_table *tbl);
167static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev); 168static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
168static void calioc2_tce_cache_blast(struct iommu_table *tbl); 169static void calioc2_tce_cache_blast(struct iommu_table *tbl);
169static void calioc2_dump_error_regs(struct iommu_table *tbl); 170static void calioc2_dump_error_regs(struct iommu_table *tbl);
171static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
172static void get_tce_space_from_tar(void);
170 173
171static struct cal_chipset_ops calgary_chip_ops = { 174static struct cal_chipset_ops calgary_chip_ops = {
172 .handle_quirks = calgary_handle_quirks, 175 .handle_quirks = calgary_handle_quirks,
@@ -830,7 +833,11 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
830 833
831 tbl = pci_iommu(dev->bus); 834 tbl = pci_iommu(dev->bus);
832 tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space; 835 tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
833 tce_free(tbl, 0, tbl->it_size); 836
837 if (is_kdump_kernel())
838 calgary_init_bitmap_from_tce_table(tbl);
839 else
840 tce_free(tbl, 0, tbl->it_size);
834 841
835 if (is_calgary(dev->device)) 842 if (is_calgary(dev->device))
836 tbl->chip_ops = &calgary_chip_ops; 843 tbl->chip_ops = &calgary_chip_ops;
@@ -1209,6 +1216,10 @@ static int __init calgary_init(void)
1209 if (ret) 1216 if (ret)
1210 return ret; 1217 return ret;
1211 1218
1219 /* Purely for kdump kernel case */
1220 if (is_kdump_kernel())
1221 get_tce_space_from_tar();
1222
1212 do { 1223 do {
1213 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev); 1224 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
1214 if (!dev) 1225 if (!dev)
@@ -1339,6 +1350,61 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
1339 return (val != 0xffffffff); 1350 return (val != 0xffffffff);
1340} 1351}
1341 1352
1353/*
1354 * calgary_init_bitmap_from_tce_table():
 1355 * Function for the kdump case. In the second/kdump kernel, initialize
 1356 * the bitmap based on the tce table entries obtained from the first kernel
1357 */
1358static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
1359{
1360 u64 *tp;
1361 unsigned int index;
1362 tp = ((u64 *)tbl->it_base);
1363 for (index = 0 ; index < tbl->it_size; index++) {
1364 if (*tp != 0x0)
1365 set_bit(index, tbl->it_map);
1366 tp++;
1367 }
1368}
1369
1370/*
1371 * get_tce_space_from_tar():
1372 * Function for kdump case. Get the tce tables from first kernel
 1373 * by reading the contents of the base address register of the calgary iommu
1374 */
 1375static void get_tce_space_from_tar(void)
1376{
1377 int bus;
1378 void __iomem *target;
1379 unsigned long tce_space;
1380
1381 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
1382 struct calgary_bus_info *info = &bus_info[bus];
1383 unsigned short pci_device;
1384 u32 val;
1385
1386 val = read_pci_config(bus, 0, 0, 0);
1387 pci_device = (val & 0xFFFF0000) >> 16;
1388
1389 if (!is_cal_pci_dev(pci_device))
1390 continue;
1391 if (info->translation_disabled)
1392 continue;
1393
1394 if (calgary_bus_has_devices(bus, pci_device) ||
1395 translate_empty_slots) {
1396 target = calgary_reg(bus_info[bus].bbar,
1397 tar_offset(bus));
1398 tce_space = be64_to_cpu(readq(target));
1399 tce_space = tce_space & TAR_SW_BITS;
1400
1401 tce_space = tce_space & (~specified_table_size);
1402 info->tce_space = (u64 *)__va(tce_space);
1403 }
1404 }
1405 return;
1406}
1407
1342void __init detect_calgary(void) 1408void __init detect_calgary(void)
1343{ 1409{
1344 int bus; 1410 int bus;
@@ -1394,7 +1460,8 @@ void __init detect_calgary(void)
1394 return; 1460 return;
1395 } 1461 }
1396 1462
1397 specified_table_size = determine_tce_table_size(max_pfn * PAGE_SIZE); 1463 specified_table_size = determine_tce_table_size((is_kdump_kernel() ?
1464 saved_max_pfn : max_pfn) * PAGE_SIZE);
1398 1465
1399 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) { 1466 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
1400 struct calgary_bus_info *info = &bus_info[bus]; 1467 struct calgary_bus_info *info = &bus_info[bus];
@@ -1412,10 +1479,16 @@ void __init detect_calgary(void)
1412 1479
1413 if (calgary_bus_has_devices(bus, pci_device) || 1480 if (calgary_bus_has_devices(bus, pci_device) ||
1414 translate_empty_slots) { 1481 translate_empty_slots) {
1415 tbl = alloc_tce_table(); 1482 /*
1416 if (!tbl) 1483 * If it is kdump kernel, find and use tce tables
1417 goto cleanup; 1484 * from first kernel, else allocate tce tables here
1418 info->tce_space = tbl; 1485 */
1486 if (!is_kdump_kernel()) {
1487 tbl = alloc_tce_table();
1488 if (!tbl)
1489 goto cleanup;
1490 info->tce_space = tbl;
1491 }
1419 calgary_found = 1; 1492 calgary_found = 1;
1420 } 1493 }
1421 } 1494 }
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0c3927accb00..53bc653ed5ca 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -128,7 +128,7 @@ void cpu_idle(void)
128 128
129 /* endless idle loop with no priority at all */ 129 /* endless idle loop with no priority at all */
130 while (1) { 130 while (1) {
131 tick_nohz_stop_sched_tick(); 131 tick_nohz_stop_sched_tick(1);
132 while (!need_resched()) { 132 while (!need_resched()) {
133 133
134 check_pgt_cache(); 134 check_pgt_cache();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e8a8e1b99817..3fb62a7d9a16 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -120,7 +120,7 @@ void cpu_idle(void)
120 current_thread_info()->status |= TS_POLLING; 120 current_thread_info()->status |= TS_POLLING;
121 /* endless idle loop with no priority at all */ 121 /* endless idle loop with no priority at all */
122 while (1) { 122 while (1) {
123 tick_nohz_stop_sched_tick(); 123 tick_nohz_stop_sched_tick(1);
124 while (!need_resched()) { 124 while (!need_resched()) {
125 125
126 rmb(); 126 rmb();
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 9dcf39c02972..06a9f643817e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -411,24 +411,28 @@ void native_machine_shutdown(void)
411{ 411{
412 /* Stop the cpus and apics */ 412 /* Stop the cpus and apics */
413#ifdef CONFIG_SMP 413#ifdef CONFIG_SMP
414 int reboot_cpu_id;
415 414
416 /* The boot cpu is always logical cpu 0 */ 415 /* The boot cpu is always logical cpu 0 */
417 reboot_cpu_id = 0; 416 int reboot_cpu_id = 0;
417 cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
418 418
419#ifdef CONFIG_X86_32 419#ifdef CONFIG_X86_32
420 /* See if there has been given a command line override */ 420 /* See if there has been given a command line override */
421 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && 421 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
422 cpu_online(reboot_cpu)) 422 cpu_online(reboot_cpu)) {
423 reboot_cpu_id = reboot_cpu; 423 reboot_cpu_id = reboot_cpu;
424 cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
425 }
424#endif 426#endif
425 427
426 /* Make certain the cpu I'm about to reboot on is online */ 428 /* Make certain the cpu I'm about to reboot on is online */
427 if (!cpu_online(reboot_cpu_id)) 429 if (!cpu_online(reboot_cpu_id)) {
428 reboot_cpu_id = smp_processor_id(); 430 reboot_cpu_id = smp_processor_id();
431 cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
432 }
429 433
430 /* Make certain I only run on the appropriate processor */ 434 /* Make certain I only run on the appropriate processor */
431 set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); 435 set_cpus_allowed_ptr(current, newmask);
432 436
433 /* O.K Now that I'm on the appropriate processor, 437 /* O.K Now that I'm on the appropriate processor,
434 * stop all of the others. 438 * stop all of the others.
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b4aacb9f52e3..b520dae02bf4 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -597,11 +597,11 @@ void __init setup_arch(char **cmdline_p)
597 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); 597 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
598 visws_early_detect(); 598 visws_early_detect();
599 pre_setup_arch_hook(); 599 pre_setup_arch_hook();
600 early_cpu_init();
601#else 600#else
602 printk(KERN_INFO "Command line: %s\n", boot_command_line); 601 printk(KERN_INFO "Command line: %s\n", boot_command_line);
603#endif 602#endif
604 603
604 early_cpu_init();
605 early_ioremap_init(); 605 early_ioremap_init();
606 606
607 ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); 607 ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
@@ -665,9 +665,6 @@ void __init setup_arch(char **cmdline_p)
665 bss_resource.start = virt_to_phys(&__bss_start); 665 bss_resource.start = virt_to_phys(&__bss_start);
666 bss_resource.end = virt_to_phys(&__bss_stop)-1; 666 bss_resource.end = virt_to_phys(&__bss_stop)-1;
667 667
668#ifdef CONFIG_X86_64
669 early_cpu_init();
670#endif
671 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 668 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
672 *cmdline_p = command_line; 669 *cmdline_p = command_line;
673 670
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 07faaa5109cb..6fb5bcdd8933 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -661,8 +661,5 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
661 if (thread_info_flags & _TIF_SIGPENDING) 661 if (thread_info_flags & _TIF_SIGPENDING)
662 do_signal(regs); 662 do_signal(regs);
663 663
664 if (thread_info_flags & _TIF_HRTICK_RESCHED)
665 hrtick_resched();
666
667 clear_thread_flag(TIF_IRET); 664 clear_thread_flag(TIF_IRET);
668} 665}
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index bf87684474f1..b45ef8ddd651 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -53,6 +53,59 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
53 return do_sigaltstack(uss, uoss, regs->sp); 53 return do_sigaltstack(uss, uoss, regs->sp);
54} 54}
55 55
56/*
57 * Signal frame handlers.
58 */
59
60static inline int save_i387(struct _fpstate __user *buf)
61{
62 struct task_struct *tsk = current;
63 int err = 0;
64
65 BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
66 sizeof(tsk->thread.xstate->fxsave));
67
68 if ((unsigned long)buf % 16)
69 printk("save_i387: bad fpstate %p\n", buf);
70
71 if (!used_math())
72 return 0;
73 clear_used_math(); /* trigger finit */
74 if (task_thread_info(tsk)->status & TS_USEDFPU) {
75 err = save_i387_checking((struct i387_fxsave_struct __user *)
76 buf);
77 if (err)
78 return err;
79 task_thread_info(tsk)->status &= ~TS_USEDFPU;
80 stts();
81 } else {
82 if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
83 sizeof(struct i387_fxsave_struct)))
84 return -1;
85 }
86 return 1;
87}
88
89/*
90 * This restores directly out of user space. Exceptions are handled.
91 */
92static inline int restore_i387(struct _fpstate __user *buf)
93{
94 struct task_struct *tsk = current;
95 int err;
96
97 if (!used_math()) {
98 err = init_fpu(tsk);
99 if (err)
100 return err;
101 }
102
103 if (!(task_thread_info(current)->status & TS_USEDFPU)) {
104 clts();
105 task_thread_info(current)->status |= TS_USEDFPU;
106 }
107 return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
108}
56 109
57/* 110/*
58 * Do a signal return; undo the signal stack. 111 * Do a signal return; undo the signal stack.
@@ -496,9 +549,6 @@ void do_notify_resume(struct pt_regs *regs, void *unused,
496 /* deal with pending signal delivery */ 549 /* deal with pending signal delivery */
497 if (thread_info_flags & _TIF_SIGPENDING) 550 if (thread_info_flags & _TIF_SIGPENDING)
498 do_signal(regs); 551 do_signal(regs);
499
500 if (thread_info_flags & _TIF_HRTICK_RESCHED)
501 hrtick_resched();
502} 552}
503 553
504void signal_fault(struct pt_regs *regs, void __user *frame, char *where) 554void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 4b53a647bc0a..332512767f4f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -438,7 +438,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
438 cpu_set(cpu, cpu_sibling_setup_map); 438 cpu_set(cpu, cpu_sibling_setup_map);
439 439
440 if (smp_num_siblings > 1) { 440 if (smp_num_siblings > 1) {
441 for_each_cpu_mask(i, cpu_sibling_setup_map) { 441 for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
442 if (c->phys_proc_id == cpu_data(i).phys_proc_id && 442 if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
443 c->cpu_core_id == cpu_data(i).cpu_core_id) { 443 c->cpu_core_id == cpu_data(i).cpu_core_id) {
444 cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 444 cpu_set(i, per_cpu(cpu_sibling_map, cpu));
@@ -461,7 +461,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
461 return; 461 return;
462 } 462 }
463 463
464 for_each_cpu_mask(i, cpu_sibling_setup_map) { 464 for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
465 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && 465 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
466 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { 466 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
467 cpu_set(i, c->llc_shared_map); 467 cpu_set(i, c->llc_shared_map);
@@ -1219,7 +1219,7 @@ static void remove_siblinginfo(int cpu)
1219 int sibling; 1219 int sibling;
1220 struct cpuinfo_x86 *c = &cpu_data(cpu); 1220 struct cpuinfo_x86 *c = &cpu_data(cpu);
1221 1221
1222 for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { 1222 for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
1223 cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); 1223 cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
1224 /*/ 1224 /*/
1225 * last thread sibling in this cpu core going down 1225 * last thread sibling in this cpu core going down
@@ -1228,7 +1228,7 @@ static void remove_siblinginfo(int cpu)
1228 cpu_data(sibling).booted_cores--; 1228 cpu_data(sibling).booted_cores--;
1229 } 1229 }
1230 1230
1231 for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) 1231 for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
1232 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); 1232 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
1233 cpus_clear(per_cpu(cpu_sibling_map, cpu)); 1233 cpus_clear(per_cpu(cpu_sibling_map, cpu));
1234 cpus_clear(per_cpu(cpu_core_map, cpu)); 1234 cpus_clear(per_cpu(cpu_core_map, cpu));
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index adff5562f5fd..d44395ff34c3 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -326,3 +326,9 @@ ENTRY(sys_call_table)
326 .long sys_fallocate 326 .long sys_fallocate
327 .long sys_timerfd_settime /* 325 */ 327 .long sys_timerfd_settime /* 325 */
328 .long sys_timerfd_gettime 328 .long sys_timerfd_gettime
329 .long sys_signalfd4
330 .long sys_eventfd2
331 .long sys_epoll_create1
332 .long sys_dup3 /* 330 */
333 .long sys_pipe2
334 .long sys_inotify_init1
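
The six new entries above (sys_signalfd4 through sys_inotify_init1) are the "flags" variants that let close-on-exec and non-blocking be set atomically at creation time. A hypothetical userspace demo of one of them, pipe2(); it assumes a libc that exposes the wrapper (glibc 2.9+), otherwise the raw syscall(__NR_pipe2, ...) form would be needed.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	/* create the pipe with O_CLOEXEC and O_NONBLOCK already set */
	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0) {
		perror("pipe2");
		return 1;
	}
	printf("read end %d, write end %d\n", fds[0], fds[1]);
	close(fds[0]);
	close(fds[1]);
	return 0;
}
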
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 5dfef9fa061a..62fa440678d8 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -42,7 +42,6 @@
42 42
43struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 43struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
44EXPORT_SYMBOL(node_data); 44EXPORT_SYMBOL(node_data);
45static bootmem_data_t node0_bdata;
46 45
47/* 46/*
48 * numa interface - we expect the numa architecture specific code to have 47 * numa interface - we expect the numa architecture specific code to have
@@ -385,7 +384,7 @@ void __init initmem_init(unsigned long start_pfn,
385 for_each_online_node(nid) 384 for_each_online_node(nid)
386 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 385 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
387 386
388 NODE_DATA(0)->bdata = &node0_bdata; 387 NODE_DATA(0)->bdata = &bootmem_node_data[0];
389 setup_bootmem_allocator(); 388 setup_bootmem_allocator();
390} 389}
391 390
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 0b3d567e686d..8f307d914c2e 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -124,7 +124,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
124 return 1; 124 return 1;
125} 125}
126 126
127pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 127pte_t *huge_pte_alloc(struct mm_struct *mm,
128 unsigned long addr, unsigned long sz)
128{ 129{
129 pgd_t *pgd; 130 pgd_t *pgd;
130 pud_t *pud; 131 pud_t *pud;
@@ -133,9 +134,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
133 pgd = pgd_offset(mm, addr); 134 pgd = pgd_offset(mm, addr);
134 pud = pud_alloc(mm, pgd, addr); 135 pud = pud_alloc(mm, pgd, addr);
135 if (pud) { 136 if (pud) {
136 if (pud_none(*pud)) 137 if (sz == PUD_SIZE) {
137 huge_pmd_share(mm, addr, pud); 138 pte = (pte_t *)pud;
138 pte = (pte_t *) pmd_alloc(mm, pud, addr); 139 } else {
140 BUG_ON(sz != PMD_SIZE);
141 if (pud_none(*pud))
142 huge_pmd_share(mm, addr, pud);
143 pte = (pte_t *) pmd_alloc(mm, pud, addr);
144 }
139 } 145 }
140 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); 146 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
141 147
@@ -151,8 +157,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
151 pgd = pgd_offset(mm, addr); 157 pgd = pgd_offset(mm, addr);
152 if (pgd_present(*pgd)) { 158 if (pgd_present(*pgd)) {
153 pud = pud_offset(pgd, addr); 159 pud = pud_offset(pgd, addr);
154 if (pud_present(*pud)) 160 if (pud_present(*pud)) {
161 if (pud_large(*pud))
162 return (pte_t *)pud;
155 pmd = pmd_offset(pud, addr); 163 pmd = pmd_offset(pud, addr);
164 }
156 } 165 }
157 return (pte_t *) pmd; 166 return (pte_t *) pmd;
158} 167}
@@ -188,6 +197,11 @@ int pmd_huge(pmd_t pmd)
188 return 0; 197 return 0;
189} 198}
190 199
200int pud_huge(pud_t pud)
201{
202 return 0;
203}
204
191struct page * 205struct page *
192follow_huge_pmd(struct mm_struct *mm, unsigned long address, 206follow_huge_pmd(struct mm_struct *mm, unsigned long address,
193 pmd_t *pmd, int write) 207 pmd_t *pmd, int write)
@@ -208,6 +222,11 @@ int pmd_huge(pmd_t pmd)
208 return !!(pmd_val(pmd) & _PAGE_PSE); 222 return !!(pmd_val(pmd) & _PAGE_PSE);
209} 223}
210 224
225int pud_huge(pud_t pud)
226{
227 return !!(pud_val(pud) & _PAGE_PSE);
228}
229
211struct page * 230struct page *
212follow_huge_pmd(struct mm_struct *mm, unsigned long address, 231follow_huge_pmd(struct mm_struct *mm, unsigned long address,
213 pmd_t *pmd, int write) 232 pmd_t *pmd, int write)
@@ -216,9 +235,22 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
216 235
217 page = pte_page(*(pte_t *)pmd); 236 page = pte_page(*(pte_t *)pmd);
218 if (page) 237 if (page)
219 page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); 238 page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
239 return page;
240}
241
242struct page *
243follow_huge_pud(struct mm_struct *mm, unsigned long address,
244 pud_t *pud, int write)
245{
246 struct page *page;
247
248 page = pte_page(*(pte_t *)pud);
249 if (page)
250 page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
220 return page; 251 return page;
221} 252}
253
222#endif 254#endif
223 255
224/* x86_64 also uses this file */ 256/* x86_64 also uses this file */
@@ -228,6 +260,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
228 unsigned long addr, unsigned long len, 260 unsigned long addr, unsigned long len,
229 unsigned long pgoff, unsigned long flags) 261 unsigned long pgoff, unsigned long flags)
230{ 262{
263 struct hstate *h = hstate_file(file);
231 struct mm_struct *mm = current->mm; 264 struct mm_struct *mm = current->mm;
232 struct vm_area_struct *vma; 265 struct vm_area_struct *vma;
233 unsigned long start_addr; 266 unsigned long start_addr;
@@ -240,7 +273,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
240 } 273 }
241 274
242full_search: 275full_search:
243 addr = ALIGN(start_addr, HPAGE_SIZE); 276 addr = ALIGN(start_addr, huge_page_size(h));
244 277
245 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { 278 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
246 /* At this point: (!vma || addr < vma->vm_end). */ 279 /* At this point: (!vma || addr < vma->vm_end). */
@@ -262,7 +295,7 @@ full_search:
262 } 295 }
263 if (addr + mm->cached_hole_size < vma->vm_start) 296 if (addr + mm->cached_hole_size < vma->vm_start)
264 mm->cached_hole_size = vma->vm_start - addr; 297 mm->cached_hole_size = vma->vm_start - addr;
265 addr = ALIGN(vma->vm_end, HPAGE_SIZE); 298 addr = ALIGN(vma->vm_end, huge_page_size(h));
266 } 299 }
267} 300}
268 301
@@ -270,6 +303,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
270 unsigned long addr0, unsigned long len, 303 unsigned long addr0, unsigned long len,
271 unsigned long pgoff, unsigned long flags) 304 unsigned long pgoff, unsigned long flags)
272{ 305{
306 struct hstate *h = hstate_file(file);
273 struct mm_struct *mm = current->mm; 307 struct mm_struct *mm = current->mm;
274 struct vm_area_struct *vma, *prev_vma; 308 struct vm_area_struct *vma, *prev_vma;
275 unsigned long base = mm->mmap_base, addr = addr0; 309 unsigned long base = mm->mmap_base, addr = addr0;
@@ -290,7 +324,7 @@ try_again:
290 goto fail; 324 goto fail;
291 325
292 /* either no address requested or cant fit in requested address hole */ 326 /* either no address requested or cant fit in requested address hole */
293 addr = (mm->free_area_cache - len) & HPAGE_MASK; 327 addr = (mm->free_area_cache - len) & huge_page_mask(h);
294 do { 328 do {
295 /* 329 /*
296 * Lookup failure means no vma is above this address, 330 * Lookup failure means no vma is above this address,
@@ -321,7 +355,7 @@ try_again:
321 largest_hole = vma->vm_start - addr; 355 largest_hole = vma->vm_start - addr;
322 356
323 /* try just below the current vma->vm_start */ 357 /* try just below the current vma->vm_start */
324 addr = (vma->vm_start - len) & HPAGE_MASK; 358 addr = (vma->vm_start - len) & huge_page_mask(h);
325 } while (len <= vma->vm_start); 359 } while (len <= vma->vm_start);
326 360
327fail: 361fail:
@@ -359,22 +393,23 @@ unsigned long
359hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 393hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
360 unsigned long len, unsigned long pgoff, unsigned long flags) 394 unsigned long len, unsigned long pgoff, unsigned long flags)
361{ 395{
396 struct hstate *h = hstate_file(file);
362 struct mm_struct *mm = current->mm; 397 struct mm_struct *mm = current->mm;
363 struct vm_area_struct *vma; 398 struct vm_area_struct *vma;
364 399
365 if (len & ~HPAGE_MASK) 400 if (len & ~huge_page_mask(h))
366 return -EINVAL; 401 return -EINVAL;
367 if (len > TASK_SIZE) 402 if (len > TASK_SIZE)
368 return -ENOMEM; 403 return -ENOMEM;
369 404
370 if (flags & MAP_FIXED) { 405 if (flags & MAP_FIXED) {
371 if (prepare_hugepage_range(addr, len)) 406 if (prepare_hugepage_range(file, addr, len))
372 return -EINVAL; 407 return -EINVAL;
373 return addr; 408 return addr;
374 } 409 }
375 410
376 if (addr) { 411 if (addr) {
377 addr = ALIGN(addr, HPAGE_SIZE); 412 addr = ALIGN(addr, huge_page_size(h));
378 vma = find_vma(mm, addr); 413 vma = find_vma(mm, addr);
379 if (TASK_SIZE - len >= addr && 414 if (TASK_SIZE - len >= addr &&
380 (!vma || addr + len <= vma->vm_start)) 415 (!vma || addr + len <= vma->vm_start))
@@ -390,3 +425,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
390 425
391#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ 426#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
392 427
428#ifdef CONFIG_X86_64
429static __init int setup_hugepagesz(char *opt)
430{
431 unsigned long ps = memparse(opt, &opt);
432 if (ps == PMD_SIZE) {
433 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
434 } else if (ps == PUD_SIZE && cpu_has_gbpages) {
435 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
436 } else {
437 printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
438 ps >> 20);
439 return 0;
440 }
441 return 1;
442}
443__setup("hugepagesz=", setup_hugepagesz);
444#endif
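
With the per-hstate conversion above, x86 gains a hugepagesz= boot parameter: hugepagesz=2M is always accepted, and hugepagesz=1G only on CPUs with gbpages. For context, a minimal, hypothetical userspace consumer via hugetlbfs; the /mnt/huge mount point and file name are assumptions, and the mapping length must be a multiple of the mount's huge page size.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define LENGTH (2UL * 1024 * 1024)	/* one 2 MB huge page */

int main(void)
{
	char *p;
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	memset(p, 0, LENGTH);		/* fault in the huge page */
	munmap(p, LENGTH);
	close(fd);
	unlink("/mnt/huge/example");
	return 0;
}
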
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 24c1d3c30186..016f335bbeea 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -330,6 +330,14 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
330 return (void __iomem *)ret; 330 return (void __iomem *)ret;
331} 331}
332 332
333void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
334 unsigned long prot_val)
335{
336 return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
337 __builtin_return_address(0));
338}
339EXPORT_SYMBOL(ioremap_prot);
340
333/** 341/**
334 * iounmap - Free a IO remapping 342 * iounmap - Free a IO remapping
335 * @addr: virtual address from ioremap_* 343 * @addr: virtual address from ioremap_*
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 9782f42dd319..a4dd793d6003 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -23,8 +23,6 @@
23struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 23struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
24EXPORT_SYMBOL(node_data); 24EXPORT_SYMBOL(node_data);
25 25
26static bootmem_data_t plat_node_bdata[MAX_NUMNODES];
27
28struct memnode memnode; 26struct memnode memnode;
29 27
30s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { 28s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
@@ -198,7 +196,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
198 nodedata_phys + pgdat_size - 1); 196 nodedata_phys + pgdat_size - 1);
199 197
200 memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t)); 198 memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
201 NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid]; 199 NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
202 NODE_DATA(nodeid)->node_start_pfn = start_pfn; 200 NODE_DATA(nodeid)->node_start_pfn = start_pfn;
203 NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; 201 NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;
204 202
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 7f3329b55d2e..3f90289410e6 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -369,20 +369,34 @@ static int __init ppro_init(char **cpu_type)
369{ 369{
370 __u8 cpu_model = boot_cpu_data.x86_model; 370 __u8 cpu_model = boot_cpu_data.x86_model;
371 371
372 if (cpu_model == 14) 372 switch (cpu_model) {
373 case 0 ... 2:
374 *cpu_type = "i386/ppro";
375 break;
376 case 3 ... 5:
377 *cpu_type = "i386/pii";
378 break;
379 case 6 ... 8:
380 *cpu_type = "i386/piii";
381 break;
382 case 9:
383 *cpu_type = "i386/p6_mobile";
384 break;
385 case 10 ... 13:
386 *cpu_type = "i386/p6";
387 break;
388 case 14:
373 *cpu_type = "i386/core"; 389 *cpu_type = "i386/core";
374 else if (cpu_model == 15 || cpu_model == 23) 390 break;
391 case 15: case 23:
392 *cpu_type = "i386/core_2";
393 break;
394 case 26:
375 *cpu_type = "i386/core_2"; 395 *cpu_type = "i386/core_2";
376 else if (cpu_model > 0xd) 396 break;
397 default:
398 /* Unknown */
377 return 0; 399 return 0;
378 else if (cpu_model == 9) {
379 *cpu_type = "i386/p6_mobile";
380 } else if (cpu_model > 5) {
381 *cpu_type = "i386/piii";
382 } else if (cpu_model > 2) {
383 *cpu_type = "i386/pii";
384 } else {
385 *cpu_type = "i386/ppro";
386 } 400 }
387 401
388 model = &op_ppro_spec; 402 model = &op_ppro_spec;
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 2aafb67dc5f1..a09505806b82 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -280,6 +280,7 @@ static void pci_track_mmap_page_range(struct vm_area_struct *vma)
280static struct vm_operations_struct pci_mmap_ops = { 280static struct vm_operations_struct pci_mmap_ops = {
281 .open = pci_track_mmap_page_range, 281 .open = pci_track_mmap_page_range,
282 .close = pci_unmap_page_range, 282 .close = pci_unmap_page_range,
283 .access = generic_access_phys,
283}; 284};
284 285
285int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 286int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index e693812ac59a..d8faf79a0a1d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -367,7 +367,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
367 367
368 cpus_and(mask, mask, cpu_online_map); 368 cpus_and(mask, mask, cpu_online_map);
369 369
370 for_each_cpu_mask(cpu, mask) 370 for_each_cpu_mask_nr(cpu, mask)
371 xen_send_IPI_one(cpu, vector); 371 xen_send_IPI_one(cpu, vector);
372} 372}
373 373
@@ -378,7 +378,7 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
378 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); 378 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
379 379
380 /* Make sure other vcpus get a chance to run if they need to. */ 380 /* Make sure other vcpus get a chance to run if they need to. */
381 for_each_cpu_mask(cpu, mask) { 381 for_each_cpu_mask_nr(cpu, mask) {
382 if (xen_vcpu_stolen(cpu)) { 382 if (xen_vcpu_stolen(cpu)) {
383 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 383 HYPERVISOR_sched_op(SCHEDOP_yield, 0);
384 break; 384 break;
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 4038cbfe3331..7f58304fafb3 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -173,7 +173,7 @@ ENTRY(xen_sysexit)
173 pushq $__USER32_CS 173 pushq $__USER32_CS
174 pushq %rdx 174 pushq %rdx
175 175
176 pushq $VGCF_in_syscall 176 pushq $0
1771: jmp hypercall_iret 1771: jmp hypercall_iret
178ENDPATCH(xen_sysexit) 178ENDPATCH(xen_sysexit)
179RELOC(xen_sysexit, 1b+1) 179RELOC(xen_sysexit, 1b+1)
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 5e6d75c9f92b..a00359e8f7a8 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/mm.h>
19#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
20#include <linux/screen_info.h> 21#include <linux/screen_info.h>
21#include <linux/bootmem.h> 22#include <linux/bootmem.h>
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index f3e16efcd47a..ac15ecbdf919 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -49,7 +49,7 @@ asmlinkage long xtensa_pipe(int __user *userfds)
49 int fd[2]; 49 int fd[2];
50 int error; 50 int error;
51 51
52 error = do_pipe(fd); 52 error = do_pipe_flags(fd, 0);
53 if (!error) { 53 if (!error) {
54 if (copy_to_user(userfds, fd, 2 * sizeof(int))) 54 if (copy_to_user(userfds, fd, 2 * sizeof(int)))
55 error = -EFAULT; 55 error = -EFAULT;
diff --git a/block/ioctl.c b/block/ioctl.c
index 52d6385216ad..77185e5c026a 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -17,6 +17,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
17 long long start, length; 17 long long start, length;
18 int part; 18 int part;
19 int i; 19 int i;
20 int err;
20 21
21 if (!capable(CAP_SYS_ADMIN)) 22 if (!capable(CAP_SYS_ADMIN))
22 return -EACCES; 23 return -EACCES;
@@ -61,9 +62,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
61 } 62 }
62 } 63 }
63 /* all seems OK */ 64 /* all seems OK */
64 add_partition(disk, part, start, length, ADDPART_FLAG_NONE); 65 err = add_partition(disk, part, start, length, ADDPART_FLAG_NONE);
65 mutex_unlock(&bdev->bd_mutex); 66 mutex_unlock(&bdev->bd_mutex);
66 return 0; 67 return err;
67 case BLKPG_DEL_PARTITION: 68 case BLKPG_DEL_PARTITION:
68 if (!disk->part[part-1]) 69 if (!disk->part[part-1])
69 return -ENXIO; 70 return -ENXIO;
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index a5eda80e8427..ddccfb01c416 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
73 pr_debug("%s: (sync) len: %zu\n", __func__, len); 73 pr_debug("%s: (sync) len: %zu\n", __func__, len);
74 74
75 /* wait for any prerequisite operations */ 75 /* wait for any prerequisite operations */
76 if (depend_tx) { 76 async_tx_quiesce(&depend_tx);
77 /* if ack is already set then we cannot be sure
78 * we are referring to the correct operation
79 */
80 BUG_ON(async_tx_test_ack(depend_tx));
81 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
82 panic("%s: DMA_ERROR waiting for depend_tx\n",
83 __func__);
84 }
85 77
86 dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; 78 dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
87 src_buf = kmap_atomic(src, KM_USER1) + src_offset; 79 src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -91,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
91 kunmap_atomic(dest_buf, KM_USER0); 83 kunmap_atomic(dest_buf, KM_USER0);
92 kunmap_atomic(src_buf, KM_USER1); 84 kunmap_atomic(src_buf, KM_USER1);
93 85
94 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); 86 async_tx_sync_epilog(cb_fn, cb_param);
95 } 87 }
96 88
97 return tx; 89 return tx;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index f5ff3906b035..5b5eb99bb244 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -72,19 +72,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
72 dest_buf = (void *) (((char *) page_address(dest)) + offset); 72 dest_buf = (void *) (((char *) page_address(dest)) + offset);
73 73
74 /* wait for any prerequisite operations */ 74 /* wait for any prerequisite operations */
75 if (depend_tx) { 75 async_tx_quiesce(&depend_tx);
76 /* if ack is already set then we cannot be sure
77 * we are referring to the correct operation
78 */
79 BUG_ON(depend_tx->ack);
80 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
81 panic("%s: DMA_ERROR waiting for depend_tx\n",
82 __func__);
83 }
84 76
85 memset(dest_buf, val, len); 77 memset(dest_buf, val, len);
86 78
87 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); 79 async_tx_sync_epilog(cb_fn, cb_param);
88 } 80 }
89 81
90 return tx; 82 return tx;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 095c798d3170..85eaf7b1c531 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -295,7 +295,7 @@ dma_channel_add_remove(struct dma_client *client,
295 case DMA_RESOURCE_REMOVED: 295 case DMA_RESOURCE_REMOVED:
296 found = 0; 296 found = 0;
297 spin_lock_irqsave(&async_tx_lock, flags); 297 spin_lock_irqsave(&async_tx_lock, flags);
298 list_for_each_entry_rcu(ref, &async_tx_master_list, node) 298 list_for_each_entry(ref, &async_tx_master_list, node)
299 if (ref->chan == chan) { 299 if (ref->chan == chan) {
300 /* permit backing devices to go away */ 300 /* permit backing devices to go away */
301 dma_chan_put(ref->chan); 301 dma_chan_put(ref->chan);
@@ -608,23 +608,34 @@ async_trigger_callback(enum async_tx_flags flags,
608 pr_debug("%s: (sync)\n", __func__); 608 pr_debug("%s: (sync)\n", __func__);
609 609
610 /* wait for any prerequisite operations */ 610 /* wait for any prerequisite operations */
611 if (depend_tx) { 611 async_tx_quiesce(&depend_tx);
612 /* if ack is already set then we cannot be sure
613 * we are referring to the correct operation
614 */
615 BUG_ON(async_tx_test_ack(depend_tx));
616 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
617 panic("%s: DMA_ERROR waiting for depend_tx\n",
618 __func__);
619 }
620 612
621 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); 613 async_tx_sync_epilog(cb_fn, cb_param);
622 } 614 }
623 615
624 return tx; 616 return tx;
625} 617}
626EXPORT_SYMBOL_GPL(async_trigger_callback); 618EXPORT_SYMBOL_GPL(async_trigger_callback);
627 619
620/**
621 * async_tx_quiesce - ensure tx is complete and freeable upon return
622 * @tx - transaction to quiesce
623 */
624void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
625{
626 if (*tx) {
627 /* if ack is already set then we cannot be sure
628 * we are referring to the correct operation
629 */
630 BUG_ON(async_tx_test_ack(*tx));
631 if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
632 panic("DMA_ERROR waiting for transaction\n");
633 async_tx_ack(*tx);
634 *tx = NULL;
635 }
636}
637EXPORT_SYMBOL_GPL(async_tx_quiesce);
638
628module_init(async_tx_init); 639module_init(async_tx_init);
629module_exit(async_tx_exit); 640module_exit(async_tx_exit);
630 641
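
async_tx_quiesce(), added above, folds the repeated "BUG_ON stale ack, dma_wait_for_async_tx, ack, clear" sequence into one helper, and async_tx_sync_epilog() drops to two arguments. A sketch of the converted synchronous-fallback shape; sync_fallback() is an illustrative name, not an API.

static void sync_fallback(struct dma_async_tx_descriptor *depend_tx,
			  dma_async_tx_callback cb_fn, void *cb_param)
{
	/* wait for, ack and clear the dependency (no-op when NULL) */
	async_tx_quiesce(&depend_tx);

	/* ... perform the copy/memset/xor on the CPU ... */

	async_tx_sync_epilog(cb_fn, cb_param);
}
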
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3a0dddca5a10..65974c6d3d7a 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -35,74 +35,121 @@
35 * when CONFIG_DMA_ENGINE=n 35 * when CONFIG_DMA_ENGINE=n
36 */ 36 */
37static __always_inline struct dma_async_tx_descriptor * 37static __always_inline struct dma_async_tx_descriptor *
38do_async_xor(struct dma_device *device, 38do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
39 struct dma_chan *chan, struct page *dest, struct page **src_list, 39 unsigned int offset, int src_cnt, size_t len,
40 unsigned int offset, unsigned int src_cnt, size_t len, 40 enum async_tx_flags flags,
41 enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, 41 struct dma_async_tx_descriptor *depend_tx,
42 dma_async_tx_callback cb_fn, void *cb_param) 42 dma_async_tx_callback cb_fn, void *cb_param)
43{ 43{
44 dma_addr_t dma_dest; 44 struct dma_device *dma = chan->device;
45 dma_addr_t *dma_src = (dma_addr_t *) src_list; 45 dma_addr_t *dma_src = (dma_addr_t *) src_list;
46 struct dma_async_tx_descriptor *tx; 46 struct dma_async_tx_descriptor *tx = NULL;
47 int src_off = 0;
47 int i; 48 int i;
48 unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; 49 dma_async_tx_callback _cb_fn;
49 50 void *_cb_param;
50 pr_debug("%s: len: %zu\n", __func__, len); 51 enum async_tx_flags async_flags;
51 52 enum dma_ctrl_flags dma_flags;
52 dma_dest = dma_map_page(device->dev, dest, offset, len, 53 int xor_src_cnt;
53 DMA_FROM_DEVICE); 54 dma_addr_t dma_dest;
54 55
56 dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
55 for (i = 0; i < src_cnt; i++) 57 for (i = 0; i < src_cnt; i++)
56 dma_src[i] = dma_map_page(device->dev, src_list[i], offset, 58 dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
57 len, DMA_TO_DEVICE); 59 len, DMA_TO_DEVICE);
58 60
59 /* Since we have clobbered the src_list we are committed 61 while (src_cnt) {
60 * to doing this asynchronously. Drivers force forward progress 62 async_flags = flags;
61 * in case they can not provide a descriptor 63 dma_flags = 0;
62 */ 64 xor_src_cnt = min(src_cnt, dma->max_xor);
63 tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len, 65 /* if we are submitting additional xors, leave the chain open,
64 dma_prep_flags); 66 * clear the callback parameters, and leave the destination
65 if (!tx) { 67 * buffer mapped
66 if (depend_tx) 68 */
67 dma_wait_for_async_tx(depend_tx); 69 if (src_cnt > xor_src_cnt) {
68 70 async_flags &= ~ASYNC_TX_ACK;
69 while (!tx) 71 dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
70 tx = device->device_prep_dma_xor(chan, dma_dest, 72 _cb_fn = NULL;
71 dma_src, src_cnt, len, 73 _cb_param = NULL;
72 dma_prep_flags); 74 } else {
73 } 75 _cb_fn = cb_fn;
76 _cb_param = cb_param;
77 }
78 if (_cb_fn)
79 dma_flags |= DMA_PREP_INTERRUPT;
74 80
75 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 81 /* Since we have clobbered the src_list we are committed
82 * to doing this asynchronously. Drivers force forward progress
83 * in case they can not provide a descriptor
84 */
85 tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
86 xor_src_cnt, len, dma_flags);
87
88 if (unlikely(!tx))
89 async_tx_quiesce(&depend_tx);
90
 91 /* spin wait for the preceding transactions to complete */
92 while (unlikely(!tx)) {
93 dma_async_issue_pending(chan);
94 tx = dma->device_prep_dma_xor(chan, dma_dest,
95 &dma_src[src_off],
96 xor_src_cnt, len,
97 dma_flags);
98 }
99
100 async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
101 _cb_param);
102
103 depend_tx = tx;
104 flags |= ASYNC_TX_DEP_ACK;
105
106 if (src_cnt > xor_src_cnt) {
107 /* drop completed sources */
108 src_cnt -= xor_src_cnt;
109 src_off += xor_src_cnt;
110
 111 /* use the intermediate result as a source */
112 dma_src[--src_off] = dma_dest;
113 src_cnt++;
114 } else
115 break;
116 }
76 117
77 return tx; 118 return tx;
78} 119}
79 120
80static void 121static void
81do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, 122do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
82 unsigned int src_cnt, size_t len, enum async_tx_flags flags, 123 int src_cnt, size_t len, enum async_tx_flags flags,
83 struct dma_async_tx_descriptor *depend_tx, 124 dma_async_tx_callback cb_fn, void *cb_param)
84 dma_async_tx_callback cb_fn, void *cb_param)
85{ 125{
86 void *_dest;
87 int i; 126 int i;
88 127 int xor_src_cnt;
89 pr_debug("%s: len: %zu\n", __func__, len); 128 int src_off = 0;
129 void *dest_buf;
130 void **srcs = (void **) src_list;
90 131
91 /* reuse the 'src_list' array to convert to buffer pointers */ 132 /* reuse the 'src_list' array to convert to buffer pointers */
92 for (i = 0; i < src_cnt; i++) 133 for (i = 0; i < src_cnt; i++)
93 src_list[i] = (struct page *) 134 srcs[i] = page_address(src_list[i]) + offset;
94 (page_address(src_list[i]) + offset);
95 135
96 /* set destination address */ 136 /* set destination address */
97 _dest = page_address(dest) + offset; 137 dest_buf = page_address(dest) + offset;
98 138
99 if (flags & ASYNC_TX_XOR_ZERO_DST) 139 if (flags & ASYNC_TX_XOR_ZERO_DST)
100 memset(_dest, 0, len); 140 memset(dest_buf, 0, len);
101 141
102 xor_blocks(src_cnt, len, _dest, 142 while (src_cnt > 0) {
103 (void **) src_list); 143 /* process up to 'MAX_XOR_BLOCKS' sources */
144 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
145 xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);
104 146
105 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); 147 /* drop completed sources */
148 src_cnt -= xor_src_cnt;
149 src_off += xor_src_cnt;
150 }
151
152 async_tx_sync_epilog(cb_fn, cb_param);
106} 153}
107 154
108/** 155/**
@@ -132,106 +179,34 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
132 struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, 179 struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
133 &dest, 1, src_list, 180 &dest, 1, src_list,
134 src_cnt, len); 181 src_cnt, len);
135 struct dma_device *device = chan ? chan->device : NULL;
136 struct dma_async_tx_descriptor *tx = NULL;
137 dma_async_tx_callback _cb_fn;
138 void *_cb_param;
139 unsigned long local_flags;
140 int xor_src_cnt;
141 int i = 0, src_off = 0;
142
143 BUG_ON(src_cnt <= 1); 182 BUG_ON(src_cnt <= 1);
144 183
145 while (src_cnt) { 184 if (chan) {
146 local_flags = flags; 185 /* run the xor asynchronously */
147 if (device) { /* run the xor asynchronously */ 186 pr_debug("%s (async): len: %zu\n", __func__, len);
148 xor_src_cnt = min(src_cnt, device->max_xor);
149 /* if we are submitting additional xors
150 * only set the callback on the last transaction
151 */
152 if (src_cnt > xor_src_cnt) {
153 local_flags &= ~ASYNC_TX_ACK;
154 _cb_fn = NULL;
155 _cb_param = NULL;
156 } else {
157 _cb_fn = cb_fn;
158 _cb_param = cb_param;
159 }
160
161 tx = do_async_xor(device, chan, dest,
162 &src_list[src_off], offset,
163 xor_src_cnt, len, local_flags,
164 depend_tx, _cb_fn, _cb_param);
165 } else { /* run the xor synchronously */
166 /* in the sync case the dest is an implied source
167 * (assumes the dest is at the src_off index)
168 */
169 if (flags & ASYNC_TX_XOR_DROP_DST) {
170 src_cnt--;
171 src_off++;
172 }
173
174 /* process up to 'MAX_XOR_BLOCKS' sources */
175 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
176
177 /* if we are submitting additional xors
178 * only set the callback on the last transaction
179 */
180 if (src_cnt > xor_src_cnt) {
181 local_flags &= ~ASYNC_TX_ACK;
182 _cb_fn = NULL;
183 _cb_param = NULL;
184 } else {
185 _cb_fn = cb_fn;
186 _cb_param = cb_param;
187 }
188
189 /* wait for any prerequisite operations */
190 if (depend_tx) {
191 /* if ack is already set then we cannot be sure
192 * we are referring to the correct operation
193 */
194 BUG_ON(async_tx_test_ack(depend_tx));
195 if (dma_wait_for_async_tx(depend_tx) ==
196 DMA_ERROR)
197 panic("%s: DMA_ERROR waiting for "
198 "depend_tx\n",
199 __func__);
200 }
201
202 do_sync_xor(dest, &src_list[src_off], offset,
203 xor_src_cnt, len, local_flags, depend_tx,
204 _cb_fn, _cb_param);
205 }
206 187
207 /* the previous tx is hidden from the client, 188 return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
208 * so ack it 189 flags, depend_tx, cb_fn, cb_param);
209 */ 190 } else {
210 if (i && depend_tx) 191 /* run the xor synchronously */
211 async_tx_ack(depend_tx); 192 pr_debug("%s (sync): len: %zu\n", __func__, len);
212 193
213 depend_tx = tx; 194 /* in the sync case the dest is an implied source
195 * (assumes the dest is the first source)
196 */
197 if (flags & ASYNC_TX_XOR_DROP_DST) {
198 src_cnt--;
199 src_list++;
200 }
214 201
215 if (src_cnt > xor_src_cnt) { 202 /* wait for any prerequisite operations */
216 /* drop completed sources */ 203 async_tx_quiesce(&depend_tx);
217 src_cnt -= xor_src_cnt;
218 src_off += xor_src_cnt;
219 204
220 /* unconditionally preserve the destination */ 205 do_sync_xor(dest, src_list, offset, src_cnt, len,
221 flags &= ~ASYNC_TX_XOR_ZERO_DST; 206 flags, cb_fn, cb_param);
222 207
223 /* use the intermediate result a source, but remember 208 return NULL;
224 * it's dropped, because it's implied, in the sync case
225 */
226 src_list[--src_off] = dest;
227 src_cnt++;
228 flags |= ASYNC_TX_XOR_DROP_DST;
229 } else
230 src_cnt = 0;
231 i++;
232 } 209 }
233
234 return tx;
235} 210}
236EXPORT_SYMBOL_GPL(async_xor); 211EXPORT_SYMBOL_GPL(async_xor);
237 212
@@ -285,11 +260,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
285 tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, 260 tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
286 len, result, 261 len, result,
287 dma_prep_flags); 262 dma_prep_flags);
288 if (!tx) { 263 if (unlikely(!tx)) {
289 if (depend_tx) 264 async_tx_quiesce(&depend_tx);
290 dma_wait_for_async_tx(depend_tx);
291 265
292 while (!tx) 266 while (!tx)
267 dma_async_issue_pending(chan);
293 tx = device->device_prep_dma_zero_sum(chan, 268 tx = device->device_prep_dma_zero_sum(chan,
294 dma_src, src_cnt, len, result, 269 dma_src, src_cnt, len, result,
295 dma_prep_flags); 270 dma_prep_flags);
@@ -307,18 +282,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
307 tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, 282 tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
308 depend_tx, NULL, NULL); 283 depend_tx, NULL, NULL);
309 284
310 if (tx) { 285 async_tx_quiesce(&tx);
311 if (dma_wait_for_async_tx(tx) == DMA_ERROR)
312 panic("%s: DMA_ERROR waiting for tx\n",
313 __func__);
314 async_tx_ack(tx);
315 }
316 286
317 *result = page_is_zero(dest, offset, len) ? 0 : 1; 287 *result = page_is_zero(dest, offset, len) ? 0 : 1;
318 288
319 tx = NULL; 289 async_tx_sync_epilog(cb_fn, cb_param);
320
321 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
322 } 290 }
323 291
324 return tx; 292 return tx;
diff --git a/drivers/Makefile b/drivers/Makefile
index 808e0ae66aa8..54ec5e718c0e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -5,7 +5,7 @@
5# Rewritten to use lists instead of if-statements. 5# Rewritten to use lists instead of if-statements.
6# 6#
7 7
8obj-$(CONFIG_HAVE_GPIO_LIB) += gpio/ 8obj-y += gpio/
9obj-$(CONFIG_PCI) += pci/ 9obj-$(CONFIG_PCI) += pci/
10obj-$(CONFIG_PARISC) += parisc/ 10obj-$(CONFIG_PARISC) += parisc/
11obj-$(CONFIG_RAPIDIO) += rapidio/ 11obj-$(CONFIG_RAPIDIO) += rapidio/
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index cffef1bcbdbc..549db42f16cf 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -137,6 +137,10 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
137 /* Calculate required buffer size based on depth below root */ 137 /* Calculate required buffer size based on depth below root */
138 138
139 size = acpi_ns_get_pathname_length(node); 139 size = acpi_ns_get_pathname_length(node);
140 if (!size) {
141 ACPI_ERROR((AE_INFO, "Invalid node failure"));
142 return_PTR(NULL);
143 }
140 144
141 /* Allocate a buffer to be returned to caller */ 145 /* Allocate a buffer to be returned to caller */
142 146
@@ -229,6 +233,10 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
229 /* Determine size required for the caller buffer */ 233 /* Determine size required for the caller buffer */
230 234
231 required_size = acpi_ns_get_pathname_length(node); 235 required_size = acpi_ns_get_pathname_length(node);
236 if (!required_size) {
237 ACPI_ERROR((AE_INFO, "Invalid node failure"));
238 return_ACPI_STATUS(AE_ERROR);
239 }
232 240
233 /* Validate/Allocate/Clear caller buffer */ 241 /* Validate/Allocate/Clear caller buffer */
234 242
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 233c40c51684..89f3b2abfdc7 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -113,20 +113,23 @@ acpi_pci_link_check_possible(struct acpi_resource *resource, void *context)
113 113
114 switch (resource->type) { 114 switch (resource->type) {
115 case ACPI_RESOURCE_TYPE_START_DEPENDENT: 115 case ACPI_RESOURCE_TYPE_START_DEPENDENT:
116 case ACPI_RESOURCE_TYPE_END_TAG:
116 return AE_OK; 117 return AE_OK;
117 case ACPI_RESOURCE_TYPE_IRQ: 118 case ACPI_RESOURCE_TYPE_IRQ:
118 { 119 {
119 struct acpi_resource_irq *p = &resource->data.irq; 120 struct acpi_resource_irq *p = &resource->data.irq;
120 if (!p || !p->interrupt_count) { 121 if (!p || !p->interrupt_count) {
121 printk(KERN_WARNING PREFIX "Blank IRQ resource\n"); 122 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
123 "Blank _PRS IRQ resource\n"));
122 return AE_OK; 124 return AE_OK;
123 } 125 }
124 for (i = 0; 126 for (i = 0;
125 (i < p->interrupt_count 127 (i < p->interrupt_count
126 && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) { 128 && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) {
127 if (!p->interrupts[i]) { 129 if (!p->interrupts[i]) {
128 printk(KERN_WARNING PREFIX "Invalid IRQ %d\n", 130 printk(KERN_WARNING PREFIX
129 p->interrupts[i]); 131 "Invalid _PRS IRQ %d\n",
132 p->interrupts[i]);
130 continue; 133 continue;
131 } 134 }
132 link->irq.possible[i] = p->interrupts[i]; 135 link->irq.possible[i] = p->interrupts[i];
@@ -143,15 +146,16 @@ acpi_pci_link_check_possible(struct acpi_resource *resource, void *context)
143 &resource->data.extended_irq; 146 &resource->data.extended_irq;
144 if (!p || !p->interrupt_count) { 147 if (!p || !p->interrupt_count) {
145 printk(KERN_WARNING PREFIX 148 printk(KERN_WARNING PREFIX
146 "Blank EXT IRQ resource\n"); 149 "Blank _PRS EXT IRQ resource\n");
147 return AE_OK; 150 return AE_OK;
148 } 151 }
149 for (i = 0; 152 for (i = 0;
150 (i < p->interrupt_count 153 (i < p->interrupt_count
151 && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) { 154 && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) {
152 if (!p->interrupts[i]) { 155 if (!p->interrupts[i]) {
153 printk(KERN_WARNING PREFIX "Invalid IRQ %d\n", 156 printk(KERN_WARNING PREFIX
154 p->interrupts[i]); 157 "Invalid _PRS IRQ %d\n",
158 p->interrupts[i]);
155 continue; 159 continue;
156 } 160 }
157 link->irq.possible[i] = p->interrupts[i]; 161 link->irq.possible[i] = p->interrupts[i];
@@ -163,7 +167,8 @@ acpi_pci_link_check_possible(struct acpi_resource *resource, void *context)
163 break; 167 break;
164 } 168 }
165 default: 169 default:
166 printk(KERN_ERR PREFIX "Resource is not an IRQ entry\n"); 170 printk(KERN_ERR PREFIX "_PRS resource type 0x%x isn't an IRQ\n",
171 resource->type);
167 return AE_OK; 172 return AE_OK;
168 } 173 }
169 174
@@ -199,6 +204,9 @@ acpi_pci_link_check_current(struct acpi_resource *resource, void *context)
199 204
200 205
201 switch (resource->type) { 206 switch (resource->type) {
207 case ACPI_RESOURCE_TYPE_START_DEPENDENT:
208 case ACPI_RESOURCE_TYPE_END_TAG:
209 return AE_OK;
202 case ACPI_RESOURCE_TYPE_IRQ: 210 case ACPI_RESOURCE_TYPE_IRQ:
203 { 211 {
204 struct acpi_resource_irq *p = &resource->data.irq; 212 struct acpi_resource_irq *p = &resource->data.irq;
@@ -208,7 +216,7 @@ acpi_pci_link_check_current(struct acpi_resource *resource, void *context)
208 * particularly those those w/ _STA disabled 216 * particularly those those w/ _STA disabled
209 */ 217 */
210 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 218 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
211 "Blank IRQ resource\n")); 219 "Blank _CRS IRQ resource\n"));
212 return AE_OK; 220 return AE_OK;
213 } 221 }
214 *irq = p->interrupts[0]; 222 *irq = p->interrupts[0];
@@ -224,7 +232,7 @@ acpi_pci_link_check_current(struct acpi_resource *resource, void *context)
224 * return at least 1 IRQ 232 * return at least 1 IRQ
225 */ 233 */
226 printk(KERN_WARNING PREFIX 234 printk(KERN_WARNING PREFIX
227 "Blank EXT IRQ resource\n"); 235 "Blank _CRS EXT IRQ resource\n");
228 return AE_OK; 236 return AE_OK;
229 } 237 }
230 *irq = p->interrupts[0]; 238 *irq = p->interrupts[0];
@@ -232,10 +240,11 @@ acpi_pci_link_check_current(struct acpi_resource *resource, void *context)
232 } 240 }
233 break; 241 break;
234 default: 242 default:
235 printk(KERN_ERR PREFIX "Resource %d isn't an IRQ\n", resource->type); 243 printk(KERN_ERR PREFIX "_CRS resource type 0x%x isn't an IRQ\n",
236 case ACPI_RESOURCE_TYPE_END_TAG: 244 resource->type);
237 return AE_OK; 245 return AE_OK;
238 } 246 }
247
239 return AE_CTRL_TERMINATE; 248 return AE_CTRL_TERMINATE;
240} 249}
241 250
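
Note on the pci_link hunks above: _PRS and _CRS are walked with acpi_walk_resources(), which hands every descriptor in the returned buffer to the callback, including StartDependentFn and EndTag descriptors that are not IRQs at all. Tagging the messages with _PRS/_CRS and whitelisting the structural descriptor types keeps the log from flagging perfectly normal buffers. A minimal sketch of such a walk, assuming the same ACPICA API the driver uses; demo_count_irqs and demo_walk_prs are illustrative names, not part of the patch:

#include <acpi/acpi.h>

/* Count the IRQs listed in a link device's _PRS; sketch only. */
static acpi_status demo_count_irqs(struct acpi_resource *res, void *context)
{
	int *nr_irqs = context;

	switch (res->type) {
	case ACPI_RESOURCE_TYPE_START_DEPENDENT:
	case ACPI_RESOURCE_TYPE_END_TAG:
		return AE_OK;		/* structural descriptors, not IRQs */
	case ACPI_RESOURCE_TYPE_IRQ:
		*nr_irqs += res->data.irq.interrupt_count;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		*nr_irqs += res->data.extended_irq.interrupt_count;
		return AE_OK;
	default:
		return AE_OK;		/* ignore anything else */
	}
}

static acpi_status demo_walk_prs(acpi_handle handle, int *nr_irqs)
{
	/* "_PRS" is METHOD_NAME__PRS in the real driver. */
	return acpi_walk_resources(handle, "_PRS", demo_count_irqs, nr_irqs);
}
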
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index b9ab030a52d5..dd376f7ad090 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -6,8 +6,8 @@
6 * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code 6 * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code
7 * review and fixes. 7 * review and fixes.
8 * 8 *
9 * Copyright (C) 2007 Alex Chiang <achiang@hp.com> 9 * Copyright (C) 2007-2008 Hewlett-Packard Development Company, L.P.
10 * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. 10 * Alex Chiang <achiang@hp.com>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms and conditions of the GNU General Public License, 13 * under the terms and conditions of the GNU General Public License,
@@ -158,6 +158,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
158 if (IS_ERR(pci_slot)) { 158 if (IS_ERR(pci_slot)) {
159 err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); 159 err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
160 kfree(slot); 160 kfree(slot);
161 return AE_OK;
161 } 162 }
162 163
163 slot->root_handle = parent_context->root_handle; 164 slot->root_handle = parent_context->root_handle;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0622ace05220..a2c3f9cfa549 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
827static int acpi_processor_get_throttling(struct acpi_processor *pr) 827static int acpi_processor_get_throttling(struct acpi_processor *pr)
828{ 828{
829 cpumask_t saved_mask; 829 cpumask_t saved_mask;
830 cpumask_of_cpu_ptr_declare(new_mask);
830 int ret; 831 int ret;
831 832
832 if (!pr) 833 if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
838 * Migrate task to the cpu pointed by pr. 839 * Migrate task to the cpu pointed by pr.
839 */ 840 */
840 saved_mask = current->cpus_allowed; 841 saved_mask = current->cpus_allowed;
841 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 842 cpumask_of_cpu_ptr_next(new_mask, pr->id);
843 set_cpus_allowed_ptr(current, new_mask);
842 ret = pr->throttling.acpi_processor_get_throttling(pr); 844 ret = pr->throttling.acpi_processor_get_throttling(pr);
843 /* restore the previous state */ 845 /* restore the previous state */
844 set_cpus_allowed_ptr(current, &saved_mask); 846 set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
987int acpi_processor_set_throttling(struct acpi_processor *pr, int state) 989int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
988{ 990{
989 cpumask_t saved_mask; 991 cpumask_t saved_mask;
992 cpumask_of_cpu_ptr_declare(new_mask);
990 int ret = 0; 993 int ret = 0;
991 unsigned int i; 994 unsigned int i;
992 struct acpi_processor *match_pr; 995 struct acpi_processor *match_pr;
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1013 * affected cpu in order to get one proper T-state. 1016 * affected cpu in order to get one proper T-state.
1014 * The notifier event is THROTTLING_PRECHANGE. 1017 * The notifier event is THROTTLING_PRECHANGE.
1015 */ 1018 */
1016 for_each_cpu_mask(i, online_throttling_cpus) { 1019 for_each_cpu_mask_nr(i, online_throttling_cpus) {
1017 t_state.cpu = i; 1020 t_state.cpu = i;
1018 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1021 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
1019 &t_state); 1022 &t_state);
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1025 * it can be called only for the cpu pointed by pr. 1028 * it can be called only for the cpu pointed by pr.
1026 */ 1029 */
1027 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1030 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1028 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 1031 cpumask_of_cpu_ptr_next(new_mask, pr->id);
1032 set_cpus_allowed_ptr(current, new_mask);
1029 ret = p_throttling->acpi_processor_set_throttling(pr, 1033 ret = p_throttling->acpi_processor_set_throttling(pr,
1030 t_state.target_state); 1034 t_state.target_state);
1031 } else { 1035 } else {
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1034 * it is necessary to set T-state for every affected 1038 * it is necessary to set T-state for every affected
1035 * cpus. 1039 * cpus.
1036 */ 1040 */
1037 for_each_cpu_mask(i, online_throttling_cpus) { 1041 for_each_cpu_mask_nr(i, online_throttling_cpus) {
1038 match_pr = per_cpu(processors, i); 1042 match_pr = per_cpu(processors, i);
1039 /* 1043 /*
1040 * If the pointer is invalid, we will report the 1044 * If the pointer is invalid, we will report the
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1056 continue; 1060 continue;
1057 } 1061 }
1058 t_state.cpu = i; 1062 t_state.cpu = i;
1059 set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); 1063 cpumask_of_cpu_ptr_next(new_mask, i);
1064 set_cpus_allowed_ptr(current, new_mask);
1060 ret = match_pr->throttling. 1065 ret = match_pr->throttling.
1061 acpi_processor_set_throttling( 1066 acpi_processor_set_throttling(
1062 match_pr, t_state.target_state); 1067 match_pr, t_state.target_state);
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1068 * affected cpu to update the T-states. 1073 * affected cpu to update the T-states.
1069 * The notifier event is THROTTLING_POSTCHANGE 1074 * The notifier event is THROTTLING_POSTCHANGE
1070 */ 1075 */
1071 for_each_cpu_mask(i, online_throttling_cpus) { 1076 for_each_cpu_mask_nr(i, online_throttling_cpus) {
1072 t_state.cpu = i; 1077 t_state.cpu = i;
1073 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1078 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
1074 &t_state); 1079 &t_state);
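
Note on the throttling hunks above: they belong to the large-NR_CPUS stack-shrinking work. The cpumask_of_cpu_ptr_declare()/cpumask_of_cpu_ptr_next() helpers are meant to avoid building a full cpumask_t temporary on the stack, and for_each_cpu_mask_nr() stops iterating at nr_cpu_ids instead of NR_CPUS. The underlying pattern being optimised is the usual migrate, poke, restore dance; the sketch below shows that pattern in the open-coded form the file had before this series, with an illustrative demo_run_on_cpu() wrapper:

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Temporarily bind the calling task to one CPU, run fn() there, then
 * restore the original affinity.  Sketch of the pattern only; the hunks
 * above additionally use the cpumask_of_cpu_ptr helpers instead of the
 * open-coded &cpumask_of_cpu() temporary shown here.
 */
static int demo_run_on_cpu(int cpu, int (*fn)(void *), void *data)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int ret;

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	ret = fn(data);
	set_cpus_allowed_ptr(current, &saved_mask);
	return ret;
}
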
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 0489a7d1d42c..d13194a031bf 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -280,9 +280,36 @@ static struct platform_suspend_ops acpi_suspend_ops_old = {
280 .end = acpi_pm_end, 280 .end = acpi_pm_end,
281 .recover = acpi_pm_finish, 281 .recover = acpi_pm_finish,
282}; 282};
283
284static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
285{
286 old_suspend_ordering = true;
287 return 0;
288}
289
290static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
291 {
292 .callback = init_old_suspend_ordering,
293 .ident = "Abit KN9 (nForce4 variant)",
294 .matches = {
295 DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
296 DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
297 },
298 },
299 {},
300};
283#endif /* CONFIG_SUSPEND */ 301#endif /* CONFIG_SUSPEND */
284 302
285#ifdef CONFIG_HIBERNATION 303#ifdef CONFIG_HIBERNATION
304static unsigned long s4_hardware_signature;
305static struct acpi_table_facs *facs;
306static bool nosigcheck;
307
308void __init acpi_no_s4_hw_signature(void)
309{
310 nosigcheck = true;
311}
312
286static int acpi_hibernation_begin(void) 313static int acpi_hibernation_begin(void)
287{ 314{
288 acpi_target_sleep_state = ACPI_STATE_S4; 315 acpi_target_sleep_state = ACPI_STATE_S4;
@@ -316,6 +343,12 @@ static void acpi_hibernation_leave(void)
316 acpi_enable(); 343 acpi_enable();
317 /* Reprogram control registers and execute _BFS */ 344 /* Reprogram control registers and execute _BFS */
318 acpi_leave_sleep_state_prep(ACPI_STATE_S4); 345 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
346 /* Check the hardware signature */
347 if (facs && s4_hardware_signature != facs->hardware_signature) {
348 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
349 "cannot resume!\n");
350 panic("ACPI S4 hardware signature mismatch");
351 }
319} 352}
320 353
321static void acpi_pm_enable_gpes(void) 354static void acpi_pm_enable_gpes(void)
@@ -516,6 +549,8 @@ int __init acpi_sleep_init(void)
516 u8 type_a, type_b; 549 u8 type_a, type_b;
517#ifdef CONFIG_SUSPEND 550#ifdef CONFIG_SUSPEND
518 int i = 0; 551 int i = 0;
552
553 dmi_check_system(acpisleep_dmi_table);
519#endif 554#endif
520 555
521 if (acpi_disabled) 556 if (acpi_disabled)
@@ -544,6 +579,13 @@ int __init acpi_sleep_init(void)
544 &acpi_hibernation_ops_old : &acpi_hibernation_ops); 579 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
545 sleep_states[ACPI_STATE_S4] = 1; 580 sleep_states[ACPI_STATE_S4] = 1;
546 printk(" S4"); 581 printk(" S4");
582 if (!nosigcheck) {
583 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
584 (struct acpi_table_header **)&facs);
585 if (facs)
586 s4_hardware_signature =
587 facs->hardware_signature;
588 }
547 } 589 }
548#endif 590#endif
549 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); 591 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
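
Note on the sleep/main.c changes above: they add two independent things, a DMI quirk table that flips old_suspend_ordering on boards known to need the legacy suspend ordering, and an S4 check that compares the FACS hardware signature saved before hibernation with the one found on resume, panicking on a mismatch rather than resuming onto changed hardware. The DMI mechanism itself is generic; a minimal, self-contained sketch follows (the vendor/board strings and demo_* names are illustrative, not additional quirk entries):

#include <linux/dmi.h>
#include <linux/init.h>

static int demo_quirk_enabled;

static int __init demo_enable_quirk(const struct dmi_system_id *d)
{
	demo_quirk_enabled = 1;
	return 0;		/* keep scanning the rest of the table */
}

static struct dmi_system_id __initdata demo_dmi_table[] = {
	{
		.callback = demo_enable_quirk,
		.ident = "Example board needing the quirk",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
		},
	},
	{}	/* terminating entry is required */
};

static int __init demo_init(void)
{
	/* Runs the callback for every entry whose matches[] all hit. */
	dmi_check_system(demo_dmi_table);
	return 0;
}
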
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index d8e3f153b295..91dec448b3ed 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -26,6 +26,7 @@
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/string.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30 31
31#include <acpi/acpi_drivers.h> 32#include <acpi/acpi_drivers.h>
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index ccb5b64bbef3..a4a41ba2484b 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -124,7 +124,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
124 124
125static void inline 125static void inline
126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, 126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
127 u8 byte_width, u64 address) 127 u8 bit_width, u64 address)
128{ 128{
129 129
130 /* 130 /*
@@ -136,7 +136,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
136 /* All other fields are byte-wide */ 136 /* All other fields are byte-wide */
137 137
138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; 138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
139 generic_address->bit_width = byte_width << 3; 139 generic_address->bit_width = bit_width;
140 generic_address->bit_offset = 0; 140 generic_address->bit_offset = 0;
141 generic_address->access_width = 0; 141 generic_address->access_width = 0;
142} 142}
@@ -343,11 +343,9 @@ static void acpi_tb_convert_fadt(void)
343 * 343 *
344 * The PM event blocks are split into two register blocks, first is the 344 * The PM event blocks are split into two register blocks, first is the
345 * PM Status Register block, followed immediately by the PM Enable Register 345 * PM Status Register block, followed immediately by the PM Enable Register
346 * block. Each is of length (xpm1x_event_block.bit_width/2) 346 * block. Each is of length (pm1_event_length/2)
347 */ 347 */
348 WARN_ON(ACPI_MOD_16(acpi_gbl_FADT.xpm1a_event_block.bit_width)); 348 pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
349 pm1_register_length = (u8) ACPI_DIV_16(acpi_gbl_FADT
350 .xpm1a_event_block.bit_width);
351 349
352 /* The PM1A register block is required */ 350 /* The PM1A register block is required */
353 351
@@ -362,17 +360,14 @@ static void acpi_tb_convert_fadt(void)
362 /* The PM1B register block is optional, ignore if not present */ 360 /* The PM1B register block is optional, ignore if not present */
363 361
364 if (acpi_gbl_FADT.xpm1b_event_block.address) { 362 if (acpi_gbl_FADT.xpm1b_event_block.address) {
365 WARN_ON(ACPI_MOD_16(acpi_gbl_FADT.xpm1b_event_block.bit_width));
366 pm1_register_length = (u8) ACPI_DIV_16(acpi_gbl_FADT
367 .xpm1b_event_block
368 .bit_width);
369 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, 363 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
370 pm1_register_length, 364 pm1_register_length,
371 (acpi_gbl_FADT.xpm1b_event_block. 365 (acpi_gbl_FADT.xpm1b_event_block.
372 address + pm1_register_length)); 366 address + pm1_register_length));
373 /* Don't forget to copy space_id of the GAS */ 367 /* Don't forget to copy space_id of the GAS */
374 acpi_gbl_xpm1b_enable.space_id = 368 acpi_gbl_xpm1b_enable.space_id =
375 acpi_gbl_FADT.xpm1b_event_block.space_id; 369 acpi_gbl_FADT.xpm1a_event_block.space_id;
370
376 } 371 }
377} 372}
378 373
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 30a341337933..912703691d36 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -769,6 +769,47 @@ static void acpi_thermal_run(unsigned long data)
769 acpi_os_execute(OSL_GPE_HANDLER, acpi_thermal_check, (void *)data); 769 acpi_os_execute(OSL_GPE_HANDLER, acpi_thermal_check, (void *)data);
770} 770}
771 771
772static void acpi_thermal_active_off(void *data)
773{
774 int result = 0;
775 struct acpi_thermal *tz = data;
776 int i = 0;
777 int j = 0;
778 struct acpi_thermal_active *active = NULL;
779
780 if (!tz) {
781 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
782 return;
783 }
784
785 result = acpi_thermal_get_temperature(tz);
786 if (result)
787 return;
788
789 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
790 active = &(tz->trips.active[i]);
791 if (!active || !active->flags.valid)
792 break;
793 if (tz->temperature >= active->temperature) {
794 /*
795 * If the thermal temperature is greater than the
796 			 * active threshold, it is unnecessary to turn off
797 			 * the active cooling device.
798 */
799 continue;
800 }
801 /*
802 * Below Threshold?
803 * ----------------
804 * Turn OFF all cooling devices associated with this
805 * threshold.
806 */
807 for (j = 0; j < active->devices.count; j++)
808 result = acpi_bus_set_power(active->devices.handles[j],
809 ACPI_STATE_D3);
810 }
811}
812
772static void acpi_thermal_check(void *data) 813static void acpi_thermal_check(void *data)
773{ 814{
774 int result = 0; 815 int result = 0;
@@ -1624,6 +1665,8 @@ static int acpi_thermal_add(struct acpi_device *device)
1624 1665
1625 init_timer(&tz->timer); 1666 init_timer(&tz->timer);
1626 1667
1668 acpi_thermal_active_off(tz);
1669
1627 acpi_thermal_check(tz); 1670 acpi_thermal_check(tz);
1628 1671
1629 status = acpi_install_notify_handler(device->handle, 1672 status = acpi_install_notify_handler(device->handle,
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 3dfb8a442b26..e7bf34a7b1d2 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -242,6 +242,10 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
242{ 242{
243 acpi_status status = AE_OK; 243 acpi_status status = AE_OK;
244 244
245 if (!required_length) {
246 WARN_ON(1);
247 return AE_ERROR;
248 }
245 switch (buffer->length) { 249 switch (buffer->length) {
246 case ACPI_NO_BUFFER: 250 case ACPI_NO_BUFFER:
247 251
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 37b9e16710d6..e8a51a1700f7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -741,7 +741,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
741 741
742 max_level = acpi_video_init_brightness(device); 742 max_level = acpi_video_init_brightness(device);
743 743
744 if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){ 744 if (device->cap._BCL && device->cap._BCM && max_level > 0) {
745 int result; 745 int result;
746 static int count = 0; 746 static int count = 0;
747 char *name; 747 char *name;
@@ -753,7 +753,17 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
753 device->backlight = backlight_device_register(name, 753 device->backlight = backlight_device_register(name,
754 NULL, device, &acpi_backlight_ops); 754 NULL, device, &acpi_backlight_ops);
755 device->backlight->props.max_brightness = device->brightness->count-3; 755 device->backlight->props.max_brightness = device->brightness->count-3;
756 device->backlight->props.brightness = acpi_video_get_brightness(device->backlight); 756 /*
757 		 * If the _BQC object exists, it is evaluated to get the
758 		 * current backlight brightness. Otherwise the brightness
759 		 * is set to the maximum.
760 */
761 if (device->cap._BQC)
762 device->backlight->props.brightness =
763 acpi_video_get_brightness(device->backlight);
764 else
765 device->backlight->props.brightness =
766 device->backlight->props.max_brightness;
757 backlight_update_status(device->backlight); 767 backlight_update_status(device->backlight);
758 kfree(name); 768 kfree(name);
759 769
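
Note on the video.c hunk above: the backlight device is now registered even when _BQC (query current brightness) is missing, as long as _BCL/_BCM are present, and the initial level is either queried via _BQC or assumed to be the maximum. The registration pattern in isolation might look roughly like this; the demo_* names are illustrative and the ops are stubs, not the ACPI driver's real callbacks:

#include <linux/backlight.h>
#include <linux/err.h>

static int demo_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;	/* stub: report the cached value */
}

static int demo_update_status(struct backlight_device *bd)
{
	/* A real driver would program the hardware here (e.g. via _BCM). */
	return 0;
}

static struct backlight_ops demo_backlight_ops = {
	.get_brightness	= demo_get_brightness,
	.update_status	= demo_update_status,
};

static struct backlight_device *demo_register(struct device *parent, void *priv)
{
	struct backlight_device *bd;

	bd = backlight_device_register("demo_bl", parent, priv,
				       &demo_backlight_ops);
	if (IS_ERR(bd))
		return bd;

	bd->props.max_brightness = 7;	/* illustrative level count */
	bd->props.brightness = bd->props.max_brightness;
	backlight_update_status(bd);	/* push the chosen level to the ops */
	return bd;
}
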
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dc7596f028b6..ef3e5522e1a4 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1273,7 +1273,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1273 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; 1273 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1274 u32 em_ctl; 1274 u32 em_ctl;
1275 u32 message[] = {0, 0}; 1275 u32 message[] = {0, 0};
1276 unsigned int flags; 1276 unsigned long flags;
1277 int pmp; 1277 int pmp;
1278 struct ahci_em_priv *emp; 1278 struct ahci_em_priv *emp;
1279 1279
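
Note on the ahci.c one-liner above: it is a correctness fix, not a cleanup. spin_lock_irqsave() stores the caller's interrupt state in its second argument, which must be an unsigned long; declaring it as unsigned int is at best a warning and can truncate the saved state on 64-bit targets. The canonical pattern, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_shared_counter;

static void demo_bump(void)
{
	unsigned long flags;	/* full machine word: holds the saved IRQ state */

	spin_lock_irqsave(&demo_lock, flags);
	demo_shared_counter++;	/* data also touched from interrupt context */
	spin_unlock_irqrestore(&demo_lock, flags);
}
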
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 683509f013ab..eacb175f6bd3 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -336,16 +336,9 @@ static int __init cfag12864b_init(void)
336 "ks0108 is not initialized\n"); 336 "ks0108 is not initialized\n");
337 goto none; 337 goto none;
338 } 338 }
339 BUILD_BUG_ON(PAGE_SIZE < CFAG12864B_SIZE);
339 340
340 if (PAGE_SIZE < CFAG12864B_SIZE) { 341 cfag12864b_buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
341 printk(KERN_ERR CFAG12864B_NAME ": ERROR: "
342 "page size (%i) < cfag12864b size (%i)\n",
343 (unsigned int)PAGE_SIZE, CFAG12864B_SIZE);
344 ret = -ENOMEM;
345 goto none;
346 }
347
348 cfag12864b_buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
349 if (cfag12864b_buffer == NULL) { 342 if (cfag12864b_buffer == NULL) {
350 printk(KERN_ERR CFAG12864B_NAME ": ERROR: " 343 printk(KERN_ERR CFAG12864B_NAME ": ERROR: "
351 "can't get a free page\n"); 344 "can't get a free page\n");
@@ -367,8 +360,6 @@ static int __init cfag12864b_init(void)
367 if (cfag12864b_workqueue == NULL) 360 if (cfag12864b_workqueue == NULL)
368 goto cachealloced; 361 goto cachealloced;
369 362
370 memset(cfag12864b_buffer, 0, CFAG12864B_SIZE);
371
372 cfag12864b_clear(); 363 cfag12864b_clear();
373 cfag12864b_on(); 364 cfag12864b_on();
374 365
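
Note on the cfag12864b hunks above: a runtime size check plus __get_free_page()+memset() becomes a compile-time BUILD_BUG_ON() and get_zeroed_page(), which returns a page that is already cleared. The same idiom in isolation; DEMO_BUF_SIZE and the demo_* names are made up for the sketch:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>

#define DEMO_BUF_SIZE	1024		/* illustrative buffer size */

static unsigned char *demo_buffer;

static int demo_alloc_buffer(void)
{
	/* Refuse to build at all if the buffer cannot live in one page. */
	BUILD_BUG_ON(PAGE_SIZE < DEMO_BUF_SIZE);

	/* One zeroed page; no separate memset() required. */
	demo_buffer = (unsigned char *)get_zeroed_page(GFP_KERNEL);
	if (!demo_buffer)
		return -ENOMEM;
	return 0;
}

static void demo_free_buffer(void)
{
	free_page((unsigned long)demo_buffer);
}
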
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 20537d507909..64f5d54f7edc 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -121,14 +121,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
121{ \ 121{ \
122 return print_cpus_map(buf, &cpu_##type##_map); \ 122 return print_cpus_map(buf, &cpu_##type##_map); \
123} \ 123} \
124struct sysdev_class_attribute attr_##type##_map = \ 124static struct sysdev_class_attribute attr_##type##_map = \
125 _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) 125 _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
126 126
127print_cpus_func(online); 127print_cpus_func(online);
128print_cpus_func(possible); 128print_cpus_func(possible);
129print_cpus_func(present); 129print_cpus_func(present);
130 130
131struct sysdev_class_attribute *cpu_state_attr[] = { 131static struct sysdev_class_attribute *cpu_state_attr[] = {
132 &attr_online_map, 132 &attr_online_map,
133 &attr_possible_map, 133 &attr_possible_map,
134 &attr_present_map, 134 &attr_present_map,
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b0be1d18fee2..c9c92b00fd55 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -184,7 +184,7 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
184 struct device *dev = to_dev(kobj); 184 struct device *dev = to_dev(kobj);
185 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 185 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
186 struct firmware *fw; 186 struct firmware *fw;
187 ssize_t ret_count = count; 187 ssize_t ret_count;
188 188
189 mutex_lock(&fw_lock); 189 mutex_lock(&fw_lock);
190 fw = fw_priv->fw; 190 fw = fw_priv->fw;
@@ -192,14 +192,8 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
192 ret_count = -ENODEV; 192 ret_count = -ENODEV;
193 goto out; 193 goto out;
194 } 194 }
195 if (offset > fw->size) { 195 ret_count = memory_read_from_buffer(buffer, count, &offset,
196 ret_count = 0; 196 fw->data, fw->size);
197 goto out;
198 }
199 if (offset + ret_count > fw->size)
200 ret_count = fw->size - offset;
201
202 memcpy(buffer, fw->data + offset, ret_count);
203out: 197out:
204 mutex_unlock(&fw_lock); 198 mutex_unlock(&fw_lock);
205 return ret_count; 199 return ret_count;
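
Note on the firmware_class hunk above: firmware_data_read() now delegates its bounds handling to memory_read_from_buffer(), a helper that clamps the read to the available data, copies it, and advances the offset, removing the open-coded "offset past the end" and "short read at the end" branches. A sketch of the same helper in a sysfs binary-attribute read handler; demo_blob and demo_read are illustrative:

#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>

static const char demo_blob[] = "example payload";	/* illustrative data */

static ssize_t demo_read(struct kobject *kobj, struct bin_attribute *attr,
			 char *buf, loff_t off, size_t count)
{
	/* Clamp to the blob, copy, and advance 'off' in one call. */
	return memory_read_from_buffer(buf, count, &off,
				       demo_blob, sizeof(demo_blob));
}
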
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 4d4e0e7b6e92..855ed1a9f97b 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -101,6 +101,21 @@ static ssize_t show_mem_phys_index(struct sys_device *dev,
101} 101}
102 102
103/* 103/*
104 * Show whether the section of memory is likely to be hot-removable
105 */
106static ssize_t show_mem_removable(struct sys_device *dev, char *buf)
107{
108 unsigned long start_pfn;
109 int ret;
110 struct memory_block *mem =
111 container_of(dev, struct memory_block, sysdev);
112
113 start_pfn = section_nr_to_pfn(mem->phys_index);
114 ret = is_mem_section_removable(start_pfn, PAGES_PER_SECTION);
115 return sprintf(buf, "%d\n", ret);
116}
117
118/*
104 * online, offline, going offline, etc. 119 * online, offline, going offline, etc.
105 */ 120 */
106static ssize_t show_mem_state(struct sys_device *dev, 121static ssize_t show_mem_state(struct sys_device *dev,
@@ -262,6 +277,7 @@ static ssize_t show_phys_device(struct sys_device *dev,
262static SYSDEV_ATTR(phys_index, 0444, show_mem_phys_index, NULL); 277static SYSDEV_ATTR(phys_index, 0444, show_mem_phys_index, NULL);
263static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state); 278static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
264static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL); 279static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
280static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
265 281
266#define mem_create_simple_file(mem, attr_name) \ 282#define mem_create_simple_file(mem, attr_name) \
267 sysdev_create_file(&mem->sysdev, &attr_##attr_name) 283 sysdev_create_file(&mem->sysdev, &attr_##attr_name)
@@ -350,6 +366,8 @@ static int add_memory_block(unsigned long node_id, struct mem_section *section,
350 ret = mem_create_simple_file(mem, state); 366 ret = mem_create_simple_file(mem, state);
351 if (!ret) 367 if (!ret)
352 ret = mem_create_simple_file(mem, phys_device); 368 ret = mem_create_simple_file(mem, phys_device);
369 if (!ret)
370 ret = mem_create_simple_file(mem, removable);
353 371
354 return ret; 372 return ret;
355} 373}
@@ -394,6 +412,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
394 mem_remove_simple_file(mem, phys_index); 412 mem_remove_simple_file(mem, phys_index);
395 mem_remove_simple_file(mem, state); 413 mem_remove_simple_file(mem, state);
396 mem_remove_simple_file(mem, phys_device); 414 mem_remove_simple_file(mem, phys_device);
415 mem_remove_simple_file(mem, removable);
397 unregister_memory(mem, section); 416 unregister_memory(mem, section);
398 417
399 return 0; 418 return 0;
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index c04440cd6a32..181ebb85f0be 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -6,6 +6,7 @@
6 6
7#include <linux/hdreg.h> 7#include <linux/hdreg.h>
8#include <linux/blkdev.h> 8#include <linux/blkdev.h>
9#include <linux/completion.h>
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/smp_lock.h> 11#include <linux/smp_lock.h>
11#include "aoe.h" 12#include "aoe.h"
@@ -36,7 +37,7 @@ struct ErrMsg {
36 37
37static struct ErrMsg emsgs[NMSG]; 38static struct ErrMsg emsgs[NMSG];
38static int emsgs_head_idx, emsgs_tail_idx; 39static int emsgs_head_idx, emsgs_tail_idx;
39static struct semaphore emsgs_sema; 40static struct completion emsgs_comp;
40static spinlock_t emsgs_lock; 41static spinlock_t emsgs_lock;
41static int nblocked_emsgs_readers; 42static int nblocked_emsgs_readers;
42static struct class *aoe_class; 43static struct class *aoe_class;
@@ -141,7 +142,7 @@ bail: spin_unlock_irqrestore(&emsgs_lock, flags);
141 spin_unlock_irqrestore(&emsgs_lock, flags); 142 spin_unlock_irqrestore(&emsgs_lock, flags);
142 143
143 if (nblocked_emsgs_readers) 144 if (nblocked_emsgs_readers)
144 up(&emsgs_sema); 145 complete(&emsgs_comp);
145} 146}
146 147
147static ssize_t 148static ssize_t
@@ -221,7 +222,7 @@ aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
221 222
222 spin_unlock_irqrestore(&emsgs_lock, flags); 223 spin_unlock_irqrestore(&emsgs_lock, flags);
223 224
224 n = down_interruptible(&emsgs_sema); 225 n = wait_for_completion_interruptible(&emsgs_comp);
225 226
226 spin_lock_irqsave(&emsgs_lock, flags); 227 spin_lock_irqsave(&emsgs_lock, flags);
227 228
@@ -269,7 +270,7 @@ aoechr_init(void)
269 printk(KERN_ERR "aoe: can't register char device\n"); 270 printk(KERN_ERR "aoe: can't register char device\n");
270 return n; 271 return n;
271 } 272 }
272 sema_init(&emsgs_sema, 0); 273 init_completion(&emsgs_comp);
273 spin_lock_init(&emsgs_lock); 274 spin_lock_init(&emsgs_lock);
274 aoe_class = class_create(THIS_MODULE, "aoe"); 275 aoe_class = class_create(THIS_MODULE, "aoe");
275 if (IS_ERR(aoe_class)) { 276 if (IS_ERR(aoe_class)) {
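
Note on the aoechr.c hunks above: the driver was using a semaphore purely as a sleep/wake primitive for the error-message queue; a completion expresses that intent directly and is the preferred replacement for "counting semaphore initialised to 0" constructs. The pattern in isolation, with illustrative names:

#include <linux/completion.h>

static DECLARE_COMPLETION(demo_msg_ready);

/* Producer side: called when a new message has been queued. */
static void demo_post_message(void)
{
	complete(&demo_msg_ready);
}

/* Consumer side: sleep until a message arrives or a signal interrupts us. */
static int demo_wait_for_message(void)
{
	return wait_for_completion_interruptible(&demo_msg_ready);
}
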
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index dd7ea203f940..42251095134f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -196,6 +196,7 @@ static int virtblk_probe(struct virtio_device *vdev)
196 int err; 196 int err;
197 u64 cap; 197 u64 cap;
198 u32 v; 198 u32 v;
199 u32 blk_size;
199 200
200 if (index_to_minor(index) >= 1 << MINORBITS) 201 if (index_to_minor(index) >= 1 << MINORBITS)
201 return -ENOSPC; 202 return -ENOSPC;
@@ -290,6 +291,13 @@ static int virtblk_probe(struct virtio_device *vdev)
290 if (!err) 291 if (!err)
291 blk_queue_max_hw_segments(vblk->disk->queue, v); 292 blk_queue_max_hw_segments(vblk->disk->queue, v);
292 293
294 /* Host can optionally specify the block size of the device */
295 err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
296 offsetof(struct virtio_blk_config, blk_size),
297 &blk_size);
298 if (!err)
299 blk_queue_hardsect_size(vblk->disk->queue, blk_size);
300
293 add_disk(vblk->disk); 301 add_disk(vblk->disk);
294 return 0; 302 return 0;
295 303
@@ -330,7 +338,7 @@ static struct virtio_device_id id_table[] = {
330 338
331static unsigned int features[] = { 339static unsigned int features[] = {
332 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, 340 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
333 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, 341 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
334}; 342};
335 343
336static struct virtio_driver virtio_blk = { 344static struct virtio_driver virtio_blk = {
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e0bbbfb6a36b..d0ac944e1696 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -36,6 +36,14 @@ config VT
36 If unsure, say Y, or else you won't be able to do much with your new 36 If unsure, say Y, or else you won't be able to do much with your new
37 shiny Linux system :-) 37 shiny Linux system :-)
38 38
39config CONSOLE_TRANSLATIONS
40 depends on VT
41 default y
42 bool "Enable character translations in console" if EMBEDDED
43 ---help---
44 This enables support for font mapping and Unicode translation
45 on virtual consoles.
46
39config VT_CONSOLE 47config VT_CONSOLE
40 bool "Support for console on virtual terminal" if EMBEDDED 48 bool "Support for console on virtual terminal" if EMBEDDED
41 depends on VT 49 depends on VT
@@ -578,11 +586,14 @@ config HVC_DRIVER
578 It will automatically be selected if one of the back-end console drivers 586 It will automatically be selected if one of the back-end console drivers
579 is selected. 587 is selected.
580 588
589config HVC_IRQ
590 bool
581 591
582config HVC_CONSOLE 592config HVC_CONSOLE
583 bool "pSeries Hypervisor Virtual Console support" 593 bool "pSeries Hypervisor Virtual Console support"
584 depends on PPC_PSERIES 594 depends on PPC_PSERIES
585 select HVC_DRIVER 595 select HVC_DRIVER
596 select HVC_IRQ
586 help 597 help
587 pSeries machines when partitioned support a hypervisor virtual 598 pSeries machines when partitioned support a hypervisor virtual
588 console. This driver allows each pSeries partition to have a console 599 console. This driver allows each pSeries partition to have a console
@@ -593,6 +604,7 @@ config HVC_ISERIES
593 depends on PPC_ISERIES 604 depends on PPC_ISERIES
594 default y 605 default y
595 select HVC_DRIVER 606 select HVC_DRIVER
607 select HVC_IRQ
596 help 608 help
597 iSeries machines support a hypervisor virtual console. 609 iSeries machines support a hypervisor virtual console.
598 610
@@ -614,13 +626,18 @@ config HVC_XEN
614 bool "Xen Hypervisor Console support" 626 bool "Xen Hypervisor Console support"
615 depends on XEN 627 depends on XEN
616 select HVC_DRIVER 628 select HVC_DRIVER
629 select HVC_IRQ
617 default y 630 default y
618 help 631 help
619 Xen virtual console device driver 632 Xen virtual console device driver
620 633
621config VIRTIO_CONSOLE 634config VIRTIO_CONSOLE
622 bool 635 tristate "Virtio console"
636 depends on VIRTIO
623 select HVC_DRIVER 637 select HVC_DRIVER
638 help
639 Virtio console for use with lguest and other hypervisors.
640
624 641
625config HVCS 642config HVCS
626 tristate "IBM Hypervisor Virtual Console Server support" 643 tristate "IBM Hypervisor Virtual Console Server support"
@@ -857,13 +874,6 @@ config DS1302
857 874
858endif # RTC_LIB 875endif # RTC_LIB
859 876
860config COBALT_LCD
861 bool "Support for Cobalt LCD"
862 depends on MIPS_COBALT
863 help
864 This option enables support for the LCD display and buttons found
865 on Cobalt systems through a misc device.
866
867config DTLK 877config DTLK
868 tristate "Double Talk PC internal speech card support" 878 tristate "Double Talk PC internal speech card support"
869 depends on ISA 879 depends on ISA
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index dc5a327d72d5..8a161c30e1dc 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -12,8 +12,8 @@ obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o
12obj-$(CONFIG_LEGACY_PTYS) += pty.o 12obj-$(CONFIG_LEGACY_PTYS) += pty.o
13obj-$(CONFIG_UNIX98_PTYS) += pty.o 13obj-$(CONFIG_UNIX98_PTYS) += pty.o
14obj-y += misc.o 14obj-y += misc.o
15obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o consolemap.o \ 15obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
16 consolemap_deftbl.o selection.o keyboard.o 16obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
17obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o 17obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
18obj-$(CONFIG_AUDIT) += tty_audit.o 18obj-$(CONFIG_AUDIT) += tty_audit.o
19obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o 19obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
48obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o 48obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
49obj-$(CONFIG_HVC_BEAT) += hvc_beat.o 49obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
50obj-$(CONFIG_HVC_DRIVER) += hvc_console.o 50obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
51obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
51obj-$(CONFIG_HVC_XEN) += hvc_xen.o 52obj-$(CONFIG_HVC_XEN) += hvc_xen.o
52obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o 53obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
53obj-$(CONFIG_RAW_DRIVER) += raw.o 54obj-$(CONFIG_RAW_DRIVER) += raw.o
@@ -63,7 +64,6 @@ obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
63obj-$(CONFIG_BFIN_OTP) += bfin-otp.o 64obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
64 65
65obj-$(CONFIG_PRINTER) += lp.o 66obj-$(CONFIG_PRINTER) += lp.o
66obj-$(CONFIG_TIPAR) += tipar.o
67 67
68obj-$(CONFIG_APM_EMULATION) += apm-emulation.o 68obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
69 69
@@ -88,7 +88,6 @@ obj-$(CONFIG_TOSHIBA) += toshiba.o
88obj-$(CONFIG_I8K) += i8k.o 88obj-$(CONFIG_I8K) += i8k.o
89obj-$(CONFIG_DS1620) += ds1620.o 89obj-$(CONFIG_DS1620) += ds1620.o
90obj-$(CONFIG_HW_RANDOM) += hw_random/ 90obj-$(CONFIG_HW_RANDOM) += hw_random/
91obj-$(CONFIG_COBALT_LCD) += lcd.o
92obj-$(CONFIG_PPDEV) += ppdev.o 91obj-$(CONFIG_PPDEV) += ppdev.o
93obj-$(CONFIG_NWBUTTON) += nwbutton.o 92obj-$(CONFIG_NWBUTTON) += nwbutton.o
94obj-$(CONFIG_NWFLASH) += nwflash.o 93obj-$(CONFIG_NWFLASH) += nwflash.o
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index fada6ddefbae..c5e67a623951 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -20,10 +20,11 @@
20#include <linux/miscdevice.h> 20#include <linux/miscdevice.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/bcd.h> 22#include <linux/bcd.h>
23#include <linux/smp_lock.h>
24#include <linux/uaccess.h>
25#include <linux/io.h>
23 26
24#include <asm/uaccess.h>
25#include <asm/system.h> 27#include <asm/system.h>
26#include <asm/io.h>
27#include <asm/rtc.h> 28#include <asm/rtc.h>
28#if defined(CONFIG_M32R) 29#if defined(CONFIG_M32R)
29#include <asm/m32r.h> 30#include <asm/m32r.h>
@@ -153,9 +154,7 @@ static unsigned char days_in_mo[] =
153 154
154/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */ 155/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */
155 156
156static int 157static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
157rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
158 unsigned long arg)
159{ 158{
160 unsigned long flags; 159 unsigned long flags;
161 160
@@ -165,7 +164,9 @@ rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
165 struct rtc_time rtc_tm; 164 struct rtc_time rtc_tm;
166 165
167 memset(&rtc_tm, 0, sizeof (struct rtc_time)); 166 memset(&rtc_tm, 0, sizeof (struct rtc_time));
167 lock_kernel();
168 get_rtc_time(&rtc_tm); 168 get_rtc_time(&rtc_tm);
169 unlock_kernel();
169 if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time))) 170 if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
170 return -EFAULT; 171 return -EFAULT;
171 return 0; 172 return 0;
@@ -217,6 +218,7 @@ rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
217 BIN_TO_BCD(mon); 218 BIN_TO_BCD(mon);
218 BIN_TO_BCD(yrs); 219 BIN_TO_BCD(yrs);
219 220
221 lock_kernel();
220 local_irq_save(flags); 222 local_irq_save(flags);
221 CMOS_WRITE(yrs, RTC_YEAR); 223 CMOS_WRITE(yrs, RTC_YEAR);
222 CMOS_WRITE(mon, RTC_MONTH); 224 CMOS_WRITE(mon, RTC_MONTH);
@@ -225,6 +227,7 @@ rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
225 CMOS_WRITE(min, RTC_MINUTES); 227 CMOS_WRITE(min, RTC_MINUTES);
226 CMOS_WRITE(sec, RTC_SECONDS); 228 CMOS_WRITE(sec, RTC_SECONDS);
227 local_irq_restore(flags); 229 local_irq_restore(flags);
230 unlock_kernel();
228 231
229 /* Notice that at this point, the RTC is updated but 232 /* Notice that at this point, the RTC is updated but
230 * the kernel is still running with the old time. 233 * the kernel is still running with the old time.
@@ -244,8 +247,10 @@ rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
244 if(copy_from_user(&tcs_val, (int*)arg, sizeof(int))) 247 if(copy_from_user(&tcs_val, (int*)arg, sizeof(int)))
245 return -EFAULT; 248 return -EFAULT;
246 249
250 lock_kernel();
247 tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F); 251 tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F);
248 ds1302_writereg(RTC_TRICKLECHARGER, tcs_val); 252 ds1302_writereg(RTC_TRICKLECHARGER, tcs_val);
253 unlock_kernel();
249 return 0; 254 return 0;
250 } 255 }
251 default: 256 default:
@@ -282,7 +287,7 @@ get_rtc_status(char *buf)
282 287
283static const struct file_operations rtc_fops = { 288static const struct file_operations rtc_fops = {
284 .owner = THIS_MODULE, 289 .owner = THIS_MODULE,
285 .ioctl = rtc_ioctl, 290 .unlocked_ioctl = rtc_ioctl,
286}; 291};
287 292
288/* Probe for the chip by writing something to its RAM and try reading it back. */ 293/* Probe for the chip by writing something to its RAM and try reading it back. */
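
Note on the ds1302.c conversion above (and the dsp56k/efirtc conversions that follow): this is the BKL pushdown pattern. The handler moves from .ioctl to .unlocked_ioctl, drops the inode argument, and takes lock_kernel()/unlock_kernel() explicitly only around the code that still relies on the big kernel lock. A skeleton of such a conversion, with an invented command number purely for illustration:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

#define DEMO_IOC_PING	_IO('d', 0)	/* illustrative ioctl command */

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DEMO_IOC_PING:
		lock_kernel();		/* only around the BKL-protected section */
		/* ... touch state that historically relied on the BKL ... */
		unlock_kernel();
		return 0;
	default:
		return -ENOTTY;		/* unknown command */
	}
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_ioctl,
};
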
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 33c466a4888f..19b88504e960 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -36,10 +36,10 @@
36#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
37#include <linux/firmware.h> 37#include <linux/firmware.h>
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/uaccess.h> /* For put_user and get_user */
39 40
40#include <asm/atarihw.h> 41#include <asm/atarihw.h>
41#include <asm/traps.h> 42#include <asm/traps.h>
42#include <asm/uaccess.h> /* For put_user and get_user */
43 43
44#include <asm/dsp56k.h> 44#include <asm/dsp56k.h>
45 45
@@ -303,8 +303,8 @@ static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t co
303 } 303 }
304} 304}
305 305
306static int dsp56k_ioctl(struct inode *inode, struct file *file, 306static long dsp56k_ioctl(struct file *file, unsigned int cmd,
307 unsigned int cmd, unsigned long arg) 307 unsigned long arg)
308{ 308{
309 int dev = iminor(inode) & 0x0f; 309 int dev = iminor(inode) & 0x0f;
310 void __user *argp = (void __user *)arg; 310 void __user *argp = (void __user *)arg;
@@ -331,8 +331,9 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
331 if (len > DSP56K_MAX_BINARY_LENGTH) { 331 if (len > DSP56K_MAX_BINARY_LENGTH) {
332 return -EINVAL; 332 return -EINVAL;
333 } 333 }
334 334 lock_kernel();
335 r = dsp56k_upload(bin, len); 335 r = dsp56k_upload(bin, len);
336 unlock_kernel();
336 if (r < 0) { 337 if (r < 0) {
337 return r; 338 return r;
338 } 339 }
@@ -342,12 +343,16 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
342 case DSP56K_SET_TX_WSIZE: 343 case DSP56K_SET_TX_WSIZE:
343 if (arg > 4 || arg < 1) 344 if (arg > 4 || arg < 1)
344 return -EINVAL; 345 return -EINVAL;
346 lock_kernel();
345 dsp56k.tx_wsize = (int) arg; 347 dsp56k.tx_wsize = (int) arg;
348 unlock_kernel();
346 break; 349 break;
347 case DSP56K_SET_RX_WSIZE: 350 case DSP56K_SET_RX_WSIZE:
348 if (arg > 4 || arg < 1) 351 if (arg > 4 || arg < 1)
349 return -EINVAL; 352 return -EINVAL;
353 lock_kernel();
350 dsp56k.rx_wsize = (int) arg; 354 dsp56k.rx_wsize = (int) arg;
355 unlock_kernel();
351 break; 356 break;
352 case DSP56K_HOST_FLAGS: 357 case DSP56K_HOST_FLAGS:
353 { 358 {
@@ -359,6 +364,7 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
359 if(get_user(out, &hf->out) < 0) 364 if(get_user(out, &hf->out) < 0)
360 return -EFAULT; 365 return -EFAULT;
361 366
367 lock_kernel();
362 if ((dir & 0x1) && (out & 0x1)) 368 if ((dir & 0x1) && (out & 0x1))
363 dsp56k_host_interface.icr |= DSP56K_ICR_HF0; 369 dsp56k_host_interface.icr |= DSP56K_ICR_HF0;
364 else if (dir & 0x1) 370 else if (dir & 0x1)
@@ -373,14 +379,16 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
373 if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2; 379 if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2;
374 if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4; 380 if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4;
375 if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8; 381 if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8;
376 382 unlock_kernel();
377 return put_user(status, &hf->status); 383 return put_user(status, &hf->status);
378 } 384 }
379 case DSP56K_HOST_CMD: 385 case DSP56K_HOST_CMD:
380 if (arg > 31 || arg < 0) 386 if (arg > 31 || arg < 0)
381 return -EINVAL; 387 return -EINVAL;
388 lock_kernel();
382 dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) | 389 dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) |
383 DSP56K_CVR_HC); 390 DSP56K_CVR_HC);
391 unlock_kernel();
384 break; 392 break;
385 default: 393 default:
386 return -EINVAL; 394 return -EINVAL;
@@ -472,7 +480,7 @@ static const struct file_operations dsp56k_fops = {
472 .owner = THIS_MODULE, 480 .owner = THIS_MODULE,
473 .read = dsp56k_read, 481 .read = dsp56k_read,
474 .write = dsp56k_write, 482 .write = dsp56k_write,
475 .ioctl = dsp56k_ioctl, 483 .unlocked_ioctl = dsp56k_ioctl,
476 .open = dsp56k_open, 484 .open = dsp56k_open,
477 .release = dsp56k_release, 485 .release = dsp56k_release,
478}; 486};
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index d57ca3e4e534..67fbd7aab5db 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -37,8 +37,9 @@
37#include <linux/rtc.h> 37#include <linux/rtc.h>
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/efi.h> 39#include <linux/efi.h>
40#include <linux/smp_lock.h>
41#include <linux/uaccess.h>
40 42
41#include <asm/uaccess.h>
42#include <asm/system.h> 43#include <asm/system.h>
43 44
44#define EFI_RTC_VERSION "0.4" 45#define EFI_RTC_VERSION "0.4"
@@ -51,8 +52,8 @@
51 52
52static DEFINE_SPINLOCK(efi_rtc_lock); 53static DEFINE_SPINLOCK(efi_rtc_lock);
53 54
54static int efi_rtc_ioctl(struct inode *inode, struct file *file, 55static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
55 unsigned int cmd, unsigned long arg); 56 unsigned long arg);
56 57
57#define is_leap(year) \ 58#define is_leap(year) \
58 ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) 59 ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
@@ -146,9 +147,8 @@ convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
146 } 147 }
147} 148}
148 149
149static int 150static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
150efi_rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 151 unsigned long arg)
151 unsigned long arg)
152{ 152{
153 153
154 efi_status_t status; 154 efi_status_t status;
@@ -175,13 +175,13 @@ efi_rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
175 return -EINVAL; 175 return -EINVAL;
176 176
177 case RTC_RD_TIME: 177 case RTC_RD_TIME:
178 178 lock_kernel();
179 spin_lock_irqsave(&efi_rtc_lock, flags); 179 spin_lock_irqsave(&efi_rtc_lock, flags);
180 180
181 status = efi.get_time(&eft, &cap); 181 status = efi.get_time(&eft, &cap);
182 182
183 spin_unlock_irqrestore(&efi_rtc_lock,flags); 183 spin_unlock_irqrestore(&efi_rtc_lock,flags);
184 184 unlock_kernel();
185 if (status != EFI_SUCCESS) { 185 if (status != EFI_SUCCESS) {
186 /* should never happen */ 186 /* should never happen */
187 printk(KERN_ERR "efitime: can't read time\n"); 187 printk(KERN_ERR "efitime: can't read time\n");
@@ -203,11 +203,13 @@ efi_rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
203 203
204 convert_to_efi_time(&wtime, &eft); 204 convert_to_efi_time(&wtime, &eft);
205 205
206 lock_kernel();
206 spin_lock_irqsave(&efi_rtc_lock, flags); 207 spin_lock_irqsave(&efi_rtc_lock, flags);
207 208
208 status = efi.set_time(&eft); 209 status = efi.set_time(&eft);
209 210
210 spin_unlock_irqrestore(&efi_rtc_lock,flags); 211 spin_unlock_irqrestore(&efi_rtc_lock,flags);
212 unlock_kernel();
211 213
212 return status == EFI_SUCCESS ? 0 : -EINVAL; 214 return status == EFI_SUCCESS ? 0 : -EINVAL;
213 215
@@ -223,6 +225,7 @@ efi_rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
223 225
224 convert_to_efi_time(&wtime, &eft); 226 convert_to_efi_time(&wtime, &eft);
225 227
228 lock_kernel();
226 spin_lock_irqsave(&efi_rtc_lock, flags); 229 spin_lock_irqsave(&efi_rtc_lock, flags);
227 /* 230 /*
228 * XXX Fixme: 231 * XXX Fixme:
@@ -233,16 +236,19 @@ efi_rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
233 status = efi.set_wakeup_time((efi_bool_t)enabled, &eft); 236 status = efi.set_wakeup_time((efi_bool_t)enabled, &eft);
234 237
235 spin_unlock_irqrestore(&efi_rtc_lock,flags); 238 spin_unlock_irqrestore(&efi_rtc_lock,flags);
239 unlock_kernel();
236 240
237 return status == EFI_SUCCESS ? 0 : -EINVAL; 241 return status == EFI_SUCCESS ? 0 : -EINVAL;
238 242
239 case RTC_WKALM_RD: 243 case RTC_WKALM_RD:
240 244
245 lock_kernel();
241 spin_lock_irqsave(&efi_rtc_lock, flags); 246 spin_lock_irqsave(&efi_rtc_lock, flags);
242 247
243 status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft); 248 status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft);
244 249
245 spin_unlock_irqrestore(&efi_rtc_lock,flags); 250 spin_unlock_irqrestore(&efi_rtc_lock,flags);
251 unlock_kernel();
246 252
247 if (status != EFI_SUCCESS) return -EINVAL; 253 if (status != EFI_SUCCESS) return -EINVAL;
248 254
@@ -256,7 +262,7 @@ efi_rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
256 return copy_to_user(&ewp->time, &wtime, 262 return copy_to_user(&ewp->time, &wtime,
257 sizeof(struct rtc_time)) ? -EFAULT : 0; 263 sizeof(struct rtc_time)) ? -EFAULT : 0;
258 } 264 }
259 return -EINVAL; 265 return -ENOTTY;
260} 266}
261 267
262/* 268/*
@@ -265,8 +271,7 @@ efi_rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
265 * up things on a close. 271 * up things on a close.
266 */ 272 */
267 273
268static int 274static int efi_rtc_open(struct inode *inode, struct file *file)
269efi_rtc_open(struct inode *inode, struct file *file)
270{ 275{
271 /* 276 /*
272 * nothing special to do here 277 * nothing special to do here
@@ -277,8 +282,7 @@ efi_rtc_open(struct inode *inode, struct file *file)
277 return 0; 282 return 0;
278} 283}
279 284
280static int 285static int efi_rtc_close(struct inode *inode, struct file *file)
281efi_rtc_close(struct inode *inode, struct file *file)
282{ 286{
283 return 0; 287 return 0;
284} 288}
@@ -289,13 +293,12 @@ efi_rtc_close(struct inode *inode, struct file *file)
289 293
290static const struct file_operations efi_rtc_fops = { 294static const struct file_operations efi_rtc_fops = {
291 .owner = THIS_MODULE, 295 .owner = THIS_MODULE,
292 .ioctl = efi_rtc_ioctl, 296 .unlocked_ioctl = efi_rtc_ioctl,
293 .open = efi_rtc_open, 297 .open = efi_rtc_open,
294 .release = efi_rtc_close, 298 .release = efi_rtc_close,
295}; 299};
296 300
297static struct miscdevice efi_rtc_dev= 301static struct miscdevice efi_rtc_dev= {
298{
299 EFI_RTC_MINOR, 302 EFI_RTC_MINOR,
300 "efirtc", 303 "efirtc",
301 &efi_rtc_fops 304 &efi_rtc_fops
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index fb0a85a1eb36..b3f5dbc6d880 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -623,6 +623,7 @@ static inline int hpet_tpcheck(struct hpet_task *tp)
623 return -ENXIO; 623 return -ENXIO;
624} 624}
625 625
626#if 0
626int hpet_unregister(struct hpet_task *tp) 627int hpet_unregister(struct hpet_task *tp)
627{ 628{
628 struct hpet_dev *devp; 629 struct hpet_dev *devp;
@@ -652,6 +653,7 @@ int hpet_unregister(struct hpet_task *tp)
652 653
653 return 0; 654 return 0;
654} 655}
656#endif /* 0 */
655 657
656static ctl_table hpet_table[] = { 658static ctl_table hpet_table[] = {
657 { 659 {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 2f9759d625cc..02aac104842d 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -27,7 +27,6 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/kbd_kern.h> 28#include <linux/kbd_kern.h>
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/kref.h>
31#include <linux/kthread.h> 30#include <linux/kthread.h>
32#include <linux/list.h> 31#include <linux/list.h>
33#include <linux/module.h> 32#include <linux/module.h>
@@ -75,23 +74,6 @@ static int hvc_init(void);
75static int sysrq_pressed; 74static int sysrq_pressed;
76#endif 75#endif
77 76
78struct hvc_struct {
79 spinlock_t lock;
80 int index;
81 struct tty_struct *tty;
82 unsigned int count;
83 int do_wakeup;
84 char *outbuf;
85 int outbuf_size;
86 int n_outbuf;
87 uint32_t vtermno;
88 struct hv_ops *ops;
89 int irq_requested;
90 int irq;
91 struct list_head next;
92 struct kref kref; /* ref count & hvc_struct lifetime */
93};
94
95/* dynamic list of hvc_struct instances */ 77/* dynamic list of hvc_struct instances */
96static LIST_HEAD(hvc_structs); 78static LIST_HEAD(hvc_structs);
97 79
@@ -298,27 +280,15 @@ int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
298 280
299 return 0; 281 return 0;
300} 282}
283EXPORT_SYMBOL_GPL(hvc_instantiate);
301 284
302/* Wake the sleeping khvcd */ 285/* Wake the sleeping khvcd */
303static void hvc_kick(void) 286void hvc_kick(void)
304{ 287{
305 hvc_kicked = 1; 288 hvc_kicked = 1;
306 wake_up_process(hvc_task); 289 wake_up_process(hvc_task);
307} 290}
308 291EXPORT_SYMBOL_GPL(hvc_kick);
309static int hvc_poll(struct hvc_struct *hp);
310
311/*
312 * NOTE: This API isn't used if the console adapter doesn't support interrupts.
313 * In this case the console is poll driven.
314 */
315static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance)
316{
317 /* if hvc_poll request a repoll, then kick the hvcd thread */
318 if (hvc_poll(dev_instance))
319 hvc_kick();
320 return IRQ_HANDLED;
321}
322 292
323static void hvc_unthrottle(struct tty_struct *tty) 293static void hvc_unthrottle(struct tty_struct *tty)
324{ 294{
@@ -333,7 +303,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
333{ 303{
334 struct hvc_struct *hp; 304 struct hvc_struct *hp;
335 unsigned long flags; 305 unsigned long flags;
336 int irq = 0;
337 int rc = 0; 306 int rc = 0;
338 307
339 /* Auto increments kref reference if found. */ 308 /* Auto increments kref reference if found. */
@@ -352,18 +321,15 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
352 tty->low_latency = 1; /* Makes flushes to ldisc synchronous. */ 321 tty->low_latency = 1; /* Makes flushes to ldisc synchronous. */
353 322
354 hp->tty = tty; 323 hp->tty = tty;
355 /* Save for request_irq outside of spin_lock. */ 324
356 irq = hp->irq; 325 if (hp->ops->notifier_add)
357 if (irq) 326 rc = hp->ops->notifier_add(hp, hp->data);
358 hp->irq_requested = 1;
359 327
360 spin_unlock_irqrestore(&hp->lock, flags); 328 spin_unlock_irqrestore(&hp->lock, flags);
361 /* check error, fallback to non-irq */ 329
362 if (irq)
363 rc = request_irq(irq, hvc_handle_interrupt, IRQF_DISABLED, "hvc_console", hp);
364 330
365 /* 331 /*
366 * If the request_irq() fails and we return an error. The tty layer 332 * If the notifier fails we return an error. The tty layer
367 * will call hvc_close() after a failed open but we don't want to clean 333 * will call hvc_close() after a failed open but we don't want to clean
368 * up there so we'll clean up here and clear out the previously set 334 * up there so we'll clean up here and clear out the previously set
369 * tty fields and return the kref reference. 335 * tty fields and return the kref reference.
@@ -371,7 +337,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
371 if (rc) { 337 if (rc) {
372 spin_lock_irqsave(&hp->lock, flags); 338 spin_lock_irqsave(&hp->lock, flags);
373 hp->tty = NULL; 339 hp->tty = NULL;
374 hp->irq_requested = 0;
375 spin_unlock_irqrestore(&hp->lock, flags); 340 spin_unlock_irqrestore(&hp->lock, flags);
376 tty->driver_data = NULL; 341 tty->driver_data = NULL;
377 kref_put(&hp->kref, destroy_hvc_struct); 342 kref_put(&hp->kref, destroy_hvc_struct);
@@ -386,7 +351,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
386static void hvc_close(struct tty_struct *tty, struct file * filp) 351static void hvc_close(struct tty_struct *tty, struct file * filp)
387{ 352{
388 struct hvc_struct *hp; 353 struct hvc_struct *hp;
389 int irq = 0;
390 unsigned long flags; 354 unsigned long flags;
391 355
392 if (tty_hung_up_p(filp)) 356 if (tty_hung_up_p(filp))
@@ -404,9 +368,8 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
404 spin_lock_irqsave(&hp->lock, flags); 368 spin_lock_irqsave(&hp->lock, flags);
405 369
406 if (--hp->count == 0) { 370 if (--hp->count == 0) {
407 if (hp->irq_requested) 371 if (hp->ops->notifier_del)
408 irq = hp->irq; 372 hp->ops->notifier_del(hp, hp->data);
409 hp->irq_requested = 0;
410 373
411 /* We are done with the tty pointer now. */ 374 /* We are done with the tty pointer now. */
412 hp->tty = NULL; 375 hp->tty = NULL;
@@ -418,10 +381,6 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
418 * waking periodically to check chars_in_buffer(). 381 * waking periodically to check chars_in_buffer().
419 */ 382 */
420 tty_wait_until_sent(tty, HVC_CLOSE_WAIT); 383 tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
421
422 if (irq)
423 free_irq(irq, hp);
424
425 } else { 384 } else {
426 if (hp->count < 0) 385 if (hp->count < 0)
427 printk(KERN_ERR "hvc_close %X: oops, count is %d\n", 386 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
@@ -436,7 +395,6 @@ static void hvc_hangup(struct tty_struct *tty)
436{ 395{
437 struct hvc_struct *hp = tty->driver_data; 396 struct hvc_struct *hp = tty->driver_data;
438 unsigned long flags; 397 unsigned long flags;
439 int irq = 0;
440 int temp_open_count; 398 int temp_open_count;
441 399
442 if (!hp) 400 if (!hp)
@@ -458,13 +416,12 @@ static void hvc_hangup(struct tty_struct *tty)
458 hp->count = 0; 416 hp->count = 0;
459 hp->n_outbuf = 0; 417 hp->n_outbuf = 0;
460 hp->tty = NULL; 418 hp->tty = NULL;
461 if (hp->irq_requested) 419
462 /* Saved for use outside of spin_lock. */ 420 if (hp->ops->notifier_del)
463 irq = hp->irq; 421 hp->ops->notifier_del(hp, hp->data);
464 hp->irq_requested = 0; 422
465 spin_unlock_irqrestore(&hp->lock, flags); 423 spin_unlock_irqrestore(&hp->lock, flags);
466 if (irq) 424
467 free_irq(irq, hp);
468 while(temp_open_count) { 425 while(temp_open_count) {
469 --temp_open_count; 426 --temp_open_count;
470 kref_put(&hp->kref, destroy_hvc_struct); 427 kref_put(&hp->kref, destroy_hvc_struct);
@@ -575,7 +532,7 @@ static u32 timeout = MIN_TIMEOUT;
575#define HVC_POLL_READ 0x00000001 532#define HVC_POLL_READ 0x00000001
576#define HVC_POLL_WRITE 0x00000002 533#define HVC_POLL_WRITE 0x00000002
577 534
578static int hvc_poll(struct hvc_struct *hp) 535int hvc_poll(struct hvc_struct *hp)
579{ 536{
580 struct tty_struct *tty; 537 struct tty_struct *tty;
581 int i, n, poll_mask = 0; 538 int i, n, poll_mask = 0;
@@ -602,10 +559,10 @@ static int hvc_poll(struct hvc_struct *hp)
602 if (test_bit(TTY_THROTTLED, &tty->flags)) 559 if (test_bit(TTY_THROTTLED, &tty->flags))
603 goto throttled; 560 goto throttled;
604 561
605 /* If we aren't interrupt driven and aren't throttled, we always 562 /* If we aren't notifier driven and aren't throttled, we always
606 * request a reschedule 563 * request a reschedule
607 */ 564 */
608 if (hp->irq == 0) 565 if (!hp->irq_requested)
609 poll_mask |= HVC_POLL_READ; 566 poll_mask |= HVC_POLL_READ;
610 567
611 /* Read data if any */ 568 /* Read data if any */
@@ -674,6 +631,7 @@ static int hvc_poll(struct hvc_struct *hp)
674 631
675 return poll_mask; 632 return poll_mask;
676} 633}
634EXPORT_SYMBOL_GPL(hvc_poll);
677 635
678/* 636/*
679 * This kthread is either polling or interrupt driven. This is determined by 637 * This kthread is either polling or interrupt driven. This is determined by
@@ -733,7 +691,7 @@ static const struct tty_operations hvc_ops = {
733 .chars_in_buffer = hvc_chars_in_buffer, 691 .chars_in_buffer = hvc_chars_in_buffer,
734}; 692};
735 693
736struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq, 694struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
737 struct hv_ops *ops, int outbuf_size) 695 struct hv_ops *ops, int outbuf_size)
738{ 696{
739 struct hvc_struct *hp; 697 struct hvc_struct *hp;
@@ -754,7 +712,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
754 memset(hp, 0x00, sizeof(*hp)); 712 memset(hp, 0x00, sizeof(*hp));
755 713
756 hp->vtermno = vtermno; 714 hp->vtermno = vtermno;
757 hp->irq = irq; 715 hp->data = data;
758 hp->ops = ops; 716 hp->ops = ops;
759 hp->outbuf_size = outbuf_size; 717 hp->outbuf_size = outbuf_size;
760 hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))]; 718 hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
@@ -784,6 +742,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
784 742
785 return hp; 743 return hp;
786} 744}
745EXPORT_SYMBOL_GPL(hvc_alloc);
787 746
788int __devexit hvc_remove(struct hvc_struct *hp) 747int __devexit hvc_remove(struct hvc_struct *hp)
789{ 748{
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 42ffb17e15df..d9ce10915625 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -26,6 +26,7 @@
26 26
27#ifndef HVC_CONSOLE_H 27#ifndef HVC_CONSOLE_H
28#define HVC_CONSOLE_H 28#define HVC_CONSOLE_H
29#include <linux/kref.h>
29 30
30/* 31/*
31 * This is the max number of console adapters that can/will be found as 32 * This is the max number of console adapters that can/will be found as
@@ -42,24 +43,50 @@
42 */ 43 */
43#define HVC_ALLOC_TTY_ADAPTERS 8 44#define HVC_ALLOC_TTY_ADAPTERS 8
44 45
46struct hvc_struct {
47 spinlock_t lock;
48 int index;
49 struct tty_struct *tty;
50 unsigned int count;
51 int do_wakeup;
52 char *outbuf;
53 int outbuf_size;
54 int n_outbuf;
55 uint32_t vtermno;
56 struct hv_ops *ops;
57 int irq_requested;
58 int data;
59 struct list_head next;
60 struct kref kref; /* ref count & hvc_struct lifetime */
61};
45 62
46/* implemented by a low level driver */ 63/* implemented by a low level driver */
47struct hv_ops { 64struct hv_ops {
48 int (*get_chars)(uint32_t vtermno, char *buf, int count); 65 int (*get_chars)(uint32_t vtermno, char *buf, int count);
49 int (*put_chars)(uint32_t vtermno, const char *buf, int count); 66 int (*put_chars)(uint32_t vtermno, const char *buf, int count);
50};
51 67
52struct hvc_struct; 68 /* Callbacks for notification. Called in open and close */
69 int (*notifier_add)(struct hvc_struct *hp, int irq);
70 void (*notifier_del)(struct hvc_struct *hp, int irq);
71};
53 72
54/* Register a vterm and a slot index for use as a console (console_init) */ 73/* Register a vterm and a slot index for use as a console (console_init) */
55extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); 74extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
56 75
57/* register a vterm for hvc tty operation (module_init or hotplug add) */ 76/* register a vterm for hvc tty operation (module_init or hotplug add) */
58extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq, 77extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
59 struct hv_ops *ops, int outbuf_size); 78 struct hv_ops *ops, int outbuf_size);
60/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */ 79/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
61extern int __devexit hvc_remove(struct hvc_struct *hp); 80extern int __devexit hvc_remove(struct hvc_struct *hp);
62 81
82/* data available */
83int hvc_poll(struct hvc_struct *hp);
84void hvc_kick(void);
85
86/* default notifier for irq based notification */
87extern int notifier_add_irq(struct hvc_struct *hp, int data);
88extern void notifier_del_irq(struct hvc_struct *hp, int data);
89
63 90
64#if defined(CONFIG_XMON) && defined(CONFIG_SMP) 91#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
65#include <asm/xmon.h> 92#include <asm/xmon.h>
diff --git a/drivers/char/hvc_irq.c b/drivers/char/hvc_irq.c
new file mode 100644
index 000000000000..73a59cdb8947
--- /dev/null
+++ b/drivers/char/hvc_irq.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright IBM Corp. 2001,2008
3 *
4 * This file contains the IRQ specific code for hvc_console
5 *
6 */
7
8#include <linux/interrupt.h>
9
10#include "hvc_console.h"
11
12static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance)
13{
14	/* if hvc_poll requests a repoll, then kick the khvcd thread */
15 if (hvc_poll(dev_instance))
16 hvc_kick();
17 return IRQ_HANDLED;
18}
19
20/*
21 * For IRQ based systems these callbacks can be used
22 */
23int notifier_add_irq(struct hvc_struct *hp, int irq)
24{
25 int rc;
26
27 if (!irq) {
28 hp->irq_requested = 0;
29 return 0;
30 }
31 rc = request_irq(irq, hvc_handle_interrupt, IRQF_DISABLED,
32 "hvc_console", hp);
33 if (!rc)
34 hp->irq_requested = 1;
35 return rc;
36}
37
38void notifier_del_irq(struct hvc_struct *hp, int irq)
39{
40 if (!irq)
41 return;
42 free_irq(irq, hp);
43 hp->irq_requested = 0;
44}
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index a08f8f981c11..b71c610fe5ae 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -200,6 +200,8 @@ done:
200static struct hv_ops hvc_get_put_ops = { 200static struct hv_ops hvc_get_put_ops = {
201 .get_chars = get_chars, 201 .get_chars = get_chars,
202 .put_chars = put_chars, 202 .put_chars = put_chars,
203 .notifier_add = notifier_add_irq,
204 .notifier_del = notifier_del_irq,
203}; 205};
204 206
205static int __devinit hvc_vio_probe(struct vio_dev *vdev, 207static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 79711aa4b41d..93f3840c1682 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -80,6 +80,8 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
80static struct hv_ops hvc_get_put_ops = { 80static struct hv_ops hvc_get_put_ops = {
81 .get_chars = filtered_get_chars, 81 .get_chars = filtered_get_chars,
82 .put_chars = hvc_put_chars, 82 .put_chars = hvc_put_chars,
83 .notifier_add = notifier_add_irq,
84 .notifier_del = notifier_del_irq,
83}; 85};
84 86
85static int __devinit hvc_vio_probe(struct vio_dev *vdev, 87static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index db2ae4216279..6b70aa66a587 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -100,6 +100,8 @@ static int read_console(uint32_t vtermno, char *buf, int len)
100static struct hv_ops hvc_ops = { 100static struct hv_ops hvc_ops = {
101 .get_chars = read_console, 101 .get_chars = read_console,
102 .put_chars = write_console, 102 .put_chars = write_console,
103 .notifier_add = notifier_add_irq,
104 .notifier_del = notifier_del_irq,
103}; 105};
104 106
105static int __init xen_init(void) 107static int __init xen_init(void)
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 9cb48fcd316c..689f9dcd3b86 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -203,7 +203,7 @@ static int set_serial_info(i2ChanStrPtr, struct serial_struct __user *);
203 203
204static ssize_t ip2_ipl_read(struct file *, char __user *, size_t, loff_t *); 204static ssize_t ip2_ipl_read(struct file *, char __user *, size_t, loff_t *);
205static ssize_t ip2_ipl_write(struct file *, const char __user *, size_t, loff_t *); 205static ssize_t ip2_ipl_write(struct file *, const char __user *, size_t, loff_t *);
206static int ip2_ipl_ioctl(struct inode *, struct file *, UINT, ULONG); 206static long ip2_ipl_ioctl(struct file *, UINT, ULONG);
207static int ip2_ipl_open(struct inode *, struct file *); 207static int ip2_ipl_open(struct inode *, struct file *);
208 208
209static int DumpTraceBuffer(char __user *, int); 209static int DumpTraceBuffer(char __user *, int);
@@ -236,7 +236,7 @@ static const struct file_operations ip2_ipl = {
236 .owner = THIS_MODULE, 236 .owner = THIS_MODULE,
237 .read = ip2_ipl_read, 237 .read = ip2_ipl_read,
238 .write = ip2_ipl_write, 238 .write = ip2_ipl_write,
239 .ioctl = ip2_ipl_ioctl, 239 .unlocked_ioctl = ip2_ipl_ioctl,
240 .open = ip2_ipl_open, 240 .open = ip2_ipl_open,
241}; 241};
242 242
@@ -2845,10 +2845,10 @@ ip2_ipl_write(struct file *pFile, const char __user *pData, size_t count, loff_t
2845/* */ 2845/* */
2846/* */ 2846/* */
2847/******************************************************************************/ 2847/******************************************************************************/
2848static int 2848static long
2849ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg ) 2849ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
2850{ 2850{
2851 unsigned int iplminor = iminor(pInode); 2851 unsigned int iplminor = iminor(pFile->f_path.dentry->d_inode);
2852 int rc = 0; 2852 int rc = 0;
2853 void __user *argp = (void __user *)arg; 2853 void __user *argp = (void __user *)arg;
2854 ULONG __user *pIndex = argp; 2854 ULONG __user *pIndex = argp;
@@ -2859,6 +2859,8 @@ ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg )
2859 printk (KERN_DEBUG "IP2IPL: ioctl cmd %d, arg %ld\n", cmd, arg ); 2859 printk (KERN_DEBUG "IP2IPL: ioctl cmd %d, arg %ld\n", cmd, arg );
2860#endif 2860#endif
2861 2861
2862 lock_kernel();
2863
2862 switch ( iplminor ) { 2864 switch ( iplminor ) {
2863 case 0: // IPL device 2865 case 0: // IPL device
2864 rc = -EINVAL; 2866 rc = -EINVAL;
@@ -2919,6 +2921,7 @@ ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg )
2919 rc = -ENODEV; 2921 rc = -ENODEV;
2920 break; 2922 break;
2921 } 2923 }
2924 unlock_kernel();
2922 return rc; 2925 return rc;
2923} 2926}
2924 2927
diff --git a/drivers/char/lcd.c b/drivers/char/lcd.c
deleted file mode 100644
index 1c29b20e4f4c..000000000000
--- a/drivers/char/lcd.c
+++ /dev/null
@@ -1,516 +0,0 @@
1/*
2 * LCD, LED and Button interface for Cobalt
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1997 by Andrew Bose
9 *
10 * Linux kernel version history:
11 * March 2001: Ported from 2.0.34 by Liam Davies
12 *
13 */
14#include <linux/types.h>
15#include <linux/errno.h>
16#include <linux/miscdevice.h>
17#include <linux/slab.h>
18#include <linux/ioport.h>
19#include <linux/fcntl.h>
20#include <linux/mc146818rtc.h>
21#include <linux/netdevice.h>
22#include <linux/sched.h>
23#include <linux/smp_lock.h>
24#include <linux/delay.h>
25
26#include <asm/io.h>
27#include <asm/uaccess.h>
28#include <asm/system.h>
29
30#include "lcd.h"
31
32static int lcd_ioctl(struct inode *inode, struct file *file,
33 unsigned int cmd, unsigned long arg);
34
35static unsigned int lcd_present = 1;
36
37/* used in arch/mips/cobalt/reset.c */
38int led_state = 0;
39
40#if defined(CONFIG_TULIP) && 0
41
42#define MAX_INTERFACES 8
43static linkcheck_func_t linkcheck_callbacks[MAX_INTERFACES];
44static void *linkcheck_cookies[MAX_INTERFACES];
45
46int lcd_register_linkcheck_func(int iface_num, void *func, void *cookie)
47{
48 if (iface_num < 0 ||
49 iface_num >= MAX_INTERFACES ||
50 linkcheck_callbacks[iface_num] != NULL)
51 return -1;
52 linkcheck_callbacks[iface_num] = (linkcheck_func_t) func;
53 linkcheck_cookies[iface_num] = cookie;
54 return 0;
55}
56#endif
57
58static int lcd_ioctl(struct inode *inode, struct file *file,
59 unsigned int cmd, unsigned long arg)
60{
61 struct lcd_display button_display;
62 unsigned long address, a;
63
64 switch (cmd) {
65 case LCD_On:
66 udelay(150);
67 BusyCheck();
68 LCDWriteInst(0x0F);
69 break;
70
71 case LCD_Off:
72 udelay(150);
73 BusyCheck();
74 LCDWriteInst(0x08);
75 break;
76
77 case LCD_Reset:
78 udelay(150);
79 LCDWriteInst(0x3F);
80 udelay(150);
81 LCDWriteInst(0x3F);
82 udelay(150);
83 LCDWriteInst(0x3F);
84 udelay(150);
85 LCDWriteInst(0x3F);
86 udelay(150);
87 LCDWriteInst(0x01);
88 udelay(150);
89 LCDWriteInst(0x06);
90 break;
91
92 case LCD_Clear:
93 udelay(150);
94 BusyCheck();
95 LCDWriteInst(0x01);
96 break;
97
98 case LCD_Cursor_Left:
99 udelay(150);
100 BusyCheck();
101 LCDWriteInst(0x10);
102 break;
103
104 case LCD_Cursor_Right:
105 udelay(150);
106 BusyCheck();
107 LCDWriteInst(0x14);
108 break;
109
110 case LCD_Cursor_Off:
111 udelay(150);
112 BusyCheck();
113 LCDWriteInst(0x0C);
114 break;
115
116 case LCD_Cursor_On:
117 udelay(150);
118 BusyCheck();
119 LCDWriteInst(0x0F);
120 break;
121
122 case LCD_Blink_Off:
123 udelay(150);
124 BusyCheck();
125 LCDWriteInst(0x0E);
126 break;
127
128 case LCD_Get_Cursor_Pos:{
129 struct lcd_display display;
130
131 udelay(150);
132 BusyCheck();
133 display.cursor_address = (LCDReadInst);
134 display.cursor_address =
135 (display.cursor_address & 0x07F);
136 if (copy_to_user
137 ((struct lcd_display *) arg, &display,
138 sizeof(struct lcd_display)))
139 return -EFAULT;
140
141 break;
142 }
143
144
145 case LCD_Set_Cursor_Pos:{
146 struct lcd_display display;
147
148 if (copy_from_user
149 (&display, (struct lcd_display *) arg,
150 sizeof(struct lcd_display)))
151 return -EFAULT;
152
153 a = (display.cursor_address | kLCD_Addr);
154
155 udelay(150);
156 BusyCheck();
157 LCDWriteInst(a);
158
159 break;
160 }
161
162 case LCD_Get_Cursor:{
163 struct lcd_display display;
164
165 udelay(150);
166 BusyCheck();
167 display.character = LCDReadData;
168
169 if (copy_to_user
170 ((struct lcd_display *) arg, &display,
171 sizeof(struct lcd_display)))
172 return -EFAULT;
173 udelay(150);
174 BusyCheck();
175 LCDWriteInst(0x10);
176
177 break;
178 }
179
180 case LCD_Set_Cursor:{
181 struct lcd_display display;
182
183 if (copy_from_user
184 (&display, (struct lcd_display *) arg,
185 sizeof(struct lcd_display)))
186 return -EFAULT;
187
188 udelay(150);
189 BusyCheck();
190 LCDWriteData(display.character);
191 udelay(150);
192 BusyCheck();
193 LCDWriteInst(0x10);
194
195 break;
196 }
197
198
199 case LCD_Disp_Left:
200 udelay(150);
201 BusyCheck();
202 LCDWriteInst(0x18);
203 break;
204
205 case LCD_Disp_Right:
206 udelay(150);
207 BusyCheck();
208 LCDWriteInst(0x1C);
209 break;
210
211 case LCD_Home:
212 udelay(150);
213 BusyCheck();
214 LCDWriteInst(0x02);
215 break;
216
217 case LCD_Write:{
218 struct lcd_display display;
219 unsigned int index;
220
221
222 if (copy_from_user
223 (&display, (struct lcd_display *) arg,
224 sizeof(struct lcd_display)))
225 return -EFAULT;
226
227 udelay(150);
228 BusyCheck();
229 LCDWriteInst(0x80);
230 udelay(150);
231 BusyCheck();
232
233 for (index = 0; index < (display.size1); index++) {
234 udelay(150);
235 BusyCheck();
236 LCDWriteData(display.line1[index]);
237 BusyCheck();
238 }
239
240 udelay(150);
241 BusyCheck();
242 LCDWriteInst(0xC0);
243 udelay(150);
244 BusyCheck();
245 for (index = 0; index < (display.size2); index++) {
246 udelay(150);
247 BusyCheck();
248 LCDWriteData(display.line2[index]);
249 }
250
251 break;
252 }
253
254 case LCD_Read:{
255 struct lcd_display display;
256
257 BusyCheck();
258 for (address = kDD_R00; address <= kDD_R01;
259 address++) {
260 a = (address | kLCD_Addr);
261
262 udelay(150);
263 BusyCheck();
264 LCDWriteInst(a);
265 udelay(150);
266 BusyCheck();
267 display.line1[address] = LCDReadData;
268 }
269
270 display.line1[0x27] = '\0';
271
272 for (address = kDD_R10; address <= kDD_R11;
273 address++) {
274 a = (address | kLCD_Addr);
275
276 udelay(150);
277 BusyCheck();
278 LCDWriteInst(a);
279
280 udelay(150);
281 BusyCheck();
282 display.line2[address - 0x40] =
283 LCDReadData;
284 }
285
286 display.line2[0x27] = '\0';
287
288 if (copy_to_user
289 ((struct lcd_display *) arg, &display,
290 sizeof(struct lcd_display)))
291 return -EFAULT;
292 break;
293 }
294
295// set all GPIO leds to led_display.leds
296
297 case LED_Set:{
298 struct lcd_display led_display;
299
300
301 if (copy_from_user
302 (&led_display, (struct lcd_display *) arg,
303 sizeof(struct lcd_display)))
304 return -EFAULT;
305
306 led_state = led_display.leds;
307 LEDSet(led_state);
308
309 break;
310 }
311
312
313// set only bit led_display.leds
314
315 case LED_Bit_Set:{
316 unsigned int i;
317 int bit = 1;
318 struct lcd_display led_display;
319
320
321 if (copy_from_user
322 (&led_display, (struct lcd_display *) arg,
323 sizeof(struct lcd_display)))
324 return -EFAULT;
325
326 for (i = 0; i < (int) led_display.leds; i++) {
327 bit = 2 * bit;
328 }
329
330 led_state = led_state | bit;
331 LEDSet(led_state);
332 break;
333 }
334
335// clear only bit led_display.leds
336
337 case LED_Bit_Clear:{
338 unsigned int i;
339 int bit = 1;
340 struct lcd_display led_display;
341
342
343 if (copy_from_user
344 (&led_display, (struct lcd_display *) arg,
345 sizeof(struct lcd_display)))
346 return -EFAULT;
347
348 for (i = 0; i < (int) led_display.leds; i++) {
349 bit = 2 * bit;
350 }
351
352 led_state = led_state & ~bit;
353 LEDSet(led_state);
354 break;
355 }
356
357
358 case BUTTON_Read:{
359 button_display.buttons = GPIRead;
360 if (copy_to_user
361 ((struct lcd_display *) arg, &button_display,
362 sizeof(struct lcd_display)))
363 return -EFAULT;
364 break;
365 }
366
367 case LINK_Check:{
368 button_display.buttons =
369 *((volatile unsigned long *) (0xB0100060));
370 if (copy_to_user
371 ((struct lcd_display *) arg, &button_display,
372 sizeof(struct lcd_display)))
373 return -EFAULT;
374 break;
375 }
376
377 case LINK_Check_2:{
378 int iface_num;
379
380 /* panel-utils should pass in the desired interface status is wanted for
381 * in "buttons" of the structure. We will set this to non-zero if the
382 * link is in fact up for the requested interface. --DaveM
383 */
384 if (copy_from_user
385 (&button_display, (struct lcd_display *) arg,
386 sizeof(button_display)))
387 return -EFAULT;
388 iface_num = button_display.buttons;
389#if defined(CONFIG_TULIP) && 0
390 if (iface_num >= 0 &&
391 iface_num < MAX_INTERFACES &&
392 linkcheck_callbacks[iface_num] != NULL) {
393 button_display.buttons =
394 linkcheck_callbacks[iface_num]
395 (linkcheck_cookies[iface_num]);
396 } else
397#endif
398 button_display.buttons = 0;
399
400 if (__copy_to_user
401 ((struct lcd_display *) arg, &button_display,
402 sizeof(struct lcd_display)))
403 return -EFAULT;
404 break;
405 }
406
407 default:
408 return -EINVAL;
409
410 }
411
412 return 0;
413
414}
415
416static int lcd_open(struct inode *inode, struct file *file)
417{
418 cycle_kernel_lock();
419
420 if (!lcd_present)
421 return -ENXIO;
422 else
423 return 0;
424}
425
426/* Only RESET or NEXT counts as button pressed */
427
428static inline int button_pressed(void)
429{
430 unsigned long buttons = GPIRead;
431
432 if ((buttons == BUTTON_Next) || (buttons == BUTTON_Next_B)
433 || (buttons == BUTTON_Reset_B))
434 return buttons;
435 return 0;
436}
437
438/* LED daemon sits on this and we wake him up once a key is pressed. */
439
440static int lcd_waiters = 0;
441
442static ssize_t lcd_read(struct file *file, char *buf,
443 size_t count, loff_t *ofs)
444{
445 long buttons_now;
446
447 if (lcd_waiters > 0)
448 return -EINVAL;
449
450 lcd_waiters++;
451 while (((buttons_now = (long) button_pressed()) == 0) &&
452 !(signal_pending(current))) {
453 msleep_interruptible(2000);
454 }
455 lcd_waiters--;
456
457 if (signal_pending(current))
458 return -ERESTARTSYS;
459 return buttons_now;
460}
461
462/*
463 * The various file operations we support.
464 */
465
466static const struct file_operations lcd_fops = {
467 .read = lcd_read,
468 .ioctl = lcd_ioctl,
469 .open = lcd_open,
470};
471
472static struct miscdevice lcd_dev = {
473 MISC_DYNAMIC_MINOR,
474 "lcd",
475 &lcd_fops
476};
477
478static int lcd_init(void)
479{
480 int ret;
481 unsigned long data;
482
483 pr_info("%s\n", LCD_DRIVER);
484 ret = misc_register(&lcd_dev);
485 if (ret) {
486 printk(KERN_WARNING LCD "Unable to register misc device.\n");
487 return ret;
488 }
489
490 /* Check region? Naaah! Just snarf it up. */
491/* request_region(RTC_PORT(0), RTC_IO_EXTENT, "lcd");*/
492
493 udelay(150);
494 data = LCDReadData;
495 if ((data & 0x000000FF) == (0x00)) {
496 lcd_present = 0;
497 pr_info(LCD "LCD Not Present\n");
498 } else {
499 lcd_present = 1;
500 WRITE_GAL(kGal_DevBank2PReg, kGal_DevBank2Cfg);
501 WRITE_GAL(kGal_DevBank3PReg, kGal_DevBank3Cfg);
502 }
503
504 return 0;
505}
506
507static void __exit lcd_exit(void)
508{
509 misc_deregister(&lcd_dev);
510}
511
512module_init(lcd_init);
513module_exit(lcd_exit);
514
515MODULE_AUTHOR("Andrew Bose");
516MODULE_LICENSE("GPL");
diff --git a/drivers/char/lcd.h b/drivers/char/lcd.h
deleted file mode 100644
index 290b3ff23b03..000000000000
--- a/drivers/char/lcd.h
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * LED, LCD and Button panel driver for Cobalt
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1997 by Andrew Bose
9 *
10 * Linux kernel version history:
11 * March 2001: Ported from 2.0.34 by Liam Davies
12 *
13 */
14
15// function headers
16
17#define LCD_CHARS_PER_LINE 40
18#define MAX_IDLE_TIME 120
19
20struct lcd_display {
21 unsigned buttons;
22 int size1;
23 int size2;
24 unsigned char line1[LCD_CHARS_PER_LINE];
25 unsigned char line2[LCD_CHARS_PER_LINE];
26 unsigned char cursor_address;
27 unsigned char character;
28 unsigned char leds;
29 unsigned char *RomImage;
30};
31
32
33
34#define LCD_DRIVER "Cobalt LCD Driver v2.10"
35
36#define LCD "lcd: "
37
38#define kLCD_IR 0x0F000000
39#define kLCD_DR 0x0F000010
40#define kGPI 0x0D000000
41#define kLED 0x0C000000
42
43#define kDD_R00 0x00
44#define kDD_R01 0x27
45#define kDD_R10 0x40
46#define kDD_R11 0x67
47
48#define kLCD_Addr 0x00000080
49
50#define LCDTimeoutValue 0xfff
51
52
53// Macros
54
55#define LCDWriteData(x) outl((x << 24), kLCD_DR)
56#define LCDWriteInst(x) outl((x << 24), kLCD_IR)
57
58#define LCDReadData (inl(kLCD_DR) >> 24)
59#define LCDReadInst (inl(kLCD_IR) >> 24)
60
61#define GPIRead (inl(kGPI) >> 24)
62
63#define LEDSet(x) outb((char)x, kLED)
64
65#define WRITE_GAL(x,y) outl(y, 0x04000000 | (x))
66#define BusyCheck() while ((LCDReadInst & 0x80) == 0x80)
67
68
69
70/*
71 * Function command codes for io_ctl.
72 */
73#define LCD_On 1
74#define LCD_Off 2
75#define LCD_Clear 3
76#define LCD_Reset 4
77#define LCD_Cursor_Left 5
78#define LCD_Cursor_Right 6
79#define LCD_Disp_Left 7
80#define LCD_Disp_Right 8
81#define LCD_Get_Cursor 9
82#define LCD_Set_Cursor 10
83#define LCD_Home 11
84#define LCD_Read 12
85#define LCD_Write 13
86#define LCD_Cursor_Off 14
87#define LCD_Cursor_On 15
88#define LCD_Get_Cursor_Pos 16
89#define LCD_Set_Cursor_Pos 17
90#define LCD_Blink_Off 18
91
92#define LED_Set 40
93#define LED_Bit_Set 41
94#define LED_Bit_Clear 42
95
96
97// Button defs
98#define BUTTON_Read 50
99
100
101// Ethernet LINK check hackaroo
102#define LINK_Check 90
103#define LINK_Check_2 91
104
105// Button patterns _B - single layer lcd boards
106
107#define BUTTON_NONE 0x3F
108#define BUTTON_NONE_B 0xFE
109
110#define BUTTON_Left 0x3B
111#define BUTTON_Left_B 0xFA
112
113#define BUTTON_Right 0x37
114#define BUTTON_Right_B 0xDE
115
116#define BUTTON_Up 0x2F
117#define BUTTON_Up_B 0xF6
118
119#define BUTTON_Down 0x1F
120#define BUTTON_Down_B 0xEE
121
122#define BUTTON_Next 0x3D
123#define BUTTON_Next_B 0x7E
124
125#define BUTTON_Enter 0x3E
126#define BUTTON_Enter_B 0xBE
127
128#define BUTTON_Reset_B 0xFC
129
130
131// debounce constants
132
133#define BUTTON_SENSE 160000
134#define BUTTON_DEBOUNCE 5000
135
136
137// Galileo register stuff
138
139#define kGal_DevBank2Cfg 0x1466DB33
140#define kGal_DevBank2PReg 0x464
141#define kGal_DevBank3Cfg 0x146FDFFB
142#define kGal_DevBank3PReg 0x468
143
144// Network
145
146#define kIPADDR 1
147#define kNETMASK 2
148#define kGATEWAY 3
149#define kDNS 4
150
151#define kClassA 5
152#define kClassB 6
153#define kClassC 7
154
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c2dba82eb5f7..672b08e694d0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -327,7 +327,10 @@ static void mmap_mem_close(struct vm_area_struct *vma)
327 327
328static struct vm_operations_struct mmap_mem_ops = { 328static struct vm_operations_struct mmap_mem_ops = {
329 .open = mmap_mem_open, 329 .open = mmap_mem_open,
330 .close = mmap_mem_close 330 .close = mmap_mem_close,
331#ifdef CONFIG_HAVE_IOREMAP_PROT
332 .access = generic_access_phys
333#endif
331}; 334};
332 335
333static int mmap_mem(struct file * file, struct vm_area_struct * vma) 336static int mmap_mem(struct file * file, struct vm_area_struct * vma)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index fe2a95b5d3c0..30f095a8c2d4 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -193,25 +193,23 @@ mspec_close(struct vm_area_struct *vma)
193} 193}
194 194
195/* 195/*
196 * mspec_nopfn 196 * mspec_fault
197 * 197 *
198 * Creates a mspec page and maps it to user space. 198 * Creates a mspec page and maps it to user space.
199 */ 199 */
200static unsigned long 200static int
201mspec_nopfn(struct vm_area_struct *vma, unsigned long address) 201mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
202{ 202{
203 unsigned long paddr, maddr; 203 unsigned long paddr, maddr;
204 unsigned long pfn; 204 unsigned long pfn;
205 int index; 205 pgoff_t index = vmf->pgoff;
206 struct vma_data *vdata = vma->vm_private_data; 206 struct vma_data *vdata = vma->vm_private_data;
207 207
208 BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
209 index = (address - vdata->vm_start) >> PAGE_SHIFT;
210 maddr = (volatile unsigned long) vdata->maddr[index]; 208 maddr = (volatile unsigned long) vdata->maddr[index];
211 if (maddr == 0) { 209 if (maddr == 0) {
212 maddr = uncached_alloc_page(numa_node_id(), 1); 210 maddr = uncached_alloc_page(numa_node_id(), 1);
213 if (maddr == 0) 211 if (maddr == 0)
214 return NOPFN_OOM; 212 return VM_FAULT_OOM;
215 213
216 spin_lock(&vdata->lock); 214 spin_lock(&vdata->lock);
217 if (vdata->maddr[index] == 0) { 215 if (vdata->maddr[index] == 0) {
@@ -231,13 +229,20 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
231 229
232 pfn = paddr >> PAGE_SHIFT; 230 pfn = paddr >> PAGE_SHIFT;
233 231
234 return pfn; 232 /*
233 * vm_insert_pfn can fail with -EBUSY, but in that case it will
234 * be because another thread has installed the pte first, so it
235 * is no problem.
236 */
237 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
238
239 return VM_FAULT_NOPAGE;
235} 240}
236 241
237static struct vm_operations_struct mspec_vm_ops = { 242static struct vm_operations_struct mspec_vm_ops = {
238 .open = mspec_open, 243 .open = mspec_open,
239 .close = mspec_close, 244 .close = mspec_close,
240 .nopfn = mspec_nopfn 245 .fault = mspec_fault,
241}; 246};
242 247
243/* 248/*
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 50243fcd87e8..4f8d67fed292 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -86,8 +86,8 @@ module_param(mwave_uart_io, int, 0);
86 86
87static int mwave_open(struct inode *inode, struct file *file); 87static int mwave_open(struct inode *inode, struct file *file);
88static int mwave_close(struct inode *inode, struct file *file); 88static int mwave_close(struct inode *inode, struct file *file);
89static int mwave_ioctl(struct inode *inode, struct file *filp, 89static long mwave_ioctl(struct file *filp, unsigned int iocmd,
90 unsigned int iocmd, unsigned long ioarg); 90 unsigned long ioarg);
91 91
92MWAVE_DEVICE_DATA mwave_s_mdd; 92MWAVE_DEVICE_DATA mwave_s_mdd;
93 93
@@ -119,16 +119,16 @@ static int mwave_close(struct inode *inode, struct file *file)
119 return retval; 119 return retval;
120} 120}
121 121
122static int mwave_ioctl(struct inode *inode, struct file *file, 122static long mwave_ioctl(struct file *file, unsigned int iocmd,
123 unsigned int iocmd, unsigned long ioarg) 123 unsigned long ioarg)
124{ 124{
125 unsigned int retval = 0; 125 unsigned int retval = 0;
126 pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; 126 pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
127 void __user *arg = (void __user *)ioarg; 127 void __user *arg = (void __user *)ioarg;
128 128
129 PRINTK_5(TRACE_MWAVE, 129 PRINTK_4(TRACE_MWAVE,
130 "mwavedd::mwave_ioctl, entry inode %p file %p cmd %x arg %x\n", 130 "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
131 inode, file, iocmd, (int) ioarg); 131 file, iocmd, (int) ioarg);
132 132
133 switch (iocmd) { 133 switch (iocmd) {
134 134
@@ -136,7 +136,9 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
136 PRINTK_1(TRACE_MWAVE, 136 PRINTK_1(TRACE_MWAVE,
137 "mwavedd::mwave_ioctl, IOCTL_MW_RESET" 137 "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
138 " calling tp3780I_ResetDSP\n"); 138 " calling tp3780I_ResetDSP\n");
139 lock_kernel();
139 retval = tp3780I_ResetDSP(&pDrvData->rBDData); 140 retval = tp3780I_ResetDSP(&pDrvData->rBDData);
141 unlock_kernel();
140 PRINTK_2(TRACE_MWAVE, 142 PRINTK_2(TRACE_MWAVE,
141 "mwavedd::mwave_ioctl, IOCTL_MW_RESET" 143 "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
142 " retval %x from tp3780I_ResetDSP\n", 144 " retval %x from tp3780I_ResetDSP\n",
@@ -147,7 +149,9 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
147 PRINTK_1(TRACE_MWAVE, 149 PRINTK_1(TRACE_MWAVE,
148 "mwavedd::mwave_ioctl, IOCTL_MW_RUN" 150 "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
149 " calling tp3780I_StartDSP\n"); 151 " calling tp3780I_StartDSP\n");
152 lock_kernel();
150 retval = tp3780I_StartDSP(&pDrvData->rBDData); 153 retval = tp3780I_StartDSP(&pDrvData->rBDData);
154 unlock_kernel();
151 PRINTK_2(TRACE_MWAVE, 155 PRINTK_2(TRACE_MWAVE,
152 "mwavedd::mwave_ioctl, IOCTL_MW_RUN" 156 "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
153 " retval %x from tp3780I_StartDSP\n", 157 " retval %x from tp3780I_StartDSP\n",
@@ -161,8 +165,10 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
161 "mwavedd::mwave_ioctl," 165 "mwavedd::mwave_ioctl,"
162 " IOCTL_MW_DSP_ABILITIES calling" 166 " IOCTL_MW_DSP_ABILITIES calling"
163 " tp3780I_QueryAbilities\n"); 167 " tp3780I_QueryAbilities\n");
168 lock_kernel();
164 retval = tp3780I_QueryAbilities(&pDrvData->rBDData, 169 retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
165 &rAbilities); 170 &rAbilities);
171 unlock_kernel();
166 PRINTK_2(TRACE_MWAVE, 172 PRINTK_2(TRACE_MWAVE,
167 "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" 173 "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
168 " retval %x from tp3780I_QueryAbilities\n", 174 " retval %x from tp3780I_QueryAbilities\n",
@@ -193,11 +199,13 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
193 "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA," 199 "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
194 " size %lx, ioarg %lx pusBuffer %p\n", 200 " size %lx, ioarg %lx pusBuffer %p\n",
195 rReadData.ulDataLength, ioarg, pusBuffer); 201 rReadData.ulDataLength, ioarg, pusBuffer);
202 lock_kernel();
196 retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, 203 retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
197 iocmd, 204 iocmd,
198 pusBuffer, 205 pusBuffer,
199 rReadData.ulDataLength, 206 rReadData.ulDataLength,
200 rReadData.usDspAddress); 207 rReadData.usDspAddress);
208 unlock_kernel();
201 } 209 }
202 break; 210 break;
203 211
@@ -215,10 +223,12 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
215 " size %lx, ioarg %lx pusBuffer %p\n", 223 " size %lx, ioarg %lx pusBuffer %p\n",
216 rReadData.ulDataLength / 2, ioarg, 224 rReadData.ulDataLength / 2, ioarg,
217 pusBuffer); 225 pusBuffer);
226 lock_kernel();
218 retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, 227 retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
219 iocmd, pusBuffer, 228 iocmd, pusBuffer,
220 rReadData.ulDataLength / 2, 229 rReadData.ulDataLength / 2,
221 rReadData.usDspAddress); 230 rReadData.usDspAddress);
231 unlock_kernel();
222 } 232 }
223 break; 233 break;
224 234
@@ -236,10 +246,12 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
236 " size %lx, ioarg %lx pusBuffer %p\n", 246 " size %lx, ioarg %lx pusBuffer %p\n",
237 rWriteData.ulDataLength, ioarg, 247 rWriteData.ulDataLength, ioarg,
238 pusBuffer); 248 pusBuffer);
249 lock_kernel();
239 retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, 250 retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
240 iocmd, pusBuffer, 251 iocmd, pusBuffer,
241 rWriteData.ulDataLength, 252 rWriteData.ulDataLength,
242 rWriteData.usDspAddress); 253 rWriteData.usDspAddress);
254 unlock_kernel();
243 } 255 }
244 break; 256 break;
245 257
@@ -257,10 +269,12 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
257 " size %lx, ioarg %lx pusBuffer %p\n", 269 " size %lx, ioarg %lx pusBuffer %p\n",
258 rWriteData.ulDataLength, ioarg, 270 rWriteData.ulDataLength, ioarg,
259 pusBuffer); 271 pusBuffer);
272 lock_kernel();
260 retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData, 273 retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
261 iocmd, pusBuffer, 274 iocmd, pusBuffer,
262 rWriteData.ulDataLength, 275 rWriteData.ulDataLength,
263 rWriteData.usDspAddress); 276 rWriteData.usDspAddress);
277 unlock_kernel();
264 } 278 }
265 break; 279 break;
266 280
@@ -281,8 +295,10 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
281 ipcnum); 295 ipcnum);
282 return -EINVAL; 296 return -EINVAL;
283 } 297 }
298 lock_kernel();
284 pDrvData->IPCs[ipcnum].bIsHere = FALSE; 299 pDrvData->IPCs[ipcnum].bIsHere = FALSE;
285 pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; 300 pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
301 unlock_kernel();
286 302
287 PRINTK_2(TRACE_MWAVE, 303 PRINTK_2(TRACE_MWAVE,
288 "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" 304 "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
@@ -307,6 +323,7 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
307 return -EINVAL; 323 return -EINVAL;
308 } 324 }
309 325
326 lock_kernel();
310 if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { 327 if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
311 DECLARE_WAITQUEUE(wait, current); 328 DECLARE_WAITQUEUE(wait, current);
312 329
@@ -347,6 +364,7 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
347 " processing\n", 364 " processing\n",
348 ipcnum); 365 ipcnum);
349 } 366 }
367 unlock_kernel();
350 } 368 }
351 break; 369 break;
352 370
@@ -365,19 +383,18 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
365 ipcnum); 383 ipcnum);
366 return -EINVAL; 384 return -EINVAL;
367 } 385 }
386 lock_kernel();
368 if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { 387 if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
369 pDrvData->IPCs[ipcnum].bIsEnabled = FALSE; 388 pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
370 if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) { 389 if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) {
371 wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue); 390 wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
372 } 391 }
373 } 392 }
393 unlock_kernel();
374 } 394 }
375 break; 395 break;
376 396
377 default: 397 default:
378 PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:"
379 " Error: Unrecognized iocmd %x\n",
380 iocmd);
381 return -ENOTTY; 398 return -ENOTTY;
382 break; 399 break;
383 } /* switch */ 400 } /* switch */
@@ -460,7 +477,7 @@ static const struct file_operations mwave_fops = {
460 .owner = THIS_MODULE, 477 .owner = THIS_MODULE,
461 .read = mwave_read, 478 .read = mwave_read,
462 .write = mwave_write, 479 .write = mwave_write,
463 .ioctl = mwave_ioctl, 480 .unlocked_ioctl = mwave_ioctl,
464 .open = mwave_open, 481 .open = mwave_open,
465 .release = mwave_close 482 .release = mwave_close
466}; 483};
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
index 8eca61e0a19c..7e0d530e2e07 100644
--- a/drivers/char/mwave/mwavedd.h
+++ b/drivers/char/mwave/mwavedd.h
@@ -147,4 +147,6 @@ typedef struct _MWAVE_DEVICE_DATA {
147 147
148} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA; 148} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
149 149
150extern MWAVE_DEVICE_DATA mwave_s_mdd;
151
150#endif 152#endif
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
index f282976daaac..c68969708068 100644
--- a/drivers/char/mwave/tp3780i.c
+++ b/drivers/char/mwave/tp3780i.c
@@ -57,8 +57,6 @@
57#include "3780i.h" 57#include "3780i.h"
58#include "mwavepub.h" 58#include "mwavepub.h"
59 59
60extern MWAVE_DEVICE_DATA mwave_s_mdd;
61
62static unsigned short s_ausThinkpadIrqToField[16] = 60static unsigned short s_ausThinkpadIrqToField[16] =
63 { 0xFFFF, 0xFFFF, 0xFFFF, 0x0001, 0x0002, 0x0003, 0xFFFF, 0x0004, 61 { 0xFFFF, 0xFFFF, 0xFFFF, 0x0001, 0x0002, 0x0003, 0xFFFF, 0x0004,
64 0xFFFF, 0xFFFF, 0x0005, 0x0006, 0xFFFF, 0xFFFF, 0xFFFF, 0x0007 }; 62 0xFFFF, 0xFFFF, 0x0005, 0x0006, 0xFFFF, 0xFFFF, 0xFFFF, 0x0007 };
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 4c756bbba948..e30575e87648 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -16,7 +16,6 @@
16 * Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox 16 * Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
17 * <alan@redhat.com>. The original 1.8 code is available on www.moxa.com. 17 * <alan@redhat.com>. The original 1.8 code is available on www.moxa.com.
18 * - Fixed x86_64 cleanness 18 * - Fixed x86_64 cleanness
19 * - Fixed sleep with spinlock held in mxser_send_break
20 */ 19 */
21 20
22#include <linux/module.h> 21#include <linux/module.h>
@@ -49,18 +48,12 @@
49 48
50#define MXSER_VERSION "2.0.4" /* 1.12 */ 49#define MXSER_VERSION "2.0.4" /* 1.12 */
51#define MXSERMAJOR 174 50#define MXSERMAJOR 174
52#define MXSERCUMAJOR 175
53 51
54#define MXSER_BOARDS 4 /* Max. boards */ 52#define MXSER_BOARDS 4 /* Max. boards */
55#define MXSER_PORTS_PER_BOARD 8 /* Max. ports per board */ 53#define MXSER_PORTS_PER_BOARD 8 /* Max. ports per board */
56#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD) 54#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD)
57#define MXSER_ISR_PASS_LIMIT 100 55#define MXSER_ISR_PASS_LIMIT 100
58 56
59#define MXSER_ERR_IOADDR -1
60#define MXSER_ERR_IRQ -2
61#define MXSER_ERR_IRQ_CONFLIT -3
62#define MXSER_ERR_VECTOR -4
63
64/*CheckIsMoxaMust return value*/ 57/*CheckIsMoxaMust return value*/
65#define MOXA_OTHER_UART 0x00 58#define MOXA_OTHER_UART 0x00
66#define MOXA_MUST_MU150_HWID 0x01 59#define MOXA_MUST_MU150_HWID 0x01
@@ -179,14 +172,15 @@ static struct pci_device_id mxser_pcibrds[] = {
179}; 172};
180MODULE_DEVICE_TABLE(pci, mxser_pcibrds); 173MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
181 174
182static int ioaddr[MXSER_BOARDS] = { 0, 0, 0, 0 }; 175static unsigned long ioaddr[MXSER_BOARDS];
183static int ttymajor = MXSERMAJOR; 176static int ttymajor = MXSERMAJOR;
184 177
185/* Variables for insmod */ 178/* Variables for insmod */
186 179
187MODULE_AUTHOR("Casper Yang"); 180MODULE_AUTHOR("Casper Yang");
188MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver"); 181MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver");
189module_param_array(ioaddr, int, NULL, 0); 182module_param_array(ioaddr, ulong, NULL, 0);
183MODULE_PARM_DESC(ioaddr, "ISA io addresses to look for a moxa board");
190module_param(ttymajor, int, 0); 184module_param(ttymajor, int, 0);
191MODULE_LICENSE("GPL"); 185MODULE_LICENSE("GPL");
192 186
@@ -196,7 +190,6 @@ struct mxser_log {
196 unsigned long txcnt[MXSER_PORTS]; 190 unsigned long txcnt[MXSER_PORTS];
197}; 191};
198 192
199
200struct mxser_mon { 193struct mxser_mon {
201 unsigned long rxcnt; 194 unsigned long rxcnt;
202 unsigned long txcnt; 195 unsigned long txcnt;
@@ -287,19 +280,9 @@ struct mxser_mstatus {
287 int dcd; 280 int dcd;
288}; 281};
289 282
290static struct mxser_mstatus GMStatus[MXSER_PORTS];
291
292static int mxserBoardCAP[MXSER_BOARDS] = {
293 0, 0, 0, 0
294 /* 0x180, 0x280, 0x200, 0x320 */
295};
296
297static struct mxser_board mxser_boards[MXSER_BOARDS]; 283static struct mxser_board mxser_boards[MXSER_BOARDS];
298static struct tty_driver *mxvar_sdriver; 284static struct tty_driver *mxvar_sdriver;
299static struct mxser_log mxvar_log; 285static struct mxser_log mxvar_log;
300static int mxvar_diagflag;
301static unsigned char mxser_msr[MXSER_PORTS + 1];
302static struct mxser_mon_ext mon_data_ext;
303static int mxser_set_baud_method[MXSER_PORTS + 1]; 286static int mxser_set_baud_method[MXSER_PORTS + 1];
304 287
305static void mxser_enable_must_enchance_mode(unsigned long baseio) 288static void mxser_enable_must_enchance_mode(unsigned long baseio)
@@ -543,6 +526,7 @@ static void process_txrx_fifo(struct mxser_port *info)
543 526
544static unsigned char mxser_get_msr(int baseaddr, int mode, int port) 527static unsigned char mxser_get_msr(int baseaddr, int mode, int port)
545{ 528{
529 static unsigned char mxser_msr[MXSER_PORTS + 1];
546 unsigned char status = 0; 530 unsigned char status = 0;
547 531
548 status = inb(baseaddr + UART_MSR); 532 status = inb(baseaddr + UART_MSR);
@@ -1319,13 +1303,9 @@ static void mxser_flush_chars(struct tty_struct *tty)
1319 struct mxser_port *info = tty->driver_data; 1303 struct mxser_port *info = tty->driver_data;
1320 unsigned long flags; 1304 unsigned long flags;
1321 1305
1322 if (info->xmit_cnt <= 0 || 1306 if (info->xmit_cnt <= 0 || tty->stopped || !info->port.xmit_buf ||
1323 tty->stopped || 1307 (tty->hw_stopped && info->type != PORT_16550A &&
1324 !info->port.xmit_buf || 1308 !info->board->chip_flag))
1325 (tty->hw_stopped &&
1326 (info->type != PORT_16550A) &&
1327 (!info->board->chip_flag)
1328 ))
1329 return; 1309 return;
1330 1310
1331 spin_lock_irqsave(&info->slock, flags); 1311 spin_lock_irqsave(&info->slock, flags);
@@ -1343,9 +1323,7 @@ static int mxser_write_room(struct tty_struct *tty)
1343 int ret; 1323 int ret;
1344 1324
1345 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; 1325 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
1346 if (ret < 0) 1326 return ret < 0 ? 0 : ret;
1347 ret = 0;
1348 return ret;
1349} 1327}
1350 1328
1351static int mxser_chars_in_buffer(struct tty_struct *tty) 1329static int mxser_chars_in_buffer(struct tty_struct *tty)
@@ -1634,6 +1612,8 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1634 1612
1635 switch (cmd) { 1613 switch (cmd) {
1636 case MOXA_GET_MAJOR: 1614 case MOXA_GET_MAJOR:
1615 printk(KERN_WARNING "mxser: '%s' uses deprecated ioctl %x, fix "
1616 "your userspace\n", current->comm, cmd);
1637 return put_user(ttymajor, (int __user *)argp); 1617 return put_user(ttymajor, (int __user *)argp);
1638 1618
1639 case MOXA_CHKPORTENABLE: 1619 case MOXA_CHKPORTENABLE:
@@ -1651,62 +1631,60 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1651 ret = -EFAULT; 1631 ret = -EFAULT;
1652 unlock_kernel(); 1632 unlock_kernel();
1653 return ret; 1633 return ret;
1654 case MOXA_GETMSTATUS: 1634 case MOXA_GETMSTATUS: {
1635 struct mxser_mstatus ms, __user *msu = argp;
1655 lock_kernel(); 1636 lock_kernel();
1656 for (i = 0; i < MXSER_BOARDS; i++) 1637 for (i = 0; i < MXSER_BOARDS; i++)
1657 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { 1638 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
1658 port = &mxser_boards[i].ports[j]; 1639 port = &mxser_boards[i].ports[j];
1640 memset(&ms, 0, sizeof(ms));
1659 1641
1660 GMStatus[i].ri = 0; 1642 if (!port->ioaddr)
1661 if (!port->ioaddr) { 1643 goto copy;
1662 GMStatus[i].dcd = 0;
1663 GMStatus[i].dsr = 0;
1664 GMStatus[i].cts = 0;
1665 continue;
1666 }
1667 1644
1668 if (!port->port.tty || !port->port.tty->termios) 1645 if (!port->port.tty || !port->port.tty->termios)
1669 GMStatus[i].cflag = 1646 ms.cflag = port->normal_termios.c_cflag;
1670 port->normal_termios.c_cflag;
1671 else 1647 else
1672 GMStatus[i].cflag = 1648 ms.cflag = port->port.tty->termios->c_cflag;
1673 port->port.tty->termios->c_cflag;
1674 1649
1675 status = inb(port->ioaddr + UART_MSR); 1650 status = inb(port->ioaddr + UART_MSR);
1676 if (status & 0x80 /*UART_MSR_DCD */ ) 1651 if (status & UART_MSR_DCD)
1677 GMStatus[i].dcd = 1; 1652 ms.dcd = 1;
1678 else 1653 if (status & UART_MSR_DSR)
1679 GMStatus[i].dcd = 0; 1654 ms.dsr = 1;
1680 1655 if (status & UART_MSR_CTS)
1681 if (status & 0x20 /*UART_MSR_DSR */ ) 1656 ms.cts = 1;
1682 GMStatus[i].dsr = 1; 1657 copy:
1683 else 1658 if (copy_to_user(msu, &ms, sizeof(ms))) {
1684 GMStatus[i].dsr = 0; 1659 unlock_kernel();
1685 1660 return -EFAULT;
1686 1661 }
1687 if (status & 0x10 /*UART_MSR_CTS */ ) 1662 msu++;
1688 GMStatus[i].cts = 1;
1689 else
1690 GMStatus[i].cts = 0;
1691 } 1663 }
1692 unlock_kernel(); 1664 unlock_kernel();
1693 if (copy_to_user(argp, GMStatus,
1694 sizeof(struct mxser_mstatus) * MXSER_PORTS))
1695 return -EFAULT;
1696 return 0; 1665 return 0;
1666 }
1697 case MOXA_ASPP_MON_EXT: { 1667 case MOXA_ASPP_MON_EXT: {
1698 int p, shiftbit; 1668 struct mxser_mon_ext *me; /* it's 2k, stack unfriendly */
1699 unsigned long opmode; 1669 unsigned int cflag, iflag, p;
1700 unsigned cflag, iflag; 1670 u8 opmode;
1671
1672 me = kzalloc(sizeof(*me), GFP_KERNEL);
1673 if (!me)
1674 return -ENOMEM;
1701 1675
1702 lock_kernel(); 1676 lock_kernel();
1703 for (i = 0; i < MXSER_BOARDS; i++) { 1677 for (i = 0, p = 0; i < MXSER_BOARDS; i++) {
1704 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { 1678 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++, p++) {
1679 if (p >= ARRAY_SIZE(me->rx_cnt)) {
1680 i = MXSER_BOARDS;
1681 break;
1682 }
1705 port = &mxser_boards[i].ports[j]; 1683 port = &mxser_boards[i].ports[j];
1706 if (!port->ioaddr) 1684 if (!port->ioaddr)
1707 continue; 1685 continue;
1708 1686
1709 status = mxser_get_msr(port->ioaddr, 0, i); 1687 status = mxser_get_msr(port->ioaddr, 0, p);
1710 1688
1711 if (status & UART_MSR_TERI) 1689 if (status & UART_MSR_TERI)
1712 port->icount.rng++; 1690 port->icount.rng++;
@@ -1718,16 +1696,13 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1718 port->icount.cts++; 1696 port->icount.cts++;
1719 1697
1720 port->mon_data.modem_status = status; 1698 port->mon_data.modem_status = status;
1721 mon_data_ext.rx_cnt[i] = port->mon_data.rxcnt; 1699 me->rx_cnt[p] = port->mon_data.rxcnt;
1722 mon_data_ext.tx_cnt[i] = port->mon_data.txcnt; 1700 me->tx_cnt[p] = port->mon_data.txcnt;
1723 mon_data_ext.up_rxcnt[i] = 1701 me->up_rxcnt[p] = port->mon_data.up_rxcnt;
1724 port->mon_data.up_rxcnt; 1702 me->up_txcnt[p] = port->mon_data.up_txcnt;
1725 mon_data_ext.up_txcnt[i] = 1703 me->modem_status[p] =
1726 port->mon_data.up_txcnt;
1727 mon_data_ext.modem_status[i] =
1728 port->mon_data.modem_status; 1704 port->mon_data.modem_status;
1729 mon_data_ext.baudrate[i] = 1705 me->baudrate[p] = tty_get_baud_rate(port->port.tty);
1730 tty_get_baud_rate(port->port.tty);
1731 1706
1732 if (!port->port.tty || !port->port.tty->termios) { 1707 if (!port->port.tty || !port->port.tty->termios) {
1733 cflag = port->normal_termios.c_cflag; 1708 cflag = port->normal_termios.c_cflag;
@@ -1737,40 +1712,31 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1737 iflag = port->port.tty->termios->c_iflag; 1712 iflag = port->port.tty->termios->c_iflag;
1738 } 1713 }
1739 1714
1740 mon_data_ext.databits[i] = cflag & CSIZE; 1715 me->databits[p] = cflag & CSIZE;
1741 1716 me->stopbits[p] = cflag & CSTOPB;
1742 mon_data_ext.stopbits[i] = cflag & CSTOPB; 1717 me->parity[p] = cflag & (PARENB | PARODD |
1743 1718 CMSPAR);
1744 mon_data_ext.parity[i] =
1745 cflag & (PARENB | PARODD | CMSPAR);
1746
1747 mon_data_ext.flowctrl[i] = 0x00;
1748 1719
1749 if (cflag & CRTSCTS) 1720 if (cflag & CRTSCTS)
1750 mon_data_ext.flowctrl[i] |= 0x03; 1721 me->flowctrl[p] |= 0x03;
1751 1722
1752 if (iflag & (IXON | IXOFF)) 1723 if (iflag & (IXON | IXOFF))
1753 mon_data_ext.flowctrl[i] |= 0x0C; 1724 me->flowctrl[p] |= 0x0C;
1754 1725
1755 if (port->type == PORT_16550A) 1726 if (port->type == PORT_16550A)
1756 mon_data_ext.fifo[i] = 1; 1727 me->fifo[p] = 1;
1757 else
1758 mon_data_ext.fifo[i] = 0;
1759 1728
1760 p = i % 4; 1729 opmode = inb(port->opmode_ioaddr) >>
1761 shiftbit = p * 2; 1730 ((p % 4) * 2);
1762 opmode = inb(port->opmode_ioaddr) >> shiftbit;
1763 opmode &= OP_MODE_MASK; 1731 opmode &= OP_MODE_MASK;
1764 1732 me->iftype[p] = opmode;
1765 mon_data_ext.iftype[i] = opmode;
1766
1767 } 1733 }
1768 } 1734 }
1769 unlock_kernel(); 1735 unlock_kernel();
1770 if (copy_to_user(argp, &mon_data_ext, 1736 if (copy_to_user(argp, me, sizeof(*me)))
1771 sizeof(mon_data_ext))) 1737 ret = -EFAULT;
1772 return -EFAULT; 1738 kfree(me);
1773 return 0; 1739 return ret;
1774 } 1740 }
1775 default: 1741 default:
1776 return -ENOIOCTLCMD; 1742 return -ENOIOCTLCMD;
@@ -1804,7 +1770,6 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1804{ 1770{
1805 struct mxser_port *info = tty->driver_data; 1771 struct mxser_port *info = tty->driver_data;
1806 struct async_icount cnow; 1772 struct async_icount cnow;
1807 struct serial_icounter_struct __user *p_cuser;
1808 unsigned long flags; 1773 unsigned long flags;
1809 void __user *argp = (void __user *)arg; 1774 void __user *argp = (void __user *)arg;
1810 int retval; 1775 int retval;
@@ -1884,30 +1849,26 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1884 * NB: both 1->0 and 0->1 transitions are counted except for 1849 * NB: both 1->0 and 0->1 transitions are counted except for
1885 * RI where only 0->1 is counted. 1850 * RI where only 0->1 is counted.
1886 */ 1851 */
1887 case TIOCGICOUNT: 1852 case TIOCGICOUNT: {
1853 struct serial_icounter_struct icnt = { 0 };
1888 spin_lock_irqsave(&info->slock, flags); 1854 spin_lock_irqsave(&info->slock, flags);
1889 cnow = info->icount; 1855 cnow = info->icount;
1890 spin_unlock_irqrestore(&info->slock, flags); 1856 spin_unlock_irqrestore(&info->slock, flags);
1891 p_cuser = argp; 1857
1892 if (put_user(cnow.frame, &p_cuser->frame)) 1858 icnt.frame = cnow.frame;
1893 return -EFAULT; 1859 icnt.brk = cnow.brk;
1894 if (put_user(cnow.brk, &p_cuser->brk)) 1860 icnt.overrun = cnow.overrun;
1895 return -EFAULT; 1861 icnt.buf_overrun = cnow.buf_overrun;
1896 if (put_user(cnow.overrun, &p_cuser->overrun)) 1862 icnt.parity = cnow.parity;
1897 return -EFAULT; 1863 icnt.rx = cnow.rx;
1898 if (put_user(cnow.buf_overrun, &p_cuser->buf_overrun)) 1864 icnt.tx = cnow.tx;
1899 return -EFAULT; 1865 icnt.cts = cnow.cts;
1900 if (put_user(cnow.parity, &p_cuser->parity)) 1866 icnt.dsr = cnow.dsr;
1901 return -EFAULT; 1867 icnt.rng = cnow.rng;
1902 if (put_user(cnow.rx, &p_cuser->rx)) 1868 icnt.dcd = cnow.dcd;
1903 return -EFAULT; 1869
1904 if (put_user(cnow.tx, &p_cuser->tx)) 1870 return copy_to_user(argp, &icnt, sizeof(icnt)) ? -EFAULT : 0;
1905 return -EFAULT; 1871 }
1906 put_user(cnow.cts, &p_cuser->cts);
1907 put_user(cnow.dsr, &p_cuser->dsr);
1908 put_user(cnow.rng, &p_cuser->rng);
1909 put_user(cnow.dcd, &p_cuser->dcd);
1910 return 0;
1911 case MOXA_HighSpeedOn: 1872 case MOXA_HighSpeedOn:
1912 return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp); 1873 return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
1913 case MOXA_SDS_RSTICOUNTER: 1874 case MOXA_SDS_RSTICOUNTER:
@@ -2503,7 +2464,8 @@ static int __devinit mxser_initbrd(struct mxser_board *brd,
2503 unsigned int i; 2464 unsigned int i;
2504 int retval; 2465 int retval;
2505 2466
2506 printk(KERN_INFO "max. baud rate = %d bps.\n", brd->ports[0].max_baud); 2467 printk(KERN_INFO "mxser: max. baud rate = %d bps\n",
2468 brd->ports[0].max_baud);
2507 2469
2508 for (i = 0; i < brd->info->nports; i++) { 2470 for (i = 0; i < brd->info->nports; i++) {
2509 info = &brd->ports[i]; 2471 info = &brd->ports[i];
@@ -2586,28 +2548,32 @@ static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
2586 irq = regs[9] & 0xF000; 2548 irq = regs[9] & 0xF000;
2587 irq = irq | (irq >> 4); 2549 irq = irq | (irq >> 4);
2588 if (irq != (regs[9] & 0xFF00)) 2550 if (irq != (regs[9] & 0xFF00))
2589 return MXSER_ERR_IRQ_CONFLIT; 2551 goto err_irqconflict;
2590 } else if (brd->info->nports == 4) { 2552 } else if (brd->info->nports == 4) {
2591 irq = regs[9] & 0xF000; 2553 irq = regs[9] & 0xF000;
2592 irq = irq | (irq >> 4); 2554 irq = irq | (irq >> 4);
2593 irq = irq | (irq >> 8); 2555 irq = irq | (irq >> 8);
2594 if (irq != regs[9]) 2556 if (irq != regs[9])
2595 return MXSER_ERR_IRQ_CONFLIT; 2557 goto err_irqconflict;
2596 } else if (brd->info->nports == 8) { 2558 } else if (brd->info->nports == 8) {
2597 irq = regs[9] & 0xF000; 2559 irq = regs[9] & 0xF000;
2598 irq = irq | (irq >> 4); 2560 irq = irq | (irq >> 4);
2599 irq = irq | (irq >> 8); 2561 irq = irq | (irq >> 8);
2600 if ((irq != regs[9]) || (irq != regs[10])) 2562 if ((irq != regs[9]) || (irq != regs[10]))
2601 return MXSER_ERR_IRQ_CONFLIT; 2563 goto err_irqconflict;
2602 } 2564 }
2603 2565
2604 if (!irq) 2566 if (!irq) {
2605 return MXSER_ERR_IRQ; 2567 printk(KERN_ERR "mxser: interrupt number unset\n");
2568 return -EIO;
2569 }
2606 brd->irq = ((int)(irq & 0xF000) >> 12); 2570 brd->irq = ((int)(irq & 0xF000) >> 12);
2607 for (i = 0; i < 8; i++) 2571 for (i = 0; i < 8; i++)
2608 brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8; 2572 brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8;
2609 if ((regs[12] & 0x80) == 0) 2573 if ((regs[12] & 0x80) == 0) {
2610 return MXSER_ERR_VECTOR; 2574 printk(KERN_ERR "mxser: invalid interrupt vector\n");
2575 return -EIO;
2576 }
2611 brd->vector = (int)regs[11]; /* interrupt vector */ 2577 brd->vector = (int)regs[11]; /* interrupt vector */
2612 if (id == 1) 2578 if (id == 1)
2613 brd->vector_mask = 0x00FF; 2579 brd->vector_mask = 0x00FF;
@@ -2634,13 +2600,26 @@ static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
2634 else 2600 else
2635 brd->uart_type = PORT_16450; 2601 brd->uart_type = PORT_16450;
2636 if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports, 2602 if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports,
2637 "mxser(IO)")) 2603 "mxser(IO)")) {
2638 return MXSER_ERR_IOADDR; 2604 printk(KERN_ERR "mxser: can't request ports I/O region: "
2605 "0x%.8lx-0x%.8lx\n",
2606 brd->ports[0].ioaddr, brd->ports[0].ioaddr +
2607 8 * brd->info->nports - 1);
2608 return -EIO;
2609 }
2639 if (!request_region(brd->vector, 1, "mxser(vector)")) { 2610 if (!request_region(brd->vector, 1, "mxser(vector)")) {
2640 release_region(brd->ports[0].ioaddr, 8 * brd->info->nports); 2611 release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
2641 return MXSER_ERR_VECTOR; 2612 printk(KERN_ERR "mxser: can't request interrupt vector region: "
2613 "0x%.8lx-0x%.8lx\n",
2614 brd->ports[0].ioaddr, brd->ports[0].ioaddr +
2615 8 * brd->info->nports - 1);
2616 return -EIO;
2642 } 2617 }
2643 return brd->info->nports; 2618 return brd->info->nports;
2619
2620err_irqconflict:
2621 printk(KERN_ERR "mxser: invalid interrupt number\n");
2622 return -EIO;
2644} 2623}
2645 2624
2646static int __devinit mxser_probe(struct pci_dev *pdev, 2625static int __devinit mxser_probe(struct pci_dev *pdev,
@@ -2657,20 +2636,20 @@ static int __devinit mxser_probe(struct pci_dev *pdev,
2657 break; 2636 break;
2658 2637
2659 if (i >= MXSER_BOARDS) { 2638 if (i >= MXSER_BOARDS) {
2660 printk(KERN_ERR "Too many Smartio/Industio family boards found " 2639 dev_err(&pdev->dev, "too many boards found (maximum %d), board "
2661 "(maximum %d), board not configured\n", MXSER_BOARDS); 2640 "not configured\n", MXSER_BOARDS);
2662 goto err; 2641 goto err;
2663 } 2642 }
2664 2643
2665 brd = &mxser_boards[i]; 2644 brd = &mxser_boards[i];
2666 brd->idx = i * MXSER_PORTS_PER_BOARD; 2645 brd->idx = i * MXSER_PORTS_PER_BOARD;
2667 printk(KERN_INFO "Found MOXA %s board (BusNo=%d, DevNo=%d)\n", 2646 dev_info(&pdev->dev, "found MOXA %s board (BusNo=%d, DevNo=%d)\n",
2668 mxser_cards[ent->driver_data].name, 2647 mxser_cards[ent->driver_data].name,
2669 pdev->bus->number, PCI_SLOT(pdev->devfn)); 2648 pdev->bus->number, PCI_SLOT(pdev->devfn));
2670 2649
2671 retval = pci_enable_device(pdev); 2650 retval = pci_enable_device(pdev);
2672 if (retval) { 2651 if (retval) {
2673 printk(KERN_ERR "Moxa SmartI/O PCI enable fail !\n"); 2652 dev_err(&pdev->dev, "PCI enable failed\n");
2674 goto err; 2653 goto err;
2675 } 2654 }
2676 2655
@@ -2772,11 +2751,8 @@ static struct pci_driver mxser_driver = {
2772static int __init mxser_module_init(void) 2751static int __init mxser_module_init(void)
2773{ 2752{
2774 struct mxser_board *brd; 2753 struct mxser_board *brd;
2775 unsigned long cap; 2754 unsigned int b, i, m;
2776 unsigned int i, m, isaloop; 2755 int retval;
2777 int retval, b;
2778
2779 pr_debug("Loading module mxser ...\n");
2780 2756
2781 mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1); 2757 mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
2782 if (!mxvar_sdriver) 2758 if (!mxvar_sdriver)
@@ -2806,74 +2782,43 @@ static int __init mxser_module_init(void)
2806 goto err_put; 2782 goto err_put;
2807 } 2783 }
2808 2784
2809 mxvar_diagflag = 0;
2810
2811 m = 0;
2812 /* Start finding ISA boards here */ 2785 /* Start finding ISA boards here */
2813 for (isaloop = 0; isaloop < 2; isaloop++) 2786 for (m = 0, b = 0; b < MXSER_BOARDS; b++) {
2814 for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) { 2787 if (!ioaddr[b])
2815 if (!isaloop) 2788 continue;
2816 cap = mxserBoardCAP[b]; /* predefined */ 2789
2817 else 2790 brd = &mxser_boards[m];
2818 			cap = ioaddr[b]; /* module param */ 2791		retval = mxser_get_ISA_conf(ioaddr[b], brd);
2819 2792 if (retval <= 0) {
2820 if (!cap) 2793 brd->info = NULL;
2821 continue; 2794 continue;
2795 }
2822 2796
2823 brd = &mxser_boards[m]; 2797 printk(KERN_INFO "mxser: found MOXA %s board (CAP=0x%lx)\n",
2824 retval = mxser_get_ISA_conf(cap, brd); 2798 brd->info->name, ioaddr[b]);
2825
2826 if (retval != 0)
2827 printk(KERN_INFO "Found MOXA %s board "
2828 "(CAP=0x%x)\n",
2829 brd->info->name, ioaddr[b]);
2830
2831 if (retval <= 0) {
2832 if (retval == MXSER_ERR_IRQ)
2833 printk(KERN_ERR "Invalid interrupt "
2834 "number, board not "
2835 "configured\n");
2836 else if (retval == MXSER_ERR_IRQ_CONFLIT)
2837 printk(KERN_ERR "Invalid interrupt "
2838 "number, board not "
2839 "configured\n");
2840 else if (retval == MXSER_ERR_VECTOR)
2841 printk(KERN_ERR "Invalid interrupt "
2842 "vector, board not "
2843 "configured\n");
2844 else if (retval == MXSER_ERR_IOADDR)
2845 printk(KERN_ERR "Invalid I/O address, "
2846 "board not configured\n");
2847
2848 brd->info = NULL;
2849 continue;
2850 }
2851 2799
2852 /* mxser_initbrd will hook ISR. */ 2800 /* mxser_initbrd will hook ISR. */
2853 if (mxser_initbrd(brd, NULL) < 0) { 2801 if (mxser_initbrd(brd, NULL) < 0) {
2854 brd->info = NULL; 2802 brd->info = NULL;
2855 continue; 2803 continue;
2856 } 2804 }
2857 2805
2858 brd->idx = m * MXSER_PORTS_PER_BOARD; 2806 brd->idx = m * MXSER_PORTS_PER_BOARD;
2859 for (i = 0; i < brd->info->nports; i++) 2807 for (i = 0; i < brd->info->nports; i++)
2860 tty_register_device(mxvar_sdriver, brd->idx + i, 2808 tty_register_device(mxvar_sdriver, brd->idx + i, NULL);
2861 NULL);
2862 2809
2863 m++; 2810 m++;
2864 } 2811 }
2865 2812
2866 retval = pci_register_driver(&mxser_driver); 2813 retval = pci_register_driver(&mxser_driver);
2867 if (retval) { 2814 if (retval) {
2868 printk(KERN_ERR "Can't register pci driver\n"); 2815 printk(KERN_ERR "mxser: can't register pci driver\n");
2869 if (!m) { 2816 if (!m) {
2870 retval = -ENODEV; 2817 retval = -ENODEV;
2871 goto err_unr; 2818 goto err_unr;
2872 } /* else: we have some ISA cards under control */ 2819 } /* else: we have some ISA cards under control */
2873 } 2820 }
2874 2821
2875 pr_debug("Done.\n");
2876
2877 return 0; 2822 return 0;
2878err_unr: 2823err_unr:
2879 tty_unregister_driver(mxvar_sdriver); 2824 tty_unregister_driver(mxvar_sdriver);
@@ -2886,8 +2831,6 @@ static void __exit mxser_module_exit(void)
2886{ 2831{
2887 unsigned int i, j; 2832 unsigned int i, j;
2888 2833
2889 pr_debug("Unloading module mxser ...\n");
2890
2891 pci_unregister_driver(&mxser_driver); 2834 pci_unregister_driver(&mxser_driver);
2892 2835
2893 for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */ 2836 for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */
@@ -2901,8 +2844,6 @@ static void __exit mxser_module_exit(void)
2901 for (i = 0; i < MXSER_BOARDS; i++) 2844 for (i = 0; i < MXSER_BOARDS; i++)
2902 if (mxser_boards[i].info != NULL) 2845 if (mxser_boards[i].info != NULL)
2903 mxser_release_res(&mxser_boards[i], NULL, 1); 2846 mxser_release_res(&mxser_boards[i], NULL, 1);
2904
2905 pr_debug("Done.\n");
2906} 2847}
2907 2848
2908module_init(mxser_module_init); 2849module_init(mxser_module_init);
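
The mxser hunks above replace the driver's private MXSER_ERR_* return codes with standard errno values (-EIO, -ENODEV) and move board-level messages from bare printk() to dev_err()/dev_info(), which prefix the message with the device name. A minimal sketch of that convention in a hypothetical PCI probe routine — the "foo" names are invented for illustration and teardown is elided:

#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/errno.h>

static int __devinit foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	int retval;

	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "PCI enable failed\n");
		return retval;		/* propagate the errno, no private codes */
	}

	if (!request_region(pci_resource_start(pdev, 0), 8, "foo")) {
		dev_err(&pdev->dev, "I/O region busy\n");
		retval = -EIO;		/* standard code instead of FOO_ERR_IOADDR */
		goto err_disable;
	}

	dev_info(&pdev->dev, "found board (BusNo=%d, DevNo=%d)\n",
		 pdev->bus->number, PCI_SLOT(pdev->devfn));
	return 0;

err_disable:
	pci_disable_device(pdev);
	return retval;
}
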
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index a22662b6a1a5..39f6357e3b5d 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -107,7 +107,6 @@
107#include <linux/init.h> 107#include <linux/init.h>
108#include <linux/proc_fs.h> 108#include <linux/proc_fs.h>
109#include <linux/spinlock.h> 109#include <linux/spinlock.h>
110#include <linux/smp_lock.h>
111 110
112#include <asm/io.h> 111#include <asm/io.h>
113#include <asm/uaccess.h> 112#include <asm/uaccess.h>
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index ba012c2bdf7a..f9f72a211292 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -122,35 +122,20 @@ static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cm
122static ssize_t flash_read(struct file *file, char __user *buf, size_t size, 122static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
123 loff_t *ppos) 123 loff_t *ppos)
124{ 124{
125 unsigned long p = *ppos; 125 ssize_t ret;
126 unsigned int count = size;
127 int ret = 0;
128 126
129 if (flashdebug) 127 if (flashdebug)
130 printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, " 128 printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, "
131 "buffer=%p, count=0x%X.\n", p, buf, count); 129 "buffer=%p, count=0x%X.\n", p, buf, count);
130 /*
131 * We now lock against reads and writes. --rmk
132 */
133 if (mutex_lock_interruptible(&nwflash_mutex))
134 return -ERESTARTSYS;
132 135
133 if (count) 136 ret = simple_read_from_buffer(buf, size, ppos, FLASH_BASE, gbFlashSize);
134 ret = -ENXIO; 137 mutex_unlock(&nwflash_mutex);
135
136 if (p < gbFlashSize) {
137 if (count > gbFlashSize - p)
138 count = gbFlashSize - p;
139 138
140 /*
141 * We now lock against reads and writes. --rmk
142 */
143 if (mutex_lock_interruptible(&nwflash_mutex))
144 return -ERESTARTSYS;
145
146 ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count);
147 if (ret == 0) {
148 ret = count;
149 *ppos += count;
150 } else
151 ret = -EFAULT;
152 mutex_unlock(&nwflash_mutex);
153 }
154 return ret; 139 return ret;
155} 140}
156 141
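
The flash_read() rewrite above drops the hand-rolled offset clamping and copy_to_user() call in favour of simple_read_from_buffer(), which performs the same bounds checks and *ppos update internally. A sketch of the helper in isolation, with a hypothetical static buffer standing in for the FLASH_BASE/gbFlashSize mapping used in the hunk:

#include <linux/fs.h>
#include <linux/module.h>

static const char flash_base[4096] = "example flash contents";
#define flash_size sizeof(flash_base)

static ssize_t foo_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	/*
	 * simple_read_from_buffer() clamps the request to the end of the
	 * buffer, does the copy_to_user(), advances *ppos and returns the
	 * number of bytes copied (0 at EOF, -EFAULT on a bad user pointer),
	 * replacing the open-coded logic removed above.
	 */
	return simple_read_from_buffer(buf, count, ppos,
				       flash_base, flash_size);
}
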
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 7af7a7e6b9c2..bee39fdfba73 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -67,7 +67,7 @@
67#include <linux/major.h> 67#include <linux/major.h>
68#include <linux/ppdev.h> 68#include <linux/ppdev.h>
69#include <linux/smp_lock.h> 69#include <linux/smp_lock.h>
70#include <asm/uaccess.h> 70#include <linux/uaccess.h>
71 71
72#define PP_VERSION "ppdev: user-space parallel port driver" 72#define PP_VERSION "ppdev: user-space parallel port driver"
73#define CHRDEV "ppdev" 73#define CHRDEV "ppdev"
@@ -328,10 +328,9 @@ static enum ieee1284_phase init_phase (int mode)
328 return IEEE1284_PH_FWD_IDLE; 328 return IEEE1284_PH_FWD_IDLE;
329} 329}
330 330
331static int pp_ioctl(struct inode *inode, struct file *file, 331static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
332 unsigned int cmd, unsigned long arg)
333{ 332{
334 unsigned int minor = iminor(inode); 333 unsigned int minor = iminor(file->f_path.dentry->d_inode);
335 struct pp_struct *pp = file->private_data; 334 struct pp_struct *pp = file->private_data;
336 struct parport * port; 335 struct parport * port;
337 void __user *argp = (void __user *)arg; 336 void __user *argp = (void __user *)arg;
@@ -634,6 +633,15 @@ static int pp_ioctl(struct inode *inode, struct file *file,
634 return 0; 633 return 0;
635} 634}
636 635
636static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
637{
638 long ret;
639 lock_kernel();
640 ret = pp_do_ioctl(file, cmd, arg);
641 unlock_kernel();
642 return ret;
643}
644
637static int pp_open (struct inode * inode, struct file * file) 645static int pp_open (struct inode * inode, struct file * file)
638{ 646{
639 unsigned int minor = iminor(inode); 647 unsigned int minor = iminor(inode);
@@ -745,7 +753,7 @@ static const struct file_operations pp_fops = {
745 .read = pp_read, 753 .read = pp_read,
746 .write = pp_write, 754 .write = pp_write,
747 .poll = pp_poll, 755 .poll = pp_poll,
748 .ioctl = pp_ioctl, 756 .unlocked_ioctl = pp_ioctl,
749 .open = pp_open, 757 .open = pp_open,
750 .release = pp_release, 758 .release = pp_release,
751}; 759};
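
ppdev gets the same conversion several drivers in this series receive: the old file_operations .ioctl entry, which the VFS called with the big kernel lock held, becomes .unlocked_ioctl, and the driver takes the BKL itself around the unchanged handler (recovering the inode from file->f_path.dentry->d_inode where it is still needed, as the hunk shows). A sketch of the wrapper pattern with invented names:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

static int foo_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* the original handler body goes here, unchanged */
	return 0;
}

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;

	lock_kernel();			/* keep the old .ioctl locking explicit */
	ret = foo_do_ioctl(file, cmd, arg);
	unlock_kernel();
	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,	/* previously: .ioctl = foo_ioctl */
};
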
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0cf98bd4f2d2..e0d0e371909c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -236,6 +236,7 @@
236#include <linux/fs.h> 236#include <linux/fs.h>
237#include <linux/genhd.h> 237#include <linux/genhd.h>
238#include <linux/interrupt.h> 238#include <linux/interrupt.h>
239#include <linux/mm.h>
239#include <linux/spinlock.h> 240#include <linux/spinlock.h>
240#include <linux/percpu.h> 241#include <linux/percpu.h>
241#include <linux/cryptohash.h> 242#include <linux/cryptohash.h>
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 0cdfee152916..a8f68a3f14dd 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -179,7 +179,7 @@ static int rio_set_real_termios(void *ptr);
179static void rio_hungup(void *ptr); 179static void rio_hungup(void *ptr);
180static void rio_close(void *ptr); 180static void rio_close(void *ptr);
181static int rio_chars_in_buffer(void *ptr); 181static int rio_chars_in_buffer(void *ptr);
182static int rio_fw_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); 182static long rio_fw_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
183static int rio_init_drivers(void); 183static int rio_init_drivers(void);
184 184
185static void my_hd(void *addr, int len); 185static void my_hd(void *addr, int len);
@@ -240,7 +240,7 @@ static struct real_driver rio_real_driver = {
240 240
241static const struct file_operations rio_fw_fops = { 241static const struct file_operations rio_fw_fops = {
242 .owner = THIS_MODULE, 242 .owner = THIS_MODULE,
243 .ioctl = rio_fw_ioctl, 243 .unlocked_ioctl = rio_fw_ioctl,
244}; 244};
245 245
246static struct miscdevice rio_fw_device = { 246static struct miscdevice rio_fw_device = {
@@ -560,13 +560,15 @@ static void rio_close(void *ptr)
560 560
561 561
562 562
563static int rio_fw_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) 563static long rio_fw_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
564{ 564{
565 int rc = 0; 565 int rc = 0;
566 func_enter(); 566 func_enter();
567 567
568 /* The "dev" argument isn't used. */ 568 /* The "dev" argument isn't used. */
569 lock_kernel();
569 rc = riocontrol(p, 0, cmd, arg, capable(CAP_SYS_ADMIN)); 570 rc = riocontrol(p, 0, cmd, arg, capable(CAP_SYS_ADMIN));
571 unlock_kernel();
570 572
571 func_exit(); 573 func_exit();
572 return rc; 574 return rc;
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index fa92a8af5a5a..dbefbb30ed44 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -78,9 +78,10 @@
78#include <linux/wait.h> 78#include <linux/wait.h>
79#include <linux/bcd.h> 79#include <linux/bcd.h>
80#include <linux/delay.h> 80#include <linux/delay.h>
81#include <linux/smp_lock.h>
82#include <linux/uaccess.h>
81 83
82#include <asm/current.h> 84#include <asm/current.h>
83#include <asm/uaccess.h>
84#include <asm/system.h> 85#include <asm/system.h>
85 86
86#ifdef CONFIG_X86 87#ifdef CONFIG_X86
@@ -120,8 +121,6 @@ static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
120 return 0; 121 return 0;
121} 122}
122#endif 123#endif
123#else
124extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
125#endif 124#endif
126 125
127/* 126/*
@@ -144,8 +143,7 @@ static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0);
144static ssize_t rtc_read(struct file *file, char __user *buf, 143static ssize_t rtc_read(struct file *file, char __user *buf,
145 size_t count, loff_t *ppos); 144 size_t count, loff_t *ppos);
146 145
147static int rtc_ioctl(struct inode *inode, struct file *file, 146static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
148 unsigned int cmd, unsigned long arg);
149 147
150#ifdef RTC_IRQ 148#ifdef RTC_IRQ
151static unsigned int rtc_poll(struct file *file, poll_table *wait); 149static unsigned int rtc_poll(struct file *file, poll_table *wait);
@@ -719,10 +717,13 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
719 &wtime, sizeof wtime) ? -EFAULT : 0; 717 &wtime, sizeof wtime) ? -EFAULT : 0;
720} 718}
721 719
722static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 720static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
723 unsigned long arg)
724{ 721{
725 return rtc_do_ioctl(cmd, arg, 0); 722 long ret;
723 lock_kernel();
724 ret = rtc_do_ioctl(cmd, arg, 0);
725 unlock_kernel();
726 return ret;
726} 727}
727 728
728/* 729/*
@@ -915,7 +916,7 @@ static const struct file_operations rtc_fops = {
915#ifdef RTC_IRQ 916#ifdef RTC_IRQ
916 .poll = rtc_poll, 917 .poll = rtc_poll,
917#endif 918#endif
918 .ioctl = rtc_ioctl, 919 .unlocked_ioctl = rtc_ioctl,
919 .open = rtc_open, 920 .open = rtc_open,
920 .release = rtc_release, 921 .release = rtc_release,
921 .fasync = rtc_fasync, 922 .fasync = rtc_fasync,
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index b976248e1072..19db1eb87c26 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -1256,7 +1256,6 @@ static int stl_tiocmset(struct tty_struct *tty, struct file *file,
1256static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) 1256static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
1257{ 1257{
1258 struct stlport *portp; 1258 struct stlport *portp;
1259 unsigned int ival;
1260 int rc; 1259 int rc;
1261 void __user *argp = (void __user *)arg; 1260 void __user *argp = (void __user *)arg;
1262 1261
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 2162439bbe48..c385206f9db5 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -286,8 +286,8 @@ static void sx_close(void *ptr);
286static int sx_chars_in_buffer(void *ptr); 286static int sx_chars_in_buffer(void *ptr);
287static int sx_init_board(struct sx_board *board); 287static int sx_init_board(struct sx_board *board);
288static int sx_init_portstructs(int nboards, int nports); 288static int sx_init_portstructs(int nboards, int nports);
289static int sx_fw_ioctl(struct inode *inode, struct file *filp, 289static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
290 unsigned int cmd, unsigned long arg); 290 unsigned long arg);
291static int sx_init_drivers(void); 291static int sx_init_drivers(void);
292 292
293static struct tty_driver *sx_driver; 293static struct tty_driver *sx_driver;
@@ -396,7 +396,7 @@ static struct real_driver sx_real_driver = {
396 396
397static const struct file_operations sx_fw_fops = { 397static const struct file_operations sx_fw_fops = {
398 .owner = THIS_MODULE, 398 .owner = THIS_MODULE,
399 .ioctl = sx_fw_ioctl, 399 .unlocked_ioctl = sx_fw_ioctl,
400}; 400};
401 401
402static struct miscdevice sx_fw_device = { 402static struct miscdevice sx_fw_device = {
@@ -1686,10 +1686,10 @@ static int do_memtest_w(struct sx_board *board, int min, int max)
1686} 1686}
1687#endif 1687#endif
1688 1688
1689static int sx_fw_ioctl(struct inode *inode, struct file *filp, 1689static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
1690 unsigned int cmd, unsigned long arg) 1690 unsigned long arg)
1691{ 1691{
1692 int rc = 0; 1692 long rc = 0;
1693 int __user *descr = (int __user *)arg; 1693 int __user *descr = (int __user *)arg;
1694 int i; 1694 int i;
1695 static struct sx_board *board = NULL; 1695 static struct sx_board *board = NULL;
@@ -1699,13 +1699,10 @@ static int sx_fw_ioctl(struct inode *inode, struct file *filp,
1699 1699
1700 func_enter(); 1700 func_enter();
1701 1701
1702#if 0 1702 if (!capable(CAP_SYS_RAWIO))
1703 /* Removed superuser check: Sysops can use the permissions on the device
1704 file to restrict access. Recommendation: Root only. (root.root 600) */
1705 if (!capable(CAP_SYS_ADMIN)) {
1706 return -EPERM; 1703 return -EPERM;
1707 } 1704
1708#endif 1705 lock_kernel();
1709 1706
1710 sx_dprintk(SX_DEBUG_FIRMWARE, "IOCTL %x: %lx\n", cmd, arg); 1707 sx_dprintk(SX_DEBUG_FIRMWARE, "IOCTL %x: %lx\n", cmd, arg);
1711 1708
@@ -1720,19 +1717,23 @@ static int sx_fw_ioctl(struct inode *inode, struct file *filp,
1720 for (i = 0; i < SX_NBOARDS; i++) 1717 for (i = 0; i < SX_NBOARDS; i++)
1721 sx_dprintk(SX_DEBUG_FIRMWARE, "<%x> ", boards[i].flags); 1718 sx_dprintk(SX_DEBUG_FIRMWARE, "<%x> ", boards[i].flags);
1722 sx_dprintk(SX_DEBUG_FIRMWARE, "\n"); 1719 sx_dprintk(SX_DEBUG_FIRMWARE, "\n");
1720 unlock_kernel();
1723 return -EIO; 1721 return -EIO;
1724 } 1722 }
1725 1723
1726 switch (cmd) { 1724 switch (cmd) {
1727 case SXIO_SET_BOARD: 1725 case SXIO_SET_BOARD:
1728 sx_dprintk(SX_DEBUG_FIRMWARE, "set board to %ld\n", arg); 1726 sx_dprintk(SX_DEBUG_FIRMWARE, "set board to %ld\n", arg);
1727 rc = -EIO;
1729 if (arg >= SX_NBOARDS) 1728 if (arg >= SX_NBOARDS)
1730 return -EIO; 1729 break;
1731 sx_dprintk(SX_DEBUG_FIRMWARE, "not out of range\n"); 1730 sx_dprintk(SX_DEBUG_FIRMWARE, "not out of range\n");
1732 if (!(boards[arg].flags & SX_BOARD_PRESENT)) 1731 if (!(boards[arg].flags & SX_BOARD_PRESENT))
1733 return -EIO; 1732 break;
1734 sx_dprintk(SX_DEBUG_FIRMWARE, ".. and present!\n"); 1733 sx_dprintk(SX_DEBUG_FIRMWARE, ".. and present!\n");
1735 board = &boards[arg]; 1734 board = &boards[arg];
1735 rc = 0;
1736 /* FIXME: And this does ... nothing?? */
1736 break; 1737 break;
1737 case SXIO_GET_TYPE: 1738 case SXIO_GET_TYPE:
1738 rc = -ENOENT; /* If we manage to miss one, return error. */ 1739 rc = -ENOENT; /* If we manage to miss one, return error. */
@@ -1746,7 +1747,7 @@ static int sx_fw_ioctl(struct inode *inode, struct file *filp,
1746 rc = SX_TYPE_SI; 1747 rc = SX_TYPE_SI;
1747 if (IS_EISA_BOARD(board)) 1748 if (IS_EISA_BOARD(board))
1748 rc = SX_TYPE_SI; 1749 rc = SX_TYPE_SI;
1749 sx_dprintk(SX_DEBUG_FIRMWARE, "returning type= %d\n", rc); 1750 sx_dprintk(SX_DEBUG_FIRMWARE, "returning type= %ld\n", rc);
1750 break; 1751 break;
1751 case SXIO_DO_RAMTEST: 1752 case SXIO_DO_RAMTEST:
1752 if (sx_initialized) /* Already initialized: better not ramtest the board. */ 1753 if (sx_initialized) /* Already initialized: better not ramtest the board. */
@@ -1760,19 +1761,26 @@ static int sx_fw_ioctl(struct inode *inode, struct file *filp,
1760 rc = do_memtest(board, 0, 0x7ff8); 1761 rc = do_memtest(board, 0, 0x7ff8);
1761 /* if (!rc) rc = do_memtest_w (board, 0, 0x7ff8); */ 1762 /* if (!rc) rc = do_memtest_w (board, 0, 0x7ff8); */
1762 } 1763 }
1763 sx_dprintk(SX_DEBUG_FIRMWARE, "returning memtest result= %d\n", 1764 sx_dprintk(SX_DEBUG_FIRMWARE,
1764 rc); 1765 "returning memtest result= %ld\n", rc);
1765 break; 1766 break;
1766 case SXIO_DOWNLOAD: 1767 case SXIO_DOWNLOAD:
1767 if (sx_initialized) /* Already initialized */ 1768 if (sx_initialized) {/* Already initialized */
1768 return -EEXIST; 1769 rc = -EEXIST;
1769 if (!sx_reset(board)) 1770 break;
1770 return -EIO; 1771 }
1772 if (!sx_reset(board)) {
1773 rc = -EIO;
1774 break;
1775 }
1771 sx_dprintk(SX_DEBUG_INIT, "reset the board...\n"); 1776 sx_dprintk(SX_DEBUG_INIT, "reset the board...\n");
1772 1777
1773 tmp = kmalloc(SX_CHUNK_SIZE, GFP_USER); 1778 tmp = kmalloc(SX_CHUNK_SIZE, GFP_USER);
1774 if (!tmp) 1779 if (!tmp) {
1775 return -ENOMEM; 1780 rc = -ENOMEM;
1781 break;
1782 }
1783 /* FIXME: check returns */
1776 get_user(nbytes, descr++); 1784 get_user(nbytes, descr++);
1777 get_user(offset, descr++); 1785 get_user(offset, descr++);
1778 get_user(data, descr++); 1786 get_user(data, descr++);
@@ -1782,7 +1790,8 @@ static int sx_fw_ioctl(struct inode *inode, struct file *filp,
1782 (i + SX_CHUNK_SIZE > nbytes) ? 1790 (i + SX_CHUNK_SIZE > nbytes) ?
1783 nbytes - i : SX_CHUNK_SIZE)) { 1791 nbytes - i : SX_CHUNK_SIZE)) {
1784 kfree(tmp); 1792 kfree(tmp);
1785 return -EFAULT; 1793 rc = -EFAULT;
1794 break;
1786 } 1795 }
1787 memcpy_toio(board->base2 + offset + i, tmp, 1796 memcpy_toio(board->base2 + offset + i, tmp,
1788 (i + SX_CHUNK_SIZE > nbytes) ? 1797 (i + SX_CHUNK_SIZE > nbytes) ?
@@ -1798,13 +1807,17 @@ static int sx_fw_ioctl(struct inode *inode, struct file *filp,
1798 rc = sx_nports; 1807 rc = sx_nports;
1799 break; 1808 break;
1800 case SXIO_INIT: 1809 case SXIO_INIT:
1801 if (sx_initialized) /* Already initialized */ 1810 if (sx_initialized) { /* Already initialized */
1802 return -EEXIST; 1811 rc = -EEXIST;
1812 break;
1813 }
1803 /* This is not allowed until all boards are initialized... */ 1814 /* This is not allowed until all boards are initialized... */
1804 for (i = 0; i < SX_NBOARDS; i++) { 1815 for (i = 0; i < SX_NBOARDS; i++) {
1805 if ((boards[i].flags & SX_BOARD_PRESENT) && 1816 if ((boards[i].flags & SX_BOARD_PRESENT) &&
1806 !(boards[i].flags & SX_BOARD_INITIALIZED)) 1817 !(boards[i].flags & SX_BOARD_INITIALIZED)) {
1807 return -EIO; 1818 rc = -EIO;
1819 break;
1820 }
1808 } 1821 }
1809 for (i = 0; i < SX_NBOARDS; i++) 1822 for (i = 0; i < SX_NBOARDS; i++)
1810 if (!(boards[i].flags & SX_BOARD_PRESENT)) 1823 if (!(boards[i].flags & SX_BOARD_PRESENT))
@@ -1832,10 +1845,10 @@ static int sx_fw_ioctl(struct inode *inode, struct file *filp,
1832 rc = sx_nports; 1845 rc = sx_nports;
1833 break; 1846 break;
1834 default: 1847 default:
1835 printk(KERN_WARNING "Unknown ioctl on firmware device (%x).\n", 1848 rc = -ENOTTY;
1836 cmd);
1837 break; 1849 break;
1838 } 1850 }
1851 unlock_kernel();
1839 func_exit(); 1852 func_exit();
1840 return rc; 1853 return rc;
1841} 1854}
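
Because sx_fw_ioctl() now runs between lock_kernel() and unlock_kernel(), the hunks above also turn every early "return -E..." inside the switch into "rc = -E...; break;", so all paths reach the single unlock at the bottom, and the default case now returns -ENOTTY instead of only logging a warning. The shape of that single-exit pattern, sketched with a hypothetical command number:

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>

static long foo_fw_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	long rc = 0;

	lock_kernel();
	switch (cmd) {
	case 0x1234:			/* hypothetical command */
		if (arg >= 4) {
			rc = -EIO;	/* was "return -EIO" -- would skip the unlock */
			break;
		}
		/* ... real work ... */
		break;
	default:
		rc = -ENOTTY;		/* unknown command */
		break;
	}
	unlock_kernel();		/* reached on every path */
	return rc;
}
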
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 15e597d03002..e1b46bc7e43c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -915,7 +915,7 @@ static void tty_reset_termios(struct tty_struct *tty)
915 * do_tty_hangup - actual handler for hangup events 915 * do_tty_hangup - actual handler for hangup events
916 * @work: tty device 916 * @work: tty device
917 * 917 *
918k * This can be called by the "eventd" kernel thread. That is process 918 * This can be called by the "eventd" kernel thread. That is process
919 * synchronous but doesn't hold any locks, so we need to make sure we 919 * synchronous but doesn't hold any locks, so we need to make sure we
920 * have the appropriate locks for what we're doing. 920 * have the appropriate locks for what we're doing.
921 * 921 *
@@ -1119,19 +1119,6 @@ int tty_hung_up_p(struct file *filp)
1119 1119
1120EXPORT_SYMBOL(tty_hung_up_p); 1120EXPORT_SYMBOL(tty_hung_up_p);
1121 1121
1122/**
1123 * is_tty - checker whether file is a TTY
1124 * @filp: file handle that may be a tty
1125 *
1126 * Check if the file handle is a tty handle.
1127 */
1128
1129int is_tty(struct file *filp)
1130{
1131 return filp->f_op->read == tty_read
1132 || filp->f_op->read == hung_up_tty_read;
1133}
1134
1135static void session_clear_tty(struct pid *session) 1122static void session_clear_tty(struct pid *session)
1136{ 1123{
1137 struct task_struct *p; 1124 struct task_struct *p;
@@ -3593,7 +3580,6 @@ void proc_clear_tty(struct task_struct *p)
3593 p->signal->tty = NULL; 3580 p->signal->tty = NULL;
3594 spin_unlock_irq(&p->sighand->siglock); 3581 spin_unlock_irq(&p->sighand->siglock);
3595} 3582}
3596EXPORT_SYMBOL(proc_clear_tty);
3597 3583
3598/* Called under the sighand lock */ 3584/* Called under the sighand lock */
3599 3585
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index dc17fe3a88bc..d0f4eb6fdb7f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -46,6 +46,9 @@ static char *in, *inbuf;
46/* The operations for our console. */ 46/* The operations for our console. */
47static struct hv_ops virtio_cons; 47static struct hv_ops virtio_cons;
48 48
49/* The hvc device */
50static struct hvc_struct *hvc;
51
49/*D:310 The put_chars() callback is pretty straightforward. 52/*D:310 The put_chars() callback is pretty straightforward.
50 * 53 *
51 * We turn the characters into a scatter-gather list, add it to the output 54 * We turn the characters into a scatter-gather list, add it to the output
@@ -134,6 +137,27 @@ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
134 return hvc_instantiate(0, 0, &virtio_cons); 137 return hvc_instantiate(0, 0, &virtio_cons);
135} 138}
136 139
140/*
141 * we support only one console, the hvc struct is a global var
142 * There is no need to do anything
143 */
144static int notifier_add_vio(struct hvc_struct *hp, int data)
145{
146 hp->irq_requested = 1;
147 return 0;
148}
149
150static void notifier_del_vio(struct hvc_struct *hp, int data)
151{
152 hp->irq_requested = 0;
153}
154
155static void hvc_handle_input(struct virtqueue *vq)
156{
157 if (hvc_poll(hvc))
158 hvc_kick();
159}
160
137/*D:370 Once we're further in boot, we get probed like any other virtio device. 161/*D:370 Once we're further in boot, we get probed like any other virtio device.
138 * At this stage we set up the output virtqueue. 162 * At this stage we set up the output virtqueue.
139 * 163 *
@@ -144,7 +168,6 @@ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
144static int __devinit virtcons_probe(struct virtio_device *dev) 168static int __devinit virtcons_probe(struct virtio_device *dev)
145{ 169{
146 int err; 170 int err;
147 struct hvc_struct *hvc;
148 171
149 vdev = dev; 172 vdev = dev;
150 173
@@ -158,7 +181,7 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
158 /* Find the input queue. */ 181 /* Find the input queue. */
159 /* FIXME: This is why we want to wean off hvc: we do nothing 182 /* FIXME: This is why we want to wean off hvc: we do nothing
160 * when input comes in. */ 183 * when input comes in. */
161 in_vq = vdev->config->find_vq(vdev, 0, NULL); 184 in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input);
162 if (IS_ERR(in_vq)) { 185 if (IS_ERR(in_vq)) {
163 err = PTR_ERR(in_vq); 186 err = PTR_ERR(in_vq);
164 goto free; 187 goto free;
@@ -173,15 +196,18 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
173 /* Start using the new console output. */ 196 /* Start using the new console output. */
174 virtio_cons.get_chars = get_chars; 197 virtio_cons.get_chars = get_chars;
175 virtio_cons.put_chars = put_chars; 198 virtio_cons.put_chars = put_chars;
199 virtio_cons.notifier_add = notifier_add_vio;
200 virtio_cons.notifier_del = notifier_del_vio;
176 201
177 /* The first argument of hvc_alloc() is the virtual console number, so 202 /* The first argument of hvc_alloc() is the virtual console number, so
178 * we use zero. The second argument is the interrupt number; we 203 * we use zero. The second argument is the parameter for the
179 * currently leave this as zero: it would be better not to use the 204 * notification mechanism (like irq number). We currently leave this
180 * hvc mechanism and fix this (FIXME!). 205 * as zero, virtqueues have implicit notifications.
181 * 206 *
182 * The third argument is a "struct hv_ops" containing the put_chars() 207 * The third argument is a "struct hv_ops" containing the put_chars()
183 * and get_chars() pointers. The final argument is the output buffer 208 * get_chars(), notifier_add() and notifier_del() pointers.
184 * size: we can do any size, so we put PAGE_SIZE here. */ 209 * The final argument is the output buffer size: we can do any size,
210 * so we put PAGE_SIZE here. */
185 hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); 211 hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE);
186 if (IS_ERR(hvc)) { 212 if (IS_ERR(hvc)) {
187 err = PTR_ERR(hvc); 213 err = PTR_ERR(hvc);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index e32a076d5f1f..82a51f38a546 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -261,7 +261,7 @@ static void notify_update(struct vc_data *vc)
261#ifdef VT_BUF_VRAM_ONLY 261#ifdef VT_BUF_VRAM_ONLY
262#define DO_UPDATE(vc) 0 262#define DO_UPDATE(vc) 0
263#else 263#else
264#define DO_UPDATE(vc) CON_IS_VISIBLE(vc) 264#define DO_UPDATE(vc) (CON_IS_VISIBLE(vc) && !console_blanked)
265#endif 265#endif
266 266
267static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed) 267static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed)
@@ -2211,7 +2211,7 @@ rescan_last_byte:
2211 c = 0xfffd; 2211 c = 0xfffd;
2212 tc = c; 2212 tc = c;
2213 } else { /* no utf or alternate charset mode */ 2213 } else { /* no utf or alternate charset mode */
2214 tc = vc->vc_translate[vc->vc_toggle_meta ? (c | 0x80) : c]; 2214 tc = vc_translate(vc, c);
2215 } 2215 }
2216 2216
2217 param.c = tc; 2217 param.c = tc;
@@ -2749,8 +2749,8 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2749 tty->termios->c_iflag |= IUTF8; 2749 tty->termios->c_iflag |= IUTF8;
2750 else 2750 else
2751 tty->termios->c_iflag &= ~IUTF8; 2751 tty->termios->c_iflag &= ~IUTF8;
2752 release_console_sem();
2753 vcs_make_sysfs(tty); 2752 vcs_make_sysfs(tty);
2753 release_console_sem();
2754 return ret; 2754 return ret;
2755 } 2755 }
2756 } 2756 }
@@ -2775,8 +2775,8 @@ static void con_close(struct tty_struct *tty, struct file *filp)
2775 if (vc) 2775 if (vc)
2776 vc->vc_tty = NULL; 2776 vc->vc_tty = NULL;
2777 tty->driver_data = NULL; 2777 tty->driver_data = NULL;
2778 release_console_sem();
2779 vcs_remove_sysfs(tty); 2778 vcs_remove_sysfs(tty);
2779 release_console_sem();
2780 mutex_unlock(&tty_mutex); 2780 mutex_unlock(&tty_mutex);
2781 /* 2781 /*
2782 * tty_mutex is released, but we still hold BKL, so there is 2782 * tty_mutex is released, but we still hold BKL, so there is
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 51966ccf4ea3..8bfee5fb7223 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -87,7 +87,6 @@
87#include <linux/mutex.h> 87#include <linux/mutex.h>
88#include <linux/smp_lock.h> 88#include <linux/smp_lock.h>
89#include <linux/sysctl.h> 89#include <linux/sysctl.h>
90#include <linux/version.h>
91#include <linux/fs.h> 90#include <linux/fs.h>
92#include <linux/cdev.h> 91#include <linux/cdev.h>
93#include <linux/platform_device.h> 92#include <linux/platform_device.h>
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ee1df0d45e81..8d6a3ff02672 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
589 ssize_t i = 0; 589 ssize_t i = 0;
590 unsigned int cpu; 590 unsigned int cpu;
591 591
592 for_each_cpu_mask(cpu, mask) { 592 for_each_cpu_mask_nr(cpu, mask) {
593 if (i) 593 if (i)
594 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 594 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
835 } 835 }
836#endif 836#endif
837 837
838 for_each_cpu_mask(j, policy->cpus) { 838 for_each_cpu_mask_nr(j, policy->cpus) {
839 if (cpu == j) 839 if (cpu == j)
840 continue; 840 continue;
841 841
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
898 } 898 }
899 899
900 spin_lock_irqsave(&cpufreq_driver_lock, flags); 900 spin_lock_irqsave(&cpufreq_driver_lock, flags);
901 for_each_cpu_mask(j, policy->cpus) { 901 for_each_cpu_mask_nr(j, policy->cpus) {
902 per_cpu(cpufreq_cpu_data, j) = policy; 902 per_cpu(cpufreq_cpu_data, j) = policy;
903 per_cpu(policy_cpu, j) = policy->cpu; 903 per_cpu(policy_cpu, j) = policy->cpu;
904 } 904 }
905 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 905 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
906 906
907 /* symlink affected CPUs */ 907 /* symlink affected CPUs */
908 for_each_cpu_mask(j, policy->cpus) { 908 for_each_cpu_mask_nr(j, policy->cpus) {
909 if (j == cpu) 909 if (j == cpu)
910 continue; 910 continue;
911 if (!cpu_online(j)) 911 if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
945 945
946err_out_unregister: 946err_out_unregister:
947 spin_lock_irqsave(&cpufreq_driver_lock, flags); 947 spin_lock_irqsave(&cpufreq_driver_lock, flags);
948 for_each_cpu_mask(j, policy->cpus) 948 for_each_cpu_mask_nr(j, policy->cpus)
949 per_cpu(cpufreq_cpu_data, j) = NULL; 949 per_cpu(cpufreq_cpu_data, j) = NULL;
950 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 950 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
951 951
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1028 * the sysfs links afterwards. 1028 * the sysfs links afterwards.
1029 */ 1029 */
1030 if (unlikely(cpus_weight(data->cpus) > 1)) { 1030 if (unlikely(cpus_weight(data->cpus) > 1)) {
1031 for_each_cpu_mask(j, data->cpus) { 1031 for_each_cpu_mask_nr(j, data->cpus) {
1032 if (j == cpu) 1032 if (j == cpu)
1033 continue; 1033 continue;
1034 per_cpu(cpufreq_cpu_data, j) = NULL; 1034 per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1038 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1038 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1039 1039
1040 if (unlikely(cpus_weight(data->cpus) > 1)) { 1040 if (unlikely(cpus_weight(data->cpus) > 1)) {
1041 for_each_cpu_mask(j, data->cpus) { 1041 for_each_cpu_mask_nr(j, data->cpus) {
1042 if (j == cpu) 1042 if (j == cpu)
1043 continue; 1043 continue;
1044 dprintk("removing link for cpu %u\n", j); 1044 dprintk("removing link for cpu %u\n", j);
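
The cpufreq core hunks are a mechanical substitution of for_each_cpu_mask() with for_each_cpu_mask_nr(), which walks only CPU numbers below nr_cpu_ids rather than scanning all NR_CPUS bit positions; the loop bodies are untouched. Sketched with an invented helper:

#include <linux/cpumask.h>

/* count the CPUs set in a policy-style mask; illustrative only */
static unsigned int foo_count_cpus(cpumask_t mask)
{
	unsigned int cpu, n = 0;

	for_each_cpu_mask_nr(cpu, mask)		/* stops at nr_cpu_ids */
		n++;
	return n;
}
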
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2..fe565ee43757 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
497 return rc; 497 return rc;
498 } 498 }
499 499
500 for_each_cpu_mask(j, policy->cpus) { 500 for_each_cpu_mask_nr(j, policy->cpus) {
501 struct cpu_dbs_info_s *j_dbs_info; 501 struct cpu_dbs_info_s *j_dbs_info;
502 j_dbs_info = &per_cpu(cpu_dbs_info, j); 502 j_dbs_info = &per_cpu(cpu_dbs_info, j);
503 j_dbs_info->cur_policy = policy; 503 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda382..33855cb3cf16 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
367 367
368 /* Get Idle Time */ 368 /* Get Idle Time */
369 idle_ticks = UINT_MAX; 369 idle_ticks = UINT_MAX;
370 for_each_cpu_mask(j, policy->cpus) { 370 for_each_cpu_mask_nr(j, policy->cpus) {
371 cputime64_t total_idle_ticks; 371 cputime64_t total_idle_ticks;
372 unsigned int tmp_idle_ticks; 372 unsigned int tmp_idle_ticks;
373 struct cpu_dbs_info_s *j_dbs_info; 373 struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
521 return rc; 521 return rc;
522 } 522 }
523 523
524 for_each_cpu_mask(j, policy->cpus) { 524 for_each_cpu_mask_nr(j, policy->cpus) {
525 struct cpu_dbs_info_s *j_dbs_info; 525 struct cpu_dbs_info_s *j_dbs_info;
526 j_dbs_info = &per_cpu(cpu_dbs_info, j); 526 j_dbs_info = &per_cpu(cpu_dbs_info, j);
527 j_dbs_info->cur_policy = policy; 527 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01a41a1..32244aa7cc0c 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -30,16 +30,18 @@
30/** 30/**
31 * A few values needed by the userspace governor 31 * A few values needed by the userspace governor
32 */ 32 */
33static unsigned int cpu_max_freq[NR_CPUS]; 33static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
34static unsigned int cpu_min_freq[NR_CPUS]; 34static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
35static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */ 35static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
36static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */ 36static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
37static unsigned int cpu_is_managed[NR_CPUS]; 37 userspace */
38static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
38 39
39static DEFINE_MUTEX (userspace_mutex); 40static DEFINE_MUTEX (userspace_mutex);
40static int cpus_using_userspace_governor; 41static int cpus_using_userspace_governor;
41 42
42#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) 43#define dprintk(msg...) \
44 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
43 45
44/* keep track of frequency transitions */ 46/* keep track of frequency transitions */
45static int 47static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
48{ 50{
49 struct cpufreq_freqs *freq = data; 51 struct cpufreq_freqs *freq = data;
50 52
51 if (!cpu_is_managed[freq->cpu]) 53 if (!per_cpu(cpu_is_managed, freq->cpu))
52 return 0; 54 return 0;
53 55
54 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", 56 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
55 freq->cpu, freq->new); 57 freq->cpu, freq->new);
56 cpu_cur_freq[freq->cpu] = freq->new; 58 per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
57 59
58 return 0; 60 return 0;
59} 61}
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
77 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 79 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
78 80
79 mutex_lock(&userspace_mutex); 81 mutex_lock(&userspace_mutex);
80 if (!cpu_is_managed[policy->cpu]) 82 if (!per_cpu(cpu_is_managed, policy->cpu))
81 goto err; 83 goto err;
82 84
83 cpu_set_freq[policy->cpu] = freq; 85 per_cpu(cpu_set_freq, policy->cpu) = freq;
84 86
85 if (freq < cpu_min_freq[policy->cpu]) 87 if (freq < per_cpu(cpu_min_freq, policy->cpu))
86 freq = cpu_min_freq[policy->cpu]; 88 freq = per_cpu(cpu_min_freq, policy->cpu);
87 if (freq > cpu_max_freq[policy->cpu]) 89 if (freq > per_cpu(cpu_max_freq, policy->cpu))
88 freq = cpu_max_freq[policy->cpu]; 90 freq = per_cpu(cpu_max_freq, policy->cpu);
89 91
90 /* 92 /*
91 * We're safe from concurrent calls to ->target() here 93 * We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
104 106
105static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) 107static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
106{ 108{
107 return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]); 109 return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
108} 110}
109 111
110static int cpufreq_governor_userspace(struct cpufreq_policy *policy, 112static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
127 } 129 }
128 cpus_using_userspace_governor++; 130 cpus_using_userspace_governor++;
129 131
130 cpu_is_managed[cpu] = 1; 132 per_cpu(cpu_is_managed, cpu) = 1;
131 cpu_min_freq[cpu] = policy->min; 133 per_cpu(cpu_min_freq, cpu) = policy->min;
132 cpu_max_freq[cpu] = policy->max; 134 per_cpu(cpu_max_freq, cpu) = policy->max;
133 cpu_cur_freq[cpu] = policy->cur; 135 per_cpu(cpu_cur_freq, cpu) = policy->cur;
134 cpu_set_freq[cpu] = policy->cur; 136 per_cpu(cpu_set_freq, cpu) = policy->cur;
135 dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]); 137 dprintk("managing cpu %u started "
138 "(%u - %u kHz, currently %u kHz)\n",
139 cpu,
140 per_cpu(cpu_min_freq, cpu),
141 per_cpu(cpu_max_freq, cpu),
142 per_cpu(cpu_cur_freq, cpu));
136 143
137 mutex_unlock(&userspace_mutex); 144 mutex_unlock(&userspace_mutex);
138 break; 145 break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
145 CPUFREQ_TRANSITION_NOTIFIER); 152 CPUFREQ_TRANSITION_NOTIFIER);
146 } 153 }
147 154
148 cpu_is_managed[cpu] = 0; 155 per_cpu(cpu_is_managed, cpu) = 0;
149 cpu_min_freq[cpu] = 0; 156 per_cpu(cpu_min_freq, cpu) = 0;
150 cpu_max_freq[cpu] = 0; 157 per_cpu(cpu_max_freq, cpu) = 0;
151 cpu_set_freq[cpu] = 0; 158 per_cpu(cpu_set_freq, cpu) = 0;
152 dprintk("managing cpu %u stopped\n", cpu); 159 dprintk("managing cpu %u stopped\n", cpu);
153 mutex_unlock(&userspace_mutex); 160 mutex_unlock(&userspace_mutex);
154 break; 161 break;
155 case CPUFREQ_GOV_LIMITS: 162 case CPUFREQ_GOV_LIMITS:
156 mutex_lock(&userspace_mutex); 163 mutex_lock(&userspace_mutex);
157 dprintk("limit event for cpu %u: %u - %u kHz," 164 dprintk("limit event for cpu %u: %u - %u kHz, "
158 "currently %u kHz, last set to %u kHz\n", 165 "currently %u kHz, last set to %u kHz\n",
159 cpu, policy->min, policy->max, 166 cpu, policy->min, policy->max,
160 cpu_cur_freq[cpu], cpu_set_freq[cpu]); 167 per_cpu(cpu_cur_freq, cpu),
161 if (policy->max < cpu_set_freq[cpu]) { 168 per_cpu(cpu_set_freq, cpu));
169 if (policy->max < per_cpu(cpu_set_freq, cpu)) {
162 __cpufreq_driver_target(policy, policy->max, 170 __cpufreq_driver_target(policy, policy->max,
163 CPUFREQ_RELATION_H); 171 CPUFREQ_RELATION_H);
164 } 172 } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
165 else if (policy->min > cpu_set_freq[cpu]) {
166 __cpufreq_driver_target(policy, policy->min, 173 __cpufreq_driver_target(policy, policy->min,
167 CPUFREQ_RELATION_L); 174 CPUFREQ_RELATION_L);
168 } 175 } else {
169 else { 176 __cpufreq_driver_target(policy,
170 __cpufreq_driver_target(policy, cpu_set_freq[cpu], 177 per_cpu(cpu_set_freq, cpu),
171 CPUFREQ_RELATION_L); 178 CPUFREQ_RELATION_L);
172 } 179 }
173 cpu_min_freq[cpu] = policy->min; 180 per_cpu(cpu_min_freq, cpu) = policy->min;
174 cpu_max_freq[cpu] = policy->max; 181 per_cpu(cpu_max_freq, cpu) = policy->max;
175 cpu_cur_freq[cpu] = policy->cur; 182 per_cpu(cpu_cur_freq, cpu) = policy->cur;
176 mutex_unlock(&userspace_mutex); 183 mutex_unlock(&userspace_mutex);
177 break; 184 break;
178 } 185 }
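
cpufreq_userspace drops its NR_CPUS-sized arrays in favour of DEFINE_PER_CPU variables, replacing every "array[cpu]" access with per_cpu(var, cpu). A minimal sketch of the conversion, using a hypothetical counter:

#include <linux/percpu.h>

/* before: static unsigned int foo_cur_freq[NR_CPUS]; */
static DEFINE_PER_CPU(unsigned int, foo_cur_freq);

static void foo_save_freq(unsigned int cpu, unsigned int khz)
{
	/* before: foo_cur_freq[cpu] = khz; */
	per_cpu(foo_cur_freq, cpu) = khz;
}
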
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bf5b92f86df7..ec249d2db633 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,13 +28,29 @@
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/dca.h> 29#include <linux/dca.h>
30 30
31MODULE_LICENSE("GPL"); 31#define DCA_VERSION "1.4"
32 32
33/* For now we're assuming a single, global, DCA provider for the system. */ 33MODULE_VERSION(DCA_VERSION);
34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Intel Corporation");
34 36
35static DEFINE_SPINLOCK(dca_lock); 37static DEFINE_SPINLOCK(dca_lock);
36 38
37static struct dca_provider *global_dca = NULL; 39static LIST_HEAD(dca_providers);
40
41static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
42{
43 struct dca_provider *dca, *ret = NULL;
44
45 list_for_each_entry(dca, &dca_providers, node) {
46 if ((!dev) || (dca->ops->dev_managed(dca, dev))) {
47 ret = dca;
48 break;
49 }
50 }
51
52 return ret;
53}
38 54
39/** 55/**
40 * dca_add_requester - add a dca client to the list 56 * dca_add_requester - add a dca client to the list
@@ -42,25 +58,39 @@ static struct dca_provider *global_dca = NULL;
42 */ 58 */
43int dca_add_requester(struct device *dev) 59int dca_add_requester(struct device *dev)
44{ 60{
45 int err, slot; 61 struct dca_provider *dca;
62 int err, slot = -ENODEV;
46 63
47 if (!global_dca) 64 if (!dev)
48 return -ENODEV; 65 return -EFAULT;
49 66
50 spin_lock(&dca_lock); 67 spin_lock(&dca_lock);
51 slot = global_dca->ops->add_requester(global_dca, dev); 68
52 spin_unlock(&dca_lock); 69 /* check if the requester has not been added already */
53 if (slot < 0) 70 dca = dca_find_provider_by_dev(dev);
71 if (dca) {
72 spin_unlock(&dca_lock);
73 return -EEXIST;
74 }
75
76 list_for_each_entry(dca, &dca_providers, node) {
77 slot = dca->ops->add_requester(dca, dev);
78 if (slot >= 0)
79 break;
80 }
81 if (slot < 0) {
82 spin_unlock(&dca_lock);
54 return slot; 83 return slot;
84 }
55 85
56 err = dca_sysfs_add_req(global_dca, dev, slot); 86 err = dca_sysfs_add_req(dca, dev, slot);
57 if (err) { 87 if (err) {
58 spin_lock(&dca_lock); 88 dca->ops->remove_requester(dca, dev);
59 global_dca->ops->remove_requester(global_dca, dev);
60 spin_unlock(&dca_lock); 89 spin_unlock(&dca_lock);
61 return err; 90 return err;
62 } 91 }
63 92
93 spin_unlock(&dca_lock);
64 return 0; 94 return 0;
65} 95}
66EXPORT_SYMBOL_GPL(dca_add_requester); 96EXPORT_SYMBOL_GPL(dca_add_requester);
@@ -71,30 +101,78 @@ EXPORT_SYMBOL_GPL(dca_add_requester);
71 */ 101 */
72int dca_remove_requester(struct device *dev) 102int dca_remove_requester(struct device *dev)
73{ 103{
104 struct dca_provider *dca;
74 int slot; 105 int slot;
75 if (!global_dca) 106
76 return -ENODEV; 107 if (!dev)
108 return -EFAULT;
77 109
78 spin_lock(&dca_lock); 110 spin_lock(&dca_lock);
79 slot = global_dca->ops->remove_requester(global_dca, dev); 111 dca = dca_find_provider_by_dev(dev);
80 spin_unlock(&dca_lock); 112 if (!dca) {
81 if (slot < 0) 113 spin_unlock(&dca_lock);
114 return -ENODEV;
115 }
116 slot = dca->ops->remove_requester(dca, dev);
117 if (slot < 0) {
118 spin_unlock(&dca_lock);
82 return slot; 119 return slot;
120 }
83 121
84 dca_sysfs_remove_req(global_dca, slot); 122 dca_sysfs_remove_req(dca, slot);
123
124 spin_unlock(&dca_lock);
85 return 0; 125 return 0;
86} 126}
87EXPORT_SYMBOL_GPL(dca_remove_requester); 127EXPORT_SYMBOL_GPL(dca_remove_requester);
88 128
89/** 129/**
90 * dca_get_tag - return the dca tag for the given cpu 130 * dca_common_get_tag - return the dca tag (serves both new and old api)
131 * @dev - the device that wants dca service
91 * @cpu - the cpuid as returned by get_cpu() 132 * @cpu - the cpuid as returned by get_cpu()
92 */ 133 */
93u8 dca_get_tag(int cpu) 134u8 dca_common_get_tag(struct device *dev, int cpu)
94{ 135{
95 if (!global_dca) 136 struct dca_provider *dca;
137 u8 tag;
138
139 spin_lock(&dca_lock);
140
141 dca = dca_find_provider_by_dev(dev);
142 if (!dca) {
143 spin_unlock(&dca_lock);
96 return -ENODEV; 144 return -ENODEV;
97 return global_dca->ops->get_tag(global_dca, cpu); 145 }
146 tag = dca->ops->get_tag(dca, dev, cpu);
147
148 spin_unlock(&dca_lock);
149 return tag;
150}
151
152/**
153 * dca3_get_tag - return the dca tag to the requester device
154 * for the given cpu (new api)
155 * @dev - the device that wants dca service
156 * @cpu - the cpuid as returned by get_cpu()
157 */
158u8 dca3_get_tag(struct device *dev, int cpu)
159{
160 if (!dev)
161 return -EFAULT;
162
163 return dca_common_get_tag(dev, cpu);
164}
165EXPORT_SYMBOL_GPL(dca3_get_tag);
166
167/**
168 * dca_get_tag - return the dca tag for the given cpu (old api)
169 * @cpu - the cpuid as returned by get_cpu()
170 */
171u8 dca_get_tag(int cpu)
172{
173 struct device *dev = NULL;
174
175 return dca_common_get_tag(dev, cpu);
98} 176}
99EXPORT_SYMBOL_GPL(dca_get_tag); 177EXPORT_SYMBOL_GPL(dca_get_tag);
100 178
@@ -140,12 +218,10 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
140{ 218{
141 int err; 219 int err;
142 220
143 if (global_dca)
144 return -EEXIST;
145 err = dca_sysfs_add_provider(dca, dev); 221 err = dca_sysfs_add_provider(dca, dev);
146 if (err) 222 if (err)
147 return err; 223 return err;
148 global_dca = dca; 224 list_add(&dca->node, &dca_providers);
149 blocking_notifier_call_chain(&dca_provider_chain, 225 blocking_notifier_call_chain(&dca_provider_chain,
150 DCA_PROVIDER_ADD, NULL); 226 DCA_PROVIDER_ADD, NULL);
151 return 0; 227 return 0;
@@ -158,11 +234,9 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
158 */ 234 */
159void unregister_dca_provider(struct dca_provider *dca) 235void unregister_dca_provider(struct dca_provider *dca)
160{ 236{
161 if (!global_dca)
162 return;
163 blocking_notifier_call_chain(&dca_provider_chain, 237 blocking_notifier_call_chain(&dca_provider_chain,
164 DCA_PROVIDER_REMOVE, NULL); 238 DCA_PROVIDER_REMOVE, NULL);
165 global_dca = NULL; 239 list_del(&dca->node);
166 dca_sysfs_remove_provider(dca); 240 dca_sysfs_remove_provider(dca);
167} 241}
168EXPORT_SYMBOL_GPL(unregister_dca_provider); 242EXPORT_SYMBOL_GPL(unregister_dca_provider);
@@ -187,6 +261,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);
187 261
188static int __init dca_init(void) 262static int __init dca_init(void)
189{ 263{
264 printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION);
190 return dca_sysfs_init(); 265 return dca_sysfs_init();
191} 266}
192 267
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 9a70377bfb34..7af4b403bd2d 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -13,10 +13,11 @@ static spinlock_t dca_idr_lock;
13int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) 13int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
14{ 14{
15 struct device *cd; 15 struct device *cd;
16 static int req_count;
16 17
17 cd = device_create_drvdata(dca_class, dca->cd, 18 cd = device_create_drvdata(dca_class, dca->cd,
18 MKDEV(0, slot + 1), NULL, 19 MKDEV(0, slot + 1), NULL,
19 "requester%d", slot); 20 "requester%d", req_count++);
20 if (IS_ERR(cd)) 21 if (IS_ERR(cd))
21 return PTR_ERR(cd); 22 return PTR_ERR(cd);
22 return 0; 23 return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6239c3df30ac..cd303901eb5b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,13 +4,14 @@
4 4
5menuconfig DMADEVICES 5menuconfig DMADEVICES
6 bool "DMA Engine support" 6 bool "DMA Engine support"
7 depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC 7 depends on !HIGHMEM64G && HAS_DMA
8 depends on !HIGHMEM64G
9 help 8 help
10 DMA engines can do asynchronous data transfers without 9 DMA engines can do asynchronous data transfers without
11 involving the host CPU. Currently, this framework can be 10 involving the host CPU. Currently, this framework can be
12 used to offload memory copies in the network stack and 11 used to offload memory copies in the network stack and
13 RAID operations in the MD driver. 12 RAID operations in the MD driver. This menu only presents
13 DMA Device drivers supported by the configured arch, it may
14 be empty in some cases.
14 15
15if DMADEVICES 16if DMADEVICES
16 17
@@ -37,6 +38,15 @@ config INTEL_IOP_ADMA
37 help 38 help
38 Enable support for the Intel(R) IOP Series RAID engines. 39 Enable support for the Intel(R) IOP Series RAID engines.
39 40
41config DW_DMAC
42 tristate "Synopsys DesignWare AHB DMA support"
43 depends on AVR32
44 select DMA_ENGINE
45 default y if CPU_AT32AP7000
46 help
47 Support the Synopsys DesignWare AHB DMA controller. This
48 can be integrated in chips such as the Atmel AT32ap7000.
49
40config FSL_DMA 50config FSL_DMA
41 bool "Freescale MPC85xx/MPC83xx DMA support" 51 bool "Freescale MPC85xx/MPC83xx DMA support"
42 depends on PPC 52 depends on PPC
@@ -46,6 +56,14 @@ config FSL_DMA
46 MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. 56 MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
47 The MPC8349, MPC8360 is also supported. 57 The MPC8349, MPC8360 is also supported.
48 58
59config MV_XOR
60 bool "Marvell XOR engine support"
61 depends on PLAT_ORION
62 select ASYNC_CORE
63 select DMA_ENGINE
64 ---help---
65 Enable support for the Marvell XOR engine.
66
49config DMA_ENGINE 67config DMA_ENGINE
50 bool 68 bool
51 69
@@ -55,10 +73,19 @@ comment "DMA Clients"
55config NET_DMA 73config NET_DMA
56 bool "Network: TCP receive copy offload" 74 bool "Network: TCP receive copy offload"
57 depends on DMA_ENGINE && NET 75 depends on DMA_ENGINE && NET
76 default (INTEL_IOATDMA || FSL_DMA)
58 help 77 help
59 This enables the use of DMA engines in the network stack to 78 This enables the use of DMA engines in the network stack to
60 offload receive copy-to-user operations, freeing CPU cycles. 79 offload receive copy-to-user operations, freeing CPU cycles.
61 Since this is the main user of the DMA engine, it should be enabled; 80
62 say Y here. 81 Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
82 say N.
83
84config DMATEST
85 tristate "DMA Test client"
86 depends on DMA_ENGINE
87 help
88 Simple DMA test client. Say N unless you're debugging a
89 DMA Device driver.
63 90
64endif 91endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c8036d945902..14f59527d4f6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,6 +1,9 @@
1obj-$(CONFIG_DMA_ENGINE) += dmaengine.o 1obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
2obj-$(CONFIG_NET_DMA) += iovlock.o 2obj-$(CONFIG_NET_DMA) += iovlock.o
3obj-$(CONFIG_DMATEST) += dmatest.o
3obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o 4obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
4ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o 5ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
5obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o 6obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
6obj-$(CONFIG_FSL_DMA) += fsldma.o 7obj-$(CONFIG_FSL_DMA) += fsldma.o
8obj-$(CONFIG_MV_XOR) += mv_xor.o
9obj-$(CONFIG_DW_DMAC) += dw_dmac.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 97b329e76798..dc003a3a787d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client)
169 enum dma_state_client ack; 169 enum dma_state_client ack;
170 170
171 /* Find a channel */ 171 /* Find a channel */
172 list_for_each_entry(device, &dma_device_list, global_node) 172 list_for_each_entry(device, &dma_device_list, global_node) {
173 /* Does the client require a specific DMA controller? */
174 if (client->slave && client->slave->dma_dev
175 && client->slave->dma_dev != device->dev)
176 continue;
177
173 list_for_each_entry(chan, &device->channels, device_node) { 178 list_for_each_entry(chan, &device->channels, device_node) {
174 if (!dma_chan_satisfies_mask(chan, client->cap_mask)) 179 if (!dma_chan_satisfies_mask(chan, client->cap_mask))
175 continue; 180 continue;
176 181
177 desc = chan->device->device_alloc_chan_resources(chan); 182 desc = chan->device->device_alloc_chan_resources(
183 chan, client);
178 if (desc >= 0) { 184 if (desc >= 0) {
179 ack = client->event_callback(client, 185 ack = client->event_callback(client,
180 chan, 186 chan,
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client)
183 /* we are done once this client rejects 189 /* we are done once this client rejects
184 * an available resource 190 * an available resource
185 */ 191 */
186 if (ack == DMA_ACK) 192 if (ack == DMA_ACK) {
187 dma_chan_get(chan); 193 dma_chan_get(chan);
188 else if (ack == DMA_NAK) 194 chan->client_count++;
195 } else if (ack == DMA_NAK)
189 return; 196 return;
190 } 197 }
191 } 198 }
199 }
192} 200}
193 201
194enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 202enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
272 /* client was holding resources for this channel so 280 /* client was holding resources for this channel so
273 * free it 281 * free it
274 */ 282 */
275 if (ack == DMA_ACK) 283 if (ack == DMA_ACK) {
276 dma_chan_put(chan); 284 dma_chan_put(chan);
285 chan->client_count--;
286 }
277 } 287 }
278 288
279 mutex_unlock(&dma_list_mutex); 289 mutex_unlock(&dma_list_mutex);
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
285 */ 295 */
286void dma_async_client_register(struct dma_client *client) 296void dma_async_client_register(struct dma_client *client)
287{ 297{
298 /* validate client data */
299 BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
300 !client->slave);
301
288 mutex_lock(&dma_list_mutex); 302 mutex_lock(&dma_list_mutex);
289 list_add_tail(&client->global_node, &dma_client_list); 303 list_add_tail(&client->global_node, &dma_client_list);
290 mutex_unlock(&dma_list_mutex); 304 mutex_unlock(&dma_list_mutex);
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client)
313 ack = client->event_callback(client, chan, 327 ack = client->event_callback(client, chan,
314 DMA_RESOURCE_REMOVED); 328 DMA_RESOURCE_REMOVED);
315 329
316 if (ack == DMA_ACK) 330 if (ack == DMA_ACK) {
317 dma_chan_put(chan); 331 dma_chan_put(chan);
332 chan->client_count--;
333 }
318 } 334 }
319 335
320 list_del(&client->global_node); 336 list_del(&client->global_node);
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device)
359 !device->device_prep_dma_memset); 375 !device->device_prep_dma_memset);
360 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 376 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
361 !device->device_prep_dma_interrupt); 377 !device->device_prep_dma_interrupt);
378 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
379 !device->device_prep_slave_sg);
380 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
381 !device->device_terminate_all);
362 382
363 BUG_ON(!device->device_alloc_chan_resources); 383 BUG_ON(!device->device_alloc_chan_resources);
364 BUG_ON(!device->device_free_chan_resources); 384 BUG_ON(!device->device_free_chan_resources);
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device)
378 398
379 chan->chan_id = chancnt++; 399 chan->chan_id = chancnt++;
380 chan->dev.class = &dma_devclass; 400 chan->dev.class = &dma_devclass;
381 chan->dev.parent = NULL; 401 chan->dev.parent = device->dev;
382 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", 402 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
383 device->dev_id, chan->chan_id); 403 device->dev_id, chan->chan_id);
384 404
@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device)
 		kref_get(&device->refcount);
 		kref_get(&device->refcount);
 		kref_init(&chan->refcount);
+		chan->client_count = 0;
 		chan->slow_ref = 0;
 		INIT_RCU_HEAD(&chan->rcu);
 	}
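
The new BUG_ON()s above pin down the slave contract on both sides: a client that sets DMA_SLAVE in its capability mask must supply client->slave, and a device advertising DMA_SLAVE must implement device_prep_slave_sg() and device_terminate_all(). For illustration only, a minimal sketch of a conforming DMA_SLAVE client; the my_* names are hypothetical placeholders, not part of this patch:

#include <linux/dmaengine.h>

static enum dma_state_client my_event(struct dma_client *client,
		struct dma_chan *chan, enum dma_state state)
{
	/* claim or release channels here, much as dmatest_event() does below */
	return DMA_DUP;
}

/* .dma_dev must point at the slave DMA controller's struct device;
 * .tx_reg/.rx_reg/.reg_width would describe the peripheral's data register.
 */
static struct dma_slave my_slave;

static struct dma_client my_client = {
	.event_callback	= my_event,
	.slave		= &my_slave,	/* mandatory once DMA_SLAVE is set */
};

static int __init my_client_init(void)
{
	dma_cap_set(DMA_SLAVE, my_client.cap_mask);
	dma_async_client_register(&my_client);	/* would BUG_ON() if .slave were NULL */
	dma_async_client_chan_request(&my_client);
	return 0;
}
module_init(my_client_init);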
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
new file mode 100644
index 000000000000..a08d19704743
--- /dev/null
+++ b/drivers/dma/dmatest.c
@@ -0,0 +1,444 @@
1/*
2 * DMA Engine test module
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/delay.h>
11#include <linux/dmaengine.h>
12#include <linux/init.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/random.h>
17#include <linux/wait.h>
18
19static unsigned int test_buf_size = 16384;
20module_param(test_buf_size, uint, S_IRUGO);
21MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
22
23static char test_channel[BUS_ID_SIZE];
24module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
25MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
26
27static char test_device[BUS_ID_SIZE];
28module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
29MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
30
31static unsigned int threads_per_chan = 1;
32module_param(threads_per_chan, uint, S_IRUGO);
33MODULE_PARM_DESC(threads_per_chan,
34 "Number of threads to start per channel (default: 1)");
35
36static unsigned int max_channels;
37module_param(max_channels, uint, S_IRUGO);
 38MODULE_PARM_DESC(max_channels,
39 "Maximum number of channels to use (default: all)");
40
41/*
 42 * Initialization patterns. All bytes in the source buffer have bit 7
 43 * set, all bytes in the destination buffer have bit 7 cleared.
44 *
45 * Bit 6 is set for all bytes which are to be copied by the DMA
46 * engine. Bit 5 is set for all bytes which are to be overwritten by
47 * the DMA engine.
48 *
49 * The remaining bits are the inverse of a counter which increments by
50 * one for each byte address.
51 */
52#define PATTERN_SRC 0x80
53#define PATTERN_DST 0x00
54#define PATTERN_COPY 0x40
55#define PATTERN_OVERWRITE 0x20
56#define PATTERN_COUNT_MASK 0x1f
57
58struct dmatest_thread {
59 struct list_head node;
60 struct task_struct *task;
61 struct dma_chan *chan;
62 u8 *srcbuf;
63 u8 *dstbuf;
64};
65
66struct dmatest_chan {
67 struct list_head node;
68 struct dma_chan *chan;
69 struct list_head threads;
70};
71
72/*
73 * These are protected by dma_list_mutex since they're only used by
74 * the DMA client event callback
75 */
76static LIST_HEAD(dmatest_channels);
77static unsigned int nr_channels;
78
79static bool dmatest_match_channel(struct dma_chan *chan)
80{
81 if (test_channel[0] == '\0')
82 return true;
83 return strcmp(chan->dev.bus_id, test_channel) == 0;
84}
85
86static bool dmatest_match_device(struct dma_device *device)
87{
88 if (test_device[0] == '\0')
89 return true;
90 return strcmp(device->dev->bus_id, test_device) == 0;
91}
92
93static unsigned long dmatest_random(void)
94{
95 unsigned long buf;
96
97 get_random_bytes(&buf, sizeof(buf));
98 return buf;
99}
100
101static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
102{
103 unsigned int i;
104
105 for (i = 0; i < start; i++)
106 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
107 for ( ; i < start + len; i++)
108 buf[i] = PATTERN_SRC | PATTERN_COPY
109 | (~i & PATTERN_COUNT_MASK);
110 for ( ; i < test_buf_size; i++)
111 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
112}
113
114static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
115{
116 unsigned int i;
117
118 for (i = 0; i < start; i++)
119 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
120 for ( ; i < start + len; i++)
121 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
122 | (~i & PATTERN_COUNT_MASK);
123 for ( ; i < test_buf_size; i++)
124 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
125}
126
127static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
128 unsigned int counter, bool is_srcbuf)
129{
130 u8 diff = actual ^ pattern;
131 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
132 const char *thread_name = current->comm;
133
134 if (is_srcbuf)
135 pr_warning("%s: srcbuf[0x%x] overwritten!"
136 " Expected %02x, got %02x\n",
137 thread_name, index, expected, actual);
138 else if ((pattern & PATTERN_COPY)
139 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
140 pr_warning("%s: dstbuf[0x%x] not copied!"
141 " Expected %02x, got %02x\n",
142 thread_name, index, expected, actual);
143 else if (diff & PATTERN_SRC)
144 pr_warning("%s: dstbuf[0x%x] was copied!"
145 " Expected %02x, got %02x\n",
146 thread_name, index, expected, actual);
147 else
148 pr_warning("%s: dstbuf[0x%x] mismatch!"
149 " Expected %02x, got %02x\n",
150 thread_name, index, expected, actual);
151}
152
153static unsigned int dmatest_verify(u8 *buf, unsigned int start,
154 unsigned int end, unsigned int counter, u8 pattern,
155 bool is_srcbuf)
156{
157 unsigned int i;
158 unsigned int error_count = 0;
159 u8 actual;
160
161 for (i = start; i < end; i++) {
162 actual = buf[i];
163 if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
164 if (error_count < 32)
165 dmatest_mismatch(actual, pattern, i, counter,
166 is_srcbuf);
167 error_count++;
168 }
169 counter++;
170 }
171
172 if (error_count > 32)
173 pr_warning("%s: %u errors suppressed\n",
174 current->comm, error_count - 32);
175
176 return error_count;
177}
178
179/*
180 * This function repeatedly tests DMA transfers of various lengths and
181 * offsets until it is told to exit by kthread_stop(). There may be
182 * multiple threads running this function in parallel for a single
183 * channel, and there may be multiple channels being tested in
184 * parallel.
185 *
 186 * Before each test, the source and destination buffers are initialized
187 * with a known pattern. This pattern is different depending on
188 * whether it's in an area which is supposed to be copied or
189 * overwritten, and different in the source and destination buffers.
190 * So if the DMA engine doesn't copy exactly what we tell it to copy,
191 * we'll notice.
192 */
193static int dmatest_func(void *data)
194{
195 struct dmatest_thread *thread = data;
196 struct dma_chan *chan;
197 const char *thread_name;
198 unsigned int src_off, dst_off, len;
199 unsigned int error_count;
200 unsigned int failed_tests = 0;
201 unsigned int total_tests = 0;
202 dma_cookie_t cookie;
203 enum dma_status status;
204 int ret;
205
206 thread_name = current->comm;
207
208 ret = -ENOMEM;
209 thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
210 if (!thread->srcbuf)
211 goto err_srcbuf;
212 thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
213 if (!thread->dstbuf)
214 goto err_dstbuf;
215
216 smp_rmb();
217 chan = thread->chan;
218 dma_chan_get(chan);
219
220 while (!kthread_should_stop()) {
221 total_tests++;
222
223 len = dmatest_random() % test_buf_size + 1;
224 src_off = dmatest_random() % (test_buf_size - len + 1);
225 dst_off = dmatest_random() % (test_buf_size - len + 1);
226
227 dmatest_init_srcbuf(thread->srcbuf, src_off, len);
228 dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
229
230 cookie = dma_async_memcpy_buf_to_buf(chan,
231 thread->dstbuf + dst_off,
232 thread->srcbuf + src_off,
233 len);
234 if (dma_submit_error(cookie)) {
235 pr_warning("%s: #%u: submit error %d with src_off=0x%x "
236 "dst_off=0x%x len=0x%x\n",
237 thread_name, total_tests - 1, cookie,
238 src_off, dst_off, len);
239 msleep(100);
240 failed_tests++;
241 continue;
242 }
243 dma_async_memcpy_issue_pending(chan);
244
245 do {
246 msleep(1);
247 status = dma_async_memcpy_complete(
248 chan, cookie, NULL, NULL);
249 } while (status == DMA_IN_PROGRESS);
250
251 if (status == DMA_ERROR) {
252 pr_warning("%s: #%u: error during copy\n",
253 thread_name, total_tests - 1);
254 failed_tests++;
255 continue;
256 }
257
258 error_count = 0;
259
260 pr_debug("%s: verifying source buffer...\n", thread_name);
261 error_count += dmatest_verify(thread->srcbuf, 0, src_off,
262 0, PATTERN_SRC, true);
263 error_count += dmatest_verify(thread->srcbuf, src_off,
264 src_off + len, src_off,
265 PATTERN_SRC | PATTERN_COPY, true);
266 error_count += dmatest_verify(thread->srcbuf, src_off + len,
267 test_buf_size, src_off + len,
268 PATTERN_SRC, true);
269
270 pr_debug("%s: verifying dest buffer...\n",
271 thread->task->comm);
272 error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
273 0, PATTERN_DST, false);
274 error_count += dmatest_verify(thread->dstbuf, dst_off,
275 dst_off + len, src_off,
276 PATTERN_SRC | PATTERN_COPY, false);
277 error_count += dmatest_verify(thread->dstbuf, dst_off + len,
278 test_buf_size, dst_off + len,
279 PATTERN_DST, false);
280
281 if (error_count) {
282 pr_warning("%s: #%u: %u errors with "
283 "src_off=0x%x dst_off=0x%x len=0x%x\n",
284 thread_name, total_tests - 1, error_count,
285 src_off, dst_off, len);
286 failed_tests++;
287 } else {
288 pr_debug("%s: #%u: No errors with "
289 "src_off=0x%x dst_off=0x%x len=0x%x\n",
290 thread_name, total_tests - 1,
291 src_off, dst_off, len);
292 }
293 }
294
295 ret = 0;
296 dma_chan_put(chan);
297 kfree(thread->dstbuf);
298err_dstbuf:
299 kfree(thread->srcbuf);
300err_srcbuf:
301 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
302 thread_name, total_tests, failed_tests, ret);
303 return ret;
304}
305
306static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
307{
308 struct dmatest_thread *thread;
309 struct dmatest_thread *_thread;
310 int ret;
311
312 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
313 ret = kthread_stop(thread->task);
314 pr_debug("dmatest: thread %s exited with status %d\n",
315 thread->task->comm, ret);
316 list_del(&thread->node);
317 kfree(thread);
318 }
319 kfree(dtc);
320}
321
322static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
323{
324 struct dmatest_chan *dtc;
325 struct dmatest_thread *thread;
326 unsigned int i;
327
328 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
329 if (!dtc) {
330 pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
331 return DMA_NAK;
332 }
333
334 dtc->chan = chan;
335 INIT_LIST_HEAD(&dtc->threads);
336
337 for (i = 0; i < threads_per_chan; i++) {
338 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
339 if (!thread) {
340 pr_warning("dmatest: No memory for %s-test%u\n",
341 chan->dev.bus_id, i);
342 break;
343 }
344 thread->chan = dtc->chan;
345 smp_wmb();
346 thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
347 chan->dev.bus_id, i);
348 if (IS_ERR(thread->task)) {
349 pr_warning("dmatest: Failed to run thread %s-test%u\n",
350 chan->dev.bus_id, i);
351 kfree(thread);
352 break;
353 }
354
355 /* srcbuf and dstbuf are allocated by the thread itself */
356
357 list_add_tail(&thread->node, &dtc->threads);
358 }
359
360 pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);
361
362 list_add_tail(&dtc->node, &dmatest_channels);
363 nr_channels++;
364
365 return DMA_ACK;
366}
367
368static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
369{
370 struct dmatest_chan *dtc, *_dtc;
371
372 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
373 if (dtc->chan == chan) {
374 list_del(&dtc->node);
375 dmatest_cleanup_channel(dtc);
376 pr_debug("dmatest: lost channel %s\n",
377 chan->dev.bus_id);
378 return DMA_ACK;
379 }
380 }
381
382 return DMA_DUP;
383}
384
385/*
386 * Start testing threads as new channels are assigned to us, and kill
387 * them when the channels go away.
388 *
389 * When we unregister the client, all channels are removed so this
390 * will also take care of cleaning things up when the module is
391 * unloaded.
392 */
393static enum dma_state_client
394dmatest_event(struct dma_client *client, struct dma_chan *chan,
395 enum dma_state state)
396{
397 enum dma_state_client ack = DMA_NAK;
398
399 switch (state) {
400 case DMA_RESOURCE_AVAILABLE:
401 if (!dmatest_match_channel(chan)
402 || !dmatest_match_device(chan->device))
403 ack = DMA_DUP;
404 else if (max_channels && nr_channels >= max_channels)
405 ack = DMA_NAK;
406 else
407 ack = dmatest_add_channel(chan);
408 break;
409
410 case DMA_RESOURCE_REMOVED:
411 ack = dmatest_remove_channel(chan);
412 break;
413
414 default:
415 pr_info("dmatest: Unhandled event %u (%s)\n",
416 state, chan->dev.bus_id);
417 break;
418 }
419
420 return ack;
421}
422
423static struct dma_client dmatest_client = {
424 .event_callback = dmatest_event,
425};
426
427static int __init dmatest_init(void)
428{
429 dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
430 dma_async_client_register(&dmatest_client);
431 dma_async_client_chan_request(&dmatest_client);
432
433 return 0;
434}
435module_init(dmatest_init);
436
437static void __exit dmatest_exit(void)
438{
439 dma_async_client_unregister(&dmatest_client);
440}
441module_exit(dmatest_exit);
442
443MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
444MODULE_LICENSE("GPL v2");
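
To make the pattern scheme concrete: inside the region that is supposed to be copied, byte i of the source buffer is PATTERN_SRC | PATTERN_COPY | (~i & PATTERN_COUNT_MASK), and dmatest_verify() recomputes exactly that value when checking the result. A standalone sketch of the arithmetic (plain user-space C, not part of the module):

#include <stdio.h>

#define PATTERN_SRC		0x80
#define PATTERN_COPY		0x40
#define PATTERN_COUNT_MASK	0x1f

int main(void)
{
	unsigned int i;

	/* First bytes of the to-be-copied region when it starts at offset 0:
	 * 0xdf, 0xde, 0xdd, 0xdc, ... (the low five bits count down).
	 */
	for (i = 0; i < 4; i++) {
		unsigned int expected = PATTERN_SRC | PATTERN_COPY |
					(~i & PATTERN_COUNT_MASK);

		printf("srcbuf[%u] = 0x%02x\n", i, expected);
	}
	return 0;
}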
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
new file mode 100644
index 000000000000..94df91771243
--- /dev/null
+++ b/drivers/dma/dw_dmac.c
@@ -0,0 +1,1122 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 3 * AVR32 systems).
4 *
5 * Copyright (C) 2007-2008 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/clk.h>
12#include <linux/delay.h>
13#include <linux/dmaengine.h>
14#include <linux/dma-mapping.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22
23#include "dw_dmac_regs.h"
24
25/*
 26 * This supports the Synopsys "DesignWare AHB Central DMA Controller"
 27 * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
28 * of which use ARM any more). See the "Databook" from Synopsys for
29 * information beyond what licensees probably provide.
30 *
31 * The driver has currently been tested only with the Atmel AT32AP7000,
32 * which does not support descriptor writeback.
33 */
34
35/* NOTE: DMS+SMS is system-specific. We should get this information
36 * from the platform code somehow.
37 */
38#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
39 | DWC_CTLL_SRC_MSIZE(0) \
40 | DWC_CTLL_DMS(0) \
41 | DWC_CTLL_SMS(1) \
42 | DWC_CTLL_LLP_D_EN \
43 | DWC_CTLL_LLP_S_EN)
44
45/*
46 * This is configuration-dependent and usually a funny size like 4095.
47 * Let's round it down to the nearest power of two.
48 *
49 * Note that this is a transfer count, i.e. if we transfer 32-bit
50 * words, we can do 8192 bytes per descriptor.
51 *
52 * This parameter is also system-specific.
53 */
54#define DWC_MAX_COUNT 2048U
55
56/*
57 * Number of descriptors to allocate for each channel. This should be
58 * made configurable somehow; preferably, the clients (at least the
59 * ones using slave transfers) should be able to give us a hint.
60 */
61#define NR_DESCS_PER_CHANNEL 64
62
63/*----------------------------------------------------------------------*/
64
65/*
66 * Because we're not relying on writeback from the controller (it may not
67 * even be configured into the core!) we don't need to use dma_pool. These
68 * descriptors -- and associated data -- are cacheable. We do need to make
69 * sure their dcache entries are written back before handing them off to
70 * the controller, though.
71 */
72
73static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
74{
75 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
76}
77
78static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
79{
80 return list_entry(dwc->queue.next, struct dw_desc, desc_node);
81}
82
83static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
84{
85 struct dw_desc *desc, *_desc;
86 struct dw_desc *ret = NULL;
87 unsigned int i = 0;
88
89 spin_lock_bh(&dwc->lock);
90 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
91 if (async_tx_test_ack(&desc->txd)) {
92 list_del(&desc->desc_node);
93 ret = desc;
94 break;
95 }
96 dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
97 i++;
98 }
99 spin_unlock_bh(&dwc->lock);
100
101 dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
102
103 return ret;
104}
105
106static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
107{
108 struct dw_desc *child;
109
110 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
111 dma_sync_single_for_cpu(dwc->chan.dev.parent,
112 child->txd.phys, sizeof(child->lli),
113 DMA_TO_DEVICE);
114 dma_sync_single_for_cpu(dwc->chan.dev.parent,
115 desc->txd.phys, sizeof(desc->lli),
116 DMA_TO_DEVICE);
117}
118
119/*
120 * Move a descriptor, including any children, to the free list.
121 * `desc' must not be on any lists.
122 */
123static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124{
125 if (desc) {
126 struct dw_desc *child;
127
128 dwc_sync_desc_for_cpu(dwc, desc);
129
130 spin_lock_bh(&dwc->lock);
131 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
132 dev_vdbg(&dwc->chan.dev,
133 "moving child desc %p to freelist\n",
134 child);
135 list_splice_init(&desc->txd.tx_list, &dwc->free_list);
136 dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
137 list_add(&desc->desc_node, &dwc->free_list);
138 spin_unlock_bh(&dwc->lock);
139 }
140}
141
142/* Called with dwc->lock held and bh disabled */
143static dma_cookie_t
144dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
145{
146 dma_cookie_t cookie = dwc->chan.cookie;
147
148 if (++cookie < 0)
149 cookie = 1;
150
151 dwc->chan.cookie = cookie;
152 desc->txd.cookie = cookie;
153
154 return cookie;
155}
156
157/*----------------------------------------------------------------------*/
158
159/* Called with dwc->lock held and bh disabled */
160static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
161{
162 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
163
164 /* ASSERT: channel is idle */
165 if (dma_readl(dw, CH_EN) & dwc->mask) {
166 dev_err(&dwc->chan.dev,
167 "BUG: Attempted to start non-idle channel\n");
168 dev_err(&dwc->chan.dev,
169 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
170 channel_readl(dwc, SAR),
171 channel_readl(dwc, DAR),
172 channel_readl(dwc, LLP),
173 channel_readl(dwc, CTL_HI),
174 channel_readl(dwc, CTL_LO));
175
176 /* The tasklet will hopefully advance the queue... */
177 return;
178 }
179
180 channel_writel(dwc, LLP, first->txd.phys);
181 channel_writel(dwc, CTL_LO,
182 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
183 channel_writel(dwc, CTL_HI, 0);
184 channel_set_bit(dw, CH_EN, dwc->mask);
185}
186
187/*----------------------------------------------------------------------*/
188
189static void
190dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
191{
192 dma_async_tx_callback callback;
193 void *param;
194 struct dma_async_tx_descriptor *txd = &desc->txd;
195
196 dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
197
198 dwc->completed = txd->cookie;
199 callback = txd->callback;
200 param = txd->callback_param;
201
202 dwc_sync_desc_for_cpu(dwc, desc);
203 list_splice_init(&txd->tx_list, &dwc->free_list);
204 list_move(&desc->desc_node, &dwc->free_list);
205
206 /*
207 * We use dma_unmap_page() regardless of how the buffers were
208 * mapped before they were submitted...
209 */
210 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
211 dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
212 DMA_FROM_DEVICE);
213 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
214 dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
215 DMA_TO_DEVICE);
216
217 /*
218 * The API requires that no submissions are done from a
219 * callback, so we don't need to drop the lock here
220 */
221 if (callback)
222 callback(param);
223}
224
225static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
226{
227 struct dw_desc *desc, *_desc;
228 LIST_HEAD(list);
229
230 if (dma_readl(dw, CH_EN) & dwc->mask) {
231 dev_err(&dwc->chan.dev,
232 "BUG: XFER bit set, but channel not idle!\n");
233
234 /* Try to continue after resetting the channel... */
235 channel_clear_bit(dw, CH_EN, dwc->mask);
236 while (dma_readl(dw, CH_EN) & dwc->mask)
237 cpu_relax();
238 }
239
240 /*
241 * Submit queued descriptors ASAP, i.e. before we go through
242 * the completed ones.
243 */
244 if (!list_empty(&dwc->queue))
245 dwc_dostart(dwc, dwc_first_queued(dwc));
246 list_splice_init(&dwc->active_list, &list);
247 list_splice_init(&dwc->queue, &dwc->active_list);
248
249 list_for_each_entry_safe(desc, _desc, &list, desc_node)
250 dwc_descriptor_complete(dwc, desc);
251}
252
253static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
254{
255 dma_addr_t llp;
256 struct dw_desc *desc, *_desc;
257 struct dw_desc *child;
258 u32 status_xfer;
259
260 /*
261 * Clear block interrupt flag before scanning so that we don't
262 * miss any, and read LLP before RAW_XFER to ensure it is
263 * valid if we decide to scan the list.
264 */
265 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
266 llp = channel_readl(dwc, LLP);
267 status_xfer = dma_readl(dw, RAW.XFER);
268
269 if (status_xfer & dwc->mask) {
270 /* Everything we've submitted is done */
271 dma_writel(dw, CLEAR.XFER, dwc->mask);
272 dwc_complete_all(dw, dwc);
273 return;
274 }
275
276 dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
277
278 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
279 if (desc->lli.llp == llp)
280 /* This one is currently in progress */
281 return;
282
283 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
284 if (child->lli.llp == llp)
285 /* Currently in progress */
286 return;
287
288 /*
289 * No descriptors so far seem to be in progress, i.e.
290 * this one must be done.
291 */
292 dwc_descriptor_complete(dwc, desc);
293 }
294
295 dev_err(&dwc->chan.dev,
296 "BUG: All descriptors done, but channel not idle!\n");
297
298 /* Try to continue after resetting the channel... */
299 channel_clear_bit(dw, CH_EN, dwc->mask);
300 while (dma_readl(dw, CH_EN) & dwc->mask)
301 cpu_relax();
302
303 if (!list_empty(&dwc->queue)) {
304 dwc_dostart(dwc, dwc_first_queued(dwc));
305 list_splice_init(&dwc->queue, &dwc->active_list);
306 }
307}
308
309static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
310{
311 dev_printk(KERN_CRIT, &dwc->chan.dev,
312 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
313 lli->sar, lli->dar, lli->llp,
314 lli->ctlhi, lli->ctllo);
315}
316
317static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
318{
319 struct dw_desc *bad_desc;
320 struct dw_desc *child;
321
322 dwc_scan_descriptors(dw, dwc);
323
324 /*
325 * The descriptor currently at the head of the active list is
326 * borked. Since we don't have any way to report errors, we'll
327 * just have to scream loudly and try to carry on.
328 */
329 bad_desc = dwc_first_active(dwc);
330 list_del_init(&bad_desc->desc_node);
331 list_splice_init(&dwc->queue, dwc->active_list.prev);
332
333 /* Clear the error flag and try to restart the controller */
334 dma_writel(dw, CLEAR.ERROR, dwc->mask);
335 if (!list_empty(&dwc->active_list))
336 dwc_dostart(dwc, dwc_first_active(dwc));
337
338 /*
 339 * KERN_CRIT may seem harsh, but since this only happens
340 * when someone submits a bad physical address in a
341 * descriptor, we should consider ourselves lucky that the
342 * controller flagged an error instead of scribbling over
343 * random memory locations.
344 */
345 dev_printk(KERN_CRIT, &dwc->chan.dev,
346 "Bad descriptor submitted for DMA!\n");
347 dev_printk(KERN_CRIT, &dwc->chan.dev,
348 " cookie: %d\n", bad_desc->txd.cookie);
349 dwc_dump_lli(dwc, &bad_desc->lli);
350 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
351 dwc_dump_lli(dwc, &child->lli);
352
353 /* Pretend the descriptor completed successfully */
354 dwc_descriptor_complete(dwc, bad_desc);
355}
356
357static void dw_dma_tasklet(unsigned long data)
358{
359 struct dw_dma *dw = (struct dw_dma *)data;
360 struct dw_dma_chan *dwc;
361 u32 status_block;
362 u32 status_xfer;
363 u32 status_err;
364 int i;
365
366 status_block = dma_readl(dw, RAW.BLOCK);
 367 status_xfer = dma_readl(dw, RAW.XFER);
368 status_err = dma_readl(dw, RAW.ERROR);
369
370 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
371 status_block, status_err);
372
373 for (i = 0; i < dw->dma.chancnt; i++) {
374 dwc = &dw->chan[i];
375 spin_lock(&dwc->lock);
376 if (status_err & (1 << i))
377 dwc_handle_error(dw, dwc);
378 else if ((status_block | status_xfer) & (1 << i))
379 dwc_scan_descriptors(dw, dwc);
380 spin_unlock(&dwc->lock);
381 }
382
383 /*
384 * Re-enable interrupts. Block Complete interrupts are only
385 * enabled if the INT_EN bit in the descriptor is set. This
386 * will trigger a scan before the whole list is done.
387 */
388 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
389 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
390 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
391}
392
393static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
394{
395 struct dw_dma *dw = dev_id;
396 u32 status;
397
398 dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
399 dma_readl(dw, STATUS_INT));
400
401 /*
402 * Just disable the interrupts. We'll turn them back on in the
403 * softirq handler.
404 */
405 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
406 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
407 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
408
409 status = dma_readl(dw, STATUS_INT);
410 if (status) {
411 dev_err(dw->dma.dev,
412 "BUG: Unexpected interrupts pending: 0x%x\n",
413 status);
414
415 /* Try to recover */
416 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
417 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
418 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
419 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
420 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
421 }
422
423 tasklet_schedule(&dw->tasklet);
424
425 return IRQ_HANDLED;
426}
427
428/*----------------------------------------------------------------------*/
429
430static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
431{
432 struct dw_desc *desc = txd_to_dw_desc(tx);
433 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
434 dma_cookie_t cookie;
435
436 spin_lock_bh(&dwc->lock);
437 cookie = dwc_assign_cookie(dwc, desc);
438
439 /*
440 * REVISIT: We should attempt to chain as many descriptors as
441 * possible, perhaps even appending to those already submitted
442 * for DMA. But this is hard to do in a race-free manner.
443 */
444 if (list_empty(&dwc->active_list)) {
445 dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
446 desc->txd.cookie);
447 dwc_dostart(dwc, desc);
448 list_add_tail(&desc->desc_node, &dwc->active_list);
449 } else {
450 dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
451 desc->txd.cookie);
452
453 list_add_tail(&desc->desc_node, &dwc->queue);
454 }
455
456 spin_unlock_bh(&dwc->lock);
457
458 return cookie;
459}
460
461static struct dma_async_tx_descriptor *
462dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
463 size_t len, unsigned long flags)
464{
465 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
466 struct dw_desc *desc;
467 struct dw_desc *first;
468 struct dw_desc *prev;
469 size_t xfer_count;
470 size_t offset;
471 unsigned int src_width;
472 unsigned int dst_width;
473 u32 ctllo;
474
475 dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
476 dest, src, len, flags);
477
478 if (unlikely(!len)) {
479 dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
480 return NULL;
481 }
482
483 /*
484 * We can be a lot more clever here, but this should take care
485 * of the most common optimization.
486 */
487 if (!((src | dest | len) & 3))
488 src_width = dst_width = 2;
489 else if (!((src | dest | len) & 1))
490 src_width = dst_width = 1;
491 else
492 src_width = dst_width = 0;
493
494 ctllo = DWC_DEFAULT_CTLLO
495 | DWC_CTLL_DST_WIDTH(dst_width)
496 | DWC_CTLL_SRC_WIDTH(src_width)
497 | DWC_CTLL_DST_INC
498 | DWC_CTLL_SRC_INC
499 | DWC_CTLL_FC_M2M;
500 prev = first = NULL;
501
502 for (offset = 0; offset < len; offset += xfer_count << src_width) {
503 xfer_count = min_t(size_t, (len - offset) >> src_width,
504 DWC_MAX_COUNT);
505
506 desc = dwc_desc_get(dwc);
507 if (!desc)
508 goto err_desc_get;
509
510 desc->lli.sar = src + offset;
511 desc->lli.dar = dest + offset;
512 desc->lli.ctllo = ctllo;
513 desc->lli.ctlhi = xfer_count;
514
515 if (!first) {
516 first = desc;
517 } else {
518 prev->lli.llp = desc->txd.phys;
519 dma_sync_single_for_device(chan->dev.parent,
520 prev->txd.phys, sizeof(prev->lli),
521 DMA_TO_DEVICE);
522 list_add_tail(&desc->desc_node,
523 &first->txd.tx_list);
524 }
525 prev = desc;
526 }
527
528
529 if (flags & DMA_PREP_INTERRUPT)
530 /* Trigger interrupt after last block */
531 prev->lli.ctllo |= DWC_CTLL_INT_EN;
532
533 prev->lli.llp = 0;
534 dma_sync_single_for_device(chan->dev.parent,
535 prev->txd.phys, sizeof(prev->lli),
536 DMA_TO_DEVICE);
537
538 first->txd.flags = flags;
539 first->len = len;
540
541 return &first->txd;
542
543err_desc_get:
544 dwc_desc_put(dwc, first);
545 return NULL;
546}
547
548static struct dma_async_tx_descriptor *
549dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
550 unsigned int sg_len, enum dma_data_direction direction,
551 unsigned long flags)
552{
553 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
554 struct dw_dma_slave *dws = dwc->dws;
555 struct dw_desc *prev;
556 struct dw_desc *first;
557 u32 ctllo;
558 dma_addr_t reg;
559 unsigned int reg_width;
560 unsigned int mem_width;
561 unsigned int i;
562 struct scatterlist *sg;
563 size_t total_len = 0;
564
565 dev_vdbg(&chan->dev, "prep_dma_slave\n");
566
567 if (unlikely(!dws || !sg_len))
568 return NULL;
569
570 reg_width = dws->slave.reg_width;
571 prev = first = NULL;
572
573 sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
574
575 switch (direction) {
576 case DMA_TO_DEVICE:
577 ctllo = (DWC_DEFAULT_CTLLO
578 | DWC_CTLL_DST_WIDTH(reg_width)
579 | DWC_CTLL_DST_FIX
580 | DWC_CTLL_SRC_INC
581 | DWC_CTLL_FC_M2P);
582 reg = dws->slave.tx_reg;
583 for_each_sg(sgl, sg, sg_len, i) {
584 struct dw_desc *desc;
585 u32 len;
586 u32 mem;
587
588 desc = dwc_desc_get(dwc);
589 if (!desc) {
590 dev_err(&chan->dev,
591 "not enough descriptors available\n");
592 goto err_desc_get;
593 }
594
595 mem = sg_phys(sg);
596 len = sg_dma_len(sg);
597 mem_width = 2;
598 if (unlikely(mem & 3 || len & 3))
599 mem_width = 0;
600
601 desc->lli.sar = mem;
602 desc->lli.dar = reg;
603 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
604 desc->lli.ctlhi = len >> mem_width;
605
606 if (!first) {
607 first = desc;
608 } else {
609 prev->lli.llp = desc->txd.phys;
610 dma_sync_single_for_device(chan->dev.parent,
611 prev->txd.phys,
612 sizeof(prev->lli),
613 DMA_TO_DEVICE);
614 list_add_tail(&desc->desc_node,
615 &first->txd.tx_list);
616 }
617 prev = desc;
618 total_len += len;
619 }
620 break;
621 case DMA_FROM_DEVICE:
622 ctllo = (DWC_DEFAULT_CTLLO
623 | DWC_CTLL_SRC_WIDTH(reg_width)
624 | DWC_CTLL_DST_INC
625 | DWC_CTLL_SRC_FIX
626 | DWC_CTLL_FC_P2M);
627
628 reg = dws->slave.rx_reg;
629 for_each_sg(sgl, sg, sg_len, i) {
630 struct dw_desc *desc;
631 u32 len;
632 u32 mem;
633
634 desc = dwc_desc_get(dwc);
635 if (!desc) {
636 dev_err(&chan->dev,
637 "not enough descriptors available\n");
638 goto err_desc_get;
639 }
640
641 mem = sg_phys(sg);
642 len = sg_dma_len(sg);
643 mem_width = 2;
644 if (unlikely(mem & 3 || len & 3))
645 mem_width = 0;
646
647 desc->lli.sar = reg;
648 desc->lli.dar = mem;
649 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
650 desc->lli.ctlhi = len >> reg_width;
651
652 if (!first) {
653 first = desc;
654 } else {
655 prev->lli.llp = desc->txd.phys;
656 dma_sync_single_for_device(chan->dev.parent,
657 prev->txd.phys,
658 sizeof(prev->lli),
659 DMA_TO_DEVICE);
660 list_add_tail(&desc->desc_node,
661 &first->txd.tx_list);
662 }
663 prev = desc;
664 total_len += len;
665 }
666 break;
667 default:
668 return NULL;
669 }
670
671 if (flags & DMA_PREP_INTERRUPT)
672 /* Trigger interrupt after last block */
673 prev->lli.ctllo |= DWC_CTLL_INT_EN;
674
675 prev->lli.llp = 0;
676 dma_sync_single_for_device(chan->dev.parent,
677 prev->txd.phys, sizeof(prev->lli),
678 DMA_TO_DEVICE);
679
680 first->len = total_len;
681
682 return &first->txd;
683
684err_desc_get:
685 dwc_desc_put(dwc, first);
686 return NULL;
687}
688
689static void dwc_terminate_all(struct dma_chan *chan)
690{
691 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
692 struct dw_dma *dw = to_dw_dma(chan->device);
693 struct dw_desc *desc, *_desc;
694 LIST_HEAD(list);
695
696 /*
697 * This is only called when something went wrong elsewhere, so
698 * we don't really care about the data. Just disable the
699 * channel. We still have to poll the channel enable bit due
700 * to AHB/HSB limitations.
701 */
702 spin_lock_bh(&dwc->lock);
703
704 channel_clear_bit(dw, CH_EN, dwc->mask);
705
706 while (dma_readl(dw, CH_EN) & dwc->mask)
707 cpu_relax();
708
709 /* active_list entries will end up before queued entries */
710 list_splice_init(&dwc->queue, &list);
711 list_splice_init(&dwc->active_list, &list);
712
713 spin_unlock_bh(&dwc->lock);
714
715 /* Flush all pending and queued descriptors */
716 list_for_each_entry_safe(desc, _desc, &list, desc_node)
717 dwc_descriptor_complete(dwc, desc);
718}
719
720static enum dma_status
721dwc_is_tx_complete(struct dma_chan *chan,
722 dma_cookie_t cookie,
723 dma_cookie_t *done, dma_cookie_t *used)
724{
725 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
726 dma_cookie_t last_used;
727 dma_cookie_t last_complete;
728 int ret;
729
730 last_complete = dwc->completed;
731 last_used = chan->cookie;
732
733 ret = dma_async_is_complete(cookie, last_complete, last_used);
734 if (ret != DMA_SUCCESS) {
735 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
736
737 last_complete = dwc->completed;
738 last_used = chan->cookie;
739
740 ret = dma_async_is_complete(cookie, last_complete, last_used);
741 }
742
743 if (done)
744 *done = last_complete;
745 if (used)
746 *used = last_used;
747
748 return ret;
749}
750
751static void dwc_issue_pending(struct dma_chan *chan)
752{
753 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
754
755 spin_lock_bh(&dwc->lock);
756 if (!list_empty(&dwc->queue))
757 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
758 spin_unlock_bh(&dwc->lock);
759}
760
761static int dwc_alloc_chan_resources(struct dma_chan *chan,
762 struct dma_client *client)
763{
764 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
765 struct dw_dma *dw = to_dw_dma(chan->device);
766 struct dw_desc *desc;
767 struct dma_slave *slave;
768 struct dw_dma_slave *dws;
769 int i;
770 u32 cfghi;
771 u32 cfglo;
772
773 dev_vdbg(&chan->dev, "alloc_chan_resources\n");
774
775 /* Channels doing slave DMA can only handle one client. */
776 if (dwc->dws || client->slave) {
777 if (chan->client_count)
778 return -EBUSY;
779 }
780
781 /* ASSERT: channel is idle */
782 if (dma_readl(dw, CH_EN) & dwc->mask) {
783 dev_dbg(&chan->dev, "DMA channel not idle?\n");
784 return -EIO;
785 }
786
787 dwc->completed = chan->cookie = 1;
788
789 cfghi = DWC_CFGH_FIFO_MODE;
790 cfglo = 0;
791
792 slave = client->slave;
793 if (slave) {
794 /*
795 * We need controller-specific data to set up slave
796 * transfers.
797 */
798 BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
799
800 dws = container_of(slave, struct dw_dma_slave, slave);
801
802 dwc->dws = dws;
803 cfghi = dws->cfg_hi;
804 cfglo = dws->cfg_lo;
805 } else {
806 dwc->dws = NULL;
807 }
808
809 channel_writel(dwc, CFG_LO, cfglo);
810 channel_writel(dwc, CFG_HI, cfghi);
811
812 /*
813 * NOTE: some controllers may have additional features that we
814 * need to initialize here, like "scatter-gather" (which
815 * doesn't mean what you think it means), and status writeback.
816 */
817
818 spin_lock_bh(&dwc->lock);
819 i = dwc->descs_allocated;
820 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
821 spin_unlock_bh(&dwc->lock);
822
823 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
824 if (!desc) {
825 dev_info(&chan->dev,
826 "only allocated %d descriptors\n", i);
827 spin_lock_bh(&dwc->lock);
828 break;
829 }
830
831 dma_async_tx_descriptor_init(&desc->txd, chan);
832 desc->txd.tx_submit = dwc_tx_submit;
833 desc->txd.flags = DMA_CTRL_ACK;
834 INIT_LIST_HEAD(&desc->txd.tx_list);
835 desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
836 sizeof(desc->lli), DMA_TO_DEVICE);
837 dwc_desc_put(dwc, desc);
838
839 spin_lock_bh(&dwc->lock);
840 i = ++dwc->descs_allocated;
841 }
842
843 /* Enable interrupts */
844 channel_set_bit(dw, MASK.XFER, dwc->mask);
845 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
846 channel_set_bit(dw, MASK.ERROR, dwc->mask);
847
848 spin_unlock_bh(&dwc->lock);
849
850 dev_dbg(&chan->dev,
851 "alloc_chan_resources allocated %d descriptors\n", i);
852
853 return i;
854}
855
856static void dwc_free_chan_resources(struct dma_chan *chan)
857{
858 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
859 struct dw_dma *dw = to_dw_dma(chan->device);
860 struct dw_desc *desc, *_desc;
861 LIST_HEAD(list);
862
863 dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
864 dwc->descs_allocated);
865
866 /* ASSERT: channel is idle */
867 BUG_ON(!list_empty(&dwc->active_list));
868 BUG_ON(!list_empty(&dwc->queue));
869 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
870
871 spin_lock_bh(&dwc->lock);
872 list_splice_init(&dwc->free_list, &list);
873 dwc->descs_allocated = 0;
874 dwc->dws = NULL;
875
876 /* Disable interrupts */
877 channel_clear_bit(dw, MASK.XFER, dwc->mask);
878 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
879 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
880
881 spin_unlock_bh(&dwc->lock);
882
883 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
884 dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
885 dma_unmap_single(chan->dev.parent, desc->txd.phys,
886 sizeof(desc->lli), DMA_TO_DEVICE);
887 kfree(desc);
888 }
889
890 dev_vdbg(&chan->dev, "free_chan_resources done\n");
891}
892
893/*----------------------------------------------------------------------*/
894
895static void dw_dma_off(struct dw_dma *dw)
896{
897 dma_writel(dw, CFG, 0);
898
899 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
900 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
901 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
902 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
903 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
904
905 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
906 cpu_relax();
907}
908
909static int __init dw_probe(struct platform_device *pdev)
910{
911 struct dw_dma_platform_data *pdata;
912 struct resource *io;
913 struct dw_dma *dw;
914 size_t size;
915 int irq;
916 int err;
917 int i;
918
919 pdata = pdev->dev.platform_data;
920 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
921 return -EINVAL;
922
923 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
924 if (!io)
925 return -EINVAL;
926
927 irq = platform_get_irq(pdev, 0);
928 if (irq < 0)
929 return irq;
930
931 size = sizeof(struct dw_dma);
932 size += pdata->nr_channels * sizeof(struct dw_dma_chan);
933 dw = kzalloc(size, GFP_KERNEL);
934 if (!dw)
935 return -ENOMEM;
936
937 if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
938 err = -EBUSY;
939 goto err_kfree;
940 }
941
942 memset(dw, 0, sizeof *dw);
943
944 dw->regs = ioremap(io->start, DW_REGLEN);
945 if (!dw->regs) {
946 err = -ENOMEM;
947 goto err_release_r;
948 }
949
950 dw->clk = clk_get(&pdev->dev, "hclk");
951 if (IS_ERR(dw->clk)) {
952 err = PTR_ERR(dw->clk);
953 goto err_clk;
954 }
955 clk_enable(dw->clk);
956
957 /* force dma off, just in case */
958 dw_dma_off(dw);
959
960 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
961 if (err)
962 goto err_irq;
963
964 platform_set_drvdata(pdev, dw);
965
966 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
967
968 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
969
970 INIT_LIST_HEAD(&dw->dma.channels);
971 for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
972 struct dw_dma_chan *dwc = &dw->chan[i];
973
974 dwc->chan.device = &dw->dma;
975 dwc->chan.cookie = dwc->completed = 1;
976 dwc->chan.chan_id = i;
977 list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
978
979 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
980 spin_lock_init(&dwc->lock);
981 dwc->mask = 1 << i;
982
983 INIT_LIST_HEAD(&dwc->active_list);
984 INIT_LIST_HEAD(&dwc->queue);
985 INIT_LIST_HEAD(&dwc->free_list);
986
987 channel_clear_bit(dw, CH_EN, dwc->mask);
988 }
989
990 /* Clear/disable all interrupts on all channels. */
991 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
992 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
993 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
994 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
995 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
996
997 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
998 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
999 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1000 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1001 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1002
1003 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1004 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1005 dw->dma.dev = &pdev->dev;
1006 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1007 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1008
1009 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1010
1011 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1012 dw->dma.device_terminate_all = dwc_terminate_all;
1013
1014 dw->dma.device_is_tx_complete = dwc_is_tx_complete;
1015 dw->dma.device_issue_pending = dwc_issue_pending;
1016
1017 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1018
1019 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
1020 pdev->dev.bus_id, dw->dma.chancnt);
1021
1022 dma_async_device_register(&dw->dma);
1023
1024 return 0;
1025
1026err_irq:
1027 clk_disable(dw->clk);
1028 clk_put(dw->clk);
1029err_clk:
1030 iounmap(dw->regs);
1031 dw->regs = NULL;
1032err_release_r:
1033 release_resource(io);
1034err_kfree:
1035 kfree(dw);
1036 return err;
1037}
1038
1039static int __exit dw_remove(struct platform_device *pdev)
1040{
1041 struct dw_dma *dw = platform_get_drvdata(pdev);
1042 struct dw_dma_chan *dwc, *_dwc;
1043 struct resource *io;
1044
1045 dw_dma_off(dw);
1046 dma_async_device_unregister(&dw->dma);
1047
1048 free_irq(platform_get_irq(pdev, 0), dw);
1049 tasklet_kill(&dw->tasklet);
1050
1051 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1052 chan.device_node) {
1053 list_del(&dwc->chan.device_node);
1054 channel_clear_bit(dw, CH_EN, dwc->mask);
1055 }
1056
1057 clk_disable(dw->clk);
1058 clk_put(dw->clk);
1059
1060 iounmap(dw->regs);
1061 dw->regs = NULL;
1062
1063 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1064 release_mem_region(io->start, DW_REGLEN);
1065
1066 kfree(dw);
1067
1068 return 0;
1069}
1070
1071static void dw_shutdown(struct platform_device *pdev)
1072{
1073 struct dw_dma *dw = platform_get_drvdata(pdev);
1074
1075 dw_dma_off(platform_get_drvdata(pdev));
1076 clk_disable(dw->clk);
1077}
1078
1079static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
1080{
1081 struct dw_dma *dw = platform_get_drvdata(pdev);
1082
1083 dw_dma_off(platform_get_drvdata(pdev));
1084 clk_disable(dw->clk);
1085 return 0;
1086}
1087
1088static int dw_resume_early(struct platform_device *pdev)
1089{
1090 struct dw_dma *dw = platform_get_drvdata(pdev);
1091
1092 clk_enable(dw->clk);
1093 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1094 return 0;
1095
1096}
1097
1098static struct platform_driver dw_driver = {
1099 .remove = __exit_p(dw_remove),
1100 .shutdown = dw_shutdown,
1101 .suspend_late = dw_suspend_late,
1102 .resume_early = dw_resume_early,
1103 .driver = {
1104 .name = "dw_dmac",
1105 },
1106};
1107
1108static int __init dw_init(void)
1109{
1110 return platform_driver_probe(&dw_driver, dw_probe);
1111}
1112module_init(dw_init);
1113
1114static void __exit dw_exit(void)
1115{
1116 platform_driver_unregister(&dw_driver);
1117}
1118module_exit(dw_exit);
1119
1120MODULE_LICENSE("GPL v2");
1121MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1122MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
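
dw_probe() above expects the platform to supply a memory resource covering the 0x400-byte register window, an interrupt, an "hclk" clock, and platform data giving the channel count. A rough sketch of the board-level glue, assuming struct dw_dma_platform_data is what <linux/dw_dmac.h> provides (as the driver's include suggests); the base address and IRQ number below are placeholders, not values from this patch:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dw_dmac.h>

static struct dw_dma_platform_data dw_dmac0_data = {
	.nr_channels	= 3,	/* must not exceed DW_DMA_MAX_NR_CHANNELS */
};

static struct resource dw_dmac0_resource[] = {
	{
		.start	= 0xff200000,		/* placeholder register base */
		.end	= 0xff2003ff,		/* 0x400 (DW_REGLEN) bytes */
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 2,			/* placeholder IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dw_dmac0_device = {
	.name		= "dw_dmac",		/* matches dw_driver.driver.name */
	.id		= 0,
	.dev		= {
		.platform_data	= &dw_dmac0_data,
	},
	.resource	= dw_dmac0_resource,
	.num_resources	= ARRAY_SIZE(dw_dmac0_resource),
};

The board code would also register an "hclk" clk for this device and call platform_device_register(&dw_dmac0_device) during machine init.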
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
new file mode 100644
index 000000000000..00fdd187bb0c
--- /dev/null
+++ b/drivers/dma/dw_dmac_regs.h
@@ -0,0 +1,225 @@
1/*
2 * Driver for the Synopsys DesignWare AHB DMA Controller
3 *
4 * Copyright (C) 2005-2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/dw_dmac.h>
12
13#define DW_DMA_MAX_NR_CHANNELS 8
14
15/*
16 * Redefine this macro to handle differences between 32- and 64-bit
17 * addressing, big vs. little endian, etc.
18 */
19#define DW_REG(name) u32 name; u32 __pad_##name
20
21/* Hardware register definitions. */
22struct dw_dma_chan_regs {
23 DW_REG(SAR); /* Source Address Register */
24 DW_REG(DAR); /* Destination Address Register */
25 DW_REG(LLP); /* Linked List Pointer */
26 u32 CTL_LO; /* Control Register Low */
27 u32 CTL_HI; /* Control Register High */
28 DW_REG(SSTAT);
29 DW_REG(DSTAT);
30 DW_REG(SSTATAR);
31 DW_REG(DSTATAR);
32 u32 CFG_LO; /* Configuration Register Low */
33 u32 CFG_HI; /* Configuration Register High */
34 DW_REG(SGR);
35 DW_REG(DSR);
36};
37
38struct dw_dma_irq_regs {
39 DW_REG(XFER);
40 DW_REG(BLOCK);
41 DW_REG(SRC_TRAN);
42 DW_REG(DST_TRAN);
43 DW_REG(ERROR);
44};
45
46struct dw_dma_regs {
47 /* per-channel registers */
48 struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
49
50 /* irq handling */
51 struct dw_dma_irq_regs RAW; /* r */
52 struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
53 struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
54 struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
55
56 DW_REG(STATUS_INT); /* r */
57
58 /* software handshaking */
59 DW_REG(REQ_SRC);
60 DW_REG(REQ_DST);
61 DW_REG(SGL_REQ_SRC);
62 DW_REG(SGL_REQ_DST);
63 DW_REG(LAST_SRC);
64 DW_REG(LAST_DST);
65
66 /* miscellaneous */
67 DW_REG(CFG);
68 DW_REG(CH_EN);
69 DW_REG(ID);
70 DW_REG(TEST);
71
72 /* optional encoded params, 0x3c8..0x3 */
73};
74
75/* Bitfields in CTL_LO */
76#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
77#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
78#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
79#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
80#define DWC_CTLL_DST_DEC (1<<7)
81#define DWC_CTLL_DST_FIX (2<<7)
 82#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */
83#define DWC_CTLL_SRC_DEC (1<<9)
84#define DWC_CTLL_SRC_FIX (2<<9)
85#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
90#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
91#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
92#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
93/* plus 4 transfer types for peripheral-as-flow-controller */
94#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
95#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
96#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
97#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
98
99/* Bitfields in CTL_HI */
100#define DWC_CTLH_DONE 0x00001000
101#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
102
103/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
104#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
105#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
106#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
107#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
108#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
109#define DWC_CFGL_RELOAD_SAR (1 << 30)
110#define DWC_CFGL_RELOAD_DAR (1 << 31)
111
112/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
113#define DWC_CFGH_DS_UPD_EN (1 << 5)
114#define DWC_CFGH_SS_UPD_EN (1 << 6)
115
116/* Bitfields in SGR */
117#define DWC_SGR_SGI(x) ((x) << 0)
118#define DWC_SGR_SGC(x) ((x) << 20)
119
120/* Bitfields in DSR */
121#define DWC_DSR_DSI(x) ((x) << 0)
122#define DWC_DSR_DSC(x) ((x) << 20)
123
124/* Bitfields in CFG */
125#define DW_CFG_DMA_EN (1 << 0)
126
127#define DW_REGLEN 0x400
128
129struct dw_dma_chan {
130 struct dma_chan chan;
131 void __iomem *ch_regs;
132 u8 mask;
133
134 spinlock_t lock;
135
136 /* these other elements are all protected by lock */
137 dma_cookie_t completed;
138 struct list_head active_list;
139 struct list_head queue;
140 struct list_head free_list;
141
142 struct dw_dma_slave *dws;
143
144 unsigned int descs_allocated;
145};
146
147static inline struct dw_dma_chan_regs __iomem *
148__dwc_regs(struct dw_dma_chan *dwc)
149{
150 return dwc->ch_regs;
151}
152
153#define channel_readl(dwc, name) \
154 __raw_readl(&(__dwc_regs(dwc)->name))
155#define channel_writel(dwc, name, val) \
156 __raw_writel((val), &(__dwc_regs(dwc)->name))
157
158static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
159{
160 return container_of(chan, struct dw_dma_chan, chan);
161}
162
163
164struct dw_dma {
165 struct dma_device dma;
166 void __iomem *regs;
167 struct tasklet_struct tasklet;
168 struct clk *clk;
169
170 u8 all_chan_mask;
171
172 struct dw_dma_chan chan[0];
173};
174
175static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
176{
177 return dw->regs;
178}
179
180#define dma_readl(dw, name) \
181 __raw_readl(&(__dw_regs(dw)->name))
182#define dma_writel(dw, name, val) \
183 __raw_writel((val), &(__dw_regs(dw)->name))
184
185#define channel_set_bit(dw, reg, mask) \
186 dma_writel(dw, reg, ((mask) << 8) | (mask))
187#define channel_clear_bit(dw, reg, mask) \
188 dma_writel(dw, reg, ((mask) << 8) | 0)
189
190static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
191{
192 return container_of(ddev, struct dw_dma, dma);
193}
194
195/* LLI == Linked List Item; a.k.a. DMA block descriptor */
196struct dw_lli {
197 /* values that are not changed by hardware */
198 dma_addr_t sar;
199 dma_addr_t dar;
200 dma_addr_t llp; /* chain to next lli */
201 u32 ctllo;
202 /* values that may get written back: */
203 u32 ctlhi;
204 /* sstat and dstat can snapshot peripheral register state.
205 * silicon config may discard either or both...
206 */
207 u32 sstat;
208 u32 dstat;
209};
210
211struct dw_desc {
212 /* FIRST values the hardware uses */
213 struct dw_lli lli;
214
215 /* THEN values for driver housekeeping */
216 struct list_head desc_node;
217 struct dma_async_tx_descriptor txd;
218 size_t len;
219};
220
221static inline struct dw_desc *
222txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
223{
224 return container_of(txd, struct dw_desc, txd);
225}
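
One point worth spelling out in the helpers above: channel_set_bit()/channel_clear_bit() write ((mask) << 8) | value because CH_EN and the MASK registers take a per-bit write-enable mask in bits 15:8, so a single channel can be enabled or disabled without a read-modify-write cycle. A tiny sketch of the resulting register values (illustrative only):

#include <stdint.h>

/* Mirrors the channel_set_bit()/channel_clear_bit() encoding: the high
 * byte selects which low-byte bits the hardware actually updates.
 */
static inline uint32_t dw_set_bits(uint32_t mask)   { return (mask << 8) | mask; }
static inline uint32_t dw_clear_bits(uint32_t mask) { return (mask << 8) | 0; }

/* For channel 2 (mask = 1 << 2 = 0x04):
 *   dw_set_bits(0x04)   == 0x0404  -> enable channel 2, leave the others alone
 *   dw_clear_bits(0x04) == 0x0400  -> disable channel 2, leave the others alone
 */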
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 054eabffc185..c0059ca58340 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
  *
  * Return - The number of descriptors allocated.
  */
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
+					struct dma_client *client)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 	LIST_HEAD(tmp_list);
@@ -809,8 +810,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	if (!src) {
 		dev_err(fsl_chan->dev,
 			"selftest: Cannot alloc memory for test!\n");
-		err = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
 	dest = src + test_size;
@@ -820,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 
 	chan = &fsl_chan->common;
 
-	if (fsl_dma_alloc_chan_resources(chan) < 1) {
+	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
 		dev_err(fsl_chan->dev,
 			"selftest: Cannot alloc resources for DMA\n");
 		err = -ENODEV;
@@ -842,13 +842,13 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
 		dev_err(fsl_chan->dev, "selftest: Time out!\n");
 		err = -ENODEV;
-		goto out;
+		goto free_resources;
 	}
 
 	/* Test free and re-alloc channel resources */
 	fsl_dma_free_chan_resources(chan);
 
-	if (fsl_dma_alloc_chan_resources(chan) < 1) {
+	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
 		dev_err(fsl_chan->dev,
 			"selftest: Cannot alloc resources for DMA\n");
 		err = -ENODEV;
@@ -927,8 +927,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 	if (!new_fsl_chan) {
 		dev_err(&dev->dev, "No free memory for allocating "
 				"dma channels!\n");
-		err = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 
 	/* get dma channel register base */
@@ -936,7 +935,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 	if (err) {
 		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
 				dev->node->full_name);
-		goto err;
+		goto err_no_reg;
 	}
 
 	new_fsl_chan->feature = *(u32 *)match->data;
@@ -958,7 +957,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 		dev_err(&dev->dev, "There is no %d channel!\n",
 				new_fsl_chan->id);
 		err = -EINVAL;
-		goto err;
+		goto err_no_chan;
 	}
 	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
 	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
@@ -997,23 +996,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 		if (err) {
 			dev_err(&dev->dev, "DMA channel %s request_irq error "
 				"with return %d\n", dev->node->full_name, err);
-			goto err;
+			goto err_no_irq;
 		}
 	}
 
 	err = fsl_dma_self_test(new_fsl_chan);
 	if (err)
-		goto err;
+		goto err_self_test;
 
 	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
 			match->compatible, new_fsl_chan->irq);
 
 	return 0;
-err:
-	dma_halt(new_fsl_chan);
-	iounmap(new_fsl_chan->reg_base);
+
+err_self_test:
 	free_irq(new_fsl_chan->irq, new_fsl_chan);
+err_no_irq:
 	list_del(&new_fsl_chan->common.device_node);
+err_no_chan:
+	iounmap(new_fsl_chan->reg_base);
+err_no_reg:
 	kfree(new_fsl_chan);
 	return err;
 }
@@ -1054,8 +1056,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1054 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); 1056 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
1055 if (!fdev) { 1057 if (!fdev) {
1056 dev_err(&dev->dev, "No enough memory for 'priv'\n"); 1058 dev_err(&dev->dev, "No enough memory for 'priv'\n");
1057 err = -ENOMEM; 1059 return -ENOMEM;
1058 goto err;
1059 } 1060 }
1060 fdev->dev = &dev->dev; 1061 fdev->dev = &dev->dev;
1061 INIT_LIST_HEAD(&fdev->common.channels); 1062 INIT_LIST_HEAD(&fdev->common.channels);
@@ -1065,7 +1066,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1065 if (err) { 1066 if (err) {
1066 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 1067 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
1067 dev->node->full_name); 1068 dev->node->full_name);
1068 goto err; 1069 goto err_no_reg;
1069 } 1070 }
1070 1071
1071 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 1072 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
@@ -1103,6 +1104,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1103 1104
1104err: 1105err:
1105 iounmap(fdev->reg_base); 1106 iounmap(fdev->reg_base);
1107err_no_reg:
1106 kfree(fdev); 1108 kfree(fdev);
1107 return err; 1109 return err;
1108} 1110}
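The fsldma changes above replace the single catch-all err: label with a ladder of labels (err_no_reg, err_no_chan, err_no_irq, err_self_test) so that each failure point unwinds only what has already been set up, in reverse order of acquisition, and an allocation failure can simply return. A minimal standalone C sketch of that unwind shape, with placeholder setup/teardown stubs rather than the driver's real helpers:

#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for the real setup/teardown steps. */
static int  map_regs(void)      { return 0; }
static void unmap_regs(void)    { }
static int  claim_chan(void)    { return 0; }
static void release_chan(void)  { }
static int  grab_irq(void)      { return 0; }
static void drop_irq(void)      { }
static int  self_test(void)     { return -1; }	/* pretend the self test fails */

static int probe(void)
{
	int err;
	void *priv = malloc(16);

	if (!priv)
		return -12;		/* -ENOMEM: nothing to undo yet */

	err = map_regs();
	if (err)
		goto err_no_reg;	/* only the allocation exists so far */

	err = claim_chan();
	if (err)
		goto err_no_chan;	/* also undo the register mapping */

	err = grab_irq();
	if (err)
		goto err_no_irq;

	err = self_test();
	if (err)
		goto err_self_test;	/* undo everything, newest first */

	return 0;

err_self_test:
	drop_irq();
err_no_irq:
	release_chan();
err_no_chan:
	unmap_regs();
err_no_reg:
	free(priv);
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}

Each label names the first thing that does not need undoing, so the unwind code reads top to bottom as the exact reverse of the setup code.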
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 16e0fd8facfb..9b16a3af9a0a 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -47,6 +47,16 @@ static struct pci_device_id ioat_pci_tbl[] = {
47 47
48 /* I/OAT v2 platforms */ 48 /* I/OAT v2 platforms */
49 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, 49 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
50
51 /* I/OAT v3 platforms */
52 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
53 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
54 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
55 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
56 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
57 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
58 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
59 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
50 { 0, } 60 { 0, }
51}; 61};
52 62
@@ -83,6 +93,11 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
83 if (device->dma && ioat_dca_enabled) 93 if (device->dma && ioat_dca_enabled)
84 device->dca = ioat2_dca_init(pdev, iobase); 94 device->dca = ioat2_dca_init(pdev, iobase);
85 break; 95 break;
96 case IOAT_VER_3_0:
97 device->dma = ioat_dma_probe(pdev, iobase);
98 if (device->dma && ioat_dca_enabled)
99 device->dca = ioat3_dca_init(pdev, iobase);
100 break;
86 default: 101 default:
87 err = -ENODEV; 102 err = -ENODEV;
88 break; 103 break;
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 9e922760b7ff..6cf622da0286 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -37,12 +37,18 @@
37#include "ioatdma_registers.h" 37#include "ioatdma_registers.h"
38 38
39/* 39/*
40 * Bit 16 of a tag map entry is the "valid" bit, if it is set then bits 0:15 40 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
41 * contain the bit number of the APIC ID to map into the DCA tag. If the valid 41 * contain the bit number of the APIC ID to map into the DCA tag. If the valid
42 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. 42 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
43 */ 43 */
44#define DCA_TAG_MAP_VALID 0x80 44#define DCA_TAG_MAP_VALID 0x80
45 45
46#define DCA3_TAG_MAP_BIT_TO_INV 0x80
47#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
48#define DCA3_TAG_MAP_LITERAL_VAL 0x1
49
50#define DCA_TAG_MAP_MASK 0xDF
51
46/* 52/*
47 * "Legacy" DCA systems do not implement the DCA register set in the 53 * "Legacy" DCA systems do not implement the DCA register set in the
48 * I/OAT device. Software needs direct support for their tag mappings. 54 * I/OAT device. Software needs direct support for their tag mappings.
@@ -95,6 +101,7 @@ struct ioat_dca_slot {
95}; 101};
96 102
97#define IOAT_DCA_MAX_REQ 6 103#define IOAT_DCA_MAX_REQ 6
104#define IOAT3_DCA_MAX_REQ 2
98 105
99struct ioat_dca_priv { 106struct ioat_dca_priv {
100 void __iomem *iobase; 107 void __iomem *iobase;
@@ -171,7 +178,9 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
171 return -ENODEV; 178 return -ENODEV;
172} 179}
173 180
174static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu) 181static u8 ioat_dca_get_tag(struct dca_provider *dca,
182 struct device *dev,
183 int cpu)
175{ 184{
176 struct ioat_dca_priv *ioatdca = dca_priv(dca); 185 struct ioat_dca_priv *ioatdca = dca_priv(dca);
177 int i, apic_id, bit, value; 186 int i, apic_id, bit, value;
@@ -193,10 +202,26 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
193 return tag; 202 return tag;
194} 203}
195 204
205static int ioat_dca_dev_managed(struct dca_provider *dca,
206 struct device *dev)
207{
208 struct ioat_dca_priv *ioatdca = dca_priv(dca);
209 struct pci_dev *pdev;
210 int i;
211
212 pdev = to_pci_dev(dev);
213 for (i = 0; i < ioatdca->max_requesters; i++) {
214 if (ioatdca->req_slots[i].pdev == pdev)
215 return 1;
216 }
217 return 0;
218}
219
196static struct dca_ops ioat_dca_ops = { 220static struct dca_ops ioat_dca_ops = {
197 .add_requester = ioat_dca_add_requester, 221 .add_requester = ioat_dca_add_requester,
198 .remove_requester = ioat_dca_remove_requester, 222 .remove_requester = ioat_dca_remove_requester,
199 .get_tag = ioat_dca_get_tag, 223 .get_tag = ioat_dca_get_tag,
224 .dev_managed = ioat_dca_dev_managed,
200}; 225};
201 226
202 227
@@ -207,6 +232,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
207 u8 *tag_map = NULL; 232 u8 *tag_map = NULL;
208 int i; 233 int i;
209 int err; 234 int err;
235 u8 version;
236 u8 max_requesters;
210 237
211 if (!system_has_dca_enabled(pdev)) 238 if (!system_has_dca_enabled(pdev))
212 return NULL; 239 return NULL;
@@ -237,15 +264,20 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
237 if (tag_map == NULL) 264 if (tag_map == NULL)
238 return NULL; 265 return NULL;
239 266
267 version = readb(iobase + IOAT_VER_OFFSET);
268 if (version == IOAT_VER_3_0)
269 max_requesters = IOAT3_DCA_MAX_REQ;
270 else
271 max_requesters = IOAT_DCA_MAX_REQ;
272
240 dca = alloc_dca_provider(&ioat_dca_ops, 273 dca = alloc_dca_provider(&ioat_dca_ops,
241 sizeof(*ioatdca) + 274 sizeof(*ioatdca) +
242 (sizeof(struct ioat_dca_slot) * IOAT_DCA_MAX_REQ)); 275 (sizeof(struct ioat_dca_slot) * max_requesters));
243 if (!dca) 276 if (!dca)
244 return NULL; 277 return NULL;
245 278
246 ioatdca = dca_priv(dca); 279 ioatdca = dca_priv(dca);
247 ioatdca->max_requesters = IOAT_DCA_MAX_REQ; 280 ioatdca->max_requesters = max_requesters;
248
249 ioatdca->dca_base = iobase + 0x54; 281 ioatdca->dca_base = iobase + 0x54;
250 282
251 /* copy over the APIC ID to DCA tag mapping */ 283 /* copy over the APIC ID to DCA tag mapping */
@@ -323,11 +355,13 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
323 return -ENODEV; 355 return -ENODEV;
324} 356}
325 357
326static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu) 358static u8 ioat2_dca_get_tag(struct dca_provider *dca,
359 struct device *dev,
360 int cpu)
327{ 361{
328 u8 tag; 362 u8 tag;
329 363
330 tag = ioat_dca_get_tag(dca, cpu); 364 tag = ioat_dca_get_tag(dca, dev, cpu);
331 tag = (~tag) & 0x1F; 365 tag = (~tag) & 0x1F;
332 return tag; 366 return tag;
333} 367}
@@ -336,6 +370,7 @@ static struct dca_ops ioat2_dca_ops = {
336 .add_requester = ioat2_dca_add_requester, 370 .add_requester = ioat2_dca_add_requester,
337 .remove_requester = ioat2_dca_remove_requester, 371 .remove_requester = ioat2_dca_remove_requester,
338 .get_tag = ioat2_dca_get_tag, 372 .get_tag = ioat2_dca_get_tag,
373 .dev_managed = ioat_dca_dev_managed,
339}; 374};
340 375
341static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) 376static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
@@ -425,3 +460,198 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
425 460
426 return dca; 461 return dca;
427} 462}
463
464static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
465{
466 struct ioat_dca_priv *ioatdca = dca_priv(dca);
467 struct pci_dev *pdev;
468 int i;
469 u16 id;
470 u16 global_req_table;
471
472 /* This implementation only supports PCI-Express */
473 if (dev->bus != &pci_bus_type)
474 return -ENODEV;
475 pdev = to_pci_dev(dev);
476 id = dcaid_from_pcidev(pdev);
477
478 if (ioatdca->requester_count == ioatdca->max_requesters)
479 return -ENODEV;
480
481 for (i = 0; i < ioatdca->max_requesters; i++) {
482 if (ioatdca->req_slots[i].pdev == NULL) {
483 /* found an empty slot */
484 ioatdca->requester_count++;
485 ioatdca->req_slots[i].pdev = pdev;
486 ioatdca->req_slots[i].rid = id;
487 global_req_table =
488 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
489 writel(id | IOAT_DCA_GREQID_VALID,
490 ioatdca->iobase + global_req_table + (i * 4));
491 return i;
492 }
493 }
494 /* Error, ioatdma->requester_count is out of whack */
495 return -EFAULT;
496}
497
498static int ioat3_dca_remove_requester(struct dca_provider *dca,
499 struct device *dev)
500{
501 struct ioat_dca_priv *ioatdca = dca_priv(dca);
502 struct pci_dev *pdev;
503 int i;
504 u16 global_req_table;
505
506 /* This implementation only supports PCI-Express */
507 if (dev->bus != &pci_bus_type)
508 return -ENODEV;
509 pdev = to_pci_dev(dev);
510
511 for (i = 0; i < ioatdca->max_requesters; i++) {
512 if (ioatdca->req_slots[i].pdev == pdev) {
513 global_req_table =
514 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
515 writel(0, ioatdca->iobase + global_req_table + (i * 4));
516 ioatdca->req_slots[i].pdev = NULL;
517 ioatdca->req_slots[i].rid = 0;
518 ioatdca->requester_count--;
519 return i;
520 }
521 }
522 return -ENODEV;
523}
524
525static u8 ioat3_dca_get_tag(struct dca_provider *dca,
526 struct device *dev,
527 int cpu)
528{
529 u8 tag;
530
531 struct ioat_dca_priv *ioatdca = dca_priv(dca);
532 int i, apic_id, bit, value;
533 u8 entry;
534
535 tag = 0;
536 apic_id = cpu_physical_id(cpu);
537
538 for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
539 entry = ioatdca->tag_map[i];
540 if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
541 bit = entry &
542 ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
543 value = (apic_id & (1 << bit)) ? 1 : 0;
544 } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
545 bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
546 value = (apic_id & (1 << bit)) ? 0 : 1;
547 } else {
548 value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
549 }
550 tag |= (value << i);
551 }
552
553 return tag;
554}
555
556static struct dca_ops ioat3_dca_ops = {
557 .add_requester = ioat3_dca_add_requester,
558 .remove_requester = ioat3_dca_remove_requester,
559 .get_tag = ioat3_dca_get_tag,
560 .dev_managed = ioat_dca_dev_managed,
561};
562
563static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
564{
565 int slots = 0;
566 u32 req;
567 u16 global_req_table;
568
569 global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
570 if (global_req_table == 0)
571 return 0;
572
573 do {
574 req = readl(iobase + global_req_table + (slots * sizeof(u32)));
575 slots++;
576 } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
577
578 return slots;
579}
580
581struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
582{
583 struct dca_provider *dca;
584 struct ioat_dca_priv *ioatdca;
585 int slots;
586 int i;
587 int err;
588 u16 dca_offset;
589 u16 csi_fsb_control;
590 u16 pcie_control;
591 u8 bit;
592
593 union {
594 u64 full;
595 struct {
596 u32 low;
597 u32 high;
598 };
599 } tag_map;
600
601 if (!system_has_dca_enabled(pdev))
602 return NULL;
603
604 dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
605 if (dca_offset == 0)
606 return NULL;
607
608 slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
609 if (slots == 0)
610 return NULL;
611
612 dca = alloc_dca_provider(&ioat3_dca_ops,
613 sizeof(*ioatdca)
614 + (sizeof(struct ioat_dca_slot) * slots));
615 if (!dca)
616 return NULL;
617
618 ioatdca = dca_priv(dca);
619 ioatdca->iobase = iobase;
620 ioatdca->dca_base = iobase + dca_offset;
621 ioatdca->max_requesters = slots;
622
623 /* some bios might not know to turn these on */
624 csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
625 if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
626 csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
627 writew(csi_fsb_control,
628 ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
629 }
630 pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
631 if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
632 pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
633 writew(pcie_control,
634 ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
635 }
636
637
638 /* TODO version, compatibility and configuration checks */
639
640 /* copy out the APIC to DCA tag map */
641 tag_map.low =
642 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
643 tag_map.high =
644 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
645 for (i = 0; i < 8; i++) {
646 bit = tag_map.full >> (8 * i);
647 ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
648 }
649
650 err = register_dca_provider(dca, &pdev->dev);
651 if (err) {
652 free_dca_provider(dca);
653 return NULL;
654 }
655
656 return dca;
657}
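The tag-map comment near the top of this file describes the pre-v3 entry format: bit 7 marks an entry valid, in which case bits 0:6 give the APIC-ID bit copied into that tag position; otherwise the entry is a literal 0 or 1. The v3 decode is shown in full in ioat3_dca_get_tag above, but the legacy decode it generalizes is not part of this hunk, so here is a standalone sketch of it (the map contents, map length and function name are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define TAG_MAP_VALID	0x80	/* bit 7: entry selects an APIC-ID bit */
#define TAG_MAP_LEN	8	/* one map entry per DCA tag bit */

/* Build a DCA tag from an APIC ID using a legacy-style tag map. */
static uint8_t legacy_tag(const uint8_t *tag_map, unsigned int apic_id)
{
	uint8_t tag = 0;
	int i;

	for (i = 0; i < TAG_MAP_LEN; i++) {
		uint8_t entry = tag_map[i];
		int value;

		if (entry & TAG_MAP_VALID) {
			int bit = entry & 0x7f;		/* APIC-ID bit number */
			value = (apic_id >> bit) & 1;
		} else {
			value = entry & 1;		/* literal 0 or 1 */
		}
		tag |= value << i;
	}
	return tag;
}

int main(void)
{
	/* Hypothetical map: tag bits 0..3 copy APIC-ID bits 0..3, the rest are literal 0. */
	uint8_t map[TAG_MAP_LEN] = { 0x80, 0x81, 0x82, 0x83, 0, 0, 0, 0 };

	printf("tag for apic_id 0x0b = 0x%02x\n", legacy_tag(map, 0x0b));
	return 0;
}

The v3 variant layered on top of this adds the BIT_TO_SEL/BIT_TO_INV/LITERAL encodings seen in the DCA3_TAG_MAP_* defines, but the per-bit loop has the same shape.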
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 318e8a22d814..a52156e56886 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -32,6 +32,7 @@
32#include <linux/dmaengine.h> 32#include <linux/dmaengine.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/workqueue.h>
35#include "ioatdma.h" 36#include "ioatdma.h"
36#include "ioatdma_registers.h" 37#include "ioatdma_registers.h"
37#include "ioatdma_hw.h" 38#include "ioatdma_hw.h"
@@ -41,11 +42,23 @@
41#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) 42#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
42#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) 43#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
43 44
45#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
44static int ioat_pending_level = 4; 46static int ioat_pending_level = 4;
45module_param(ioat_pending_level, int, 0644); 47module_param(ioat_pending_level, int, 0644);
46MODULE_PARM_DESC(ioat_pending_level, 48MODULE_PARM_DESC(ioat_pending_level,
47 "high-water mark for pushing ioat descriptors (default: 4)"); 49 "high-water mark for pushing ioat descriptors (default: 4)");
48 50
51#define RESET_DELAY msecs_to_jiffies(100)
52#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000))
53static void ioat_dma_chan_reset_part2(struct work_struct *work);
54static void ioat_dma_chan_watchdog(struct work_struct *work);
55
56/*
57 * workaround for IOAT ver.3.0 null descriptor issue
58 * (channel returns error when size is 0)
59 */
60#define NULL_DESC_BUFFER_SIZE 1
61
49/* internal functions */ 62/* internal functions */
50static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); 63static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); 64static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
@@ -122,6 +135,38 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
122 int i; 135 int i;
123 struct ioat_dma_chan *ioat_chan; 136 struct ioat_dma_chan *ioat_chan;
124 137
138 /*
139 * IOAT ver.3 workarounds
140 */
141 if (device->version == IOAT_VER_3_0) {
142 u32 chan_err_mask;
143 u16 dev_id;
144 u32 dmauncerrsts;
145
146 /*
147 * Write CHANERRMSK_INT with 3E07h to mask out the errors
148 * that can cause stability issues for IOAT ver.3
149 */
150 chan_err_mask = 0x3E07;
151 pci_write_config_dword(device->pdev,
152 IOAT_PCI_CHANERRMASK_INT_OFFSET,
153 chan_err_mask);
154
155 /*
156 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
157 * (workaround for spurious config parity error after restart)
158 */
159 pci_read_config_word(device->pdev,
160 IOAT_PCI_DEVICE_ID_OFFSET,
161 &dev_id);
162 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
163 dmauncerrsts = 0x10;
164 pci_write_config_dword(device->pdev,
165 IOAT_PCI_DMAUNCERRSTS_OFFSET,
166 dmauncerrsts);
167 }
168 }
169
125 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); 170 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); 171 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
127 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); 172 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
@@ -137,6 +182,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
137 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); 182 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138 ioat_chan->xfercap = xfercap; 183 ioat_chan->xfercap = xfercap;
139 ioat_chan->desccount = 0; 184 ioat_chan->desccount = 0;
185 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
140 if (ioat_chan->device->version != IOAT_VER_1_2) { 186 if (ioat_chan->device->version != IOAT_VER_1_2) {
141 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE 187 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142 | IOAT_DMA_DCA_ANY_CPU, 188 | IOAT_DMA_DCA_ANY_CPU,
@@ -175,7 +221,7 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
175{ 221{
176 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 222 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
177 223
178 if (ioat_chan->pending != 0) { 224 if (ioat_chan->pending > 0) {
179 spin_lock_bh(&ioat_chan->desc_lock); 225 spin_lock_bh(&ioat_chan->desc_lock);
180 __ioat1_dma_memcpy_issue_pending(ioat_chan); 226 __ioat1_dma_memcpy_issue_pending(ioat_chan);
181 spin_unlock_bh(&ioat_chan->desc_lock); 227 spin_unlock_bh(&ioat_chan->desc_lock);
@@ -194,13 +240,228 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
194{ 240{
195 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 241 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
196 242
197 if (ioat_chan->pending != 0) { 243 if (ioat_chan->pending > 0) {
198 spin_lock_bh(&ioat_chan->desc_lock); 244 spin_lock_bh(&ioat_chan->desc_lock);
199 __ioat2_dma_memcpy_issue_pending(ioat_chan); 245 __ioat2_dma_memcpy_issue_pending(ioat_chan);
200 spin_unlock_bh(&ioat_chan->desc_lock); 246 spin_unlock_bh(&ioat_chan->desc_lock);
201 } 247 }
202} 248}
203 249
250
251/**
252 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
253 */
254static void ioat_dma_chan_reset_part2(struct work_struct *work)
255{
256 struct ioat_dma_chan *ioat_chan =
257 container_of(work, struct ioat_dma_chan, work.work);
258 struct ioat_desc_sw *desc;
259
260 spin_lock_bh(&ioat_chan->cleanup_lock);
261 spin_lock_bh(&ioat_chan->desc_lock);
262
263 ioat_chan->completion_virt->low = 0;
264 ioat_chan->completion_virt->high = 0;
265 ioat_chan->pending = 0;
266
267 /*
268 * count the descriptors waiting, and be sure to do it
269 * right for both the CB1 line and the CB2 ring
270 */
271 ioat_chan->dmacount = 0;
272 if (ioat_chan->used_desc.prev) {
273 desc = to_ioat_desc(ioat_chan->used_desc.prev);
274 do {
275 ioat_chan->dmacount++;
276 desc = to_ioat_desc(desc->node.next);
277 } while (&desc->node != ioat_chan->used_desc.next);
278 }
279
280 /*
281 * write the new starting descriptor address
282 * this puts channel engine into ARMED state
283 */
284 desc = to_ioat_desc(ioat_chan->used_desc.prev);
285 switch (ioat_chan->device->version) {
286 case IOAT_VER_1_2:
287 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
288 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
289 writel(((u64) desc->async_tx.phys) >> 32,
290 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
291
292 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
293 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
294 break;
295 case IOAT_VER_2_0:
296 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
297 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
298 writel(((u64) desc->async_tx.phys) >> 32,
299 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
300
301 /* tell the engine to go with what's left to be done */
302 writew(ioat_chan->dmacount,
303 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
304
305 break;
306 }
307 dev_err(&ioat_chan->device->pdev->dev,
308 "chan%d reset - %d descs waiting, %d total desc\n",
309 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
310
311 spin_unlock_bh(&ioat_chan->desc_lock);
312 spin_unlock_bh(&ioat_chan->cleanup_lock);
313}
314
315/**
316 * ioat_dma_reset_channel - restart a channel
317 * @ioat_chan: IOAT DMA channel handle
318 */
319static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
320{
321 u32 chansts, chanerr;
322
323 if (!ioat_chan->used_desc.prev)
324 return;
325
326 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
327 chansts = (ioat_chan->completion_virt->low
328 & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
329 if (chanerr) {
330 dev_err(&ioat_chan->device->pdev->dev,
331 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
332 chan_num(ioat_chan), chansts, chanerr);
333 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
334 }
335
336 /*
337 * whack it upside the head with a reset
338 * and wait for things to settle out.
339 * force the pending count to a really big negative
340 * to make sure no one forces an issue_pending
341 * while we're waiting.
342 */
343
344 spin_lock_bh(&ioat_chan->desc_lock);
345 ioat_chan->pending = INT_MIN;
346 writeb(IOAT_CHANCMD_RESET,
347 ioat_chan->reg_base
348 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
349 spin_unlock_bh(&ioat_chan->desc_lock);
350
351 /* schedule the 2nd half instead of sleeping a long time */
352 schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
353}
354
355/**
356 * ioat_dma_chan_watchdog - watch for stuck channels
357 */
358static void ioat_dma_chan_watchdog(struct work_struct *work)
359{
360 struct ioatdma_device *device =
361 container_of(work, struct ioatdma_device, work.work);
362 struct ioat_dma_chan *ioat_chan;
363 int i;
364
365 union {
366 u64 full;
367 struct {
368 u32 low;
369 u32 high;
370 };
371 } completion_hw;
372 unsigned long compl_desc_addr_hw;
373
374 for (i = 0; i < device->common.chancnt; i++) {
375 ioat_chan = ioat_lookup_chan_by_index(device, i);
376
377 if (ioat_chan->device->version == IOAT_VER_1_2
378 /* have we started processing anything yet */
379 && ioat_chan->last_completion
380 /* have we completed any since last watchdog cycle? */
381 && (ioat_chan->last_completion ==
382 ioat_chan->watchdog_completion)
383 /* has TCP stuck on one cookie since last watchdog? */
384 && (ioat_chan->watchdog_tcp_cookie ==
385 ioat_chan->watchdog_last_tcp_cookie)
386 && (ioat_chan->watchdog_tcp_cookie !=
387 ioat_chan->completed_cookie)
388 /* is there something in the chain to be processed? */
389 /* CB1 chain always has at least the last one processed */
390 && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
391 && ioat_chan->pending == 0) {
392
393 /*
394 * check CHANSTS register for completed
395 * descriptor address.
396 * if it is different than completion writeback,
397 * it is not zero
398 * and it has changed since the last watchdog
399 * we can assume that channel
400 * is still working correctly
401 * and the problem is in completion writeback.
402 * update completion writeback
403 * with actual CHANSTS value
404 * else
405 * try resetting the channel
406 */
407
408 completion_hw.low = readl(ioat_chan->reg_base +
409 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
410 completion_hw.high = readl(ioat_chan->reg_base +
411 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
412#if (BITS_PER_LONG == 64)
413 compl_desc_addr_hw =
414 completion_hw.full
415 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
416#else
417 compl_desc_addr_hw =
418 completion_hw.low & IOAT_LOW_COMPLETION_MASK;
419#endif
420
421 if ((compl_desc_addr_hw != 0)
422 && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
423 && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
424 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
425 ioat_chan->completion_virt->low = completion_hw.low;
426 ioat_chan->completion_virt->high = completion_hw.high;
427 } else {
428 ioat_dma_reset_channel(ioat_chan);
429 ioat_chan->watchdog_completion = 0;
430 ioat_chan->last_compl_desc_addr_hw = 0;
431 }
432
433 /*
434 * for version 2.0 if there are descriptors yet to be processed
435 * and the last completed hasn't changed since the last watchdog
436 * if they haven't hit the pending level
437 * issue the pending to push them through
438 * else
439 * try resetting the channel
440 */
441 } else if (ioat_chan->device->version == IOAT_VER_2_0
442 && ioat_chan->used_desc.prev
443 && ioat_chan->last_completion
444 && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
445
446 if (ioat_chan->pending < ioat_pending_level)
447 ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
448 else {
449 ioat_dma_reset_channel(ioat_chan);
450 ioat_chan->watchdog_completion = 0;
451 }
452 } else {
453 ioat_chan->last_compl_desc_addr_hw = 0;
454 ioat_chan->watchdog_completion
455 = ioat_chan->last_completion;
456 }
457
458 ioat_chan->watchdog_last_tcp_cookie =
459 ioat_chan->watchdog_tcp_cookie;
460 }
461
462 schedule_delayed_work(&device->work, WATCHDOG_DELAY);
463}
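The long comment inside ioat_dma_chan_watchdog reduces to one decision for a CB1 channel that looks stuck: read the completed-descriptor address out of CHANSTS; if it is non-zero and has moved since both the completion writeback and the previous watchdog pass, the engine is still making progress and only the writeback is stale, so the writeback is refreshed from the register; otherwise the channel is reset. A compact standalone model of that decision (the struct and function names are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Per-channel state the watchdog carries between passes. */
struct chan_state {
	uint64_t completion_writeback;	/* what the engine last wrote back */
	uint64_t last_hw_compl_addr;	/* CHANSTS value seen on the previous pass */
	bool     needs_reset;
};

/* One pass over a channel that looks stuck; hw_compl_addr was just read from CHANSTS. */
static void watchdog_check(struct chan_state *st, uint64_t hw_compl_addr)
{
	if (hw_compl_addr != 0 &&
	    hw_compl_addr != st->completion_writeback &&
	    hw_compl_addr != st->last_hw_compl_addr) {
		/* Engine is progressing; only the completion writeback is stale. */
		st->last_hw_compl_addr = hw_compl_addr;
		st->completion_writeback = hw_compl_addr;
		st->needs_reset = false;
	} else {
		/* No progress visible anywhere: reset the channel. */
		st->needs_reset = true;
		st->completion_writeback = 0;
		st->last_hw_compl_addr = 0;
	}
}

int main(void)
{
	struct chan_state st = { .completion_writeback = 0x1000 };

	watchdog_check(&st, 0x2000);
	printf("moving descriptor address: needs_reset=%d\n", st.needs_reset);
	watchdog_check(&st, 0x2000);
	printf("address stuck:             needs_reset=%d\n", st.needs_reset);
	return 0;
}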
464
204static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) 465static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
205{ 466{
206 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); 467 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -250,6 +511,13 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
250 prev = new; 511 prev = new;
251 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); 512 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
252 513
514 if (!new) {
515 dev_err(&ioat_chan->device->pdev->dev,
516 "tx submit failed\n");
517 spin_unlock_bh(&ioat_chan->desc_lock);
518 return -ENOMEM;
519 }
520
253 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 521 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
254 if (new->async_tx.callback) { 522 if (new->async_tx.callback) {
255 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 523 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
@@ -335,7 +603,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
335 desc_count++; 603 desc_count++;
336 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); 604 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
337 605
338 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 606 if (!new) {
607 dev_err(&ioat_chan->device->pdev->dev,
608 "tx submit failed\n");
609 spin_unlock_bh(&ioat_chan->desc_lock);
610 return -ENOMEM;
611 }
612
613 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
339 if (new->async_tx.callback) { 614 if (new->async_tx.callback) {
340 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 615 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
341 if (first != new) { 616 if (first != new) {
@@ -406,6 +681,7 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
406 desc_sw->async_tx.tx_submit = ioat1_tx_submit; 681 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
407 break; 682 break;
408 case IOAT_VER_2_0: 683 case IOAT_VER_2_0:
684 case IOAT_VER_3_0:
409 desc_sw->async_tx.tx_submit = ioat2_tx_submit; 685 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
410 break; 686 break;
411 } 687 }
@@ -452,7 +728,8 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
452 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors 728 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
453 * @chan: the channel to be filled out 729 * @chan: the channel to be filled out
454 */ 730 */
455static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) 731static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
732 struct dma_client *client)
456{ 733{
457 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 734 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
458 struct ioat_desc_sw *desc; 735 struct ioat_desc_sw *desc;
@@ -555,6 +832,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
555 } 832 }
556 break; 833 break;
557 case IOAT_VER_2_0: 834 case IOAT_VER_2_0:
835 case IOAT_VER_3_0:
558 list_for_each_entry_safe(desc, _desc, 836 list_for_each_entry_safe(desc, _desc,
559 ioat_chan->free_desc.next, node) { 837 ioat_chan->free_desc.next, node) {
560 list_del(&desc->node); 838 list_del(&desc->node);
@@ -585,6 +863,10 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
585 ioat_chan->last_completion = ioat_chan->completion_addr = 0; 863 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
586 ioat_chan->pending = 0; 864 ioat_chan->pending = 0;
587 ioat_chan->dmacount = 0; 865 ioat_chan->dmacount = 0;
866 ioat_chan->watchdog_completion = 0;
867 ioat_chan->last_compl_desc_addr_hw = 0;
868 ioat_chan->watchdog_tcp_cookie =
869 ioat_chan->watchdog_last_tcp_cookie = 0;
588} 870}
589 871
590/** 872/**
@@ -640,7 +922,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
640 922
641 /* set up the noop descriptor */ 923 /* set up the noop descriptor */
642 noop_desc = to_ioat_desc(ioat_chan->used_desc.next); 924 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
643 noop_desc->hw->size = 0; 925 /* set size to non-zero value (channel returns error when size is 0) */
926 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
644 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; 927 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
645 noop_desc->hw->src_addr = 0; 928 noop_desc->hw->src_addr = 0;
646 noop_desc->hw->dst_addr = 0; 929 noop_desc->hw->dst_addr = 0;
@@ -690,6 +973,7 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
690 return ioat1_dma_get_next_descriptor(ioat_chan); 973 return ioat1_dma_get_next_descriptor(ioat_chan);
691 break; 974 break;
692 case IOAT_VER_2_0: 975 case IOAT_VER_2_0:
976 case IOAT_VER_3_0:
693 return ioat2_dma_get_next_descriptor(ioat_chan); 977 return ioat2_dma_get_next_descriptor(ioat_chan);
694 break; 978 break;
695 } 979 }
@@ -716,8 +1000,12 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
716 new->src = dma_src; 1000 new->src = dma_src;
717 new->async_tx.flags = flags; 1001 new->async_tx.flags = flags;
718 return &new->async_tx; 1002 return &new->async_tx;
719 } else 1003 } else {
1004 dev_err(&ioat_chan->device->pdev->dev,
1005 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1006 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
720 return NULL; 1007 return NULL;
1008 }
721} 1009}
722 1010
723static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( 1011static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -744,8 +1032,13 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
744 new->src = dma_src; 1032 new->src = dma_src;
745 new->async_tx.flags = flags; 1033 new->async_tx.flags = flags;
746 return &new->async_tx; 1034 return &new->async_tx;
747 } else 1035 } else {
1036 spin_unlock_bh(&ioat_chan->desc_lock);
1037 dev_err(&ioat_chan->device->pdev->dev,
1038 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1039 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
748 return NULL; 1040 return NULL;
1041 }
749} 1042}
750 1043
751static void ioat_dma_cleanup_tasklet(unsigned long data) 1044static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -756,6 +1049,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
756 chan->reg_base + IOAT_CHANCTRL_OFFSET); 1049 chan->reg_base + IOAT_CHANCTRL_OFFSET);
757} 1050}
758 1051
1052static void
1053ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
1054{
1055 /*
1056 * yes we are unmapping both _page and _single
1057 * alloc'd regions with unmap_page. Is this
1058 * *really* that bad?
1059 */
1060 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
1061 pci_unmap_page(ioat_chan->device->pdev,
1062 pci_unmap_addr(desc, dst),
1063 pci_unmap_len(desc, len),
1064 PCI_DMA_FROMDEVICE);
1065
1066 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
1067 pci_unmap_page(ioat_chan->device->pdev,
1068 pci_unmap_addr(desc, src),
1069 pci_unmap_len(desc, len),
1070 PCI_DMA_TODEVICE);
1071}
1072
759/** 1073/**
760 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors 1074 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
761 * @chan: ioat channel to be cleaned up 1075 * @chan: ioat channel to be cleaned up
@@ -799,11 +1113,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
799 1113
800 if (phys_complete == ioat_chan->last_completion) { 1114 if (phys_complete == ioat_chan->last_completion) {
801 spin_unlock_bh(&ioat_chan->cleanup_lock); 1115 spin_unlock_bh(&ioat_chan->cleanup_lock);
1116 /*
1117 * perhaps we're stuck so hard that the watchdog can't go off?
1118 * try to catch it after 2 seconds
1119 */
1120 if (ioat_chan->device->version != IOAT_VER_3_0) {
1121 if (time_after(jiffies,
1122 ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
1123 ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
1124 ioat_chan->last_completion_time = jiffies;
1125 }
1126 }
802 return; 1127 return;
803 } 1128 }
1129 ioat_chan->last_completion_time = jiffies;
804 1130
805 cookie = 0; 1131 cookie = 0;
806 spin_lock_bh(&ioat_chan->desc_lock); 1132 if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
1133 spin_unlock_bh(&ioat_chan->cleanup_lock);
1134 return;
1135 }
1136
807 switch (ioat_chan->device->version) { 1137 switch (ioat_chan->device->version) {
808 case IOAT_VER_1_2: 1138 case IOAT_VER_1_2:
809 list_for_each_entry_safe(desc, _desc, 1139 list_for_each_entry_safe(desc, _desc,
@@ -816,21 +1146,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
816 */ 1146 */
817 if (desc->async_tx.cookie) { 1147 if (desc->async_tx.cookie) {
818 cookie = desc->async_tx.cookie; 1148 cookie = desc->async_tx.cookie;
819 1149 ioat_dma_unmap(ioat_chan, desc);
820 /*
821 * yes we are unmapping both _page and _single
822 * alloc'd regions with unmap_page. Is this
823 * *really* that bad?
824 */
825 pci_unmap_page(ioat_chan->device->pdev,
826 pci_unmap_addr(desc, dst),
827 pci_unmap_len(desc, len),
828 PCI_DMA_FROMDEVICE);
829 pci_unmap_page(ioat_chan->device->pdev,
830 pci_unmap_addr(desc, src),
831 pci_unmap_len(desc, len),
832 PCI_DMA_TODEVICE);
833
834 if (desc->async_tx.callback) { 1150 if (desc->async_tx.callback) {
835 desc->async_tx.callback(desc->async_tx.callback_param); 1151 desc->async_tx.callback(desc->async_tx.callback_param);
836 desc->async_tx.callback = NULL; 1152 desc->async_tx.callback = NULL;
@@ -862,6 +1178,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
862 } 1178 }
863 break; 1179 break;
864 case IOAT_VER_2_0: 1180 case IOAT_VER_2_0:
1181 case IOAT_VER_3_0:
865 /* has some other thread has already cleaned up? */ 1182 /* has some other thread has already cleaned up? */
866 if (ioat_chan->used_desc.prev == NULL) 1183 if (ioat_chan->used_desc.prev == NULL)
867 break; 1184 break;
@@ -889,16 +1206,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
889 if (desc->async_tx.cookie) { 1206 if (desc->async_tx.cookie) {
890 cookie = desc->async_tx.cookie; 1207 cookie = desc->async_tx.cookie;
891 desc->async_tx.cookie = 0; 1208 desc->async_tx.cookie = 0;
892 1209 ioat_dma_unmap(ioat_chan, desc);
893 pci_unmap_page(ioat_chan->device->pdev,
894 pci_unmap_addr(desc, dst),
895 pci_unmap_len(desc, len),
896 PCI_DMA_FROMDEVICE);
897 pci_unmap_page(ioat_chan->device->pdev,
898 pci_unmap_addr(desc, src),
899 pci_unmap_len(desc, len),
900 PCI_DMA_TODEVICE);
901
902 if (desc->async_tx.callback) { 1210 if (desc->async_tx.callback) {
903 desc->async_tx.callback(desc->async_tx.callback_param); 1211 desc->async_tx.callback(desc->async_tx.callback_param);
904 desc->async_tx.callback = NULL; 1212 desc->async_tx.callback = NULL;
@@ -943,6 +1251,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
943 1251
944 last_used = chan->cookie; 1252 last_used = chan->cookie;
945 last_complete = ioat_chan->completed_cookie; 1253 last_complete = ioat_chan->completed_cookie;
1254 ioat_chan->watchdog_tcp_cookie = cookie;
946 1255
947 if (done) 1256 if (done)
948 *done = last_complete; 1257 *done = last_complete;
@@ -973,10 +1282,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
973 spin_lock_bh(&ioat_chan->desc_lock); 1282 spin_lock_bh(&ioat_chan->desc_lock);
974 1283
975 desc = ioat_dma_get_next_descriptor(ioat_chan); 1284 desc = ioat_dma_get_next_descriptor(ioat_chan);
1285
1286 if (!desc) {
1287 dev_err(&ioat_chan->device->pdev->dev,
1288 "Unable to start null desc - get next desc failed\n");
1289 spin_unlock_bh(&ioat_chan->desc_lock);
1290 return;
1291 }
1292
976 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL 1293 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
977 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN 1294 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
978 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 1295 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
979 desc->hw->size = 0; 1296 /* set size to non-zero value (channel returns error when size is 0) */
1297 desc->hw->size = NULL_DESC_BUFFER_SIZE;
980 desc->hw->src_addr = 0; 1298 desc->hw->src_addr = 0;
981 desc->hw->dst_addr = 0; 1299 desc->hw->dst_addr = 0;
982 async_tx_ack(&desc->async_tx); 1300 async_tx_ack(&desc->async_tx);
@@ -994,6 +1312,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
994 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 1312 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
995 break; 1313 break;
996 case IOAT_VER_2_0: 1314 case IOAT_VER_2_0:
1315 case IOAT_VER_3_0:
997 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, 1316 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
998 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 1317 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
999 writel(((u64) desc->async_tx.phys) >> 32, 1318 writel(((u64) desc->async_tx.phys) >> 32,
@@ -1049,7 +1368,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1049 dma_chan = container_of(device->common.channels.next, 1368 dma_chan = container_of(device->common.channels.next,
1050 struct dma_chan, 1369 struct dma_chan,
1051 device_node); 1370 device_node);
1052 if (device->common.device_alloc_chan_resources(dma_chan) < 1) { 1371 if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
1053 dev_err(&device->pdev->dev, 1372 dev_err(&device->pdev->dev,
1054 "selftest cannot allocate chan resource\n"); 1373 "selftest cannot allocate chan resource\n");
1055 err = -ENODEV; 1374 err = -ENODEV;
@@ -1312,6 +1631,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1312 ioat1_dma_memcpy_issue_pending; 1631 ioat1_dma_memcpy_issue_pending;
1313 break; 1632 break;
1314 case IOAT_VER_2_0: 1633 case IOAT_VER_2_0:
1634 case IOAT_VER_3_0:
1315 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; 1635 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1316 device->common.device_issue_pending = 1636 device->common.device_issue_pending =
1317 ioat2_dma_memcpy_issue_pending; 1637 ioat2_dma_memcpy_issue_pending;
@@ -1331,8 +1651,16 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1331 if (err) 1651 if (err)
1332 goto err_self_test; 1652 goto err_self_test;
1333 1653
1654 ioat_set_tcp_copy_break(device);
1655
1334 dma_async_device_register(&device->common); 1656 dma_async_device_register(&device->common);
1335 1657
1658 if (device->version != IOAT_VER_3_0) {
1659 INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1660 schedule_delayed_work(&device->work,
1661 WATCHDOG_DELAY);
1662 }
1663
1336 return device; 1664 return device;
1337 1665
1338err_self_test: 1666err_self_test:
@@ -1365,6 +1693,10 @@ void ioat_dma_remove(struct ioatdma_device *device)
1365 pci_release_regions(device->pdev); 1693 pci_release_regions(device->pdev);
1366 pci_disable_device(device->pdev); 1694 pci_disable_device(device->pdev);
1367 1695
1696 if (device->version != IOAT_VER_3_0) {
1697 cancel_delayed_work(&device->work);
1698 }
1699
1368 list_for_each_entry_safe(chan, _chan, 1700 list_for_each_entry_safe(chan, _chan,
1369 &device->common.channels, device_node) { 1701 &device->common.channels, device_node) {
1370 ioat_chan = to_ioat_chan(chan); 1702 ioat_chan = to_ioat_chan(chan);
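Stepping back from the per-channel details, the watchdog added by this patch is wired up with the usual self-rearming delayed-work pattern: the device embeds a struct delayed_work, the probe path arms it once (skipped for IOAT v3), each pass re-schedules itself, and the remove path cancels it. A stripped-down sketch of that lifecycle, using the real workqueue API but with placeholder mydev names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

struct mydev {
	struct delayed_work work;
};

static void mydev_watchdog(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, work.work);

	/* ... inspect channels, reset any that are stuck ... */

	/* re-arm for the next pass */
	schedule_delayed_work(&dev->work, round_jiffies(msecs_to_jiffies(2000)));
}

static void mydev_start(struct mydev *dev)
{
	INIT_DELAYED_WORK(&dev->work, mydev_watchdog);
	schedule_delayed_work(&dev->work, round_jiffies(msecs_to_jiffies(2000)));
}

static void mydev_stop(struct mydev *dev)
{
	/* stop re-arming before the device goes away
	 * (cancel_delayed_work does not wait for a pass already running) */
	cancel_delayed_work(&dev->work);
}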
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index f2c7fedbf009..a3306d0e1372 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -27,8 +27,9 @@
27#include <linux/dmapool.h> 27#include <linux/dmapool.h>
28#include <linux/cache.h> 28#include <linux/cache.h>
29#include <linux/pci_ids.h> 29#include <linux/pci_ids.h>
30#include <net/tcp.h>
30 31
31#define IOAT_DMA_VERSION "2.04" 32#define IOAT_DMA_VERSION "3.30"
32 33
33enum ioat_interrupt { 34enum ioat_interrupt {
34 none = 0, 35 none = 0,
@@ -40,6 +41,7 @@ enum ioat_interrupt {
40 41
41#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 42#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
42#define IOAT_DMA_DCA_ANY_CPU ~0 43#define IOAT_DMA_DCA_ANY_CPU ~0
44#define IOAT_WATCHDOG_PERIOD (2 * HZ)
43 45
44 46
45/** 47/**
@@ -62,6 +64,7 @@ struct ioatdma_device {
62 struct dma_device common; 64 struct dma_device common;
63 u8 version; 65 u8 version;
64 enum ioat_interrupt irq_mode; 66 enum ioat_interrupt irq_mode;
67 struct delayed_work work;
65 struct msix_entry msix_entries[4]; 68 struct msix_entry msix_entries[4];
66 struct ioat_dma_chan *idx[4]; 69 struct ioat_dma_chan *idx[4];
67}; 70};
@@ -75,6 +78,7 @@ struct ioat_dma_chan {
75 78
76 dma_cookie_t completed_cookie; 79 dma_cookie_t completed_cookie;
77 unsigned long last_completion; 80 unsigned long last_completion;
81 unsigned long last_completion_time;
78 82
79 size_t xfercap; /* XFERCAP register value expanded out */ 83 size_t xfercap; /* XFERCAP register value expanded out */
80 84
@@ -82,6 +86,10 @@ struct ioat_dma_chan {
82 spinlock_t desc_lock; 86 spinlock_t desc_lock;
83 struct list_head free_desc; 87 struct list_head free_desc;
84 struct list_head used_desc; 88 struct list_head used_desc;
89 unsigned long watchdog_completion;
90 int watchdog_tcp_cookie;
91 u32 watchdog_last_tcp_cookie;
92 struct delayed_work work;
85 93
86 int pending; 94 int pending;
87 int dmacount; 95 int dmacount;
@@ -98,6 +106,7 @@ struct ioat_dma_chan {
98 u32 high; 106 u32 high;
99 }; 107 };
100 } *completion_virt; 108 } *completion_virt;
109 unsigned long last_compl_desc_addr_hw;
101 struct tasklet_struct cleanup_task; 110 struct tasklet_struct cleanup_task;
102}; 111};
103 112
@@ -121,17 +130,34 @@ struct ioat_desc_sw {
121 struct dma_async_tx_descriptor async_tx; 130 struct dma_async_tx_descriptor async_tx;
122}; 131};
123 132
133static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
134{
135 #ifdef CONFIG_NET_DMA
136 switch (dev->version) {
137 case IOAT_VER_1_2:
138 case IOAT_VER_3_0:
139 sysctl_tcp_dma_copybreak = 4096;
140 break;
141 case IOAT_VER_2_0:
142 sysctl_tcp_dma_copybreak = 2048;
143 break;
144 }
145 #endif
146}
147
124#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) 148#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
125struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, 149struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
126 void __iomem *iobase); 150 void __iomem *iobase);
127void ioat_dma_remove(struct ioatdma_device *device); 151void ioat_dma_remove(struct ioatdma_device *device);
128struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); 152struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
129struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 153struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
154struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
130#else 155#else
131#define ioat_dma_probe(pdev, iobase) NULL 156#define ioat_dma_probe(pdev, iobase) NULL
132#define ioat_dma_remove(device) do { } while (0) 157#define ioat_dma_remove(device) do { } while (0)
133#define ioat_dca_init(pdev, iobase) NULL 158#define ioat_dca_init(pdev, iobase) NULL
134#define ioat2_dca_init(pdev, iobase) NULL 159#define ioat2_dca_init(pdev, iobase) NULL
160#define ioat3_dca_init(pdev, iobase) NULL
135#endif 161#endif
136 162
137#endif /* IOATDMA_H */ 163#endif /* IOATDMA_H */
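The bottom of ioatdma.h keeps (and extends with ioat3_dca_init) a common header idiom: when the driver is enabled, built-in or as a module, callers see the real prototypes; otherwise the very same names collapse into no-op macros, so call sites need no #ifdef of their own. A generic sketch of the idiom with placeholder CONFIG_FOO names:

/* foo.h */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
struct foo_dev *foo_probe(void __iomem *iobase);
void foo_remove(struct foo_dev *dev);
#else
/* driver not built: callers still compile, the calls become no-ops */
#define foo_probe(iobase)	NULL
#define foo_remove(dev)		do { } while (0)
#endif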
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index dd470fa91d86..f1ae2c776f74 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -35,6 +35,7 @@
35#define IOAT_PCI_SID 0x8086 35#define IOAT_PCI_SID 0x8086
36#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 36#define IOAT_VER_1_2 0x12 /* Version 1.2 */
37#define IOAT_VER_2_0 0x20 /* Version 2.0 */ 37#define IOAT_VER_2_0 0x20 /* Version 2.0 */
38#define IOAT_VER_3_0 0x30 /* Version 3.0 */
38 39
39struct ioat_dma_descriptor { 40struct ioat_dma_descriptor {
40 uint32_t size; 41 uint32_t size;
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index 9832d7ebd931..827cb503cac6 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -25,6 +25,10 @@
25#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001 25#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001
26#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002 26#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002
27 27
28#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
29#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
30#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
31
28/* MMIO Device Registers */ 32/* MMIO Device Registers */
29#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ 33#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */
30 34
@@ -149,7 +153,23 @@
149#define IOAT_DCA_GREQID_VALID 0x20000000 153#define IOAT_DCA_GREQID_VALID 0x20000000
150#define IOAT_DCA_GREQID_LASTID 0x80000000 154#define IOAT_DCA_GREQID_LASTID 0x80000000
151 155
156#define IOAT3_CSI_CAPABILITY_OFFSET 0x08
157#define IOAT3_CSI_CAPABILITY_PREFETCH 0x1
158
159#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A
160#define IOAT3_PCI_CAPABILITY_MEMWR 0x1
161
162#define IOAT3_CSI_CONTROL_OFFSET 0x0C
163#define IOAT3_CSI_CONTROL_PREFETCH 0x1
164
165#define IOAT3_PCI_CONTROL_OFFSET 0x0E
166#define IOAT3_PCI_CONTROL_MEMWR 0x1
167
168#define IOAT3_APICID_TAG_MAP_OFFSET 0x10
169#define IOAT3_APICID_TAG_MAP_OFFSET_LOW 0x10
170#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14
152 171
172#define IOAT3_DCA_GREQID_OFFSET 0x02
153 173
154#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ 174#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
155#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ 175#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 0ec0f431e6a1..85bfeba4d85e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
82 struct device *dev = 82 struct device *dev =
83 &iop_chan->device->pdev->dev; 83 &iop_chan->device->pdev->dev;
84 u32 len = unmap->unmap_len; 84 u32 len = unmap->unmap_len;
85 u32 src_cnt = unmap->unmap_src_cnt; 85 enum dma_ctrl_flags flags = desc->async_tx.flags;
86 dma_addr_t addr = iop_desc_get_dest_addr(unmap, 86 u32 src_cnt;
87 iop_chan); 87 dma_addr_t addr;
88 88
89 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); 89 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
90 while (src_cnt--) { 90 addr = iop_desc_get_dest_addr(unmap, iop_chan);
91 addr = iop_desc_get_src_addr(unmap, 91 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
92 iop_chan, 92 }
93 src_cnt); 93
94 dma_unmap_page(dev, addr, len, 94 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
95 DMA_TO_DEVICE); 95 src_cnt = unmap->unmap_src_cnt;
96 while (src_cnt--) {
97 addr = iop_desc_get_src_addr(unmap,
98 iop_chan,
99 src_cnt);
100 dma_unmap_page(dev, addr, len,
101 DMA_TO_DEVICE);
102 }
96 } 103 }
97 desc->group_head = NULL; 104 desc->group_head = NULL;
98 } 105 }
@@ -366,8 +373,8 @@ retry:
366 if (!retry++) 373 if (!retry++)
367 goto retry; 374 goto retry;
368 375
369 /* try to free some slots if the allocation fails */ 376 /* perform direct reclaim if the allocation fails */
370 tasklet_schedule(&iop_chan->irq_tasklet); 377 __iop_adma_slot_cleanup(iop_chan);
371 378
372 return NULL; 379 return NULL;
373} 380}
@@ -443,8 +450,18 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
443static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); 450static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
444static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); 451static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
445 452
446/* returns the number of allocated descriptors */ 453/**
447static int iop_adma_alloc_chan_resources(struct dma_chan *chan) 454 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
455 * @chan - allocate descriptor resources for this channel
456 * @client - current client requesting the channel be ready for requests
457 *
458 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
459 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
460 * greater than 2x the number slots needed to satisfy a device->max_xor
461 * request.
462 * */
463static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
464 struct dma_client *client)
448{ 465{
449 char *hw_desc; 466 char *hw_desc;
450 int idx; 467 int idx;
@@ -838,7 +855,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
838 dma_chan = container_of(device->common.channels.next, 855 dma_chan = container_of(device->common.channels.next,
839 struct dma_chan, 856 struct dma_chan,
840 device_node); 857 device_node);
841 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 858 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
842 err = -ENODEV; 859 err = -ENODEV;
843 goto out; 860 goto out;
844 } 861 }
@@ -936,7 +953,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
936 dma_chan = container_of(device->common.channels.next, 953 dma_chan = container_of(device->common.channels.next,
937 struct dma_chan, 954 struct dma_chan,
938 device_node); 955 device_node);
939 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 956 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
940 err = -ENODEV; 957 err = -ENODEV;
941 goto out; 958 goto out;
942 } 959 }
@@ -1387,6 +1404,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1387 spin_unlock_bh(&iop_chan->lock); 1404 spin_unlock_bh(&iop_chan->lock);
1388} 1405}
1389 1406
1407MODULE_ALIAS("platform:iop-adma");
1408
1390static struct platform_driver iop_adma_driver = { 1409static struct platform_driver iop_adma_driver = {
1391 .probe = iop_adma_probe, 1410 .probe = iop_adma_probe,
1392 .remove = iop_adma_remove, 1411 .remove = iop_adma_remove,
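One of the iop-adma changes above swaps "kick the cleanup tasklet and fail" for direct reclaim when descriptor-slot allocation comes up empty: the allocator retries once immediately, and if the pool is still exhausted it runs the cleanup itself so that freed slots are visible to the next submission rather than to some later softirq. A standalone model of that shape (alloc_slot and reclaim_completed_slots stand in for the driver's helpers):

#include <stdio.h>
#include <stddef.h>

static int free_slots;			/* pretend the pool starts exhausted */
static char slot_storage[4];

static void *alloc_slot(void)
{
	if (free_slots <= 0)
		return NULL;
	return &slot_storage[--free_slots];
}

static void reclaim_completed_slots(void)
{
	/* in the driver this walks the chain and recycles finished descriptors */
	free_slots += 2;
}

static void *get_slot(void)
{
	void *slot;
	int retried = 0;

retry:
	slot = alloc_slot();
	if (slot)
		return slot;

	if (!retried++)
		goto retry;		/* one immediate retry */

	/* still empty: reclaim completed slots so a later attempt can succeed */
	reclaim_completed_slots();
	return NULL;
}

int main(void)
{
	printf("first attempt:  %s\n", get_slot() ? "got a slot" : "empty, reclaimed");
	printf("second attempt: %s\n", get_slot() ? "got a slot" : "still empty");
	return 0;
}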
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
new file mode 100644
index 000000000000..a4e4494663bf
--- /dev/null
+++ b/drivers/dma/mv_xor.c
@@ -0,0 +1,1375 @@
1/*
2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/async_tx.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/spinlock.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/memory.h>
28#include <asm/plat-orion/mv_xor.h>
29#include "mv_xor.h"
30
31static void mv_xor_issue_pending(struct dma_chan *chan);
32
33#define to_mv_xor_chan(chan) \
34 container_of(chan, struct mv_xor_chan, common)
35
36#define to_mv_xor_device(dev) \
37 container_of(dev, struct mv_xor_device, common)
38
39#define to_mv_xor_slot(tx) \
40 container_of(tx, struct mv_xor_desc_slot, async_tx)
41
42static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
43{
44 struct mv_xor_desc *hw_desc = desc->hw_desc;
45
46 hw_desc->status = (1 << 31);
47 hw_desc->phy_next_desc = 0;
48 hw_desc->desc_command = (1 << 31);
49}
50
51static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
52{
53 struct mv_xor_desc *hw_desc = desc->hw_desc;
54 return hw_desc->phy_dest_addr;
55}
56
57static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
58 int src_idx)
59{
60 struct mv_xor_desc *hw_desc = desc->hw_desc;
61 return hw_desc->phy_src_addr[src_idx];
62}
63
64
65static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
66 u32 byte_count)
67{
68 struct mv_xor_desc *hw_desc = desc->hw_desc;
69 hw_desc->byte_count = byte_count;
70}
71
72static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
73 u32 next_desc_addr)
74{
75 struct mv_xor_desc *hw_desc = desc->hw_desc;
76 BUG_ON(hw_desc->phy_next_desc);
77 hw_desc->phy_next_desc = next_desc_addr;
78}
79
80static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
81{
82 struct mv_xor_desc *hw_desc = desc->hw_desc;
83 hw_desc->phy_next_desc = 0;
84}
85
86static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
87{
88 desc->value = val;
89}
90
91static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
92 dma_addr_t addr)
93{
94 struct mv_xor_desc *hw_desc = desc->hw_desc;
95 hw_desc->phy_dest_addr = addr;
96}
97
98static int mv_chan_memset_slot_count(size_t len)
99{
100 return 1;
101}
102
103#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
104
105static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
106 int index, dma_addr_t addr)
107{
108 struct mv_xor_desc *hw_desc = desc->hw_desc;
109 hw_desc->phy_src_addr[index] = addr;
110 if (desc->type == DMA_XOR)
111 hw_desc->desc_command |= (1 << index);
112}
113
114static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
115{
116 return __raw_readl(XOR_CURR_DESC(chan));
117}
118
119static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
120 u32 next_desc_addr)
121{
122 __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
123}
124
125static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
126{
127 __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
128}
129
130static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
131{
132 __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
133}
134
135static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
136{
137 __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
138 __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
139}
140
141static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
142{
143 u32 val = __raw_readl(XOR_INTR_MASK(chan));
144 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
145 __raw_writel(val, XOR_INTR_MASK(chan));
146}
147
148static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
149{
150 u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
151 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
152 return intr_cause;
153}
154
155static int mv_is_err_intr(u32 intr_cause)
156{
157 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
158 return 1;
159
160 return 0;
161}
162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{
165 u32 val = (1 << (1 + (chan->idx * 16)));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168}
169
170static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
171{
172 u32 val = 0xFFFF0000 >> (chan->idx * 16);
173 __raw_writel(val, XOR_INTR_CAUSE(chan));
174}
175
176static int mv_can_chain(struct mv_xor_desc_slot *desc)
177{
178 struct mv_xor_desc_slot *chain_old_tail = list_entry(
179 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
180
181 if (chain_old_tail->type != desc->type)
182 return 0;
183 if (desc->type == DMA_MEMSET)
184 return 0;
185
186 return 1;
187}
188
189static void mv_set_mode(struct mv_xor_chan *chan,
190 enum dma_transaction_type type)
191{
192 u32 op_mode;
193 u32 config = __raw_readl(XOR_CONFIG(chan));
194
195 switch (type) {
196 case DMA_XOR:
197 op_mode = XOR_OPERATION_MODE_XOR;
198 break;
199 case DMA_MEMCPY:
200 op_mode = XOR_OPERATION_MODE_MEMCPY;
201 break;
202 case DMA_MEMSET:
203 op_mode = XOR_OPERATION_MODE_MEMSET;
204 break;
205 default:
206 dev_printk(KERN_ERR, chan->device->common.dev,
207 "error: unsupported operation %d.\n",
208 type);
209 BUG();
210 return;
211 }
212
213 config &= ~0x7;
214 config |= op_mode;
215 __raw_writel(config, XOR_CONFIG(chan));
216 chan->current_type = type;
217}
218
219static void mv_chan_activate(struct mv_xor_chan *chan)
220{
221 u32 activation;
222
223 dev_dbg(chan->device->common.dev, " activate chan.\n");
224 activation = __raw_readl(XOR_ACTIVATION(chan));
225 activation |= 0x1;
226 __raw_writel(activation, XOR_ACTIVATION(chan));
227}
228
229static char mv_chan_is_busy(struct mv_xor_chan *chan)
230{
231 u32 state = __raw_readl(XOR_ACTIVATION(chan));
232
233 state = (state >> 4) & 0x3;
234
235 return (state == 1) ? 1 : 0;
236}
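/*
 * Busy test: bits 5:4 of the activation register hold the channel state,
 * and only state value 1 is reported as busy here.  The cleanup and submit
 * paths rely on this to decide whether the hardware chain may be restarted
 * or safely appended to.
 */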
237
238static int mv_chan_xor_slot_count(size_t len, int src_cnt)
239{
240 return 1;
241}
242
243/**
244 * mv_xor_free_slots - flag a descriptor slot for reuse
245 * @mv_chan: XOR channel that owns the slot pool
246 * @slot: slot to free; caller must hold &mv_chan->lock
247 */
248static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
249 struct mv_xor_desc_slot *slot)
250{
251 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
252 __func__, __LINE__, slot);
253
254 slot->slots_per_op = 0;
255
256}
257
258/*
259 * mv_xor_start_new_chain - program the engine to operate on a new chain
260 * headed by @sw_desc
261 * Caller must hold &mv_chan->lock while calling this function
262 */
263static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
264 struct mv_xor_desc_slot *sw_desc)
265{
266 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
267 __func__, __LINE__, sw_desc);
268 if (sw_desc->type != mv_chan->current_type)
269 mv_set_mode(mv_chan, sw_desc->type);
270
271 if (sw_desc->type == DMA_MEMSET) {
272 /* for memset requests we program the engine registers
273 * directly; no descriptors are used.
274 */
275 struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
276 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
277 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
278 mv_chan_set_value(mv_chan, sw_desc->value);
279 } else {
280 /* set the hardware chain */
281 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
282 }
283 mv_chan->pending += sw_desc->slot_cnt;
284 mv_xor_issue_pending(&mv_chan->common);
285}
286
287static dma_cookie_t
288mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
289 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
290{
291 BUG_ON(desc->async_tx.cookie < 0);
292
293 if (desc->async_tx.cookie > 0) {
294 cookie = desc->async_tx.cookie;
295
296 /* call the callback (must not sleep or submit new
297 * operations to this channel)
298 */
299 if (desc->async_tx.callback)
300 desc->async_tx.callback(
301 desc->async_tx.callback_param);
302
303 /* unmap dma addresses
304 * (unmap_single vs unmap_page?)
305 */
306 if (desc->group_head && desc->unmap_len) {
307 struct mv_xor_desc_slot *unmap = desc->group_head;
308 struct device *dev =
309 &mv_chan->device->pdev->dev;
310 u32 len = unmap->unmap_len;
311 enum dma_ctrl_flags flags = desc->async_tx.flags;
312 u32 src_cnt;
313 dma_addr_t addr;
314
315 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
316 addr = mv_desc_get_dest_addr(unmap);
317 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
318 }
319
320 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
321 src_cnt = unmap->unmap_src_cnt;
322 while (src_cnt--) {
323 addr = mv_desc_get_src_addr(unmap,
324 src_cnt);
325 dma_unmap_page(dev, addr, len,
326 DMA_TO_DEVICE);
327 }
328 }
329 desc->group_head = NULL;
330 }
331 }
332
333 /* run dependent operations */
334 async_tx_run_dependencies(&desc->async_tx);
335
336 return cookie;
337}
338
339static int
340mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
341{
342 struct mv_xor_desc_slot *iter, *_iter;
343
344 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
345 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
346 completed_node) {
347
348 if (async_tx_test_ack(&iter->async_tx)) {
349 list_del(&iter->completed_node);
350 mv_xor_free_slots(mv_chan, iter);
351 }
352 }
353 return 0;
354}
355
356static int
357mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
358 struct mv_xor_chan *mv_chan)
359{
360 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
361 __func__, __LINE__, desc, desc->async_tx.flags);
362 list_del(&desc->chain_node);
363 /* the client is allowed to attach dependent operations
364 * until 'ack' is set
365 */
366 if (!async_tx_test_ack(&desc->async_tx)) {
367 /* move this slot to the completed_slots */
368 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
369 return 0;
370 }
371
372 mv_xor_free_slots(mv_chan, desc);
373 return 0;
374}
375
376static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
377{
378 struct mv_xor_desc_slot *iter, *_iter;
379 dma_cookie_t cookie = 0;
380 int busy = mv_chan_is_busy(mv_chan);
381 u32 current_desc = mv_chan_get_current_desc(mv_chan);
382 int seen_current = 0;
383
384 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
385 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
386 mv_xor_clean_completed_slots(mv_chan);
387
388 /* free completed slots from the chain starting with
389 * the oldest descriptor
390 */
391
392 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
393 chain_node) {
394 prefetch(_iter);
395 prefetch(&_iter->async_tx);
396
397 /* do not advance past the current descriptor loaded into the
398 * hardware channel; subsequent descriptors are either in
399 * progress or have not been submitted
400 */
401 if (seen_current)
402 break;
403
404 /* stop the search if we reach the current descriptor and the
405 * channel is busy
406 */
407 if (iter->async_tx.phys == current_desc) {
408 seen_current = 1;
409 if (busy)
410 break;
411 }
412
413 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
414
415 if (mv_xor_clean_slot(iter, mv_chan))
416 break;
417 }
418
419 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
420 struct mv_xor_desc_slot *chain_head;
421 chain_head = list_entry(mv_chan->chain.next,
422 struct mv_xor_desc_slot,
423 chain_node);
424
425 mv_xor_start_new_chain(mv_chan, chain_head);
426 }
427
428 if (cookie > 0)
429 mv_chan->completed_cookie = cookie;
430}
431
432static void
433mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
434{
435 spin_lock_bh(&mv_chan->lock);
436 __mv_xor_slot_cleanup(mv_chan);
437 spin_unlock_bh(&mv_chan->lock);
438}
439
440static void mv_xor_tasklet(unsigned long data)
441{
442 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
443 mv_xor_slot_cleanup(chan); /* take the channel lock; avoid racing with tx_submit */
444}
445
446static struct mv_xor_desc_slot *
447mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
448 int slots_per_op)
449{
450 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
451 LIST_HEAD(chain);
452 int slots_found, retry = 0;
453
454 /* start the search from the last allocated descriptor;
455 * if a contiguous allocation cannot be found, restart the
456 * search from the beginning of the list
457 */
458retry:
459 slots_found = 0;
460 if (retry == 0)
461 iter = mv_chan->last_used;
462 else
463 iter = list_entry(&mv_chan->all_slots,
464 struct mv_xor_desc_slot,
465 slot_node);
466
467 list_for_each_entry_safe_continue(
468 iter, _iter, &mv_chan->all_slots, slot_node) {
469 prefetch(_iter);
470 prefetch(&_iter->async_tx);
471 if (iter->slots_per_op) {
472 /* give up after finding the first busy slot
473 * on the second pass through the list
474 */
475 if (retry)
476 break;
477
478 slots_found = 0;
479 continue;
480 }
481
482 /* start the allocation if the slot is correctly aligned */
483 if (!slots_found++)
484 alloc_start = iter;
485
486 if (slots_found == num_slots) {
487 struct mv_xor_desc_slot *alloc_tail = NULL;
488 struct mv_xor_desc_slot *last_used = NULL;
489 iter = alloc_start;
490 while (num_slots) {
491 int i;
492
493 /* pre-ack all but the last descriptor */
494 async_tx_ack(&iter->async_tx);
495
496 list_add_tail(&iter->chain_node, &chain);
497 alloc_tail = iter;
498 iter->async_tx.cookie = 0;
499 iter->slot_cnt = num_slots;
500 iter->xor_check_result = NULL;
501 for (i = 0; i < slots_per_op; i++) {
502 iter->slots_per_op = slots_per_op - i;
503 last_used = iter;
504 iter = list_entry(iter->slot_node.next,
505 struct mv_xor_desc_slot,
506 slot_node);
507 }
508 num_slots -= slots_per_op;
509 }
510 alloc_tail->group_head = alloc_start;
511 alloc_tail->async_tx.cookie = -EBUSY;
512 list_splice(&chain, &alloc_tail->async_tx.tx_list);
513 mv_chan->last_used = last_used;
514 mv_desc_clear_next_desc(alloc_start);
515 mv_desc_clear_next_desc(alloc_tail);
516 return alloc_tail;
517 }
518 }
519 if (!retry++)
520 goto retry;
521
522 /* try to free some slots if the allocation fails */
523 tasklet_schedule(&mv_chan->irq_tasklet);
524
525 return NULL;
526}
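/*
 * Every software descriptor maps to one fixed MV_XOR_SLOT_SIZE (64 byte)
 * slice of the coherent pool, and all of the *_slot_count() helpers in this
 * driver return 1, so num_slots and slots_per_op are effectively always 1
 * here.  The generic multi-slot search above is apparently kept in the
 * shape of the iop-adma allocator this driver is modeled on, which is why
 * it still handles the larger-than-one case.
 */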
527
528static dma_cookie_t
529mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
530 struct mv_xor_desc_slot *desc)
531{
532 dma_cookie_t cookie = mv_chan->common.cookie;
533
534 if (++cookie < 0)
535 cookie = 1;
536 mv_chan->common.cookie = desc->async_tx.cookie = cookie;
537 return cookie;
538}
539
540/************************ DMA engine API functions ****************************/
541static dma_cookie_t
542mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
543{
544 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
545 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
546 struct mv_xor_desc_slot *grp_start, *old_chain_tail;
547 dma_cookie_t cookie;
548 int new_hw_chain = 1;
549
550 dev_dbg(mv_chan->device->common.dev,
551 "%s sw_desc %p: async_tx %p\n",
552 __func__, sw_desc, &sw_desc->async_tx);
553
554 grp_start = sw_desc->group_head;
555
556 spin_lock_bh(&mv_chan->lock);
557 cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
558
559 if (list_empty(&mv_chan->chain))
560 list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
561 else {
562 new_hw_chain = 0;
563
564 old_chain_tail = list_entry(mv_chan->chain.prev,
565 struct mv_xor_desc_slot,
566 chain_node);
567 list_splice_init(&grp_start->async_tx.tx_list,
568 &old_chain_tail->chain_node);
569
570 if (!mv_can_chain(grp_start))
571 goto submit_done;
572
573 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
574 old_chain_tail->async_tx.phys);
575
576 /* fix up the hardware chain */
577 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
578
579 /* if the channel is not busy */
580 if (!mv_chan_is_busy(mv_chan)) {
581 u32 current_desc = mv_chan_get_current_desc(mv_chan);
582 /*
583 * and the current desc is the end of the chain as it was
584 * before the append, then we need to restart the channel
585 */
586 if (current_desc == old_chain_tail->async_tx.phys)
587 new_hw_chain = 1;
588 }
589 }
590
591 if (new_hw_chain)
592 mv_xor_start_new_chain(mv_chan, grp_start);
593
594submit_done:
595 spin_unlock_bh(&mv_chan->lock);
596
597 return cookie;
598}
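/*
 * Typical client sequence, as exercised by the self-tests further down
 * (a sketch of the call order, not additional driver code):
 *
 *	tx = mv_xor_prep_dma_memcpy(chan, dest_dma, src_dma, len, 0);
 *	cookie = tx->tx_submit(tx);	(ends up in mv_xor_tx_submit)
 *	mv_xor_issue_pending(chan);	(kicks the hardware)
 *	...
 *	status = mv_xor_is_complete(chan, cookie, NULL, NULL);
 *
 * Ordinary dmaengine clients reach these entry points through the
 * dma_async_* wrappers; the self-tests call them directly because they run
 * from probe, before the device is registered.
 */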
599
600/* returns the number of allocated descriptors */
601static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
602 struct dma_client *client)
603{
604 char *hw_desc;
605 int idx;
606 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
607 struct mv_xor_desc_slot *slot = NULL;
608 struct mv_xor_platform_data *plat_data =
609 mv_chan->device->pdev->dev.platform_data;
610 int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
611
612 /* Allocate descriptor slots */
613 idx = mv_chan->slots_allocated;
614 while (idx < num_descs_in_pool) {
615 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
616 if (!slot) {
617 printk(KERN_INFO "MV XOR Channel only initialized"
618 " %d descriptor slots\n", idx);
619 break;
620 }
621 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
622 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
623
624 dma_async_tx_descriptor_init(&slot->async_tx, chan);
625 slot->async_tx.tx_submit = mv_xor_tx_submit;
626 INIT_LIST_HEAD(&slot->chain_node);
627 INIT_LIST_HEAD(&slot->slot_node);
628 INIT_LIST_HEAD(&slot->async_tx.tx_list);
629 hw_desc = (char *) mv_chan->device->dma_desc_pool;
630 slot->async_tx.phys =
631 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
632 slot->idx = idx++;
633
634 spin_lock_bh(&mv_chan->lock);
635 mv_chan->slots_allocated = idx;
636 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
637 spin_unlock_bh(&mv_chan->lock);
638 }
639
640 if (mv_chan->slots_allocated && !mv_chan->last_used)
641 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
642 struct mv_xor_desc_slot,
643 slot_node);
644
645 dev_dbg(mv_chan->device->common.dev,
646 "allocated %d descriptor slots last_used: %p\n",
647 mv_chan->slots_allocated, mv_chan->last_used);
648
649 return mv_chan->slots_allocated ? : -ENOMEM;
650}
651
652static struct dma_async_tx_descriptor *
653mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
654 size_t len, unsigned long flags)
655{
656 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
657 struct mv_xor_desc_slot *sw_desc, *grp_start;
658 int slot_cnt;
659
660 dev_dbg(mv_chan->device->common.dev,
661 "%s dest: %x src %x len: %u flags: %ld\n",
662 __func__, dest, src, len, flags);
663 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
664 return NULL;
665
666 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
667
668 spin_lock_bh(&mv_chan->lock);
669 slot_cnt = mv_chan_memcpy_slot_count(len);
670 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
671 if (sw_desc) {
672 sw_desc->type = DMA_MEMCPY;
673 sw_desc->async_tx.flags = flags;
674 grp_start = sw_desc->group_head;
675 mv_desc_init(grp_start, flags);
676 mv_desc_set_byte_count(grp_start, len);
677 mv_desc_set_dest_addr(sw_desc->group_head, dest);
678 mv_desc_set_src_addr(grp_start, 0, src);
679 sw_desc->unmap_src_cnt = 1;
680 sw_desc->unmap_len = len;
681 }
682 spin_unlock_bh(&mv_chan->lock);
683
684 dev_dbg(mv_chan->device->common.dev,
685 "%s sw_desc %p async_tx %p\n",
686 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
687
688 return sw_desc ? &sw_desc->async_tx : NULL;
689}
690
691static struct dma_async_tx_descriptor *
692mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
693 size_t len, unsigned long flags)
694{
695 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
696 struct mv_xor_desc_slot *sw_desc, *grp_start;
697 int slot_cnt;
698
699 dev_dbg(mv_chan->device->common.dev,
700 "%s dest: %x len: %u flags: %ld\n",
701 __func__, dest, len, flags);
702 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
703 return NULL;
704
705 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
706
707 spin_lock_bh(&mv_chan->lock);
708 slot_cnt = mv_chan_memset_slot_count(len);
709 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
710 if (sw_desc) {
711 sw_desc->type = DMA_MEMSET;
712 sw_desc->async_tx.flags = flags;
713 grp_start = sw_desc->group_head;
714 mv_desc_init(grp_start, flags);
715 mv_desc_set_byte_count(grp_start, len);
716 mv_desc_set_dest_addr(sw_desc->group_head, dest);
717 mv_desc_set_block_fill_val(grp_start, value);
718 sw_desc->unmap_src_cnt = 1;
719 sw_desc->unmap_len = len;
720 }
721 spin_unlock_bh(&mv_chan->lock);
722 dev_dbg(mv_chan->device->common.dev,
723 "%s sw_desc %p async_tx %p \n",
724 __func__, sw_desc, &sw_desc->async_tx);
725 return sw_desc ? &sw_desc->async_tx : NULL;
726}
727
728static struct dma_async_tx_descriptor *
729mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
730 unsigned int src_cnt, size_t len, unsigned long flags)
731{
732 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
733 struct mv_xor_desc_slot *sw_desc, *grp_start;
734 int slot_cnt;
735
736 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
737 return NULL;
738
739 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
740
741 dev_dbg(mv_chan->device->common.dev,
742 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
743 __func__, src_cnt, len, dest, flags);
744
745 spin_lock_bh(&mv_chan->lock);
746 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
747 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
748 if (sw_desc) {
749 sw_desc->type = DMA_XOR;
750 sw_desc->async_tx.flags = flags;
751 grp_start = sw_desc->group_head;
752 mv_desc_init(grp_start, flags);
753 /* the byte count field is the same as in the memcpy desc */
754 mv_desc_set_byte_count(grp_start, len);
755 mv_desc_set_dest_addr(sw_desc->group_head, dest);
756 sw_desc->unmap_src_cnt = src_cnt;
757 sw_desc->unmap_len = len;
758 while (src_cnt--)
759 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
760 }
761 spin_unlock_bh(&mv_chan->lock);
762 dev_dbg(mv_chan->device->common.dev,
763 "%s sw_desc %p async_tx %p \n",
764 __func__, sw_desc, &sw_desc->async_tx);
765 return sw_desc ? &sw_desc->async_tx : NULL;
766}
767
768static void mv_xor_free_chan_resources(struct dma_chan *chan)
769{
770 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
771 struct mv_xor_desc_slot *iter, *_iter;
772 int in_use_descs = 0;
773
774 mv_xor_slot_cleanup(mv_chan);
775
776 spin_lock_bh(&mv_chan->lock);
777 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
778 chain_node) {
779 in_use_descs++;
780 list_del(&iter->chain_node);
781 }
782 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
783 completed_node) {
784 in_use_descs++;
785 list_del(&iter->completed_node);
786 }
787 list_for_each_entry_safe_reverse(
788 iter, _iter, &mv_chan->all_slots, slot_node) {
789 list_del(&iter->slot_node);
790 kfree(iter);
791 mv_chan->slots_allocated--;
792 }
793 mv_chan->last_used = NULL;
794
795 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
796 __func__, mv_chan->slots_allocated);
797 spin_unlock_bh(&mv_chan->lock);
798
799 if (in_use_descs)
800 dev_err(mv_chan->device->common.dev,
801 "freeing %d in use descriptors!\n", in_use_descs);
802}
803
804/**
805 * mv_xor_is_complete - poll the status of an XOR transaction
806 * @chan: XOR channel handle
807 * @cookie: XOR transaction identifier
808 */
809static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
810 dma_cookie_t cookie,
811 dma_cookie_t *done,
812 dma_cookie_t *used)
813{
814 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
815 dma_cookie_t last_used;
816 dma_cookie_t last_complete;
817 enum dma_status ret;
818
819 last_used = chan->cookie;
820 last_complete = mv_chan->completed_cookie;
821 mv_chan->is_complete_cookie = cookie;
822 if (done)
823 *done = last_complete;
824 if (used)
825 *used = last_used;
826
827 ret = dma_async_is_complete(cookie, last_complete, last_used);
828 if (ret == DMA_SUCCESS) {
829 mv_xor_clean_completed_slots(mv_chan);
830 return ret;
831 }
832 mv_xor_slot_cleanup(mv_chan);
833
834 last_used = chan->cookie;
835 last_complete = mv_chan->completed_cookie;
836
837 if (done)
838 *done = last_complete;
839 if (used)
840 *used = last_used;
841
842 return dma_async_is_complete(cookie, last_complete, last_used);
843}
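/*
 * Note: the first completion check is made against the cached cookies; only
 * when the transaction is still outstanding does the function pay for a
 * full slot cleanup and then re-read them.  is_complete_cookie exists for
 * the USE_TIMER instrumentation declared in mv_xor.h and is not read
 * anywhere in this file.
 */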
844
845static void mv_dump_xor_regs(struct mv_xor_chan *chan)
846{
847 u32 val;
848
849 val = __raw_readl(XOR_CONFIG(chan));
850 dev_printk(KERN_ERR, chan->device->common.dev,
851 "config 0x%08x.\n", val);
852
853 val = __raw_readl(XOR_ACTIVATION(chan));
854 dev_printk(KERN_ERR, chan->device->common.dev,
855 "activation 0x%08x.\n", val);
856
857 val = __raw_readl(XOR_INTR_CAUSE(chan));
858 dev_printk(KERN_ERR, chan->device->common.dev,
859 "intr cause 0x%08x.\n", val);
860
861 val = __raw_readl(XOR_INTR_MASK(chan));
862 dev_printk(KERN_ERR, chan->device->common.dev,
863 "intr mask 0x%08x.\n", val);
864
865 val = __raw_readl(XOR_ERROR_CAUSE(chan));
866 dev_printk(KERN_ERR, chan->device->common.dev,
867 "error cause 0x%08x.\n", val);
868
869 val = __raw_readl(XOR_ERROR_ADDR(chan));
870 dev_printk(KERN_ERR, chan->device->common.dev,
871 "error addr 0x%08x.\n", val);
872}
873
874static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
875 u32 intr_cause)
876{
877 if (intr_cause & (1 << 4)) {
878 dev_dbg(chan->device->common.dev,
879 "ignore this error\n");
880 return;
881 }
882
883 dev_printk(KERN_ERR, chan->device->common.dev,
884 "error on chan %d. intr cause 0x%08x.\n",
885 chan->idx, intr_cause);
886
887 mv_dump_xor_regs(chan);
888 BUG();
889}
890
891static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
892{
893 struct mv_xor_chan *chan = data;
894 u32 intr_cause = mv_chan_get_intr_cause(chan);
895
896 dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
897
898 if (mv_is_err_intr(intr_cause))
899 mv_xor_err_interrupt_handler(chan, intr_cause);
900
901 tasklet_schedule(&chan->irq_tasklet);
902
903 mv_xor_device_clear_eoc_cause(chan);
904
905 return IRQ_HANDLED;
906}
907
908static void mv_xor_issue_pending(struct dma_chan *chan)
909{
910 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
911
912 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
913 mv_chan->pending = 0;
914 mv_chan_activate(mv_chan);
915 }
916}
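/*
 * With MV_XOR_THRESHOLD defined as 1 in mv_xor.h, any pending descriptor
 * causes an immediate channel activation here; a larger threshold would
 * batch several descriptors per activation at the cost of latency.
 */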
917
918/*
919 * Perform a transaction to verify the HW works.
920 */
921#define MV_XOR_TEST_SIZE 2000
922
923static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
924{
925 int i;
926 void *src, *dest;
927 dma_addr_t src_dma, dest_dma;
928 struct dma_chan *dma_chan;
929 dma_cookie_t cookie;
930 struct dma_async_tx_descriptor *tx;
931 int err = 0;
932 struct mv_xor_chan *mv_chan;
933
934 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
935 if (!src)
936 return -ENOMEM;
937
938 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
939 if (!dest) {
940 kfree(src);
941 return -ENOMEM;
942 }
943
944 /* Fill in src buffer */
945 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
946 ((u8 *) src)[i] = (u8)i;
947
948 /* Start copy, using first DMA channel */
949 dma_chan = container_of(device->common.channels.next,
950 struct dma_chan,
951 device_node);
952 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
953 err = -ENODEV;
954 goto out;
955 }
956
957 dest_dma = dma_map_single(dma_chan->device->dev, dest,
958 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
959
960 src_dma = dma_map_single(dma_chan->device->dev, src,
961 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
962
963 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
964 MV_XOR_TEST_SIZE, 0);
965 cookie = mv_xor_tx_submit(tx);
966 mv_xor_issue_pending(dma_chan);
967 async_tx_ack(tx);
968 msleep(1);
969
970 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
971 DMA_SUCCESS) {
972 dev_printk(KERN_ERR, dma_chan->device->dev,
973 "Self-test copy timed out, disabling\n");
974 err = -ENODEV;
975 goto free_resources;
976 }
977
978 mv_chan = to_mv_xor_chan(dma_chan);
979 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
980 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
981 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
982 dev_printk(KERN_ERR, dma_chan->device->dev,
983 "Self-test copy failed compare, disabling\n");
984 err = -ENODEV;
985 goto free_resources;
986 }
987
988free_resources:
989 mv_xor_free_chan_resources(dma_chan);
990out:
991 kfree(src);
992 kfree(dest);
993 return err;
994}
995
996#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
997static int __devinit
998mv_xor_xor_self_test(struct mv_xor_device *device)
999{
1000 int i, src_idx;
1001 struct page *dest;
1002 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
1003 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
1004 dma_addr_t dest_dma;
1005 struct dma_async_tx_descriptor *tx;
1006 struct dma_chan *dma_chan;
1007 dma_cookie_t cookie;
1008 u8 cmp_byte = 0;
1009 u32 cmp_word;
1010 int err = 0;
1011 struct mv_xor_chan *mv_chan;
1012
1013 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1014 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1015 if (!xor_srcs[src_idx]) {
1016 while (src_idx--)
1017 __free_page(xor_srcs[src_idx]);
1018 return -ENOMEM;
1019 }
1020 }
1021
1022 dest = alloc_page(GFP_KERNEL);
1023 if (!dest) {
1024 while (src_idx--)
1025 __free_page(xor_srcs[src_idx]);
1026 return -ENOMEM;
1027 }
1028
1029 /* Fill in src buffers */
1030 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1031 u8 *ptr = page_address(xor_srcs[src_idx]);
1032 for (i = 0; i < PAGE_SIZE; i++)
1033 ptr[i] = (1 << src_idx);
1034 }
1035
1036 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1037 cmp_byte ^= (u8) (1 << src_idx);
1038
1039 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1040 (cmp_byte << 8) | cmp_byte;
1041
1042 memset(page_address(dest), 0, PAGE_SIZE);
1043
1044 dma_chan = container_of(device->common.channels.next,
1045 struct dma_chan,
1046 device_node);
1047 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
1048 err = -ENODEV;
1049 goto out;
1050 }
1051
1052 /* test xor */
1053 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1054 DMA_FROM_DEVICE);
1055
1056 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1057 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1058 0, PAGE_SIZE, DMA_TO_DEVICE);
1059
1060 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1061 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1062
1063 cookie = mv_xor_tx_submit(tx);
1064 mv_xor_issue_pending(dma_chan);
1065 async_tx_ack(tx);
1066 msleep(8);
1067
1068 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
1069 DMA_SUCCESS) {
1070 dev_printk(KERN_ERR, dma_chan->device->dev,
1071 "Self-test xor timed out, disabling\n");
1072 err = -ENODEV;
1073 goto free_resources;
1074 }
1075
1076 mv_chan = to_mv_xor_chan(dma_chan);
1077 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1078 PAGE_SIZE, DMA_FROM_DEVICE);
1079 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1080 u32 *ptr = page_address(dest);
1081 if (ptr[i] != cmp_word) {
1082 dev_printk(KERN_ERR, dma_chan->device->dev,
1083 "Self-test xor failed compare, disabling."
1084 " index %d, data %x, expected %x\n", i,
1085 ptr[i], cmp_word);
1086 err = -ENODEV;
1087 goto free_resources;
1088 }
1089 }
1090
1091free_resources:
1092 mv_xor_free_chan_resources(dma_chan);
1093out:
1094 src_idx = MV_XOR_NUM_SRC_TEST;
1095 while (src_idx--)
1096 __free_page(xor_srcs[src_idx]);
1097 __free_page(dest);
1098 return err;
1099}
1100
1101static int __devexit mv_xor_remove(struct platform_device *dev)
1102{
1103 struct mv_xor_device *device = platform_get_drvdata(dev);
1104 struct dma_chan *chan, *_chan;
1105 struct mv_xor_chan *mv_chan;
1106 struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
1107
1108 dma_async_device_unregister(&device->common);
1109
1110 dma_free_coherent(&dev->dev, plat_data->pool_size,
1111 device->dma_desc_pool_virt, device->dma_desc_pool);
1112
1113 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1114 device_node) {
1115 mv_chan = to_mv_xor_chan(chan);
1116 list_del(&chan->device_node);
1117 }
1118
1119 return 0;
1120}
1121
1122static int __devinit mv_xor_probe(struct platform_device *pdev)
1123{
1124 int ret = 0;
1125 int irq;
1126 struct mv_xor_device *adev;
1127 struct mv_xor_chan *mv_chan;
1128 struct dma_device *dma_dev;
1129 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1130
1131
1132 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1133 if (!adev)
1134 return -ENOMEM;
1135
1136 dma_dev = &adev->common;
1137
1138 /* allocate coherent memory for hardware descriptors
1139 * note: writecombine gives slightly better performance, but
1140 * requires that we explicitly flush the writes
1141 */
1142 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1143 plat_data->pool_size,
1144 &adev->dma_desc_pool,
1145 GFP_KERNEL);
1146 if (!adev->dma_desc_pool_virt)
1147 return -ENOMEM;
1148
1149 adev->id = plat_data->hw_id;
1150
1151 /* discover transaction capabilities from the platform data */
1152 dma_dev->cap_mask = plat_data->cap_mask;
1153 adev->pdev = pdev;
1154 platform_set_drvdata(pdev, adev);
1155
1156 adev->shared = platform_get_drvdata(plat_data->shared);
1157
1158 INIT_LIST_HEAD(&dma_dev->channels);
1159
1160 /* set base routines */
1161 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1162 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1163 dma_dev->device_is_tx_complete = mv_xor_is_complete;
1164 dma_dev->device_issue_pending = mv_xor_issue_pending;
1165 dma_dev->dev = &pdev->dev;
1166
1167 /* set prep routines based on capability */
1168 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1169 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1170 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1171 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1172 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1173 dma_dev->max_xor = 8;
1174 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1175 }
1176
1177 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1178 if (!mv_chan) {
1179 ret = -ENOMEM;
1180 goto err_free_dma;
1181 }
1182 mv_chan->device = adev;
1183 mv_chan->idx = plat_data->hw_id;
1184 mv_chan->mmr_base = adev->shared->xor_base;
1185
1186 if (!mv_chan->mmr_base) {
1187 ret = -ENOMEM;
1188 goto err_free_dma;
1189 }
1190 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1191 mv_chan);
1192
1193 /* clear errors before enabling interrupts */
1194 mv_xor_device_clear_err_status(mv_chan);
1195
1196 irq = platform_get_irq(pdev, 0);
1197 if (irq < 0) {
1198 ret = irq;
1199 goto err_free_dma;
1200 }
1201 ret = devm_request_irq(&pdev->dev, irq,
1202 mv_xor_interrupt_handler,
1203 0, dev_name(&pdev->dev), mv_chan);
1204 if (ret)
1205 goto err_free_dma;
1206
1207 mv_chan_unmask_interrupts(mv_chan);
1208
1209 mv_set_mode(mv_chan, DMA_MEMCPY);
1210
1211 spin_lock_init(&mv_chan->lock);
1212 INIT_LIST_HEAD(&mv_chan->chain);
1213 INIT_LIST_HEAD(&mv_chan->completed_slots);
1214 INIT_LIST_HEAD(&mv_chan->all_slots);
1215 INIT_RCU_HEAD(&mv_chan->common.rcu);
1216 mv_chan->common.device = dma_dev;
1217
1218 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1219
1220 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1221 ret = mv_xor_memcpy_self_test(adev);
1222 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1223 if (ret)
1224 goto err_free_dma;
1225 }
1226
1227 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1228 ret = mv_xor_xor_self_test(adev);
1229 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1230 if (ret)
1231 goto err_free_dma;
1232 }
1233
1234 dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
1235 "( %s%s%s%s)\n",
1236 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1237 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1238 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1239 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1240
1241 dma_async_device_register(dma_dev);
1242 goto out;
1243
1244 err_free_dma:
1245 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1246 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1247 out:
1248 return ret;
1249}
1250
1251static void
1252mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
1253 struct mbus_dram_target_info *dram)
1254{
1255 void __iomem *base = msp->xor_base;
1256 u32 win_enable = 0;
1257 int i;
1258
1259 for (i = 0; i < 8; i++) {
1260 writel(0, base + WINDOW_BASE(i));
1261 writel(0, base + WINDOW_SIZE(i));
1262 if (i < 4)
1263 writel(0, base + WINDOW_REMAP_HIGH(i));
1264 }
1265
1266 for (i = 0; i < dram->num_cs; i++) {
1267 struct mbus_dram_window *cs = dram->cs + i;
1268
1269 writel((cs->base & 0xffff0000) |
1270 (cs->mbus_attr << 8) |
1271 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1272 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1273
1274 win_enable |= (1 << i);
1275 win_enable |= 3 << (16 + (2 * i));
1276 }
1277
1278 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1279 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1280}
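/*
 * The eight windows programmed above give the XOR engine its own view of
 * the MBUS address map: one window per DRAM chip-select described in
 * mbus_dram_target_info, with the unused windows cleared and with what
 * appears to be the per-window access-protection field set to full access
 * (the 3 << (16 + 2 * i) term).  mv_xor_shared_probe() only calls this when
 * the platform data supplies a dram descriptor.
 */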
1281
1282static struct platform_driver mv_xor_driver = {
1283 .probe = mv_xor_probe,
1284 .remove = mv_xor_remove,
1285 .driver = {
1286 .owner = THIS_MODULE,
1287 .name = MV_XOR_NAME,
1288 },
1289};
1290
1291static int mv_xor_shared_probe(struct platform_device *pdev)
1292{
1293 struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
1294 struct mv_xor_shared_private *msp;
1295 struct resource *res;
1296
1297 dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
1298
1299 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
1300 if (!msp)
1301 return -ENOMEM;
1302
1303 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1304 if (!res)
1305 return -ENODEV;
1306
1307 msp->xor_base = devm_ioremap(&pdev->dev, res->start,
1308 res->end - res->start + 1);
1309 if (!msp->xor_base)
1310 return -EBUSY;
1311
1312 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1313 if (!res)
1314 return -ENODEV;
1315
1316 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1317 res->end - res->start + 1);
1318 if (!msp->xor_high_base)
1319 return -EBUSY;
1320
1321 platform_set_drvdata(pdev, msp);
1322
1323 /*
1324 * (Re-)program MBUS remapping windows if we are asked to.
1325 */
1326 if (msd != NULL && msd->dram != NULL)
1327 mv_xor_conf_mbus_windows(msp, msd->dram);
1328
1329 return 0;
1330}
1331
1332static int mv_xor_shared_remove(struct platform_device *pdev)
1333{
1334 return 0;
1335}
1336
1337static struct platform_driver mv_xor_shared_driver = {
1338 .probe = mv_xor_shared_probe,
1339 .remove = mv_xor_shared_remove,
1340 .driver = {
1341 .owner = THIS_MODULE,
1342 .name = MV_XOR_SHARED_NAME,
1343 },
1344};
1345
1346
1347static int __init mv_xor_init(void)
1348{
1349 int rc;
1350
1351 rc = platform_driver_register(&mv_xor_shared_driver);
1352 if (!rc) {
1353 rc = platform_driver_register(&mv_xor_driver);
1354 if (rc)
1355 platform_driver_unregister(&mv_xor_shared_driver);
1356 }
1357 return rc;
1358}
1359module_init(mv_xor_init);
1360
1361/* it's currently unsafe to unload this module */
1362#if 0
1363static void __exit mv_xor_exit(void)
1364{
1365 platform_driver_unregister(&mv_xor_driver);
1366 platform_driver_unregister(&mv_xor_shared_driver);
1367 return;
1368}
1369
1370module_exit(mv_xor_exit);
1371#endif
1372
1373MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1374MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1375MODULE_LICENSE("GPL");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
new file mode 100644
index 000000000000..06cafe1ef521
--- /dev/null
+++ b/drivers/dma/mv_xor.h
@@ -0,0 +1,183 @@
1/*
2 * Copyright (C) 2007, 2008, Marvell International Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18#ifndef MV_XOR_H
19#define MV_XOR_H
20
21#include <linux/types.h>
22#include <linux/io.h>
23#include <linux/dmaengine.h>
24#include <linux/interrupt.h>
25
26#define USE_TIMER
27#define MV_XOR_SLOT_SIZE 64
28#define MV_XOR_THRESHOLD 1
29
30#define XOR_OPERATION_MODE_XOR 0
31#define XOR_OPERATION_MODE_MEMCPY 2
32#define XOR_OPERATION_MODE_MEMSET 4
33
34#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
35#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
36#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
37#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
38#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
39#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
40#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
41
42#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
43#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
44#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30)
45#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
46#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
47#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
48#define XOR_INTR_MASK_VALUE 0x3F5
49
50#define WINDOW_BASE(w) (0x250 + ((w) << 2))
51#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
52#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
53#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
54
55struct mv_xor_shared_private {
56 void __iomem *xor_base;
57 void __iomem *xor_high_base;
58};
59
60
61/**
62 * struct mv_xor_device - internal representation of a XOR device
63 * @pdev: Platform device
64 * @id: HW XOR Device selector
65 * @dma_desc_pool: base of DMA descriptor region (DMA address)
66 * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
67 * @common: embedded struct dma_device
68 */
69struct mv_xor_device {
70 struct platform_device *pdev;
71 int id;
72 dma_addr_t dma_desc_pool;
73 void *dma_desc_pool_virt;
74 struct dma_device common;
75 struct mv_xor_shared_private *shared;
76};
77
78/**
79 * struct mv_xor_chan - internal representation of a XOR channel
80 * @pending: allows batching of hardware operations
81 * @completed_cookie: identifier for the most recently completed operation
82 * @lock: serializes enqueue/dequeue operations to the descriptors pool
83 * @mmr_base: memory mapped register base
84 * @idx: the index of the xor channel
85 * @chain: device chain view of the descriptors
86 * @completed_slots: slots completed by HW but still need to be acked
87 * @device: parent device
88 * @common: common dmaengine channel object members
89 * @last_used: place holder for allocation to continue from where it left off
90 * @all_slots: complete domain of slots usable by the channel
91 * @slots_allocated: records the actual size of the descriptor slot pool
92 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
93 */
94struct mv_xor_chan {
95 int pending;
96 dma_cookie_t completed_cookie;
97 spinlock_t lock; /* protects the descriptor slot pool */
98 void __iomem *mmr_base;
99 unsigned int idx;
100 enum dma_transaction_type current_type;
101 struct list_head chain;
102 struct list_head completed_slots;
103 struct mv_xor_device *device;
104 struct dma_chan common;
105 struct mv_xor_desc_slot *last_used;
106 struct list_head all_slots;
107 int slots_allocated;
108 struct tasklet_struct irq_tasklet;
109#ifdef USE_TIMER
110 unsigned long cleanup_time;
111 u32 current_on_last_cleanup;
112 dma_cookie_t is_complete_cookie;
113#endif
114};
115
116/**
117 * struct mv_xor_desc_slot - software descriptor
118 * @slot_node: node on the mv_xor_chan.all_slots list
119 * @chain_node: node on the mv_xor_chan.chain list
120 * @completed_node: node on the mv_xor_chan.completed_slots list
121 * @hw_desc: virtual address of the hardware descriptor chain
122 * @phys: hardware address of the hardware descriptor chain
123 * @group_head: first operation in a transaction
124 * @slot_cnt: total slots used in a transaction (group of operations)
125 * @slots_per_op: number of slots per operation
126 * @idx: pool index
127 * @unmap_src_cnt: number of xor sources
128 * @unmap_len: transaction bytecount
129 * @async_tx: support for the async_tx api
130 * @group_list: list of slots that make up a multi-descriptor transaction
131 * for example transfer lengths larger than the supported hw max
132 * @xor_check_result: result of zero sum
133 * @crc32_result: result of the CRC-32 calculation
134 */
135struct mv_xor_desc_slot {
136 struct list_head slot_node;
137 struct list_head chain_node;
138 struct list_head completed_node;
139 enum dma_transaction_type type;
140 void *hw_desc;
141 struct mv_xor_desc_slot *group_head;
142 u16 slot_cnt;
143 u16 slots_per_op;
144 u16 idx;
145 u16 unmap_src_cnt;
146 u32 value;
147 size_t unmap_len;
148 struct dma_async_tx_descriptor async_tx;
149 union {
150 u32 *xor_check_result;
151 u32 *crc32_result;
152 };
153#ifdef USE_TIMER
154 unsigned long arrival_time;
155 struct timer_list timeout;
156#endif
157};
158
159/* This structure describes the 64-byte hardware XOR descriptor */
160struct mv_xor_desc {
161 u32 status; /* descriptor execution status */
162 u32 crc32_result; /* result of CRC-32 calculation */
163 u32 desc_command; /* type of operation to be carried out */
164 u32 phy_next_desc; /* next descriptor address pointer */
165 u32 byte_count; /* size of src/dst blocks in bytes */
166 u32 phy_dest_addr; /* destination block address */
167 u32 phy_src_addr[8]; /* source block addresses */
168 u32 reserved0;
169 u32 reserved1;
170};
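/*
 * Sixteen u32 fields, i.e. exactly 64 bytes, matching the MV_XOR_SLOT_SIZE
 * that the driver uses to carve descriptors out of the coherent pool.
 */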
171
172#define to_mv_sw_desc(addr_hw_desc) \
173 container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
174
175#define mv_hw_desc_slot_idx(hw_desc, idx) \
176 ((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
177
178#define MV_XOR_MIN_BYTE_COUNT (128)
179#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1)
180#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT
181
182
183#endif
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 6e6c3c4aea6b..5a11e3cbcae2 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -123,6 +123,13 @@ config EDAC_I5000
123 Support for error detection and correction the Intel 123 Support for error detection and correction the Intel
124 Greekcreek/Blackford chipsets. 124 Greekcreek/Blackford chipsets.
125 125
126config EDAC_I5100
127 tristate "Intel San Clemente MCH"
128 depends on EDAC_MM_EDAC && X86 && PCI
129 help
130 Support for error detection and correction the Intel
131 San Clemente MCH.
132
126config EDAC_MPC85XX 133config EDAC_MPC85XX
127 tristate "Freescale MPC85xx" 134 tristate "Freescale MPC85xx"
128 depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx 135 depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 83807731d4a9..e5e9104b5520 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -19,6 +19,7 @@ endif
19 19
20obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o 20obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
21obj-$(CONFIG_EDAC_I5000) += i5000_edac.o 21obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
22obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
22obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o 23obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
23obj-$(CONFIG_EDAC_E752X) += e752x_edac.o 24obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
24obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o 25obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index c94a0eb492cb..facfdb1fa71c 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -28,6 +28,7 @@
28#define E752X_REVISION " Ver: 2.0.2 " __DATE__ 28#define E752X_REVISION " Ver: 2.0.2 " __DATE__
29#define EDAC_MOD_STR "e752x_edac" 29#define EDAC_MOD_STR "e752x_edac"
30 30
31static int report_non_memory_errors;
31static int force_function_unhide; 32static int force_function_unhide;
32static int sysbus_parity = -1; 33static int sysbus_parity = -1;
33 34
@@ -117,7 +118,7 @@ static struct edac_pci_ctl_info *e752x_pci;
117#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */ 118#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
118#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */ 119#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
119#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */ 120#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
120#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI command reg (8b) */ 121#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
121#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */ 122#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
122#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */ 123#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
123#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */ 124#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
@@ -127,7 +128,7 @@ static struct edac_pci_ctl_info *e752x_pci;
127 /* error address register (32b) */ 128 /* error address register (32b) */
128 /* 129 /*
129 * 31 Reserved 130 * 31 Reserved
130 * 30:2 CE address (64 byte block 34:6) 131 * 30:2 CE address (64 byte block 34:6
131 * 1 Reserved 132 * 1 Reserved
132 * 0 HiLoCS 133 * 0 HiLoCS
133 */ 134 */
@@ -147,11 +148,11 @@ static struct edac_pci_ctl_info *e752x_pci;
147 * 1 Reserved 148 * 1 Reserved
148 * 0 HiLoCS 149 * 0 HiLoCS
149 */ 150 */
150#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM first uncorrectable scrub memory */ 151#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
151 /* error address register (32b) */ 152 /* error address register (32b) */
152 /* 153 /*
153 * 31 Reserved 154 * 31 Reserved
154 * 30:2 CE address (64 byte block 34:6) 155 * 30:2 CE address (64 byte block 34:6
155 * 1 Reserved 156 * 1 Reserved
156 * 0 HiLoCS 157 * 0 HiLoCS
157 */ 158 */
@@ -394,9 +395,12 @@ static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
394 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 395 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
395 396
396 error_1b = retry_add; 397 error_1b = retry_add;
397 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ 398 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
398 row = pvt->mc_symmetric ? ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ 399
400 /* chip select are bits 14 & 13 */
401 row = pvt->mc_symmetric ? ((page >> 1) & 3) :
399 edac_mc_find_csrow_by_page(mci, page); 402 edac_mc_find_csrow_by_page(mci, page);
403
400 e752x_mc_printk(mci, KERN_WARNING, 404 e752x_mc_printk(mci, KERN_WARNING,
401 "CE page 0x%lx, row %d : Memory read retry\n", 405 "CE page 0x%lx, row %d : Memory read retry\n",
402 (long unsigned int)page, row); 406 (long unsigned int)page, row);
@@ -422,12 +426,21 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
422} 426}
423 427
424static char *global_message[11] = { 428static char *global_message[11] = {
425 "PCI Express C1", "PCI Express C", "PCI Express B1", 429 "PCI Express C1",
426 "PCI Express B", "PCI Express A1", "PCI Express A", 430 "PCI Express C",
427 "DMA Controler", "HUB or NS Interface", "System Bus", 431 "PCI Express B1",
428 "DRAM Controler", "Internal Buffer" 432 "PCI Express B",
433 "PCI Express A1",
434 "PCI Express A",
435 "DMA Controller",
436 "HUB or NS Interface",
437 "System Bus",
438 "DRAM Controller", /* 9th entry */
439 "Internal Buffer"
429}; 440};
430 441
442#define DRAM_ENTRY 9
443
431static char *fatal_message[2] = { "Non-Fatal ", "Fatal " }; 444static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
432 445
433static void do_global_error(int fatal, u32 errors) 446static void do_global_error(int fatal, u32 errors)
@@ -435,9 +448,16 @@ static void do_global_error(int fatal, u32 errors)
435 int i; 448 int i;
436 449
437 for (i = 0; i < 11; i++) { 450 for (i = 0; i < 11; i++) {
438 if (errors & (1 << i)) 451 if (errors & (1 << i)) {
439 e752x_printk(KERN_WARNING, "%sError %s\n", 452 /* If the error is from DRAM Controller OR
440 fatal_message[fatal], global_message[i]); 453 * we are to report ALL errors, then
454 * report the error
455 */
456 if ((i == DRAM_ENTRY) || report_non_memory_errors)
457 e752x_printk(KERN_WARNING, "%sError %s\n",
458 fatal_message[fatal],
459 global_message[i]);
460 }
441 } 461 }
442} 462}
443 463
@@ -1021,7 +1041,7 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1021 struct pci_dev *dev; 1041 struct pci_dev *dev;
1022 1042
1023 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 1043 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
1024 pvt->dev_info->err_dev, pvt->bridge_ck); 1044 pvt->dev_info->err_dev, pvt->bridge_ck);
1025 1045
1026 if (pvt->bridge_ck == NULL) 1046 if (pvt->bridge_ck == NULL)
1027 pvt->bridge_ck = pci_scan_single_device(pdev->bus, 1047 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
@@ -1034,8 +1054,9 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1034 return 1; 1054 return 1;
1035 } 1055 }
1036 1056
1037 dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, 1057 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
1038 NULL); 1058 e752x_devs[dev_idx].ctl_dev,
1059 NULL);
1039 1060
1040 if (dev == NULL) 1061 if (dev == NULL)
1041 goto fail; 1062 goto fail;
@@ -1316,7 +1337,8 @@ MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
1316 1337
1317module_param(force_function_unhide, int, 0444); 1338module_param(force_function_unhide, int, 0444);
1318MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" 1339MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
1319 " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access"); 1340 " 1=force unhide and hope BIOS doesn't fight driver for "
1341 "Dev0:Fun1 access");
1320 1342
1321module_param(edac_op_state, int, 0444); 1343module_param(edac_op_state, int, 0444);
1322MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1344MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
@@ -1324,3 +1346,6 @@ MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1324module_param(sysbus_parity, int, 0444); 1346module_param(sysbus_parity, int, 0444);
1325MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking," 1347MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
1326 " 1=enable system bus parity checking, default=auto-detect"); 1348 " 1=enable system bus parity checking, default=auto-detect");
1349module_param(report_non_memory_errors, int, 0644);
1350MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
1351 "reporting, 1=enable non-memory error reporting");
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 021d18795145..ad218fe4942d 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -44,6 +44,25 @@ int edac_mc_get_poll_msec(void)
44 return edac_mc_poll_msec; 44 return edac_mc_poll_msec;
45} 45}
46 46
47static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
48{
49 long l;
50 int ret;
51
52 if (!val)
53 return -EINVAL;
54
55 ret = strict_strtol(val, 0, &l);
56 if (ret == -EINVAL || ((int)l != l))
57 return -EINVAL;
58 *((int *)kp->arg) = l;
59
60 /* notify edac_mc engine to reset the poll period */
61 edac_mc_reset_delay_period(l);
62
63 return 0;
64}
65
47/* Parameter declarations for above */ 66/* Parameter declarations for above */
48module_param(edac_mc_panic_on_ue, int, 0644); 67module_param(edac_mc_panic_on_ue, int, 0644);
49MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); 68MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
@@ -53,7 +72,8 @@ MODULE_PARM_DESC(edac_mc_log_ue,
53module_param(edac_mc_log_ce, int, 0644); 72module_param(edac_mc_log_ce, int, 0644);
54MODULE_PARM_DESC(edac_mc_log_ce, 73MODULE_PARM_DESC(edac_mc_log_ce,
55 "Log correctable error to console: 0=off 1=on"); 74 "Log correctable error to console: 0=off 1=on");
56module_param(edac_mc_poll_msec, int, 0644); 75module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
76 &edac_mc_poll_msec, 0644);
57MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); 77MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
58 78
59/* 79/*
@@ -103,16 +123,6 @@ static const char *edac_caps[] = {
103 123
104 124
105 125
106/*
107 * /sys/devices/system/edac/mc;
108 * data structures and methods
109 */
110static ssize_t memctrl_int_show(void *ptr, char *buffer)
111{
112 int *value = (int *)ptr;
113 return sprintf(buffer, "%u\n", *value);
114}
115
116static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count) 126static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
117{ 127{
118 int *value = (int *)ptr; 128 int *value = (int *)ptr;
@@ -123,23 +133,6 @@ static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
123 return count; 133 return count;
124} 134}
125 135
126/*
127 * mc poll_msec time value
128 */
129static ssize_t poll_msec_int_store(void *ptr, const char *buffer, size_t count)
130{
131 int *value = (int *)ptr;
132
133 if (isdigit(*buffer)) {
134 *value = simple_strtoul(buffer, NULL, 0);
135
136 /* notify edac_mc engine to reset the poll period */
137 edac_mc_reset_delay_period(*value);
138 }
139
140 return count;
141}
142
143 136
144/* EDAC sysfs CSROW data structures and methods 137/* EDAC sysfs CSROW data structures and methods
145 */ 138 */
@@ -185,7 +178,11 @@ static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
185static ssize_t channel_dimm_label_show(struct csrow_info *csrow, 178static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
186 char *data, int channel) 179 char *data, int channel)
187{ 180{
188 return snprintf(data, EDAC_MC_LABEL_LEN, "%s", 181 /* if field has not been initialized, there is nothing to send */
182 if (!csrow->channels[channel].label[0])
183 return 0;
184
185 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
189 csrow->channels[channel].label); 186 csrow->channels[channel].label);
190} 187}
191 188
@@ -649,98 +646,10 @@ static struct kobj_type ktype_mci = {
649 .default_attrs = (struct attribute **)mci_attr, 646 .default_attrs = (struct attribute **)mci_attr,
650}; 647};
651 648
652/* show/store, tables, etc for the MC kset */
653
654
655struct memctrl_dev_attribute {
656 struct attribute attr;
657 void *value;
658 ssize_t(*show) (void *, char *);
659 ssize_t(*store) (void *, const char *, size_t);
660};
661
662/* Set of show/store abstract level functions for memory control object */
663static ssize_t memctrl_dev_show(struct kobject *kobj,
664 struct attribute *attr, char *buffer)
665{
666 struct memctrl_dev_attribute *memctrl_dev;
667 memctrl_dev = (struct memctrl_dev_attribute *)attr;
668
669 if (memctrl_dev->show)
670 return memctrl_dev->show(memctrl_dev->value, buffer);
671
672 return -EIO;
673}
674
675static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
676 const char *buffer, size_t count)
677{
678 struct memctrl_dev_attribute *memctrl_dev;
679 memctrl_dev = (struct memctrl_dev_attribute *)attr;
680
681 if (memctrl_dev->store)
682 return memctrl_dev->store(memctrl_dev->value, buffer, count);
683
684 return -EIO;
685}
686
687static struct sysfs_ops memctrlfs_ops = {
688 .show = memctrl_dev_show,
689 .store = memctrl_dev_store
690};
691
692#define MEMCTRL_ATTR(_name, _mode, _show, _store) \
693static struct memctrl_dev_attribute attr_##_name = { \
694 .attr = {.name = __stringify(_name), .mode = _mode }, \
695 .value = &_name, \
696 .show = _show, \
697 .store = _store, \
698};
699
700#define MEMCTRL_STRING_ATTR(_name, _data, _mode, _show, _store) \
701static struct memctrl_dev_attribute attr_##_name = { \
702 .attr = {.name = __stringify(_name), .mode = _mode }, \
703 .value = _data, \
704 .show = _show, \
705 .store = _store, \
706};
707
708/* csrow<id> control files */
709MEMCTRL_ATTR(edac_mc_panic_on_ue,
710 S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
711
712MEMCTRL_ATTR(edac_mc_log_ue,
713 S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
714
715MEMCTRL_ATTR(edac_mc_log_ce,
716 S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
717
718MEMCTRL_ATTR(edac_mc_poll_msec,
719 S_IRUGO | S_IWUSR, memctrl_int_show, poll_msec_int_store);
720
721/* Base Attributes of the memory ECC object */
722static struct memctrl_dev_attribute *memctrl_attr[] = {
723 &attr_edac_mc_panic_on_ue,
724 &attr_edac_mc_log_ue,
725 &attr_edac_mc_log_ce,
726 &attr_edac_mc_poll_msec,
727 NULL,
728};
729
730
731/* the ktype for the mc_kset internal kobj */
732static struct kobj_type ktype_mc_set_attribs = {
733 .sysfs_ops = &memctrlfs_ops,
734 .default_attrs = (struct attribute **)memctrl_attr,
735};
736
737/* EDAC memory controller sysfs kset: 649/* EDAC memory controller sysfs kset:
738 * /sys/devices/system/edac/mc 650 * /sys/devices/system/edac/mc
739 */ 651 */
740static struct kset mc_kset = { 652static struct kset *mc_kset;
741 .kobj = {.ktype = &ktype_mc_set_attribs },
742};
743
744 653
745/* 654/*
746 * edac_mc_register_sysfs_main_kobj 655 * edac_mc_register_sysfs_main_kobj
@@ -771,7 +680,7 @@ int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
771 } 680 }
772 681
773 /* this instance becomes part of the mc_kset */ 682 /* this instance becomes part of the mc_kset */
774 kobj_mci->kset = &mc_kset; 683 kobj_mci->kset = mc_kset;
775 684
776 /* register the mc<id> kobject to the mc_kset */ 685 /* register the mc<id> kobject to the mc_kset */
777 err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL, 686 err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
@@ -1001,12 +910,9 @@ int edac_sysfs_setup_mc_kset(void)
1001 } 910 }
1002 911
1003 /* Init the MC's kobject */ 912 /* Init the MC's kobject */
1004 kobject_set_name(&mc_kset.kobj, "mc"); 913 mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj);
1005 mc_kset.kobj.parent = &edac_class->kset.kobj; 914 if (!mc_kset) {
1006 915 err = -ENOMEM;
1007 /* register the mc_kset */
1008 err = kset_register(&mc_kset);
1009 if (err) {
1010 debugf1("%s() Failed to register '.../edac/mc'\n", __func__); 916 debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
1011 goto fail_out; 917 goto fail_out;
1012 } 918 }
@@ -1028,6 +934,6 @@ fail_out:
1028 */ 934 */
1029void edac_sysfs_teardown_mc_kset(void) 935void edac_sysfs_teardown_mc_kset(void)
1030{ 936{
1031 kset_unregister(&mc_kset); 937 kset_unregister(mc_kset);
1032} 938}
1033 939
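The edac_mc_sysfs.c hunks above replace the statically embedded mc_kset and its hand-rolled memctrl show/store machinery with a kset pointer obtained from kset_create_and_add(). A minimal sketch of that lifecycle, with a hypothetical parent kobject standing in for the real edac_class->kset.kobj, looks roughly like this:

        /* Sketch only: dynamic kset creation and teardown as used above.
         * "example_parent" is a stand-in for the real parent kobject.
         */
        #include <linux/kobject.h>

        static struct kset *example_kset;

        static int example_setup_kset(struct kobject *example_parent)
        {
                /* allocates, initializes and registers the kset in one
                 * call; returns NULL on failure */
                example_kset = kset_create_and_add("mc", NULL, example_parent);
                if (!example_kset)
                        return -ENOMEM;
                return 0;
        }

        static void example_teardown_kset(void)
        {
                /* drops the creation reference; the kset is freed once
                 * the last reference is gone */
                kset_unregister(example_kset);
        }

Child kobjects are then attached by setting their ->kset pointer before kobject_init_and_add(), which is what the kobj_mci->kset = mc_kset assignment above now does.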
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 2c1fa1bb6df2..5c153dccc95e 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -28,7 +28,7 @@ static int edac_pci_poll_msec = 1000; /* one second workq period */
28static atomic_t pci_parity_count = ATOMIC_INIT(0); 28static atomic_t pci_parity_count = ATOMIC_INIT(0);
29static atomic_t pci_nonparity_count = ATOMIC_INIT(0); 29static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
30 30
31static struct kobject edac_pci_top_main_kobj; 31static struct kobject *edac_pci_top_main_kobj;
32static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0); 32static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
33 33
34/* getter functions for the data variables */ 34/* getter functions for the data variables */
@@ -83,7 +83,7 @@ static void edac_pci_instance_release(struct kobject *kobj)
83 pci = to_instance(kobj); 83 pci = to_instance(kobj);
84 84
85 /* decrement reference count on top main kobj */ 85 /* decrement reference count on top main kobj */
86 kobject_put(&edac_pci_top_main_kobj); 86 kobject_put(edac_pci_top_main_kobj);
87 87
88 kfree(pci); /* Free the control struct */ 88 kfree(pci); /* Free the control struct */
89} 89}
@@ -166,7 +166,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
166 * track the number of PCI instances we have, and thus nest 166 * track the number of PCI instances we have, and thus nest
167 * properly on keeping the module loaded 167 * properly on keeping the module loaded
168 */ 168 */
169 main_kobj = kobject_get(&edac_pci_top_main_kobj); 169 main_kobj = kobject_get(edac_pci_top_main_kobj);
170 if (!main_kobj) { 170 if (!main_kobj) {
171 err = -ENODEV; 171 err = -ENODEV;
172 goto error_out; 172 goto error_out;
@@ -174,11 +174,11 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
174 174
175 /* And now register this new kobject under the main kobj */ 175 /* And now register this new kobject under the main kobj */
176 err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance, 176 err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
177 &edac_pci_top_main_kobj, "pci%d", idx); 177 edac_pci_top_main_kobj, "pci%d", idx);
178 if (err != 0) { 178 if (err != 0) {
179 debugf2("%s() failed to register instance pci%d\n", 179 debugf2("%s() failed to register instance pci%d\n",
180 __func__, idx); 180 __func__, idx);
181 kobject_put(&edac_pci_top_main_kobj); 181 kobject_put(edac_pci_top_main_kobj);
182 goto error_out; 182 goto error_out;
183 } 183 }
184 184
@@ -316,9 +316,10 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
316 */ 316 */
317static void edac_pci_release_main_kobj(struct kobject *kobj) 317static void edac_pci_release_main_kobj(struct kobject *kobj)
318{ 318{
319
320 debugf0("%s() here to module_put(THIS_MODULE)\n", __func__); 319 debugf0("%s() here to module_put(THIS_MODULE)\n", __func__);
321 320
321 kfree(kobj);
322
322 /* last reference to top EDAC PCI kobject has been removed, 323 /* last reference to top EDAC PCI kobject has been removed,
323 * NOW release our ref count on the core module 324 * NOW release our ref count on the core module
324 */ 325 */
@@ -369,8 +370,16 @@ static int edac_pci_main_kobj_setup(void)
369 goto decrement_count_fail; 370 goto decrement_count_fail;
370 } 371 }
371 372
373 edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
374 if (!edac_pci_top_main_kobj) {
375 debugf1("Failed to allocate\n");
376 err = -ENOMEM;
377 goto kzalloc_fail;
378 }
379
372 /* Instantiate the pci object */ 380 /* Instantiate the pci object */
373 err = kobject_init_and_add(&edac_pci_top_main_kobj, &ktype_edac_pci_main_kobj, 381 err = kobject_init_and_add(edac_pci_top_main_kobj,
382 &ktype_edac_pci_main_kobj,
374 &edac_class->kset.kobj, "pci"); 383 &edac_class->kset.kobj, "pci");
375 if (err) { 384 if (err) {
376 debugf1("Failed to register '.../edac/pci'\n"); 385 debugf1("Failed to register '.../edac/pci'\n");
@@ -381,13 +390,16 @@ static int edac_pci_main_kobj_setup(void)
381 * for EDAC PCI, then edac_pci_main_kobj_teardown() 390 * for EDAC PCI, then edac_pci_main_kobj_teardown()
382 * must be used, for resources to be cleaned up properly 391 * must be used, for resources to be cleaned up properly
383 */ 392 */
384 kobject_uevent(&edac_pci_top_main_kobj, KOBJ_ADD); 393 kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
385 debugf1("Registered '.../edac/pci' kobject\n"); 394 debugf1("Registered '.../edac/pci' kobject\n");
386 395
387 return 0; 396 return 0;
388 397
389 /* Error unwind stack */ 398 /* Error unwind stack */
390kobject_init_and_add_fail: 399kobject_init_and_add_fail:
400 kfree(edac_pci_top_main_kobj);
401
402kzalloc_fail:
391 module_put(THIS_MODULE); 403 module_put(THIS_MODULE);
392 404
393decrement_count_fail: 405decrement_count_fail:
@@ -414,7 +426,7 @@ static void edac_pci_main_kobj_teardown(void)
414 if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) { 426 if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
415 debugf0("%s() called kobject_put on main kobj\n", 427 debugf0("%s() called kobject_put on main kobj\n",
416 __func__); 428 __func__);
417 kobject_put(&edac_pci_top_main_kobj); 429 kobject_put(edac_pci_top_main_kobj);
418 } 430 }
419} 431}
420 432
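edac_pci_sysfs.c gets the same treatment for a bare kobject: the top-level object is now kzalloc()ed and handed to kobject_init_and_add(), and the ktype's release() handler kfree()s it, so the kobject_put() calls through the file are what ultimately free the memory. A reduced sketch of that pattern, with illustrative names rather than the driver's own:

        /* Sketch: dynamically allocated kobject freed from its release(). */
        #include <linux/kobject.h>
        #include <linux/slab.h>

        static struct kobject *example_kobj;

        static void example_release(struct kobject *kobj)
        {
                kfree(kobj);                    /* allocated in example_setup() */
        }

        static struct kobj_type example_ktype = {
                .release        = example_release,
        };

        static int example_setup(struct kobject *example_parent)
        {
                int err;

                example_kobj = kzalloc(sizeof(*example_kobj), GFP_KERNEL);
                if (!example_kobj)
                        return -ENOMEM;

                err = kobject_init_and_add(example_kobj, &example_ktype,
                                           example_parent, "example");
                if (err) {
                        /* mirror the unwind path above: free the allocation */
                        kfree(example_kobj);
                        return err;
                }
                return 0;
        }

        static void example_teardown(void)
        {
                kobject_put(example_kobj);      /* last ref -> example_release() */
        }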
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
new file mode 100644
index 000000000000..22db05a67bfb
--- /dev/null
+++ b/drivers/edac/i5100_edac.c
@@ -0,0 +1,981 @@
1/*
2 * Intel 5100 Memory Controllers kernel module
3 *
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * This module is based on the following document:
8 *
9 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
10 * http://download.intel.com/design/chipsets/datashts/318378.pdf
11 *
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/pci.h>
16#include <linux/pci_ids.h>
17#include <linux/slab.h>
18#include <linux/edac.h>
19#include <linux/delay.h>
20#include <linux/mmzone.h>
21
22#include "edac_core.h"
23
24/* register addresses */
25
26/* device 16, func 1 */
27#define I5100_MC 0x40 /* Memory Control Register */
28#define I5100_MS 0x44 /* Memory Status Register */
29#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
30#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
31#define I5100_TOLM 0x6c /* Top of Low Memory */
32#define I5100_MIR0 0x80 /* Memory Interleave Range 0 */
33#define I5100_MIR1 0x84 /* Memory Interleave Range 1 */
34#define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */
35#define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */
36#define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */
37#define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16)
38#define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15)
39#define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14)
40#define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12)
41#define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11)
42#define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10)
43#define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6)
44#define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5)
45#define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4)
46#define I5100_FERR_NF_MEM_M1ERR_MASK 1
47#define I5100_FERR_NF_MEM_ANY_MASK \
48 (I5100_FERR_NF_MEM_M16ERR_MASK | \
49 I5100_FERR_NF_MEM_M15ERR_MASK | \
50 I5100_FERR_NF_MEM_M14ERR_MASK | \
51 I5100_FERR_NF_MEM_M12ERR_MASK | \
52 I5100_FERR_NF_MEM_M11ERR_MASK | \
53 I5100_FERR_NF_MEM_M10ERR_MASK | \
54 I5100_FERR_NF_MEM_M6ERR_MASK | \
55 I5100_FERR_NF_MEM_M5ERR_MASK | \
56 I5100_FERR_NF_MEM_M4ERR_MASK | \
57 I5100_FERR_NF_MEM_M1ERR_MASK)
58#define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */
59#define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */
60
61/* device 21 and 22, func 0 */
62#define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */
63#define I5100_DMIR 0x15c /* DIMM Interleave Range */
64#define I5100_VALIDLOG 0x18c /* Valid Log Markers */
65#define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */
66#define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */
67#define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */
68#define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */
69#define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */
70#define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */
71#define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */
72
73/* bit field accessors */
74
75static inline u32 i5100_mc_errdeten(u32 mc)
76{
77 return mc >> 5 & 1;
78}
79
80static inline u16 i5100_spddata_rdo(u16 a)
81{
82 return a >> 15 & 1;
83}
84
85static inline u16 i5100_spddata_sbe(u16 a)
86{
87 return a >> 13 & 1;
88}
89
90static inline u16 i5100_spddata_busy(u16 a)
91{
92 return a >> 12 & 1;
93}
94
95static inline u16 i5100_spddata_data(u16 a)
96{
97 return a & ((1 << 8) - 1);
98}
99
100static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
101 u32 data, u32 cmd)
102{
103 return ((dti & ((1 << 4) - 1)) << 28) |
104 ((ckovrd & 1) << 27) |
105 ((sa & ((1 << 3) - 1)) << 24) |
106 ((ba & ((1 << 8) - 1)) << 16) |
107 ((data & ((1 << 8) - 1)) << 8) |
108 (cmd & 1);
109}
110
111static inline u16 i5100_tolm_tolm(u16 a)
112{
113 return a >> 12 & ((1 << 4) - 1);
114}
115
116static inline u16 i5100_mir_limit(u16 a)
117{
118 return a >> 4 & ((1 << 12) - 1);
119}
120
121static inline u16 i5100_mir_way1(u16 a)
122{
123 return a >> 1 & 1;
124}
125
126static inline u16 i5100_mir_way0(u16 a)
127{
128 return a & 1;
129}
130
131static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
132{
133 return a >> 28 & 1;
134}
135
136static inline u32 i5100_ferr_nf_mem_any(u32 a)
137{
138 return a & I5100_FERR_NF_MEM_ANY_MASK;
139}
140
141static inline u32 i5100_nerr_nf_mem_any(u32 a)
142{
143 return i5100_ferr_nf_mem_any(a);
144}
145
146static inline u32 i5100_dmir_limit(u32 a)
147{
148 return a >> 16 & ((1 << 11) - 1);
149}
150
151static inline u32 i5100_dmir_rank(u32 a, u32 i)
152{
153 return a >> (4 * i) & ((1 << 2) - 1);
154}
155
156static inline u16 i5100_mtr_present(u16 a)
157{
158 return a >> 10 & 1;
159}
160
161static inline u16 i5100_mtr_ethrottle(u16 a)
162{
163 return a >> 9 & 1;
164}
165
166static inline u16 i5100_mtr_width(u16 a)
167{
168 return a >> 8 & 1;
169}
170
171static inline u16 i5100_mtr_numbank(u16 a)
172{
173 return a >> 6 & 1;
174}
175
176static inline u16 i5100_mtr_numrow(u16 a)
177{
178 return a >> 2 & ((1 << 2) - 1);
179}
180
181static inline u16 i5100_mtr_numcol(u16 a)
182{
183 return a & ((1 << 2) - 1);
184}
185
186
187static inline u32 i5100_validlog_redmemvalid(u32 a)
188{
189 return a >> 2 & 1;
190}
191
192static inline u32 i5100_validlog_recmemvalid(u32 a)
193{
194 return a >> 1 & 1;
195}
196
197static inline u32 i5100_validlog_nrecmemvalid(u32 a)
198{
199 return a & 1;
200}
201
202static inline u32 i5100_nrecmema_merr(u32 a)
203{
204 return a >> 15 & ((1 << 5) - 1);
205}
206
207static inline u32 i5100_nrecmema_bank(u32 a)
208{
209 return a >> 12 & ((1 << 3) - 1);
210}
211
212static inline u32 i5100_nrecmema_rank(u32 a)
213{
214 return a >> 8 & ((1 << 3) - 1);
215}
216
217static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
218{
219 return a & ((1 << 8) - 1);
220}
221
222static inline u32 i5100_nrecmemb_cas(u32 a)
223{
224 return a >> 16 & ((1 << 13) - 1);
225}
226
227static inline u32 i5100_nrecmemb_ras(u32 a)
228{
229 return a & ((1 << 16) - 1);
230}
231
232static inline u32 i5100_redmemb_ecc_locator(u32 a)
233{
234 return a & ((1 << 18) - 1);
235}
236
237static inline u32 i5100_recmema_merr(u32 a)
238{
239 return i5100_nrecmema_merr(a);
240}
241
242static inline u32 i5100_recmema_bank(u32 a)
243{
244 return i5100_nrecmema_bank(a);
245}
246
247static inline u32 i5100_recmema_rank(u32 a)
248{
249 return i5100_nrecmema_rank(a);
250}
251
252static inline u32 i5100_recmema_dm_buf_id(u32 a)
253{
254 return i5100_nrecmema_dm_buf_id(a);
255}
256
257static inline u32 i5100_recmemb_cas(u32 a)
258{
259 return i5100_nrecmemb_cas(a);
260}
261
262static inline u32 i5100_recmemb_ras(u32 a)
263{
264 return i5100_nrecmemb_ras(a);
265}
266
267/* some generic limits */
268#define I5100_MAX_RANKS_PER_CTLR 6
269#define I5100_MAX_CTLRS 2
270#define I5100_MAX_RANKS_PER_DIMM 4
271#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
272#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
273#define I5100_MAX_RANK_INTERLEAVE 4
274#define I5100_MAX_DMIRS 5
275
276struct i5100_priv {
277 /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
278 int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
279
280 /*
281 * mainboard chip select map -- maps i5100 chip selects to
282 * DIMM slot chip selects. In the case of only 4 ranks per
283 * controller, the mapping is fairly obvious but not unique.
284 * we map -1 -> NC and assume both controllers use the same
285 * map...
286 *
287 */
288 int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
289
290 /* memory interleave range */
291 struct {
292 u64 limit;
293 unsigned way[2];
294 } mir[I5100_MAX_CTLRS];
295
296 /* adjusted memory interleave range register */
297 unsigned amir[I5100_MAX_CTLRS];
298
299 /* dimm interleave range */
300 struct {
301 unsigned rank[I5100_MAX_RANK_INTERLEAVE];
302 u64 limit;
303 } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
304
305 /* memory technology registers... */
306 struct {
307 unsigned present; /* 0 or 1 */
308 unsigned ethrottle; /* 0 or 1 */
309 unsigned width; /* 4 or 8 bits */
310 unsigned numbank; /* 2 or 3 lines */
311 unsigned numrow; /* 13 .. 16 lines */
312 unsigned numcol; /* 11 .. 12 lines */
313 } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
314
315 u64 tolm; /* top of low memory in bytes */
316 unsigned ranksperctlr; /* number of ranks per controller */
317
318 struct pci_dev *mc; /* device 16 func 1 */
319 struct pci_dev *ch0mm; /* device 21 func 0 */
320 struct pci_dev *ch1mm; /* device 22 func 0 */
321};
322
323/* map a rank/ctlr to a slot number on the mainboard */
324static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
325 int ctlr, int rank)
326{
327 const struct i5100_priv *priv = mci->pvt_info;
328 int i;
329
330 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
331 int j;
332 const int numrank = priv->dimm_numrank[ctlr][i];
333
334 for (j = 0; j < numrank; j++)
335 if (priv->dimm_csmap[i][j] == rank)
336 return i * 2 + ctlr;
337 }
338
339 return -1;
340}
341
342static const char *i5100_err_msg(unsigned err)
343{
344 static const char *merrs[] = {
345 "unknown", /* 0 */
346 "uncorrectable data ECC on replay", /* 1 */
347 "unknown", /* 2 */
348 "unknown", /* 3 */
349 "aliased uncorrectable demand data ECC", /* 4 */
350 "aliased uncorrectable spare-copy data ECC", /* 5 */
351 "aliased uncorrectable patrol data ECC", /* 6 */
352 "unknown", /* 7 */
353 "unknown", /* 8 */
354 "unknown", /* 9 */
355 "non-aliased uncorrectable demand data ECC", /* 10 */
356 "non-aliased uncorrectable spare-copy data ECC", /* 11 */
357 "non-aliased uncorrectable patrol data ECC", /* 12 */
358 "unknown", /* 13 */
359 "correctable demand data ECC", /* 14 */
360 "correctable spare-copy data ECC", /* 15 */
361 "correctable patrol data ECC", /* 16 */
362 "unknown", /* 17 */
363 "SPD protocol error", /* 18 */
364 "unknown", /* 19 */
365 "spare copy initiated", /* 20 */
366 "spare copy completed", /* 21 */
367 };
368 unsigned i;
369
370 for (i = 0; i < ARRAY_SIZE(merrs); i++)
371 if (1 << i & err)
372 return merrs[i];
373
374 return "none";
375}
376
377/* convert csrow index into a rank (per controller -- 0..5) */
378static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
379{
380 const struct i5100_priv *priv = mci->pvt_info;
381
382 return csrow % priv->ranksperctlr;
383}
384
385/* convert csrow index into a controller (0..1) */
386static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
387{
388 const struct i5100_priv *priv = mci->pvt_info;
389
390 return csrow / priv->ranksperctlr;
391}
392
393static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
394 int ctlr, int rank)
395{
396 const struct i5100_priv *priv = mci->pvt_info;
397
398 return ctlr * priv->ranksperctlr + rank;
399}
400
401static void i5100_handle_ce(struct mem_ctl_info *mci,
402 int ctlr,
403 unsigned bank,
404 unsigned rank,
405 unsigned long syndrome,
406 unsigned cas,
407 unsigned ras,
408 const char *msg)
409{
410 const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
411
412 printk(KERN_ERR
413 "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
414 "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
415 ctlr, bank, rank, syndrome, cas, ras,
416 csrow, mci->csrows[csrow].channels[0].label, msg);
417
418 mci->ce_count++;
419 mci->csrows[csrow].ce_count++;
420 mci->csrows[csrow].channels[0].ce_count++;
421}
422
423static void i5100_handle_ue(struct mem_ctl_info *mci,
424 int ctlr,
425 unsigned bank,
426 unsigned rank,
427 unsigned long syndrome,
428 unsigned cas,
429 unsigned ras,
430 const char *msg)
431{
432 const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
433
434 printk(KERN_ERR
435 "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
436 "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
437 ctlr, bank, rank, syndrome, cas, ras,
438 csrow, mci->csrows[csrow].channels[0].label, msg);
439
440 mci->ue_count++;
441 mci->csrows[csrow].ue_count++;
442}
443
444static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
445 u32 ferr, u32 nerr)
446{
447 struct i5100_priv *priv = mci->pvt_info;
448 struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
449 u32 dw;
450 u32 dw2;
451 unsigned syndrome = 0;
452 unsigned ecc_loc = 0;
453 unsigned merr;
454 unsigned bank;
455 unsigned rank;
456 unsigned cas;
457 unsigned ras;
458
459 pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);
460
461 if (i5100_validlog_redmemvalid(dw)) {
462 pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
463 syndrome = dw2;
464 pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
465 ecc_loc = i5100_redmemb_ecc_locator(dw2);
466 }
467
468 if (i5100_validlog_recmemvalid(dw)) {
469 const char *msg;
470
471 pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
472 merr = i5100_recmema_merr(dw2);
473 bank = i5100_recmema_bank(dw2);
474 rank = i5100_recmema_rank(dw2);
475
476 pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
477 cas = i5100_recmemb_cas(dw2);
478 ras = i5100_recmemb_ras(dw2);
479
480 /* FIXME: not really sure if this is what merr is...
481 */
482 if (!merr)
483 msg = i5100_err_msg(ferr);
484 else
485 msg = i5100_err_msg(nerr);
486
487 i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
488 }
489
490 if (i5100_validlog_nrecmemvalid(dw)) {
491 const char *msg;
492
493 pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
494 merr = i5100_nrecmema_merr(dw2);
495 bank = i5100_nrecmema_bank(dw2);
496 rank = i5100_nrecmema_rank(dw2);
497
498 pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
499 cas = i5100_nrecmemb_cas(dw2);
500 ras = i5100_nrecmemb_ras(dw2);
501
502 /* FIXME: not really sure if this is what merr is...
503 */
504 if (!merr)
505 msg = i5100_err_msg(ferr);
506 else
507 msg = i5100_err_msg(nerr);
508
509 i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
510 }
511
512 pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
513}
514
515static void i5100_check_error(struct mem_ctl_info *mci)
516{
517 struct i5100_priv *priv = mci->pvt_info;
518 u32 dw;
519
520
521 pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
522 if (i5100_ferr_nf_mem_any(dw)) {
523 u32 dw2;
524
525 pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
526 if (dw2)
527 pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
528 dw2);
529 pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
530
531 i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
532 i5100_ferr_nf_mem_any(dw),
533 i5100_nerr_nf_mem_any(dw2));
534 }
535}
536
537static struct pci_dev *pci_get_device_func(unsigned vendor,
538 unsigned device,
539 unsigned func)
540{
541 struct pci_dev *ret = NULL;
542
543 while (1) {
544 ret = pci_get_device(vendor, device, ret);
545
546 if (!ret)
547 break;
548
549 if (PCI_FUNC(ret->devfn) == func)
550 break;
551 }
552
553 return ret;
554}
555
556static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
557 int csrow)
558{
559 struct i5100_priv *priv = mci->pvt_info;
560 const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
561 const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
562 unsigned addr_lines;
563
564 /* dimm present? */
565 if (!priv->mtr[ctlr][ctlr_rank].present)
566 return 0ULL;
567
568 addr_lines =
569 I5100_DIMM_ADDR_LINES +
570 priv->mtr[ctlr][ctlr_rank].numcol +
571 priv->mtr[ctlr][ctlr_rank].numrow +
572 priv->mtr[ctlr][ctlr_rank].numbank;
573
574 return (unsigned long)
575 ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
576}
577
578static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
579{
580 struct i5100_priv *priv = mci->pvt_info;
581 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
582 int i;
583
584 for (i = 0; i < I5100_MAX_CTLRS; i++) {
585 int j;
586 struct pci_dev *pdev = mms[i];
587
588 for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
589 const unsigned addr =
590 (j < 4) ? I5100_MTR_0 + j * 2 :
591 I5100_MTR_4 + (j - 4) * 2;
592 u16 w;
593
594 pci_read_config_word(pdev, addr, &w);
595
596 priv->mtr[i][j].present = i5100_mtr_present(w);
597 priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
598 priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
599 priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
600 priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
601 priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
602 }
603 }
604}
605
606/*
607 * FIXME: make this into a real i2c adapter (so that dimm-decode
608 * will work)?
609 */
610static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
611 u8 ch, u8 slot, u8 addr, u8 *byte)
612{
613 struct i5100_priv *priv = mci->pvt_info;
614 u16 w;
615 unsigned long et;
616
617 pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
618 if (i5100_spddata_busy(w))
619 return -1;
620
621 pci_write_config_dword(priv->mc, I5100_SPDCMD,
622 i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
623 0, 0));
624
625 /* wait up to 100ms */
626 et = jiffies + HZ / 10;
627 udelay(100);
628 while (1) {
629 pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
630 if (!i5100_spddata_busy(w))
631 break;
632 udelay(100);
633 }
634
635 if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
636 return -1;
637
638 *byte = i5100_spddata_data(w);
639
640 return 0;
641}
642
643/*
644 * fill dimm chip select map
645 *
646 * FIXME:
647 * o only valid for 4 ranks per controller
648 * o not the only way to map chip selects to dimm slots
649 * o investigate if there is some way to obtain this map from the bios
650 */
651static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
652{
653 struct i5100_priv *priv = mci->pvt_info;
654 int i;
655
656 WARN_ON(priv->ranksperctlr != 4);
657
658 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
659 int j;
660
661 for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
662 priv->dimm_csmap[i][j] = -1; /* default NC */
663 }
664
665 /* only 2 chip selects per slot... */
666 priv->dimm_csmap[0][0] = 0;
667 priv->dimm_csmap[0][1] = 3;
668 priv->dimm_csmap[1][0] = 1;
669 priv->dimm_csmap[1][1] = 2;
670 priv->dimm_csmap[2][0] = 2;
671 priv->dimm_csmap[3][0] = 3;
672}
673
674static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
675 struct mem_ctl_info *mci)
676{
677 struct i5100_priv *priv = mci->pvt_info;
678 int i;
679
680 for (i = 0; i < I5100_MAX_CTLRS; i++) {
681 int j;
682
683 for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
684 u8 rank;
685
686 if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
687 priv->dimm_numrank[i][j] = 0;
688 else
689 priv->dimm_numrank[i][j] = (rank & 3) + 1;
690 }
691 }
692
693 i5100_init_dimm_csmap(mci);
694}
695
696static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
697 struct mem_ctl_info *mci)
698{
699 u16 w;
700 u32 dw;
701 struct i5100_priv *priv = mci->pvt_info;
702 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
703 int i;
704
705 pci_read_config_word(pdev, I5100_TOLM, &w);
706 priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
707
708 pci_read_config_word(pdev, I5100_MIR0, &w);
709 priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
710 priv->mir[0].way[1] = i5100_mir_way1(w);
711 priv->mir[0].way[0] = i5100_mir_way0(w);
712
713 pci_read_config_word(pdev, I5100_MIR1, &w);
714 priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
715 priv->mir[1].way[1] = i5100_mir_way1(w);
716 priv->mir[1].way[0] = i5100_mir_way0(w);
717
718 pci_read_config_word(pdev, I5100_AMIR_0, &w);
719 priv->amir[0] = w;
720 pci_read_config_word(pdev, I5100_AMIR_1, &w);
721 priv->amir[1] = w;
722
723 for (i = 0; i < I5100_MAX_CTLRS; i++) {
724 int j;
725
726 for (j = 0; j < 5; j++) {
727 int k;
728
729 pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
730
731 priv->dmir[i][j].limit =
732 (u64) i5100_dmir_limit(dw) << 28;
733 for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
734 priv->dmir[i][j].rank[k] =
735 i5100_dmir_rank(dw, k);
736 }
737 }
738
739 i5100_init_mtr(mci);
740}
741
742static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
743{
744 int i;
745 unsigned long total_pages = 0UL;
746 struct i5100_priv *priv = mci->pvt_info;
747
748 for (i = 0; i < mci->nr_csrows; i++) {
749 const unsigned long npages = i5100_npages(mci, i);
750 const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
751 const unsigned rank = i5100_csrow_to_rank(mci, i);
752
753 if (!npages)
754 continue;
755
756 /*
757 * FIXME: these two are totally bogus -- I don't see how to
758 * map them correctly to this structure...
759 */
760 mci->csrows[i].first_page = total_pages;
761 mci->csrows[i].last_page = total_pages + npages - 1;
762 mci->csrows[i].page_mask = 0UL;
763
764 mci->csrows[i].nr_pages = npages;
765 mci->csrows[i].grain = 32;
766 mci->csrows[i].csrow_idx = i;
767 mci->csrows[i].dtype =
768 (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
769 mci->csrows[i].ue_count = 0;
770 mci->csrows[i].ce_count = 0;
771 mci->csrows[i].mtype = MEM_RDDR2;
772 mci->csrows[i].edac_mode = EDAC_SECDED;
773 mci->csrows[i].mci = mci;
774 mci->csrows[i].nr_channels = 1;
775 mci->csrows[i].channels[0].chan_idx = 0;
776 mci->csrows[i].channels[0].ce_count = 0;
777 mci->csrows[i].channels[0].csrow = mci->csrows + i;
778 snprintf(mci->csrows[i].channels[0].label,
779 sizeof(mci->csrows[i].channels[0].label),
780 "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
781
782 total_pages += npages;
783 }
784}
785
786static int __devinit i5100_init_one(struct pci_dev *pdev,
787 const struct pci_device_id *id)
788{
789 int rc;
790 struct mem_ctl_info *mci;
791 struct i5100_priv *priv;
792 struct pci_dev *ch0mm, *ch1mm;
793 int ret = 0;
794 u32 dw;
795 int ranksperch;
796
797 if (PCI_FUNC(pdev->devfn) != 1)
798 return -ENODEV;
799
800 rc = pci_enable_device(pdev);
801 if (rc < 0) {
802 ret = rc;
803 goto bail;
804 }
805
806 /* ECC enabled? */
807 pci_read_config_dword(pdev, I5100_MC, &dw);
808 if (!i5100_mc_errdeten(dw)) {
809 printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
810 ret = -ENODEV;
811 goto bail_pdev;
812 }
813
814 /* figure out how many ranks, from strapped state of 48GB_Mode input */
815 pci_read_config_dword(pdev, I5100_MS, &dw);
816 ranksperch = !!(dw & (1 << 8)) * 2 + 4;
817
818 if (ranksperch != 4) {
819 /* FIXME: get 6 ranks / controller to work - need hw... */
820 printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
821 ret = -ENODEV;
822 goto bail_pdev;
823 }
824
825 /* enable error reporting... */
826 pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
827 dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
828 pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);
829
830 /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
831 ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
832 PCI_DEVICE_ID_INTEL_5100_21, 0);
833 if (!ch0mm) {
834 ret = -ENODEV;
835 goto bail_pdev;
836 }
837
838 rc = pci_enable_device(ch0mm);
839 if (rc < 0) {
840 ret = rc;
841 goto bail_ch0;
842 }
843
844 /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
845 ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
846 PCI_DEVICE_ID_INTEL_5100_22, 0);
847 if (!ch1mm) {
848 ret = -ENODEV;
849 goto bail_disable_ch0;
850 }
851
852 rc = pci_enable_device(ch1mm);
853 if (rc < 0) {
854 ret = rc;
855 goto bail_ch1;
856 }
857
858 mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
859 if (!mci) {
860 ret = -ENOMEM;
861 goto bail_disable_ch1;
862 }
863
864 mci->dev = &pdev->dev;
865
866 priv = mci->pvt_info;
867 priv->ranksperctlr = ranksperch;
868 priv->mc = pdev;
869 priv->ch0mm = ch0mm;
870 priv->ch1mm = ch1mm;
871
872 i5100_init_dimm_layout(pdev, mci);
873 i5100_init_interleaving(pdev, mci);
874
875 mci->mtype_cap = MEM_FLAG_FB_DDR2;
876 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
877 mci->edac_cap = EDAC_FLAG_SECDED;
878 mci->mod_name = "i5100_edac.c";
879 mci->mod_ver = "not versioned";
880 mci->ctl_name = "i5100";
881 mci->dev_name = pci_name(pdev);
882 mci->ctl_page_to_phys = NULL;
883
884 mci->edac_check = i5100_check_error;
885
886 i5100_init_csrows(mci);
887
888 /* this strange construction seems to be in every driver, dunno why */
889 switch (edac_op_state) {
890 case EDAC_OPSTATE_POLL:
891 case EDAC_OPSTATE_NMI:
892 break;
893 default:
894 edac_op_state = EDAC_OPSTATE_POLL;
895 break;
896 }
897
898 if (edac_mc_add_mc(mci)) {
899 ret = -ENODEV;
900 goto bail_mc;
901 }
902
903 return ret;
904
905bail_mc:
906 edac_mc_free(mci);
907
908bail_disable_ch1:
909 pci_disable_device(ch1mm);
910
911bail_ch1:
912 pci_dev_put(ch1mm);
913
914bail_disable_ch0:
915 pci_disable_device(ch0mm);
916
917bail_ch0:
918 pci_dev_put(ch0mm);
919
920bail_pdev:
921 pci_disable_device(pdev);
922
923bail:
924 return ret;
925}
926
927static void __devexit i5100_remove_one(struct pci_dev *pdev)
928{
929 struct mem_ctl_info *mci;
930 struct i5100_priv *priv;
931
932 mci = edac_mc_del_mc(&pdev->dev);
933
934 if (!mci)
935 return;
936
937 priv = mci->pvt_info;
938 pci_disable_device(pdev);
939 pci_disable_device(priv->ch0mm);
940 pci_disable_device(priv->ch1mm);
941 pci_dev_put(priv->ch0mm);
942 pci_dev_put(priv->ch1mm);
943
944 edac_mc_free(mci);
945}
946
947static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
948 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
949 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
950 { 0, }
951};
952MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
953
954static struct pci_driver i5100_driver = {
955 .name = KBUILD_BASENAME,
956 .probe = i5100_init_one,
957 .remove = __devexit_p(i5100_remove_one),
958 .id_table = i5100_pci_tbl,
959};
960
961static int __init i5100_init(void)
962{
963 int pci_rc;
964
965 pci_rc = pci_register_driver(&i5100_driver);
966
967 return (pci_rc < 0) ? pci_rc : 0;
968}
969
970static void __exit i5100_exit(void)
971{
972 pci_unregister_driver(&i5100_driver);
973}
974
975module_init(i5100_init);
976module_exit(i5100_exit);
977
978MODULE_LICENSE("GPL");
979MODULE_AUTHOR
980 ("Arthur Jones <ajones@riverbed.com>");
981MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
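Two of the terser expressions in this new driver are easier to follow with numbers plugged in (the values below are illustrative, not taken from real hardware): in i5100_init_one(), ranksperch = !!(dw & (1 << 8)) * 2 + 4 yields 6 ranks per controller when bit 8 of I5100_MS (the 48GB_Mode strap) is set and 4 otherwise; in i5100_npages(), a rank decoded as numbank = 2, numrow = 14 and numcol = 10 gives addr_lines = 3 + 10 + 14 + 2 = 29, i.e. 2^29 bytes = 512 MiB, or 131072 pages with a 4 KiB PAGE_SIZE.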
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index d49361bfe670..2265d9ca1535 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -195,14 +195,15 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
195 return IRQ_HANDLED; 195 return IRQ_HANDLED;
196} 196}
197 197
198static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev) 198static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
199 const struct of_device_id *match)
199{ 200{
200 struct edac_pci_ctl_info *pci; 201 struct edac_pci_ctl_info *pci;
201 struct mpc85xx_pci_pdata *pdata; 202 struct mpc85xx_pci_pdata *pdata;
202 struct resource *r; 203 struct resource r;
203 int res = 0; 204 int res = 0;
204 205
205 if (!devres_open_group(&pdev->dev, mpc85xx_pci_err_probe, GFP_KERNEL)) 206 if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
206 return -ENOMEM; 207 return -ENOMEM;
207 208
208 pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err"); 209 pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
@@ -212,34 +213,37 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
212 pdata = pci->pvt_info; 213 pdata = pci->pvt_info;
213 pdata->name = "mpc85xx_pci_err"; 214 pdata->name = "mpc85xx_pci_err";
214 pdata->irq = NO_IRQ; 215 pdata->irq = NO_IRQ;
215 platform_set_drvdata(pdev, pci); 216 dev_set_drvdata(&op->dev, pci);
216 pci->dev = &pdev->dev; 217 pci->dev = &op->dev;
217 pci->mod_name = EDAC_MOD_STR; 218 pci->mod_name = EDAC_MOD_STR;
218 pci->ctl_name = pdata->name; 219 pci->ctl_name = pdata->name;
219 pci->dev_name = pdev->dev.bus_id; 220 pci->dev_name = op->dev.bus_id;
220 221
221 if (edac_op_state == EDAC_OPSTATE_POLL) 222 if (edac_op_state == EDAC_OPSTATE_POLL)
222 pci->edac_check = mpc85xx_pci_check; 223 pci->edac_check = mpc85xx_pci_check;
223 224
224 pdata->edac_idx = edac_pci_idx++; 225 pdata->edac_idx = edac_pci_idx++;
225 226
226 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 227 res = of_address_to_resource(op->node, 0, &r);
227 if (!r) { 228 if (res) {
228 printk(KERN_ERR "%s: Unable to get resource for " 229 printk(KERN_ERR "%s: Unable to get resource for "
229 "PCI err regs\n", __func__); 230 "PCI err regs\n", __func__);
230 goto err; 231 goto err;
231 } 232 }
232 233
233 if (!devm_request_mem_region(&pdev->dev, r->start, 234 /* we only need the error registers */
234 r->end - r->start + 1, pdata->name)) { 235 r.start += 0xe00;
236
237 if (!devm_request_mem_region(&op->dev, r.start,
238 r.end - r.start + 1, pdata->name)) {
235 printk(KERN_ERR "%s: Error while requesting mem region\n", 239 printk(KERN_ERR "%s: Error while requesting mem region\n",
236 __func__); 240 __func__);
237 res = -EBUSY; 241 res = -EBUSY;
238 goto err; 242 goto err;
239 } 243 }
240 244
241 pdata->pci_vbase = devm_ioremap(&pdev->dev, r->start, 245 pdata->pci_vbase = devm_ioremap(&op->dev, r.start,
242 r->end - r->start + 1); 246 r.end - r.start + 1);
243 if (!pdata->pci_vbase) { 247 if (!pdata->pci_vbase) {
244 printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__); 248 printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
245 res = -ENOMEM; 249 res = -ENOMEM;
@@ -266,14 +270,15 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
266 } 270 }
267 271
268 if (edac_op_state == EDAC_OPSTATE_INT) { 272 if (edac_op_state == EDAC_OPSTATE_INT) {
269 pdata->irq = platform_get_irq(pdev, 0); 273 pdata->irq = irq_of_parse_and_map(op->node, 0);
270 res = devm_request_irq(&pdev->dev, pdata->irq, 274 res = devm_request_irq(&op->dev, pdata->irq,
271 mpc85xx_pci_isr, IRQF_DISABLED, 275 mpc85xx_pci_isr, IRQF_DISABLED,
272 "[EDAC] PCI err", pci); 276 "[EDAC] PCI err", pci);
273 if (res < 0) { 277 if (res < 0) {
274 printk(KERN_ERR 278 printk(KERN_ERR
275 "%s: Unable to requiest irq %d for " 279 "%s: Unable to requiest irq %d for "
276 "MPC85xx PCI err\n", __func__, pdata->irq); 280 "MPC85xx PCI err\n", __func__, pdata->irq);
281 irq_dispose_mapping(pdata->irq);
277 res = -ENODEV; 282 res = -ENODEV;
278 goto err2; 283 goto err2;
279 } 284 }
@@ -282,23 +287,23 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
282 pdata->irq); 287 pdata->irq);
283 } 288 }
284 289
285 devres_remove_group(&pdev->dev, mpc85xx_pci_err_probe); 290 devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
286 debugf3("%s(): success\n", __func__); 291 debugf3("%s(): success\n", __func__);
287 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); 292 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
288 293
289 return 0; 294 return 0;
290 295
291err2: 296err2:
292 edac_pci_del_device(&pdev->dev); 297 edac_pci_del_device(&op->dev);
293err: 298err:
294 edac_pci_free_ctl_info(pci); 299 edac_pci_free_ctl_info(pci);
295 devres_release_group(&pdev->dev, mpc85xx_pci_err_probe); 300 devres_release_group(&op->dev, mpc85xx_pci_err_probe);
296 return res; 301 return res;
297} 302}
298 303
299static int mpc85xx_pci_err_remove(struct platform_device *pdev) 304static int mpc85xx_pci_err_remove(struct of_device *op)
300{ 305{
301 struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); 306 struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
302 struct mpc85xx_pci_pdata *pdata = pci->pvt_info; 307 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
303 308
304 debugf0("%s()\n", __func__); 309 debugf0("%s()\n", __func__);
@@ -318,12 +323,26 @@ static int mpc85xx_pci_err_remove(struct platform_device *pdev)
318 return 0; 323 return 0;
319} 324}
320 325
321static struct platform_driver mpc85xx_pci_err_driver = { 326static struct of_device_id mpc85xx_pci_err_of_match[] = {
327 {
328 .compatible = "fsl,mpc8540-pcix",
329 },
330 {
331 .compatible = "fsl,mpc8540-pci",
332 },
333 {},
334};
335
336static struct of_platform_driver mpc85xx_pci_err_driver = {
337 .owner = THIS_MODULE,
338 .name = "mpc85xx_pci_err",
339 .match_table = mpc85xx_pci_err_of_match,
322 .probe = mpc85xx_pci_err_probe, 340 .probe = mpc85xx_pci_err_probe,
323 .remove = __devexit_p(mpc85xx_pci_err_remove), 341 .remove = __devexit_p(mpc85xx_pci_err_remove),
324 .driver = { 342 .driver = {
325 .name = "mpc85xx_pci_err", 343 .name = "mpc85xx_pci_err",
326 } 344 .owner = THIS_MODULE,
345 },
327}; 346};
328 347
329#endif /* CONFIG_PCI */ 348#endif /* CONFIG_PCI */
@@ -1002,7 +1021,7 @@ static int __init mpc85xx_mc_init(void)
1002 printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n"); 1021 printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
1003 1022
1004#ifdef CONFIG_PCI 1023#ifdef CONFIG_PCI
1005 res = platform_driver_register(&mpc85xx_pci_err_driver); 1024 res = of_register_platform_driver(&mpc85xx_pci_err_driver);
1006 if (res) 1025 if (res)
1007 printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n"); 1026 printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
1008#endif 1027#endif
@@ -1025,7 +1044,7 @@ static void __exit mpc85xx_mc_exit(void)
1025{ 1044{
1026 mtspr(SPRN_HID1, orig_hid1); 1045 mtspr(SPRN_HID1, orig_hid1);
1027#ifdef CONFIG_PCI 1046#ifdef CONFIG_PCI
1028 platform_driver_unregister(&mpc85xx_pci_err_driver); 1047 of_unregister_platform_driver(&mpc85xx_pci_err_driver);
1029#endif 1048#endif
1030 of_unregister_platform_driver(&mpc85xx_l2_err_driver); 1049 of_unregister_platform_driver(&mpc85xx_l2_err_driver);
1031 of_unregister_platform_driver(&mpc85xx_mc_err_driver); 1050 of_unregister_platform_driver(&mpc85xx_mc_err_driver);
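Besides moving mpc85xx_pci_err onto the OF platform bus, the hunk above also pairs irq_of_parse_and_map() with irq_dispose_mapping() on the error path. In isolation that pairing looks roughly like the sketch below (the handler, the "example" name and the bare request_irq() call are illustrative; the driver itself uses devm_request_irq(), and the headers are approximate for this era):

        /* Sketch: map an OF interrupt and undo the mapping if the
         * handler cannot be installed.
         */
        #include <linux/interrupt.h>
        #include <linux/of_platform.h>

        static irqreturn_t example_isr(int irq, void *dev_id)
        {
                return IRQ_HANDLED;
        }

        static int example_setup_irq(struct device_node *node, void *dev_id)
        {
                unsigned int virq = irq_of_parse_and_map(node, 0);
                int res;

                if (!virq)
                        return -ENODEV;         /* no usable interrupt */

                res = request_irq(virq, example_isr, IRQF_DISABLED,
                                  "example", dev_id);
                if (res < 0) {
                        irq_dispose_mapping(virq);      /* undo the mapping */
                        return res;
                }
                return 0;
        }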
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index bf071f140a05..083ce8d0c63d 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -71,6 +71,35 @@ static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
71 return IRQ_HANDLED; 71 return IRQ_HANDLED;
72} 72}
73 73
74/*
75 * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
76 * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
77 * well. IOW, don't set bit 0.
78 */
79
80/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
81static int __init mv64x60_pci_fixup(struct platform_device *pdev)
82{
83 struct resource *r;
84 void __iomem *pci_serr;
85
86 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
87 if (!r) {
88 printk(KERN_ERR "%s: Unable to get resource for "
89 "PCI err regs\n", __func__);
90 return -ENOENT;
91 }
92
93 pci_serr = ioremap(r->start, r->end - r->start + 1);
94 if (!pci_serr)
95 return -ENOMEM;
96
97 out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
98 iounmap(pci_serr);
99
100 return 0;
101}
102
74static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev) 103static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
75{ 104{
76 struct edac_pci_ctl_info *pci; 105 struct edac_pci_ctl_info *pci;
@@ -128,6 +157,12 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
128 goto err; 157 goto err;
129 } 158 }
130 159
160 res = mv64x60_pci_fixup(pdev);
161 if (res < 0) {
162 printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
163 goto err;
164 }
165
131 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0); 166 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
132 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0); 167 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
133 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 168 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
@@ -612,7 +647,7 @@ static void get_total_mem(struct mv64x60_mc_pdata *pdata)
612 if (!np) 647 if (!np)
613 return; 648 return;
614 649
615 reg = get_property(np, "reg", NULL); 650 reg = of_get_property(np, "reg", NULL);
616 651
617 pdata->total_mem = reg[1]; 652 pdata->total_mem = reg[1];
618} 653}
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7dfd0f..c66817e7717b 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -152,20 +152,11 @@ static ssize_t smi_data_read(struct kobject *kobj,
152 struct bin_attribute *bin_attr, 152 struct bin_attribute *bin_attr,
153 char *buf, loff_t pos, size_t count) 153 char *buf, loff_t pos, size_t count)
154{ 154{
155 size_t max_read;
156 ssize_t ret; 155 ssize_t ret;
157 156
158 mutex_lock(&smi_data_lock); 157 mutex_lock(&smi_data_lock);
159 158 ret = memory_read_from_buffer(buf, count, &pos, smi_data_buf,
160 if (pos >= smi_data_buf_size) { 159 smi_data_buf_size);
161 ret = 0;
162 goto out;
163 }
164
165 max_read = smi_data_buf_size - pos;
166 ret = min(max_read, count);
167 memcpy(buf, smi_data_buf + pos, ret);
168out:
169 mutex_unlock(&smi_data_lock); 160 mutex_unlock(&smi_data_lock);
170 return ret; 161 return ret;
171} 162}
@@ -254,6 +245,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
254static int smi_request(struct smi_cmd *smi_cmd) 245static int smi_request(struct smi_cmd *smi_cmd)
255{ 246{
256 cpumask_t old_mask; 247 cpumask_t old_mask;
248 cpumask_of_cpu_ptr(new_mask, 0);
257 int ret = 0; 249 int ret = 0;
258 250
259 if (smi_cmd->magic != SMI_CMD_MAGIC) { 251 if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +256,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
264 256
265 /* SMI requires CPU 0 */ 257 /* SMI requires CPU 0 */
266 old_mask = current->cpus_allowed; 258 old_mask = current->cpus_allowed;
267 set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); 259 set_cpus_allowed_ptr(current, new_mask);
268 if (smp_processor_id() != 0) { 260 if (smp_processor_id() != 0) {
269 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", 261 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
270 __func__); 262 __func__);
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 7430e218cda6..13946ebd77d6 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -507,11 +507,6 @@ static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
507 507
508static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count) 508static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
509{ 509{
510 unsigned char *ptemp = NULL;
511 size_t bytes_left = 0;
512 size_t data_length = 0;
513 ssize_t ret_count = 0;
514
515 /* check to see if we have something to return */ 510 /* check to see if we have something to return */
516 if ((rbu_data.image_update_buffer == NULL) || 511 if ((rbu_data.image_update_buffer == NULL) ||
517 (rbu_data.bios_image_size == 0)) { 512 (rbu_data.bios_image_size == 0)) {
@@ -519,28 +514,11 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
519 "bios_image_size %lu\n", 514 "bios_image_size %lu\n",
520 rbu_data.image_update_buffer, 515 rbu_data.image_update_buffer,
521 rbu_data.bios_image_size); 516 rbu_data.bios_image_size);
522 ret_count = -ENOMEM; 517 return -ENOMEM;
523 goto read_rbu_data_exit;
524 }
525
526 if (pos > rbu_data.bios_image_size) {
527 ret_count = 0;
528 goto read_rbu_data_exit;
529 } 518 }
530 519
531 bytes_left = rbu_data.bios_image_size - pos; 520 return memory_read_from_buffer(buffer, count, &pos,
532 data_length = min(bytes_left, count); 521 rbu_data.image_update_buffer, rbu_data.bios_image_size);
533
534 ptemp = rbu_data.image_update_buffer;
535 memcpy(buffer, (ptemp + pos), data_length);
536
537 if ((pos + count) > rbu_data.bios_image_size)
538 /* this was the last copy */
539 ret_count = bytes_left;
540 else
541 ret_count = count;
542 read_rbu_data_exit:
543 return ret_count;
544} 522}
545 523
546static ssize_t read_rbu_data(struct kobject *kobj, 524static ssize_t read_rbu_data(struct kobject *kobj,
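Both the dcdbas and dell_rbu hunks above collapse the same open-coded bounds checking into a single memory_read_from_buffer() call. Reconstructed from the logic that was removed, the helper behaves roughly like the sketch below (renamed here to make clear it illustrates the semantics rather than the kernel's own implementation):

        /* Sketch of memory_read_from_buffer() semantics, per the removed code. */
        #include <linux/string.h>

        static ssize_t example_read_from_buffer(void *to, size_t count,
                                                loff_t *ppos, const void *from,
                                                size_t available)
        {
                loff_t pos = *ppos;

                if (pos < 0)
                        return -EINVAL;
                if (pos >= available)
                        return 0;                       /* nothing left to read */
                if (count > available - pos)
                        count = available - pos;        /* clamp to buffer end */

                memcpy(to, from + pos, count);
                *ppos = pos + count;

                return count;
        }

So a read that starts past the end of smi_data_buf or the BIOS image returns 0, and one that straddles the end is clamped to the bytes actually available.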
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fced1909cbba..dbd42d6c93a7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -2,15 +2,40 @@
2# GPIO infrastructure and expanders 2# GPIO infrastructure and expanders
3# 3#
4 4
5config HAVE_GPIO_LIB 5config ARCH_WANT_OPTIONAL_GPIOLIB
6 bool 6 bool
7 help 7 help
8 Select this config option from the architecture Kconfig, if
9 it is possible to use gpiolib on the architecture, but let the
10 user decide whether to actually build it or not.
11 Select this instead of ARCH_REQUIRE_GPIOLIB, if your architecture does
12 not depend on GPIOs being available, but rather let the user
13 decide whether it is needed or not.
14
15config ARCH_REQUIRE_GPIOLIB
16 bool
17 select GPIOLIB
18 help
8 Platforms select gpiolib if they use this infrastructure 19 Platforms select gpiolib if they use this infrastructure
9 for all their GPIOs, usually starting with ones integrated 20 for all their GPIOs, usually starting with ones integrated
10 into SOC processors. 21 into SOC processors.
22 Selecting this from the architecture code will cause the gpiolib
23 code to always get built in.
24
25
26
27menuconfig GPIOLIB
28 bool "GPIO Support"
29 depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB
30 select GENERIC_GPIO
31 help
32 This enables GPIO support through the generic GPIO library.
33 You only need to enable this if you also want to enable
34 one or more of the GPIO expansion card drivers below.
11 35
12menu "GPIO Support" 36 If unsure, say N.
13 depends on HAVE_GPIO_LIB 37
38if GPIOLIB
14 39
15config DEBUG_GPIO 40config DEBUG_GPIO
16 bool "Debug GPIO calls" 41 bool "Debug GPIO calls"
@@ -23,10 +48,44 @@ config DEBUG_GPIO
23 slower. The diagnostics help catch the type of setup errors 48 slower. The diagnostics help catch the type of setup errors
24 that are most common when setting up new platforms or boards. 49 that are most common when setting up new platforms or boards.
25 50
51config GPIO_SYSFS
52 bool "/sys/class/gpio/... (sysfs interface)"
53 depends on SYSFS && EXPERIMENTAL
54 help
55 Say Y here to add a sysfs interface for GPIOs.
56
57 This is mostly useful to work around omissions in a system's
58 kernel support. Those are common in custom and semicustom
59 hardware assembled using standard kernels with a minimum of
60 custom patches. In those cases, userspace code may import
61 a given GPIO from the kernel, if no kernel driver requested it.
62
63 Kernel drivers may also request that a particular GPIO be
64 exported to userspace; this can be useful when debugging.
65
26# put expanders in the right section, in alphabetical order 66# put expanders in the right section, in alphabetical order
27 67
28comment "I2C GPIO expanders:" 68comment "I2C GPIO expanders:"
29 69
70config GPIO_MAX732X
71 tristate "MAX7319, MAX7320-7327 I2C Port Expanders"
72 depends on I2C
73 help
74 Say yes here to support the MAX7319, MAX7320-7327 series of I2C
75 Port Expanders. Each IO port on these chips has a fixed role of
76 Input (designated by 'I'), Push-Pull Output ('O'), or Open-Drain
77 Input and Output (designated by 'P'). The combinations are listed
78 below:
79
80 8 bits: max7319 (8I), max7320 (8O), max7321 (8P),
81 max7322 (4I4O), max7323 (4P4O)
82
83 16 bits: max7324 (8I8O), max7325 (8P8O),
84 max7326 (4I12O), max7327 (4P12O)
85
86 Board setup code must specify the model to use, and the start
87 number for these GPIOs.
88
30config GPIO_PCA953X 89config GPIO_PCA953X
31 tristate "PCA953x, PCA955x, and MAX7310 I/O ports" 90 tristate "PCA953x, PCA955x, and MAX7310 I/O ports"
32 depends on I2C 91 depends on I2C
@@ -68,6 +127,24 @@ config GPIO_PCF857X
68 This driver provides an in-kernel interface to those GPIOs using 127 This driver provides an in-kernel interface to those GPIOs using
69 platform-neutral GPIO calls. 128 platform-neutral GPIO calls.
70 129
130comment "PCI GPIO expanders:"
131
132config GPIO_BT8XX
133 tristate "BT8XX GPIO abuser"
134 depends on PCI && VIDEO_BT848=n
135 help
136 The BT8xx frame grabber chip has 24 GPIO pins that can be abused
137 as a cheap PCI GPIO card.
138
139 This chip can be found on Miro, Hauppauge and STB TV-cards.
140
141 The card needs to be physically altered for using it as a
142 GPIO card. For more information on how to build a GPIO card
143 from a BT8xx TV card, see the documentation file at
144 Documentation/bt8xxgpio.txt
145
146 If unsure, say N.
147
71comment "SPI GPIO expanders:" 148comment "SPI GPIO expanders:"
72 149
73config GPIO_MAX7301 150config GPIO_MAX7301
@@ -83,4 +160,4 @@ config GPIO_MCP23S08
83 SPI driver for Microchip MCP23S08 I/O expander. This provides 160 SPI driver for Microchip MCP23S08 I/O expander. This provides
84 a GPIO interface supporting inputs and outputs. 161 a GPIO interface supporting inputs and outputs.
85 162
86endmenu 163endif
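The help texts above describe gpiolib as the generic GPIO infrastructure that these expander drivers plug into. For orientation, a minimal in-kernel consumer of that interface in this era looks roughly like the sketch below (GPIO number 42 and the "example" label are made up; real code would take the number from platform data):

        /* Sketch: minimal gpiolib consumer. */
        #include <linux/gpio.h>         /* some older trees use <asm/gpio.h> */

        static int example_pulse(void)
        {
                int err;

                err = gpio_request(42, "example");      /* reserve the line */
                if (err)
                        return err;

                err = gpio_direction_output(42, 0);     /* drive it low first */
                if (err) {
                        gpio_free(42);
                        return err;
                }

                gpio_set_value(42, 1);                  /* drive it high */
                gpio_free(42);                          /* release when done */
                return 0;
        }

With CONFIG_GPIO_SYSFS enabled, the same line can instead be exported to userspace through /sys/class/gpio, as the GPIO_SYSFS help text above notes.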
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 16e796dc5410..01b4bbde1956 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,9 +2,11 @@
2 2
3ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG 3ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
4 4
5obj-$(CONFIG_HAVE_GPIO_LIB) += gpiolib.o 5obj-$(CONFIG_GPIOLIB) += gpiolib.o
6 6
7obj-$(CONFIG_GPIO_MAX7301) += max7301.o 7obj-$(CONFIG_GPIO_MAX7301) += max7301.o
8obj-$(CONFIG_GPIO_MAX732X) += max732x.o
8obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o 9obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
9obj-$(CONFIG_GPIO_PCA953X) += pca953x.o 10obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
10obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o 11obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
12obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
diff --git a/drivers/gpio/bt8xxgpio.c b/drivers/gpio/bt8xxgpio.c
new file mode 100644
index 000000000000..7a1168249dd5
--- /dev/null
+++ b/drivers/gpio/bt8xxgpio.c
@@ -0,0 +1,348 @@
1/*
2
3 bt8xx GPIO abuser
4
5 Copyright (C) 2008 Michael Buesch <mb@bu3sch.de>
6
7 Please do _only_ contact the people listed _above_ with issues related to this driver.
8 All the other people listed below are not related to this driver. Their names
9 are only here, because this driver is derived from the bt848 driver.
10
11
12 Derived from the bt848 driver:
13
14 Copyright (C) 1996,97,98 Ralph Metzler
15 & Marcus Metzler
16 (c) 1999-2002 Gerd Knorr
17
18 some v4l2 code lines are taken from Justin's bttv2 driver which is
19 (c) 2000 Justin Schoeman
20
21 V4L1 removal from:
22 (c) 2005-2006 Nickolay V. Shmyrev
23
24 Fixes to be fully V4L2 compliant by
25 (c) 2006 Mauro Carvalho Chehab
26
27 Cropping and overscan support
28 Copyright (C) 2005, 2006 Michael H. Schimek
29 Sponsored by OPQ Systems AB
30
31 This program is free software; you can redistribute it and/or modify
32 it under the terms of the GNU General Public License as published by
33 the Free Software Foundation; either version 2 of the License, or
34 (at your option) any later version.
35
36 This program is distributed in the hope that it will be useful,
37 but WITHOUT ANY WARRANTY; without even the implied warranty of
38 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
39 GNU General Public License for more details.
40
41 You should have received a copy of the GNU General Public License
42 along with this program; if not, write to the Free Software
43 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
44*/
45
46#include <linux/module.h>
47#include <linux/pci.h>
48#include <linux/spinlock.h>
49
50#include <asm/gpio.h>
51
52/* Steal the hardware definitions from the bttv driver. */
53#include "../media/video/bt8xx/bt848.h"
54
55
56#define BT8XXGPIO_NR_GPIOS 24 /* We have 24 GPIO pins */
57
58
59struct bt8xxgpio {
60 spinlock_t lock;
61
62 void __iomem *mmio;
63 struct pci_dev *pdev;
64 struct gpio_chip gpio;
65
66#ifdef CONFIG_PM
67 u32 saved_outen;
68 u32 saved_data;
69#endif
70};
71
72#define bgwrite(dat, adr) writel((dat), bg->mmio+(adr))
73#define bgread(adr) readl(bg->mmio+(adr))
74
75
76static int modparam_gpiobase = -1/* dynamic */;
77module_param_named(gpiobase, modparam_gpiobase, int, 0444);
78MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, which is the default.");
79
80
81static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
82{
83 struct bt8xxgpio *bg = container_of(gpio, struct bt8xxgpio, gpio);
84 unsigned long flags;
85 u32 outen, data;
86
87 spin_lock_irqsave(&bg->lock, flags);
88
89 data = bgread(BT848_GPIO_DATA);
90 data &= ~(1 << nr);
91 bgwrite(data, BT848_GPIO_DATA);
92
93 outen = bgread(BT848_GPIO_OUT_EN);
94 outen &= ~(1 << nr);
95 bgwrite(outen, BT848_GPIO_OUT_EN);
96
97 spin_unlock_irqrestore(&bg->lock, flags);
98
99 return 0;
100}
101
102static int bt8xxgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
103{
104 struct bt8xxgpio *bg = container_of(gpio, struct bt8xxgpio, gpio);
105 unsigned long flags;
106 u32 val;
107
108 spin_lock_irqsave(&bg->lock, flags);
109 val = bgread(BT848_GPIO_DATA);
110 spin_unlock_irqrestore(&bg->lock, flags);
111
112 return !!(val & (1 << nr));
113}
114
115static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio,
116 unsigned nr, int val)
117{
118 struct bt8xxgpio *bg = container_of(gpio, struct bt8xxgpio, gpio);
119 unsigned long flags;
120 u32 outen, data;
121
122 spin_lock_irqsave(&bg->lock, flags);
123
124 outen = bgread(BT848_GPIO_OUT_EN);
125 outen |= (1 << nr);
126 bgwrite(outen, BT848_GPIO_OUT_EN);
127
128 data = bgread(BT848_GPIO_DATA);
129 if (val)
130 data |= (1 << nr);
131 else
132 data &= ~(1 << nr);
133 bgwrite(data, BT848_GPIO_DATA);
134
135 spin_unlock_irqrestore(&bg->lock, flags);
136
137 return 0;
138}
139
140static void bt8xxgpio_gpio_set(struct gpio_chip *gpio,
141 unsigned nr, int val)
142{
143 struct bt8xxgpio *bg = container_of(gpio, struct bt8xxgpio, gpio);
144 unsigned long flags;
145 u32 data;
146
147 spin_lock_irqsave(&bg->lock, flags);
148
149 data = bgread(BT848_GPIO_DATA);
150 if (val)
151 data |= (1 << nr);
152 else
153 data &= ~(1 << nr);
154 bgwrite(data, BT848_GPIO_DATA);
155
156 spin_unlock_irqrestore(&bg->lock, flags);
157}
158
159static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
160{
161 struct gpio_chip *c = &bg->gpio;
162
163 c->label = bg->pdev->dev.bus_id;
164 c->owner = THIS_MODULE;
165 c->direction_input = bt8xxgpio_gpio_direction_input;
166 c->get = bt8xxgpio_gpio_get;
167 c->direction_output = bt8xxgpio_gpio_direction_output;
168 c->set = bt8xxgpio_gpio_set;
169 c->dbg_show = NULL;
170 c->base = modparam_gpiobase;
171 c->ngpio = BT8XXGPIO_NR_GPIOS;
172 c->can_sleep = 0;
173}
174
175static int bt8xxgpio_probe(struct pci_dev *dev,
176 const struct pci_device_id *pci_id)
177{
178 struct bt8xxgpio *bg;
179 int err;
180
181 bg = kzalloc(sizeof(*bg), GFP_KERNEL);
182 if (!bg)
183 return -ENOMEM;
184
185 bg->pdev = dev;
186 spin_lock_init(&bg->lock);
187
188 err = pci_enable_device(dev);
189 if (err) {
190 printk(KERN_ERR "bt8xxgpio: Can't enable device.\n");
191 goto err_freebg;
192 }
193 if (!request_mem_region(pci_resource_start(dev, 0),
194 pci_resource_len(dev, 0),
195 "bt8xxgpio")) {
196 printk(KERN_WARNING "bt8xxgpio: Can't request iomem (0x%llx).\n",
197 (unsigned long long)pci_resource_start(dev, 0));
198 err = -EBUSY;
199 goto err_disable;
200 }
201 pci_set_master(dev);
202 pci_set_drvdata(dev, bg);
203
204 bg->mmio = ioremap(pci_resource_start(dev, 0), 0x1000);
205 if (!bg->mmio) {
206 printk(KERN_ERR "bt8xxgpio: ioremap() failed\n");
207 err = -EIO;
208 goto err_release_mem;
209 }
210
211 /* Disable interrupts */
212 bgwrite(0, BT848_INT_MASK);
213
214 /* gpio init */
215 bgwrite(0, BT848_GPIO_DMA_CTL);
216 bgwrite(0, BT848_GPIO_REG_INP);
217 bgwrite(0, BT848_GPIO_OUT_EN);
218
219 bt8xxgpio_gpio_setup(bg);
220 err = gpiochip_add(&bg->gpio);
221 if (err) {
222 printk(KERN_ERR "bt8xxgpio: Failed to register GPIOs\n");
223 goto err_release_mem;
224 }
225
226 printk(KERN_INFO "bt8xxgpio: Abusing BT8xx card for GPIOs %d to %d\n",
227 bg->gpio.base, bg->gpio.base + BT8XXGPIO_NR_GPIOS - 1);
228
229 return 0;
230
231err_release_mem:
232 release_mem_region(pci_resource_start(dev, 0),
233 pci_resource_len(dev, 0));
234 pci_set_drvdata(dev, NULL);
235err_disable:
236 pci_disable_device(dev);
237err_freebg:
238 kfree(bg);
239
240 return err;
241}
242
243static void bt8xxgpio_remove(struct pci_dev *pdev)
244{
245 struct bt8xxgpio *bg = pci_get_drvdata(pdev);
246
247 gpiochip_remove(&bg->gpio);
248
249 bgwrite(0, BT848_INT_MASK);
250 bgwrite(~0x0, BT848_INT_STAT);
251 bgwrite(0x0, BT848_GPIO_OUT_EN);
252
253 iounmap(bg->mmio);
254 release_mem_region(pci_resource_start(pdev, 0),
255 pci_resource_len(pdev, 0));
256 pci_disable_device(pdev);
257
258 pci_set_drvdata(pdev, NULL);
259 kfree(bg);
260}
261
262#ifdef CONFIG_PM
263static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
264{
265 struct bt8xxgpio *bg = pci_get_drvdata(pdev);
266 unsigned long flags;
267
268 spin_lock_irqsave(&bg->lock, flags);
269
270 bg->saved_outen = bgread(BT848_GPIO_OUT_EN);
271 bg->saved_data = bgread(BT848_GPIO_DATA);
272
273 bgwrite(0, BT848_INT_MASK);
274 bgwrite(~0x0, BT848_INT_STAT);
275 bgwrite(0x0, BT848_GPIO_OUT_EN);
276
277 spin_unlock_irqrestore(&bg->lock, flags);
278
279 pci_save_state(pdev);
280 pci_disable_device(pdev);
281 pci_set_power_state(pdev, pci_choose_state(pdev, state));
282
283 return 0;
284}
285
286static int bt8xxgpio_resume(struct pci_dev *pdev)
287{
288 struct bt8xxgpio *bg = pci_get_drvdata(pdev);
289 unsigned long flags;
290 int err;
291
292 pci_set_power_state(pdev, 0);
293 err = pci_enable_device(pdev);
294 if (err)
295 return err;
296 pci_restore_state(pdev);
297
298 spin_lock_irqsave(&bg->lock, flags);
299
300 bgwrite(0, BT848_INT_MASK);
301 bgwrite(0, BT848_GPIO_DMA_CTL);
302 bgwrite(0, BT848_GPIO_REG_INP);
303 bgwrite(bg->saved_outen, BT848_GPIO_OUT_EN);
304 bgwrite(bg->saved_data & bg->saved_outen,
305 BT848_GPIO_DATA);
306
307 spin_unlock_irqrestore(&bg->lock, flags);
308
309 return 0;
310}
311#else
312#define bt8xxgpio_suspend NULL
313#define bt8xxgpio_resume NULL
314#endif /* CONFIG_PM */
315
316static struct pci_device_id bt8xxgpio_pci_tbl[] = {
317 { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT848) },
318 { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT849) },
319 { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT878) },
320 { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT879) },
321 { 0, },
322};
323MODULE_DEVICE_TABLE(pci, bt8xxgpio_pci_tbl);
324
325static struct pci_driver bt8xxgpio_pci_driver = {
326 .name = "bt8xxgpio",
327 .id_table = bt8xxgpio_pci_tbl,
328 .probe = bt8xxgpio_probe,
329 .remove = bt8xxgpio_remove,
330 .suspend = bt8xxgpio_suspend,
331 .resume = bt8xxgpio_resume,
332};
333
334static int bt8xxgpio_init(void)
335{
336 return pci_register_driver(&bt8xxgpio_pci_driver);
337}
338module_init(bt8xxgpio_init);
339
340static void bt8xxgpio_exit(void)
341{
342 pci_unregister_driver(&bt8xxgpio_pci_driver);
343}
344module_exit(bt8xxgpio_exit);
345
346MODULE_LICENSE("GPL");
347MODULE_AUTHOR("Michael Buesch");
348MODULE_DESCRIPTION("Abuse a BT8xx framegrabber card as generic GPIO card");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index beaf6b3a37dc..8d2940517c99 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2,8 +2,11 @@
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/irq.h> 3#include <linux/irq.h>
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5 5#include <linux/device.h>
6#include <asm/gpio.h> 6#include <linux/err.h>
7#include <linux/debugfs.h>
8#include <linux/seq_file.h>
9#include <linux/gpio.h>
7 10
8 11
9/* Optional implementation infrastructure for GPIO interfaces. 12/* Optional implementation infrastructure for GPIO interfaces.
@@ -44,6 +47,8 @@ struct gpio_desc {
44#define FLAG_REQUESTED 0 47#define FLAG_REQUESTED 0
45#define FLAG_IS_OUT 1 48#define FLAG_IS_OUT 1
46#define FLAG_RESERVED 2 49#define FLAG_RESERVED 2
50#define FLAG_EXPORT 3 /* protected by sysfs_lock */
51#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
47 52
48#ifdef CONFIG_DEBUG_FS 53#ifdef CONFIG_DEBUG_FS
49 const char *label; 54 const char *label;
@@ -151,6 +156,482 @@ err:
151 return ret; 156 return ret;
152} 157}
153 158
159#ifdef CONFIG_GPIO_SYSFS
160
161/* lock protects against unexport_gpio() being called while
162 * sysfs files are active.
163 */
164static DEFINE_MUTEX(sysfs_lock);
165
166/*
167 * /sys/class/gpio/gpioN... only for GPIOs that are exported
168 * /direction
169 * * MAY BE OMITTED if kernel won't allow direction changes
170 * * is read/write as "in" or "out"
171 * * may also be written as "high" or "low", initializing
172 * output value as specified ("out" implies "low")
173 * /value
174 * * always readable, subject to hardware behavior
175 * * may be writable, as zero/nonzero
176 *
177 * REVISIT there will likely be an attribute for configuring async
178 * notifications, e.g. to specify polling interval or IRQ trigger type
179 * that would for example trigger a poll() on the "value".
180 */
181
182static ssize_t gpio_direction_show(struct device *dev,
183 struct device_attribute *attr, char *buf)
184{
185 const struct gpio_desc *desc = dev_get_drvdata(dev);
186 ssize_t status;
187
188 mutex_lock(&sysfs_lock);
189
190 if (!test_bit(FLAG_EXPORT, &desc->flags))
191 status = -EIO;
192 else
193 status = sprintf(buf, "%s\n",
194 test_bit(FLAG_IS_OUT, &desc->flags)
195 ? "out" : "in");
196
197 mutex_unlock(&sysfs_lock);
198 return status;
199}
200
201static ssize_t gpio_direction_store(struct device *dev,
202 struct device_attribute *attr, const char *buf, size_t size)
203{
204 const struct gpio_desc *desc = dev_get_drvdata(dev);
205 unsigned gpio = desc - gpio_desc;
206 ssize_t status;
207
208 mutex_lock(&sysfs_lock);
209
210 if (!test_bit(FLAG_EXPORT, &desc->flags))
211 status = -EIO;
212 else if (sysfs_streq(buf, "high"))
213 status = gpio_direction_output(gpio, 1);
214 else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low"))
215 status = gpio_direction_output(gpio, 0);
216 else if (sysfs_streq(buf, "in"))
217 status = gpio_direction_input(gpio);
218 else
219 status = -EINVAL;
220
221 mutex_unlock(&sysfs_lock);
222 return status ? : size;
223}
224
225static const DEVICE_ATTR(direction, 0644,
226 gpio_direction_show, gpio_direction_store);
227
228static ssize_t gpio_value_show(struct device *dev,
229 struct device_attribute *attr, char *buf)
230{
231 const struct gpio_desc *desc = dev_get_drvdata(dev);
232 unsigned gpio = desc - gpio_desc;
233 ssize_t status;
234
235 mutex_lock(&sysfs_lock);
236
237 if (!test_bit(FLAG_EXPORT, &desc->flags))
238 status = -EIO;
239 else
240 status = sprintf(buf, "%d\n", gpio_get_value_cansleep(gpio));
241
242 mutex_unlock(&sysfs_lock);
243 return status;
244}
245
246static ssize_t gpio_value_store(struct device *dev,
247 struct device_attribute *attr, const char *buf, size_t size)
248{
249 const struct gpio_desc *desc = dev_get_drvdata(dev);
250 unsigned gpio = desc - gpio_desc;
251 ssize_t status;
252
253 mutex_lock(&sysfs_lock);
254
255 if (!test_bit(FLAG_EXPORT, &desc->flags))
256 status = -EIO;
257 else if (!test_bit(FLAG_IS_OUT, &desc->flags))
258 status = -EPERM;
259 else {
260 long value;
261
262 status = strict_strtol(buf, 0, &value);
263 if (status == 0) {
264 gpio_set_value_cansleep(gpio, value != 0);
265 status = size;
266 }
267 }
268
269 mutex_unlock(&sysfs_lock);
270 return status;
271}
272
273static /*const*/ DEVICE_ATTR(value, 0644,
274 gpio_value_show, gpio_value_store);
275
276static const struct attribute *gpio_attrs[] = {
277 &dev_attr_direction.attr,
278 &dev_attr_value.attr,
279 NULL,
280};
281
282static const struct attribute_group gpio_attr_group = {
283 .attrs = (struct attribute **) gpio_attrs,
284};
285
286/*
287 * /sys/class/gpio/gpiochipN/
288 * /base ... matching gpio_chip.base (N)
289 * /label ... matching gpio_chip.label
290 * /ngpio ... matching gpio_chip.ngpio
291 */
292
293static ssize_t chip_base_show(struct device *dev,
294 struct device_attribute *attr, char *buf)
295{
296 const struct gpio_chip *chip = dev_get_drvdata(dev);
297
298 return sprintf(buf, "%d\n", chip->base);
299}
300static DEVICE_ATTR(base, 0444, chip_base_show, NULL);
301
302static ssize_t chip_label_show(struct device *dev,
303 struct device_attribute *attr, char *buf)
304{
305 const struct gpio_chip *chip = dev_get_drvdata(dev);
306
307 return sprintf(buf, "%s\n", chip->label ? : "");
308}
309static DEVICE_ATTR(label, 0444, chip_label_show, NULL);
310
311static ssize_t chip_ngpio_show(struct device *dev,
312 struct device_attribute *attr, char *buf)
313{
314 const struct gpio_chip *chip = dev_get_drvdata(dev);
315
316 return sprintf(buf, "%u\n", chip->ngpio);
317}
318static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
319
320static const struct attribute *gpiochip_attrs[] = {
321 &dev_attr_base.attr,
322 &dev_attr_label.attr,
323 &dev_attr_ngpio.attr,
324 NULL,
325};
326
327static const struct attribute_group gpiochip_attr_group = {
328 .attrs = (struct attribute **) gpiochip_attrs,
329};
330
331/*
332 * /sys/class/gpio/export ... write-only
333 * integer N ... number of GPIO to export (full access)
334 * /sys/class/gpio/unexport ... write-only
335 * integer N ... number of GPIO to unexport
336 */
337static ssize_t export_store(struct class *class, const char *buf, size_t len)
338{
339 long gpio;
340 int status;
341
342 status = strict_strtol(buf, 0, &gpio);
343 if (status < 0)
344 goto done;
345
346 /* No extra locking here; FLAG_SYSFS just signifies that the
347 * request and export were done on behalf of userspace, so
348 * they may be undone on its behalf too.
349 */
350
351 status = gpio_request(gpio, "sysfs");
352 if (status < 0)
353 goto done;
354
355 status = gpio_export(gpio, true);
356 if (status < 0)
357 gpio_free(gpio);
358 else
359 set_bit(FLAG_SYSFS, &gpio_desc[gpio].flags);
360
361done:
362 if (status)
363 pr_debug("%s: status %d\n", __func__, status);
364 return status ? : len;
365}
366
367static ssize_t unexport_store(struct class *class, const char *buf, size_t len)
368{
369 long gpio;
370 int status;
371
372 status = strict_strtol(buf, 0, &gpio);
373 if (status < 0)
374 goto done;
375
376 status = -EINVAL;
377
378 /* reject bogus commands (gpio_unexport ignores them) */
379 if (!gpio_is_valid(gpio))
380 goto done;
381
382 /* No extra locking here; FLAG_SYSFS just signifies that the
383 * request and export were done on behalf of userspace, so
384 * they may be undone on its behalf too.
385 */
386 if (test_and_clear_bit(FLAG_SYSFS, &gpio_desc[gpio].flags)) {
387 status = 0;
388 gpio_free(gpio);
389 }
390done:
391 if (status)
392 pr_debug("%s: status %d\n", __func__, status);
393 return status ? : len;
394}
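
For illustration only (this sketch is not part of the patch): once a GPIO has been exported through the "export" file, the gpioN attributes described in the comments above can be driven from a plain userspace program. The GPIO number 23 is purely hypothetical, and error handling is trimmed for brevity.

/* Hypothetical userspace sketch: export GPIO 23, drive it, then hand it back. */
#include <stdio.h>

static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	sysfs_write("/sys/class/gpio/export", "23");
	sysfs_write("/sys/class/gpio/gpio23/direction", "high");
	sysfs_write("/sys/class/gpio/gpio23/value", "0");
	sysfs_write("/sys/class/gpio/unexport", "23");
	return 0;
}
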
395
396static struct class_attribute gpio_class_attrs[] = {
397 __ATTR(export, 0200, NULL, export_store),
398 __ATTR(unexport, 0200, NULL, unexport_store),
399 __ATTR_NULL,
400};
401
402static struct class gpio_class = {
403 .name = "gpio",
404 .owner = THIS_MODULE,
405
406 .class_attrs = gpio_class_attrs,
407};
408
409
410/**
411 * gpio_export - export a GPIO through sysfs
412 * @gpio: gpio to make available, already requested
413 * @direction_may_change: true if userspace may change gpio direction
414 * Context: arch_initcall or later
415 *
416 * When drivers want to make a GPIO accessible to userspace after they
417 * have requested it -- perhaps while debugging, or as part of their
418 * public interface -- they may use this routine. If the GPIO can
419 * change direction (some can't) and the caller allows it, userspace
420 * will see "direction" sysfs attribute which may be used to change
421 * the gpio's direction. A "value" attribute will always be provided.
422 *
423 * Returns zero on success, else an error.
424 */
425int gpio_export(unsigned gpio, bool direction_may_change)
426{
427 unsigned long flags;
428 struct gpio_desc *desc;
429 int status = -EINVAL;
430
431 /* can't export until sysfs is available ... */
432 if (!gpio_class.p) {
433 pr_debug("%s: called too early!\n", __func__);
434 return -ENOENT;
435 }
436
437 if (!gpio_is_valid(gpio))
438 goto done;
439
440 mutex_lock(&sysfs_lock);
441
442 spin_lock_irqsave(&gpio_lock, flags);
443 desc = &gpio_desc[gpio];
444 if (test_bit(FLAG_REQUESTED, &desc->flags)
445 && !test_bit(FLAG_EXPORT, &desc->flags)) {
446 status = 0;
447 if (!desc->chip->direction_input
448 || !desc->chip->direction_output)
449 direction_may_change = false;
450 }
451 spin_unlock_irqrestore(&gpio_lock, flags);
452
453 if (status == 0) {
454 struct device *dev;
455
456 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
457 desc, "gpio%d", gpio);
458 if (dev) {
459 if (direction_may_change)
460 status = sysfs_create_group(&dev->kobj,
461 &gpio_attr_group);
462 else
463 status = device_create_file(dev,
464 &dev_attr_value);
465 if (status != 0)
466 device_unregister(dev);
467 } else
468 status = -ENODEV;
469 if (status == 0)
470 set_bit(FLAG_EXPORT, &desc->flags);
471 }
472
473 mutex_unlock(&sysfs_lock);
474
475done:
476 if (status)
477 pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
478
479 return status;
480}
481EXPORT_SYMBOL_GPL(gpio_export);
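
A minimal in-kernel sketch of how gpio_export() is meant to be used after gpio_request(); the driver, label and GPIO number are hypothetical and not taken from this patch.

/* Illustrative only: request a GPIO, configure it, and expose it through
 * sysfs for debugging.
 */
static int example_export_led(unsigned gpio)
{
	int err;

	err = gpio_request(gpio, "example-led");
	if (err)
		return err;

	err = gpio_direction_output(gpio, 0);
	if (err) {
		gpio_free(gpio);
		return err;
	}

	/* let userspace toggle the value, but not the direction */
	return gpio_export(gpio, false);
}
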
482
483static int match_export(struct device *dev, void *data)
484{
485 return dev_get_drvdata(dev) == data;
486}
487
488/**
489 * gpio_unexport - reverse effect of gpio_export()
490 * @gpio: gpio to make unavailable
491 *
492 * This is implicit on gpio_free().
493 */
494void gpio_unexport(unsigned gpio)
495{
496 struct gpio_desc *desc;
497 int status = -EINVAL;
498
499 if (!gpio_is_valid(gpio))
500 goto done;
501
502 mutex_lock(&sysfs_lock);
503
504 desc = &gpio_desc[gpio];
505 if (test_bit(FLAG_EXPORT, &desc->flags)) {
506 struct device *dev = NULL;
507
508 dev = class_find_device(&gpio_class, NULL, desc, match_export);
509 if (dev) {
510 clear_bit(FLAG_EXPORT, &desc->flags);
511 put_device(dev);
512 device_unregister(dev);
513 status = 0;
514 } else
515 status = -ENODEV;
516 }
517
518 mutex_unlock(&sysfs_lock);
519done:
520 if (status)
521 pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
522}
523EXPORT_SYMBOL_GPL(gpio_unexport);
524
525static int gpiochip_export(struct gpio_chip *chip)
526{
527 int status;
528 struct device *dev;
529
530 /* Many systems register gpio chips for SOC support very early,
531 * before driver model support is available. In those cases we
532 * export this later, in gpiolib_sysfs_init() ... here we just
533 * verify that _some_ field of gpio_class got initialized.
534 */
535 if (!gpio_class.p)
536 return 0;
537
538 /* use chip->base for the ID; it's already known to be unique */
539 mutex_lock(&sysfs_lock);
540 dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
541 "gpiochip%d", chip->base);
542 if (dev) {
543 status = sysfs_create_group(&dev->kobj,
544 &gpiochip_attr_group);
545 } else
546 status = -ENODEV;
547 chip->exported = (status == 0);
548 mutex_unlock(&sysfs_lock);
549
550 if (status) {
551 unsigned long flags;
552 unsigned gpio;
553
554 spin_lock_irqsave(&gpio_lock, flags);
555 gpio = chip->base;
556 while (gpio_desc[gpio].chip == chip)
557 gpio_desc[gpio++].chip = NULL;
558 spin_unlock_irqrestore(&gpio_lock, flags);
559
560 pr_debug("%s: chip %s status %d\n", __func__,
561 chip->label, status);
562 }
563
564 return status;
565}
566
567static void gpiochip_unexport(struct gpio_chip *chip)
568{
569 int status;
570 struct device *dev;
571
572 mutex_lock(&sysfs_lock);
573 dev = class_find_device(&gpio_class, NULL, chip, match_export);
574 if (dev) {
575 put_device(dev);
576 device_unregister(dev);
577 chip->exported = 0;
578 status = 0;
579 } else
580 status = -ENODEV;
581 mutex_unlock(&sysfs_lock);
582
583 if (status)
584 pr_debug("%s: chip %s status %d\n", __func__,
585 chip->label, status);
586}
587
588static int __init gpiolib_sysfs_init(void)
589{
590 int status;
591 unsigned long flags;
592 unsigned gpio;
593
594 status = class_register(&gpio_class);
595 if (status < 0)
596 return status;
597
598 /* Scan and register the gpio_chips which registered very
599 * early (e.g. before the class_register above was called).
600 *
601 * We run before arch_initcall() so chip->dev nodes can have been
602 * registered, and so arch_initcall() code can always gpio_export().
603 */
604 spin_lock_irqsave(&gpio_lock, flags);
605 for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) {
606 struct gpio_chip *chip;
607
608 chip = gpio_desc[gpio].chip;
609 if (!chip || chip->exported)
610 continue;
611
612 spin_unlock_irqrestore(&gpio_lock, flags);
613 status = gpiochip_export(chip);
614 spin_lock_irqsave(&gpio_lock, flags);
615 }
616 spin_unlock_irqrestore(&gpio_lock, flags);
617
618
619 return status;
620}
621postcore_initcall(gpiolib_sysfs_init);
622
623#else
624static inline int gpiochip_export(struct gpio_chip *chip)
625{
626 return 0;
627}
628
629static inline void gpiochip_unexport(struct gpio_chip *chip)
630{
631}
632
633#endif /* CONFIG_GPIO_SYSFS */
634
154/** 635/**
155 * gpiochip_add() - register a gpio_chip 636 * gpiochip_add() - register a gpio_chip
156 * @chip: the chip to register, with chip->base initialized 637 * @chip: the chip to register, with chip->base initialized
@@ -160,6 +641,11 @@ err:
160 * because the chip->base is invalid or already associated with a 641 * because the chip->base is invalid or already associated with a
161 * different chip. Otherwise it returns zero as a success code. 642 * different chip. Otherwise it returns zero as a success code.
162 * 643 *
644 * When gpiochip_add() is called very early during boot, so that GPIOs
645 * can be freely used, the chip->dev device must be registered before
646 * the gpio framework's arch_initcall(). Otherwise sysfs initialization
647 * for GPIOs will fail rudely.
648 *
163 * If chip->base is negative, this requests dynamic assignment of 649 * If chip->base is negative, this requests dynamic assignment of
164 * a range of valid GPIOs. 650 * a range of valid GPIOs.
165 */ 651 */
@@ -182,7 +668,7 @@ int gpiochip_add(struct gpio_chip *chip)
182 base = gpiochip_find_base(chip->ngpio); 668 base = gpiochip_find_base(chip->ngpio);
183 if (base < 0) { 669 if (base < 0) {
184 status = base; 670 status = base;
185 goto fail_unlock; 671 goto unlock;
186 } 672 }
187 chip->base = base; 673 chip->base = base;
188 } 674 }
@@ -197,12 +683,23 @@ int gpiochip_add(struct gpio_chip *chip)
197 if (status == 0) { 683 if (status == 0) {
198 for (id = base; id < base + chip->ngpio; id++) { 684 for (id = base; id < base + chip->ngpio; id++) {
199 gpio_desc[id].chip = chip; 685 gpio_desc[id].chip = chip;
200 gpio_desc[id].flags = 0; 686
687 /* REVISIT: most hardware initializes GPIOs as
688 * inputs (often with pullups enabled) so power
689 * usage is minimized. Linux code should set the
690 * gpio direction first thing; but until it does,
691 * we may expose the wrong direction in sysfs.
692 */
693 gpio_desc[id].flags = !chip->direction_input
694 ? (1 << FLAG_IS_OUT)
695 : 0;
201 } 696 }
202 } 697 }
203 698
204fail_unlock: 699unlock:
205 spin_unlock_irqrestore(&gpio_lock, flags); 700 spin_unlock_irqrestore(&gpio_lock, flags);
701 if (status == 0)
702 status = gpiochip_export(chip);
206fail: 703fail:
207 /* failures here can mean systems won't boot... */ 704 /* failures here can mean systems won't boot... */
208 if (status) 705 if (status)
@@ -239,6 +736,10 @@ int gpiochip_remove(struct gpio_chip *chip)
239 } 736 }
240 737
241 spin_unlock_irqrestore(&gpio_lock, flags); 738 spin_unlock_irqrestore(&gpio_lock, flags);
739
740 if (status == 0)
741 gpiochip_unexport(chip);
742
242 return status; 743 return status;
243} 744}
244EXPORT_SYMBOL_GPL(gpiochip_remove); 745EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -296,6 +797,8 @@ void gpio_free(unsigned gpio)
296 return; 797 return;
297 } 798 }
298 799
800 gpio_unexport(gpio);
801
299 spin_lock_irqsave(&gpio_lock, flags); 802 spin_lock_irqsave(&gpio_lock, flags);
300 803
301 desc = &gpio_desc[gpio]; 804 desc = &gpio_desc[gpio];
@@ -534,10 +1037,6 @@ EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
534 1037
535#ifdef CONFIG_DEBUG_FS 1038#ifdef CONFIG_DEBUG_FS
536 1039
537#include <linux/debugfs.h>
538#include <linux/seq_file.h>
539
540
541static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip) 1040static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
542{ 1041{
543 unsigned i; 1042 unsigned i;
@@ -614,17 +1113,28 @@ static int gpiolib_show(struct seq_file *s, void *unused)
614 /* REVISIT this isn't locked against gpio_chip removal ... */ 1113 /* REVISIT this isn't locked against gpio_chip removal ... */
615 1114
616 for (gpio = 0; gpio_is_valid(gpio); gpio++) { 1115 for (gpio = 0; gpio_is_valid(gpio); gpio++) {
1116 struct device *dev;
1117
617 if (chip == gpio_desc[gpio].chip) 1118 if (chip == gpio_desc[gpio].chip)
618 continue; 1119 continue;
619 chip = gpio_desc[gpio].chip; 1120 chip = gpio_desc[gpio].chip;
620 if (!chip) 1121 if (!chip)
621 continue; 1122 continue;
622 1123
623 seq_printf(s, "%sGPIOs %d-%d, %s%s:\n", 1124 seq_printf(s, "%sGPIOs %d-%d",
624 started ? "\n" : "", 1125 started ? "\n" : "",
625 chip->base, chip->base + chip->ngpio - 1, 1126 chip->base, chip->base + chip->ngpio - 1);
626 chip->label ? : "generic", 1127 dev = chip->dev;
627 chip->can_sleep ? ", can sleep" : ""); 1128 if (dev)
1129 seq_printf(s, ", %s/%s",
1130 dev->bus ? dev->bus->name : "no-bus",
1131 dev->bus_id);
1132 if (chip->label)
1133 seq_printf(s, ", %s", chip->label);
1134 if (chip->can_sleep)
1135 seq_printf(s, ", can sleep");
1136 seq_printf(s, ":\n");
1137
628 started = 1; 1138 started = 1;
629 if (chip->dbg_show) 1139 if (chip->dbg_show)
630 chip->dbg_show(s, chip); 1140 chip->dbg_show(s, chip);
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
new file mode 100644
index 000000000000..b51c8135ca28
--- /dev/null
+++ b/drivers/gpio/max732x.c
@@ -0,0 +1,385 @@
1/*
2 * max732x.c - I2C Port Expander with 8/16 I/O
3 *
4 * Copyright (C) 2007 Marvell International Ltd.
5 * Copyright (C) 2008 Jack Ren <jack.ren@marvell.com>
6 * Copyright (C) 2008 Eric Miao <eric.miao@marvell.com>
7 *
8 * Derived from drivers/gpio/pca953x.c
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/string.h>
19#include <linux/gpio.h>
20
21#include <linux/i2c.h>
22#include <linux/i2c/max732x.h>
23
24
25/*
26 * Each port of MAX732x (including MAX7319) falls into one of the
27 * following three types:
28 *
29 * - Push Pull Output
30 * - Input
31 * - Open Drain I/O
32 *
33 * designated by 'O', 'I' and 'P' respectively, according to MAXIM's
34 * datasheets.
35 *
36 * There are two groups of I/O ports, each group usually includes
37 * up to 8 I/O ports, and is accessed by a specific I2C address:
38 *
39 * - Group A : by I2C address 0b'110xxxx
40 * - Group B : by I2C address 0b'101xxxx
41 *
42 * where 'xxxx' is decided by the connections of pin AD2/AD0. The
43 * address used also affects the initial state of output signals.
44 *
45 * Within each group of ports, there are five known combinations of
46 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
47 * the detailed organization of these ports.
48 *
49 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
50 * and GPIOs from GROUP_A are numbered before those from GROUP_B
51 * (if there are two groups).
52 *
53 * NOTE: MAX7328/MAX7329 are drop-in replacements for PCF8574/a, so
54 * they are not supported by this driver.
55 */
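
A rough board-code sketch (not part of this patch) of how such a chip might be declared; the bus number, the 0x6d address and the gpio_base value are assumptions that depend entirely on the board.

/* Hypothetical board file fragment: one MAX7325 on I2C bus 0, group A address. */
#include <linux/i2c.h>
#include <linux/i2c/max732x.h>

static struct max732x_platform_data example_max7325_pdata = {
	.gpio_base	= 224,	/* first GPIO number handed to gpiolib */
};

static struct i2c_board_info example_i2c_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("max7325", 0x6d),	/* 0b110xxxx: group A */
		.platform_data	= &example_max7325_pdata,
	},
};

/* typically registered from machine init code:
 *	i2c_register_board_info(0, example_i2c_board_info,
 *				ARRAY_SIZE(example_i2c_board_info));
 */
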
56
57#define PORT_NONE 0x0 /* '/' No Port */
58#define PORT_OUTPUT 0x1 /* 'O' Push-Pull, Output Only */
59#define PORT_INPUT 0x2 /* 'I' Input Only */
60#define PORT_OPENDRAIN 0x3 /* 'P' Open-Drain, I/O */
61
62#define IO_4I4O 0x5AA5 /* O7 O6 I5 I4 I3 I2 O1 O0 */
63#define IO_4P4O 0x5FF5 /* O7 O6 P5 P4 P3 P2 O1 O0 */
64#define IO_8I 0xAAAA /* I7 I6 I5 I4 I3 I2 I1 I0 */
65#define IO_8P 0xFFFF /* P7 P6 P5 P4 P3 P2 P1 P0 */
66#define IO_8O 0x5555 /* O7 O6 O5 O4 O3 O2 O1 O0 */
67
68#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */
69#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */
70
71static const struct i2c_device_id max732x_id[] = {
72 { "max7319", GROUP_A(IO_8I) },
73 { "max7320", GROUP_B(IO_8O) },
74 { "max7321", GROUP_A(IO_8P) },
75 { "max7322", GROUP_A(IO_4I4O) },
76 { "max7323", GROUP_A(IO_4P4O) },
77 { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) },
78 { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) },
79 { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) },
80 { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) },
81 { },
82};
83MODULE_DEVICE_TABLE(i2c, max732x_id);
84
85struct max732x_chip {
86 struct gpio_chip gpio_chip;
87
88 struct i2c_client *client; /* "main" client */
89 struct i2c_client *client_dummy;
90 struct i2c_client *client_group_a;
91 struct i2c_client *client_group_b;
92
93 unsigned int mask_group_a;
94 unsigned int dir_input;
95 unsigned int dir_output;
96
97 struct mutex lock;
98 uint8_t reg_out[2];
99};
100
101static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
102{
103 struct i2c_client *client;
104 int ret;
105
106 client = group_a ? chip->client_group_a : chip->client_group_b;
107 ret = i2c_smbus_write_byte(client, val);
108 if (ret < 0) {
109 dev_err(&client->dev, "failed writing\n");
110 return ret;
111 }
112
113 return 0;
114}
115
116static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val)
117{
118 struct i2c_client *client;
119 int ret;
120
121 client = group_a ? chip->client_group_a : chip->client_group_b;
122 ret = i2c_smbus_read_byte(client);
123 if (ret < 0) {
124 dev_err(&client->dev, "failed reading\n");
125 return ret;
126 }
127
128 *val = (uint8_t)ret;
129 return 0;
130}
131
132static inline int is_group_a(struct max732x_chip *chip, unsigned off)
133{
134 return (1u << off) & chip->mask_group_a;
135}
136
137static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
138{
139 struct max732x_chip *chip;
140 uint8_t reg_val;
141 int ret;
142
143 chip = container_of(gc, struct max732x_chip, gpio_chip);
144
145 ret = max732x_read(chip, is_group_a(chip, off), &reg_val);
146 if (ret < 0)
147 return 0;
148
149 return reg_val & (1u << (off & 0x7));
150}
151
152static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
153{
154 struct max732x_chip *chip;
155 uint8_t reg_out, mask = 1u << (off & 0x7);
156 int ret;
157
158 chip = container_of(gc, struct max732x_chip, gpio_chip);
159
160 mutex_lock(&chip->lock);
161
162 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
163 reg_out = (val) ? reg_out | mask : reg_out & ~mask;
164
165 ret = max732x_write(chip, is_group_a(chip, off), reg_out);
166 if (ret < 0)
167 goto out;
168
169 /* update the shadow register then */
170 if (off > 7)
171 chip->reg_out[1] = reg_out;
172 else
173 chip->reg_out[0] = reg_out;
174out:
175 mutex_unlock(&chip->lock);
176}
177
178static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
179{
180 struct max732x_chip *chip;
181 unsigned int mask = 1u << off;
182
183 chip = container_of(gc, struct max732x_chip, gpio_chip);
184
185 if ((mask & chip->dir_input) == 0) {
186 dev_dbg(&chip->client->dev, "%s port %d is output only\n",
187 chip->client->name, off);
188 return -EACCES;
189 }
190
191 return 0;
192}
193
194static int max732x_gpio_direction_output(struct gpio_chip *gc,
195 unsigned off, int val)
196{
197 struct max732x_chip *chip;
198 unsigned int mask = 1u << off;
199
200 chip = container_of(gc, struct max732x_chip, gpio_chip);
201
202 if ((mask & chip->dir_output) == 0) {
203 dev_dbg(&chip->client->dev, "%s port %d is input only\n",
204 chip->client->name, off);
205 return -EACCES;
206 }
207
208 max732x_gpio_set_value(gc, off, val);
209 return 0;
210}
211
212static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
213 const struct i2c_device_id *id,
214 unsigned gpio_start)
215{
216 struct gpio_chip *gc = &chip->gpio_chip;
217 uint32_t id_data = id->driver_data;
218 int i, port = 0;
219
220 for (i = 0; i < 16; i++, id_data >>= 2) {
221 unsigned int mask = 1 << port;
222
223 switch (id_data & 0x3) {
224 case PORT_OUTPUT:
225 chip->dir_output |= mask;
226 break;
227 case PORT_INPUT:
228 chip->dir_input |= mask;
229 break;
230 case PORT_OPENDRAIN:
231 chip->dir_output |= mask;
232 chip->dir_input |= mask;
233 break;
234 default:
235 continue;
236 }
237
238 if (i < 8)
239 chip->mask_group_a |= mask;
240 port++;
241 }
242
243 if (chip->dir_input)
244 gc->direction_input = max732x_gpio_direction_input;
245 if (chip->dir_output) {
246 gc->direction_output = max732x_gpio_direction_output;
247 gc->set = max732x_gpio_set_value;
248 }
249 gc->get = max732x_gpio_get_value;
250 gc->can_sleep = 1;
251
252 gc->base = gpio_start;
253 gc->ngpio = port;
254 gc->label = chip->client->name;
255 gc->owner = THIS_MODULE;
256
257 return port;
258}
259
260static int __devinit max732x_probe(struct i2c_client *client,
261 const struct i2c_device_id *id)
262{
263 struct max732x_platform_data *pdata;
264 struct max732x_chip *chip;
265 struct i2c_client *c;
266 uint16_t addr_a, addr_b;
267 int ret, nr_port;
268
269 pdata = client->dev.platform_data;
270 if (pdata == NULL)
271 return -ENODEV;
272
273 chip = kzalloc(sizeof(struct max732x_chip), GFP_KERNEL);
274 if (chip == NULL)
275 return -ENOMEM;
276 chip->client = client;
277
278 nr_port = max732x_setup_gpio(chip, id, pdata->gpio_base);
279
280 addr_a = (client->addr & 0x0f) | 0x60;
281 addr_b = (client->addr & 0x0f) | 0x50;
282
283 switch (client->addr & 0x70) {
284 case 0x60:
285 chip->client_group_a = client;
286 if (nr_port > 7) {
287 c = i2c_new_dummy(client->adapter, addr_b);
288 chip->client_group_b = chip->client_dummy = c;
289 }
290 break;
291 case 0x50:
292 chip->client_group_b = client;
293 if (nr_port > 7) {
294 c = i2c_new_dummy(client->adapter, addr_a);
295 chip->client_group_a = chip->client_dummy = c;
296 }
297 break;
298 default:
299 dev_err(&client->dev, "invalid I2C address specified %02x\n",
300 client->addr);
301 ret = -EINVAL;
302 goto out_failed;
303 }
304
305 mutex_init(&chip->lock);
306
307 max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]);
308 if (nr_port > 7)
309 max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]);
310
311 ret = gpiochip_add(&chip->gpio_chip);
312 if (ret)
313 goto out_failed;
314
315 if (pdata->setup) {
316 ret = pdata->setup(client, chip->gpio_chip.base,
317 chip->gpio_chip.ngpio, pdata->context);
318 if (ret < 0)
319 dev_warn(&client->dev, "setup failed, %d\n", ret);
320 }
321
322 i2c_set_clientdata(client, chip);
323 return 0;
324
325out_failed:
326 kfree(chip);
327 return ret;
328}
329
330static int __devexit max732x_remove(struct i2c_client *client)
331{
332 struct max732x_platform_data *pdata = client->dev.platform_data;
333 struct max732x_chip *chip = i2c_get_clientdata(client);
334 int ret;
335
336 if (pdata->teardown) {
337 ret = pdata->teardown(client, chip->gpio_chip.base,
338 chip->gpio_chip.ngpio, pdata->context);
339 if (ret < 0) {
340 dev_err(&client->dev, "%s failed, %d\n",
341 "teardown", ret);
342 return ret;
343 }
344 }
345
346 ret = gpiochip_remove(&chip->gpio_chip);
347 if (ret) {
348 dev_err(&client->dev, "%s failed, %d\n",
349 "gpiochip_remove()", ret);
350 return ret;
351 }
352
353 /* unregister any dummy i2c_client */
354 if (chip->client_dummy)
355 i2c_unregister_device(chip->client_dummy);
356
357 kfree(chip);
358 return 0;
359}
360
361static struct i2c_driver max732x_driver = {
362 .driver = {
363 .name = "max732x",
364 .owner = THIS_MODULE,
365 },
366 .probe = max732x_probe,
367 .remove = __devexit_p(max732x_remove),
368 .id_table = max732x_id,
369};
370
371static int __init max732x_init(void)
372{
373 return i2c_add_driver(&max732x_driver);
374}
375module_init(max732x_init);
376
377static void __exit max732x_exit(void)
378{
379 i2c_del_driver(&max732x_driver);
380}
381module_exit(max732x_exit);
382
383MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
384MODULE_DESCRIPTION("GPIO expander driver for MAX732X");
385MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c
index 7f92fdd5f0e2..8a1b405fefda 100644
--- a/drivers/gpio/mcp23s08.c
+++ b/drivers/gpio/mcp23s08.c
@@ -40,15 +40,26 @@ struct mcp23s08 {
40 struct spi_device *spi; 40 struct spi_device *spi;
41 u8 addr; 41 u8 addr;
42 42
43 u8 cache[11];
43 /* lock protects the cached values */ 44 /* lock protects the cached values */
44 struct mutex lock; 45 struct mutex lock;
45 u8 cache[11];
46 46
47 struct gpio_chip chip; 47 struct gpio_chip chip;
48 48
49 struct work_struct work; 49 struct work_struct work;
50}; 50};
51 51
52/* A given spi_device can represent up to four mcp23s08 chips
53 * sharing the same chipselect but using different addresses
54 * (e.g. chips #0 and #3 might be populated, but not #1 or #2).
55 * Driver data holds all the per-chip data.
56 */
57struct mcp23s08_driver_data {
58 unsigned ngpio;
59 struct mcp23s08 *mcp[4];
60 struct mcp23s08 chip[];
61};
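
To illustrate (again, not part of the patch): with this layout, a board populating the chips at addresses 0 and 3 on one chipselect could describe them roughly as below. The field names follow the probe code; the header path, the base of 208 and the pullup masks are assumptions.

/* Hypothetical platform data: chips #0 and #3 share the chipselect,
 * giving 16 consecutive GPIOs starting at 208.
 */
#include <linux/spi/mcp23s08.h>

static struct mcp23s08_platform_data example_mcp_pdata = {
	.chip[0] = {
		.is_present	= true,
		.pullups	= 0x0f,	/* ~100K pullups on the low nibble */
	},
	.chip[3] = {
		.is_present	= true,
		.pullups	= 0,
	},
	.base	= 208,
};
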
62
52static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg) 63static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
53{ 64{
54 u8 tx[2], rx[1]; 65 u8 tx[2], rx[1];
@@ -208,25 +219,18 @@ done:
208 219
209/*----------------------------------------------------------------------*/ 220/*----------------------------------------------------------------------*/
210 221
211static int mcp23s08_probe(struct spi_device *spi) 222static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
223 unsigned base, unsigned pullups)
212{ 224{
213 struct mcp23s08 *mcp; 225 struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
214 struct mcp23s08_platform_data *pdata; 226 struct mcp23s08 *mcp = data->mcp[addr];
215 int status; 227 int status;
216 int do_update = 0; 228 int do_update = 0;
217 229
218 pdata = spi->dev.platform_data;
219 if (!pdata || pdata->slave > 3 || !pdata->base)
220 return -ENODEV;
221
222 mcp = kzalloc(sizeof *mcp, GFP_KERNEL);
223 if (!mcp)
224 return -ENOMEM;
225
226 mutex_init(&mcp->lock); 230 mutex_init(&mcp->lock);
227 231
228 mcp->spi = spi; 232 mcp->spi = spi;
229 mcp->addr = 0x40 | (pdata->slave << 1); 233 mcp->addr = 0x40 | (addr << 1);
230 234
231 mcp->chip.label = "mcp23s08", 235 mcp->chip.label = "mcp23s08",
232 236
@@ -236,26 +240,28 @@ static int mcp23s08_probe(struct spi_device *spi)
236 mcp->chip.set = mcp23s08_set; 240 mcp->chip.set = mcp23s08_set;
237 mcp->chip.dbg_show = mcp23s08_dbg_show; 241 mcp->chip.dbg_show = mcp23s08_dbg_show;
238 242
239 mcp->chip.base = pdata->base; 243 mcp->chip.base = base;
240 mcp->chip.ngpio = 8; 244 mcp->chip.ngpio = 8;
241 mcp->chip.can_sleep = 1; 245 mcp->chip.can_sleep = 1;
246 mcp->chip.dev = &spi->dev;
242 mcp->chip.owner = THIS_MODULE; 247 mcp->chip.owner = THIS_MODULE;
243 248
244 spi_set_drvdata(spi, mcp); 249 /* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
245 250 * and MCP_IOCON.HAEN = 1, so we work with all chips.
246 /* verify MCP_IOCON.SEQOP = 0, so sequential reads work */ 251 */
247 status = mcp23s08_read(mcp, MCP_IOCON); 252 status = mcp23s08_read(mcp, MCP_IOCON);
248 if (status < 0) 253 if (status < 0)
249 goto fail; 254 goto fail;
250 if (status & IOCON_SEQOP) { 255 if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN)) {
251 status &= ~IOCON_SEQOP; 256 status &= ~IOCON_SEQOP;
257 status |= IOCON_HAEN;
252 status = mcp23s08_write(mcp, MCP_IOCON, (u8) status); 258 status = mcp23s08_write(mcp, MCP_IOCON, (u8) status);
253 if (status < 0) 259 if (status < 0)
254 goto fail; 260 goto fail;
255 } 261 }
256 262
257 /* configure ~100K pullups */ 263 /* configure ~100K pullups */
258 status = mcp23s08_write(mcp, MCP_GPPU, pdata->pullups); 264 status = mcp23s08_write(mcp, MCP_GPPU, pullups);
259 if (status < 0) 265 if (status < 0)
260 goto fail; 266 goto fail;
261 267
@@ -282,11 +288,58 @@ static int mcp23s08_probe(struct spi_device *spi)
282 tx[1] = MCP_IPOL; 288 tx[1] = MCP_IPOL;
283 memcpy(&tx[2], &mcp->cache[MCP_IPOL], sizeof(tx) - 2); 289 memcpy(&tx[2], &mcp->cache[MCP_IPOL], sizeof(tx) - 2);
284 status = spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0); 290 status = spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
285 291 if (status < 0)
286 /* FIXME check status... */ 292 goto fail;
287 } 293 }
288 294
289 status = gpiochip_add(&mcp->chip); 295 status = gpiochip_add(&mcp->chip);
296fail:
297 if (status < 0)
298 dev_dbg(&spi->dev, "can't setup chip %d, --> %d\n",
299 addr, status);
300 return status;
301}
302
303static int mcp23s08_probe(struct spi_device *spi)
304{
305 struct mcp23s08_platform_data *pdata;
306 unsigned addr;
307 unsigned chips = 0;
308 struct mcp23s08_driver_data *data;
309 int status;
310 unsigned base;
311
312 pdata = spi->dev.platform_data;
313 if (!pdata || !gpio_is_valid(pdata->base))
314 return -ENODEV;
315
316 for (addr = 0; addr < 4; addr++) {
317 if (!pdata->chip[addr].is_present)
318 continue;
319 chips++;
320 }
321 if (!chips)
322 return -ENODEV;
323
324 data = kzalloc(sizeof *data + chips * sizeof(struct mcp23s08),
325 GFP_KERNEL);
326 if (!data)
327 return -ENOMEM;
328 spi_set_drvdata(spi, data);
329
330 base = pdata->base;
331 for (addr = 0; addr < 4; addr++) {
332 if (!pdata->chip[addr].is_present)
333 continue;
334 chips--;
335 data->mcp[addr] = &data->chip[chips];
336 status = mcp23s08_probe_one(spi, addr, base,
337 pdata->chip[addr].pullups);
338 if (status < 0)
339 goto fail;
340 base += 8;
341 }
342 data->ngpio = base - pdata->base;
290 343
291 /* NOTE: these chips have a relatively sane IRQ framework, with 344 /* NOTE: these chips have a relatively sane IRQ framework, with
292 * per-signal masking and level/edge triggering. It's not yet 345 * per-signal masking and level/edge triggering. It's not yet
@@ -294,8 +347,9 @@ static int mcp23s08_probe(struct spi_device *spi)
294 */ 347 */
295 348
296 if (pdata->setup) { 349 if (pdata->setup) {
297 status = pdata->setup(spi, mcp->chip.base, 350 status = pdata->setup(spi,
298 mcp->chip.ngpio, pdata->context); 351 pdata->base, data->ngpio,
352 pdata->context);
299 if (status < 0) 353 if (status < 0)
300 dev_dbg(&spi->dev, "setup --> %d\n", status); 354 dev_dbg(&spi->dev, "setup --> %d\n", status);
301 } 355 }
@@ -303,19 +357,29 @@ static int mcp23s08_probe(struct spi_device *spi)
303 return 0; 357 return 0;
304 358
305fail: 359fail:
306 kfree(mcp); 360 for (addr = 0; addr < 4; addr++) {
361 int tmp;
362
363 if (!data->mcp[addr])
364 continue;
365 tmp = gpiochip_remove(&data->mcp[addr]->chip);
366 if (tmp < 0)
367 dev_err(&spi->dev, "%s --> %d\n", "remove", tmp);
368 }
369 kfree(data);
307 return status; 370 return status;
308} 371}
309 372
310static int mcp23s08_remove(struct spi_device *spi) 373static int mcp23s08_remove(struct spi_device *spi)
311{ 374{
312 struct mcp23s08 *mcp = spi_get_drvdata(spi); 375 struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
313 struct mcp23s08_platform_data *pdata = spi->dev.platform_data; 376 struct mcp23s08_platform_data *pdata = spi->dev.platform_data;
377 unsigned addr;
314 int status = 0; 378 int status = 0;
315 379
316 if (pdata->teardown) { 380 if (pdata->teardown) {
317 status = pdata->teardown(spi, 381 status = pdata->teardown(spi,
318 mcp->chip.base, mcp->chip.ngpio, 382 pdata->base, data->ngpio,
319 pdata->context); 383 pdata->context);
320 if (status < 0) { 384 if (status < 0) {
321 dev_err(&spi->dev, "%s --> %d\n", "teardown", status); 385 dev_err(&spi->dev, "%s --> %d\n", "teardown", status);
@@ -323,11 +387,20 @@ static int mcp23s08_remove(struct spi_device *spi)
323 } 387 }
324 } 388 }
325 389
326 status = gpiochip_remove(&mcp->chip); 390 for (addr = 0; addr < 4; addr++) {
391 int tmp;
392
393 if (!data->mcp[addr])
394 continue;
395
396 tmp = gpiochip_remove(&data->mcp[addr]->chip);
397 if (tmp < 0) {
398 dev_err(&spi->dev, "%s --> %d\n", "remove", tmp);
399 status = tmp;
400 }
401 }
327 if (status == 0) 402 if (status == 0)
328 kfree(mcp); 403 kfree(data);
329 else
330 dev_err(&spi->dev, "%s --> %d\n", "remove", status);
331 return status; 404 return status;
332} 405}
333 406
@@ -355,4 +428,3 @@ static void __exit mcp23s08_exit(void)
355module_exit(mcp23s08_exit); 428module_exit(mcp23s08_exit);
356 429
357MODULE_LICENSE("GPL"); 430MODULE_LICENSE("GPL");
358
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index a380730b61ab..cc8468692ae0 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -188,6 +188,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
188 gc->base = chip->gpio_start; 188 gc->base = chip->gpio_start;
189 gc->ngpio = gpios; 189 gc->ngpio = gpios;
190 gc->label = chip->client->name; 190 gc->label = chip->client->name;
191 gc->dev = &chip->client->dev;
191 gc->owner = THIS_MODULE; 192 gc->owner = THIS_MODULE;
192} 193}
193 194
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
index d25d356c4f20..fc9c6ae739ee 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/pcf857x.c
@@ -200,6 +200,7 @@ static int pcf857x_probe(struct i2c_client *client,
200 200
201 gpio->chip.base = pdata->gpio_base; 201 gpio->chip.base = pdata->gpio_base;
202 gpio->chip.can_sleep = 1; 202 gpio->chip.can_sleep = 1;
203 gpio->chip.dev = &client->dev;
203 gpio->chip.owner = THIS_MODULE; 204 gpio->chip.owner = THIS_MODULE;
204 205
205 /* NOTE: the OnSemi jlc1562b is also largely compatible with 206 /* NOTE: the OnSemi jlc1562b is also largely compatible with
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f43d6d3cf2fa..426ac5add585 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -780,7 +780,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
780 */ 780 */
781static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value) 781static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value)
782{ 782{
783 __le64 x; 783 u64 x;
784 u64 m = (1ULL << n) - 1; 784 u64 m = (1ULL << n) - 1;
785 785
786 if (n > 32) 786 if (n > 32)
@@ -796,10 +796,10 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3
796 report += offset >> 3; 796 report += offset >> 3;
797 offset &= 7; 797 offset &= 7;
798 798
799 x = get_unaligned((__le64 *)report); 799 x = get_unaligned_le64(report);
800 x &= cpu_to_le64(~(m << offset)); 800 x &= ~(m << offset);
801 x |= cpu_to_le64(((u64) value) << offset); 801 x |= ((u64)value) << offset;
802 put_unaligned(x, (__le64 *) report); 802 put_unaligned_le64(x, report);
803} 803}
804 804
805/* 805/*
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index 4c2052c658f1..16feea014494 100644
--- a/drivers/hid/hid-input-quirks.c
+++ b/drivers/hid/hid-input-quirks.c
@@ -89,6 +89,29 @@ static int quirk_logitech_ultrax_remote(struct hid_usage *usage, struct input_de
89 return 1; 89 return 1;
90} 90}
91 91
92static int quirk_gyration_remote(struct hid_usage *usage, struct input_dev *input,
93 unsigned long **bit, int *max)
94{
95 if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
96 return 0;
97
98 set_bit(EV_REP, input->evbit);
99 switch(usage->hid & HID_USAGE) {
100 /* Reported on Gyration MCE Remote */
101 case 0x00d: map_key_clear(KEY_HOME); break;
102 case 0x024: map_key_clear(KEY_DVD); break;
103 case 0x025: map_key_clear(KEY_PVR); break;
104 case 0x046: map_key_clear(KEY_MEDIA); break;
105 case 0x047: map_key_clear(KEY_MP3); break;
106 case 0x049: map_key_clear(KEY_CAMERA); break;
107 case 0x04a: map_key_clear(KEY_VIDEO); break;
108
109 default:
110 return 0;
111 }
112 return 1;
113}
114
92static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input, 115static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input,
93 unsigned long **bit, int *max) 116 unsigned long **bit, int *max)
94{ 117{
@@ -303,6 +326,9 @@ static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *inp
303#define VENDOR_ID_EZKEY 0x0518 326#define VENDOR_ID_EZKEY 0x0518
304#define DEVICE_ID_BTC_8193 0x0002 327#define DEVICE_ID_BTC_8193 0x0002
305 328
329#define VENDOR_ID_GYRATION 0x0c16
330#define DEVICE_ID_GYRATION_REMOTE 0x0002
331
306#define VENDOR_ID_LOGITECH 0x046d 332#define VENDOR_ID_LOGITECH 0x046d
307#define DEVICE_ID_LOGITECH_RECEIVER 0xc101 333#define DEVICE_ID_LOGITECH_RECEIVER 0xc101
308#define DEVICE_ID_S510_RECEIVER 0xc50c 334#define DEVICE_ID_S510_RECEIVER 0xc50c
@@ -337,6 +363,8 @@ static const struct hid_input_blacklist {
337 363
338 { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 }, 364 { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 },
339 365
366 { VENDOR_ID_GYRATION, DEVICE_ID_GYRATION_REMOTE, quirk_gyration_remote },
367
340 { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote }, 368 { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote },
341 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless }, 369 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless },
342 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless }, 370 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless },
@@ -438,6 +466,18 @@ int hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struc
438 input_event(input, usage->type, REL_WHEEL, -value); 466 input_event(input, usage->type, REL_WHEEL, -value);
439 return 1; 467 return 1;
440 } 468 }
469
470 /* Gyration MCE remote "Sleep" key */
471 if (hid->vendor == VENDOR_ID_GYRATION &&
472 hid->product == DEVICE_ID_GYRATION_REMOTE &&
473 (usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
474 (usage->hid & 0xff) == 0x82) {
475 input_event(input, usage->type, usage->code, 1);
476 input_sync(input);
477 input_event(input, usage->type, usage->code, 0);
478 input_sync(input);
479 return 1;
480 }
441 return 0; 481 return 0;
442} 482}
443 483
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 5c52a20ad344..1b2e8dc3398d 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -100,6 +100,8 @@ static struct hidinput_key_translation apple_fn_keys[] = {
100 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, 100 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
101 { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */ 101 { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */
102 { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */ 102 { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */
103 { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY },
104 { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY },
103 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY }, 105 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
104 { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY }, 106 { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
105 { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY }, 107 { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY },
@@ -612,6 +614,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
612 case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break; 614 case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break;
613 case 0x0b7: map_key_clear(KEY_STOPCD); break; 615 case 0x0b7: map_key_clear(KEY_STOPCD); break;
614 case 0x0b8: map_key_clear(KEY_EJECTCD); break; 616 case 0x0b8: map_key_clear(KEY_EJECTCD); break;
617 case 0x0bc: map_key_clear(KEY_MEDIA_REPEAT); break;
615 618
616 case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; 619 case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
617 case 0x0e0: map_abs_clear(ABS_VOLUME); break; 620 case 0x0e0: map_abs_clear(ABS_VOLUME); break;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 0c6b4d4e7e27..c40f0403edaf 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -105,6 +105,7 @@ out:
105static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) 105static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
106{ 106{
107 unsigned int minor = iminor(file->f_path.dentry->d_inode); 107 unsigned int minor = iminor(file->f_path.dentry->d_inode);
108 /* FIXME: What stops hidraw_table going NULL */
108 struct hid_device *dev = hidraw_table[minor]->hid; 109 struct hid_device *dev = hidraw_table[minor]->hid;
109 __u8 *buf; 110 __u8 *buf;
110 int ret = 0; 111 int ret = 0;
@@ -211,38 +212,43 @@ static int hidraw_release(struct inode * inode, struct file * file)
211 kfree(list->hidraw); 212 kfree(list->hidraw);
212 } 213 }
213 214
215 kfree(list);
216
214 return 0; 217 return 0;
215} 218}
216 219
217static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 220static long hidraw_ioctl(struct file *file, unsigned int cmd,
221 unsigned long arg)
218{ 222{
223 struct inode *inode = file->f_path.dentry->d_inode;
219 unsigned int minor = iminor(inode); 224 unsigned int minor = iminor(inode);
225 long ret = 0;
226 /* FIXME: What stops hidraw_table going NULL */
220 struct hidraw *dev = hidraw_table[minor]; 227 struct hidraw *dev = hidraw_table[minor];
221 void __user *user_arg = (void __user*) arg; 228 void __user *user_arg = (void __user*) arg;
222 229
230 lock_kernel();
223 switch (cmd) { 231 switch (cmd) {
224 case HIDIOCGRDESCSIZE: 232 case HIDIOCGRDESCSIZE:
225 if (put_user(dev->hid->rsize, (int __user *)arg)) 233 if (put_user(dev->hid->rsize, (int __user *)arg))
226 return -EFAULT; 234 ret = -EFAULT;
227 return 0; 235 break;
228 236
229 case HIDIOCGRDESC: 237 case HIDIOCGRDESC:
230 { 238 {
231 __u32 len; 239 __u32 len;
232 240
233 if (get_user(len, (int __user *)arg)) 241 if (get_user(len, (int __user *)arg))
234 return -EFAULT; 242 ret = -EFAULT;
235 243 else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
236 if (len > HID_MAX_DESCRIPTOR_SIZE - 1) 244 ret = -EINVAL;
237 return -EINVAL; 245 else if (copy_to_user(user_arg + offsetof(
238 246 struct hidraw_report_descriptor,
239 if (copy_to_user(user_arg + offsetof( 247 value[0]),
240 struct hidraw_report_descriptor, 248 dev->hid->rdesc,
241 value[0]), 249 min(dev->hid->rsize, len)))
242 dev->hid->rdesc, 250 ret = -EFAULT;
243 min(dev->hid->rsize, len))) 251 break;
244 return -EFAULT;
245 return 0;
246 } 252 }
247 case HIDIOCGRAWINFO: 253 case HIDIOCGRAWINFO:
248 { 254 {
@@ -252,15 +258,13 @@ static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd
252 dinfo.vendor = dev->hid->vendor; 258 dinfo.vendor = dev->hid->vendor;
253 dinfo.product = dev->hid->product; 259 dinfo.product = dev->hid->product;
254 if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) 260 if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
255 return -EFAULT; 261 ret = -EFAULT;
256 262 break;
257 return 0;
258 } 263 }
259 default: 264 default:
260 printk(KERN_EMERG "hidraw: unsupported ioctl() %x\n", 265 ret = -ENOTTY;
261 cmd);
262 } 266 }
263 return -EINVAL; 267 return ret;
264} 268}
265 269
266static const struct file_operations hidraw_ops = { 270static const struct file_operations hidraw_ops = {
@@ -270,7 +274,7 @@ static const struct file_operations hidraw_ops = {
270 .poll = hidraw_poll, 274 .poll = hidraw_poll,
271 .open = hidraw_open, 275 .open = hidraw_open,
272 .release = hidraw_release, 276 .release = hidraw_release,
273 .ioctl = hidraw_ioctl, 277 .unlocked_ioctl = hidraw_ioctl,
274}; 278};
275 279
276void hidraw_report_event(struct hid_device *hid, u8 *data, int len) 280void hidraw_report_event(struct hid_device *hid, u8 *data, int len)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1df832a8fcbc..61e78a4369b9 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -69,12 +69,18 @@
69#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220 69#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220
70#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221 70#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221
71#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222 71#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222
72#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
73#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
74#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
72#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229 75#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229
73#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a 76#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a
74#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b 77#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b
75#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c 78#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c
76#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d 79#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d
77#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e 80#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e
81#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
82#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
83#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
78#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 84#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
79#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 85#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
80#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 86#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
@@ -241,6 +247,8 @@
241#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 247#define USB_DEVICE_ID_LD_MACHINETEST 0x2040
242 248
243#define USB_VENDOR_ID_LOGITECH 0x046d 249#define USB_VENDOR_ID_LOGITECH 0x046d
250#define USB_DEVICE_ID_LOGITECH_LX3 0xc044
251#define USB_DEVICE_ID_LOGITECH_V150 0xc047
244#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 252#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
245#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110 253#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110
246#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111 254#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111
@@ -314,6 +322,7 @@
314#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 322#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
315#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 323#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
316#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 324#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
325#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
317#define USB_DEVICE_ID_DINOVO_EDGE 0xc714 326#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
318#define USB_DEVICE_ID_DINOVO_MINI 0xc71f 327#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
319 328
@@ -443,7 +452,8 @@ static const struct hid_blacklist {
443 { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD }, 452 { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
444 { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD }, 453 { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
445 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, 454 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
446 455
456 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP, HID_QUIRK_DUPLICATE_USAGES },
447 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES }, 457 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
448 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES }, 458 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES },
449 459
@@ -593,6 +603,8 @@ static const struct hid_blacklist {
593 603
594 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, 604 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
595 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, 605 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
606 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_LX3, HID_QUIRK_INVERT_HWHEEL },
607 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_V150, HID_QUIRK_INVERT_HWHEEL },
596 608
597 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS }, 609 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS },
598 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS }, 610 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS },
@@ -642,6 +654,12 @@ static const struct hid_blacklist {
642 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, 654 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
643 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, 655 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
644 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, 656 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
657 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN },
658 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
659 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN },
660 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN },
661 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
662 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN },
645 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 663 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
646 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 664 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
647 665
@@ -1128,7 +1146,7 @@ static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize)
1128 && rdesc[557] == 0x19 1146 && rdesc[557] == 0x19
1129 && rdesc[559] == 0x29) { 1147 && rdesc[559] == 0x29) {
1130 printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); 1148 printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
1131 rdesc[284] = rdesc[304] = rdesc[558] = 0x35; 1149 rdesc[284] = rdesc[304] = rdesc[557] = 0x35;
1132 rdesc[352] = 0x36; 1150 rdesc[352] = 0x36;
1133 rdesc[286] = rdesc[355] = 0x46; 1151 rdesc[286] = rdesc[355] = 0x46;
1134 rdesc[306] = rdesc[559] = 0x45; 1152 rdesc[306] = rdesc[559] = 0x45;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 95cc192bc7af..842e9edb888e 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -406,6 +406,7 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
406 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); 406 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
407 if (!uref_multi) 407 if (!uref_multi)
408 return -ENOMEM; 408 return -ENOMEM;
409 lock_kernel();
409 uref = &uref_multi->uref; 410 uref = &uref_multi->uref;
410 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { 411 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
411 if (copy_from_user(uref_multi, user_arg, 412 if (copy_from_user(uref_multi, user_arg,
@@ -501,12 +502,15 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
501 } 502 }
502 503
503goodreturn: 504goodreturn:
505 unlock_kernel();
504 kfree(uref_multi); 506 kfree(uref_multi);
505 return 0; 507 return 0;
506fault: 508fault:
509 unlock_kernel();
507 kfree(uref_multi); 510 kfree(uref_multi);
508 return -EFAULT; 511 return -EFAULT;
509inval: 512inval:
513 unlock_kernel();
510 kfree(uref_multi); 514 kfree(uref_multi);
511 return -EINVAL; 515 return -EINVAL;
512 } 516 }
@@ -540,7 +544,7 @@ static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd,
540 return len; 544 return len;
541} 545}
542 546
543static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 547static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
544{ 548{
545 struct hiddev_list *list = file->private_data; 549 struct hiddev_list *list = file->private_data;
546 struct hiddev *hiddev = list->hiddev; 550 struct hiddev *hiddev = list->hiddev;
@@ -555,7 +559,10 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
555 struct usbhid_device *usbhid = hid->driver_data; 559 struct usbhid_device *usbhid = hid->driver_data;
556 void __user *user_arg = (void __user *)arg; 560 void __user *user_arg = (void __user *)arg;
557 int i; 561 int i;
562
563 /* Called without BKL by compat methods so no BKL taken */
558 564
565 /* FIXME: Who or what stops this racing with a disconnect? */
559 if (!hiddev->exist) 566 if (!hiddev->exist)
560 return -EIO; 567 return -EIO;
561 568
@@ -756,8 +763,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
756#ifdef CONFIG_COMPAT 763#ifdef CONFIG_COMPAT
757static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 764static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
758{ 765{
759 struct inode *inode = file->f_path.dentry->d_inode; 766 return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
760 return hiddev_ioctl(inode, file, cmd, (unsigned long)compat_ptr(arg));
761} 767}
762#endif 768#endif
763 769
@@ -768,7 +774,7 @@ static const struct file_operations hiddev_fops = {
768 .poll = hiddev_poll, 774 .poll = hiddev_poll,
769 .open = hiddev_open, 775 .open = hiddev_open,
770 .release = hiddev_release, 776 .release = hiddev_release,
771 .ioctl = hiddev_ioctl, 777 .unlocked_ioctl = hiddev_ioctl,
772 .fasync = hiddev_fasync, 778 .fasync = hiddev_fasync,
773#ifdef CONFIG_COMPAT 779#ifdef CONFIG_COMPAT
774 .compat_ioctl = hiddev_compat_ioctl, 780 .compat_ioctl = hiddev_compat_ioctl,
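The hiddev hunks above follow the standard BKL-pushdown conversion: the old .ioctl hook becomes .unlocked_ioctl, lock_kernel()/unlock_kernel() are taken explicitly inside the handler, and the compat path reuses the same handler through compat_ptr(). A minimal sketch of that pattern for a hypothetical character driver follows; the foo_* names and the FOO_IOC_RESET command are invented for illustration and are not part of this patch.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/smp_lock.h>
#include <linux/compat.h>

#define FOO_IOC_RESET	_IO('f', 1)	/* hypothetical command number */

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret = -ENOTTY;

	lock_kernel();		/* keep the serialization the old .ioctl hook gave us */
	switch (cmd) {
	case FOO_IOC_RESET:
		/* ... operate on the device ... */
		ret = 0;
		break;
	}
	unlock_kernel();

	return ret;
}

#ifdef CONFIG_COMPAT
static long foo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* translate the 32-bit pointer argument, then reuse the native handler */
	return foo_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= foo_compat_ioctl,
#endif
};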
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 3cd46d2e53c1..0caaafe01843 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -43,7 +43,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
43MODULE_DESCRIPTION(DRIVER_DESC); 43MODULE_DESCRIPTION(DRIVER_DESC);
44MODULE_LICENSE(DRIVER_LICENSE); 44MODULE_LICENSE(DRIVER_LICENSE);
45 45
46static unsigned char usb_kbd_keycode[256] = { 46static const unsigned char usb_kbd_keycode[256] = {
47 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 47 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
48 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 48 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
49 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 49 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
@@ -233,14 +233,6 @@ static int usb_kbd_probe(struct usb_interface *iface,
233 if (!usb_endpoint_is_int_in(endpoint)) 233 if (!usb_endpoint_is_int_in(endpoint))
234 return -ENODEV; 234 return -ENODEV;
235 235
236#ifdef CONFIG_USB_HID
237 if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
238 le16_to_cpu(dev->descriptor.idProduct))
239 & HID_QUIRK_IGNORE) {
240 return -ENODEV;
241 }
242#endif
243
244 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); 236 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
245 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); 237 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
246 238
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 703e9d0e8714..35689ef172cc 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -129,14 +129,6 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
129 if (!usb_endpoint_is_int_in(endpoint)) 129 if (!usb_endpoint_is_int_in(endpoint))
130 return -ENODEV; 130 return -ENODEV;
131 131
132#ifdef CONFIG_USB_HID
133 if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
134 le16_to_cpu(dev->descriptor.idProduct))
135 & (HID_QUIRK_IGNORE|HID_QUIRK_IGNORE_MOUSE)) {
136 return -ENODEV;
137 }
138#endif
139
140 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); 132 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
141 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); 133 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
142 134
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 50e0a4653741..a95cb9465d65 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -126,7 +126,7 @@ config ISP1301_OMAP
126 126
127config TPS65010 127config TPS65010
128 tristate "TPS6501x Power Management chips" 128 tristate "TPS6501x Power Management chips"
129 depends on HAVE_GPIO_LIB 129 depends on GPIOLIB
130 default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK 130 default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK
131 help 131 help
132 If you say yes here you get support for the TPS6501x series of 132 If you say yes here you get support for the TPS6501x series of
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 85949685191b..cf02e8fceb42 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -636,6 +636,8 @@ static int tps65010_probe(struct i2c_client *client,
636 tps->outmask = board->outmask; 636 tps->outmask = board->outmask;
637 637
638 tps->chip.label = client->name; 638 tps->chip.label = client->name;
639 tps->chip.dev = &client->dev;
640 tps->chip.owner = THIS_MODULE;
639 641
640 tps->chip.set = tps65010_gpio_set; 642 tps->chip.set = tps65010_gpio_set;
641 tps->chip.direction_output = tps65010_output; 643 tps->chip.direction_output = tps65010_output;
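The tps65010 hunk above fills in gpio_chip.dev and gpio_chip.owner before the chip is handed to gpiolib, so the GPIOs are attributed to the parent device and module refcounting works. A hedged sketch of that registration pattern for an invented controller follows; the foo_* names, the ngpio count, and the register accesses are assumptions, not the TPS6501x code.

#include <linux/module.h>
#include <linux/device.h>
#include <linux/gpio.h>

struct foo_chip {
	struct gpio_chip chip;
	/* ... controller state would live here ... */
};

static int foo_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	return 0;	/* read the real hardware here */
}

static void foo_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	/* write the real hardware here */
}

static int foo_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
	foo_gpio_set(chip, offset, value);
	return 0;
}

static int foo_gpio_register(struct device *dev, struct foo_chip *foo)
{
	foo->chip.label			= "foo-gpio";	/* hypothetical name */
	foo->chip.dev			= dev;		/* parent device, as the hunk adds */
	foo->chip.owner			= THIS_MODULE;	/* as the hunk adds */
	foo->chip.base			= -1;		/* let gpiolib assign a base */
	foo->chip.ngpio			= 4;
	foo->chip.get			= foo_gpio_get;
	foo->chip.set			= foo_gpio_set;
	foo->chip.direction_output	= foo_gpio_direction_output;

	return gpiochip_add(&foo->chip);
}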
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 15b09b89588a..130ef64b44f7 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -314,7 +314,7 @@ comment "IDE chipset support/bugfixes"
314 314
315config IDE_GENERIC 315config IDE_GENERIC
316 tristate "generic/default IDE chipset support" 316 tristate "generic/default IDE chipset support"
317 depends on ALPHA || X86 || IA64 || M32R || MIPS || PPC32 317 depends on ALPHA || X86 || IA64 || M32R || MIPS
318 help 318 help
319 If unsure, say N. 319 If unsure, say N.
320 320
@@ -510,6 +510,7 @@ config BLK_DEV_TRIFLEX
510 510
511config BLK_DEV_CY82C693 511config BLK_DEV_CY82C693
512 tristate "CY82C693 chipset support" 512 tristate "CY82C693 chipset support"
513 depends on ALPHA
513 select IDE_TIMINGS 514 select IDE_TIMINGS
514 select BLK_DEV_IDEDMA_PCI 515 select BLK_DEV_IDEDMA_PCI
515 help 516 help
@@ -548,6 +549,7 @@ config BLK_DEV_CS5535
548 549
549config BLK_DEV_HPT34X 550config BLK_DEV_HPT34X
550 tristate "HPT34X chipset support" 551 tristate "HPT34X chipset support"
552 depends on BROKEN
551 select BLK_DEV_IDEDMA_PCI 553 select BLK_DEV_IDEDMA_PCI
552 help 554 help
553 This driver adds up to 4 more EIDE devices sharing a single 555 This driver adds up to 4 more EIDE devices sharing a single
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 5d414e301a5a..64e0ecdc4ed5 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -1,13 +1,6 @@
1# 1#
2# Makefile for the kernel ata, atapi, and ide block device drivers.
3#
4# 12 September 2000, Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
5# Rewritten to use lists instead of if-statements.
6#
7# Note : at this point, these files are compiled on all systems.
8# In the future, some of these should be built conditionally.
9#
10# link order is important here 2# link order is important here
3#
11 4
12EXTRA_CFLAGS += -Idrivers/ide 5EXTRA_CFLAGS += -Idrivers/ide
13 6
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 52f58c885783..df4af4083954 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -72,7 +72,7 @@ struct icside_state {
72 void __iomem *ioc_base; 72 void __iomem *ioc_base;
73 unsigned int sel; 73 unsigned int sel;
74 unsigned int type; 74 unsigned int type;
75 ide_hwif_t *hwif[2]; 75 struct ide_host *host;
76}; 76};
77 77
78#define ICS_TYPE_A3IN 0 78#define ICS_TYPE_A3IN 0
@@ -375,12 +375,14 @@ static int icside_dma_test_irq(ide_drive_t *drive)
375 375
376static void icside_dma_timeout(ide_drive_t *drive) 376static void icside_dma_timeout(ide_drive_t *drive)
377{ 377{
378 ide_hwif_t *hwif = drive->hwif;
379
378 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); 380 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
379 381
380 if (icside_dma_test_irq(drive)) 382 if (icside_dma_test_irq(drive))
381 return; 383 return;
382 384
383 ide_dump_status(drive, "DMA timeout", ide_read_status(drive)); 385 ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
384 386
385 icside_dma_end(drive); 387 icside_dma_end(drive);
386} 388}
@@ -440,10 +442,10 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
440static int __init 442static int __init
441icside_register_v5(struct icside_state *state, struct expansion_card *ec) 443icside_register_v5(struct icside_state *state, struct expansion_card *ec)
442{ 444{
443 ide_hwif_t *hwif;
444 void __iomem *base; 445 void __iomem *base;
445 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 446 struct ide_host *host;
446 hw_regs_t hw; 447 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
448 int ret;
447 449
448 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); 450 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
449 if (!base) 451 if (!base)
@@ -463,22 +465,23 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
463 465
464 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); 466 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
465 467
466 hwif = ide_find_port(); 468 host = ide_host_alloc(NULL, hws);
467 if (!hwif) 469 if (host == NULL)
468 return -ENODEV; 470 return -ENODEV;
469 471
470 ide_init_port_hw(hwif, &hw); 472 state->host = host;
471 default_hwif_mmiops(hwif);
472
473 state->hwif[0] = hwif;
474 473
475 ecard_set_drvdata(ec, state); 474 ecard_set_drvdata(ec, state);
476 475
477 idx[0] = hwif->index; 476 ret = ide_host_register(host, NULL, hws);
478 477 if (ret)
479 ide_device_add(idx, NULL); 478 goto err_free;
480 479
481 return 0; 480 return 0;
481err_free:
482 ide_host_free(host);
483 ecard_set_drvdata(ec, NULL);
484 return ret;
482} 485}
483 486
484static const struct ide_port_info icside_v6_port_info __initdata = { 487static const struct ide_port_info icside_v6_port_info __initdata = {
@@ -493,13 +496,12 @@ static const struct ide_port_info icside_v6_port_info __initdata = {
493static int __init 496static int __init
494icside_register_v6(struct icside_state *state, struct expansion_card *ec) 497icside_register_v6(struct icside_state *state, struct expansion_card *ec)
495{ 498{
496 ide_hwif_t *hwif, *mate;
497 void __iomem *ioc_base, *easi_base; 499 void __iomem *ioc_base, *easi_base;
500 struct ide_host *host;
498 unsigned int sel = 0; 501 unsigned int sel = 0;
499 int ret; 502 int ret;
500 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 503 hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL };
501 struct ide_port_info d = icside_v6_port_info; 504 struct ide_port_info d = icside_v6_port_info;
502 hw_regs_t hw[2];
503 505
504 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); 506 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
505 if (!ioc_base) { 507 if (!ioc_base) {
@@ -538,28 +540,11 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
538 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); 540 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
539 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); 541 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
540 542
541 /* 543 host = ide_host_alloc(&d, hws);
542 * Find and register the interfaces. 544 if (host == NULL)
543 */
544 hwif = ide_find_port();
545 if (hwif == NULL)
546 return -ENODEV; 545 return -ENODEV;
547 546
548 ide_init_port_hw(hwif, &hw[0]); 547 state->host = host;
549 default_hwif_mmiops(hwif);
550
551 idx[0] = hwif->index;
552
553 mate = ide_find_port();
554 if (mate) {
555 ide_init_port_hw(mate, &hw[1]);
556 default_hwif_mmiops(mate);
557
558 idx[1] = mate->index;
559 }
560
561 state->hwif[0] = hwif;
562 state->hwif[1] = mate;
563 548
564 ecard_set_drvdata(ec, state); 549 ecard_set_drvdata(ec, state);
565 550
@@ -569,11 +554,17 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
569 d.dma_ops = NULL; 554 d.dma_ops = NULL;
570 } 555 }
571 556
572 ide_device_add(idx, &d); 557 ret = ide_host_register(host, NULL, hws);
558 if (ret)
559 goto err_free;
573 560
574 return 0; 561 return 0;
575 562err_free:
576 out: 563 ide_host_free(host);
564 if (d.dma_ops)
565 free_dma(ec->dma);
566 ecard_set_drvdata(ec, NULL);
567out:
577 return ret; 568 return ret;
578} 569}
579 570
@@ -719,8 +710,14 @@ static int __init icside_init(void)
719 return ecard_register_driver(&icside_driver); 710 return ecard_register_driver(&icside_driver);
720} 711}
721 712
713static void __exit icside_exit(void)
714{
715 ecard_unregister_driver(&icside_driver);
716}
717
722MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); 718MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
723MODULE_LICENSE("GPL"); 719MODULE_LICENSE("GPL");
724MODULE_DESCRIPTION("ICS IDE driver"); 720MODULE_DESCRIPTION("ICS IDE driver");
725 721
726module_init(icside_init); 722module_init(icside_init);
723module_exit(icside_exit);
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index 2f311da4c963..176532ffae0e 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -28,10 +28,8 @@
28 28
29static int __init ide_arm_init(void) 29static int __init ide_arm_init(void)
30{ 30{
31 ide_hwif_t *hwif;
32 hw_regs_t hw;
33 unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206; 31 unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
34 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 32 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 33
36 if (!request_region(base, 8, DRV_NAME)) { 34 if (!request_region(base, 8, DRV_NAME)) {
37 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", 35 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -51,15 +49,7 @@ static int __init ide_arm_init(void)
51 hw.irq = IDE_ARM_IRQ; 49 hw.irq = IDE_ARM_IRQ;
52 hw.chipset = ide_generic; 50 hw.chipset = ide_generic;
53 51
54 hwif = ide_find_port(); 52 return ide_host_add(NULL, hws, NULL);
55 if (hwif) {
56 ide_init_port_hw(hwif, &hw);
57 idx[0] = hwif->index;
58
59 ide_device_add(idx, NULL);
60 }
61
62 return 0;
63} 53}
64 54
65module_init(ide_arm_init); 55module_init(ide_arm_init);
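The IDE host conversions in this series (icside, ide_arm, palm_bk3710, rapide, ide-h8300) all replace the ide_find_port()/ide_init_port_hw()/ide_device_add() sequence with the new ide_host_* interface, where a NULL-terminated array of hw_regs_t pointers describes the ports. A minimal sketch of the one-call ide_host_add() form follows; the legacy 0x1f0/0x3f6 ports and IRQ 14 are placeholders, not values from this patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ide.h>

static int __init foo_ide_init(void)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* placeholder I/O resources */
	hw.irq = 14;				/* placeholder IRQ */
	hw.chipset = ide_generic;

	/* no port_info and no need for the struct ide_host back-pointer here */
	return ide_host_add(NULL, hws, NULL);
}

module_init(foo_ide_init);
MODULE_LICENSE("GPL");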
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index c79b85b6e4a3..3e842d60eae9 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -82,6 +82,7 @@ static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
82 {100, 120}, /* UDMA Mode 2 */ 82 {100, 120}, /* UDMA Mode 2 */
83 {100, 90}, /* UDMA Mode 3 */ 83 {100, 90}, /* UDMA Mode 3 */
84 {100, 60}, /* UDMA Mode 4 */ 84 {100, 60}, /* UDMA Mode 4 */
85 {85, 40}, /* UDMA Mode 5 */
85}; 86};
86 87
87static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev, 88static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
@@ -316,15 +317,14 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
316static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif, 317static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
317 const struct ide_port_info *d) 318 const struct ide_port_info *d)
318{ 319{
319 unsigned long base =
320 hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
321
322 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name); 320 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
323 321
324 if (ide_allocate_dma_engine(hwif)) 322 if (ide_allocate_dma_engine(hwif))
325 return -1; 323 return -1;
326 324
327 ide_setup_dma(hwif, base); 325 hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
326
327 hwif->dma_ops = &sff_dma_ops;
328 328
329 return 0; 329 return 0;
330} 330}
@@ -335,12 +335,11 @@ static const struct ide_port_ops palm_bk3710_ports_ops = {
335 .cable_detect = palm_bk3710_cable_detect, 335 .cable_detect = palm_bk3710_cable_detect,
336}; 336};
337 337
338static const struct ide_port_info __devinitdata palm_bk3710_port_info = { 338static struct ide_port_info __devinitdata palm_bk3710_port_info = {
339 .init_dma = palm_bk3710_init_dma, 339 .init_dma = palm_bk3710_init_dma,
340 .port_ops = &palm_bk3710_ports_ops, 340 .port_ops = &palm_bk3710_ports_ops,
341 .host_flags = IDE_HFLAG_MMIO, 341 .host_flags = IDE_HFLAG_MMIO,
342 .pio_mask = ATA_PIO4, 342 .pio_mask = ATA_PIO4,
343 .udma_mask = ATA_UDMA4, /* (input clk 99MHz) */
344 .mwdma_mask = ATA_MWDMA2, 343 .mwdma_mask = ATA_MWDMA2,
345}; 344};
346 345
@@ -348,13 +347,12 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
348{ 347{
349 struct clk *clk; 348 struct clk *clk;
350 struct resource *mem, *irq; 349 struct resource *mem, *irq;
351 ide_hwif_t *hwif; 350 struct ide_host *host;
352 unsigned long base, rate; 351 unsigned long base, rate;
353 int i; 352 int i, rc;
354 hw_regs_t hw; 353 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
355 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
356 354
357 clk = clk_get(NULL, "IDECLK"); 355 clk = clk_get(&pdev->dev, "IDECLK");
358 if (IS_ERR(clk)) 356 if (IS_ERR(clk))
359 return -ENODEV; 357 return -ENODEV;
360 358
@@ -394,24 +392,17 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
394 hw.irq = irq->start; 392 hw.irq = irq->start;
395 hw.chipset = ide_palm3710; 393 hw.chipset = ide_palm3710;
396 394
397 hwif = ide_find_port(); 395 palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 :
398 if (hwif == NULL) 396 ATA_UDMA5;
399 goto out;
400
401 i = hwif->index;
402 397
403 ide_init_port_hw(hwif, &hw); 398 rc = ide_host_add(&palm_bk3710_port_info, hws, NULL);
404 399 if (rc)
405 default_hwif_mmiops(hwif); 400 goto out;
406
407 idx[0] = i;
408
409 ide_device_add(idx, &palm_bk3710_port_info);
410 401
411 return 0; 402 return 0;
412out: 403out:
413 printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n"); 404 printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
414 return -ENODEV; 405 return rc;
415} 406}
416 407
417/* work with hotplug and coldplug */ 408/* work with hotplug and coldplug */
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index 43057e0303c8..78d27d9ae430 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -32,11 +32,10 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
32static int __devinit 32static int __devinit
33rapide_probe(struct expansion_card *ec, const struct ecard_id *id) 33rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
34{ 34{
35 ide_hwif_t *hwif;
36 void __iomem *base; 35 void __iomem *base;
36 struct ide_host *host;
37 int ret; 37 int ret;
38 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 38 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
39 hw_regs_t hw;
40 39
41 ret = ecard_request_resources(ec); 40 ret = ecard_request_resources(ec);
42 if (ret) 41 if (ret)
@@ -53,20 +52,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
53 hw.chipset = ide_generic; 52 hw.chipset = ide_generic;
54 hw.dev = &ec->dev; 53 hw.dev = &ec->dev;
55 54
56 hwif = ide_find_port(); 55 ret = ide_host_add(&rapide_port_info, hws, &host);
57 if (hwif == NULL) { 56 if (ret)
58 ret = -ENOENT;
59 goto release; 57 goto release;
60 }
61
62 ide_init_port_hw(hwif, &hw);
63 default_hwif_mmiops(hwif);
64
65 idx[0] = hwif->index;
66 58
67 ide_device_add(idx, &rapide_port_info); 59 ecard_set_drvdata(ec, host);
68
69 ecard_set_drvdata(ec, hwif);
70 goto out; 60 goto out;
71 61
72 release: 62 release:
@@ -77,11 +67,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
77 67
78static void __devexit rapide_remove(struct expansion_card *ec) 68static void __devexit rapide_remove(struct expansion_card *ec)
79{ 69{
80 ide_hwif_t *hwif = ecard_get_drvdata(ec); 70 struct ide_host *host = ecard_get_drvdata(ec);
81 71
82 ecard_set_drvdata(ec, NULL); 72 ecard_set_drvdata(ec, NULL);
83 73
84 ide_unregister(hwif); 74 ide_host_remove(host);
85 75
86 ecard_release_resources(ec); 76 ecard_release_resources(ec);
87} 77}
@@ -105,7 +95,13 @@ static int __init rapide_init(void)
105 return ecard_register_driver(&rapide_driver); 95 return ecard_register_driver(&rapide_driver);
106} 96}
107 97
98static void __exit rapide_exit(void)
99{
100 ecard_unregister_driver(&rapide_driver);
101}
102
108MODULE_LICENSE("GPL"); 103MODULE_LICENSE("GPL");
109MODULE_DESCRIPTION("Yellowstone RAPIDE driver"); 104MODULE_DESCRIPTION("Yellowstone RAPIDE driver");
110 105
111module_init(rapide_init); 106module_init(rapide_init);
107module_exit(rapide_exit);
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 20fad6d542cc..bde7a585f198 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -100,6 +100,8 @@ static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task)
100 /* be sure we're looking at the low order bits */ 100 /* be sure we're looking at the low order bits */
101 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 101 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
102 102
103 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
104 tf->feature = inb(io_ports->feature_addr);
103 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 105 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
104 tf->nsect = inb(io_ports->nsect_addr); 106 tf->nsect = inb(io_ports->nsect_addr);
105 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 107 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -153,6 +155,21 @@ static void h8300_output_data(ide_drive_t *drive, struct request *rq,
153 mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2); 155 mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
154} 156}
155 157
158static const struct ide_tp_ops h8300_tp_ops = {
159 .exec_command = ide_exec_command,
160 .read_status = ide_read_status,
161 .read_altstatus = ide_read_altstatus,
162 .read_sff_dma_status = ide_read_sff_dma_status,
163
164 .set_irq = ide_set_irq,
165
166 .tf_load = h8300_tf_load,
167 .tf_read = h8300_tf_read,
168
169 .input_data = h8300_input_data,
170 .output_data = h8300_output_data,
171};
172
156#define H8300_IDE_GAP (2) 173#define H8300_IDE_GAP (2)
157 174
158static inline void hw_setup(hw_regs_t *hw) 175static inline void hw_setup(hw_regs_t *hw)
@@ -167,27 +184,14 @@ static inline void hw_setup(hw_regs_t *hw)
167 hw->chipset = ide_generic; 184 hw->chipset = ide_generic;
168} 185}
169 186
170static inline void hwif_setup(ide_hwif_t *hwif)
171{
172 default_hwif_iops(hwif);
173
174 hwif->tf_load = h8300_tf_load;
175 hwif->tf_read = h8300_tf_read;
176
177 hwif->input_data = h8300_input_data;
178 hwif->output_data = h8300_output_data;
179}
180
181static const struct ide_port_info h8300_port_info = { 187static const struct ide_port_info h8300_port_info = {
188 .tp_ops = &h8300_tp_ops,
182 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, 189 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
183}; 190};
184 191
185static int __init h8300_ide_init(void) 192static int __init h8300_ide_init(void)
186{ 193{
187 hw_regs_t hw; 194 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
188 ide_hwif_t *hwif;
189 int index;
190 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
191 195
192 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); 196 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
193 197
@@ -200,19 +204,7 @@ static int __init h8300_ide_init(void)
200 204
201 hw_setup(&hw); 205 hw_setup(&hw);
202 206
203 hwif = ide_find_port_slot(&h8300_port_info); 207 return ide_host_add(&h8300_port_info, hws, NULL);
204 if (hwif == NULL)
205 return -ENOENT;
206
207 index = hwif->index;
208 ide_init_port_hw(hwif, &hw);
209 hwif_setup(hwif);
210
211 idx[0] = index;
212
213 ide_device_add(idx, &h8300_port_info);
214
215 return 0;
216 208
217out_busy: 209out_busy:
218 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); 210 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2802031de670..adf04f99cdeb 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -22,6 +22,8 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
22 void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int)) 22 void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int))
23{ 23{
24 ide_hwif_t *hwif = drive->hwif; 24 ide_hwif_t *hwif = drive->hwif;
25 struct request *rq = hwif->hwgroup->rq;
26 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
25 xfer_func_t *xferfunc; 27 xfer_func_t *xferfunc;
26 unsigned int temp; 28 unsigned int temp;
27 u16 bcount; 29 u16 bcount;
@@ -30,12 +32,12 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
30 debug_log("Enter %s - interrupt handler\n", __func__); 32 debug_log("Enter %s - interrupt handler\n", __func__);
31 33
32 if (pc->flags & PC_FLAG_TIMEDOUT) { 34 if (pc->flags & PC_FLAG_TIMEDOUT) {
33 pc->callback(drive); 35 drive->pc_callback(drive);
34 return ide_stopped; 36 return ide_stopped;
35 } 37 }
36 38
37 /* Clear the interrupt */ 39 /* Clear the interrupt */
38 stat = ide_read_status(drive); 40 stat = tp_ops->read_status(hwif);
39 41
40 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { 42 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
41 if (hwif->dma_ops->dma_end(drive) || 43 if (hwif->dma_ops->dma_end(drive) ||
@@ -63,8 +65,9 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
63 local_irq_enable_in_hardirq(); 65 local_irq_enable_in_hardirq();
64 66
65 if (drive->media == ide_tape && !scsi && 67 if (drive->media == ide_tape && !scsi &&
66 (stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE) 68 (stat & ERR_STAT) && rq->cmd[0] == REQUEST_SENSE)
67 stat &= ~ERR_STAT; 69 stat &= ~ERR_STAT;
70
68 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) { 71 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
69 /* Error detected */ 72 /* Error detected */
70 debug_log("%s: I/O error\n", drive->name); 73 debug_log("%s: I/O error\n", drive->name);
@@ -75,16 +78,17 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
75 goto cmd_finished; 78 goto cmd_finished;
76 } 79 }
77 80
78 if (pc->c[0] == REQUEST_SENSE) { 81 if (rq->cmd[0] == REQUEST_SENSE) {
79 printk(KERN_ERR "%s: I/O error in request sense" 82 printk(KERN_ERR "%s: I/O error in request sense"
80 " command\n", drive->name); 83 " command\n", drive->name);
81 return ide_do_reset(drive); 84 return ide_do_reset(drive);
82 } 85 }
83 86
84 debug_log("[cmd %x]: check condition\n", pc->c[0]); 87 debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
85 88
86 /* Retry operation */ 89 /* Retry operation */
87 retry_pc(drive); 90 retry_pc(drive);
91
88 /* queued, but not started */ 92 /* queued, but not started */
89 return ide_stopped; 93 return ide_stopped;
90 } 94 }
@@ -95,8 +99,10 @@ cmd_finished:
95 dsc_handle(drive); 99 dsc_handle(drive);
96 return ide_stopped; 100 return ide_stopped;
97 } 101 }
102
98 /* Command finished - Call the callback function */ 103 /* Command finished - Call the callback function */
99 pc->callback(drive); 104 drive->pc_callback(drive);
105
100 return ide_stopped; 106 return ide_stopped;
101 } 107 }
102 108
@@ -107,16 +113,15 @@ cmd_finished:
107 ide_dma_off(drive); 113 ide_dma_off(drive);
108 return ide_do_reset(drive); 114 return ide_do_reset(drive);
109 } 115 }
110 /* Get the number of bytes to transfer on this interrupt. */
111 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
112 hwif->INB(hwif->io_ports.lbam_addr);
113 116
114 ireason = hwif->INB(hwif->io_ports.nsect_addr); 117 /* Get the number of bytes to transfer on this interrupt. */
118 ide_read_bcount_and_ireason(drive, &bcount, &ireason);
115 119
116 if (ireason & CD) { 120 if (ireason & CD) {
117 printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__); 121 printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__);
118 return ide_do_reset(drive); 122 return ide_do_reset(drive);
119 } 123 }
124
120 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) { 125 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
121 /* Hopefully, we will never get here */ 126 /* Hopefully, we will never get here */
122 printk(KERN_ERR "%s: We wanted to %s, but the device wants us " 127 printk(KERN_ERR "%s: We wanted to %s, but the device wants us "
@@ -125,6 +130,7 @@ cmd_finished:
125 (ireason & IO) ? "Read" : "Write"); 130 (ireason & IO) ? "Read" : "Write");
126 return ide_do_reset(drive); 131 return ide_do_reset(drive);
127 } 132 }
133
128 if (!(pc->flags & PC_FLAG_WRITING)) { 134 if (!(pc->flags & PC_FLAG_WRITING)) {
129 /* Reading - Check that we have enough space */ 135 /* Reading - Check that we have enough space */
130 temp = pc->xferred + bcount; 136 temp = pc->xferred + bcount;
@@ -142,7 +148,7 @@ cmd_finished:
142 if (pc->sg) 148 if (pc->sg)
143 io_buffers(drive, pc, temp, 0); 149 io_buffers(drive, pc, temp, 0);
144 else 150 else
145 hwif->input_data(drive, NULL, 151 tp_ops->input_data(drive, NULL,
146 pc->cur_pos, temp); 152 pc->cur_pos, temp);
147 printk(KERN_ERR "%s: transferred %d of " 153 printk(KERN_ERR "%s: transferred %d of "
148 "%d bytes\n", 154 "%d bytes\n",
@@ -159,9 +165,9 @@ cmd_finished:
159 debug_log("The device wants to send us more data than " 165 debug_log("The device wants to send us more data than "
160 "expected - allowing transfer\n"); 166 "expected - allowing transfer\n");
161 } 167 }
162 xferfunc = hwif->input_data; 168 xferfunc = tp_ops->input_data;
163 } else 169 } else
164 xferfunc = hwif->output_data; 170 xferfunc = tp_ops->output_data;
165 171
166 if ((drive->media == ide_floppy && !scsi && !pc->buf) || 172 if ((drive->media == ide_floppy && !scsi && !pc->buf) ||
167 (drive->media == ide_tape && !scsi && pc->bh) || 173 (drive->media == ide_tape && !scsi && pc->bh) ||
@@ -175,7 +181,7 @@ cmd_finished:
175 pc->cur_pos += bcount; 181 pc->cur_pos += bcount;
176 182
177 debug_log("[cmd %x] transferred %d bytes on that intr.\n", 183 debug_log("[cmd %x] transferred %d bytes on that intr.\n",
178 pc->c[0], bcount); 184 rq->cmd[0], bcount);
179 185
180 /* And set the interrupt handler again */ 186 /* And set the interrupt handler again */
181 ide_set_handler(drive, handler, timeout, expiry); 187 ide_set_handler(drive, handler, timeout, expiry);
@@ -183,16 +189,27 @@ cmd_finished:
183} 189}
184EXPORT_SYMBOL_GPL(ide_pc_intr); 190EXPORT_SYMBOL_GPL(ide_pc_intr);
185 191
192static u8 ide_read_ireason(ide_drive_t *drive)
193{
194 ide_task_t task;
195
196 memset(&task, 0, sizeof(task));
197 task.tf_flags = IDE_TFLAG_IN_NSECT;
198
199 drive->hwif->tp_ops->tf_read(drive, &task);
200
201 return task.tf.nsect & 3;
202}
203
186static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) 204static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
187{ 205{
188 ide_hwif_t *hwif = drive->hwif;
189 int retries = 100; 206 int retries = 100;
190 207
191 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { 208 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
192 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing " 209 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
193 "a packet command, retrying\n", drive->name); 210 "a packet command, retrying\n", drive->name);
194 udelay(100); 211 udelay(100);
195 ireason = hwif->INB(hwif->io_ports.nsect_addr); 212 ireason = ide_read_ireason(drive);
196 if (retries == 0) { 213 if (retries == 0) {
197 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing " 214 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
198 "a packet command, ignoring\n", 215 "a packet command, ignoring\n",
@@ -210,6 +227,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
210 ide_expiry_t *expiry) 227 ide_expiry_t *expiry)
211{ 228{
212 ide_hwif_t *hwif = drive->hwif; 229 ide_hwif_t *hwif = drive->hwif;
230 struct request *rq = hwif->hwgroup->rq;
213 ide_startstop_t startstop; 231 ide_startstop_t startstop;
214 u8 ireason; 232 u8 ireason;
215 233
@@ -219,7 +237,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
219 return startstop; 237 return startstop;
220 } 238 }
221 239
222 ireason = hwif->INB(hwif->io_ports.nsect_addr); 240 ireason = ide_read_ireason(drive);
223 if (drive->media == ide_tape && !drive->scsi) 241 if (drive->media == ide_tape && !drive->scsi)
224 ireason = ide_wait_ireason(drive, ireason); 242 ireason = ide_wait_ireason(drive, ireason);
225 243
@@ -239,8 +257,8 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
239 } 257 }
240 258
241 /* Send the actual packet */ 259 /* Send the actual packet */
242 if ((pc->flags & PC_FLAG_ZIP_DRIVE) == 0) 260 if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
243 hwif->output_data(drive, NULL, pc->c, 12); 261 hwif->tp_ops->output_data(drive, NULL, rq->cmd, 12);
244 262
245 return ide_started; 263 return ide_started;
246} 264}
@@ -284,7 +302,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
284 bcount, dma); 302 bcount, dma);
285 303
286 /* Issue the packet command */ 304 /* Issue the packet command */
287 if (pc->flags & PC_FLAG_DRQ_INTERRUPT) { 305 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
288 ide_execute_command(drive, WIN_PACKETCMD, handler, 306 ide_execute_command(drive, WIN_PACKETCMD, handler,
289 timeout, NULL); 307 timeout, NULL);
290 return ide_started; 308 return ide_started;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e29dd532090..e617cf08aef6 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -57,23 +57,29 @@ static DEFINE_MUTEX(idecd_ref_mutex);
57#define ide_cd_g(disk) \ 57#define ide_cd_g(disk) \
58 container_of((disk)->private_data, struct cdrom_info, driver) 58 container_of((disk)->private_data, struct cdrom_info, driver)
59 59
60static void ide_cd_release(struct kref *);
61
60static struct cdrom_info *ide_cd_get(struct gendisk *disk) 62static struct cdrom_info *ide_cd_get(struct gendisk *disk)
61{ 63{
62 struct cdrom_info *cd = NULL; 64 struct cdrom_info *cd = NULL;
63 65
64 mutex_lock(&idecd_ref_mutex); 66 mutex_lock(&idecd_ref_mutex);
65 cd = ide_cd_g(disk); 67 cd = ide_cd_g(disk);
66 if (cd) 68 if (cd) {
67 kref_get(&cd->kref); 69 kref_get(&cd->kref);
70 if (ide_device_get(cd->drive)) {
71 kref_put(&cd->kref, ide_cd_release);
72 cd = NULL;
73 }
74 }
68 mutex_unlock(&idecd_ref_mutex); 75 mutex_unlock(&idecd_ref_mutex);
69 return cd; 76 return cd;
70} 77}
71 78
72static void ide_cd_release(struct kref *);
73
74static void ide_cd_put(struct cdrom_info *cd) 79static void ide_cd_put(struct cdrom_info *cd)
75{ 80{
76 mutex_lock(&idecd_ref_mutex); 81 mutex_lock(&idecd_ref_mutex);
82 ide_device_put(cd->drive);
77 kref_put(&cd->kref, ide_cd_release); 83 kref_put(&cd->kref, ide_cd_release);
78 mutex_unlock(&idecd_ref_mutex); 84 mutex_unlock(&idecd_ref_mutex);
79} 85}
@@ -85,10 +91,8 @@ static void ide_cd_put(struct cdrom_info *cd)
85/* Mark that we've seen a media change and invalidate our internal buffers. */ 91/* Mark that we've seen a media change and invalidate our internal buffers. */
86static void cdrom_saw_media_change(ide_drive_t *drive) 92static void cdrom_saw_media_change(ide_drive_t *drive)
87{ 93{
88 struct cdrom_info *cd = drive->driver_data; 94 drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
89 95 drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
90 cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED;
91 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
92} 96}
93 97
94static int cdrom_log_sense(ide_drive_t *drive, struct request *rq, 98static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
@@ -280,11 +284,12 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
280 */ 284 */
281static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 285static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
282{ 286{
283 struct request *rq = HWGROUP(drive)->rq; 287 ide_hwif_t *hwif = drive->hwif;
288 struct request *rq = hwif->hwgroup->rq;
284 int stat, err, sense_key; 289 int stat, err, sense_key;
285 290
286 /* check for errors */ 291 /* check for errors */
287 stat = ide_read_status(drive); 292 stat = hwif->tp_ops->read_status(hwif);
288 293
289 if (stat_ret) 294 if (stat_ret)
290 *stat_ret = stat; 295 *stat_ret = stat;
@@ -528,7 +533,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
528 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL, 533 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL,
529 xferlen, info->dma); 534 xferlen, info->dma);
530 535
531 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { 536 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
532 /* waiting for CDB interrupt, not DMA yet. */ 537 /* waiting for CDB interrupt, not DMA yet. */
533 if (info->dma) 538 if (info->dma)
534 drive->waiting_for_dma = 0; 539 drive->waiting_for_dma = 0;
@@ -560,7 +565,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
560 struct cdrom_info *info = drive->driver_data; 565 struct cdrom_info *info = drive->driver_data;
561 ide_startstop_t startstop; 566 ide_startstop_t startstop;
562 567
563 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { 568 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
564 /* 569 /*
565 * Here we should have been called after receiving an interrupt 570 * Here we should have been called after receiving an interrupt
566 * from the device. DRQ should how be set. 571 * from the device. DRQ should how be set.
@@ -589,7 +594,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
589 cmd_len = ATAPI_MIN_CDB_BYTES; 594 cmd_len = ATAPI_MIN_CDB_BYTES;
590 595
591 /* send the command to the device */ 596 /* send the command to the device */
592 hwif->output_data(drive, NULL, rq->cmd, cmd_len); 597 hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
593 598
594 /* start the DMA if need be */ 599 /* start the DMA if need be */
595 if (info->dma) 600 if (info->dma)
@@ -606,6 +611,8 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
606static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, 611static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
607 int len, int ireason, int rw) 612 int len, int ireason, int rw)
608{ 613{
614 ide_hwif_t *hwif = drive->hwif;
615
609 /* 616 /*
610 * ireason == 0: the drive wants to receive data from us 617 * ireason == 0: the drive wants to receive data from us
611 * ireason == 2: the drive is expecting to transfer data to us 618 * ireason == 2: the drive is expecting to transfer data to us
@@ -624,7 +631,7 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
624 * Some drives (ASUS) seem to tell us that status info is 631 * Some drives (ASUS) seem to tell us that status info is
625 * available. Just get it and ignore. 632 * available. Just get it and ignore.
626 */ 633 */
627 (void)ide_read_status(drive); 634 (void)hwif->tp_ops->read_status(hwif);
628 return 0; 635 return 0;
629 } else { 636 } else {
630 /* drive wants a command packet, or invalid ireason... */ 637 /* drive wants a command packet, or invalid ireason... */
@@ -645,20 +652,18 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
645 */ 652 */
646static int ide_cd_check_transfer_size(ide_drive_t *drive, int len) 653static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
647{ 654{
648 struct cdrom_info *cd = drive->driver_data;
649
650 if ((len % SECTOR_SIZE) == 0) 655 if ((len % SECTOR_SIZE) == 0)
651 return 0; 656 return 0;
652 657
653 printk(KERN_ERR "%s: %s: Bad transfer size %d\n", 658 printk(KERN_ERR "%s: %s: Bad transfer size %d\n",
654 drive->name, __func__, len); 659 drive->name, __func__, len);
655 660
656 if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES) 661 if (drive->atapi_flags & IDE_AFLAG_LIMIT_NFRAMES)
657 printk(KERN_ERR " This drive is not supported by " 662 printk(KERN_ERR " This drive is not supported by "
658 "this version of the driver\n"); 663 "this version of the driver\n");
659 else { 664 else {
660 printk(KERN_ERR " Trying to limit transfer sizes\n"); 665 printk(KERN_ERR " Trying to limit transfer sizes\n");
661 cd->cd_flags |= IDE_CD_FLAG_LIMIT_NFRAMES; 666 drive->atapi_flags |= IDE_AFLAG_LIMIT_NFRAMES;
662 } 667 }
663 668
664 return 1; 669 return 1;
@@ -735,7 +740,7 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
735 if (cdrom_decode_status(drive, 0, &stat)) 740 if (cdrom_decode_status(drive, 0, &stat))
736 return ide_stopped; 741 return ide_stopped;
737 742
738 info->cd_flags |= IDE_CD_FLAG_SEEKING; 743 drive->atapi_flags |= IDE_AFLAG_SEEKING;
739 744
740 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) { 745 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
741 if (--retry == 0) 746 if (--retry == 0)
@@ -892,10 +897,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
892 struct request *rq = HWGROUP(drive)->rq; 897 struct request *rq = HWGROUP(drive)->rq;
893 xfer_func_t *xferfunc; 898 xfer_func_t *xferfunc;
894 ide_expiry_t *expiry = NULL; 899 ide_expiry_t *expiry = NULL;
895 int dma_error = 0, dma, stat, ireason, len, thislen, uptodate = 0; 900 int dma_error = 0, dma, stat, thislen, uptodate = 0;
896 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0; 901 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0;
897 unsigned int timeout; 902 unsigned int timeout;
898 u8 lowcyl, highcyl; 903 u16 len;
904 u8 ireason;
899 905
900 /* check for errors */ 906 /* check for errors */
901 dma = info->dma; 907 dma = info->dma;
@@ -923,12 +929,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
923 goto end_request; 929 goto end_request;
924 } 930 }
925 931
926 /* ok we fall to pio :/ */ 932 ide_read_bcount_and_ireason(drive, &len, &ireason);
927 ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
928 lowcyl = hwif->INB(hwif->io_ports.lbam_addr);
929 highcyl = hwif->INB(hwif->io_ports.lbah_addr);
930
931 len = lowcyl + (256 * highcyl);
932 933
933 thislen = blk_fs_request(rq) ? len : rq->data_len; 934 thislen = blk_fs_request(rq) ? len : rq->data_len;
934 if (thislen > len) 935 if (thislen > len)
@@ -991,10 +992,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
991 992
992 if (ireason == 0) { 993 if (ireason == 0) {
993 write = 1; 994 write = 1;
994 xferfunc = hwif->output_data; 995 xferfunc = hwif->tp_ops->output_data;
995 } else { 996 } else {
996 write = 0; 997 write = 0;
997 xferfunc = hwif->input_data; 998 xferfunc = hwif->tp_ops->input_data;
998 } 999 }
999 1000
1000 /* transfer data */ 1001 /* transfer data */
@@ -1198,9 +1199,10 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1198 int xferlen; 1199 int xferlen;
1199 1200
1200 if (blk_fs_request(rq)) { 1201 if (blk_fs_request(rq)) {
1201 if (info->cd_flags & IDE_CD_FLAG_SEEKING) { 1202 if (drive->atapi_flags & IDE_AFLAG_SEEKING) {
1203 ide_hwif_t *hwif = drive->hwif;
1202 unsigned long elapsed = jiffies - info->start_seek; 1204 unsigned long elapsed = jiffies - info->start_seek;
1203 int stat = ide_read_status(drive); 1205 int stat = hwif->tp_ops->read_status(hwif);
1204 1206
1205 if ((stat & SEEK_STAT) != SEEK_STAT) { 1207 if ((stat & SEEK_STAT) != SEEK_STAT) {
1206 if (elapsed < IDECD_SEEK_TIMEOUT) { 1208 if (elapsed < IDECD_SEEK_TIMEOUT) {
@@ -1211,7 +1213,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1211 printk(KERN_ERR "%s: DSC timeout\n", 1213 printk(KERN_ERR "%s: DSC timeout\n",
1212 drive->name); 1214 drive->name);
1213 } 1215 }
1214 info->cd_flags &= ~IDE_CD_FLAG_SEEKING; 1216 drive->atapi_flags &= ~IDE_AFLAG_SEEKING;
1215 } 1217 }
1216 if (rq_data_dir(rq) == READ && 1218 if (rq_data_dir(rq) == READ &&
1217 IDE_LARGE_SEEK(info->last_block, block, 1219 IDE_LARGE_SEEK(info->last_block, block,
@@ -1288,7 +1290,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
1288 */ 1290 */
1289 cmd[7] = cdi->sanyo_slot % 3; 1291 cmd[7] = cdi->sanyo_slot % 3;
1290 1292
1291 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, REQ_QUIET); 1293 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
1292} 1294}
1293 1295
1294static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, 1296static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -1296,8 +1298,8 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
1296 struct request_sense *sense) 1298 struct request_sense *sense)
1297{ 1299{
1298 struct { 1300 struct {
1299 __u32 lba; 1301 __be32 lba;
1300 __u32 blocklen; 1302 __be32 blocklen;
1301 } capbuf; 1303 } capbuf;
1302 1304
1303 int stat; 1305 int stat;
@@ -1309,13 +1311,30 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
1309 1311
1310 stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0, 1312 stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
1311 REQ_QUIET); 1313 REQ_QUIET);
1312 if (stat == 0) { 1314 if (stat)
1313 *capacity = 1 + be32_to_cpu(capbuf.lba); 1315 return stat;
1314 *sectors_per_frame = 1316
1315 be32_to_cpu(capbuf.blocklen) >> SECTOR_BITS; 1317 /*
1318 * Sanity check the given block size
1319 */
1320 switch (capbuf.blocklen) {
1321 case __constant_cpu_to_be32(512):
1322 case __constant_cpu_to_be32(1024):
1323 case __constant_cpu_to_be32(2048):
1324 case __constant_cpu_to_be32(4096):
1325 break;
1326 default:
1327 printk(KERN_ERR "%s: weird block size %u\n",
1328 drive->name, capbuf.blocklen);
1329 printk(KERN_ERR "%s: default to 2kb block size\n",
1330 drive->name);
1331 capbuf.blocklen = __constant_cpu_to_be32(2048);
1332 break;
1316 } 1333 }
1317 1334
1318 return stat; 1335 *capacity = 1 + be32_to_cpu(capbuf.lba);
1336 *sectors_per_frame = be32_to_cpu(capbuf.blocklen) >> SECTOR_BITS;
1337 return 0;
1319} 1338}
1320 1339
1321static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag, 1340static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
@@ -1369,7 +1388,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1369 */ 1388 */
1370 (void) cdrom_check_status(drive, sense); 1389 (void) cdrom_check_status(drive, sense);
1371 1390
1372 if (info->cd_flags & IDE_CD_FLAG_TOC_VALID) 1391 if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
1373 return 0; 1392 return 0;
1374 1393
1375 /* try to get the total cdrom capacity and sector size */ 1394 /* try to get the total cdrom capacity and sector size */
@@ -1391,7 +1410,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1391 if (stat) 1410 if (stat)
1392 return stat; 1411 return stat;
1393 1412
1394 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1413 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1395 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1414 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
1396 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1415 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
1397 } 1416 }
@@ -1432,7 +1451,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1432 if (stat) 1451 if (stat)
1433 return stat; 1452 return stat;
1434 1453
1435 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1454 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1436 toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT); 1455 toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT);
1437 toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT); 1456 toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT);
1438 } else { 1457 } else {
@@ -1446,14 +1465,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1446 1465
1447 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); 1466 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
1448 1467
1449 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1468 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1450 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1469 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
1451 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1470 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
1452 } 1471 }
1453 1472
1454 for (i = 0; i <= ntracks; i++) { 1473 for (i = 0; i <= ntracks; i++) {
1455 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { 1474 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
1456 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) 1475 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
1457 toc->ent[i].track = BCD2BIN(toc->ent[i].track); 1476 toc->ent[i].track = BCD2BIN(toc->ent[i].track);
1458 msf_from_bcd(&toc->ent[i].addr.msf); 1477 msf_from_bcd(&toc->ent[i].addr.msf);
1459 } 1478 }
@@ -1476,7 +1495,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1476 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ 1495 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
1477 } 1496 }
1478 1497
1479 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { 1498 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
1480 /* re-read multisession information using MSF format */ 1499 /* re-read multisession information using MSF format */
1481 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, 1500 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
1482 sizeof(ms_tmp), sense); 1501 sizeof(ms_tmp), sense);
@@ -1500,7 +1519,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1500 } 1519 }
1501 1520
1502 /* Remember that we've read this stuff. */ 1521 /* Remember that we've read this stuff. */
1503 info->cd_flags |= IDE_CD_FLAG_TOC_VALID; 1522 drive->atapi_flags |= IDE_AFLAG_TOC_VALID;
1504 1523
1505 return 0; 1524 return 0;
1506} 1525}
@@ -1512,7 +1531,7 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
1512 struct packet_command cgc; 1531 struct packet_command cgc;
1513 int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE; 1532 int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE;
1514 1533
1515 if ((info->cd_flags & IDE_CD_FLAG_FULL_CAPS_PAGE) == 0) 1534 if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0)
1516 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; 1535 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
1517 1536
1518 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); 1537 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
@@ -1530,15 +1549,12 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
1530 struct cdrom_info *cd = drive->driver_data; 1549 struct cdrom_info *cd = drive->driver_data;
1531 u16 curspeed, maxspeed; 1550 u16 curspeed, maxspeed;
1532 1551
1533 curspeed = *(u16 *)&buf[8 + 14]; 1552 if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) {
1534 maxspeed = *(u16 *)&buf[8 + 8]; 1553 curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]);
1535 1554 maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]);
1536 if (cd->cd_flags & IDE_CD_FLAG_LE_SPEED_FIELDS) {
1537 curspeed = le16_to_cpu(curspeed);
1538 maxspeed = le16_to_cpu(maxspeed);
1539 } else { 1555 } else {
1540 curspeed = be16_to_cpu(curspeed); 1556 curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]);
1541 maxspeed = be16_to_cpu(maxspeed); 1557 maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]);
1542 } 1558 }
1543 1559
1544 cd->current_speed = (curspeed + (176/2)) / 176; 1560 cd->current_speed = (curspeed + (176/2)) / 176;
@@ -1579,7 +1595,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots)
1579 devinfo->handle = drive; 1595 devinfo->handle = drive;
1580 strcpy(devinfo->name, drive->name); 1596 strcpy(devinfo->name, drive->name);
1581 1597
1582 if (info->cd_flags & IDE_CD_FLAG_NO_SPEED_SELECT) 1598 if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
1583 devinfo->mask |= CDC_SELECT_SPEED; 1599 devinfo->mask |= CDC_SELECT_SPEED;
1584 1600
1585 devinfo->disk = info->disk; 1601 devinfo->disk = info->disk;
@@ -1605,8 +1621,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1605 return nslots; 1621 return nslots;
1606 } 1622 }
1607 1623
1608 if (cd->cd_flags & IDE_CD_FLAG_PRE_ATAPI12) { 1624 if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) {
1609 cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT; 1625 drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
1610 cdi->mask &= ~CDC_PLAY_AUDIO; 1626 cdi->mask &= ~CDC_PLAY_AUDIO;
1611 return nslots; 1627 return nslots;
1612 } 1628 }
@@ -1624,9 +1640,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1624 return 0; 1640 return 0;
1625 1641
1626 if ((buf[8 + 6] & 0x01) == 0) 1642 if ((buf[8 + 6] & 0x01) == 0)
1627 cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK; 1643 drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
1628 if (buf[8 + 6] & 0x08) 1644 if (buf[8 + 6] & 0x08)
1629 cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT; 1645 drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
1630 if (buf[8 + 3] & 0x01) 1646 if (buf[8 + 3] & 0x01)
1631 cdi->mask &= ~CDC_CD_R; 1647 cdi->mask &= ~CDC_CD_R;
1632 if (buf[8 + 3] & 0x02) 1648 if (buf[8 + 3] & 0x02)
@@ -1637,7 +1653,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1637 cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM); 1653 cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM);
1638 if (buf[8 + 3] & 0x10) 1654 if (buf[8 + 3] & 0x10)
1639 cdi->mask &= ~CDC_DVD_R; 1655 cdi->mask &= ~CDC_DVD_R;
1640 if ((buf[8 + 4] & 0x01) || (cd->cd_flags & IDE_CD_FLAG_PLAY_AUDIO_OK)) 1656 if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK))
1641 cdi->mask &= ~CDC_PLAY_AUDIO; 1657 cdi->mask &= ~CDC_PLAY_AUDIO;
1642 1658
1643 mechtype = buf[8 + 6] >> 5; 1659 mechtype = buf[8 + 6] >> 5;
@@ -1679,7 +1695,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1679 else 1695 else
1680 printk(KERN_CONT " drive"); 1696 printk(KERN_CONT " drive");
1681 1697
1682 printk(KERN_CONT ", %dkB Cache\n", be16_to_cpu(*(u16 *)&buf[8 + 12])); 1698 printk(KERN_CONT ", %dkB Cache\n", be16_to_cpup((__be16 *)&buf[8 + 12]));
1683 1699
1684 return nslots; 1700 return nslots;
1685} 1701}
@@ -1802,43 +1818,43 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
1802 1818
1803static const struct cd_list_entry ide_cd_quirks_list[] = { 1819static const struct cd_list_entry ide_cd_quirks_list[] = {
1804 /* Limit transfer size per interrupt. */ 1820 /* Limit transfer size per interrupt. */
1805 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, 1821 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_AFLAG_LIMIT_NFRAMES },
1806 { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, 1822 { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_AFLAG_LIMIT_NFRAMES },
1807 /* SCR-3231 doesn't support the SET_CD_SPEED command. */ 1823 /* SCR-3231 doesn't support the SET_CD_SPEED command. */
1808 { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_CD_FLAG_NO_SPEED_SELECT }, 1824 { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT },
1809 /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */ 1825 /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */
1810 { "NEC CD-ROM DRIVE:260", "1.01", IDE_CD_FLAG_TOCADDR_AS_BCD | 1826 { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD |
1811 IDE_CD_FLAG_PRE_ATAPI12, }, 1827 IDE_AFLAG_PRE_ATAPI12, },
1812 /* Vertos 300, some versions of this drive like to talk BCD. */ 1828 /* Vertos 300, some versions of this drive like to talk BCD. */
1813 { "V003S0DS", NULL, IDE_CD_FLAG_VERTOS_300_SSD, }, 1829 { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, },
1814 /* Vertos 600 ESD. */ 1830 /* Vertos 600 ESD. */
1815 { "V006E0DS", NULL, IDE_CD_FLAG_VERTOS_600_ESD, }, 1831 { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, },
1816 /* 1832 /*
1817 * Sanyo 3 CD changer uses a non-standard command for CD changing 1833 * Sanyo 3 CD changer uses a non-standard command for CD changing
1818 * (by default standard ATAPI support for CD changers is used). 1834 * (by default standard ATAPI support for CD changers is used).
1819 */ 1835 */
1820 { "CD-ROM CDR-C3 G", NULL, IDE_CD_FLAG_SANYO_3CD }, 1836 { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD },
1821 { "CD-ROM CDR-C3G", NULL, IDE_CD_FLAG_SANYO_3CD }, 1837 { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD },
1822 { "CD-ROM CDR_C36", NULL, IDE_CD_FLAG_SANYO_3CD }, 1838 { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD },
1823 /* Stingray 8X CD-ROM. */ 1839 /* Stingray 8X CD-ROM. */
1824 { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_CD_FLAG_PRE_ATAPI12}, 1840 { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 },
1825 /* 1841 /*
1826 * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length 1842 * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length
1827 * mode sense page capabilities size, but older drives break. 1843 * mode sense page capabilities size, but older drives break.
1828 */ 1844 */
1829 { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE }, 1845 { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
1830 { "WPI CDS-32X", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE }, 1846 { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
1831 /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */ 1847 /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */
1832 { "", "241N", IDE_CD_FLAG_LE_SPEED_FIELDS }, 1848 { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS },
1833 /* 1849 /*
1834 * Some drives used by Apple don't advertise audio play 1850 * Some drives used by Apple don't advertise audio play
1835 * but they do support reading TOC & audio datas. 1851 * but they do support reading TOC & audio datas.
1836 */ 1852 */
1837 { "MATSHITADVD-ROM SR-8187", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1853 { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1838 { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1854 { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1839 { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1855 { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1840 { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1856 { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1841 { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1857 { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1842 { NULL, NULL, 0 } 1858 { NULL, NULL, 0 }
1843}; 1859};
1844 1860
@@ -1873,20 +1889,20 @@ static int ide_cdrom_setup(ide_drive_t *drive)
1873 1889
1874 drive->special.all = 0; 1890 drive->special.all = 0;
1875 1891
1876 cd->cd_flags = IDE_CD_FLAG_MEDIA_CHANGED | IDE_CD_FLAG_NO_EJECT | 1892 drive->atapi_flags = IDE_AFLAG_MEDIA_CHANGED | IDE_AFLAG_NO_EJECT |
1877 ide_cd_flags(id); 1893 ide_cd_flags(id);
1878 1894
1879 if ((id->config & 0x0060) == 0x20) 1895 if ((id->config & 0x0060) == 0x20)
1880 cd->cd_flags |= IDE_CD_FLAG_DRQ_INTERRUPT; 1896 drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
1881 1897
1882 if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_300_SSD) && 1898 if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) &&
1883 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') 1899 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
1884 cd->cd_flags |= (IDE_CD_FLAG_TOCTRACKS_AS_BCD | 1900 drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD |
1885 IDE_CD_FLAG_TOCADDR_AS_BCD); 1901 IDE_AFLAG_TOCADDR_AS_BCD);
1886 else if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_600_ESD) && 1902 else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) &&
1887 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') 1903 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
1888 cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD; 1904 drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD;
1889 else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD) 1905 else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD)
1890 /* 3 => use CD in slot 0 */ 1906 /* 3 => use CD in slot 0 */
1891 cdi->sanyo_slot = 3; 1907 cdi->sanyo_slot = 3;
1892 1908
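The ide-cd.c hunks above move the CD-ROM quirk bits off the driver-private cdrom_info::cd_flags word and onto the generic ide_drive_t (drive->atapi_flags), so ide-cd, ide-floppy and ide-tape can share one IDE_AFLAG_* namespace, and the quirk table simply ORs those bits in per model/firmware string. The following is a minimal, stand-alone user-space sketch of that pattern; the struct, table and flag names are invented for the example and are not the kernel's.

#include <stdio.h>
#include <string.h>

/* Hypothetical per-device flag bits, mirroring the IDE_AFLAG_* idea. */
enum {
	DEV_FLAG_NO_EJECT        = 1u << 0,
	DEV_FLAG_TOCADDR_AS_BCD  = 1u << 1,
	DEV_FLAG_LE_SPEED_FIELDS = 1u << 2,
};

struct device {                /* stand-in for ide_drive_t */
	const char *model;
	const char *fw_rev;
	unsigned int flags;    /* stand-in for drive->atapi_flags */
};

struct quirk {                 /* stand-in for struct cd_list_entry */
	const char *model;     /* NULL or "" acts as a wildcard */
	const char *fw_rev;
	unsigned int flags;
};

static const struct quirk quirks[] = {
	{ "SAMPLE CD-ROM 100", NULL,   DEV_FLAG_NO_EJECT },
	{ "",                  "241N", DEV_FLAG_LE_SPEED_FIELDS },
	{ NULL, NULL, 0 }
};

static unsigned int quirk_flags(const struct device *dev)
{
	const struct quirk *q;

	for (q = quirks; q->model || q->fw_rev; q++) {
		if (q->model && q->model[0] &&
		    strcmp(q->model, dev->model) != 0)
			continue;
		if (q->fw_rev && strcmp(q->fw_rev, dev->fw_rev) != 0)
			continue;
		return q->flags;
	}
	return 0;
}

int main(void)
{
	struct device dev = { "SAMPLE CD-ROM 100", "1.00", 0 };

	dev.flags |= quirk_flags(&dev);     /* like ide_cd_flags(id) */

	if (dev.flags & DEV_FLAG_NO_EJECT)
		puts("eject disabled for this model");
	return 0;
}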
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index fe0ea36e4124..61a4599b77db 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -27,42 +27,6 @@
27#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20) 27#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20)
28#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4 28#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4
29 29
30enum {
31 /* Device sends an interrupt when ready for a packet command. */
32 IDE_CD_FLAG_DRQ_INTERRUPT = (1 << 0),
33 /* Drive cannot lock the door. */
34 IDE_CD_FLAG_NO_DOORLOCK = (1 << 1),
35 /* Drive cannot eject the disc. */
36 IDE_CD_FLAG_NO_EJECT = (1 << 2),
37 /* Drive is a pre ATAPI 1.2 drive. */
38 IDE_CD_FLAG_PRE_ATAPI12 = (1 << 3),
39 /* TOC addresses are in BCD. */
40 IDE_CD_FLAG_TOCADDR_AS_BCD = (1 << 4),
41 /* TOC track numbers are in BCD. */
42 IDE_CD_FLAG_TOCTRACKS_AS_BCD = (1 << 5),
43 /*
44 * Drive does not provide data in multiples of SECTOR_SIZE
45 * when more than one interrupt is needed.
46 */
47 IDE_CD_FLAG_LIMIT_NFRAMES = (1 << 6),
48 /* Seeking in progress. */
49 IDE_CD_FLAG_SEEKING = (1 << 7),
50 /* Driver has noticed a media change. */
51 IDE_CD_FLAG_MEDIA_CHANGED = (1 << 8),
52 /* Saved TOC information is current. */
53 IDE_CD_FLAG_TOC_VALID = (1 << 9),
54 /* We think that the drive door is locked. */
55 IDE_CD_FLAG_DOOR_LOCKED = (1 << 10),
56 /* SET_CD_SPEED command is unsupported. */
57 IDE_CD_FLAG_NO_SPEED_SELECT = (1 << 11),
58 IDE_CD_FLAG_VERTOS_300_SSD = (1 << 12),
59 IDE_CD_FLAG_VERTOS_600_ESD = (1 << 13),
60 IDE_CD_FLAG_SANYO_3CD = (1 << 14),
61 IDE_CD_FLAG_FULL_CAPS_PAGE = (1 << 15),
62 IDE_CD_FLAG_PLAY_AUDIO_OK = (1 << 16),
63 IDE_CD_FLAG_LE_SPEED_FIELDS = (1 << 17),
64};
65
66/* Structure of a MSF cdrom address. */ 30/* Structure of a MSF cdrom address. */
67struct atapi_msf { 31struct atapi_msf {
68 byte reserved; 32 byte reserved;
@@ -128,8 +92,6 @@ struct cdrom_info {
128 unsigned long last_block; 92 unsigned long last_block;
129 unsigned long start_seek; 93 unsigned long start_seek;
130 94
131 unsigned int cd_flags;
132
133 u8 max_speed; /* Max speed of the drive. */ 95 u8 max_speed; /* Max speed of the drive. */
134 u8 current_speed; /* Current speed of the drive. */ 96 u8 current_speed; /* Current speed of the drive. */
135 97
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 24d002addf73..74231b41f611 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -27,10 +27,9 @@ int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
27void ide_cdrom_release_real(struct cdrom_device_info *cdi) 27void ide_cdrom_release_real(struct cdrom_device_info *cdi)
28{ 28{
29 ide_drive_t *drive = cdi->handle; 29 ide_drive_t *drive = cdi->handle;
30 struct cdrom_info *cd = drive->driver_data;
31 30
32 if (!cdi->use_count) 31 if (!cdi->use_count)
33 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID; 32 drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
34} 33}
35 34
36/* 35/*
@@ -83,13 +82,12 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi,
83 int slot_nr) 82 int slot_nr)
84{ 83{
85 ide_drive_t *drive = cdi->handle; 84 ide_drive_t *drive = cdi->handle;
86 struct cdrom_info *cd = drive->driver_data;
87 int retval; 85 int retval;
88 86
89 if (slot_nr == CDSL_CURRENT) { 87 if (slot_nr == CDSL_CURRENT) {
90 (void) cdrom_check_status(drive, NULL); 88 (void) cdrom_check_status(drive, NULL);
91 retval = (cd->cd_flags & IDE_CD_FLAG_MEDIA_CHANGED) ? 1 : 0; 89 retval = (drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED) ? 1 : 0;
92 cd->cd_flags &= ~IDE_CD_FLAG_MEDIA_CHANGED; 90 drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
93 return retval; 91 return retval;
94 } else { 92 } else {
95 return -EINVAL; 93 return -EINVAL;
@@ -107,11 +105,11 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
107 char loej = 0x02; 105 char loej = 0x02;
108 unsigned char cmd[BLK_MAX_CDB]; 106 unsigned char cmd[BLK_MAX_CDB];
109 107
110 if ((cd->cd_flags & IDE_CD_FLAG_NO_EJECT) && !ejectflag) 108 if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
111 return -EDRIVE_CANT_DO_THIS; 109 return -EDRIVE_CANT_DO_THIS;
112 110
113 /* reload fails on some drives, if the tray is locked */ 111 /* reload fails on some drives, if the tray is locked */
114 if ((cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) && ejectflag) 112 if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
115 return 0; 113 return 0;
116 114
117 /* only tell drive to close tray if open, if it can do that */ 115 /* only tell drive to close tray if open, if it can do that */
@@ -123,7 +121,7 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
123 cmd[0] = GPCMD_START_STOP_UNIT; 121 cmd[0] = GPCMD_START_STOP_UNIT;
124 cmd[4] = loej | (ejectflag != 0); 122 cmd[4] = loej | (ejectflag != 0);
125 123
126 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, 0); 124 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
127} 125}
128 126
129/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */ 127/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
@@ -131,7 +129,6 @@ static
131int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, 129int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
132 struct request_sense *sense) 130 struct request_sense *sense)
133{ 131{
134 struct cdrom_info *cd = drive->driver_data;
135 struct request_sense my_sense; 132 struct request_sense my_sense;
136 int stat; 133 int stat;
137 134
@@ -139,7 +136,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
139 sense = &my_sense; 136 sense = &my_sense;
140 137
141 /* If the drive cannot lock the door, just pretend. */ 138 /* If the drive cannot lock the door, just pretend. */
142 if (cd->cd_flags & IDE_CD_FLAG_NO_DOORLOCK) { 139 if (drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK) {
143 stat = 0; 140 stat = 0;
144 } else { 141 } else {
145 unsigned char cmd[BLK_MAX_CDB]; 142 unsigned char cmd[BLK_MAX_CDB];
@@ -149,7 +146,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
149 cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; 146 cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
150 cmd[4] = lockflag ? 1 : 0; 147 cmd[4] = lockflag ? 1 : 0;
151 148
152 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, 149 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
153 sense, 0, 0); 150 sense, 0, 0);
154 } 151 }
155 152
@@ -160,7 +157,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
160 (sense->asc == 0x24 || sense->asc == 0x20)) { 157 (sense->asc == 0x24 || sense->asc == 0x20)) {
161 printk(KERN_ERR "%s: door locking not supported\n", 158 printk(KERN_ERR "%s: door locking not supported\n",
162 drive->name); 159 drive->name);
163 cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK; 160 drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
164 stat = 0; 161 stat = 0;
165 } 162 }
166 163
@@ -170,9 +167,9 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
170 167
171 if (stat == 0) { 168 if (stat == 0) {
172 if (lockflag) 169 if (lockflag)
173 cd->cd_flags |= IDE_CD_FLAG_DOOR_LOCKED; 170 drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
174 else 171 else
175 cd->cd_flags &= ~IDE_CD_FLAG_DOOR_LOCKED; 172 drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
176 } 173 }
177 174
178 return stat; 175 return stat;
@@ -231,7 +228,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
231 cmd[5] = speed & 0xff; 228 cmd[5] = speed & 0xff;
232 } 229 }
233 230
234 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0); 231 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
235 232
236 if (!ide_cdrom_get_capabilities(drive, buf)) { 233 if (!ide_cdrom_get_capabilities(drive, buf)) {
237 ide_cdrom_update_speed(drive, buf); 234 ide_cdrom_update_speed(drive, buf);
@@ -250,7 +247,7 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
250 struct request_sense sense; 247 struct request_sense sense;
251 int ret; 248 int ret;
252 249
253 if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0 || !info->toc) { 250 if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
254 ret = ide_cd_read_toc(drive, &sense); 251 ret = ide_cd_read_toc(drive, &sense);
255 if (ret) 252 if (ret)
256 return ret; 253 return ret;
@@ -308,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
308 * A reset will unlock the door. If it was previously locked, 305 * A reset will unlock the door. If it was previously locked,
309 * lock it again. 306 * lock it again.
310 */ 307 */
311 if (cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) 308 if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
312 (void)ide_cd_lockdoor(drive, 1, &sense); 309 (void)ide_cd_lockdoor(drive, 1, &sense);
313 310
314 return ret; 311 return ret;
@@ -324,7 +321,7 @@ static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
324 /* 321 /*
325 * don't serve cached data, if the toc isn't valid 322 * don't serve cached data, if the toc isn't valid
326 */ 323 */
327 if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0) 324 if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
328 return -EINVAL; 325 return -EINVAL;
329 326
330 /* Check validity of requested track number. */ 327 /* Check validity of requested track number. */
@@ -374,7 +371,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
374 lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]); 371 lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
375 lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]); 372 lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
376 373
377 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0); 374 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
378} 375}
379 376
380static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg) 377static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
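The ide_cd_lockdoor() hunk above keeps the long-standing fallback: if the drive rejects GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL as an illegal request, the driver sets IDE_AFLAG_NO_DOORLOCK and reports success from then on. A minimal user-space sketch of that degrade-and-remember pattern follows; send_lock() and its return convention are invented for the example.

#include <stdio.h>

#define FLAG_NO_DOORLOCK (1u << 0)
#define FLAG_DOOR_LOCKED (1u << 1)

/* Hypothetical command submission: 0 on success, -1 if the device
 * rejected the command as unsupported. */
static int send_lock(int lock)
{
	(void)lock;
	return -1;              /* pretend the drive cannot lock its door */
}

static int lockdoor(unsigned int *flags, int lock)
{
	int ret;

	if (*flags & FLAG_NO_DOORLOCK) {
		ret = 0;        /* drive cannot lock: just pretend */
	} else {
		ret = send_lock(lock);
		if (ret < 0) {
			/* remember the limitation, report success */
			fprintf(stderr, "door locking not supported\n");
			*flags |= FLAG_NO_DOORLOCK;
			ret = 0;
		}
	}

	if (ret == 0) {
		if (lock)
			*flags |= FLAG_DOOR_LOCKED;
		else
			*flags &= ~FLAG_DOOR_LOCKED;
	}
	return ret;
}

int main(void)
{
	unsigned int flags = 0;

	lockdoor(&flags, 1);
	printf("flags after lock attempt: %#x\n", flags);
	return 0;
}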
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3a2e80237c10..28d85b410f7c 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -56,23 +56,29 @@ static DEFINE_MUTEX(idedisk_ref_mutex);
56#define ide_disk_g(disk) \ 56#define ide_disk_g(disk) \
57 container_of((disk)->private_data, struct ide_disk_obj, driver) 57 container_of((disk)->private_data, struct ide_disk_obj, driver)
58 58
59static void ide_disk_release(struct kref *);
60
59static struct ide_disk_obj *ide_disk_get(struct gendisk *disk) 61static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
60{ 62{
61 struct ide_disk_obj *idkp = NULL; 63 struct ide_disk_obj *idkp = NULL;
62 64
63 mutex_lock(&idedisk_ref_mutex); 65 mutex_lock(&idedisk_ref_mutex);
64 idkp = ide_disk_g(disk); 66 idkp = ide_disk_g(disk);
65 if (idkp) 67 if (idkp) {
66 kref_get(&idkp->kref); 68 kref_get(&idkp->kref);
69 if (ide_device_get(idkp->drive)) {
70 kref_put(&idkp->kref, ide_disk_release);
71 idkp = NULL;
72 }
73 }
67 mutex_unlock(&idedisk_ref_mutex); 74 mutex_unlock(&idedisk_ref_mutex);
68 return idkp; 75 return idkp;
69} 76}
70 77
71static void ide_disk_release(struct kref *);
72
73static void ide_disk_put(struct ide_disk_obj *idkp) 78static void ide_disk_put(struct ide_disk_obj *idkp)
74{ 79{
75 mutex_lock(&idedisk_ref_mutex); 80 mutex_lock(&idedisk_ref_mutex);
81 ide_device_put(idkp->drive);
76 kref_put(&idkp->kref, ide_disk_release); 82 kref_put(&idkp->kref, ide_disk_release);
77 mutex_unlock(&idedisk_ref_mutex); 83 mutex_unlock(&idedisk_ref_mutex);
78} 84}
@@ -158,7 +164,7 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
158 write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0; 164 write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
159 165
160 if (dma) 166 if (dma)
161 index = drive->vdma ? 4 : 8; 167 index = 8;
162 else 168 else
163 index = drive->mult_count ? 0 : 4; 169 index = drive->mult_count ? 0 : 4;
164 170
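The ide_disk_get() change above takes a second reference with ide_device_get() right after the kref_get(), and rolls the kref back with kref_put() if that second acquisition fails, all under the same mutex, so a failed open never leaks a reference. Below is a self-contained sketch of that acquire-or-roll-back ordering using a plain counter and a pthread mutex; device_get() is a placeholder, not a kernel API.

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int refcount;            /* stands in for the kref */
	int device_busy;         /* makes the second acquisition fail */
};

/* Placeholder for ide_device_get(): may fail, e.g. during unplug. */
static int device_get(struct obj *o)
{
	return o->device_busy ? -1 : 0;
}

static struct obj *obj_get(struct obj *o)
{
	struct obj *ret = o;

	pthread_mutex_lock(&o->lock);
	o->refcount++;                   /* like kref_get() */
	if (device_get(o)) {
		o->refcount--;           /* roll back, like kref_put() */
		ret = NULL;              /* caller must not use the object */
	}
	pthread_mutex_unlock(&o->lock);
	return ret;
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1, 1 };

	if (!obj_get(&o))
		puts("device going away, open refused");
	printf("refcount is back to %d\n", o.refcount);
	return 0;
}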
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 7ee44f86bc54..71c377a7bcf2 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -100,10 +100,11 @@ static const struct drive_list_entry drive_blacklist [] = {
100 100
101ide_startstop_t ide_dma_intr (ide_drive_t *drive) 101ide_startstop_t ide_dma_intr (ide_drive_t *drive)
102{ 102{
103 ide_hwif_t *hwif = drive->hwif;
103 u8 stat = 0, dma_stat = 0; 104 u8 stat = 0, dma_stat = 0;
104 105
105 dma_stat = drive->hwif->dma_ops->dma_end(drive); 106 dma_stat = hwif->dma_ops->dma_end(drive);
106 stat = ide_read_status(drive); 107 stat = hwif->tp_ops->read_status(hwif);
107 108
108 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { 109 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
109 if (!dma_stat) { 110 if (!dma_stat) {
@@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(ide_build_sglist);
172int ide_build_dmatable (ide_drive_t *drive, struct request *rq) 173int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
173{ 174{
174 ide_hwif_t *hwif = HWIF(drive); 175 ide_hwif_t *hwif = HWIF(drive);
175 unsigned int *table = hwif->dmatable_cpu; 176 __le32 *table = (__le32 *)hwif->dmatable_cpu;
176 unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0; 177 unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
177 unsigned int count = 0; 178 unsigned int count = 0;
178 int i; 179 int i;
@@ -334,7 +335,7 @@ static int config_drive_for_dma (ide_drive_t *drive)
334static int dma_timer_expiry (ide_drive_t *drive) 335static int dma_timer_expiry (ide_drive_t *drive)
335{ 336{
336 ide_hwif_t *hwif = HWIF(drive); 337 ide_hwif_t *hwif = HWIF(drive);
337 u8 dma_stat = hwif->INB(hwif->dma_status); 338 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
338 339
339 printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n", 340 printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
340 drive->name, dma_stat); 341 drive->name, dma_stat);
@@ -369,14 +370,18 @@ void ide_dma_host_set(ide_drive_t *drive, int on)
369{ 370{
370 ide_hwif_t *hwif = HWIF(drive); 371 ide_hwif_t *hwif = HWIF(drive);
371 u8 unit = (drive->select.b.unit & 0x01); 372 u8 unit = (drive->select.b.unit & 0x01);
372 u8 dma_stat = hwif->INB(hwif->dma_status); 373 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
373 374
374 if (on) 375 if (on)
375 dma_stat |= (1 << (5 + unit)); 376 dma_stat |= (1 << (5 + unit));
376 else 377 else
377 dma_stat &= ~(1 << (5 + unit)); 378 dma_stat &= ~(1 << (5 + unit));
378 379
379 hwif->OUTB(dma_stat, hwif->dma_status); 380 if (hwif->host_flags & IDE_HFLAG_MMIO)
381 writeb(dma_stat,
382 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
383 else
384 outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
380} 385}
381 386
382EXPORT_SYMBOL_GPL(ide_dma_host_set); 387EXPORT_SYMBOL_GPL(ide_dma_host_set);
@@ -449,6 +454,7 @@ int ide_dma_setup(ide_drive_t *drive)
449 ide_hwif_t *hwif = drive->hwif; 454 ide_hwif_t *hwif = drive->hwif;
450 struct request *rq = HWGROUP(drive)->rq; 455 struct request *rq = HWGROUP(drive)->rq;
451 unsigned int reading; 456 unsigned int reading;
457 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
452 u8 dma_stat; 458 u8 dma_stat;
453 459
454 if (rq_data_dir(rq)) 460 if (rq_data_dir(rq))
@@ -470,13 +476,21 @@ int ide_dma_setup(ide_drive_t *drive)
470 outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS); 476 outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
471 477
472 /* specify r/w */ 478 /* specify r/w */
473 hwif->OUTB(reading, hwif->dma_command); 479 if (mmio)
480 writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
481 else
482 outb(reading, hwif->dma_base + ATA_DMA_CMD);
474 483
475 /* read dma_status for INTR & ERROR flags */ 484 /* read DMA status for INTR & ERROR flags */
476 dma_stat = hwif->INB(hwif->dma_status); 485 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
477 486
478 /* clear INTR & ERROR flags */ 487 /* clear INTR & ERROR flags */
479 hwif->OUTB(dma_stat|6, hwif->dma_status); 488 if (mmio)
489 writeb(dma_stat | 6,
490 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
491 else
492 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
493
480 drive->waiting_for_dma = 1; 494 drive->waiting_for_dma = 1;
481 return 0; 495 return 0;
482} 496}
@@ -492,16 +506,24 @@ EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
492 506
493void ide_dma_start(ide_drive_t *drive) 507void ide_dma_start(ide_drive_t *drive)
494{ 508{
495 ide_hwif_t *hwif = HWIF(drive); 509 ide_hwif_t *hwif = drive->hwif;
496 u8 dma_cmd = hwif->INB(hwif->dma_command); 510 u8 dma_cmd;
497 511
498 /* Note that this is done *after* the cmd has 512 /* Note that this is done *after* the cmd has
499 * been issued to the drive, as per the BM-IDE spec. 513 * been issued to the drive, as per the BM-IDE spec.
500 * The Promise Ultra33 doesn't work correctly when 514 * The Promise Ultra33 doesn't work correctly when
501 * we do this part before issuing the drive cmd. 515 * we do this part before issuing the drive cmd.
502 */ 516 */
503 /* start DMA */ 517 if (hwif->host_flags & IDE_HFLAG_MMIO) {
504 hwif->OUTB(dma_cmd|1, hwif->dma_command); 518 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
519 /* start DMA */
520 writeb(dma_cmd | 1,
521 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
522 } else {
523 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
524 outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
525 }
526
505 hwif->dma = 1; 527 hwif->dma = 1;
506 wmb(); 528 wmb();
507} 529}
@@ -511,18 +533,33 @@ EXPORT_SYMBOL_GPL(ide_dma_start);
511/* returns 1 on error, 0 otherwise */ 533/* returns 1 on error, 0 otherwise */
512int __ide_dma_end (ide_drive_t *drive) 534int __ide_dma_end (ide_drive_t *drive)
513{ 535{
514 ide_hwif_t *hwif = HWIF(drive); 536 ide_hwif_t *hwif = drive->hwif;
537 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
515 u8 dma_stat = 0, dma_cmd = 0; 538 u8 dma_stat = 0, dma_cmd = 0;
516 539
517 drive->waiting_for_dma = 0; 540 drive->waiting_for_dma = 0;
518 /* get dma_command mode */ 541
519 dma_cmd = hwif->INB(hwif->dma_command); 542 if (mmio) {
520 /* stop DMA */ 543 /* get DMA command mode */
521 hwif->OUTB(dma_cmd&~1, hwif->dma_command); 544 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
545 /* stop DMA */
546 writeb(dma_cmd & ~1,
547 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
548 } else {
549 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
550 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
551 }
552
522 /* get DMA status */ 553 /* get DMA status */
523 dma_stat = hwif->INB(hwif->dma_status); 554 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
524 /* clear the INTR & ERROR bits */ 555
525 hwif->OUTB(dma_stat|6, hwif->dma_status); 556 if (mmio)
557 /* clear the INTR & ERROR bits */
558 writeb(dma_stat | 6,
559 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
560 else
561 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
562
526 /* purge DMA mappings */ 563 /* purge DMA mappings */
527 ide_destroy_dmatable(drive); 564 ide_destroy_dmatable(drive);
528 /* verify good DMA status */ 565 /* verify good DMA status */
@@ -537,7 +574,7 @@ EXPORT_SYMBOL(__ide_dma_end);
537int ide_dma_test_irq(ide_drive_t *drive) 574int ide_dma_test_irq(ide_drive_t *drive)
538{ 575{
539 ide_hwif_t *hwif = HWIF(drive); 576 ide_hwif_t *hwif = HWIF(drive);
540 u8 dma_stat = hwif->INB(hwif->dma_status); 577 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
541 578
542 /* return 1 if INTR asserted */ 579 /* return 1 if INTR asserted */
543 if ((dma_stat & 4) == 4) 580 if ((dma_stat & 4) == 4)
@@ -719,9 +756,8 @@ static int ide_tune_dma(ide_drive_t *drive)
719static int ide_dma_check(ide_drive_t *drive) 756static int ide_dma_check(ide_drive_t *drive)
720{ 757{
721 ide_hwif_t *hwif = drive->hwif; 758 ide_hwif_t *hwif = drive->hwif;
722 int vdma = (hwif->host_flags & IDE_HFLAG_VDMA)? 1 : 0;
723 759
724 if (!vdma && ide_tune_dma(drive)) 760 if (ide_tune_dma(drive))
725 return 0; 761 return 0;
726 762
727 /* TODO: always do PIO fallback */ 763 /* TODO: always do PIO fallback */
@@ -730,7 +766,7 @@ static int ide_dma_check(ide_drive_t *drive)
730 766
731 ide_set_max_pio(drive); 767 ide_set_max_pio(drive);
732 768
733 return vdma ? 0 : -1; 769 return -1;
734} 770}
735 771
736int ide_id_dma_bug(ide_drive_t *drive) 772int ide_id_dma_bug(ide_drive_t *drive)
@@ -842,7 +878,7 @@ int ide_allocate_dma_engine(ide_hwif_t *hwif)
842} 878}
843EXPORT_SYMBOL_GPL(ide_allocate_dma_engine); 879EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
844 880
845static const struct ide_dma_ops sff_dma_ops = { 881const struct ide_dma_ops sff_dma_ops = {
846 .dma_host_set = ide_dma_host_set, 882 .dma_host_set = ide_dma_host_set,
847 .dma_setup = ide_dma_setup, 883 .dma_setup = ide_dma_setup,
848 .dma_exec_cmd = ide_dma_exec_cmd, 884 .dma_exec_cmd = ide_dma_exec_cmd,
@@ -852,18 +888,5 @@ static const struct ide_dma_ops sff_dma_ops = {
852 .dma_timeout = ide_dma_timeout, 888 .dma_timeout = ide_dma_timeout,
853 .dma_lost_irq = ide_dma_lost_irq, 889 .dma_lost_irq = ide_dma_lost_irq,
854}; 890};
855 891EXPORT_SYMBOL_GPL(sff_dma_ops);
856void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
857{
858 hwif->dma_base = base;
859
860 if (!hwif->dma_command)
861 hwif->dma_command = hwif->dma_base + 0;
862 if (!hwif->dma_status)
863 hwif->dma_status = hwif->dma_base + 2;
864
865 hwif->dma_ops = &sff_dma_ops;
866}
867
868EXPORT_SYMBOL_GPL(ide_setup_dma);
869#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 892#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
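The ide-dma.c hunks above drop the old hwif->INB/OUTB hooks in favour of an explicit choice between memory-mapped and port accessors, keyed on IDE_HFLAG_MMIO, around every bus-master register access (for example the read-modify-write of ATA_DMA_CMD in ide_dma_start()). The sketch below models the same flag-selected read-modify-write against two simulated register banks; the offsets and the struct layout are invented for the example, and the real code uses readb()/writeb() and inb()/outb().

#include <stdint.h>
#include <stdio.h>

#define HFLAG_MMIO   (1u << 0)
#define REG_DMA_CMD  0          /* invented offset, same role as ATA_DMA_CMD */
#define REG_DMA_STAT 2

struct hwif {
	unsigned int host_flags;
	uint8_t mmio_regs[8];   /* simulated memory-mapped register bank */
	uint8_t port_regs[8];   /* simulated I/O-port register bank */
};

static uint8_t reg_read(struct hwif *h, unsigned int off)
{
	if (h->host_flags & HFLAG_MMIO)
		return h->mmio_regs[off];   /* would be readb() in the kernel */
	return h->port_regs[off];           /* would be inb() in the kernel */
}

static void reg_write(struct hwif *h, unsigned int off, uint8_t val)
{
	if (h->host_flags & HFLAG_MMIO)
		h->mmio_regs[off] = val;    /* would be writeb() */
	else
		h->port_regs[off] = val;    /* would be outb() */
}

/* Mirrors ide_dma_start(): set the start bit after the command was issued. */
static void dma_start(struct hwif *h)
{
	uint8_t cmd = reg_read(h, REG_DMA_CMD);

	reg_write(h, REG_DMA_CMD, cmd | 1);
}

int main(void)
{
	struct hwif h = { .host_flags = HFLAG_MMIO };

	dma_start(&h);
	printf("DMA command register: %#x\n", reg_read(&h, REG_DMA_CMD));
	return 0;
}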
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 011d72011cc4..ca11a26746f1 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -125,26 +125,10 @@ typedef struct ide_floppy_obj {
125 int wp; 125 int wp;
126 /* Supports format progress report */ 126 /* Supports format progress report */
127 int srfp; 127 int srfp;
128 /* Status/Action flags */
129 unsigned long flags;
130} idefloppy_floppy_t; 128} idefloppy_floppy_t;
131 129
132#define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */ 130#define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */
133 131
134/* Floppy flag bits values. */
135enum {
136 /* DRQ interrupt device */
137 IDEFLOPPY_FLAG_DRQ_INTERRUPT = (1 << 0),
138 /* Media may have changed */
139 IDEFLOPPY_FLAG_MEDIA_CHANGED = (1 << 1),
140 /* Format in progress */
141 IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS = (1 << 2),
142 /* Avoid commands not supported in Clik drive */
143 IDEFLOPPY_FLAG_CLIK_DRIVE = (1 << 3),
144 /* Requires BH algorithm for packets */
145 IDEFLOPPY_FLAG_ZIP_DRIVE = (1 << 4),
146};
147
148/* Defines for the MODE SENSE command */ 132/* Defines for the MODE SENSE command */
149#define MODE_SENSE_CURRENT 0x00 133#define MODE_SENSE_CURRENT 0x00
150#define MODE_SENSE_CHANGEABLE 0x01 134#define MODE_SENSE_CHANGEABLE 0x01
@@ -174,23 +158,29 @@ static DEFINE_MUTEX(idefloppy_ref_mutex);
174#define ide_floppy_g(disk) \ 158#define ide_floppy_g(disk) \
175 container_of((disk)->private_data, struct ide_floppy_obj, driver) 159 container_of((disk)->private_data, struct ide_floppy_obj, driver)
176 160
161static void idefloppy_cleanup_obj(struct kref *);
162
177static struct ide_floppy_obj *ide_floppy_get(struct gendisk *disk) 163static struct ide_floppy_obj *ide_floppy_get(struct gendisk *disk)
178{ 164{
179 struct ide_floppy_obj *floppy = NULL; 165 struct ide_floppy_obj *floppy = NULL;
180 166
181 mutex_lock(&idefloppy_ref_mutex); 167 mutex_lock(&idefloppy_ref_mutex);
182 floppy = ide_floppy_g(disk); 168 floppy = ide_floppy_g(disk);
183 if (floppy) 169 if (floppy) {
184 kref_get(&floppy->kref); 170 kref_get(&floppy->kref);
171 if (ide_device_get(floppy->drive)) {
172 kref_put(&floppy->kref, idefloppy_cleanup_obj);
173 floppy = NULL;
174 }
175 }
185 mutex_unlock(&idefloppy_ref_mutex); 176 mutex_unlock(&idefloppy_ref_mutex);
186 return floppy; 177 return floppy;
187} 178}
188 179
189static void idefloppy_cleanup_obj(struct kref *);
190
191static void ide_floppy_put(struct ide_floppy_obj *floppy) 180static void ide_floppy_put(struct ide_floppy_obj *floppy)
192{ 181{
193 mutex_lock(&idefloppy_ref_mutex); 182 mutex_lock(&idefloppy_ref_mutex);
183 ide_device_put(floppy->drive);
194 kref_put(&floppy->kref, idefloppy_cleanup_obj); 184 kref_put(&floppy->kref, idefloppy_cleanup_obj);
195 mutex_unlock(&idefloppy_ref_mutex); 185 mutex_unlock(&idefloppy_ref_mutex);
196} 186}
@@ -247,9 +237,9 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
247 237
248 data = bvec_kmap_irq(bvec, &flags); 238 data = bvec_kmap_irq(bvec, &flags);
249 if (direction) 239 if (direction)
250 hwif->output_data(drive, NULL, data, count); 240 hwif->tp_ops->output_data(drive, NULL, data, count);
251 else 241 else
252 hwif->input_data(drive, NULL, data, count); 242 hwif->tp_ops->input_data(drive, NULL, data, count);
253 bvec_kunmap_irq(data, &flags); 243 bvec_kunmap_irq(data, &flags);
254 244
255 bcount -= count; 245 bcount -= count;
@@ -291,6 +281,7 @@ static void idefloppy_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
291 rq->cmd_type = REQ_TYPE_SPECIAL; 281 rq->cmd_type = REQ_TYPE_SPECIAL;
292 rq->cmd_flags |= REQ_PREEMPT; 282 rq->cmd_flags |= REQ_PREEMPT;
293 rq->rq_disk = floppy->disk; 283 rq->rq_disk = floppy->disk;
284 memcpy(rq->cmd, pc->c, 12);
294 ide_do_drive_cmd(drive, rq); 285 ide_do_drive_cmd(drive, rq);
295} 286}
296 287
@@ -354,7 +345,6 @@ static void idefloppy_init_pc(struct ide_atapi_pc *pc)
354 memset(pc, 0, sizeof(*pc)); 345 memset(pc, 0, sizeof(*pc));
355 pc->buf = pc->pc_buf; 346 pc->buf = pc->pc_buf;
356 pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE; 347 pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE;
357 pc->callback = ide_floppy_callback;
358} 348}
359 349
360static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc) 350static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -402,7 +392,7 @@ static int idefloppy_transfer_pc(ide_drive_t *drive)
402 idefloppy_floppy_t *floppy = drive->driver_data; 392 idefloppy_floppy_t *floppy = drive->driver_data;
403 393
404 /* Send the actual packet */ 394 /* Send the actual packet */
405 drive->hwif->output_data(drive, NULL, floppy->pc->c, 12); 395 drive->hwif->tp_ops->output_data(drive, NULL, floppy->pc->c, 12);
406 396
407 /* Timeout for the packet command */ 397 /* Timeout for the packet command */
408 return IDEFLOPPY_WAIT_CMD; 398 return IDEFLOPPY_WAIT_CMD;
@@ -429,7 +419,7 @@ static ide_startstop_t idefloppy_start_pc_transfer(ide_drive_t *drive)
429 * 40 and 50msec work well. idefloppy_pc_intr will not be actually 419 * 40 and 50msec work well. idefloppy_pc_intr will not be actually
430 * used until after the packet is moved in about 50 msec. 420 * used until after the packet is moved in about 50 msec.
431 */ 421 */
432 if (pc->flags & PC_FLAG_ZIP_DRIVE) { 422 if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
433 timeout = floppy->ticks; 423 timeout = floppy->ticks;
434 expiry = &idefloppy_transfer_pc; 424 expiry = &idefloppy_transfer_pc;
435 } else { 425 } else {
@@ -474,7 +464,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
474 pc->error = IDEFLOPPY_ERROR_GENERAL; 464 pc->error = IDEFLOPPY_ERROR_GENERAL;
475 465
476 floppy->failed_pc = NULL; 466 floppy->failed_pc = NULL;
477 pc->callback(drive); 467 drive->pc_callback(drive);
478 return ide_stopped; 468 return ide_stopped;
479 } 469 }
480 470
@@ -574,6 +564,8 @@ static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy,
574 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); 564 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
575 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); 565 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
576 566
567 memcpy(rq->cmd, pc->c, 12);
568
577 pc->rq = rq; 569 pc->rq = rq;
578 pc->b_count = cmd == READ ? 0 : rq->bio->bi_size; 570 pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
579 if (rq->cmd_flags & REQ_RW) 571 if (rq->cmd_flags & REQ_RW)
@@ -647,12 +639,6 @@ static ide_startstop_t idefloppy_do_request(ide_drive_t *drive,
647 return ide_stopped; 639 return ide_stopped;
648 } 640 }
649 641
650 if (floppy->flags & IDEFLOPPY_FLAG_DRQ_INTERRUPT)
651 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
652
653 if (floppy->flags & IDEFLOPPY_FLAG_ZIP_DRIVE)
654 pc->flags |= PC_FLAG_ZIP_DRIVE;
655
656 pc->rq = rq; 642 pc->rq = rq;
657 643
658 return idefloppy_issue_pc(drive, pc); 644 return idefloppy_issue_pc(drive, pc);
@@ -671,6 +657,7 @@ static int idefloppy_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
671 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 657 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
672 rq->buffer = (char *) pc; 658 rq->buffer = (char *) pc;
673 rq->cmd_type = REQ_TYPE_SPECIAL; 659 rq->cmd_type = REQ_TYPE_SPECIAL;
660 memcpy(rq->cmd, pc->c, 12);
674 error = blk_execute_rq(drive->queue, floppy->disk, rq, 0); 661 error = blk_execute_rq(drive->queue, floppy->disk, rq, 0);
675 blk_put_request(rq); 662 blk_put_request(rq);
676 663
@@ -795,7 +782,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
795 switch (pc.buf[desc_start + 4] & 0x03) { 782 switch (pc.buf[desc_start + 4] & 0x03) {
796 /* Clik! drive returns this instead of CAPACITY_CURRENT */ 783 /* Clik! drive returns this instead of CAPACITY_CURRENT */
797 case CAPACITY_UNFORMATTED: 784 case CAPACITY_UNFORMATTED:
798 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) 785 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
799 /* 786 /*
800 * If it is not a clik drive, break out 787 * If it is not a clik drive, break out
801 * (maintains previous driver behaviour) 788 * (maintains previous driver behaviour)
@@ -841,7 +828,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
841 } 828 }
842 829
843 /* Clik! disk does not support get_flexible_disk_page */ 830 /* Clik! disk does not support get_flexible_disk_page */
844 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) 831 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
845 (void) ide_floppy_get_flexible_disk_page(drive); 832 (void) ide_floppy_get_flexible_disk_page(drive);
846 833
847 set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor); 834 set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor);
@@ -949,11 +936,12 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
949 936
950 /* Else assume format_unit has finished, and we're at 0x10000 */ 937 /* Else assume format_unit has finished, and we're at 0x10000 */
951 } else { 938 } else {
939 ide_hwif_t *hwif = drive->hwif;
952 unsigned long flags; 940 unsigned long flags;
953 u8 stat; 941 u8 stat;
954 942
955 local_irq_save(flags); 943 local_irq_save(flags);
956 stat = ide_read_status(drive); 944 stat = hwif->tp_ops->read_status(hwif);
957 local_irq_restore(flags); 945 local_irq_restore(flags);
958 946
959 progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000; 947 progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
@@ -1039,9 +1027,10 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1039 1027
1040 *((u16 *) &gcw) = drive->id->config; 1028 *((u16 *) &gcw) = drive->id->config;
1041 floppy->pc = floppy->pc_stack; 1029 floppy->pc = floppy->pc_stack;
1030 drive->pc_callback = ide_floppy_callback;
1042 1031
1043 if (((gcw[0] & 0x60) >> 5) == 1) 1032 if (((gcw[0] & 0x60) >> 5) == 1)
1044 floppy->flags |= IDEFLOPPY_FLAG_DRQ_INTERRUPT; 1033 drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
1045 /* 1034 /*
1046 * We used to check revisions here. At this point however I'm giving up. 1035 * We used to check revisions here. At this point however I'm giving up.
1047 * Just assume they are all broken, its easier. 1036 * Just assume they are all broken, its easier.
@@ -1052,7 +1041,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1052 * we'll leave the limitation below for the 2.2.x tree. 1041 * we'll leave the limitation below for the 2.2.x tree.
1053 */ 1042 */
1054 if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) { 1043 if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) {
1055 floppy->flags |= IDEFLOPPY_FLAG_ZIP_DRIVE; 1044 drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
1056 /* This value will be visible in the /proc/ide/hdx/settings */ 1045 /* This value will be visible in the /proc/ide/hdx/settings */
1057 floppy->ticks = IDEFLOPPY_TICKS_DELAY; 1046 floppy->ticks = IDEFLOPPY_TICKS_DELAY;
1058 blk_queue_max_sectors(drive->queue, 64); 1047 blk_queue_max_sectors(drive->queue, 64);
@@ -1064,7 +1053,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1064 */ 1053 */
1065 if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) { 1054 if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) {
1066 blk_queue_max_sectors(drive->queue, 64); 1055 blk_queue_max_sectors(drive->queue, 64);
1067 floppy->flags |= IDEFLOPPY_FLAG_CLIK_DRIVE; 1056 drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
1068 } 1057 }
1069 1058
1070 (void) ide_floppy_get_capacity(drive); 1059 (void) ide_floppy_get_capacity(drive);
@@ -1153,7 +1142,7 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
1153 floppy->openers++; 1142 floppy->openers++;
1154 1143
1155 if (floppy->openers == 1) { 1144 if (floppy->openers == 1) {
1156 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1145 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1157 /* Just in case */ 1146 /* Just in case */
1158 1147
1159 idefloppy_init_pc(&pc); 1148 idefloppy_init_pc(&pc);
@@ -1180,14 +1169,14 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
1180 ret = -EROFS; 1169 ret = -EROFS;
1181 goto out_put_floppy; 1170 goto out_put_floppy;
1182 } 1171 }
1183 floppy->flags |= IDEFLOPPY_FLAG_MEDIA_CHANGED; 1172 drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
1184 /* IOMEGA Clik! drives do not support lock/unlock commands */ 1173 /* IOMEGA Clik! drives do not support lock/unlock commands */
1185 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1174 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1186 idefloppy_create_prevent_cmd(&pc, 1); 1175 idefloppy_create_prevent_cmd(&pc, 1);
1187 (void) idefloppy_queue_pc_tail(drive, &pc); 1176 (void) idefloppy_queue_pc_tail(drive, &pc);
1188 } 1177 }
1189 check_disk_change(inode->i_bdev); 1178 check_disk_change(inode->i_bdev);
1190 } else if (floppy->flags & IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS) { 1179 } else if (drive->atapi_flags & IDE_AFLAG_FORMAT_IN_PROGRESS) {
1191 ret = -EBUSY; 1180 ret = -EBUSY;
1192 goto out_put_floppy; 1181 goto out_put_floppy;
1193 } 1182 }
@@ -1210,12 +1199,12 @@ static int idefloppy_release(struct inode *inode, struct file *filp)
1210 1199
1211 if (floppy->openers == 1) { 1200 if (floppy->openers == 1) {
1212 /* IOMEGA Clik! drives do not support lock/unlock commands */ 1201 /* IOMEGA Clik! drives do not support lock/unlock commands */
1213 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1202 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1214 idefloppy_create_prevent_cmd(&pc, 0); 1203 idefloppy_create_prevent_cmd(&pc, 0);
1215 (void) idefloppy_queue_pc_tail(drive, &pc); 1204 (void) idefloppy_queue_pc_tail(drive, &pc);
1216 } 1205 }
1217 1206
1218 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1207 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1219 } 1208 }
1220 1209
1221 floppy->openers--; 1210 floppy->openers--;
@@ -1236,15 +1225,17 @@ static int idefloppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1236 return 0; 1225 return 0;
1237} 1226}
1238 1227
1239static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy, 1228static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
1240 struct ide_atapi_pc *pc, unsigned long arg, unsigned int cmd) 1229 unsigned long arg, unsigned int cmd)
1241{ 1230{
1231 idefloppy_floppy_t *floppy = drive->driver_data;
1232
1242 if (floppy->openers > 1) 1233 if (floppy->openers > 1)
1243 return -EBUSY; 1234 return -EBUSY;
1244 1235
1245 /* The IOMEGA Clik! Drive doesn't support this command - 1236 /* The IOMEGA Clik! Drive doesn't support this command -
1246 * no room for an eject mechanism */ 1237 * no room for an eject mechanism */
1247 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1238 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1248 int prevent = arg ? 1 : 0; 1239 int prevent = arg ? 1 : 0;
1249 1240
1250 if (cmd == CDROMEJECT) 1241 if (cmd == CDROMEJECT)
@@ -1265,16 +1256,17 @@ static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy,
1265static int ide_floppy_format_unit(idefloppy_floppy_t *floppy, 1256static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
1266 int __user *arg) 1257 int __user *arg)
1267{ 1258{
1268 int blocks, length, flags, err = 0;
1269 struct ide_atapi_pc pc; 1259 struct ide_atapi_pc pc;
1260 ide_drive_t *drive = floppy->drive;
1261 int blocks, length, flags, err = 0;
1270 1262
1271 if (floppy->openers > 1) { 1263 if (floppy->openers > 1) {
1272 /* Don't format if someone is using the disk */ 1264 /* Don't format if someone is using the disk */
1273 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1265 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1274 return -EBUSY; 1266 return -EBUSY;
1275 } 1267 }
1276 1268
1277 floppy->flags |= IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1269 drive->atapi_flags |= IDE_AFLAG_FORMAT_IN_PROGRESS;
1278 1270
1279 /* 1271 /*
1280 * Send ATAPI_FORMAT_UNIT to the drive. 1272 * Send ATAPI_FORMAT_UNIT to the drive.
@@ -1298,15 +1290,15 @@ static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
1298 goto out; 1290 goto out;
1299 } 1291 }
1300 1292
1301 (void) idefloppy_get_sfrp_bit(floppy->drive); 1293 (void) idefloppy_get_sfrp_bit(drive);
1302 idefloppy_create_format_unit_cmd(&pc, blocks, length, flags); 1294 idefloppy_create_format_unit_cmd(&pc, blocks, length, flags);
1303 1295
1304 if (idefloppy_queue_pc_tail(floppy->drive, &pc)) 1296 if (idefloppy_queue_pc_tail(drive, &pc))
1305 err = -EIO; 1297 err = -EIO;
1306 1298
1307out: 1299out:
1308 if (err) 1300 if (err)
1309 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1301 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1310 return err; 1302 return err;
1311} 1303}
1312 1304
@@ -1325,7 +1317,7 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file,
1325 case CDROMEJECT: 1317 case CDROMEJECT:
1326 /* fall through */ 1318 /* fall through */
1327 case CDROM_LOCKDOOR: 1319 case CDROM_LOCKDOOR:
1328 return ide_floppy_lockdoor(floppy, &pc, arg, cmd); 1320 return ide_floppy_lockdoor(drive, &pc, arg, cmd);
1329 case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED: 1321 case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED:
1330 return 0; 1322 return 0;
1331 case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: 1323 case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
@@ -1366,8 +1358,8 @@ static int idefloppy_media_changed(struct gendisk *disk)
1366 drive->attach = 0; 1358 drive->attach = 0;
1367 return 0; 1359 return 0;
1368 } 1360 }
1369 ret = !!(floppy->flags & IDEFLOPPY_FLAG_MEDIA_CHANGED); 1361 ret = !!(drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED);
1370 floppy->flags &= ~IDEFLOPPY_FLAG_MEDIA_CHANGED; 1362 drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
1371 return ret; 1363 return ret;
1372} 1364}
1373 1365
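idefloppy_create_rw_cmd() above packs the block count and starting LBA big-endian into the 12-byte ATAPI packet with put_unaligned(cpu_to_be16/32(...)), and the new memcpy(rq->cmd, pc->c, 12) lines make that finished packet visible to the block layer as the request's CDB. The user-space sketch below builds the same style of packet with plain shifts; the field offsets follow the READ(10)/WRITE(10) layout the driver uses, while the helper names are invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PKT_LEN 12                 /* ATAPI packet commands are 12 bytes */

/* Store 16/32-bit values big-endian at an arbitrary, possibly unaligned
 * offset; the kernel uses put_unaligned(cpu_to_be16/32(...)) for this. */
static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24;
	p[1] = (v >> 16) & 0xff;
	p[2] = (v >> 8) & 0xff;
	p[3] = v & 0xff;
}

static void create_rw_cmd(uint8_t cmd[PKT_LEN], int write,
			  uint32_t block, uint16_t blocks)
{
	memset(cmd, 0, PKT_LEN);
	cmd[0] = write ? 0x2a : 0x28;   /* WRITE(10) / READ(10) opcodes */
	put_be32(&cmd[2], block);       /* starting LBA */
	put_be16(&cmd[7], blocks);      /* transfer length in blocks */
}

int main(void)
{
	uint8_t pkt[PKT_LEN];
	int i;

	create_rw_cmd(pkt, 0, 0x1234, 8);
	for (i = 0; i < PKT_LEN; i++)
		printf("%02x ", pkt[i]);
	putchar('\n');
	return 0;
}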
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 2d92214096ab..8fe8b5b9cf7d 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -20,6 +20,11 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/ide.h> 21#include <linux/ide.h>
22 22
23/* FIXME: convert m32r to use ide_platform host driver */
24#ifdef CONFIG_M32R
25#include <asm/m32r.h>
26#endif
27
23#define DRV_NAME "ide_generic" 28#define DRV_NAME "ide_generic"
24 29
25static int probe_mask = 0x03; 30static int probe_mask = 0x03;
@@ -28,29 +33,21 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
28 33
29static ssize_t store_add(struct class *cls, const char *buf, size_t n) 34static ssize_t store_add(struct class *cls, const char *buf, size_t n)
30{ 35{
31 ide_hwif_t *hwif;
32 unsigned int base, ctl; 36 unsigned int base, ctl;
33 int irq; 37 int irq, rc;
34 hw_regs_t hw; 38 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
36 39
37 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3) 40 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
38 return -EINVAL; 41 return -EINVAL;
39 42
40 hwif = ide_find_port();
41 if (hwif == NULL)
42 return -ENOENT;
43
44 memset(&hw, 0, sizeof(hw)); 43 memset(&hw, 0, sizeof(hw));
45 ide_std_init_ports(&hw, base, ctl); 44 ide_std_init_ports(&hw, base, ctl);
46 hw.irq = irq; 45 hw.irq = irq;
47 hw.chipset = ide_generic; 46 hw.chipset = ide_generic;
48 47
49 ide_init_port_hw(hwif, &hw); 48 rc = ide_host_add(NULL, hws, NULL);
50 49 if (rc)
51 idx[0] = hwif->index; 50 return rc;
52
53 ide_device_add(idx, NULL);
54 51
55 return n; 52 return n;
56}; 53};
@@ -88,20 +85,41 @@ static int __init ide_generic_sysfs_init(void)
88 return 0; 85 return 0;
89} 86}
90 87
88#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) \
89 || defined(CONFIG_PLAT_OPSPUT)
90static const u16 legacy_bases[] = { 0x1f0 };
91static const int legacy_irqs[] = { PLD_IRQ_CFIREQ };
92#elif defined(CONFIG_PLAT_MAPPI3)
93static const u16 legacy_bases[] = { 0x1f0, 0x170 };
94static const int legacy_irqs[] = { PLD_IRQ_CFIREQ, PLD_IRQ_IDEIREQ };
95#elif defined(CONFIG_ALPHA)
96static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168 };
97static const int legacy_irqs[] = { 14, 15, 11, 10 };
98#else
99static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
100static const int legacy_irqs[] = { 14, 15, 11, 10, 8, 12 };
101#endif
102
91static int __init ide_generic_init(void) 103static int __init ide_generic_init(void)
92{ 104{
93 u8 idx[MAX_HWIFS]; 105 hw_regs_t hw[MAX_HWIFS], *hws[MAX_HWIFS];
94 int i; 106 struct ide_host *host;
95 107 unsigned long io_addr;
108 int i, rc;
109
110#ifdef CONFIG_MIPS
111 if (!ide_probe_legacy())
112 return -ENODEV;
113#endif
96 printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module " 114 printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module "
97 "parameter for probing all legacy ISA IDE ports\n"); 115 "parameter for probing all legacy ISA IDE ports\n");
98 116
99 for (i = 0; i < MAX_HWIFS; i++) { 117 memset(hws, 0, sizeof(hw_regs_t *) * MAX_HWIFS);
100 ide_hwif_t *hwif; 118
101 unsigned long io_addr = ide_default_io_base(i); 119 for (i = 0; i < ARRAY_SIZE(legacy_bases); i++) {
102 hw_regs_t hw; 120 io_addr = legacy_bases[i];
103 121
104 idx[i] = 0xff; 122 hws[i] = NULL;
105 123
106 if ((probe_mask & (1 << i)) && io_addr) { 124 if ((probe_mask & (1 << i)) && io_addr) {
107 if (!request_region(io_addr, 8, DRV_NAME)) { 125 if (!request_region(io_addr, 8, DRV_NAME)) {
@@ -119,33 +137,46 @@ static int __init ide_generic_init(void)
119 continue; 137 continue;
120 } 138 }
121 139
122 /* 140 memset(&hw[i], 0, sizeof(hw[i]));
123 * Skip probing if the corresponding 141 ide_std_init_ports(&hw[i], io_addr, io_addr + 0x206);
124 * slot is already occupied. 142#ifdef CONFIG_IA64
125 */ 143 hw[i].irq = isa_irq_to_vector(legacy_irqs[i]);
126 hwif = ide_find_port(); 144#else
127 if (hwif == NULL || hwif->index != i) { 145 hw[i].irq = legacy_irqs[i];
128 idx[i] = 0xff; 146#endif
129 continue; 147 hw[i].chipset = ide_generic;
130 }
131 148
132 memset(&hw, 0, sizeof(hw)); 149 hws[i] = &hw[i];
133 ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
134 hw.irq = ide_default_irq(io_addr);
135 hw.chipset = ide_generic;
136 ide_init_port_hw(hwif, &hw);
137
138 idx[i] = i;
139 } 150 }
140 } 151 }
141 152
142 ide_device_add_all(idx, NULL); 153 host = ide_host_alloc_all(NULL, hws);
154 if (host == NULL) {
155 rc = -ENOMEM;
156 goto err;
157 }
158
159 rc = ide_host_register(host, NULL, hws);
160 if (rc)
161 goto err_free;
143 162
144 if (ide_generic_sysfs_init()) 163 if (ide_generic_sysfs_init())
145 printk(KERN_ERR DRV_NAME ": failed to create ide_generic " 164 printk(KERN_ERR DRV_NAME ": failed to create ide_generic "
146 "class\n"); 165 "class\n");
147 166
148 return 0; 167 return 0;
168err_free:
169 ide_host_free(host);
170err:
171 for (i = 0; i < MAX_HWIFS; i++) {
172 if (hws[i] == NULL)
173 continue;
174
175 io_addr = hws[i]->io_ports.data_addr;
176 release_region(io_addr + 0x206, 1);
177 release_region(io_addr, 8);
178 }
179 return rc;
149} 180}
150 181
151module_init(ide_generic_init); 182module_init(ide_generic_init);
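ide_generic_init() above now walks a per-architecture table of legacy base addresses and IRQs, claims each I/O region before touching the port, and unwinds with release_region() through the err_free/err labels if host allocation or registration fails. The stand-alone sketch below reproduces that claim-then-unwind shape; claim_region(), drop_region() and register_host() are stubs invented for the example, not the kernel functions.

#include <stdio.h>

#define NR_PORTS 4

static const unsigned int legacy_bases[NR_PORTS] = {
	0x1f0, 0x170, 0x1e8, 0x168
};

/* Stubs standing in for request_region()/release_region(). */
static int claim_region(unsigned int base)
{
	printf("claim   %#x\n", base);
	return 1;                        /* pretend every region is free */
}

static void drop_region(unsigned int base)
{
	printf("release %#x\n", base);
}

static int register_host(void)
{
	return -1;                       /* pretend ide_host_register() failed */
}

static int probe_all(void)
{
	int claimed[NR_PORTS] = { 0 };
	int i, rc;

	for (i = 0; i < NR_PORTS; i++) {
		if (!claim_region(legacy_bases[i]))
			continue;        /* busy port: skip, keep probing */
		claimed[i] = 1;
	}

	rc = register_host();
	if (rc)
		goto err;                /* same shape as the err_free/err labels */
	return 0;

err:
	for (i = 0; i < NR_PORTS; i++)
		if (claimed[i])
			drop_region(legacy_bases[i]);
	return rc;
}

int main(void)
{
	if (probe_all())
		puts("registration failed, regions released");
	return 0;
}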
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 661b75a89d4d..a896a283f27f 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -330,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
330 tf->error = err; 330 tf->error = err;
331 tf->status = stat; 331 tf->status = stat;
332 332
333 drive->hwif->tf_read(drive, task); 333 drive->hwif->tp_ops->tf_read(drive, task);
334 334
335 if (task->tf_flags & IDE_TFLAG_DYN) 335 if (task->tf_flags & IDE_TFLAG_DYN)
336 kfree(task); 336 kfree(task);
@@ -381,8 +381,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
381 if (err == ABRT_ERR) { 381 if (err == ABRT_ERR) {
382 if (drive->select.b.lba && 382 if (drive->select.b.lba &&
383 /* some newer drives don't support WIN_SPECIFY */ 383 /* some newer drives don't support WIN_SPECIFY */
384 hwif->INB(hwif->io_ports.command_addr) == 384 hwif->tp_ops->read_status(hwif) == WIN_SPECIFY)
385 WIN_SPECIFY)
386 return ide_stopped; 385 return ide_stopped;
387 } else if ((err & BAD_CRC) == BAD_CRC) { 386 } else if ((err & BAD_CRC) == BAD_CRC) {
388 /* UDMA crc error, just retry the operation */ 387 /* UDMA crc error, just retry the operation */
@@ -408,7 +407,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
408 return ide_stopped; 407 return ide_stopped;
409 } 408 }
410 409
411 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 410 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
412 rq->errors |= ERROR_RESET; 411 rq->errors |= ERROR_RESET;
413 412
414 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 413 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
@@ -435,10 +434,9 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
435 /* add decoding error stuff */ 434 /* add decoding error stuff */
436 } 435 }
437 436
438 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 437 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
439 /* force an abort */ 438 /* force an abort */
440 hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, 439 hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
441 hwif->io_ports.command_addr);
442 440
443 if (rq->errors >= ERROR_MAX) { 441 if (rq->errors >= ERROR_MAX) {
444 ide_kill_rq(drive, rq); 442 ide_kill_rq(drive, rq);
@@ -712,7 +710,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
712#ifdef DEBUG 710#ifdef DEBUG
713 printk("%s: DRIVE_CMD (null)\n", drive->name); 711 printk("%s: DRIVE_CMD (null)\n", drive->name);
714#endif 712#endif
715 ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive)); 713 ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
714 ide_read_error(drive));
716 715
717 return ide_stopped; 716 return ide_stopped;
718} 717}
@@ -747,16 +746,17 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
747 * the bus may be broken enough to walk on our toes at this 746 * the bus may be broken enough to walk on our toes at this
748 * point. 747 * point.
749 */ 748 */
749 ide_hwif_t *hwif = drive->hwif;
750 int rc; 750 int rc;
751#ifdef DEBUG_PM 751#ifdef DEBUG_PM
752 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); 752 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
753#endif 753#endif
754 rc = ide_wait_not_busy(HWIF(drive), 35000); 754 rc = ide_wait_not_busy(hwif, 35000);
755 if (rc) 755 if (rc)
756 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); 756 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
757 SELECT_DRIVE(drive); 757 SELECT_DRIVE(drive);
758 ide_set_irq(drive, 1); 758 hwif->tp_ops->set_irq(hwif, 1);
759 rc = ide_wait_not_busy(HWIF(drive), 100000); 759 rc = ide_wait_not_busy(hwif, 100000);
760 if (rc) 760 if (rc)
761 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); 761 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
762 } 762 }
@@ -1042,7 +1042,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1042 * quirk_list may not like intr setups/cleanups 1042 * quirk_list may not like intr setups/cleanups
1043 */ 1043 */
1044 if (drive->quirk_list != 1) 1044 if (drive->quirk_list != 1)
1045 ide_set_irq(drive, 0); 1045 hwif->tp_ops->set_irq(hwif, 0);
1046 } 1046 }
1047 hwgroup->hwif = hwif; 1047 hwgroup->hwif = hwif;
1048 hwgroup->drive = drive; 1048 hwgroup->drive = drive;
@@ -1142,7 +1142,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
1142 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 1142 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
1143 (void)hwif->dma_ops->dma_end(drive); 1143 (void)hwif->dma_ops->dma_end(drive);
1144 ret = ide_error(drive, "dma timeout error", 1144 ret = ide_error(drive, "dma timeout error",
1145 ide_read_status(drive)); 1145 hwif->tp_ops->read_status(hwif));
1146 } else { 1146 } else {
1147 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1147 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
1148 hwif->dma_ops->dma_timeout(drive); 1148 hwif->dma_ops->dma_timeout(drive);
@@ -1267,7 +1267,7 @@ void ide_timer_expiry (unsigned long data)
1267 } else 1267 } else
1268 startstop = 1268 startstop =
1269 ide_error(drive, "irq timeout", 1269 ide_error(drive, "irq timeout",
1270 ide_read_status(drive)); 1270 hwif->tp_ops->read_status(hwif));
1271 } 1271 }
1272 drive->service_time = jiffies - drive->service_start; 1272 drive->service_time = jiffies - drive->service_start;
1273 spin_lock_irq(&ide_lock); 1273 spin_lock_irq(&ide_lock);
@@ -1323,7 +1323,8 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
1323 */ 1323 */
1324 do { 1324 do {
1325 if (hwif->irq == irq) { 1325 if (hwif->irq == irq) {
1326 stat = hwif->INB(hwif->io_ports.status_addr); 1326 stat = hwif->tp_ops->read_status(hwif);
1327
1327 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { 1328 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
1328 /* Try to not flood the console with msgs */ 1329 /* Try to not flood the console with msgs */
1329 static unsigned long last_msgtime, count; 1330 static unsigned long last_msgtime, count;
@@ -1413,7 +1414,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1413 * Whack the status register, just in case 1414 * Whack the status register, just in case
1414 * we have a leftover pending IRQ. 1415 * we have a leftover pending IRQ.
1415 */ 1416 */
1416 (void) hwif->INB(hwif->io_ports.status_addr); 1417 (void)hwif->tp_ops->read_status(hwif);
1417#endif /* CONFIG_BLK_DEV_IDEPCI */ 1418#endif /* CONFIG_BLK_DEV_IDEPCI */
1418 } 1419 }
1419 spin_unlock_irqrestore(&ide_lock, flags); 1420 spin_unlock_irqrestore(&ide_lock, flags);
@@ -1519,6 +1520,7 @@ EXPORT_SYMBOL(ide_do_drive_cmd);
1519 1520
1520void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) 1521void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1521{ 1522{
1523 ide_hwif_t *hwif = drive->hwif;
1522 ide_task_t task; 1524 ide_task_t task;
1523 1525
1524 memset(&task, 0, sizeof(task)); 1526 memset(&task, 0, sizeof(task));
@@ -1529,9 +1531,9 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1529 task.tf.lbah = (bcount >> 8) & 0xff; 1531 task.tf.lbah = (bcount >> 8) & 0xff;
1530 1532
1531 ide_tf_dump(drive->name, &task.tf); 1533 ide_tf_dump(drive->name, &task.tf);
1532 ide_set_irq(drive, 1); 1534 hwif->tp_ops->set_irq(hwif, 1);
1533 SELECT_MASK(drive, 0); 1535 SELECT_MASK(drive, 0);
1534 drive->hwif->tf_load(drive, &task); 1536 hwif->tp_ops->tf_load(drive, &task);
1535} 1537}
1536 1538
1537EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); 1539EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
@@ -1543,9 +1545,9 @@ void ide_pad_transfer(ide_drive_t *drive, int write, int len)
1543 1545
1544 while (len > 0) { 1546 while (len > 0) {
1545 if (write) 1547 if (write)
1546 hwif->output_data(drive, NULL, buf, min(4, len)); 1548 hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
1547 else 1549 else
1548 hwif->input_data(drive, NULL, buf, min(4, len)); 1550 hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
1549 len -= 4; 1551 len -= 4;
1550 } 1552 }
1551} 1553}
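The ide-io.c hunks above convert the per-port INB/OUTBSYNC accessors and the ide_read_status(drive) helper into calls through the new hwif->tp_ops table. A minimal sketch of the resulting call pattern, using only names visible in the diff; check_and_abort() is a hypothetical helper, not part of the patch:

	#include <linux/ide.h>
	#include <linux/errno.h>

	/*
	 * Sketch only: the tp_ops call pattern after this conversion.
	 * check_and_abort() is a hypothetical helper, not part of the patch.
	 */
	static int check_and_abort(ide_drive_t *drive)
	{
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		/* was: stat = hwif->INB(hwif->io_ports.status_addr); */
		u8 stat = tp_ops->read_status(hwif);

		if ((stat & (BUSY_STAT | DRQ_STAT)) == 0)
			return 0;

		/* was: hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, ...command_addr); */
		tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
		return -EBUSY;
	}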
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 44aaec256a30..8aae91764513 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -42,18 +42,6 @@ static void ide_outb (u8 val, unsigned long port)
42 outb(val, port); 42 outb(val, port);
43} 43}
44 44
45static void ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
46{
47 outb(addr, port);
48}
49
50void default_hwif_iops (ide_hwif_t *hwif)
51{
52 hwif->OUTB = ide_outb;
53 hwif->OUTBSYNC = ide_outbsync;
54 hwif->INB = ide_inb;
55}
56
57/* 45/*
58 * MMIO operations, typically used for SATA controllers 46 * MMIO operations, typically used for SATA controllers
59 */ 47 */
@@ -68,31 +56,19 @@ static void ide_mm_outb (u8 value, unsigned long port)
68 writeb(value, (void __iomem *) port); 56 writeb(value, (void __iomem *) port);
69} 57}
70 58
71static void ide_mm_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
72{
73 writeb(value, (void __iomem *) port);
74}
75
76void default_hwif_mmiops (ide_hwif_t *hwif)
77{
78 hwif->OUTB = ide_mm_outb;
79 /* Most systems will need to override OUTBSYNC, alas however
80 this one is controller specific! */
81 hwif->OUTBSYNC = ide_mm_outbsync;
82 hwif->INB = ide_mm_inb;
83}
84
85EXPORT_SYMBOL(default_hwif_mmiops);
86
87void SELECT_DRIVE (ide_drive_t *drive) 59void SELECT_DRIVE (ide_drive_t *drive)
88{ 60{
89 ide_hwif_t *hwif = drive->hwif; 61 ide_hwif_t *hwif = drive->hwif;
90 const struct ide_port_ops *port_ops = hwif->port_ops; 62 const struct ide_port_ops *port_ops = hwif->port_ops;
63 ide_task_t task;
91 64
92 if (port_ops && port_ops->selectproc) 65 if (port_ops && port_ops->selectproc)
93 port_ops->selectproc(drive); 66 port_ops->selectproc(drive);
94 67
95 hwif->OUTB(drive->select.all, hwif->io_ports.device_addr); 68 memset(&task, 0, sizeof(task));
69 task.tf_flags = IDE_TFLAG_OUT_DEVICE;
70
71 drive->hwif->tp_ops->tf_load(drive, &task);
96} 72}
97 73
98void SELECT_MASK(ide_drive_t *drive, int mask) 74void SELECT_MASK(ide_drive_t *drive, int mask)
@@ -103,7 +79,61 @@ void SELECT_MASK(ide_drive_t *drive, int mask)
103 port_ops->maskproc(drive, mask); 79 port_ops->maskproc(drive, mask);
104} 80}
105 81
106static void ide_tf_load(ide_drive_t *drive, ide_task_t *task) 82void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
83{
84 if (hwif->host_flags & IDE_HFLAG_MMIO)
85 writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
86 else
87 outb(cmd, hwif->io_ports.command_addr);
88}
89EXPORT_SYMBOL_GPL(ide_exec_command);
90
91u8 ide_read_status(ide_hwif_t *hwif)
92{
93 if (hwif->host_flags & IDE_HFLAG_MMIO)
94 return readb((void __iomem *)hwif->io_ports.status_addr);
95 else
96 return inb(hwif->io_ports.status_addr);
97}
98EXPORT_SYMBOL_GPL(ide_read_status);
99
100u8 ide_read_altstatus(ide_hwif_t *hwif)
101{
102 if (hwif->host_flags & IDE_HFLAG_MMIO)
103 return readb((void __iomem *)hwif->io_ports.ctl_addr);
104 else
105 return inb(hwif->io_ports.ctl_addr);
106}
107EXPORT_SYMBOL_GPL(ide_read_altstatus);
108
109u8 ide_read_sff_dma_status(ide_hwif_t *hwif)
110{
111 if (hwif->host_flags & IDE_HFLAG_MMIO)
112 return readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
113 else
114 return inb(hwif->dma_base + ATA_DMA_STATUS);
115}
116EXPORT_SYMBOL_GPL(ide_read_sff_dma_status);
117
118void ide_set_irq(ide_hwif_t *hwif, int on)
119{
120 u8 ctl = ATA_DEVCTL_OBS;
121
122 if (on == 4) { /* hack for SRST */
123 ctl |= 4;
124 on &= ~4;
125 }
126
127 ctl |= on ? 0 : 2;
128
129 if (hwif->host_flags & IDE_HFLAG_MMIO)
130 writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
131 else
132 outb(ctl, hwif->io_ports.ctl_addr);
133}
134EXPORT_SYMBOL_GPL(ide_set_irq);
135
136void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
107{ 137{
108 ide_hwif_t *hwif = drive->hwif; 138 ide_hwif_t *hwif = drive->hwif;
109 struct ide_io_ports *io_ports = &hwif->io_ports; 139 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -155,8 +185,9 @@ static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
155 tf_outb((tf->device & HIHI) | drive->select.all, 185 tf_outb((tf->device & HIHI) | drive->select.all,
156 io_ports->device_addr); 186 io_ports->device_addr);
157} 187}
188EXPORT_SYMBOL_GPL(ide_tf_load);
158 189
159static void ide_tf_read(ide_drive_t *drive, ide_task_t *task) 190void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
160{ 191{
161 ide_hwif_t *hwif = drive->hwif; 192 ide_hwif_t *hwif = drive->hwif;
162 struct ide_io_ports *io_ports = &hwif->io_ports; 193 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -188,6 +219,8 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
188 /* be sure we're looking at the low order bits */ 219 /* be sure we're looking at the low order bits */
189 tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 220 tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
190 221
222 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
223 tf->feature = tf_inb(io_ports->feature_addr);
191 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 224 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
192 tf->nsect = tf_inb(io_ports->nsect_addr); 225 tf->nsect = tf_inb(io_ports->nsect_addr);
193 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 226 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -214,6 +247,7 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
214 tf->hob_lbah = tf_inb(io_ports->lbah_addr); 247 tf->hob_lbah = tf_inb(io_ports->lbah_addr);
215 } 248 }
216} 249}
250EXPORT_SYMBOL_GPL(ide_tf_read);
217 251
218/* 252/*
219 * Some localbus EIDE interfaces require a special access sequence 253 * Some localbus EIDE interfaces require a special access sequence
@@ -236,8 +270,8 @@ static void ata_vlb_sync(unsigned long port)
236 * so if an odd len is specified, be sure that there's at least one 270 * so if an odd len is specified, be sure that there's at least one
237 * extra byte allocated for the buffer. 271 * extra byte allocated for the buffer.
238 */ 272 */
239static void ata_input_data(ide_drive_t *drive, struct request *rq, 273void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
240 void *buf, unsigned int len) 274 unsigned int len)
241{ 275{
242 ide_hwif_t *hwif = drive->hwif; 276 ide_hwif_t *hwif = drive->hwif;
243 struct ide_io_ports *io_ports = &hwif->io_ports; 277 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -277,12 +311,13 @@ static void ata_input_data(ide_drive_t *drive, struct request *rq,
277 insw(data_addr, buf, len / 2); 311 insw(data_addr, buf, len / 2);
278 } 312 }
279} 313}
314EXPORT_SYMBOL_GPL(ide_input_data);
280 315
281/* 316/*
282 * This is used for most PIO data transfers *to* the IDE interface 317 * This is used for most PIO data transfers *to* the IDE interface
283 */ 318 */
284static void ata_output_data(ide_drive_t *drive, struct request *rq, 319void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
285 void *buf, unsigned int len) 320 unsigned int len)
286{ 321{
287 ide_hwif_t *hwif = drive->hwif; 322 ide_hwif_t *hwif = drive->hwif;
288 struct ide_io_ports *io_ports = &hwif->io_ports; 323 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -320,15 +355,50 @@ static void ata_output_data(ide_drive_t *drive, struct request *rq,
320 outsw(data_addr, buf, len / 2); 355 outsw(data_addr, buf, len / 2);
321 } 356 }
322} 357}
358EXPORT_SYMBOL_GPL(ide_output_data);
359
360u8 ide_read_error(ide_drive_t *drive)
361{
362 ide_task_t task;
363
364 memset(&task, 0, sizeof(task));
365 task.tf_flags = IDE_TFLAG_IN_FEATURE;
366
367 drive->hwif->tp_ops->tf_read(drive, &task);
368
369 return task.tf.error;
370}
371EXPORT_SYMBOL_GPL(ide_read_error);
323 372
324void default_hwif_transport(ide_hwif_t *hwif) 373void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
325{ 374{
326 hwif->tf_load = ide_tf_load; 375 ide_task_t task;
327 hwif->tf_read = ide_tf_read;
328 376
329 hwif->input_data = ata_input_data; 377 memset(&task, 0, sizeof(task));
330 hwif->output_data = ata_output_data; 378 task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
379 IDE_TFLAG_IN_NSECT;
380
381 drive->hwif->tp_ops->tf_read(drive, &task);
382
383 *bcount = (task.tf.lbah << 8) | task.tf.lbam;
384 *ireason = task.tf.nsect & 3;
331} 385}
386EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
387
388const struct ide_tp_ops default_tp_ops = {
389 .exec_command = ide_exec_command,
390 .read_status = ide_read_status,
391 .read_altstatus = ide_read_altstatus,
392 .read_sff_dma_status = ide_read_sff_dma_status,
393
394 .set_irq = ide_set_irq,
395
396 .tf_load = ide_tf_load,
397 .tf_read = ide_tf_read,
398
399 .input_data = ide_input_data,
400 .output_data = ide_output_data,
401};
332 402
333void ide_fix_driveid (struct hd_driveid *id) 403void ide_fix_driveid (struct hd_driveid *id)
334{ 404{
@@ -440,10 +510,8 @@ void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
440 510
441 if (byteswap) { 511 if (byteswap) {
442 /* convert from big-endian to host byte order */ 512 /* convert from big-endian to host byte order */
443 for (p = end ; p != s;) { 513 for (p = end ; p != s;)
444 unsigned short *pp = (unsigned short *) (p -= 2); 514 be16_to_cpus((u16 *)(p -= 2));
445 *pp = ntohs(*pp);
446 }
447 } 515 }
448 /* strip leading blanks */ 516 /* strip leading blanks */
449 while (s != end && *s == ' ') 517 while (s != end && *s == ' ')
@@ -483,10 +551,10 @@ int drive_is_ready (ide_drive_t *drive)
483 * about possible isa-pnp and pci-pnp issues yet. 551 * about possible isa-pnp and pci-pnp issues yet.
484 */ 552 */
485 if (hwif->io_ports.ctl_addr) 553 if (hwif->io_ports.ctl_addr)
486 stat = ide_read_altstatus(drive); 554 stat = hwif->tp_ops->read_altstatus(hwif);
487 else 555 else
488 /* Note: this may clear a pending IRQ!! */ 556 /* Note: this may clear a pending IRQ!! */
489 stat = ide_read_status(drive); 557 stat = hwif->tp_ops->read_status(hwif);
490 558
491 if (stat & BUSY_STAT) 559 if (stat & BUSY_STAT)
492 /* drive busy: definitely not interrupting */ 560 /* drive busy: definitely not interrupting */
@@ -511,24 +579,26 @@ EXPORT_SYMBOL(drive_is_ready);
511 */ 579 */
512static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat) 580static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
513{ 581{
582 ide_hwif_t *hwif = drive->hwif;
583 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
514 unsigned long flags; 584 unsigned long flags;
515 int i; 585 int i;
516 u8 stat; 586 u8 stat;
517 587
518 udelay(1); /* spec allows drive 400ns to assert "BUSY" */ 588 udelay(1); /* spec allows drive 400ns to assert "BUSY" */
519 stat = ide_read_status(drive); 589 stat = tp_ops->read_status(hwif);
520 590
521 if (stat & BUSY_STAT) { 591 if (stat & BUSY_STAT) {
522 local_irq_set(flags); 592 local_irq_set(flags);
523 timeout += jiffies; 593 timeout += jiffies;
524 while ((stat = ide_read_status(drive)) & BUSY_STAT) { 594 while ((stat = tp_ops->read_status(hwif)) & BUSY_STAT) {
525 if (time_after(jiffies, timeout)) { 595 if (time_after(jiffies, timeout)) {
526 /* 596 /*
527 * One last read after the timeout in case 597 * One last read after the timeout in case
528 * heavy interrupt load made us not make any 598 * heavy interrupt load made us not make any
529 * progress during the timeout.. 599 * progress during the timeout..
530 */ 600 */
531 stat = ide_read_status(drive); 601 stat = tp_ops->read_status(hwif);
532 if (!(stat & BUSY_STAT)) 602 if (!(stat & BUSY_STAT))
533 break; 603 break;
534 604
@@ -548,7 +618,7 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
548 */ 618 */
549 for (i = 0; i < 10; i++) { 619 for (i = 0; i < 10; i++) {
550 udelay(1); 620 udelay(1);
551 stat = ide_read_status(drive); 621 stat = tp_ops->read_status(hwif);
552 622
553 if (OK_STAT(stat, good, bad)) { 623 if (OK_STAT(stat, good, bad)) {
554 *rstat = stat; 624 *rstat = stat;
@@ -674,6 +744,7 @@ no_80w:
674int ide_driveid_update(ide_drive_t *drive) 744int ide_driveid_update(ide_drive_t *drive)
675{ 745{
676 ide_hwif_t *hwif = drive->hwif; 746 ide_hwif_t *hwif = drive->hwif;
747 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
677 struct hd_driveid *id; 748 struct hd_driveid *id;
678 unsigned long timeout, flags; 749 unsigned long timeout, flags;
679 u8 stat; 750 u8 stat;
@@ -684,9 +755,9 @@ int ide_driveid_update(ide_drive_t *drive)
684 */ 755 */
685 756
686 SELECT_MASK(drive, 1); 757 SELECT_MASK(drive, 1);
687 ide_set_irq(drive, 0); 758 tp_ops->set_irq(hwif, 0);
688 msleep(50); 759 msleep(50);
689 hwif->OUTBSYNC(hwif, WIN_IDENTIFY, hwif->io_ports.command_addr); 760 tp_ops->exec_command(hwif, WIN_IDENTIFY);
690 timeout = jiffies + WAIT_WORSTCASE; 761 timeout = jiffies + WAIT_WORSTCASE;
691 do { 762 do {
692 if (time_after(jiffies, timeout)) { 763 if (time_after(jiffies, timeout)) {
@@ -695,11 +766,11 @@ int ide_driveid_update(ide_drive_t *drive)
695 } 766 }
696 767
697 msleep(50); /* give drive a breather */ 768 msleep(50); /* give drive a breather */
698 stat = ide_read_altstatus(drive); 769 stat = tp_ops->read_altstatus(hwif);
699 } while (stat & BUSY_STAT); 770 } while (stat & BUSY_STAT);
700 771
701 msleep(50); /* wait for IRQ and DRQ_STAT */ 772 msleep(50); /* wait for IRQ and DRQ_STAT */
702 stat = ide_read_status(drive); 773 stat = tp_ops->read_status(hwif);
703 774
704 if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) { 775 if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
705 SELECT_MASK(drive, 0); 776 SELECT_MASK(drive, 0);
@@ -713,8 +784,8 @@ int ide_driveid_update(ide_drive_t *drive)
713 local_irq_restore(flags); 784 local_irq_restore(flags);
714 return 0; 785 return 0;
715 } 786 }
716 hwif->input_data(drive, NULL, id, SECTOR_SIZE); 787 tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
717 (void)ide_read_status(drive); /* clear drive IRQ */ 788 (void)tp_ops->read_status(hwif); /* clear drive IRQ */
718 local_irq_enable(); 789 local_irq_enable();
719 local_irq_restore(flags); 790 local_irq_restore(flags);
720 ide_fix_driveid(id); 791 ide_fix_driveid(id);
@@ -735,9 +806,10 @@ int ide_driveid_update(ide_drive_t *drive)
735int ide_config_drive_speed(ide_drive_t *drive, u8 speed) 806int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
736{ 807{
737 ide_hwif_t *hwif = drive->hwif; 808 ide_hwif_t *hwif = drive->hwif;
738 struct ide_io_ports *io_ports = &hwif->io_ports; 809 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
739 int error = 0; 810 int error = 0;
740 u8 stat; 811 u8 stat;
812 ide_task_t task;
741 813
742#ifdef CONFIG_BLK_DEV_IDEDMA 814#ifdef CONFIG_BLK_DEV_IDEDMA
743 if (hwif->dma_ops) /* check if host supports DMA */ 815 if (hwif->dma_ops) /* check if host supports DMA */
@@ -770,12 +842,19 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
770 SELECT_DRIVE(drive); 842 SELECT_DRIVE(drive);
771 SELECT_MASK(drive, 0); 843 SELECT_MASK(drive, 0);
772 udelay(1); 844 udelay(1);
773 ide_set_irq(drive, 0); 845 tp_ops->set_irq(hwif, 0);
774 hwif->OUTB(speed, io_ports->nsect_addr); 846
775 hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr); 847 memset(&task, 0, sizeof(task));
776 hwif->OUTBSYNC(hwif, WIN_SETFEATURES, io_ports->command_addr); 848 task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT;
849 task.tf.feature = SETFEATURES_XFER;
850 task.tf.nsect = speed;
851
852 tp_ops->tf_load(drive, &task);
853
854 tp_ops->exec_command(hwif, WIN_SETFEATURES);
855
777 if (drive->quirk_list == 2) 856 if (drive->quirk_list == 2)
778 ide_set_irq(drive, 1); 857 tp_ops->set_irq(hwif, 1);
779 858
780 error = __ide_wait_stat(drive, drive->ready_stat, 859 error = __ide_wait_stat(drive, drive->ready_stat,
781 BUSY_STAT|DRQ_STAT|ERR_STAT, 860 BUSY_STAT|DRQ_STAT|ERR_STAT,
@@ -796,8 +875,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
796 875
797 skip: 876 skip:
798#ifdef CONFIG_BLK_DEV_IDEDMA 877#ifdef CONFIG_BLK_DEV_IDEDMA
799 if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) && 878 if (speed >= XFER_SW_DMA_0 && drive->using_dma)
800 drive->using_dma)
801 hwif->dma_ops->dma_host_set(drive, 1); 879 hwif->dma_ops->dma_host_set(drive, 1);
802 else if (hwif->dma_ops) /* check if host supports DMA */ 880 else if (hwif->dma_ops) /* check if host supports DMA */
803 ide_dma_off_quietly(drive); 881 ide_dma_off_quietly(drive);
@@ -881,7 +959,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
881 959
882 spin_lock_irqsave(&ide_lock, flags); 960 spin_lock_irqsave(&ide_lock, flags);
883 __ide_set_handler(drive, handler, timeout, expiry); 961 __ide_set_handler(drive, handler, timeout, expiry);
884 hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr); 962 hwif->tp_ops->exec_command(hwif, cmd);
885 /* 963 /*
886 * Drive takes 400nS to respond, we must avoid the IRQ being 964 * Drive takes 400nS to respond, we must avoid the IRQ being
887 * serviced before that. 965 * serviced before that.
@@ -899,7 +977,7 @@ void ide_execute_pkt_cmd(ide_drive_t *drive)
899 unsigned long flags; 977 unsigned long flags;
900 978
901 spin_lock_irqsave(&ide_lock, flags); 979 spin_lock_irqsave(&ide_lock, flags);
902 hwif->OUTBSYNC(hwif, WIN_PACKETCMD, hwif->io_ports.command_addr); 980 hwif->tp_ops->exec_command(hwif, WIN_PACKETCMD);
903 ndelay(400); 981 ndelay(400);
904 spin_unlock_irqrestore(&ide_lock, flags); 982 spin_unlock_irqrestore(&ide_lock, flags);
905} 983}
@@ -924,12 +1002,13 @@ static ide_startstop_t do_reset1 (ide_drive_t *, int);
924 */ 1002 */
925static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) 1003static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
926{ 1004{
927 ide_hwgroup_t *hwgroup = HWGROUP(drive); 1005 ide_hwif_t *hwif = drive->hwif;
1006 ide_hwgroup_t *hwgroup = hwif->hwgroup;
928 u8 stat; 1007 u8 stat;
929 1008
930 SELECT_DRIVE(drive); 1009 SELECT_DRIVE(drive);
931 udelay (10); 1010 udelay (10);
932 stat = ide_read_status(drive); 1011 stat = hwif->tp_ops->read_status(hwif);
933 1012
934 if (OK_STAT(stat, 0, BUSY_STAT)) 1013 if (OK_STAT(stat, 0, BUSY_STAT))
935 printk("%s: ATAPI reset complete\n", drive->name); 1014 printk("%s: ATAPI reset complete\n", drive->name);
@@ -975,7 +1054,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
975 } 1054 }
976 } 1055 }
977 1056
978 tmp = ide_read_status(drive); 1057 tmp = hwif->tp_ops->read_status(hwif);
979 1058
980 if (!OK_STAT(tmp, 0, BUSY_STAT)) { 1059 if (!OK_STAT(tmp, 0, BUSY_STAT)) {
981 if (time_before(jiffies, hwgroup->poll_timeout)) { 1060 if (time_before(jiffies, hwgroup->poll_timeout)) {
@@ -1089,8 +1168,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1089 ide_hwif_t *hwif; 1168 ide_hwif_t *hwif;
1090 ide_hwgroup_t *hwgroup; 1169 ide_hwgroup_t *hwgroup;
1091 struct ide_io_ports *io_ports; 1170 struct ide_io_ports *io_ports;
1171 const struct ide_tp_ops *tp_ops;
1092 const struct ide_port_ops *port_ops; 1172 const struct ide_port_ops *port_ops;
1093 u8 ctl;
1094 1173
1095 spin_lock_irqsave(&ide_lock, flags); 1174 spin_lock_irqsave(&ide_lock, flags);
1096 hwif = HWIF(drive); 1175 hwif = HWIF(drive);
@@ -1098,6 +1177,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1098 1177
1099 io_ports = &hwif->io_ports; 1178 io_ports = &hwif->io_ports;
1100 1179
1180 tp_ops = hwif->tp_ops;
1181
1101 /* We must not reset with running handlers */ 1182 /* We must not reset with running handlers */
1102 BUG_ON(hwgroup->handler != NULL); 1183 BUG_ON(hwgroup->handler != NULL);
1103 1184
@@ -1106,7 +1187,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1106 pre_reset(drive); 1187 pre_reset(drive);
1107 SELECT_DRIVE(drive); 1188 SELECT_DRIVE(drive);
1108 udelay (20); 1189 udelay (20);
1109 hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr); 1190 tp_ops->exec_command(hwif, WIN_SRST);
1110 ndelay(400); 1191 ndelay(400);
1111 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1192 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1112 hwgroup->polling = 1; 1193 hwgroup->polling = 1;
@@ -1135,16 +1216,15 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1135 * immediate interrupt due to the edge transition it produces. 1216 * immediate interrupt due to the edge transition it produces.
1136 * This single interrupt gives us a "fast poll" for drives that 1217 * This single interrupt gives us a "fast poll" for drives that
1137 * recover from reset very quickly, saving us the first 50ms wait time. 1218 * recover from reset very quickly, saving us the first 50ms wait time.
1219 *
1220 * TODO: add ->softreset method and stop abusing ->set_irq
1138 */ 1221 */
1139 /* set SRST and nIEN */ 1222 /* set SRST and nIEN */
1140 hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | 6, io_ports->ctl_addr); 1223 tp_ops->set_irq(hwif, 4);
1141 /* more than enough time */ 1224 /* more than enough time */
1142 udelay(10); 1225 udelay(10);
1143 if (drive->quirk_list == 2) 1226 /* clear SRST, leave nIEN (unless device is on the quirk list) */
1144 ctl = ATA_DEVCTL_OBS; /* clear SRST and nIEN */ 1227 tp_ops->set_irq(hwif, drive->quirk_list == 2);
1145 else
1146 ctl = ATA_DEVCTL_OBS | 2; /* clear SRST, leave nIEN */
1147 hwif->OUTBSYNC(hwif, ctl, io_ports->ctl_addr);
1148 /* more than enough time */ 1228 /* more than enough time */
1149 udelay(10); 1229 udelay(10);
1150 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1230 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1189,7 +1269,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1189 * about locking issues (2.5 work ?). 1269 * about locking issues (2.5 work ?).
1190 */ 1270 */
1191 mdelay(1); 1271 mdelay(1);
1192 stat = hwif->INB(hwif->io_ports.status_addr); 1272 stat = hwif->tp_ops->read_status(hwif);
1193 if ((stat & BUSY_STAT) == 0) 1273 if ((stat & BUSY_STAT) == 0)
1194 return 0; 1274 return 0;
1195 /* 1275 /*
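With the ide-iops.c hunks above, the standard transport operations (ide_exec_command(), ide_read_status(), ide_read_altstatus(), ide_read_sff_dma_status(), ide_set_irq(), ide_tf_load(), ide_tf_read(), ide_input_data(), ide_output_data()) are exported and gathered in default_tp_ops, replacing default_hwif_iops()/default_hwif_transport(). A host driver that needs one non-standard accessor can reuse the exported defaults for the rest; a minimal sketch, assuming a driver-local MMIO command write (my_exec_command() and my_tp_ops are hypothetical names, wired up through the port info ->tp_ops field handled in the ide-probe.c hunk further down):

	#include <linux/ide.h>
	#include <linux/io.h>

	/* Hypothetical controller-specific command write (MMIO). */
	static void my_exec_command(ide_hwif_t *hwif, u8 cmd)
	{
		writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
	}

	/* Reuse the newly exported defaults for everything else. */
	static const struct ide_tp_ops my_tp_ops = {
		.exec_command		= my_exec_command,
		.read_status		= ide_read_status,
		.read_altstatus		= ide_read_altstatus,
		.read_sff_dma_status	= ide_read_sff_dma_status,
		.set_irq		= ide_set_irq,
		.tf_load		= ide_tf_load,
		.tf_read		= ide_tf_read,
		.input_data		= ide_input_data,
		.output_data		= ide_output_data,
	};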
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 13af72f09ec4..97fefabea8b8 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -266,22 +266,11 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
266 266
267 rate = ide_rate_filter(drive, rate); 267 rate = ide_rate_filter(drive, rate);
268 268
269 BUG_ON(rate < XFER_PIO_0);
270
269 if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5) 271 if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
270 return ide_set_pio_mode(drive, rate); 272 return ide_set_pio_mode(drive, rate);
271 273
272 /*
273 * TODO: transfer modes 0x00-0x07 passed from the user-space are
274 * currently handled here which needs fixing (please note that such
275 * case could happen iff the transfer mode has already been set on
276 * the device by ide-proc.c::set_xfer_rate()).
277 */
278 if (rate < XFER_PIO_0) {
279 if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE)
280 return ide_set_dma_mode(drive, rate);
281 else
282 return ide_config_drive_speed(drive, rate);
283 }
284
285 return ide_set_dma_mode(drive, rate); 274 return ide_set_dma_mode(drive, rate);
286} 275}
287 276
@@ -336,7 +325,7 @@ static void ide_dump_sector(ide_drive_t *drive)
336 else 325 else
337 task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; 326 task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
338 327
339 drive->hwif->tf_read(drive, &task); 328 drive->hwif->tp_ops->tf_read(drive, &task);
340 329
341 if (lba48 || (tf->device & ATA_LBA)) 330 if (lba48 || (tf->device & ATA_LBA))
342 printk(", LBAsect=%llu", 331 printk(", LBAsect=%llu",
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 03f2ef5470a3..bac9b392b689 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -29,9 +29,10 @@ static struct pnp_device_id idepnp_devices[] = {
29 29
30static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) 30static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
31{ 31{
32 hw_regs_t hw; 32 struct ide_host *host;
33 ide_hwif_t *hwif;
34 unsigned long base, ctl; 33 unsigned long base, ctl;
34 int rc;
35 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 36
36 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); 37 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
37 38
@@ -59,31 +60,25 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
59 hw.irq = pnp_irq(dev, 0); 60 hw.irq = pnp_irq(dev, 0);
60 hw.chipset = ide_generic; 61 hw.chipset = ide_generic;
61 62
62 hwif = ide_find_port(); 63 rc = ide_host_add(NULL, hws, &host);
63 if (hwif) { 64 if (rc)
64 u8 index = hwif->index; 65 goto out;
65 u8 idx[4] = { index, 0xff, 0xff, 0xff };
66 66
67 ide_init_port_hw(hwif, &hw); 67 pnp_set_drvdata(dev, host);
68
69 pnp_set_drvdata(dev, hwif);
70
71 ide_device_add(idx, NULL);
72
73 return 0;
74 }
75 68
69 return 0;
70out:
76 release_region(ctl, 1); 71 release_region(ctl, 1);
77 release_region(base, 8); 72 release_region(base, 8);
78 73
79 return -1; 74 return rc;
80} 75}
81 76
82static void idepnp_remove(struct pnp_dev *dev) 77static void idepnp_remove(struct pnp_dev *dev)
83{ 78{
84 ide_hwif_t *hwif = pnp_get_drvdata(dev); 79 struct ide_host *host = pnp_get_drvdata(dev);
85 80
86 ide_unregister(hwif); 81 ide_host_remove(host);
87 82
88 release_region(pnp_port_start(dev, 1), 1); 83 release_region(pnp_port_start(dev, 1), 1);
89 release_region(pnp_port_start(dev, 0), 8); 84 release_region(pnp_port_start(dev, 0), 8);
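The ide-pnp.c conversion above shows the pattern the new interface expects from host drivers: fill in a hw_regs_t, place it in a NULL-padded hws[] array, and pass both to ide_host_add(), keeping the returned struct ide_host for ide_host_remove() at teardown. A minimal sketch, assuming the existing ide_std_init_ports() helper for the port setup that falls outside the visible hunk; my_attach() is a hypothetical function and error handling is trimmed:

	#include <linux/ide.h>
	#include <linux/string.h>

	static int my_attach(unsigned long base, unsigned long ctl, int irq,
			     struct ide_host **hostp)
	{
		hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

		memset(&hw, 0, sizeof(hw));
		ide_std_init_ports(&hw, base, ctl);	/* assumed helper */
		hw.irq = irq;
		hw.chipset = ide_generic;

		/* NULL port info: generic defaults, including default_tp_ops */
		return ide_host_add(NULL, hws, hostp);
	}

	/* Teardown mirrors idepnp_remove() above: ide_host_remove(*hostp); */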
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 235ebdb29b28..994e41099b42 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -39,8 +39,6 @@
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
43
44/** 42/**
45 * generic_id - add a generic drive id 43 * generic_id - add a generic drive id
46 * @drive: drive to make an ID block for 44 * @drive: drive to make an ID block for
@@ -126,7 +124,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
126 124
127 id = drive->id; 125 id = drive->id;
128 /* read 512 bytes of id info */ 126 /* read 512 bytes of id info */
129 hwif->input_data(drive, NULL, id, SECTOR_SIZE); 127 hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
130 128
131 drive->id_read = 1; 129 drive->id_read = 1;
132 local_irq_enable(); 130 local_irq_enable();
@@ -136,18 +134,6 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
136#endif 134#endif
137 ide_fix_driveid(id); 135 ide_fix_driveid(id);
138 136
139#if defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA)
140 /*
141 * EATA SCSI controllers do a hardware ATA emulation:
142 * Ignore them if there is a driver for them available.
143 */
144 if ((id->model[0] == 'P' && id->model[1] == 'M') ||
145 (id->model[0] == 'S' && id->model[1] == 'K')) {
146 printk("%s: EATA SCSI HBA %.10s\n", drive->name, id->model);
147 goto err_misc;
148 }
149#endif /* CONFIG_SCSI_EATA || CONFIG_SCSI_EATA_PIO */
150
151 /* 137 /*
152 * WIN_IDENTIFY returns little-endian info, 138 * WIN_IDENTIFY returns little-endian info,
153 * WIN_PIDENTIFY *usually* returns little-endian info. 139 * WIN_PIDENTIFY *usually* returns little-endian info.
@@ -169,7 +155,8 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
169 if (strstr(id->model, "E X A B Y T E N E S T")) 155 if (strstr(id->model, "E X A B Y T E N E S T"))
170 goto err_misc; 156 goto err_misc;
171 157
172 printk("%s: %s, ", drive->name, id->model); 158 printk(KERN_INFO "%s: %s, ", drive->name, id->model);
159
173 drive->present = 1; 160 drive->present = 1;
174 drive->dead = 0; 161 drive->dead = 0;
175 162
@@ -178,16 +165,17 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
178 */ 165 */
179 if (cmd == WIN_PIDENTIFY) { 166 if (cmd == WIN_PIDENTIFY) {
180 u8 type = (id->config >> 8) & 0x1f; 167 u8 type = (id->config >> 8) & 0x1f;
181 printk("ATAPI "); 168
169 printk(KERN_CONT "ATAPI ");
182 switch (type) { 170 switch (type) {
183 case ide_floppy: 171 case ide_floppy:
184 if (!strstr(id->model, "CD-ROM")) { 172 if (!strstr(id->model, "CD-ROM")) {
185 if (!strstr(id->model, "oppy") && 173 if (!strstr(id->model, "oppy") &&
186 !strstr(id->model, "poyp") && 174 !strstr(id->model, "poyp") &&
187 !strstr(id->model, "ZIP")) 175 !strstr(id->model, "ZIP"))
188 printk("cdrom or floppy?, assuming "); 176 printk(KERN_CONT "cdrom or floppy?, assuming ");
189 if (drive->media != ide_cdrom) { 177 if (drive->media != ide_cdrom) {
190 printk ("FLOPPY"); 178 printk(KERN_CONT "FLOPPY");
191 drive->removable = 1; 179 drive->removable = 1;
192 break; 180 break;
193 } 181 }
@@ -200,25 +188,25 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
200 /* kludge for Apple PowerBook internal zip */ 188 /* kludge for Apple PowerBook internal zip */
201 if (!strstr(id->model, "CD-ROM") && 189 if (!strstr(id->model, "CD-ROM") &&
202 strstr(id->model, "ZIP")) { 190 strstr(id->model, "ZIP")) {
203 printk ("FLOPPY"); 191 printk(KERN_CONT "FLOPPY");
204 type = ide_floppy; 192 type = ide_floppy;
205 break; 193 break;
206 } 194 }
207#endif 195#endif
208 printk ("CD/DVD-ROM"); 196 printk(KERN_CONT "CD/DVD-ROM");
209 break; 197 break;
210 case ide_tape: 198 case ide_tape:
211 printk ("TAPE"); 199 printk(KERN_CONT "TAPE");
212 break; 200 break;
213 case ide_optical: 201 case ide_optical:
214 printk ("OPTICAL"); 202 printk(KERN_CONT "OPTICAL");
215 drive->removable = 1; 203 drive->removable = 1;
216 break; 204 break;
217 default: 205 default:
218 printk("UNKNOWN (type %d)", type); 206 printk(KERN_CONT "UNKNOWN (type %d)", type);
219 break; 207 break;
220 } 208 }
221 printk (" drive\n"); 209 printk(KERN_CONT " drive\n");
222 drive->media = type; 210 drive->media = type;
223 /* an ATAPI device ignores DRDY */ 211 /* an ATAPI device ignores DRDY */
224 drive->ready_stat = 0; 212 drive->ready_stat = 0;
@@ -238,7 +226,9 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
238 drive->removable = 1; 226 drive->removable = 1;
239 227
240 drive->media = ide_disk; 228 drive->media = ide_disk;
241 printk("%s DISK drive\n", (id->config == 0x848a) ? "CFA" : "ATA" ); 229
230 printk(KERN_CONT "%s DISK drive\n",
231 (id->config == 0x848a) ? "CFA" : "ATA");
242 232
243 return; 233 return;
244 234
@@ -267,6 +257,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
267{ 257{
268 ide_hwif_t *hwif = HWIF(drive); 258 ide_hwif_t *hwif = HWIF(drive);
269 struct ide_io_ports *io_ports = &hwif->io_ports; 259 struct ide_io_ports *io_ports = &hwif->io_ports;
260 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
270 int use_altstatus = 0, rc; 261 int use_altstatus = 0, rc;
271 unsigned long timeout; 262 unsigned long timeout;
272 u8 s = 0, a = 0; 263 u8 s = 0, a = 0;
@@ -275,8 +266,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
275 msleep(50); 266 msleep(50);
276 267
277 if (io_ports->ctl_addr) { 268 if (io_ports->ctl_addr) {
278 a = ide_read_altstatus(drive); 269 a = tp_ops->read_altstatus(hwif);
279 s = ide_read_status(drive); 270 s = tp_ops->read_status(hwif);
280 if ((a ^ s) & ~INDEX_STAT) 271 if ((a ^ s) & ~INDEX_STAT)
281 /* ancient Seagate drives, broken interfaces */ 272 /* ancient Seagate drives, broken interfaces */
282 printk(KERN_INFO "%s: probing with STATUS(0x%02x) " 273 printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
@@ -290,12 +281,18 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
290 /* set features register for atapi 281 /* set features register for atapi
291 * identify command to be sure of reply 282 * identify command to be sure of reply
292 */ 283 */
293 if ((cmd == WIN_PIDENTIFY)) 284 if (cmd == WIN_PIDENTIFY) {
294 /* disable dma & overlap */ 285 ide_task_t task;
295 hwif->OUTB(0, io_ports->feature_addr); 286
287 memset(&task, 0, sizeof(task));
288 /* disable DMA & overlap */
289 task.tf_flags = IDE_TFLAG_OUT_FEATURE;
290
291 tp_ops->tf_load(drive, &task);
292 }
296 293
297 /* ask drive for ID */ 294 /* ask drive for ID */
298 hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr); 295 tp_ops->exec_command(hwif, cmd);
299 296
300 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; 297 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
301 timeout += jiffies; 298 timeout += jiffies;
@@ -306,13 +303,13 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
306 } 303 }
307 /* give drive a breather */ 304 /* give drive a breather */
308 msleep(50); 305 msleep(50);
309 s = use_altstatus ? ide_read_altstatus(drive) 306 s = use_altstatus ? tp_ops->read_altstatus(hwif)
310 : ide_read_status(drive); 307 : tp_ops->read_status(hwif);
311 } while (s & BUSY_STAT); 308 } while (s & BUSY_STAT);
312 309
313 /* wait for IRQ and DRQ_STAT */ 310 /* wait for IRQ and DRQ_STAT */
314 msleep(50); 311 msleep(50);
315 s = ide_read_status(drive); 312 s = tp_ops->read_status(hwif);
316 313
317 if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) { 314 if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) {
318 unsigned long flags; 315 unsigned long flags;
@@ -324,7 +321,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
324 /* drive responded with ID */ 321 /* drive responded with ID */
325 rc = 0; 322 rc = 0;
326 /* clear drive IRQ */ 323 /* clear drive IRQ */
327 (void)ide_read_status(drive); 324 (void)tp_ops->read_status(hwif);
328 local_irq_restore(flags); 325 local_irq_restore(flags);
329 } else { 326 } else {
330 /* drive refused ID */ 327 /* drive refused ID */
@@ -346,6 +343,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
346static int try_to_identify (ide_drive_t *drive, u8 cmd) 343static int try_to_identify (ide_drive_t *drive, u8 cmd)
347{ 344{
348 ide_hwif_t *hwif = HWIF(drive); 345 ide_hwif_t *hwif = HWIF(drive);
346 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
349 int retval; 347 int retval;
350 int autoprobe = 0; 348 int autoprobe = 0;
351 unsigned long cookie = 0; 349 unsigned long cookie = 0;
@@ -361,7 +359,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
361 autoprobe = 1; 359 autoprobe = 1;
362 cookie = probe_irq_on(); 360 cookie = probe_irq_on();
363 } 361 }
364 ide_set_irq(drive, autoprobe); 362 tp_ops->set_irq(hwif, autoprobe);
365 } 363 }
366 364
367 retval = actual_try_to_identify(drive, cmd); 365 retval = actual_try_to_identify(drive, cmd);
@@ -369,9 +367,9 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
369 if (autoprobe) { 367 if (autoprobe) {
370 int irq; 368 int irq;
371 369
372 ide_set_irq(drive, 0); 370 tp_ops->set_irq(hwif, 0);
373 /* clear drive IRQ */ 371 /* clear drive IRQ */
374 (void)ide_read_status(drive); 372 (void)tp_ops->read_status(hwif);
375 udelay(5); 373 udelay(5);
376 irq = probe_irq_off(cookie); 374 irq = probe_irq_off(cookie);
377 if (!hwif->irq) { 375 if (!hwif->irq) {
@@ -381,7 +379,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
381 /* Mmmm.. multiple IRQs.. 379 /* Mmmm.. multiple IRQs..
382 * don't know which was ours 380 * don't know which was ours
383 */ 381 */
384 printk("%s: IRQ probe failed (0x%lx)\n", 382 printk(KERN_ERR "%s: IRQ probe failed (0x%lx)\n",
385 drive->name, cookie); 383 drive->name, cookie);
386 } 384 }
387 } 385 }
@@ -396,7 +394,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
396 394
397 do { 395 do {
398 msleep(50); 396 msleep(50);
399 stat = hwif->INB(hwif->io_ports.status_addr); 397 stat = hwif->tp_ops->read_status(hwif);
400 if ((stat & BUSY_STAT) == 0) 398 if ((stat & BUSY_STAT) == 0)
401 return 0; 399 return 0;
402 } while (time_before(jiffies, timeout)); 400 } while (time_before(jiffies, timeout));
@@ -404,6 +402,18 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
404 return 1; 402 return 1;
405} 403}
406 404
405static u8 ide_read_device(ide_drive_t *drive)
406{
407 ide_task_t task;
408
409 memset(&task, 0, sizeof(task));
410 task.tf_flags = IDE_TFLAG_IN_DEVICE;
411
412 drive->hwif->tp_ops->tf_read(drive, &task);
413
414 return task.tf.device;
415}
416
407/** 417/**
408 * do_probe - probe an IDE device 418 * do_probe - probe an IDE device
409 * @drive: drive to probe 419 * @drive: drive to probe
@@ -428,7 +438,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
428static int do_probe (ide_drive_t *drive, u8 cmd) 438static int do_probe (ide_drive_t *drive, u8 cmd)
429{ 439{
430 ide_hwif_t *hwif = HWIF(drive); 440 ide_hwif_t *hwif = HWIF(drive);
431 struct ide_io_ports *io_ports = &hwif->io_ports; 441 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
432 int rc; 442 int rc;
433 u8 stat; 443 u8 stat;
434 444
@@ -438,7 +448,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
438 return 4; 448 return 4;
439 } 449 }
440#ifdef DEBUG 450#ifdef DEBUG
441 printk("probing for %s: present=%d, media=%d, probetype=%s\n", 451 printk(KERN_INFO "probing for %s: present=%d, media=%d, probetype=%s\n",
442 drive->name, drive->present, drive->media, 452 drive->name, drive->present, drive->media,
443 (cmd == WIN_IDENTIFY) ? "ATA" : "ATAPI"); 453 (cmd == WIN_IDENTIFY) ? "ATA" : "ATAPI");
444#endif 454#endif
@@ -449,8 +459,8 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
449 msleep(50); 459 msleep(50);
450 SELECT_DRIVE(drive); 460 SELECT_DRIVE(drive);
451 msleep(50); 461 msleep(50);
452 if (hwif->INB(io_ports->device_addr) != drive->select.all && 462
453 !drive->present) { 463 if (ide_read_device(drive) != drive->select.all && !drive->present) {
454 if (drive->select.b.unit != 0) { 464 if (drive->select.b.unit != 0) {
455 /* exit with drive0 selected */ 465 /* exit with drive0 selected */
456 SELECT_DRIVE(&hwif->drives[0]); 466 SELECT_DRIVE(&hwif->drives[0]);
@@ -461,7 +471,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
461 return 3; 471 return 3;
462 } 472 }
463 473
464 stat = ide_read_status(drive); 474 stat = tp_ops->read_status(hwif);
465 475
466 if (OK_STAT(stat, READY_STAT, BUSY_STAT) || 476 if (OK_STAT(stat, READY_STAT, BUSY_STAT) ||
467 drive->present || cmd == WIN_PIDENTIFY) { 477 drive->present || cmd == WIN_PIDENTIFY) {
@@ -471,7 +481,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
471 rc = try_to_identify(drive,cmd); 481 rc = try_to_identify(drive,cmd);
472 } 482 }
473 483
474 stat = ide_read_status(drive); 484 stat = tp_ops->read_status(hwif);
475 485
476 if (stat == (BUSY_STAT | READY_STAT)) 486 if (stat == (BUSY_STAT | READY_STAT))
477 return 4; 487 return 4;
@@ -482,13 +492,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
482 msleep(50); 492 msleep(50);
483 SELECT_DRIVE(drive); 493 SELECT_DRIVE(drive);
484 msleep(50); 494 msleep(50);
485 hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr); 495 tp_ops->exec_command(hwif, WIN_SRST);
486 (void)ide_busy_sleep(hwif); 496 (void)ide_busy_sleep(hwif);
487 rc = try_to_identify(drive, cmd); 497 rc = try_to_identify(drive, cmd);
488 } 498 }
489 499
490 /* ensure drive IRQ is clear */ 500 /* ensure drive IRQ is clear */
491 stat = ide_read_status(drive); 501 stat = tp_ops->read_status(hwif);
492 502
493 if (rc == 1) 503 if (rc == 1)
494 printk(KERN_ERR "%s: no response (status = 0x%02x)\n", 504 printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
@@ -502,7 +512,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
502 SELECT_DRIVE(&hwif->drives[0]); 512 SELECT_DRIVE(&hwif->drives[0]);
503 msleep(50); 513 msleep(50);
504 /* ensure drive irq is clear */ 514 /* ensure drive irq is clear */
505 (void)ide_read_status(drive); 515 (void)tp_ops->read_status(hwif);
506 } 516 }
507 return rc; 517 return rc;
508} 518}
@@ -513,12 +523,14 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
513static void enable_nest (ide_drive_t *drive) 523static void enable_nest (ide_drive_t *drive)
514{ 524{
515 ide_hwif_t *hwif = HWIF(drive); 525 ide_hwif_t *hwif = HWIF(drive);
526 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
516 u8 stat; 527 u8 stat;
517 528
518 printk("%s: enabling %s -- ", hwif->name, drive->id->model); 529 printk(KERN_INFO "%s: enabling %s -- ", hwif->name, drive->id->model);
530
519 SELECT_DRIVE(drive); 531 SELECT_DRIVE(drive);
520 msleep(50); 532 msleep(50);
521 hwif->OUTBSYNC(hwif, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr); 533 tp_ops->exec_command(hwif, EXABYTE_ENABLE_NEST);
522 534
523 if (ide_busy_sleep(hwif)) { 535 if (ide_busy_sleep(hwif)) {
524 printk(KERN_CONT "failed (timeout)\n"); 536 printk(KERN_CONT "failed (timeout)\n");
@@ -527,7 +539,7 @@ static void enable_nest (ide_drive_t *drive)
527 539
528 msleep(50); 540 msleep(50);
529 541
530 stat = ide_read_status(drive); 542 stat = tp_ops->read_status(hwif);
531 543
532 if (!OK_STAT(stat, 0, BAD_STAT)) 544 if (!OK_STAT(stat, 0, BAD_STAT))
533 printk(KERN_CONT "failed (status = 0x%02x)\n", stat); 545 printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
@@ -619,7 +631,7 @@ static inline u8 probe_for_drive (ide_drive_t *drive)
619 return drive->present; 631 return drive->present;
620} 632}
621 633
622static void hwif_release_dev (struct device *dev) 634static void hwif_release_dev(struct device *dev)
623{ 635{
624 ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev); 636 ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
625 637
@@ -709,7 +721,7 @@ static int ide_port_wait_ready(ide_hwif_t *hwif)
709 /* Ignore disks that we will not probe for later. */ 721 /* Ignore disks that we will not probe for later. */
710 if (!drive->noprobe || drive->present) { 722 if (!drive->noprobe || drive->present) {
711 SELECT_DRIVE(drive); 723 SELECT_DRIVE(drive);
712 ide_set_irq(drive, 1); 724 hwif->tp_ops->set_irq(hwif, 1);
713 mdelay(2); 725 mdelay(2);
714 rc = ide_wait_not_busy(hwif, 35000); 726 rc = ide_wait_not_busy(hwif, 35000);
715 if (rc) 727 if (rc)
@@ -864,7 +876,7 @@ static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
864 if (m && m->hwgroup && m->hwgroup != new->hwgroup) { 876 if (m && m->hwgroup && m->hwgroup != new->hwgroup) {
865 if (!new->hwgroup) 877 if (!new->hwgroup)
866 return; 878 return;
867 printk("%s: potential irq problem with %s and %s\n", 879 printk(KERN_WARNING "%s: potential IRQ problem with %s and %s\n",
868 hwif->name, new->name, m->name); 880 hwif->name, new->name, m->name);
869 } 881 }
870 if (!m || m->irq != hwif->irq) /* don't undo a prior perfect match */ 882 if (!m || m->irq != hwif->irq) /* don't undo a prior perfect match */
@@ -971,6 +983,45 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
971 mutex_unlock(&ide_cfg_mtx); 983 mutex_unlock(&ide_cfg_mtx);
972} 984}
973 985
986static ide_hwif_t *ide_ports[MAX_HWIFS];
987
988void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
989{
990 ide_hwgroup_t *hwgroup = hwif->hwgroup;
991
992 ide_ports[hwif->index] = NULL;
993
994 spin_lock_irq(&ide_lock);
995 /*
996 * Remove us from the hwgroup, and free
997 * the hwgroup if we were the only member
998 */
999 if (hwif->next == hwif) {
1000 BUG_ON(hwgroup->hwif != hwif);
1001 kfree(hwgroup);
1002 } else {
1003 /* There is another interface in hwgroup.
1004 * Unlink us, and set hwgroup->drive and ->hwif to
1005 * something sane.
1006 */
1007 ide_hwif_t *g = hwgroup->hwif;
1008
1009 while (g->next != hwif)
1010 g = g->next;
1011 g->next = hwif->next;
1012 if (hwgroup->hwif == hwif) {
 1013 /* Choose a random hwif for hwgroup->hwif.
1014 * It's guaranteed that there are no drives
1015 * left in the hwgroup.
1016 */
1017 BUG_ON(hwgroup->drive != NULL);
1018 hwgroup->hwif = g;
1019 }
1020 BUG_ON(hwgroup->hwif == hwif);
1021 }
1022 spin_unlock_irq(&ide_lock);
1023}
1024
974/* 1025/*
975 * This routine sets up the irq for an ide interface, and creates a new 1026 * This routine sets up the irq for an ide interface, and creates a new
976 * hwgroup for the irq/hwif if none was previously assigned. 1027 * hwgroup for the irq/hwif if none was previously assigned.
@@ -998,8 +1049,9 @@ static int init_irq (ide_hwif_t *hwif)
998 * Group up with any other hwifs that share our irq(s). 1049 * Group up with any other hwifs that share our irq(s).
999 */ 1050 */
1000 for (index = 0; index < MAX_HWIFS; index++) { 1051 for (index = 0; index < MAX_HWIFS; index++) {
1001 ide_hwif_t *h = &ide_hwifs[index]; 1052 ide_hwif_t *h = ide_ports[index];
1002 if (h->hwgroup) { /* scan only initialized hwif's */ 1053
1054 if (h && h->hwgroup) { /* scan only initialized ports */
1003 if (hwif->irq == h->irq) { 1055 if (hwif->irq == h->irq) {
1004 hwif->sharing_irq = h->sharing_irq = 1; 1056 hwif->sharing_irq = h->sharing_irq = 1;
1005 if (hwif->chipset != ide_pci || 1057 if (hwif->chipset != ide_pci ||
@@ -1053,6 +1105,8 @@ static int init_irq (ide_hwif_t *hwif)
1053 hwgroup->timer.data = (unsigned long) hwgroup; 1105 hwgroup->timer.data = (unsigned long) hwgroup;
1054 } 1106 }
1055 1107
1108 ide_ports[hwif->index] = hwif;
1109
1056 /* 1110 /*
1057 * Allocate the irq, if not already obtained for another hwif 1111 * Allocate the irq, if not already obtained for another hwif
1058 */ 1112 */
@@ -1066,8 +1120,7 @@ static int init_irq (ide_hwif_t *hwif)
1066 sa = IRQF_SHARED; 1120 sa = IRQF_SHARED;
1067 1121
1068 if (io_ports->ctl_addr) 1122 if (io_ports->ctl_addr)
1069 /* clear nIEN */ 1123 hwif->tp_ops->set_irq(hwif, 1);
1070 hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS, io_ports->ctl_addr);
1071 1124
1072 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) 1125 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
1073 goto out_unlink; 1126 goto out_unlink;
@@ -1082,17 +1135,17 @@ static int init_irq (ide_hwif_t *hwif)
1082 } 1135 }
1083 1136
1084#if !defined(__mc68000__) 1137#if !defined(__mc68000__)
1085 printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, 1138 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
1086 io_ports->data_addr, io_ports->status_addr, 1139 io_ports->data_addr, io_ports->status_addr,
1087 io_ports->ctl_addr, hwif->irq); 1140 io_ports->ctl_addr, hwif->irq);
1088#else 1141#else
1089 printk("%s at 0x%08lx on irq %d", hwif->name, 1142 printk(KERN_INFO "%s at 0x%08lx on irq %d", hwif->name,
1090 io_ports->data_addr, hwif->irq); 1143 io_ports->data_addr, hwif->irq);
1091#endif /* __mc68000__ */ 1144#endif /* __mc68000__ */
1092 if (match) 1145 if (match)
1093 printk(" (%sed with %s)", 1146 printk(KERN_CONT " (%sed with %s)",
1094 hwif->sharing_irq ? "shar" : "serializ", match->name); 1147 hwif->sharing_irq ? "shar" : "serializ", match->name);
1095 printk("\n"); 1148 printk(KERN_CONT "\n");
1096 1149
1097 mutex_unlock(&ide_cfg_mtx); 1150 mutex_unlock(&ide_cfg_mtx);
1098 return 0; 1151 return 0;
@@ -1227,7 +1280,7 @@ static int hwif_init(ide_hwif_t *hwif)
1227 if (!hwif->irq) { 1280 if (!hwif->irq) {
1228 hwif->irq = __ide_default_irq(hwif->io_ports.data_addr); 1281 hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
1229 if (!hwif->irq) { 1282 if (!hwif->irq) {
1230 printk("%s: DISABLED, NO IRQ\n", hwif->name); 1283 printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
1231 return 0; 1284 return 0;
1232 } 1285 }
1233 } 1286 }
@@ -1257,16 +1310,16 @@ static int hwif_init(ide_hwif_t *hwif)
1257 */ 1310 */
1258 hwif->irq = __ide_default_irq(hwif->io_ports.data_addr); 1311 hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
1259 if (!hwif->irq) { 1312 if (!hwif->irq) {
1260 printk("%s: Disabled unable to get IRQ %d.\n", 1313 printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
1261 hwif->name, old_irq); 1314 hwif->name, old_irq);
1262 goto out; 1315 goto out;
1263 } 1316 }
1264 if (init_irq(hwif)) { 1317 if (init_irq(hwif)) {
1265 printk("%s: probed IRQ %d and default IRQ %d failed.\n", 1318 printk(KERN_ERR "%s: probed IRQ %d and default IRQ %d failed\n",
1266 hwif->name, old_irq, hwif->irq); 1319 hwif->name, old_irq, hwif->irq);
1267 goto out; 1320 goto out;
1268 } 1321 }
1269 printk("%s: probed IRQ %d failed, using default.\n", 1322 printk(KERN_WARNING "%s: probed IRQ %d failed, using default\n",
1270 hwif->name, hwif->irq); 1323 hwif->name, hwif->irq);
1271 1324
1272done: 1325done:
@@ -1345,6 +1398,9 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1345 hwif->host_flags |= d->host_flags; 1398 hwif->host_flags |= d->host_flags;
1346 hwif->pio_mask = d->pio_mask; 1399 hwif->pio_mask = d->pio_mask;
1347 1400
1401 if (d->tp_ops)
1402 hwif->tp_ops = d->tp_ops;
1403
1348 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */ 1404 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
1349 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0) 1405 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
1350 hwif->port_ops = d->port_ops; 1406 hwif->port_ops = d->port_ops;
@@ -1363,6 +1419,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1363 1419
1364 if (rc < 0) { 1420 if (rc < 0) {
1365 printk(KERN_INFO "%s: DMA disabled\n", hwif->name); 1421 printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
1422 hwif->dma_base = 0;
1366 hwif->swdma_mask = 0; 1423 hwif->swdma_mask = 0;
1367 hwif->mwdma_mask = 0; 1424 hwif->mwdma_mask = 0;
1368 hwif->ultra_mask = 0; 1425 hwif->ultra_mask = 0;
@@ -1446,18 +1503,20 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif)
1446 return rc; 1503 return rc;
1447} 1504}
1448 1505
1506static unsigned int ide_indexes;
1507
1449/** 1508/**
1450 * ide_find_port_slot - find free ide_hwifs[] slot 1509 * ide_find_port_slot - find free port slot
1451 * @d: IDE port info 1510 * @d: IDE port info
1452 * 1511 *
1453 * Return the new hwif. If we are out of free slots return NULL. 1512 * Return the new port slot index or -ENOENT if we are out of free slots.
1454 */ 1513 */
1455 1514
1456ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d) 1515static int ide_find_port_slot(const struct ide_port_info *d)
1457{ 1516{
1458 ide_hwif_t *hwif; 1517 int idx = -ENOENT;
1459 int i;
1460 u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1; 1518 u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
 1519 u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
1461 1520
1462 /* 1521 /*
1463 * Claim an unassigned slot. 1522 * Claim an unassigned slot.
@@ -1469,51 +1528,114 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
1469 * Unless there is a bootable card that does not use the standard 1528 * Unless there is a bootable card that does not use the standard
1470 * ports 0x1f0/0x170 (the ide0/ide1 defaults). 1529 * ports 0x1f0/0x170 (the ide0/ide1 defaults).
1471 */ 1530 */
1472 if (bootable) { 1531 mutex_lock(&ide_cfg_mtx);
1473 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0; 1532 if (MAX_HWIFS == 1) {
1474 1533 if (ide_indexes == 0 && i == 0)
1475 for (; i < MAX_HWIFS; i++) { 1534 idx = 1;
1476 hwif = &ide_hwifs[i];
1477 if (hwif->chipset == ide_unknown)
1478 goto out_found;
1479 }
1480 } else { 1535 } else {
1481 for (i = 2; i < MAX_HWIFS; i++) { 1536 if (bootable) {
1482 hwif = &ide_hwifs[i]; 1537 if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
1483 if (hwif->chipset == ide_unknown) 1538 idx = ffz(ide_indexes | i);
1484 goto out_found; 1539 } else {
1540 if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
1541 idx = ffz(ide_indexes | 3);
1542 else if ((ide_indexes & 3) != 3)
1543 idx = ffz(ide_indexes);
1485 } 1544 }
1486 for (i = 0; i < 2 && i < MAX_HWIFS; i++) { 1545 }
1487 hwif = &ide_hwifs[i]; 1546 if (idx >= 0)
1488 if (hwif->chipset == ide_unknown) 1547 ide_indexes |= (1 << idx);
1489 goto out_found; 1548 mutex_unlock(&ide_cfg_mtx);
1549
1550 return idx;
1551}
1552
1553static void ide_free_port_slot(int idx)
1554{
1555 mutex_lock(&ide_cfg_mtx);
1556 ide_indexes &= ~(1 << idx);
1557 mutex_unlock(&ide_cfg_mtx);
1558}
1559
1560struct ide_host *ide_host_alloc_all(const struct ide_port_info *d,
1561 hw_regs_t **hws)
1562{
1563 struct ide_host *host;
1564 int i;
1565
1566 host = kzalloc(sizeof(*host), GFP_KERNEL);
1567 if (host == NULL)
1568 return NULL;
1569
1570 for (i = 0; i < MAX_HWIFS; i++) {
1571 ide_hwif_t *hwif;
1572 int idx;
1573
1574 if (hws[i] == NULL)
1575 continue;
1576
1577 hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
1578 if (hwif == NULL)
1579 continue;
1580
1581 idx = ide_find_port_slot(d);
1582 if (idx < 0) {
1583 printk(KERN_ERR "%s: no free slot for interface\n",
1584 d ? d->name : "ide");
1585 kfree(hwif);
1586 continue;
1490 } 1587 }
1588
1589 ide_init_port_data(hwif, idx);
1590
1591 hwif->host = host;
1592
1593 host->ports[i] = hwif;
1594 host->n_ports++;
1491 } 1595 }
1492 1596
1493 printk(KERN_ERR "%s: no free slot for interface\n", 1597 if (host->n_ports == 0) {
1494 d ? d->name : "ide"); 1598 kfree(host);
1599 return NULL;
1600 }
1495 1601
1496 return NULL; 1602 if (hws[0])
1603 host->dev[0] = hws[0]->dev;
1604
1605 if (d)
1606 host->host_flags = d->host_flags;
1497 1607
1498out_found: 1608 return host;
1499 ide_init_port_data(hwif, i);
1500 return hwif;
1501} 1609}
1502EXPORT_SYMBOL_GPL(ide_find_port_slot); 1610EXPORT_SYMBOL_GPL(ide_host_alloc_all);
1503 1611
1504int ide_device_add_all(u8 *idx, const struct ide_port_info *d) 1612struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
1613{
1614 hw_regs_t *hws_all[MAX_HWIFS];
1615 int i;
1616
1617 for (i = 0; i < MAX_HWIFS; i++)
1618 hws_all[i] = (i < 4) ? hws[i] : NULL;
1619
1620 return ide_host_alloc_all(d, hws_all);
1621}
1622EXPORT_SYMBOL_GPL(ide_host_alloc);
1623
1624int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1625 hw_regs_t **hws)
1505{ 1626{
1506 ide_hwif_t *hwif, *mate = NULL; 1627 ide_hwif_t *hwif, *mate = NULL;
1507 int i, rc = 0; 1628 int i, j = 0;
1508 1629
1509 for (i = 0; i < MAX_HWIFS; i++) { 1630 for (i = 0; i < MAX_HWIFS; i++) {
1510 if (idx[i] == 0xff) { 1631 hwif = host->ports[i];
1632
1633 if (hwif == NULL) {
1511 mate = NULL; 1634 mate = NULL;
1512 continue; 1635 continue;
1513 } 1636 }
1514 1637
1515 hwif = &ide_hwifs[idx[i]]; 1638 ide_init_port_hw(hwif, hws[i]);
1516
1517 ide_port_apply_params(hwif); 1639 ide_port_apply_params(hwif);
1518 1640
1519 if (d == NULL) { 1641 if (d == NULL) {
@@ -1534,10 +1656,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1534 } 1656 }
1535 1657
1536 for (i = 0; i < MAX_HWIFS; i++) { 1658 for (i = 0; i < MAX_HWIFS; i++) {
1537 if (idx[i] == 0xff) 1659 hwif = host->ports[i];
1538 continue;
1539 1660
1540 hwif = &ide_hwifs[idx[i]]; 1661 if (hwif == NULL)
1662 continue;
1541 1663
1542 if (ide_probe_port(hwif) == 0) 1664 if (ide_probe_port(hwif) == 0)
1543 hwif->present = 1; 1665 hwif->present = 1;
@@ -1551,19 +1673,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1551 } 1673 }
1552 1674
1553 for (i = 0; i < MAX_HWIFS; i++) { 1675 for (i = 0; i < MAX_HWIFS; i++) {
1554 if (idx[i] == 0xff) 1676 hwif = host->ports[i];
1555 continue;
1556 1677
1557 hwif = &ide_hwifs[idx[i]]; 1678 if (hwif == NULL)
1679 continue;
1558 1680
1559 if (hwif_init(hwif) == 0) { 1681 if (hwif_init(hwif) == 0) {
1560 printk(KERN_INFO "%s: failed to initialize IDE " 1682 printk(KERN_INFO "%s: failed to initialize IDE "
1561 "interface\n", hwif->name); 1683 "interface\n", hwif->name);
1562 hwif->present = 0; 1684 hwif->present = 0;
1563 rc = -1;
1564 continue; 1685 continue;
1565 } 1686 }
1566 1687
1688 j++;
1689
1567 if (hwif->present) 1690 if (hwif->present)
1568 ide_port_setup_devices(hwif); 1691 ide_port_setup_devices(hwif);
1569 1692
@@ -1574,10 +1697,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1574 } 1697 }
1575 1698
1576 for (i = 0; i < MAX_HWIFS; i++) { 1699 for (i = 0; i < MAX_HWIFS; i++) {
1577 if (idx[i] == 0xff) 1700 hwif = host->ports[i];
1578 continue;
1579 1701
1580 hwif = &ide_hwifs[idx[i]]; 1702 if (hwif == NULL)
1703 continue;
1581 1704
1582 if (hwif->chipset == ide_unknown) 1705 if (hwif->chipset == ide_unknown)
1583 hwif->chipset = ide_generic; 1706 hwif->chipset = ide_generic;
@@ -1587,10 +1710,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1587 } 1710 }
1588 1711
1589 for (i = 0; i < MAX_HWIFS; i++) { 1712 for (i = 0; i < MAX_HWIFS; i++) {
1590 if (idx[i] == 0xff) 1713 hwif = host->ports[i];
1591 continue;
1592 1714
1593 hwif = &ide_hwifs[idx[i]]; 1715 if (hwif == NULL)
1716 continue;
1594 1717
1595 ide_sysfs_register_port(hwif); 1718 ide_sysfs_register_port(hwif);
1596 ide_proc_register_port(hwif); 1719 ide_proc_register_port(hwif);
@@ -1599,21 +1722,64 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1599 ide_proc_port_register_devices(hwif); 1722 ide_proc_port_register_devices(hwif);
1600 } 1723 }
1601 1724
1602 return rc; 1725 return j ? 0 : -1;
1603} 1726}
1604EXPORT_SYMBOL_GPL(ide_device_add_all); 1727EXPORT_SYMBOL_GPL(ide_host_register);
1605 1728
1606int ide_device_add(u8 idx[4], const struct ide_port_info *d) 1729int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws,
1730 struct ide_host **hostp)
1607{ 1731{
1608 u8 idx_all[MAX_HWIFS]; 1732 struct ide_host *host;
1733 int rc;
1734
1735 host = ide_host_alloc(d, hws);
1736 if (host == NULL)
1737 return -ENOMEM;
1738
1739 rc = ide_host_register(host, d, hws);
1740 if (rc) {
1741 ide_host_free(host);
1742 return rc;
1743 }
1744
1745 if (hostp)
1746 *hostp = host;
1747
1748 return 0;
1749}
1750EXPORT_SYMBOL_GPL(ide_host_add);
1751
1752void ide_host_free(struct ide_host *host)
1753{
1754 ide_hwif_t *hwif;
1609 int i; 1755 int i;
1610 1756
1611 for (i = 0; i < MAX_HWIFS; i++) 1757 for (i = 0; i < MAX_HWIFS; i++) {
1612 idx_all[i] = (i < 4) ? idx[i] : 0xff; 1758 hwif = host->ports[i];
1759
1760 if (hwif == NULL)
1761 continue;
1762
1763 ide_free_port_slot(hwif->index);
1764 kfree(hwif);
1765 }
1613 1766
1614 return ide_device_add_all(idx_all, d); 1767 kfree(host);
1615} 1768}
1616EXPORT_SYMBOL_GPL(ide_device_add); 1769EXPORT_SYMBOL_GPL(ide_host_free);
1770
1771void ide_host_remove(struct ide_host *host)
1772{
1773 int i;
1774
1775 for (i = 0; i < MAX_HWIFS; i++) {
1776 if (host->ports[i])
1777 ide_unregister(host->ports[i]);
1778 }
1779
1780 ide_host_free(host);
1781}
1782EXPORT_SYMBOL_GPL(ide_host_remove);
1617 1783
1618void ide_port_scan(ide_hwif_t *hwif) 1784void ide_port_scan(ide_hwif_t *hwif)
1619{ 1785{
@@ -1634,11 +1800,10 @@ void ide_port_scan(ide_hwif_t *hwif)
1634} 1800}
1635EXPORT_SYMBOL_GPL(ide_port_scan); 1801EXPORT_SYMBOL_GPL(ide_port_scan);
1636 1802
1637static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no, 1803static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
1638 const struct ide_port_info *d, 1804 u8 port_no, const struct ide_port_info *d,
1639 unsigned long config) 1805 unsigned long config)
1640{ 1806{
1641 ide_hwif_t *hwif;
1642 unsigned long base, ctl; 1807 unsigned long base, ctl;
1643 int irq; 1808 int irq;
1644 1809
@@ -1668,33 +1833,25 @@ static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
1668 ide_std_init_ports(hw, base, ctl); 1833 ide_std_init_ports(hw, base, ctl);
1669 hw->irq = irq; 1834 hw->irq = irq;
1670 hw->chipset = d->chipset; 1835 hw->chipset = d->chipset;
1836 hw->config = config;
1671 1837
1672 hwif = ide_find_port_slot(d); 1838 hws[port_no] = hw;
1673 if (hwif) {
1674 ide_init_port_hw(hwif, hw);
1675 if (config)
1676 hwif->config_data = config;
1677 idx[port_no] = hwif->index;
1678 }
1679} 1839}
1680 1840
1681int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) 1841int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
1682{ 1842{
1683 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 1843 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
1684 hw_regs_t hw[2];
1685 1844
1686 memset(&hw, 0, sizeof(hw)); 1845 memset(&hw, 0, sizeof(hw));
1687 1846
1688 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0) 1847 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
1689 ide_legacy_init_one(idx, &hw[0], 0, d, config); 1848 ide_legacy_init_one(hws, &hw[0], 0, d, config);
1690 ide_legacy_init_one(idx, &hw[1], 1, d, config); 1849 ide_legacy_init_one(hws, &hw[1], 1, d, config);
1691 1850
1692 if (idx[0] == 0xff && idx[1] == 0xff && 1851 if (hws[0] == NULL && hws[1] == NULL &&
1693 (d->host_flags & IDE_HFLAG_SINGLE)) 1852 (d->host_flags & IDE_HFLAG_SINGLE))
1694 return -ENOENT; 1853 return -ENOENT;
1695 1854
1696 ide_device_add(idx, d); 1855 return ide_host_add(d, hws, NULL);
1697
1698 return 0;
1699} 1856}
1700EXPORT_SYMBOL_GPL(ide_legacy_device_add); 1857EXPORT_SYMBOL_GPL(ide_legacy_device_add);
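
The hunks above replace the static ide_hwifs[]/idx[] registration scheme with per-host allocation: ide_find_port_slot() now hands out indexes from the ide_indexes bitmap via ffz(), and drivers go through ide_host_alloc()/ide_host_register() (or the combined ide_host_add()) and tear down with ide_host_remove()/ide_host_free(). A minimal usage sketch of that interface follows; the example_* names, I/O ports and IRQ are placeholders for a hypothetical driver and are not part of the patch.

#include <linux/module.h>
#include <linux/string.h>
#include <linux/ide.h>

static struct ide_host *example_host;	/* hypothetical driver state */

static int __init example_ide_init(void)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* placeholder ports */
	hw.irq = 14;				/* placeholder IRQ   */

	/*
	 * ide_host_add() == ide_host_alloc() + ide_host_register(); the
	 * resulting host is returned through the third argument when it
	 * is non-NULL.  Port slots come from the ide_indexes bitmap, so
	 * there is no longer a static ide_hwifs[] array to index.
	 */
	return ide_host_add(NULL, hws, &example_host);
}

static void __exit example_ide_exit(void)
{
	/* unregisters every port, then releases the slots and the host */
	ide_host_remove(example_host);
}

module_init(example_ide_init);
module_exit(example_ide_exit);

Drivers that need to do work between allocation and probing (falconide below holds the Atari bus lock across registration) can call ide_host_alloc() and ide_host_register() separately instead.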
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 8af88bf0969b..f66c9c3f6fc6 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -105,7 +105,7 @@ static int proc_ide_read_identify
105 len = sprintf(page, "\n"); 105 len = sprintf(page, "\n");
106 106
107 if (drive) { 107 if (drive) {
108 unsigned short *val = (unsigned short *) page; 108 __le16 *val = (__le16 *)page;
109 109
110 err = taskfile_lib_get_identify(drive, page); 110 err = taskfile_lib_get_identify(drive, page);
111 if (!err) { 111 if (!err) {
@@ -113,7 +113,7 @@ static int proc_ide_read_identify
113 page = out; 113 page = out;
114 do { 114 do {
115 out += sprintf(out, "%04x%c", 115 out += sprintf(out, "%04x%c",
116 le16_to_cpu(*val), (++i & 7) ? ' ' : '\n'); 116 le16_to_cpup(val), (++i & 7) ? ' ' : '\n');
117 val += 1; 117 val += 1;
118 } while (i < (SECTOR_WORDS * 2)); 118 } while (i < (SECTOR_WORDS * 2));
119 len = out - page; 119 len = out - page;
@@ -345,7 +345,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
345 ide_task_t task; 345 ide_task_t task;
346 int err; 346 int err;
347 347
348 if (arg < 0 || arg > 70) 348 if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
349 return -EINVAL; 349 return -EINVAL;
350 350
351 memset(&task, 0, sizeof(task)); 351 memset(&task, 0, sizeof(task));
@@ -357,7 +357,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
357 357
358 err = ide_no_data_taskfile(drive, &task); 358 err = ide_no_data_taskfile(drive, &task);
359 359
360 if (!err && arg) { 360 if (!err) {
361 ide_set_xfer_rate(drive, (u8) arg); 361 ide_set_xfer_rate(drive, (u8) arg);
362 ide_driveid_update(drive); 362 ide_driveid_update(drive);
363 } 363 }
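
For reference, the proc_ide_read_identify() change above swaps the plain unsigned short cast for a sparse-annotated __le16 pointer read through le16_to_cpup(), and set_xfer_rate() now checks against XFER_PIO_0..XFER_UDMA_6 instead of the magic 0..70 range. A small sketch of the accessor pattern; identify_word() is a hypothetical helper, not part of the patch.

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Sketch only: same numeric result as the old cast to unsigned short,
 * but the __le16 annotation lets sparse check the endian conversion.
 */
static u16 identify_word(const void *buf, int i)
{
	const __le16 *id = buf;	/* identify data is little-endian */

	return le16_to_cpup(&id[i]);
}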
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 353dd11b9283..82c2afe4d28a 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -195,23 +195,6 @@ enum {
195#define IDETAPE_BLOCK_DESCRIPTOR 0 195#define IDETAPE_BLOCK_DESCRIPTOR 0
196#define IDETAPE_CAPABILITIES_PAGE 0x2a 196#define IDETAPE_CAPABILITIES_PAGE 0x2a
197 197
198/* Tape flag bits values. */
199enum {
200 IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
201 /* 0 When the tape position is unknown */
202 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
203 /* Device already opened */
204 IDETAPE_FLAG_BUSY = (1 << 2),
205 /* Attempt to auto-detect the current user block size */
206 IDETAPE_FLAG_DETECT_BS = (1 << 3),
207 /* Currently on a filemark */
208 IDETAPE_FLAG_FILEMARK = (1 << 4),
209 /* DRQ interrupt device */
210 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
211 /* 0 = no tape is loaded, so we don't rewind after ejecting */
212 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
213};
214
215/* 198/*
216 * Most of our global data which we need to save even as we leave the driver due 199 * Most of our global data which we need to save even as we leave the driver due
217 * to an interrupt or a timer event is stored in the struct defined below. 200 * to an interrupt or a timer event is stored in the struct defined below.
@@ -312,8 +295,6 @@ typedef struct ide_tape_obj {
312 /* Wasted space in each stage */ 295 /* Wasted space in each stage */
313 int excess_bh_size; 296 int excess_bh_size;
314 297
315 /* Status/Action flags: long for set_bit */
316 unsigned long flags;
317 /* protects the ide-tape queue */ 298 /* protects the ide-tape queue */
318 spinlock_t lock; 299 spinlock_t lock;
319 300
@@ -341,23 +322,29 @@ static struct class *idetape_sysfs_class;
341#define ide_tape_g(disk) \ 322#define ide_tape_g(disk) \
342 container_of((disk)->private_data, struct ide_tape_obj, driver) 323 container_of((disk)->private_data, struct ide_tape_obj, driver)
343 324
325static void ide_tape_release(struct kref *);
326
344static struct ide_tape_obj *ide_tape_get(struct gendisk *disk) 327static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
345{ 328{
346 struct ide_tape_obj *tape = NULL; 329 struct ide_tape_obj *tape = NULL;
347 330
348 mutex_lock(&idetape_ref_mutex); 331 mutex_lock(&idetape_ref_mutex);
349 tape = ide_tape_g(disk); 332 tape = ide_tape_g(disk);
350 if (tape) 333 if (tape) {
351 kref_get(&tape->kref); 334 kref_get(&tape->kref);
335 if (ide_device_get(tape->drive)) {
336 kref_put(&tape->kref, ide_tape_release);
337 tape = NULL;
338 }
339 }
352 mutex_unlock(&idetape_ref_mutex); 340 mutex_unlock(&idetape_ref_mutex);
353 return tape; 341 return tape;
354} 342}
355 343
356static void ide_tape_release(struct kref *);
357
358static void ide_tape_put(struct ide_tape_obj *tape) 344static void ide_tape_put(struct ide_tape_obj *tape)
359{ 345{
360 mutex_lock(&idetape_ref_mutex); 346 mutex_lock(&idetape_ref_mutex);
347 ide_device_put(tape->drive);
361 kref_put(&tape->kref, ide_tape_release); 348 kref_put(&tape->kref, ide_tape_release);
362 mutex_unlock(&idetape_ref_mutex); 349 mutex_unlock(&idetape_ref_mutex);
363} 350}
@@ -398,7 +385,7 @@ static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
398 count = min( 385 count = min(
399 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)), 386 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
400 bcount); 387 bcount);
401 drive->hwif->input_data(drive, NULL, bh->b_data + 388 drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
402 atomic_read(&bh->b_count), count); 389 atomic_read(&bh->b_count), count);
403 bcount -= count; 390 bcount -= count;
404 atomic_add(count, &bh->b_count); 391 atomic_add(count, &bh->b_count);
@@ -424,7 +411,7 @@ static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
424 return; 411 return;
425 } 412 }
426 count = min((unsigned int)pc->b_count, (unsigned int)bcount); 413 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
427 drive->hwif->output_data(drive, NULL, pc->b_data, count); 414 drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
428 bcount -= count; 415 bcount -= count;
429 pc->b_data += count; 416 pc->b_data += count;
430 pc->b_count -= count; 417 pc->b_count -= count;
@@ -585,7 +572,6 @@ static void ide_tape_kfree_buffer(idetape_tape_t *tape)
585 bh = bh->b_reqnext; 572 bh = bh->b_reqnext;
586 kfree(prev_bh); 573 kfree(prev_bh);
587 } 574 }
588 kfree(tape->merge_bh);
589} 575}
590 576
591static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) 577static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
@@ -665,15 +651,15 @@ static void ide_tape_callback(ide_drive_t *drive)
665 if (readpos[0] & 0x4) { 651 if (readpos[0] & 0x4) {
666 printk(KERN_INFO "ide-tape: Block location is unknown" 652 printk(KERN_INFO "ide-tape: Block location is unknown"
667 "to the tape\n"); 653 "to the tape\n");
668 clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags); 654 clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
669 uptodate = 0; 655 uptodate = 0;
670 } else { 656 } else {
671 debug_log(DBG_SENSE, "Block Location - %u\n", 657 debug_log(DBG_SENSE, "Block Location - %u\n",
672 be32_to_cpu(*(u32 *)&readpos[4])); 658 be32_to_cpup((__be32 *)&readpos[4]));
673 659
674 tape->partition = readpos[1]; 660 tape->partition = readpos[1];
675 tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]); 661 tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]);
676 set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags); 662 set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
677 } 663 }
678 } 664 }
679 665
@@ -690,7 +676,6 @@ static void idetape_init_pc(struct ide_atapi_pc *pc)
690 pc->buf_size = IDETAPE_PC_BUFFER_SIZE; 676 pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
691 pc->bh = NULL; 677 pc->bh = NULL;
692 pc->b_data = NULL; 678 pc->b_data = NULL;
693 pc->callback = ide_tape_callback;
694} 679}
695 680
696static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc) 681static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -705,7 +690,7 @@ static void idetape_init_rq(struct request *rq, u8 cmd)
705{ 690{
706 blk_rq_init(NULL, rq); 691 blk_rq_init(NULL, rq);
707 rq->cmd_type = REQ_TYPE_SPECIAL; 692 rq->cmd_type = REQ_TYPE_SPECIAL;
708 rq->cmd[0] = cmd; 693 rq->cmd[13] = cmd;
709} 694}
710 695
711/* 696/*
@@ -732,6 +717,7 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
732 rq->cmd_flags |= REQ_PREEMPT; 717 rq->cmd_flags |= REQ_PREEMPT;
733 rq->buffer = (char *) pc; 718 rq->buffer = (char *) pc;
734 rq->rq_disk = tape->disk; 719 rq->rq_disk = tape->disk;
720 memcpy(rq->cmd, pc->c, 12);
735 ide_do_drive_cmd(drive, rq); 721 ide_do_drive_cmd(drive, rq);
736} 722}
737 723
@@ -742,7 +728,6 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
742 */ 728 */
743static void idetape_retry_pc(ide_drive_t *drive) 729static void idetape_retry_pc(ide_drive_t *drive)
744{ 730{
745 idetape_tape_t *tape = drive->driver_data;
746 struct ide_atapi_pc *pc; 731 struct ide_atapi_pc *pc;
747 struct request *rq; 732 struct request *rq;
748 733
@@ -750,7 +735,7 @@ static void idetape_retry_pc(ide_drive_t *drive)
750 pc = idetape_next_pc_storage(drive); 735 pc = idetape_next_pc_storage(drive);
751 rq = idetape_next_rq_storage(drive); 736 rq = idetape_next_rq_storage(drive);
752 idetape_create_request_sense_cmd(pc); 737 idetape_create_request_sense_cmd(pc);
753 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 738 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
754 idetape_queue_pc_head(drive, pc, rq); 739 idetape_queue_pc_head(drive, pc, rq);
755} 740}
756 741
@@ -887,7 +872,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
887 pc->error = IDETAPE_ERROR_GENERAL; 872 pc->error = IDETAPE_ERROR_GENERAL;
888 } 873 }
889 tape->failed_pc = NULL; 874 tape->failed_pc = NULL;
890 pc->callback(drive); 875 drive->pc_callback(drive);
891 return ide_stopped; 876 return ide_stopped;
892 } 877 }
893 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]); 878 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -927,11 +912,12 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
927 912
928static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) 913static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
929{ 914{
915 ide_hwif_t *hwif = drive->hwif;
930 idetape_tape_t *tape = drive->driver_data; 916 idetape_tape_t *tape = drive->driver_data;
931 struct ide_atapi_pc *pc = tape->pc; 917 struct ide_atapi_pc *pc = tape->pc;
932 u8 stat; 918 u8 stat;
933 919
934 stat = ide_read_status(drive); 920 stat = hwif->tp_ops->read_status(hwif);
935 921
936 if (stat & SEEK_STAT) { 922 if (stat & SEEK_STAT) {
937 if (stat & ERR_STAT) { 923 if (stat & ERR_STAT) {
@@ -948,14 +934,17 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
948 pc->error = IDETAPE_ERROR_GENERAL; 934 pc->error = IDETAPE_ERROR_GENERAL;
949 tape->failed_pc = NULL; 935 tape->failed_pc = NULL;
950 } 936 }
951 pc->callback(drive); 937 drive->pc_callback(drive);
952 return ide_stopped; 938 return ide_stopped;
953} 939}
954 940
955static void ide_tape_create_rw_cmd(idetape_tape_t *tape, 941static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
956 struct ide_atapi_pc *pc, unsigned int length, 942 struct ide_atapi_pc *pc, struct request *rq,
957 struct idetape_bh *bh, u8 opcode) 943 u8 opcode)
958{ 944{
945 struct idetape_bh *bh = (struct idetape_bh *)rq->special;
946 unsigned int length = rq->current_nr_sectors;
947
959 idetape_init_pc(pc); 948 idetape_init_pc(pc);
960 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); 949 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
961 pc->c[1] = 1; 950 pc->c[1] = 1;
@@ -975,11 +964,14 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
975 pc->b_data = bh->b_data; 964 pc->b_data = bh->b_data;
976 pc->b_count = atomic_read(&bh->b_count); 965 pc->b_count = atomic_read(&bh->b_count);
977 } 966 }
967
968 memcpy(rq->cmd, pc->c, 12);
978} 969}
979 970
980static ide_startstop_t idetape_do_request(ide_drive_t *drive, 971static ide_startstop_t idetape_do_request(ide_drive_t *drive,
981 struct request *rq, sector_t block) 972 struct request *rq, sector_t block)
982{ 973{
974 ide_hwif_t *hwif = drive->hwif;
983 idetape_tape_t *tape = drive->driver_data; 975 idetape_tape_t *tape = drive->driver_data;
984 struct ide_atapi_pc *pc = NULL; 976 struct ide_atapi_pc *pc = NULL;
985 struct request *postponed_rq = tape->postponed_rq; 977 struct request *postponed_rq = tape->postponed_rq;
@@ -1017,17 +1009,17 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1017 * If the tape is still busy, postpone our request and service 1009 * If the tape is still busy, postpone our request and service
1018 * the other device meanwhile. 1010 * the other device meanwhile.
1019 */ 1011 */
1020 stat = ide_read_status(drive); 1012 stat = hwif->tp_ops->read_status(hwif);
1021 1013
1022 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2)) 1014 if (!drive->dsc_overlap && !(rq->cmd[13] & REQ_IDETAPE_PC2))
1023 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 1015 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
1024 1016
1025 if (drive->post_reset == 1) { 1017 if (drive->post_reset == 1) {
1026 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 1018 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
1027 drive->post_reset = 0; 1019 drive->post_reset = 0;
1028 } 1020 }
1029 1021
1030 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) && 1022 if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) &&
1031 (stat & SEEK_STAT) == 0) { 1023 (stat & SEEK_STAT) == 0) {
1032 if (postponed_rq == NULL) { 1024 if (postponed_rq == NULL) {
1033 tape->dsc_polling_start = jiffies; 1025 tape->dsc_polling_start = jiffies;
@@ -1036,7 +1028,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1036 } else if (time_after(jiffies, tape->dsc_timeout)) { 1028 } else if (time_after(jiffies, tape->dsc_timeout)) {
1037 printk(KERN_ERR "ide-tape: %s: DSC timeout\n", 1029 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1038 tape->name); 1030 tape->name);
1039 if (rq->cmd[0] & REQ_IDETAPE_PC2) { 1031 if (rq->cmd[13] & REQ_IDETAPE_PC2) {
1040 idetape_media_access_finished(drive); 1032 idetape_media_access_finished(drive);
1041 return ide_stopped; 1033 return ide_stopped;
1042 } else { 1034 } else {
@@ -1049,35 +1041,29 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1049 idetape_postpone_request(drive); 1041 idetape_postpone_request(drive);
1050 return ide_stopped; 1042 return ide_stopped;
1051 } 1043 }
1052 if (rq->cmd[0] & REQ_IDETAPE_READ) { 1044 if (rq->cmd[13] & REQ_IDETAPE_READ) {
1053 pc = idetape_next_pc_storage(drive); 1045 pc = idetape_next_pc_storage(drive);
1054 ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors, 1046 ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
1055 (struct idetape_bh *)rq->special,
1056 READ_6);
1057 goto out; 1047 goto out;
1058 } 1048 }
1059 if (rq->cmd[0] & REQ_IDETAPE_WRITE) { 1049 if (rq->cmd[13] & REQ_IDETAPE_WRITE) {
1060 pc = idetape_next_pc_storage(drive); 1050 pc = idetape_next_pc_storage(drive);
1061 ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors, 1051 ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
1062 (struct idetape_bh *)rq->special,
1063 WRITE_6);
1064 goto out; 1052 goto out;
1065 } 1053 }
1066 if (rq->cmd[0] & REQ_IDETAPE_PC1) { 1054 if (rq->cmd[13] & REQ_IDETAPE_PC1) {
1067 pc = (struct ide_atapi_pc *) rq->buffer; 1055 pc = (struct ide_atapi_pc *) rq->buffer;
1068 rq->cmd[0] &= ~(REQ_IDETAPE_PC1); 1056 rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
1069 rq->cmd[0] |= REQ_IDETAPE_PC2; 1057 rq->cmd[13] |= REQ_IDETAPE_PC2;
1070 goto out; 1058 goto out;
1071 } 1059 }
1072 if (rq->cmd[0] & REQ_IDETAPE_PC2) { 1060 if (rq->cmd[13] & REQ_IDETAPE_PC2) {
1073 idetape_media_access_finished(drive); 1061 idetape_media_access_finished(drive);
1074 return ide_stopped; 1062 return ide_stopped;
1075 } 1063 }
1076 BUG(); 1064 BUG();
1077out:
1078 if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags))
1079 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
1080 1065
1066out:
1081 return idetape_issue_pc(drive, pc); 1067 return idetape_issue_pc(drive, pc);
1082} 1068}
1083 1069
@@ -1281,8 +1267,9 @@ static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1281 1267
1282 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 1268 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1283 rq->cmd_type = REQ_TYPE_SPECIAL; 1269 rq->cmd_type = REQ_TYPE_SPECIAL;
1284 rq->cmd[0] = REQ_IDETAPE_PC1; 1270 rq->cmd[13] = REQ_IDETAPE_PC1;
1285 rq->buffer = (char *)pc; 1271 rq->buffer = (char *)pc;
1272 memcpy(rq->cmd, pc->c, 12);
1286 error = blk_execute_rq(drive->queue, tape->disk, rq, 0); 1273 error = blk_execute_rq(drive->queue, tape->disk, rq, 0);
1287 blk_put_request(rq); 1274 blk_put_request(rq);
1288 return error; 1275 return error;
@@ -1304,7 +1291,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1304 int load_attempted = 0; 1291 int load_attempted = 0;
1305 1292
1306 /* Wait for the tape to become ready */ 1293 /* Wait for the tape to become ready */
1307 set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); 1294 set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
1308 timeout += jiffies; 1295 timeout += jiffies;
1309 while (time_before(jiffies, timeout)) { 1296 while (time_before(jiffies, timeout)) {
1310 idetape_create_test_unit_ready_cmd(&pc); 1297 idetape_create_test_unit_ready_cmd(&pc);
@@ -1397,7 +1384,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
1397 if (tape->chrdev_dir != IDETAPE_DIR_READ) 1384 if (tape->chrdev_dir != IDETAPE_DIR_READ)
1398 return; 1385 return;
1399 1386
1400 clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags); 1387 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
1401 tape->merge_bh_size = 0; 1388 tape->merge_bh_size = 0;
1402 if (tape->merge_bh != NULL) { 1389 if (tape->merge_bh != NULL) {
1403 ide_tape_kfree_buffer(tape); 1390 ide_tape_kfree_buffer(tape);
@@ -1465,7 +1452,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
1465 1452
1466 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 1453 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1467 rq->cmd_type = REQ_TYPE_SPECIAL; 1454 rq->cmd_type = REQ_TYPE_SPECIAL;
1468 rq->cmd[0] = cmd; 1455 rq->cmd[13] = cmd;
1469 rq->rq_disk = tape->disk; 1456 rq->rq_disk = tape->disk;
1470 rq->special = (void *)bh; 1457 rq->special = (void *)bh;
1471 rq->sector = tape->first_frame; 1458 rq->sector = tape->first_frame;
@@ -1636,7 +1623,7 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
1636 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); 1623 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
1637 1624
1638 /* If we are at a filemark, return a read length of 0 */ 1625 /* If we are at a filemark, return a read length of 0 */
1639 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1626 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1640 return 0; 1627 return 0;
1641 1628
1642 idetape_init_read(drive); 1629 idetape_init_read(drive);
@@ -1746,7 +1733,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
1746 1733
1747 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1734 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
1748 tape->merge_bh_size = 0; 1735 tape->merge_bh_size = 0;
1749 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1736 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1750 ++count; 1737 ++count;
1751 ide_tape_discard_merge_buffer(drive, 0); 1738 ide_tape_discard_merge_buffer(drive, 0);
1752 } 1739 }
@@ -1801,7 +1788,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1801 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1788 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1802 1789
1803 if (tape->chrdev_dir != IDETAPE_DIR_READ) { 1790 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1804 if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags)) 1791 if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags))
1805 if (count > tape->blk_size && 1792 if (count > tape->blk_size &&
1806 (count % tape->blk_size) == 0) 1793 (count % tape->blk_size) == 0)
1807 tape->user_bs_factor = count / tape->blk_size; 1794 tape->user_bs_factor = count / tape->blk_size;
@@ -1841,7 +1828,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1841 tape->merge_bh_size = bytes_read-temp; 1828 tape->merge_bh_size = bytes_read-temp;
1842 } 1829 }
1843finish: 1830finish:
1844 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) { 1831 if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
1845 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); 1832 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
1846 1833
1847 idetape_space_over_filemarks(drive, MTFSF, 1); 1834 idetape_space_over_filemarks(drive, MTFSF, 1);
@@ -2027,7 +2014,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2027 !IDETAPE_LU_LOAD_MASK); 2014 !IDETAPE_LU_LOAD_MASK);
2028 retval = idetape_queue_pc_tail(drive, &pc); 2015 retval = idetape_queue_pc_tail(drive, &pc);
2029 if (!retval) 2016 if (!retval)
2030 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); 2017 clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
2031 return retval; 2018 return retval;
2032 case MTNOP: 2019 case MTNOP:
2033 ide_tape_discard_merge_buffer(drive, 0); 2020 ide_tape_discard_merge_buffer(drive, 0);
@@ -2050,9 +2037,9 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2050 mt_count % tape->blk_size) 2037 mt_count % tape->blk_size)
2051 return -EIO; 2038 return -EIO;
2052 tape->user_bs_factor = mt_count / tape->blk_size; 2039 tape->user_bs_factor = mt_count / tape->blk_size;
2053 clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); 2040 clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
2054 } else 2041 } else
2055 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); 2042 set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
2056 return 0; 2043 return 0;
2057 case MTSEEK: 2044 case MTSEEK:
2058 ide_tape_discard_merge_buffer(drive, 0); 2045 ide_tape_discard_merge_buffer(drive, 0);
@@ -2202,20 +2189,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2202 2189
2203 filp->private_data = tape; 2190 filp->private_data = tape;
2204 2191
2205 if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) { 2192 if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) {
2206 retval = -EBUSY; 2193 retval = -EBUSY;
2207 goto out_put_tape; 2194 goto out_put_tape;
2208 } 2195 }
2209 2196
2210 retval = idetape_wait_ready(drive, 60 * HZ); 2197 retval = idetape_wait_ready(drive, 60 * HZ);
2211 if (retval) { 2198 if (retval) {
2212 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2199 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2213 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); 2200 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
2214 goto out_put_tape; 2201 goto out_put_tape;
2215 } 2202 }
2216 2203
2217 idetape_read_position(drive); 2204 idetape_read_position(drive);
2218 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags)) 2205 if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags))
2219 (void)idetape_rewind_tape(drive); 2206 (void)idetape_rewind_tape(drive);
2220 2207
2221 /* Read block size and write protect status from drive. */ 2208 /* Read block size and write protect status from drive. */
@@ -2231,7 +2218,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2231 if (tape->write_prot) { 2218 if (tape->write_prot) {
2232 if ((filp->f_flags & O_ACCMODE) == O_WRONLY || 2219 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
2233 (filp->f_flags & O_ACCMODE) == O_RDWR) { 2220 (filp->f_flags & O_ACCMODE) == O_RDWR) {
2234 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2221 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2235 retval = -EROFS; 2222 retval = -EROFS;
2236 goto out_put_tape; 2223 goto out_put_tape;
2237 } 2224 }
@@ -2291,7 +2278,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2291 ide_tape_discard_merge_buffer(drive, 1); 2278 ide_tape_discard_merge_buffer(drive, 1);
2292 } 2279 }
2293 2280
2294 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags)) 2281 if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags))
2295 (void) idetape_rewind_tape(drive); 2282 (void) idetape_rewind_tape(drive);
2296 if (tape->chrdev_dir == IDETAPE_DIR_NONE) { 2283 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2297 if (tape->door_locked == DOOR_LOCKED) { 2284 if (tape->door_locked == DOOR_LOCKED) {
@@ -2301,7 +2288,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2301 } 2288 }
2302 } 2289 }
2303 } 2290 }
2304 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2291 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2305 ide_tape_put(tape); 2292 ide_tape_put(tape);
2306 unlock_kernel(); 2293 unlock_kernel();
2307 return 0; 2294 return 0;
@@ -2394,23 +2381,23 @@ static void idetape_get_mode_sense_results(ide_drive_t *drive)
2394 caps = pc.buf + 4 + pc.buf[3]; 2381 caps = pc.buf + 4 + pc.buf[3];
2395 2382
2396 /* convert to host order and save for later use */ 2383 /* convert to host order and save for later use */
2397 speed = be16_to_cpu(*(u16 *)&caps[14]); 2384 speed = be16_to_cpup((__be16 *)&caps[14]);
2398 max_speed = be16_to_cpu(*(u16 *)&caps[8]); 2385 max_speed = be16_to_cpup((__be16 *)&caps[8]);
2399 2386
2400 put_unaligned(max_speed, (u16 *)&caps[8]); 2387 *(u16 *)&caps[8] = max_speed;
2401 put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]); 2388 *(u16 *)&caps[12] = be16_to_cpup((__be16 *)&caps[12]);
2402 put_unaligned(speed, (u16 *)&caps[14]); 2389 *(u16 *)&caps[14] = speed;
2403 put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]); 2390 *(u16 *)&caps[16] = be16_to_cpup((__be16 *)&caps[16]);
2404 2391
2405 if (!speed) { 2392 if (!speed) {
2406 printk(KERN_INFO "ide-tape: %s: invalid tape speed " 2393 printk(KERN_INFO "ide-tape: %s: invalid tape speed "
2407 "(assuming 650KB/sec)\n", drive->name); 2394 "(assuming 650KB/sec)\n", drive->name);
2408 put_unaligned(650, (u16 *)&caps[14]); 2395 *(u16 *)&caps[14] = 650;
2409 } 2396 }
2410 if (!max_speed) { 2397 if (!max_speed) {
2411 printk(KERN_INFO "ide-tape: %s: invalid max_speed " 2398 printk(KERN_INFO "ide-tape: %s: invalid max_speed "
2412 "(assuming 650KB/sec)\n", drive->name); 2399 "(assuming 650KB/sec)\n", drive->name);
2413 put_unaligned(650, (u16 *)&caps[8]); 2400 *(u16 *)&caps[8] = 650;
2414 } 2401 }
2415 2402
2416 memcpy(&tape->caps, caps, 20); 2403 memcpy(&tape->caps, caps, 20);
@@ -2464,6 +2451,8 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2464 u8 gcw[2]; 2451 u8 gcw[2];
2465 u16 *ctl = (u16 *)&tape->caps[12]; 2452 u16 *ctl = (u16 *)&tape->caps[12];
2466 2453
2454 drive->pc_callback = ide_tape_callback;
2455
2467 spin_lock_init(&tape->lock); 2456 spin_lock_init(&tape->lock);
2468 drive->dsc_overlap = 1; 2457 drive->dsc_overlap = 1;
2469 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) { 2458 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
@@ -2484,7 +2473,7 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2484 2473
2485 /* Command packet DRQ type */ 2474 /* Command packet DRQ type */
2486 if (((gcw[0] & 0x60) >> 5) == 1) 2475 if (((gcw[0] & 0x60) >> 5) == 1)
2487 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags); 2476 set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
2488 2477
2489 idetape_get_inquiry_results(drive); 2478 idetape_get_inquiry_results(drive);
2490 idetape_get_mode_sense_results(drive); 2479 idetape_get_mode_sense_results(drive);
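
The ide-tape.c hunks above move the per-device flags from tape->flags to drive->atapi_flags (IDE_AFLAG_*), route completion through drive->pc_callback, and change the request layout: the ATAPI packet command is now copied into rq->cmd[0..11] while the driver-private REQ_IDETAPE_* type moves from rq->cmd[0] to rq->cmd[13]. A condensed sketch of that layout, assuming the ide-tape driver context; example_fill_tape_rq() is a hypothetical helper that folds together idetape_init_rq() and the memcpy added to the queueing paths.

#include <linux/blkdev.h>
#include <linux/ide.h>

static void example_fill_tape_rq(struct request *rq,
				 struct ide_atapi_pc *pc, u8 type)
{
	blk_rq_init(NULL, rq);
	rq->cmd_type = REQ_TYPE_SPECIAL;
	memcpy(rq->cmd, pc->c, 12);	/* ATAPI packet command bytes   */
	rq->cmd[13] = type;		/* driver-private REQ_IDETAPE_* */
	rq->buffer = (char *)pc;
}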
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 1fbdb746dc88..7fb6f1c86272 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -64,6 +64,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
64 ide_hwif_t *hwif = HWIF(drive); 64 ide_hwif_t *hwif = HWIF(drive);
65 struct ide_taskfile *tf = &task->tf; 65 struct ide_taskfile *tf = &task->tf;
66 ide_handler_t *handler = NULL; 66 ide_handler_t *handler = NULL;
67 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
67 const struct ide_dma_ops *dma_ops = hwif->dma_ops; 68 const struct ide_dma_ops *dma_ops = hwif->dma_ops;
68 69
69 if (task->data_phase == TASKFILE_MULTI_IN || 70 if (task->data_phase == TASKFILE_MULTI_IN ||
@@ -80,15 +81,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
80 81
81 if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { 82 if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
82 ide_tf_dump(drive->name, tf); 83 ide_tf_dump(drive->name, tf);
83 ide_set_irq(drive, 1); 84 tp_ops->set_irq(hwif, 1);
84 SELECT_MASK(drive, 0); 85 SELECT_MASK(drive, 0);
85 hwif->tf_load(drive, task); 86 tp_ops->tf_load(drive, task);
86 } 87 }
87 88
88 switch (task->data_phase) { 89 switch (task->data_phase) {
89 case TASKFILE_MULTI_OUT: 90 case TASKFILE_MULTI_OUT:
90 case TASKFILE_OUT: 91 case TASKFILE_OUT:
91 hwif->OUTBSYNC(hwif, tf->command, hwif->io_ports.command_addr); 92 tp_ops->exec_command(hwif, tf->command);
92 ndelay(400); /* FIXME */ 93 ndelay(400); /* FIXME */
93 return pre_task_out_intr(drive, task->rq); 94 return pre_task_out_intr(drive, task->rq);
94 case TASKFILE_MULTI_IN: 95 case TASKFILE_MULTI_IN:
@@ -124,7 +125,11 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile);
124 */ 125 */
125static ide_startstop_t set_multmode_intr(ide_drive_t *drive) 126static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
126{ 127{
127 u8 stat = ide_read_status(drive); 128 ide_hwif_t *hwif = drive->hwif;
129 u8 stat;
130
131 local_irq_enable_in_hardirq();
132 stat = hwif->tp_ops->read_status(hwif);
128 133
129 if (OK_STAT(stat, READY_STAT, BAD_STAT)) 134 if (OK_STAT(stat, READY_STAT, BAD_STAT))
130 drive->mult_count = drive->mult_req; 135 drive->mult_count = drive->mult_req;
@@ -141,11 +146,18 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
141 */ 146 */
142static ide_startstop_t set_geometry_intr(ide_drive_t *drive) 147static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
143{ 148{
149 ide_hwif_t *hwif = drive->hwif;
144 int retries = 5; 150 int retries = 5;
145 u8 stat; 151 u8 stat;
146 152
147 while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--) 153 local_irq_enable_in_hardirq();
154
155 while (1) {
156 stat = hwif->tp_ops->read_status(hwif);
157 if ((stat & BUSY_STAT) == 0 || retries-- == 0)
158 break;
148 udelay(10); 159 udelay(10);
160 };
149 161
150 if (OK_STAT(stat, READY_STAT, BAD_STAT)) 162 if (OK_STAT(stat, READY_STAT, BAD_STAT))
151 return ide_stopped; 163 return ide_stopped;
@@ -162,7 +174,11 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
162 */ 174 */
163static ide_startstop_t recal_intr(ide_drive_t *drive) 175static ide_startstop_t recal_intr(ide_drive_t *drive)
164{ 176{
165 u8 stat = ide_read_status(drive); 177 ide_hwif_t *hwif = drive->hwif;
178 u8 stat;
179
180 local_irq_enable_in_hardirq();
181 stat = hwif->tp_ops->read_status(hwif);
166 182
167 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 183 if (!OK_STAT(stat, READY_STAT, BAD_STAT))
168 return ide_error(drive, "recal_intr", stat); 184 return ide_error(drive, "recal_intr", stat);
@@ -174,11 +190,12 @@ static ide_startstop_t recal_intr(ide_drive_t *drive)
174 */ 190 */
175static ide_startstop_t task_no_data_intr(ide_drive_t *drive) 191static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
176{ 192{
177 ide_task_t *args = HWGROUP(drive)->rq->special; 193 ide_hwif_t *hwif = drive->hwif;
194 ide_task_t *args = hwif->hwgroup->rq->special;
178 u8 stat; 195 u8 stat;
179 196
180 local_irq_enable_in_hardirq(); 197 local_irq_enable_in_hardirq();
181 stat = ide_read_status(drive); 198 stat = hwif->tp_ops->read_status(hwif);
182 199
183 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 200 if (!OK_STAT(stat, READY_STAT, BAD_STAT))
184 return ide_error(drive, "task_no_data_intr", stat); 201 return ide_error(drive, "task_no_data_intr", stat);
@@ -192,6 +209,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
192 209
193static u8 wait_drive_not_busy(ide_drive_t *drive) 210static u8 wait_drive_not_busy(ide_drive_t *drive)
194{ 211{
212 ide_hwif_t *hwif = drive->hwif;
195 int retries; 213 int retries;
196 u8 stat; 214 u8 stat;
197 215
@@ -200,7 +218,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
200 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms. 218 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
201 */ 219 */
202 for (retries = 0; retries < 1000; retries++) { 220 for (retries = 0; retries < 1000; retries++) {
203 stat = ide_read_status(drive); 221 stat = hwif->tp_ops->read_status(hwif);
204 222
205 if (stat & BUSY_STAT) 223 if (stat & BUSY_STAT)
206 udelay(10); 224 udelay(10);
@@ -255,9 +273,9 @@ static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
255 273
256 /* do the actual data transfer */ 274 /* do the actual data transfer */
257 if (write) 275 if (write)
258 hwif->output_data(drive, rq, buf, SECTOR_SIZE); 276 hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
259 else 277 else
260 hwif->input_data(drive, rq, buf, SECTOR_SIZE); 278 hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);
261 279
262 kunmap_atomic(buf, KM_BIO_SRC_IRQ); 280 kunmap_atomic(buf, KM_BIO_SRC_IRQ);
263#ifdef CONFIG_HIGHMEM 281#ifdef CONFIG_HIGHMEM
@@ -383,8 +401,8 @@ static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq
383static ide_startstop_t task_in_intr(ide_drive_t *drive) 401static ide_startstop_t task_in_intr(ide_drive_t *drive)
384{ 402{
385 ide_hwif_t *hwif = drive->hwif; 403 ide_hwif_t *hwif = drive->hwif;
386 struct request *rq = HWGROUP(drive)->rq; 404 struct request *rq = hwif->hwgroup->rq;
387 u8 stat = ide_read_status(drive); 405 u8 stat = hwif->tp_ops->read_status(hwif);
388 406
389 /* Error? */ 407 /* Error? */
390 if (stat & ERR_STAT) 408 if (stat & ERR_STAT)
@@ -418,7 +436,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
418{ 436{
419 ide_hwif_t *hwif = drive->hwif; 437 ide_hwif_t *hwif = drive->hwif;
420 struct request *rq = HWGROUP(drive)->rq; 438 struct request *rq = HWGROUP(drive)->rq;
421 u8 stat = ide_read_status(drive); 439 u8 stat = hwif->tp_ops->read_status(hwif);
422 440
423 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) 441 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
424 return task_error(drive, rq, __func__, stat); 442 return task_error(drive, rq, __func__, stat);
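
The taskfile paths above now reach the hardware only through hwif->tp_ops (exec_command, read_status, tf_load, set_irq, input_data, output_data, ...), so hosts with unusual register access can override individual hooks. A simplified sketch of the resulting status-poll idiom, loosely mirroring wait_drive_not_busy(); the helper name is hypothetical.

#include <linux/delay.h>
#include <linux/ide.h>

static u8 example_poll_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int retries = 1000;
	u8 stat;

	do {
		/* all register access goes through the tp_ops vector */
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & BUSY_STAT) == 0)
			break;
		udelay(10);		/* give up after ~10 ms */
	} while (--retries);

	return stat;
}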
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index d4a6b102a772..772451600e4d 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) 2 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
3 * Copyrifht (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz 3 * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
4 */ 4 */
5 5
6/* 6/*
@@ -101,8 +101,7 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
101 101
102 init_completion(&hwif->gendev_rel_comp); 102 init_completion(&hwif->gendev_rel_comp);
103 103
104 default_hwif_iops(hwif); 104 hwif->tp_ops = &default_tp_ops;
105 default_hwif_transport(hwif);
106 105
107 ide_port_init_devices_data(hwif); 106 ide_port_init_devices_data(hwif);
108} 107}
@@ -134,41 +133,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
134 } 133 }
135} 134}
136 135
137void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
138{
139 ide_hwgroup_t *hwgroup = hwif->hwgroup;
140
141 spin_lock_irq(&ide_lock);
142 /*
143 * Remove us from the hwgroup, and free
144 * the hwgroup if we were the only member
145 */
146 if (hwif->next == hwif) {
147 BUG_ON(hwgroup->hwif != hwif);
148 kfree(hwgroup);
149 } else {
150 /* There is another interface in hwgroup.
151 * Unlink us, and set hwgroup->drive and ->hwif to
152 * something sane.
153 */
154 ide_hwif_t *g = hwgroup->hwif;
155
156 while (g->next != hwif)
157 g = g->next;
158 g->next = hwif->next;
159 if (hwgroup->hwif == hwif) {
160 /* Chose a random hwif for hwgroup->hwif.
161 * It's guaranteed that there are no drives
162 * left in the hwgroup.
163 */
164 BUG_ON(hwgroup->drive != NULL);
165 hwgroup->hwif = g;
166 }
167 BUG_ON(hwgroup->hwif == hwif);
168 }
169 spin_unlock_irq(&ide_lock);
170}
171
172/* Called with ide_lock held. */ 136/* Called with ide_lock held. */
173static void __ide_port_unregister_devices(ide_hwif_t *hwif) 137static void __ide_port_unregister_devices(ide_hwif_t *hwif)
174{ 138{
@@ -269,16 +233,9 @@ void ide_unregister(ide_hwif_t *hwif)
269 if (hwif->dma_base) 233 if (hwif->dma_base)
270 ide_release_dma_engine(hwif); 234 ide_release_dma_engine(hwif);
271 235
272 spin_lock_irq(&ide_lock);
273 /* restore hwif data to pristine status */
274 ide_init_port_data(hwif, hwif->index);
275 spin_unlock_irq(&ide_lock);
276
277 mutex_unlock(&ide_cfg_mtx); 236 mutex_unlock(&ide_cfg_mtx);
278} 237}
279 238
280EXPORT_SYMBOL(ide_unregister);
281
282void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) 239void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
283{ 240{
284 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); 241 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
@@ -287,8 +244,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
287 hwif->dev = hw->dev; 244 hwif->dev = hw->dev;
288 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev; 245 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
289 hwif->ack_intr = hw->ack_intr; 246 hwif->ack_intr = hw->ack_intr;
247 hwif->config_data = hw->config;
290} 248}
291EXPORT_SYMBOL_GPL(ide_init_port_hw);
292 249
293/* 250/*
294 * Locks for IDE setting functionality 251 * Locks for IDE setting functionality
@@ -661,6 +618,53 @@ set_val:
661 618
662EXPORT_SYMBOL(generic_ide_ioctl); 619EXPORT_SYMBOL(generic_ide_ioctl);
663 620
621/**
622 * ide_device_get - get an additional reference to a ide_drive_t
623 * @drive: device to get a reference to
624 *
625 * Gets a reference to the ide_drive_t and increments the use count of the
626 * underlying LLDD module.
627 */
628int ide_device_get(ide_drive_t *drive)
629{
630 struct device *host_dev;
631 struct module *module;
632
633 if (!get_device(&drive->gendev))
634 return -ENXIO;
635
636 host_dev = drive->hwif->host->dev[0];
637 module = host_dev ? host_dev->driver->owner : NULL;
638
639 if (module && !try_module_get(module)) {
640 put_device(&drive->gendev);
641 return -ENXIO;
642 }
643
644 return 0;
645}
646EXPORT_SYMBOL_GPL(ide_device_get);
647
648/**
649 * ide_device_put - release a reference to a ide_drive_t
650 * @drive: device to release a reference on
651 *
652 * Release a reference to the ide_drive_t and decrements the use count of
653 * the underlying LLDD module.
654 */
655void ide_device_put(ide_drive_t *drive)
656{
657#ifdef CONFIG_MODULE_UNLOAD
658 struct device *host_dev = drive->hwif->host->dev[0];
659 struct module *module = host_dev ? host_dev->driver->owner : NULL;
660
661 if (module)
662 module_put(module);
663#endif
664 put_device(&drive->gendev);
665}
666EXPORT_SYMBOL_GPL(ide_device_put);
667
664static int ide_bus_match(struct device *dev, struct device_driver *drv) 668static int ide_bus_match(struct device *dev, struct device_driver *drv)
665{ 669{
666 return 1; 670 return 1;
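
Besides dropping the hwgroup unlinking and hwif re-initialisation from ide_unregister(), the ide.c hunk adds ide_device_get()/ide_device_put(), which pin both the drive's device and the host driver's module. The ide-tape hunks earlier pair them in ide_tape_get()/ide_tape_put(); a hedged sketch of that pairing with hypothetical open/release helpers, not part of the patch.

#include <linux/ide.h>

static int example_open(ide_drive_t *drive)
{
	if (ide_device_get(drive))
		return -ENXIO;		/* drive or LLDD module going away */

	/* ... normal open work ... */
	return 0;
}

static void example_release(ide_drive_t *drive)
{
	/* ... normal release work ... */
	ide_device_put(drive);		/* drop module and device references */
}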
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 0497e7f85b09..7c2afa97f417 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -37,6 +37,8 @@
37#define CATWEASEL_NUM_HWIFS 3 37#define CATWEASEL_NUM_HWIFS 3
38#define XSURF_NUM_HWIFS 2 38#define XSURF_NUM_HWIFS 2
39 39
40#define MAX_NUM_HWIFS 3
41
40 /* 42 /*
41 * Bases of the IDE interfaces (relative to the board address) 43 * Bases of the IDE interfaces (relative to the board address)
42 */ 44 */
@@ -148,18 +150,14 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
148 150
149static int __init buddha_init(void) 151static int __init buddha_init(void)
150{ 152{
151 hw_regs_t hw;
152 ide_hwif_t *hwif;
153 int i;
154
155 struct zorro_dev *z = NULL; 153 struct zorro_dev *z = NULL;
156 u_long buddha_board = 0; 154 u_long buddha_board = 0;
157 BuddhaType type; 155 BuddhaType type;
158 int buddha_num_hwifs; 156 int buddha_num_hwifs, i;
159 157
160 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { 158 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
161 unsigned long board; 159 unsigned long board;
162 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 160 hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
163 161
164 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { 162 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
165 buddha_num_hwifs = BUDDHA_NUM_HWIFS; 163 buddha_num_hwifs = BUDDHA_NUM_HWIFS;
@@ -221,19 +219,13 @@ fail_base2:
221 ack_intr = xsurf_ack_intr; 219 ack_intr = xsurf_ack_intr;
222 } 220 }
223 221
224 buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr); 222 buddha_setup_ports(&hw[i], base, ctl, irq_port,
223 ack_intr);
225 224
226 hwif = ide_find_port(); 225 hws[i] = &hw[i];
227 if (hwif) {
228 u8 index = hwif->index;
229
230 ide_init_port_hw(hwif, &hw);
231
232 idx[i] = index;
233 }
234 } 226 }
235 227
236 ide_device_add(idx, NULL); 228 ide_host_add(NULL, hws, NULL);
237 } 229 }
238 230
239 return 0; 231 return 0;
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 129a812bb57f..724f95073d80 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -66,6 +66,27 @@ static void falconide_output_data(ide_drive_t *drive, struct request *rq,
66 outsw_swapw(data_addr, buf, (len + 1) / 2); 66 outsw_swapw(data_addr, buf, (len + 1) / 2);
67} 67}
68 68
69/* Atari has a byte-swapped IDE interface */
70static const struct ide_tp_ops falconide_tp_ops = {
71 .exec_command = ide_exec_command,
72 .read_status = ide_read_status,
73 .read_altstatus = ide_read_altstatus,
74 .read_sff_dma_status = ide_read_sff_dma_status,
75
76 .set_irq = ide_set_irq,
77
78 .tf_load = ide_tf_load,
79 .tf_read = ide_tf_read,
80
81 .input_data = falconide_input_data,
82 .output_data = falconide_output_data,
83};
84
85static const struct ide_port_info falconide_port_info = {
86 .tp_ops = &falconide_tp_ops,
87 .host_flags = IDE_HFLAG_NO_DMA,
88};
89
69static void __init falconide_setup_ports(hw_regs_t *hw) 90static void __init falconide_setup_ports(hw_regs_t *hw)
70{ 91{
71 int i; 92 int i;
@@ -91,11 +112,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
91 112
92static int __init falconide_init(void) 113static int __init falconide_init(void)
93{ 114{
94 hw_regs_t hw; 115 struct ide_host *host;
95 ide_hwif_t *hwif; 116 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
117 int rc;
96 118
97 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) 119 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
98 return 0; 120 return -ENODEV;
99 121
100 printk(KERN_INFO "ide: Falcon IDE controller\n"); 122 printk(KERN_INFO "ide: Falcon IDE controller\n");
101 123
@@ -106,23 +128,25 @@ static int __init falconide_init(void)
106 128
107 falconide_setup_ports(&hw); 129 falconide_setup_ports(&hw);
108 130
109 hwif = ide_find_port(); 131 host = ide_host_alloc(&falconide_port_info, hws);
110 if (hwif) { 132 if (host == NULL) {
111 u8 index = hwif->index; 133 rc = -ENOMEM;
112 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 134 goto err;
113 135 }
114 ide_init_port_hw(hwif, &hw);
115 136
116 /* Atari has a byte-swapped IDE interface */ 137 ide_get_lock(NULL, NULL);
117 hwif->input_data = falconide_input_data; 138 rc = ide_host_register(host, &falconide_port_info, hws);
118 hwif->output_data = falconide_output_data; 139 ide_release_lock();
119 140
120 ide_get_lock(NULL, NULL); 141 if (rc)
121 ide_device_add(idx, NULL); 142 goto err_free;
122 ide_release_lock();
123 }
124 143
125 return 0; 144 return 0;
145err_free:
146 ide_host_free(host);
147err:
148 release_mem_region(ATA_HD_BASE, 0x40);
149 return rc;
126} 150}
127 151
128module_init(falconide_init); 152module_init(falconide_init);
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index 7e74b20202df..51ba085d7aa8 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -31,6 +31,8 @@
31#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */ 31#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
32#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */ 32#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
33 33
34#define GAYLE_IDEREG_SIZE 0x2000
35
34 /* 36 /*
35 * Offsets from one of the above bases 37 * Offsets from one of the above bases
36 */ 38 */
@@ -56,13 +58,11 @@
56#define GAYLE_NUM_HWIFS 1 58#define GAYLE_NUM_HWIFS 1
57#define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS 59#define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS
58#define GAYLE_HAS_CONTROL_REG 1 60#define GAYLE_HAS_CONTROL_REG 1
59#define GAYLE_IDEREG_SIZE 0x2000
60#else /* CONFIG_BLK_DEV_IDEDOUBLER */ 61#else /* CONFIG_BLK_DEV_IDEDOUBLER */
61#define GAYLE_NUM_HWIFS 2 62#define GAYLE_NUM_HWIFS 2
62#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \ 63#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \
63 GAYLE_NUM_HWIFS-1) 64 GAYLE_NUM_HWIFS-1)
64#define GAYLE_HAS_CONTROL_REG (!ide_doubler) 65#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
65#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
66 66
67static int ide_doubler; 67static int ide_doubler;
68module_param_named(doubler, ide_doubler, bool, 0); 68module_param_named(doubler, ide_doubler, bool, 0);
@@ -124,8 +124,11 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
124 124
125static int __init gayle_init(void) 125static int __init gayle_init(void)
126{ 126{
127 int a4000, i; 127 unsigned long phys_base, res_start, res_n;
128 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 128 unsigned long base, ctrlport, irqport;
129 ide_ack_intr_t *ack_intr;
130 int a4000, i, rc;
131 hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
129 132
130 if (!MACH_IS_AMIGA) 133 if (!MACH_IS_AMIGA)
131 return -ENODEV; 134 return -ENODEV;
@@ -148,13 +151,6 @@ found:
148#endif 151#endif
149 ""); 152 "");
150 153
151 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
152 unsigned long base, ctrlport, irqport;
153 ide_ack_intr_t *ack_intr;
154 hw_regs_t hw;
155 ide_hwif_t *hwif;
156 unsigned long phys_base, res_start, res_n;
157
158 if (a4000) { 154 if (a4000) {
159 phys_base = GAYLE_BASE_4000; 155 phys_base = GAYLE_BASE_4000;
160 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); 156 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
@@ -168,33 +164,26 @@ found:
168 * FIXME: we now have selectable modes between mmio v/s iomio 164 * FIXME: we now have selectable modes between mmio v/s iomio
169 */ 165 */
170 166
171 phys_base += i*GAYLE_NEXT_PORT;
172
173 res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); 167 res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
174 res_n = GAYLE_IDEREG_SIZE; 168 res_n = GAYLE_IDEREG_SIZE;
175 169
176 if (!request_mem_region(res_start, res_n, "IDE")) 170 if (!request_mem_region(res_start, res_n, "IDE"))
177 continue; 171 return -EBUSY;
178 172
179 base = (unsigned long)ZTWO_VADDR(phys_base); 173 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
174 base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
180 ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; 175 ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
181 176
182 gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr); 177 gayle_setup_ports(&hw[i], base, ctrlport, irqport, ack_intr);
183
184 hwif = ide_find_port();
185 if (hwif) {
186 u8 index = hwif->index;
187
188 ide_init_port_hw(hwif, &hw);
189 178
190 idx[i] = index; 179 hws[i] = &hw[i];
191 } else
192 release_mem_region(res_start, res_n);
193 } 180 }
194 181
195 ide_device_add(idx, NULL); 182 rc = ide_host_add(NULL, hws, NULL);
183 if (rc)
184 release_mem_region(res_start, res_n);
196 185
197 return 0; 186 return rc;
198} 187}
199 188
200module_init(gayle_init); 189module_init(gayle_init);
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index 7bc8fd59ea9e..98f7c95e39ed 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -3,34 +3,12 @@
3 */ 3 */
4 4
5/* 5/*
6 *
7 * Version 0.01 Initial version hacked out of ide.c
8 *
9 * Version 0.02 Added support for PIO modes, auto-tune
10 *
11 * Version 0.03 Some cleanups
12 *
13 * Version 0.05 PIO mode cycle timings auto-tune using bus-speed
14 *
15 * Version 0.06 Prefetch mode now defaults no OFF. To set
16 * prefetch mode OFF/ON use "hdparm -p8/-p9".
17 * Unmask irq is disabled when prefetch mode
18 * is enabled.
19 *
20 * Version 0.07 Trying to fix CD-ROM detection problem.
21 * "Prefetch" mode bit OFF for ide disks and
22 * ON for anything else.
23 *
24 * Version 0.08 Need to force prefetch for CDs and other non-disk
25 * devices. (not sure which devices exactly need
26 * prefetch)
27 *
28 * HT-6560B EIDE-controller support 6 * HT-6560B EIDE-controller support
29 * To activate controller support use kernel parameter "ide0=ht6560b". 7 * To activate controller support use kernel parameter "ide0=ht6560b".
30 * Use hdparm utility to enable PIO mode support. 8 * Use hdparm utility to enable PIO mode support.
31 * 9 *
32 * Author: Mikko Ala-Fossi <maf@iki.fi> 10 * Author: Mikko Ala-Fossi <maf@iki.fi>
33 * Jan Evert van Grootheest <janevert@caiway.nl> 11 * Jan Evert van Grootheest <j.e.van.grootheest@caiway.nl>
34 * 12 *
35 * Try: http://www.maf.iki.fi/~maf/ht6560b/ 13 * Try: http://www.maf.iki.fi/~maf/ht6560b/
36 */ 14 */
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index 89c8ff0a4d08..c76d55de6996 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -28,10 +28,8 @@ static const struct ide_port_info ide_4drives_port_info = {
28 28
29static int __init ide_4drives_init(void) 29static int __init ide_4drives_init(void)
30{ 30{
31 ide_hwif_t *hwif, *mate;
32 unsigned long base = 0x1f0, ctl = 0x3f6; 31 unsigned long base = 0x1f0, ctl = 0x3f6;
33 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 32 hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };
34 hw_regs_t hw;
35 33
36 if (probe_4drives == 0) 34 if (probe_4drives == 0)
37 return -ENODEV; 35 return -ENODEV;
@@ -55,21 +53,7 @@ static int __init ide_4drives_init(void)
55 hw.irq = 14; 53 hw.irq = 14;
56 hw.chipset = ide_4drives; 54 hw.chipset = ide_4drives;
57 55
58 hwif = ide_find_port(); 56 return ide_host_add(&ide_4drives_port_info, hws, NULL);
59 if (hwif) {
60 ide_init_port_hw(hwif, &hw);
61 idx[0] = hwif->index;
62 }
63
64 mate = ide_find_port();
65 if (mate) {
66 ide_init_port_hw(mate, &hw);
67 idx[1] = mate->index;
68 }
69
70 ide_device_add(idx, &ide_4drives_port_info);
71
72 return 0;
73} 57}
74 58
75module_init(ide_4drives_init); 59module_init(ide_4drives_init);
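
One detail worth calling out in the ide-4drives.c hunk: the "four drives on one cable" hack maps both ports onto the same taskfile registers, and the new interface expresses that by pointing two hws[] slots at the same hw_regs_t. With that, the body of the init function reduces to roughly the fragment below (a sketch only; ide_std_init_ports() stands in for the register setup performed in the context not shown here):

        unsigned long base = 0x1f0, ctl = 0x3f6;
        hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };    /* one hw, two ports */

        memset(&hw, 0, sizeof(hw));
        ide_std_init_ports(&hw, base, ctl);
        hw.irq = 14;
        hw.chipset = ide_4drives;

        return ide_host_add(&ide_4drives_port_info, hws, NULL);

Registering the same hw twice yields the two interfaces that the removed code previously set up by hand through the explicit hwif/mate pairing.
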
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 27b1e0b7ecb4..21bfac137844 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -74,7 +74,7 @@ INT_MODULE_PARM(pc_debug, 0);
74 74
75typedef struct ide_info_t { 75typedef struct ide_info_t {
76 struct pcmcia_device *p_dev; 76 struct pcmcia_device *p_dev;
77 ide_hwif_t *hwif; 77 struct ide_host *host;
78 int ndev; 78 int ndev;
79 dev_node_t node; 79 dev_node_t node;
80} ide_info_t; 80} ide_info_t;
@@ -132,7 +132,7 @@ static int ide_probe(struct pcmcia_device *link)
132static void ide_detach(struct pcmcia_device *link) 132static void ide_detach(struct pcmcia_device *link)
133{ 133{
134 ide_info_t *info = link->priv; 134 ide_info_t *info = link->priv;
135 ide_hwif_t *hwif = info->hwif; 135 ide_hwif_t *hwif = info->host->ports[0];
136 unsigned long data_addr, ctl_addr; 136 unsigned long data_addr, ctl_addr;
137 137
138 DEBUG(0, "ide_detach(0x%p)\n", link); 138 DEBUG(0, "ide_detach(0x%p)\n", link);
@@ -157,13 +157,13 @@ static const struct ide_port_info idecs_port_info = {
157 .host_flags = IDE_HFLAG_NO_DMA, 157 .host_flags = IDE_HFLAG_NO_DMA,
158}; 158};
159 159
160static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, 160static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
161 unsigned long irq, struct pcmcia_device *handle) 161 unsigned long irq, struct pcmcia_device *handle)
162{ 162{
163 struct ide_host *host;
163 ide_hwif_t *hwif; 164 ide_hwif_t *hwif;
164 hw_regs_t hw; 165 int i, rc;
165 int i; 166 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
166 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
167 167
168 if (!request_region(io, 8, DRV_NAME)) { 168 if (!request_region(io, 8, DRV_NAME)) {
169 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", 169 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -184,30 +184,24 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
184 hw.chipset = ide_pci; 184 hw.chipset = ide_pci;
185 hw.dev = &handle->dev; 185 hw.dev = &handle->dev;
186 186
187 hwif = ide_find_port(); 187 rc = ide_host_add(&idecs_port_info, hws, &host);
188 if (hwif == NULL) 188 if (rc)
189 goto out_release; 189 goto out_release;
190 190
191 i = hwif->index; 191 hwif = host->ports[0];
192
193 ide_init_port_hw(hwif, &hw);
194
195 idx[0] = i;
196
197 ide_device_add(idx, &idecs_port_info);
198 192
199 if (hwif->present) 193 if (hwif->present)
200 return hwif; 194 return host;
201 195
202 /* retry registration in case device is still spinning up */ 196 /* retry registration in case device is still spinning up */
203 for (i = 0; i < 10; i++) { 197 for (i = 0; i < 10; i++) {
204 msleep(100); 198 msleep(100);
205 ide_port_scan(hwif); 199 ide_port_scan(hwif);
206 if (hwif->present) 200 if (hwif->present)
207 return hwif; 201 return host;
208 } 202 }
209 203
210 return hwif; 204 return host;
211 205
212out_release: 206out_release:
213 release_region(ctl, 1); 207 release_region(ctl, 1);
@@ -239,7 +233,7 @@ static int ide_config(struct pcmcia_device *link)
239 cistpl_cftable_entry_t *cfg; 233 cistpl_cftable_entry_t *cfg;
240 int pass, last_ret = 0, last_fn = 0, is_kme = 0; 234 int pass, last_ret = 0, last_fn = 0, is_kme = 0;
241 unsigned long io_base, ctl_base; 235 unsigned long io_base, ctl_base;
242 ide_hwif_t *hwif; 236 struct ide_host *host;
243 237
244 DEBUG(0, "ide_config(0x%p)\n", link); 238 DEBUG(0, "ide_config(0x%p)\n", link);
245 239
@@ -334,21 +328,21 @@ static int ide_config(struct pcmcia_device *link)
334 if (is_kme) 328 if (is_kme)
335 outb(0x81, ctl_base+1); 329 outb(0x81, ctl_base+1);
336 330
337 hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link); 331 host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
338 if (hwif == NULL && link->io.NumPorts1 == 0x20) { 332 if (host == NULL && link->io.NumPorts1 == 0x20) {
339 outb(0x02, ctl_base + 0x10); 333 outb(0x02, ctl_base + 0x10);
340 hwif = idecs_register(io_base + 0x10, ctl_base + 0x10, 334 host = idecs_register(io_base + 0x10, ctl_base + 0x10,
341 link->irq.AssignedIRQ, link); 335 link->irq.AssignedIRQ, link);
342 } 336 }
343 337
344 if (hwif == NULL) 338 if (host == NULL)
345 goto failed; 339 goto failed;
346 340
347 info->ndev = 1; 341 info->ndev = 1;
348 sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2); 342 sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
349 info->node.major = hwif->major; 343 info->node.major = host->ports[0]->major;
350 info->node.minor = 0; 344 info->node.minor = 0;
351 info->hwif = hwif; 345 info->host = host;
352 link->dev_node = &info->node; 346 link->dev_node = &info->node;
353 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n", 347 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
354 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10); 348 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -379,15 +373,15 @@ failed:
379static void ide_release(struct pcmcia_device *link) 373static void ide_release(struct pcmcia_device *link)
380{ 374{
381 ide_info_t *info = link->priv; 375 ide_info_t *info = link->priv;
382 ide_hwif_t *hwif = info->hwif; 376 struct ide_host *host = info->host;
383 377
384 DEBUG(0, "ide_release(0x%p)\n", link); 378 DEBUG(0, "ide_release(0x%p)\n", link);
385 379
386 if (info->ndev) { 380 if (info->ndev)
387 /* FIXME: if this fails we need to queue the cleanup somehow 381 /* FIXME: if this fails we need to queue the cleanup somehow
388 -- need to investigate the required PCMCIA magic */ 382 -- need to investigate the required PCMCIA magic */
389 ide_unregister(hwif); 383 ide_host_remove(host);
390 } 384
391 info->ndev = 0; 385 info->ndev = 0;
392 386
393 pcmcia_disable_device(link); 387 pcmcia_disable_device(link);
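
In ide-cs.c the driver-private state switches from an ide_hwif_t pointer to a struct ide_host pointer: the hwif is reached through host->ports[0], and teardown goes through ide_host_remove(). A sketch of the new shape of the registration helper follows, assuming the file-local idecs_port_info shown above; my_register_port() and the surrounding hw setup are placeholders.

static struct ide_host *my_register_port(hw_regs_t *hw)
{
        hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
        struct ide_host *host;
        ide_hwif_t *hwif;
        int i;

        if (ide_host_add(&idecs_port_info, hws, &host))
                return NULL;            /* caller releases the I/O regions */

        hwif = host->ports[0];          /* the hwif now lives inside the host */

        /* give a card that is still spinning up a few more chances */
        for (i = 0; i < 10 && !hwif->present; i++) {
                msleep(100);
                ide_port_scan(hwif);
        }

        return host;
}

The caller then keeps the host in ide_info_t and reads the device name and major number out of host->ports[0], exactly as the ide_config() hunk does.
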
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index a249562b34b5..051b4ab0f359 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -52,12 +52,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
52{ 52{
53 struct resource *res_base, *res_alt, *res_irq; 53 struct resource *res_base, *res_alt, *res_irq;
54 void __iomem *base, *alt_base; 54 void __iomem *base, *alt_base;
55 ide_hwif_t *hwif;
56 struct pata_platform_info *pdata; 55 struct pata_platform_info *pdata;
57 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 56 struct ide_host *host;
58 int ret = 0; 57 int ret = 0, mmio = 0;
59 int mmio = 0; 58 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
60 hw_regs_t hw;
61 struct ide_port_info d = platform_ide_port_info; 59 struct ide_port_info d = platform_ide_port_info;
62 60
63 pdata = pdev->dev.platform_data; 61 pdata = pdev->dev.platform_data;
@@ -94,28 +92,18 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
94 res_alt->start, res_alt->end - res_alt->start + 1); 92 res_alt->start, res_alt->end - res_alt->start + 1);
95 } 93 }
96 94
97 hwif = ide_find_port();
98 if (!hwif) {
99 ret = -ENODEV;
100 goto out;
101 }
102
103 memset(&hw, 0, sizeof(hw)); 95 memset(&hw, 0, sizeof(hw));
104 plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); 96 plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
105 hw.dev = &pdev->dev; 97 hw.dev = &pdev->dev;
106 98
107 ide_init_port_hw(hwif, &hw); 99 if (mmio)
108
109 if (mmio) {
110 d.host_flags |= IDE_HFLAG_MMIO; 100 d.host_flags |= IDE_HFLAG_MMIO;
111 default_hwif_mmiops(hwif);
112 }
113 101
114 idx[0] = hwif->index; 102 ret = ide_host_add(&d, hws, &host);
115 103 if (ret)
116 ide_device_add(idx, &d); 104 goto out;
117 105
118 platform_set_drvdata(pdev, hwif); 106 platform_set_drvdata(pdev, host);
119 107
120 return 0; 108 return 0;
121 109
@@ -125,9 +113,9 @@ out:
125 113
126static int __devexit plat_ide_remove(struct platform_device *pdev) 114static int __devexit plat_ide_remove(struct platform_device *pdev)
127{ 115{
128 ide_hwif_t *hwif = pdev->dev.driver_data; 116 struct ide_host *host = pdev->dev.driver_data;
129 117
130 ide_unregister(hwif); 118 ide_host_remove(host);
131 119
132 return 0; 120 return 0;
133} 121}
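
The ide_platform.c change is the usual probe/remove symmetry under the new interface: probe hands a single-slot hws[] to ide_host_add(), stashes the resulting struct ide_host in drvdata, and remove undoes it with ide_host_remove(). A condensed sketch, with my_ide_probe/my_ide_remove as placeholder names and the resource handling elided; the calls follow the hunk:

static int __devinit my_ide_probe(struct platform_device *pdev)
{
        hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
        struct ide_host *host;
        int ret;

        memset(&hw, 0, sizeof(hw));
        /* map the platform resources and fill in hw's ports and IRQ here */
        hw.dev = &pdev->dev;

        ret = ide_host_add(&platform_ide_port_info, hws, &host);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, host);       /* keep the host, not a hwif */
        return 0;
}

static int __devexit my_ide_remove(struct platform_device *pdev)
{
        struct ide_host *host = platform_get_drvdata(pdev);

        ide_host_remove(host);
        return 0;
}
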
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 0a6195bcfeda..a0bb167980e7 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -91,11 +91,10 @@ static const char *mac_ide_name[] =
91 91
92static int __init macide_init(void) 92static int __init macide_init(void)
93{ 93{
94 ide_hwif_t *hwif;
95 ide_ack_intr_t *ack_intr; 94 ide_ack_intr_t *ack_intr;
96 unsigned long base; 95 unsigned long base;
97 int irq; 96 int irq;
98 hw_regs_t hw; 97 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
99 98
100 if (!MACH_IS_MAC) 99 if (!MACH_IS_MAC)
101 return -ENODEV; 100 return -ENODEV;
@@ -125,17 +124,7 @@ static int __init macide_init(void)
125 124
126 macide_setup_ports(&hw, base, irq, ack_intr); 125 macide_setup_ports(&hw, base, irq, ack_intr);
127 126
128 hwif = ide_find_port(); 127 return ide_host_add(NULL, hws, NULL);
129 if (hwif) {
130 u8 index = hwif->index;
131 u8 idx[4] = { index, 0xff, 0xff, 0xff };
132
133 ide_init_port_hw(hwif, &hw);
134
135 ide_device_add(idx, NULL);
136 }
137
138 return 0;
139} 128}
140 129
141module_init(macide_init); 130module_init(macide_init);
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 9c2b9d078f69..4abd8fc78197 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -96,6 +96,27 @@ static void q40ide_output_data(ide_drive_t *drive, struct request *rq,
96 outsw_swapw(data_addr, buf, (len + 1) / 2); 96 outsw_swapw(data_addr, buf, (len + 1) / 2);
97} 97}
98 98
99/* Q40 has a byte-swapped IDE interface */
100static const struct ide_tp_ops q40ide_tp_ops = {
101 .exec_command = ide_exec_command,
102 .read_status = ide_read_status,
103 .read_altstatus = ide_read_altstatus,
104 .read_sff_dma_status = ide_read_sff_dma_status,
105
106 .set_irq = ide_set_irq,
107
108 .tf_load = ide_tf_load,
109 .tf_read = ide_tf_read,
110
111 .input_data = q40ide_input_data,
112 .output_data = q40ide_output_data,
113};
114
115static const struct ide_port_info q40ide_port_info = {
116 .tp_ops = &q40ide_tp_ops,
117 .host_flags = IDE_HFLAG_NO_DMA,
118};
119
99/* 120/*
100 * the static array is needed to have the name reported in /proc/ioports, 121 * the static array is needed to have the name reported in /proc/ioports,
101 * hwif->name unfortunately isn't available yet 122 * hwif->name unfortunately isn't available yet
@@ -111,9 +132,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
111static int __init q40ide_init(void) 132static int __init q40ide_init(void)
112{ 133{
113 int i; 134 int i;
114 ide_hwif_t *hwif; 135 hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
115 const char *name;
116 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
117 136
118 if (!MACH_IS_Q40) 137 if (!MACH_IS_Q40)
119 return -ENODEV; 138 return -ENODEV;
@@ -121,9 +140,8 @@ static int __init q40ide_init(void)
121 printk(KERN_INFO "ide: Q40 IDE controller\n"); 140 printk(KERN_INFO "ide: Q40 IDE controller\n");
122 141
123 for (i = 0; i < Q40IDE_NUM_HWIFS; i++) { 142 for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
124 hw_regs_t hw; 143 const char *name = q40_ide_names[i];
125 144
126 name = q40_ide_names[i];
127 if (!request_region(pcide_bases[i], 8, name)) { 145 if (!request_region(pcide_bases[i], 8, name)) {
128 printk("could not reserve ports %lx-%lx for %s\n", 146 printk("could not reserve ports %lx-%lx for %s\n",
129 pcide_bases[i],pcide_bases[i]+8,name); 147 pcide_bases[i],pcide_bases[i]+8,name);
@@ -135,26 +153,13 @@ static int __init q40ide_init(void)
135 release_region(pcide_bases[i], 8); 153 release_region(pcide_bases[i], 8);
136 continue; 154 continue;
137 } 155 }
138 q40_ide_setup_ports(&hw, pcide_bases[i], 156 q40_ide_setup_ports(&hw[i], pcide_bases[i], NULL,
139 NULL,
140// m68kide_iops,
141 q40ide_default_irq(pcide_bases[i])); 157 q40ide_default_irq(pcide_bases[i]));
142 158
143 hwif = ide_find_port(); 159 hws[i] = &hw[i];
144 if (hwif) {
145 ide_init_port_hw(hwif, &hw);
146
147 /* Q40 has a byte-swapped IDE interface */
148 hwif->input_data = q40ide_input_data;
149 hwif->output_data = q40ide_output_data;
150
151 idx[i] = hwif->index;
152 }
153 } 160 }
154 161
155 ide_device_add(idx, NULL); 162 return ide_host_add(&q40ide_port_info, hws, NULL);
156
157 return 0;
158} 163}
159 164
160module_init(q40ide_init); 165module_init(q40ide_init);
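
q40ide.c shows the other half of the rework: instead of patching hwif->input_data/output_data after ide_find_port(), the byte-swapped transfer routines are declared once in a const struct ide_tp_ops and attached to the port_info passed to ide_host_add(). A sketch of that pattern with placeholder mydrv_* names; the ide_* defaults and the field layout are taken from the hunk, and the data-hook signatures follow q40ide_output_data() above.

static void mydrv_input_data(ide_drive_t *drive, struct request *rq,
                             void *buf, unsigned int len);
static void mydrv_output_data(ide_drive_t *drive, struct request *rq,
                              void *buf, unsigned int len);

static const struct ide_tp_ops mydrv_tp_ops = {
        .exec_command           = ide_exec_command,
        .read_status            = ide_read_status,
        .read_altstatus         = ide_read_altstatus,
        .read_sff_dma_status    = ide_read_sff_dma_status,

        .set_irq                = ide_set_irq,

        .tf_load                = ide_tf_load,
        .tf_read                = ide_tf_read,

        /* only the data-transfer hooks differ from the defaults */
        .input_data             = mydrv_input_data,
        .output_data            = mydrv_output_data,
};

static const struct ide_port_info mydrv_port_info = {
        .tp_ops         = &mydrv_tp_ops,
        .host_flags     = IDE_HFLAG_NO_DMA,
};

The init function then simply passes the port_info to ide_host_add(), so the custom hooks are installed before the probe runs.
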
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 48d57cae63c6..11b7f61aae40 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -519,6 +519,23 @@ static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
519 *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT); 519 *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
520} 520}
521 521
522#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
523static const struct ide_tp_ops au1xxx_tp_ops = {
524 .exec_command = ide_exec_command,
525 .read_status = ide_read_status,
526 .read_altstatus = ide_read_altstatus,
527 .read_sff_dma_status = ide_read_sff_dma_status,
528
529 .set_irq = ide_set_irq,
530
531 .tf_load = ide_tf_load,
532 .tf_read = ide_tf_read,
533
534 .input_data = au1xxx_input_data,
535 .output_data = au1xxx_output_data,
536};
537#endif
538
522static const struct ide_port_ops au1xxx_port_ops = { 539static const struct ide_port_ops au1xxx_port_ops = {
523 .set_pio_mode = au1xxx_set_pio_mode, 540 .set_pio_mode = au1xxx_set_pio_mode,
524 .set_dma_mode = auide_set_dma_mode, 541 .set_dma_mode = auide_set_dma_mode,
@@ -526,6 +543,9 @@ static const struct ide_port_ops au1xxx_port_ops = {
526 543
527static const struct ide_port_info au1xxx_port_info = { 544static const struct ide_port_info au1xxx_port_info = {
528 .init_dma = auide_ddma_init, 545 .init_dma = auide_ddma_init,
546#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
547 .tp_ops = &au1xxx_tp_ops,
548#endif
529 .port_ops = &au1xxx_port_ops, 549 .port_ops = &au1xxx_port_ops,
530#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 550#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
531 .dma_ops = &au1xxx_dma_ops, 551 .dma_ops = &au1xxx_dma_ops,
@@ -543,11 +563,10 @@ static int au_ide_probe(struct device *dev)
543{ 563{
544 struct platform_device *pdev = to_platform_device(dev); 564 struct platform_device *pdev = to_platform_device(dev);
545 _auide_hwif *ahwif = &auide_hwif; 565 _auide_hwif *ahwif = &auide_hwif;
546 ide_hwif_t *hwif;
547 struct resource *res; 566 struct resource *res;
567 struct ide_host *host;
548 int ret = 0; 568 int ret = 0;
549 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 569 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
550 hw_regs_t hw;
551 570
552#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) 571#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
553 char *mode = "MWDMA2"; 572 char *mode = "MWDMA2";
@@ -584,36 +603,19 @@ static int au_ide_probe(struct device *dev)
584 goto out; 603 goto out;
585 } 604 }
586 605
587 hwif = ide_find_port();
588 if (hwif == NULL) {
589 ret = -ENOENT;
590 goto out;
591 }
592
593 memset(&hw, 0, sizeof(hw)); 606 memset(&hw, 0, sizeof(hw));
594 auide_setup_ports(&hw, ahwif); 607 auide_setup_ports(&hw, ahwif);
595 hw.irq = ahwif->irq; 608 hw.irq = ahwif->irq;
596 hw.dev = dev; 609 hw.dev = dev;
597 hw.chipset = ide_au1xxx; 610 hw.chipset = ide_au1xxx;
598 611
599 ide_init_port_hw(hwif, &hw); 612 ret = ide_host_add(&au1xxx_port_info, hws, &host);
600 613 if (ret)
601 /* If the user has selected DDMA assisted copies, 614 goto out;
602 then set up a few local I/O function entry points
603 */
604
605#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
606 hwif->input_data = au1xxx_input_data;
607 hwif->output_data = au1xxx_output_data;
608#endif
609
610 auide_hwif.hwif = hwif;
611
612 idx[0] = hwif->index;
613 615
614 ide_device_add(idx, &au1xxx_port_info); 616 auide_hwif.hwif = host->ports[0];
615 617
616 dev_set_drvdata(dev, hwif); 618 dev_set_drvdata(dev, host);
617 619
618 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); 620 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
619 621
@@ -625,10 +627,10 @@ static int au_ide_remove(struct device *dev)
625{ 627{
626 struct platform_device *pdev = to_platform_device(dev); 628 struct platform_device *pdev = to_platform_device(dev);
627 struct resource *res; 629 struct resource *res;
628 ide_hwif_t *hwif = dev_get_drvdata(dev); 630 struct ide_host *host = dev_get_drvdata(dev);
629 _auide_hwif *ahwif = &auide_hwif; 631 _auide_hwif *ahwif = &auide_hwif;
630 632
631 ide_unregister(hwif); 633 ide_host_remove(host);
632 634
633 iounmap((void *)ahwif->regbase); 635 iounmap((void *)ahwif->regbase);
634 636
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 9f1212cc4aed..badf79fc9e3a 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -72,12 +72,11 @@ static const struct ide_port_info swarm_port_info = {
72 */ 72 */
73static int __devinit swarm_ide_probe(struct device *dev) 73static int __devinit swarm_ide_probe(struct device *dev)
74{ 74{
75 ide_hwif_t *hwif;
76 u8 __iomem *base; 75 u8 __iomem *base;
76 struct ide_host *host;
77 phys_t offset, size; 77 phys_t offset, size;
78 hw_regs_t hw; 78 int i, rc;
79 int i; 79 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
80 u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
81 80
82 if (!SIBYTE_HAVE_IDE) 81 if (!SIBYTE_HAVE_IDE)
83 return -ENODEV; 82 return -ENODEV;
@@ -116,26 +115,17 @@ static int __devinit swarm_ide_probe(struct device *dev)
116 hw.irq = K_INT_GB_IDE; 115 hw.irq = K_INT_GB_IDE;
117 hw.chipset = ide_generic; 116 hw.chipset = ide_generic;
118 117
119 hwif = ide_find_port_slot(&swarm_port_info); 118 rc = ide_host_add(&swarm_port_info, hws, &host);
120 if (hwif == NULL) 119 if (rc)
121 goto err; 120 goto err;
122 121
123 ide_init_port_hw(hwif, &hw); 122 dev_set_drvdata(dev, host);
124
125 /* Setup MMIO ops. */
126 default_hwif_mmiops(hwif);
127
128 idx[0] = hwif->index;
129
130 ide_device_add(idx, &swarm_port_info);
131
132 dev_set_drvdata(dev, hwif);
133 123
134 return 0; 124 return 0;
135err: 125err:
136 release_resource(&swarm_ide_resource); 126 release_resource(&swarm_ide_resource);
137 iounmap(base); 127 iounmap(base);
138 return -ENOMEM; 128 return rc;
139} 129}
140 130
141static struct device_driver swarm_ide_driver = { 131static struct device_driver swarm_ide_driver = {
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index ae7a4329a581..e0c8fe7d9fea 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -13,6 +13,8 @@
13 13
14#include <asm/io.h> 14#include <asm/io.h>
15 15
16#define DRV_NAME "aec62xx"
17
16struct chipset_bus_clock_list_entry { 18struct chipset_bus_clock_list_entry {
17 u8 xfer_speed; 19 u8 xfer_speed;
18 u8 chipset_settings; 20 u8 chipset_settings;
@@ -59,10 +61,6 @@ static const struct chipset_bus_clock_list_entry aec6xxx_34_base [] = {
59 { 0, 0x00, 0x00 } 61 { 0, 0x00, 0x00 }
60}; 62};
61 63
62#define BUSCLOCK(D) \
63 ((struct chipset_bus_clock_list_entry *) pci_get_drvdata((D)))
64
65
66/* 64/*
67 * TO DO: active tuning and correction of cards without a bios. 65 * TO DO: active tuning and correction of cards without a bios.
68 */ 66 */
@@ -88,6 +86,8 @@ static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
88{ 86{
89 ide_hwif_t *hwif = HWIF(drive); 87 ide_hwif_t *hwif = HWIF(drive);
90 struct pci_dev *dev = to_pci_dev(hwif->dev); 88 struct pci_dev *dev = to_pci_dev(hwif->dev);
89 struct ide_host *host = pci_get_drvdata(dev);
90 struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
91 u16 d_conf = 0; 91 u16 d_conf = 0;
92 u8 ultra = 0, ultra_conf = 0; 92 u8 ultra = 0, ultra_conf = 0;
93 u8 tmp0 = 0, tmp1 = 0, tmp2 = 0; 93 u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
@@ -96,7 +96,7 @@ static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
96 local_irq_save(flags); 96 local_irq_save(flags);
97 /* 0x40|(2*drive->dn): Active, 0x41|(2*drive->dn): Recovery */ 97 /* 0x40|(2*drive->dn): Active, 0x41|(2*drive->dn): Recovery */
98 pci_read_config_word(dev, 0x40|(2*drive->dn), &d_conf); 98 pci_read_config_word(dev, 0x40|(2*drive->dn), &d_conf);
99 tmp0 = pci_bus_clock_list(speed, BUSCLOCK(dev)); 99 tmp0 = pci_bus_clock_list(speed, bus_clock);
100 d_conf = ((tmp0 & 0xf0) << 4) | (tmp0 & 0xf); 100 d_conf = ((tmp0 & 0xf0) << 4) | (tmp0 & 0xf);
101 pci_write_config_word(dev, 0x40|(2*drive->dn), d_conf); 101 pci_write_config_word(dev, 0x40|(2*drive->dn), d_conf);
102 102
@@ -104,7 +104,7 @@ static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
104 tmp2 = 0x00; 104 tmp2 = 0x00;
105 pci_read_config_byte(dev, 0x54, &ultra); 105 pci_read_config_byte(dev, 0x54, &ultra);
106 tmp1 = ((0x00 << (2*drive->dn)) | (ultra & ~(3 << (2*drive->dn)))); 106 tmp1 = ((0x00 << (2*drive->dn)) | (ultra & ~(3 << (2*drive->dn))));
107 ultra_conf = pci_bus_clock_list_ultra(speed, BUSCLOCK(dev)); 107 ultra_conf = pci_bus_clock_list_ultra(speed, bus_clock);
108 tmp2 = ((ultra_conf << (2*drive->dn)) | (tmp1 & ~(3 << (2*drive->dn)))); 108 tmp2 = ((ultra_conf << (2*drive->dn)) | (tmp1 & ~(3 << (2*drive->dn))));
109 pci_write_config_byte(dev, 0x54, tmp2); 109 pci_write_config_byte(dev, 0x54, tmp2);
110 local_irq_restore(flags); 110 local_irq_restore(flags);
@@ -114,6 +114,8 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
114{ 114{
115 ide_hwif_t *hwif = HWIF(drive); 115 ide_hwif_t *hwif = HWIF(drive);
116 struct pci_dev *dev = to_pci_dev(hwif->dev); 116 struct pci_dev *dev = to_pci_dev(hwif->dev);
117 struct ide_host *host = pci_get_drvdata(dev);
118 struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
117 u8 unit = (drive->select.b.unit & 0x01); 119 u8 unit = (drive->select.b.unit & 0x01);
118 u8 tmp1 = 0, tmp2 = 0; 120 u8 tmp1 = 0, tmp2 = 0;
119 u8 ultra = 0, drive_conf = 0, ultra_conf = 0; 121 u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
@@ -122,12 +124,12 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
122 local_irq_save(flags); 124 local_irq_save(flags);
123 /* high 4-bits: Active, low 4-bits: Recovery */ 125 /* high 4-bits: Active, low 4-bits: Recovery */
124 pci_read_config_byte(dev, 0x40|drive->dn, &drive_conf); 126 pci_read_config_byte(dev, 0x40|drive->dn, &drive_conf);
125 drive_conf = pci_bus_clock_list(speed, BUSCLOCK(dev)); 127 drive_conf = pci_bus_clock_list(speed, bus_clock);
126 pci_write_config_byte(dev, 0x40|drive->dn, drive_conf); 128 pci_write_config_byte(dev, 0x40|drive->dn, drive_conf);
127 129
128 pci_read_config_byte(dev, (0x44|hwif->channel), &ultra); 130 pci_read_config_byte(dev, (0x44|hwif->channel), &ultra);
129 tmp1 = ((0x00 << (4*unit)) | (ultra & ~(7 << (4*unit)))); 131 tmp1 = ((0x00 << (4*unit)) | (ultra & ~(7 << (4*unit))));
130 ultra_conf = pci_bus_clock_list_ultra(speed, BUSCLOCK(dev)); 132 ultra_conf = pci_bus_clock_list_ultra(speed, bus_clock);
131 tmp2 = ((ultra_conf << (4*unit)) | (tmp1 & ~(7 << (4*unit)))); 133 tmp2 = ((ultra_conf << (4*unit)) | (tmp1 & ~(7 << (4*unit))));
132 pci_write_config_byte(dev, (0x44|hwif->channel), tmp2); 134 pci_write_config_byte(dev, (0x44|hwif->channel), tmp2);
133 local_irq_restore(flags); 135 local_irq_restore(flags);
@@ -138,15 +140,8 @@ static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
138 drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0); 140 drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0);
139} 141}
140 142
141static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name) 143static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev)
142{ 144{
143 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
144
145 if (bus_speed <= 33)
146 pci_set_drvdata(dev, (void *) aec6xxx_33_base);
147 else
148 pci_set_drvdata(dev, (void *) aec6xxx_34_base);
149
150 /* These are necessary to get AEC6280 Macintosh cards to work */ 145 /* These are necessary to get AEC6280 Macintosh cards to work */
151 if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) || 146 if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) ||
152 (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)) { 147 (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)) {
@@ -187,57 +182,56 @@ static const struct ide_port_ops atp86x_port_ops = {
187}; 182};
188 183
189static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { 184static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
190 { /* 0 */ 185 { /* 0: AEC6210 */
191 .name = "AEC6210", 186 .name = DRV_NAME,
192 .init_chipset = init_chipset_aec62xx, 187 .init_chipset = init_chipset_aec62xx,
193 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 188 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
194 .port_ops = &atp850_port_ops, 189 .port_ops = &atp850_port_ops,
195 .host_flags = IDE_HFLAG_SERIALIZE | 190 .host_flags = IDE_HFLAG_SERIALIZE |
196 IDE_HFLAG_NO_ATAPI_DMA | 191 IDE_HFLAG_NO_ATAPI_DMA |
197 IDE_HFLAG_NO_DSC | 192 IDE_HFLAG_NO_DSC |
198 IDE_HFLAG_ABUSE_SET_DMA_MODE |
199 IDE_HFLAG_OFF_BOARD, 193 IDE_HFLAG_OFF_BOARD,
200 .pio_mask = ATA_PIO4, 194 .pio_mask = ATA_PIO4,
201 .mwdma_mask = ATA_MWDMA2, 195 .mwdma_mask = ATA_MWDMA2,
202 .udma_mask = ATA_UDMA2, 196 .udma_mask = ATA_UDMA2,
203 },{ /* 1 */ 197 },
204 .name = "AEC6260", 198 { /* 1: AEC6260 */
199 .name = DRV_NAME,
205 .init_chipset = init_chipset_aec62xx, 200 .init_chipset = init_chipset_aec62xx,
206 .port_ops = &atp86x_port_ops, 201 .port_ops = &atp86x_port_ops,
207 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA | 202 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
208 IDE_HFLAG_ABUSE_SET_DMA_MODE |
209 IDE_HFLAG_OFF_BOARD, 203 IDE_HFLAG_OFF_BOARD,
210 .pio_mask = ATA_PIO4, 204 .pio_mask = ATA_PIO4,
211 .mwdma_mask = ATA_MWDMA2, 205 .mwdma_mask = ATA_MWDMA2,
212 .udma_mask = ATA_UDMA4, 206 .udma_mask = ATA_UDMA4,
213 },{ /* 2 */ 207 },
214 .name = "AEC6260R", 208 { /* 2: AEC6260R */
209 .name = DRV_NAME,
215 .init_chipset = init_chipset_aec62xx, 210 .init_chipset = init_chipset_aec62xx,
216 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 211 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
217 .port_ops = &atp86x_port_ops, 212 .port_ops = &atp86x_port_ops,
218 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 213 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
219 IDE_HFLAG_ABUSE_SET_DMA_MODE |
220 IDE_HFLAG_NON_BOOTABLE, 214 IDE_HFLAG_NON_BOOTABLE,
221 .pio_mask = ATA_PIO4, 215 .pio_mask = ATA_PIO4,
222 .mwdma_mask = ATA_MWDMA2, 216 .mwdma_mask = ATA_MWDMA2,
223 .udma_mask = ATA_UDMA4, 217 .udma_mask = ATA_UDMA4,
224 },{ /* 3 */ 218 },
225 .name = "AEC6280", 219 { /* 3: AEC6280 */
220 .name = DRV_NAME,
226 .init_chipset = init_chipset_aec62xx, 221 .init_chipset = init_chipset_aec62xx,
227 .port_ops = &atp86x_port_ops, 222 .port_ops = &atp86x_port_ops,
228 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 223 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
229 IDE_HFLAG_ABUSE_SET_DMA_MODE |
230 IDE_HFLAG_OFF_BOARD, 224 IDE_HFLAG_OFF_BOARD,
231 .pio_mask = ATA_PIO4, 225 .pio_mask = ATA_PIO4,
232 .mwdma_mask = ATA_MWDMA2, 226 .mwdma_mask = ATA_MWDMA2,
233 .udma_mask = ATA_UDMA5, 227 .udma_mask = ATA_UDMA5,
234 },{ /* 4 */ 228 },
235 .name = "AEC6280R", 229 { /* 4: AEC6280R */
230 .name = DRV_NAME,
236 .init_chipset = init_chipset_aec62xx, 231 .init_chipset = init_chipset_aec62xx,
237 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 232 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
238 .port_ops = &atp86x_port_ops, 233 .port_ops = &atp86x_port_ops,
239 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 234 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
240 IDE_HFLAG_ABUSE_SET_DMA_MODE |
241 IDE_HFLAG_OFF_BOARD, 235 IDE_HFLAG_OFF_BOARD,
242 .pio_mask = ATA_PIO4, 236 .pio_mask = ATA_PIO4,
243 .mwdma_mask = ATA_MWDMA2, 237 .mwdma_mask = ATA_MWDMA2,
@@ -259,10 +253,17 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
259 253
260static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) 254static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
261{ 255{
256 const struct chipset_bus_clock_list_entry *bus_clock;
262 struct ide_port_info d; 257 struct ide_port_info d;
263 u8 idx = id->driver_data; 258 u8 idx = id->driver_data;
259 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
264 int err; 260 int err;
265 261
262 if (bus_speed <= 33)
263 bus_clock = aec6xxx_33_base;
264 else
265 bus_clock = aec6xxx_34_base;
266
266 err = pci_enable_device(dev); 267 err = pci_enable_device(dev);
267 if (err) 268 if (err)
268 return err; 269 return err;
@@ -273,18 +274,25 @@ static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_devi
273 unsigned long dma_base = pci_resource_start(dev, 4); 274 unsigned long dma_base = pci_resource_start(dev, 4);
274 275
275 if (inb(dma_base + 2) & 0x10) { 276 if (inb(dma_base + 2) & 0x10) {
276 d.name = (idx == 4) ? "AEC6880R" : "AEC6880"; 277 printk(KERN_INFO DRV_NAME " %s: AEC6880%s card detected"
278 "\n", pci_name(dev), (idx == 4) ? "R" : "");
277 d.udma_mask = ATA_UDMA6; 279 d.udma_mask = ATA_UDMA6;
278 } 280 }
279 } 281 }
280 282
281 err = ide_setup_pci_device(dev, &d); 283 err = ide_pci_init_one(dev, &d, (void *)bus_clock);
282 if (err) 284 if (err)
283 pci_disable_device(dev); 285 pci_disable_device(dev);
284 286
285 return err; 287 return err;
286} 288}
287 289
290static void __devexit aec62xx_remove(struct pci_dev *dev)
291{
292 ide_pci_remove(dev);
293 pci_disable_device(dev);
294}
295
288static const struct pci_device_id aec62xx_pci_tbl[] = { 296static const struct pci_device_id aec62xx_pci_tbl[] = {
289 { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF), 0 }, 297 { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF), 0 },
290 { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP860), 1 }, 298 { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP860), 1 },
@@ -299,6 +307,7 @@ static struct pci_driver driver = {
299 .name = "AEC62xx_IDE", 307 .name = "AEC62xx_IDE",
300 .id_table = aec62xx_pci_tbl, 308 .id_table = aec62xx_pci_tbl,
301 .probe = aec62xx_init_one, 309 .probe = aec62xx_init_one,
310 .remove = aec62xx_remove,
302}; 311};
303 312
304static int __init aec62xx_ide_init(void) 313static int __init aec62xx_ide_init(void)
@@ -306,7 +315,13 @@ static int __init aec62xx_ide_init(void)
306 return ide_pci_register_driver(&driver); 315 return ide_pci_register_driver(&driver);
307} 316}
308 317
318static void __exit aec62xx_ide_exit(void)
319{
320 pci_unregister_driver(&driver);
321}
322
309module_init(aec62xx_ide_init); 323module_init(aec62xx_ide_init);
324module_exit(aec62xx_ide_exit);
310 325
311MODULE_AUTHOR("Andre Hedrick"); 326MODULE_AUTHOR("Andre Hedrick");
312MODULE_DESCRIPTION("PCI driver module for ARTOP AEC62xx IDE"); 327MODULE_DESCRIPTION("PCI driver module for ARTOP AEC62xx IDE");
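
In aec62xx.c the per-chip timing table no longer travels through pci_set_drvdata(), which ide_pci_init_one() now needs for the struct ide_host; instead it is handed over as the third argument of ide_pci_init_one() and read back via host->host_priv. A sketch of that flow with hypothetical mydrv_* names and a made-up table (mydrv_port_info is assumed to exist; the ide_pci_init_one()/pci_get_drvdata()/host_priv usage mirrors the hunk):

static const u8 mydrv_clock_table[] = { 0x31, 0x0a };  /* made-up contents */

static int __devinit mydrv_init_one(struct pci_dev *dev,
                                    const struct pci_device_id *id)
{
        /* the third argument ends up in host->host_priv */
        return ide_pci_init_one(dev, &mydrv_port_info,
                                (void *)mydrv_clock_table);
}

static void mydrv_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
        struct ide_host *host = pci_get_drvdata(dev);
        const u8 *clock = host->host_priv;      /* same pointer as handed in above */

        /* illustrative only: pick a timing byte and program the chip */
        pci_write_config_byte(dev, 0x40 | drive->dn, clock[speed & 1]);
}
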
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 80d19c0eb780..b582687e0cd4 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -38,6 +38,8 @@
38 38
39#include <asm/io.h> 39#include <asm/io.h>
40 40
41#define DRV_NAME "alim15x3"
42
41/* 43/*
42 * Allow UDMA on M1543C-E chipset for WDC disks that ignore CRC checking 44 * Allow UDMA on M1543C-E chipset for WDC disks that ignore CRC checking
43 * (this is DANGEROUS and could result in data corruption). 45 * (this is DANGEROUS and could result in data corruption).
@@ -207,13 +209,12 @@ static int ali15x3_dma_setup(ide_drive_t *drive)
207/** 209/**
208 * init_chipset_ali15x3 - Initialise an ALi IDE controller 210 * init_chipset_ali15x3 - Initialise an ALi IDE controller
209 * @dev: PCI device 211 * @dev: PCI device
210 * @name: Name of the controller
211 * 212 *
212 * This function initializes the ALI IDE controller and where 213 * This function initializes the ALI IDE controller and where
213 * appropriate also sets up the 1533 southbridge. 214 * appropriate also sets up the 1533 southbridge.
214 */ 215 */
215 216
216static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const char *name) 217static unsigned int __devinit init_chipset_ali15x3(struct pci_dev *dev)
217{ 218{
218 unsigned long flags; 219 unsigned long flags;
219 u8 tmpbyte; 220 u8 tmpbyte;
@@ -471,7 +472,15 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
471 struct pci_dev *dev = to_pci_dev(hwif->dev); 472 struct pci_dev *dev = to_pci_dev(hwif->dev);
472 unsigned long base = ide_pci_dma_base(hwif, d); 473 unsigned long base = ide_pci_dma_base(hwif, d);
473 474
474 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 475 if (base == 0)
476 return -1;
477
478 hwif->dma_base = base;
479
480 if (ide_pci_check_simplex(hwif, d) < 0)
481 return -1;
482
483 if (ide_pci_set_master(dev, d->name) < 0)
475 return -1; 484 return -1;
476 485
477 if (!hwif->channel) 486 if (!hwif->channel)
@@ -483,7 +492,7 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
483 if (ide_allocate_dma_engine(hwif)) 492 if (ide_allocate_dma_engine(hwif))
484 return -1; 493 return -1;
485 494
486 ide_setup_dma(hwif, base); 495 hwif->dma_ops = &sff_dma_ops;
487 496
488 return 0; 497 return 0;
489} 498}
@@ -507,7 +516,7 @@ static const struct ide_dma_ops ali_dma_ops = {
507}; 516};
508 517
509static const struct ide_port_info ali15x3_chipset __devinitdata = { 518static const struct ide_port_info ali15x3_chipset __devinitdata = {
510 .name = "ALI15X3", 519 .name = DRV_NAME,
511 .init_chipset = init_chipset_ali15x3, 520 .init_chipset = init_chipset_ali15x3,
512 .init_hwif = init_hwif_ali15x3, 521 .init_hwif = init_hwif_ali15x3,
513 .init_dma = init_dma_ali15x3, 522 .init_dma = init_dma_ali15x3,
@@ -557,7 +566,7 @@ static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_dev
557 if (idx == 0) 566 if (idx == 0)
558 d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX; 567 d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
559 568
560 return ide_setup_pci_device(dev, &d); 569 return ide_pci_init_one(dev, &d, NULL);
561} 570}
562 571
563 572
@@ -572,6 +581,7 @@ static struct pci_driver driver = {
572 .name = "ALI15x3_IDE", 581 .name = "ALI15x3_IDE",
573 .id_table = alim15x3_pci_tbl, 582 .id_table = alim15x3_pci_tbl,
574 .probe = alim15x3_init_one, 583 .probe = alim15x3_init_one,
584 .remove = ide_pci_remove,
575}; 585};
576 586
577static int __init ali15x3_ide_init(void) 587static int __init ali15x3_ide_init(void)
@@ -579,7 +589,13 @@ static int __init ali15x3_ide_init(void)
579 return ide_pci_register_driver(&driver); 589 return ide_pci_register_driver(&driver);
580} 590}
581 591
592static void __exit ali15x3_ide_exit(void)
593{
594 return pci_unregister_driver(&driver);
595}
596
582module_init(ali15x3_ide_init); 597module_init(ali15x3_ide_init);
598module_exit(ali15x3_ide_exit);
583 599
584MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox"); 600MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox");
585MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE"); 601MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE");
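
The alim15x3.c init_dma hook illustrates what custom DMA setup looks like once ide_setup_dma() is gone: the hook records dma_base itself, runs the simplex check, and points hwif->dma_ops at the generic sff_dma_ops. A sketch of the sequence, with mydrv_init_dma as a placeholder name; the helpers and their order follow the hunk.

static int __devinit mydrv_init_dma(ide_hwif_t *hwif,
                                    const struct ide_port_info *d)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        unsigned long base = ide_pci_dma_base(hwif, d);

        if (base == 0)
                return -1;

        hwif->dma_base = base;          /* the core no longer does this for us */

        if (ide_pci_check_simplex(hwif, d) < 0)
                return -1;

        if (ide_pci_set_master(dev, d->name) < 0)
                return -1;

        /* any chip-specific extra setup goes here */

        if (ide_allocate_dma_engine(hwif))
                return -1;

        hwif->dma_ops = &sff_dma_ops;   /* replaces the old ide_setup_dma() call */

        return 0;
}
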
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 0bfcdd0e77b3..2cea7bf51a0f 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -21,6 +21,8 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/ide.h> 22#include <linux/ide.h>
23 23
24#define DRV_NAME "amd74xx"
25
24enum { 26enum {
25 AMD_IDE_CONFIG = 0x41, 27 AMD_IDE_CONFIG = 0x41,
26 AMD_CABLE_DETECT = 0x42, 28 AMD_CABLE_DETECT = 0x42,
@@ -110,15 +112,13 @@ static void amd_set_pio_mode(ide_drive_t *drive, const u8 pio)
110 amd_set_drive(drive, XFER_PIO_0 + pio); 112 amd_set_drive(drive, XFER_PIO_0 + pio);
111} 113}
112 114
113static void __devinit amd7409_cable_detect(struct pci_dev *dev, 115static void __devinit amd7409_cable_detect(struct pci_dev *dev)
114 const char *name)
115{ 116{
116 /* no host side cable detection */ 117 /* no host side cable detection */
117 amd_80w = 0x03; 118 amd_80w = 0x03;
118} 119}
119 120
120static void __devinit amd7411_cable_detect(struct pci_dev *dev, 121static void __devinit amd7411_cable_detect(struct pci_dev *dev)
121 const char *name)
122{ 122{
123 int i; 123 int i;
124 u32 u = 0; 124 u32 u = 0;
@@ -129,9 +129,9 @@ static void __devinit amd7411_cable_detect(struct pci_dev *dev,
129 amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0); 129 amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0);
130 for (i = 24; i >= 0; i -= 8) 130 for (i = 24; i >= 0; i -= 8)
131 if (((u >> i) & 4) && !(amd_80w & (1 << (1 - (i >> 4))))) { 131 if (((u >> i) & 4) && !(amd_80w & (1 << (1 - (i >> 4))))) {
132 printk(KERN_WARNING "%s: BIOS didn't set cable bits " 132 printk(KERN_WARNING DRV_NAME " %s: BIOS didn't set "
133 "correctly. Enabling workaround.\n", 133 "cable bits correctly. Enabling workaround.\n",
134 name); 134 pci_name(dev));
135 amd_80w |= (1 << (1 - (i >> 4))); 135 amd_80w |= (1 << (1 - (i >> 4)));
136 } 136 }
137} 137}
@@ -140,8 +140,7 @@ static void __devinit amd7411_cable_detect(struct pci_dev *dev,
140 * The initialization callback. Initialize drive independent registers. 140 * The initialization callback. Initialize drive independent registers.
141 */ 141 */
142 142
143static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, 143static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev)
144 const char *name)
145{ 144{
146 u8 t = 0, offset = amd_offset(dev); 145 u8 t = 0, offset = amd_offset(dev);
147 146
@@ -154,9 +153,9 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev,
154 ; /* no UDMA > 2 */ 153 ; /* no UDMA > 2 */
155 else if (dev->vendor == PCI_VENDOR_ID_AMD && 154 else if (dev->vendor == PCI_VENDOR_ID_AMD &&
156 dev->device == PCI_DEVICE_ID_AMD_VIPER_7409) 155 dev->device == PCI_DEVICE_ID_AMD_VIPER_7409)
157 amd7409_cable_detect(dev, name); 156 amd7409_cable_detect(dev);
158 else 157 else
159 amd7411_cable_detect(dev, name); 158 amd7411_cable_detect(dev);
160 159
161/* 160/*
162 * Take care of prefetch & postwrite. 161 * Take care of prefetch & postwrite.
@@ -173,24 +172,6 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev,
173 t |= 0xf0; 172 t |= 0xf0;
174 pci_write_config_byte(dev, AMD_IDE_CONFIG + offset, t); 173 pci_write_config_byte(dev, AMD_IDE_CONFIG + offset, t);
175 174
176/*
177 * Determine the system bus clock.
178 */
179
180 amd_clock = (ide_pci_clk ? ide_pci_clk : 33) * 1000;
181
182 switch (amd_clock) {
183 case 33000: amd_clock = 33333; break;
184 case 37000: amd_clock = 37500; break;
185 case 41000: amd_clock = 41666; break;
186 }
187
188 if (amd_clock < 20000 || amd_clock > 50000) {
189 printk(KERN_WARNING "%s: User given PCI clock speed impossible (%d), using 33 MHz instead.\n",
190 name, amd_clock);
191 amd_clock = 33333;
192 }
193
194 return dev->irq; 175 return dev->irq;
195} 176}
196 177
@@ -218,14 +199,13 @@ static const struct ide_port_ops amd_port_ops = {
218 199
219#define IDE_HFLAGS_AMD \ 200#define IDE_HFLAGS_AMD \
220 (IDE_HFLAG_PIO_NO_BLACKLIST | \ 201 (IDE_HFLAG_PIO_NO_BLACKLIST | \
221 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
222 IDE_HFLAG_POST_SET_MODE | \ 202 IDE_HFLAG_POST_SET_MODE | \
223 IDE_HFLAG_IO_32BIT | \ 203 IDE_HFLAG_IO_32BIT | \
224 IDE_HFLAG_UNMASK_IRQS) 204 IDE_HFLAG_UNMASK_IRQS)
225 205
226#define DECLARE_AMD_DEV(name_str, swdma, udma) \ 206#define DECLARE_AMD_DEV(swdma, udma) \
227 { \ 207 { \
228 .name = name_str, \ 208 .name = DRV_NAME, \
229 .init_chipset = init_chipset_amd74xx, \ 209 .init_chipset = init_chipset_amd74xx, \
230 .init_hwif = init_hwif_amd74xx, \ 210 .init_hwif = init_hwif_amd74xx, \
231 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \ 211 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
@@ -237,9 +217,9 @@ static const struct ide_port_ops amd_port_ops = {
237 .udma_mask = udma, \ 217 .udma_mask = udma, \
238 } 218 }
239 219
240#define DECLARE_NV_DEV(name_str, udma) \ 220#define DECLARE_NV_DEV(udma) \
241 { \ 221 { \
242 .name = name_str, \ 222 .name = DRV_NAME, \
243 .init_chipset = init_chipset_amd74xx, \ 223 .init_chipset = init_chipset_amd74xx, \
244 .init_hwif = init_hwif_amd74xx, \ 224 .init_hwif = init_hwif_amd74xx, \
245 .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \ 225 .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
@@ -252,31 +232,15 @@ static const struct ide_port_ops amd_port_ops = {
252 } 232 }
253 233
254static const struct ide_port_info amd74xx_chipsets[] __devinitdata = { 234static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
255 /* 0 */ DECLARE_AMD_DEV("AMD7401", 0x00, ATA_UDMA2), 235 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
256 /* 1 */ DECLARE_AMD_DEV("AMD7409", ATA_SWDMA2, ATA_UDMA4), 236 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
257 /* 2 */ DECLARE_AMD_DEV("AMD7411", ATA_SWDMA2, ATA_UDMA5), 237 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
258 /* 3 */ DECLARE_AMD_DEV("AMD7441", ATA_SWDMA2, ATA_UDMA5), 238 /* 3: AMD8111 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA6),
259 /* 4 */ DECLARE_AMD_DEV("AMD8111", ATA_SWDMA2, ATA_UDMA6), 239
260 240 /* 4: NFORCE */ DECLARE_NV_DEV(ATA_UDMA5),
261 /* 5 */ DECLARE_NV_DEV("NFORCE", ATA_UDMA5), 241 /* 5: >= NFORCE2 */ DECLARE_NV_DEV(ATA_UDMA6),
262 /* 6 */ DECLARE_NV_DEV("NFORCE2", ATA_UDMA6), 242
263 /* 7 */ DECLARE_NV_DEV("NFORCE2-U400R", ATA_UDMA6), 243 /* 6: AMD5536 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
264 /* 8 */ DECLARE_NV_DEV("NFORCE2-U400R-SATA", ATA_UDMA6),
265 /* 9 */ DECLARE_NV_DEV("NFORCE3-150", ATA_UDMA6),
266 /* 10 */ DECLARE_NV_DEV("NFORCE3-250", ATA_UDMA6),
267 /* 11 */ DECLARE_NV_DEV("NFORCE3-250-SATA", ATA_UDMA6),
268 /* 12 */ DECLARE_NV_DEV("NFORCE3-250-SATA2", ATA_UDMA6),
269 /* 13 */ DECLARE_NV_DEV("NFORCE-CK804", ATA_UDMA6),
270 /* 14 */ DECLARE_NV_DEV("NFORCE-MCP04", ATA_UDMA6),
271 /* 15 */ DECLARE_NV_DEV("NFORCE-MCP51", ATA_UDMA6),
272 /* 16 */ DECLARE_NV_DEV("NFORCE-MCP55", ATA_UDMA6),
273 /* 17 */ DECLARE_NV_DEV("NFORCE-MCP61", ATA_UDMA6),
274 /* 18 */ DECLARE_NV_DEV("NFORCE-MCP65", ATA_UDMA6),
275 /* 19 */ DECLARE_NV_DEV("NFORCE-MCP67", ATA_UDMA6),
276 /* 20 */ DECLARE_NV_DEV("NFORCE-MCP73", ATA_UDMA6),
277 /* 21 */ DECLARE_NV_DEV("NFORCE-MCP77", ATA_UDMA6),
278
279 /* 22 */ DECLARE_AMD_DEV("AMD5536", ATA_SWDMA2, ATA_UDMA5),
280}; 244};
281 245
282static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id) 246static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -293,47 +257,64 @@ static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_
293 if (dev->revision <= 7) 257 if (dev->revision <= 7)
294 d.swdma_mask = 0; 258 d.swdma_mask = 0;
295 d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX; 259 d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
296 } else if (idx == 4) { 260 } else if (idx == 3) {
297 if (dev->subsystem_vendor == PCI_VENDOR_ID_AMD && 261 if (dev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
298 dev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE) 262 dev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
299 d.udma_mask = ATA_UDMA5; 263 d.udma_mask = ATA_UDMA5;
300 } 264 }
301 265
302 printk(KERN_INFO "%s: %s (rev %02x) UDMA%s controller\n", 266 printk(KERN_INFO "%s %s: UDMA%s controller\n",
303 d.name, pci_name(dev), dev->revision, 267 d.name, pci_name(dev), amd_dma[fls(d.udma_mask) - 1]);
304 amd_dma[fls(d.udma_mask) - 1]); 268
269 /*
270 * Determine the system bus clock.
271 */
272 amd_clock = (ide_pci_clk ? ide_pci_clk : 33) * 1000;
273
274 switch (amd_clock) {
275 case 33000: amd_clock = 33333; break;
276 case 37000: amd_clock = 37500; break;
277 case 41000: amd_clock = 41666; break;
278 }
279
280 if (amd_clock < 20000 || amd_clock > 50000) {
281 printk(KERN_WARNING "%s: User given PCI clock speed impossible"
282 " (%d), using 33 MHz instead.\n",
283 d.name, amd_clock);
284 amd_clock = 33333;
285 }
305 286
306 return ide_setup_pci_device(dev, &d); 287 return ide_pci_init_one(dev, &d, NULL);
307} 288}
308 289
309static const struct pci_device_id amd74xx_pci_tbl[] = { 290static const struct pci_device_id amd74xx_pci_tbl[] = {
310 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 }, 291 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
311 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 }, 292 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
312 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 2 }, 293 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 2 },
313 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 3 }, 294 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 2 },
314 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 4 }, 295 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 3 },
315 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 5 }, 296 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 4 },
316 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 6 }, 297 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 5 },
317 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 7 }, 298 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 5 },
318#ifdef CONFIG_BLK_DEV_IDE_SATA 299#ifdef CONFIG_BLK_DEV_IDE_SATA
319 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), 8 }, 300 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), 5 },
320#endif 301#endif
321 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 9 }, 302 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 5 },
322 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 10 }, 303 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 5 },
323#ifdef CONFIG_BLK_DEV_IDE_SATA 304#ifdef CONFIG_BLK_DEV_IDE_SATA
324 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), 11 }, 305 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), 5 },
325 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), 12 }, 306 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), 5 },
326#endif 307#endif
327 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 13 }, 308 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 5 },
328 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 14 }, 309 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 5 },
329 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 15 }, 310 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 5 },
330 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 16 }, 311 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 5 },
331 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 17 }, 312 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 5 },
332 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 18 }, 313 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 5 },
333 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 19 }, 314 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 5 },
334 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 20 }, 315 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 5 },
335 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 21 }, 316 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 5 },
336 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 22 }, 317 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 6 },
337 { 0, }, 318 { 0, },
338}; 319};
339MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl); 320MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
@@ -342,6 +323,7 @@ static struct pci_driver driver = {
342 .name = "AMD_IDE", 323 .name = "AMD_IDE",
343 .id_table = amd74xx_pci_tbl, 324 .id_table = amd74xx_pci_tbl,
344 .probe = amd74xx_probe, 325 .probe = amd74xx_probe,
326 .remove = ide_pci_remove,
345}; 327};
346 328
347static int __init amd74xx_ide_init(void) 329static int __init amd74xx_ide_init(void)
@@ -349,7 +331,13 @@ static int __init amd74xx_ide_init(void)
349 return ide_pci_register_driver(&driver); 331 return ide_pci_register_driver(&driver);
350} 332}
351 333
334static void __exit amd74xx_ide_exit(void)
335{
336 pci_unregister_driver(&driver);
337}
338
352module_init(amd74xx_ide_init); 339module_init(amd74xx_ide_init);
340module_exit(amd74xx_ide_exit);
353 341
354MODULE_AUTHOR("Vojtech Pavlik"); 342MODULE_AUTHOR("Vojtech Pavlik");
355MODULE_DESCRIPTION("AMD PCI IDE driver"); 343MODULE_DESCRIPTION("AMD PCI IDE driver");
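
The same unload boilerplate recurs in the aec62xx, alim15x3, amd74xx and atiixp hunks: because ide_pci_init_one() leaves a struct ide_host in PCI drvdata, each driver can wire .remove to ide_pci_remove() and grow a module_exit() that unregisters the pci_driver. One generic sketch covers them all; the mydrv_* names, ID table and probe function are placeholders.

static struct pci_driver mydrv_pci_driver = {
        .name           = "mydrv_IDE",
        .id_table       = mydrv_pci_tbl,        /* placeholder table */
        .probe          = mydrv_init_one,       /* placeholder probe */
        .remove         = ide_pci_remove,       /* tears down the ide_host in drvdata */
};

static int __init mydrv_ide_init(void)
{
        return ide_pci_register_driver(&mydrv_pci_driver);
}

static void __exit mydrv_ide_exit(void)
{
        pci_unregister_driver(&mydrv_pci_driver);
}

module_init(mydrv_ide_init);
module_exit(mydrv_ide_exit);
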
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 8b637181681a..332f08f43b56 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -11,6 +11,8 @@
11#include <linux/ide.h> 11#include <linux/ide.h>
12#include <linux/init.h> 12#include <linux/init.h>
13 13
14#define DRV_NAME "atiixp"
15
14#define ATIIXP_IDE_PIO_TIMING 0x40 16#define ATIIXP_IDE_PIO_TIMING 0x40
15#define ATIIXP_IDE_MDMA_TIMING 0x44 17#define ATIIXP_IDE_MDMA_TIMING 0x44
16#define ATIIXP_IDE_PIO_CONTROL 0x48 18#define ATIIXP_IDE_PIO_CONTROL 0x48
@@ -137,16 +139,17 @@ static const struct ide_port_ops atiixp_port_ops = {
137}; 139};
138 140
139static const struct ide_port_info atiixp_pci_info[] __devinitdata = { 141static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
140 { /* 0 */ 142 { /* 0: IXP200/300/400/700 */
141 .name = "ATIIXP", 143 .name = DRV_NAME,
142 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}}, 144 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
143 .port_ops = &atiixp_port_ops, 145 .port_ops = &atiixp_port_ops,
144 .host_flags = IDE_HFLAG_LEGACY_IRQS, 146 .host_flags = IDE_HFLAG_LEGACY_IRQS,
145 .pio_mask = ATA_PIO4, 147 .pio_mask = ATA_PIO4,
146 .mwdma_mask = ATA_MWDMA2, 148 .mwdma_mask = ATA_MWDMA2,
147 .udma_mask = ATA_UDMA5, 149 .udma_mask = ATA_UDMA5,
148 },{ /* 1 */ 150 },
149 .name = "SB600_PATA", 151 { /* 1: IXP600 */
152 .name = DRV_NAME,
150 .enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}}, 153 .enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
151 .port_ops = &atiixp_port_ops, 154 .port_ops = &atiixp_port_ops,
152 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS, 155 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS,
@@ -167,7 +170,7 @@ static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
167 170
168static int __devinit atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) 171static int __devinit atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
169{ 172{
170 return ide_setup_pci_device(dev, &atiixp_pci_info[id->driver_data]); 173 return ide_pci_init_one(dev, &atiixp_pci_info[id->driver_data], NULL);
171} 174}
172 175
173static const struct pci_device_id atiixp_pci_tbl[] = { 176static const struct pci_device_id atiixp_pci_tbl[] = {
@@ -184,6 +187,7 @@ static struct pci_driver driver = {
184 .name = "ATIIXP_IDE", 187 .name = "ATIIXP_IDE",
185 .id_table = atiixp_pci_tbl, 188 .id_table = atiixp_pci_tbl,
186 .probe = atiixp_init_one, 189 .probe = atiixp_init_one,
190 .remove = ide_pci_remove,
187}; 191};
188 192
189static int __init atiixp_ide_init(void) 193static int __init atiixp_ide_init(void)
@@ -191,7 +195,13 @@ static int __init atiixp_ide_init(void)
191 return ide_pci_register_driver(&driver); 195 return ide_pci_register_driver(&driver);
192} 196}
193 197
198static void __exit atiixp_ide_exit(void)
199{
200 pci_unregister_driver(&driver);
201}
202
194module_init(atiixp_ide_init); 203module_init(atiixp_ide_init);
204module_exit(atiixp_ide_exit);
195 205
196MODULE_AUTHOR("HUI YU"); 206MODULE_AUTHOR("HUI YU");
197MODULE_DESCRIPTION("PCI driver module for ATI IXP IDE"); 207MODULE_DESCRIPTION("PCI driver module for ATI IXP IDE");
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index 1ad1e23e3105..e6c62006ca1a 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -181,11 +181,6 @@ static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
181static DEFINE_SPINLOCK(cmd640_lock); 181static DEFINE_SPINLOCK(cmd640_lock);
182 182
183/* 183/*
184 * These are initialized to point at the devices we control
185 */
186static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
187
188/*
189 * Interface to access cmd640x registers 184 * Interface to access cmd640x registers
190 */ 185 */
191static unsigned int cmd640_key; 186static unsigned int cmd640_key;
@@ -717,8 +712,7 @@ static int __init cmd640x_init(void)
717 int second_port_cmd640 = 0, rc; 712 int second_port_cmd640 = 0, rc;
718 const char *bus_type, *port2; 713 const char *bus_type, *port2;
719 u8 b, cfr; 714 u8 b, cfr;
720 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 715 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
721 hw_regs_t hw[2];
722 716
723 if (cmd640_vlb && probe_for_cmd640_vlb()) { 717 if (cmd640_vlb && probe_for_cmd640_vlb()) {
724 bus_type = "VLB"; 718 bus_type = "VLB";
@@ -781,15 +775,10 @@ static int __init cmd640x_init(void)
781 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" 775 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
782 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); 776 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
783 777
784 cmd_hwif0 = ide_find_port();
785
786 /* 778 /*
787 * Initialize data for primary port 779 * Initialize data for primary port
788 */ 780 */
789 if (cmd_hwif0) { 781 hws[0] = &hw[0];
790 ide_init_port_hw(cmd_hwif0, &hw[0]);
791 idx[0] = cmd_hwif0->index;
792 }
793 782
794 /* 783 /*
795 * Ensure compatibility by always using the slowest timings 784 * Ensure compatibility by always using the slowest timings
@@ -829,13 +818,9 @@ static int __init cmd640x_init(void)
829 /* 818 /*
830 * Initialize data for secondary cmd640 port, if enabled 819 * Initialize data for secondary cmd640 port, if enabled
831 */ 820 */
832 if (second_port_cmd640) { 821 if (second_port_cmd640)
833 cmd_hwif1 = ide_find_port(); 822 hws[1] = &hw[1];
834 if (cmd_hwif1) { 823
835 ide_init_port_hw(cmd_hwif1, &hw[1]);
836 idx[1] = cmd_hwif1->index;
837 }
838 }
839 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", 824 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
840 second_port_cmd640 ? "" : "not ", port2); 825 second_port_cmd640 ? "" : "not ", port2);
841 826
@@ -843,9 +828,7 @@ static int __init cmd640x_init(void)
843 cmd640_dump_regs(); 828 cmd640_dump_regs();
844#endif 829#endif
845 830
846 ide_device_add(idx, &cmd640_port_info); 831 return ide_host_add(&cmd640_port_info, hws, NULL);
847
848 return 1;
849} 832}
850 833
851module_param_named(probe_vlb, cmd640_vlb, bool, 0); 834module_param_named(probe_vlb, cmd640_vlb, bool, 0);
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index cfa784bacf48..1360b4fa9fd3 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -19,6 +19,8 @@
19 19
20#include <asm/io.h> 20#include <asm/io.h>
21 21
22#define DRV_NAME "cmd64x"
23
22#define CMD_DEBUG 0 24#define CMD_DEBUG 0
23 25
24#if CMD_DEBUG 26#if CMD_DEBUG
@@ -262,7 +264,7 @@ static int cmd648_dma_test_irq(ide_drive_t *drive)
262 unsigned long base = hwif->dma_base - (hwif->channel * 8); 264 unsigned long base = hwif->dma_base - (hwif->channel * 8);
263 u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 : 265 u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
264 MRDMODE_INTR_CH0; 266 MRDMODE_INTR_CH0;
265 u8 dma_stat = inb(hwif->dma_status); 267 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
266 u8 mrdmode = inb(base + 1); 268 u8 mrdmode = inb(base + 1);
267 269
268#ifdef DEBUG 270#ifdef DEBUG
@@ -286,7 +288,7 @@ static int cmd64x_dma_test_irq(ide_drive_t *drive)
286 int irq_reg = hwif->channel ? ARTTIM23 : CFR; 288 int irq_reg = hwif->channel ? ARTTIM23 : CFR;
287 u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 : 289 u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
288 CFR_INTR_CH0; 290 CFR_INTR_CH0;
289 u8 dma_stat = inb(hwif->dma_status); 291 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
290 u8 irq_stat = 0; 292 u8 irq_stat = 0;
291 293
292 (void) pci_read_config_byte(dev, irq_reg, &irq_stat); 294 (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
@@ -317,41 +319,23 @@ static int cmd646_1_dma_end(ide_drive_t *drive)
317 319
318 drive->waiting_for_dma = 0; 320 drive->waiting_for_dma = 0;
319 /* get DMA status */ 321 /* get DMA status */
320 dma_stat = inb(hwif->dma_status); 322 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
321 /* read DMA command state */ 323 /* read DMA command state */
322 dma_cmd = inb(hwif->dma_command); 324 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
323 /* stop DMA */ 325 /* stop DMA */
324 outb(dma_cmd & ~1, hwif->dma_command); 326 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
325 /* clear the INTR & ERROR bits */ 327 /* clear the INTR & ERROR bits */
326 outb(dma_stat | 6, hwif->dma_status); 328 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
327 /* and free any DMA resources */ 329 /* and free any DMA resources */
328 ide_destroy_dmatable(drive); 330 ide_destroy_dmatable(drive);
329 /* verify good DMA status */ 331 /* verify good DMA status */
330 return (dma_stat & 7) != 4; 332 return (dma_stat & 7) != 4;
331} 333}
332 334
333static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const char *name) 335static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev)
334{ 336{
335 u8 mrdmode = 0; 337 u8 mrdmode = 0;
336 338
337 if (dev->device == PCI_DEVICE_ID_CMD_646) {
338
339 switch (dev->revision) {
340 case 0x07:
341 case 0x05:
342 printk("%s: UltraDMA capable\n", name);
343 break;
344 case 0x03:
345 default:
346 printk("%s: MultiWord DMA force limited\n", name);
347 break;
348 case 0x01:
349 printk("%s: MultiWord DMA limited, "
350 "IRQ workaround enabled\n", name);
351 break;
352 }
353 }
354
355 /* Set a good latency timer and cache line size value. */ 339 /* Set a good latency timer and cache line size value. */
356 (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); 340 (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
357 /* FIXME: pci_set_master() to ensure a good latency timer value */ 341 /* FIXME: pci_set_master() to ensure a good latency timer value */
@@ -425,8 +409,8 @@ static const struct ide_dma_ops cmd648_dma_ops = {
425}; 409};
426 410
427static const struct ide_port_info cmd64x_chipsets[] __devinitdata = { 411static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
428 { /* 0 */ 412 { /* 0: CMD643 */
429 .name = "CMD643", 413 .name = DRV_NAME,
430 .init_chipset = init_chipset_cmd64x, 414 .init_chipset = init_chipset_cmd64x,
431 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}}, 415 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
432 .port_ops = &cmd64x_port_ops, 416 .port_ops = &cmd64x_port_ops,
@@ -436,8 +420,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
436 .pio_mask = ATA_PIO5, 420 .pio_mask = ATA_PIO5,
437 .mwdma_mask = ATA_MWDMA2, 421 .mwdma_mask = ATA_MWDMA2,
438 .udma_mask = 0x00, /* no udma */ 422 .udma_mask = 0x00, /* no udma */
439 },{ /* 1 */ 423 },
440 .name = "CMD646", 424 { /* 1: CMD646 */
425 .name = DRV_NAME,
441 .init_chipset = init_chipset_cmd64x, 426 .init_chipset = init_chipset_cmd64x,
442 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 427 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
443 .chipset = ide_cmd646, 428 .chipset = ide_cmd646,
@@ -447,8 +432,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
447 .pio_mask = ATA_PIO5, 432 .pio_mask = ATA_PIO5,
448 .mwdma_mask = ATA_MWDMA2, 433 .mwdma_mask = ATA_MWDMA2,
449 .udma_mask = ATA_UDMA2, 434 .udma_mask = ATA_UDMA2,
450 },{ /* 2 */ 435 },
451 .name = "CMD648", 436 { /* 2: CMD648 */
437 .name = DRV_NAME,
452 .init_chipset = init_chipset_cmd64x, 438 .init_chipset = init_chipset_cmd64x,
453 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 439 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
454 .port_ops = &cmd64x_port_ops, 440 .port_ops = &cmd64x_port_ops,
@@ -457,8 +443,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
457 .pio_mask = ATA_PIO5, 443 .pio_mask = ATA_PIO5,
458 .mwdma_mask = ATA_MWDMA2, 444 .mwdma_mask = ATA_MWDMA2,
459 .udma_mask = ATA_UDMA4, 445 .udma_mask = ATA_UDMA4,
460 },{ /* 3 */ 446 },
461 .name = "CMD649", 447 { /* 3: CMD649 */
448 .name = DRV_NAME,
462 .init_chipset = init_chipset_cmd64x, 449 .init_chipset = init_chipset_cmd64x,
463 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 450 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
464 .port_ops = &cmd64x_port_ops, 451 .port_ops = &cmd64x_port_ops,
@@ -507,7 +494,7 @@ static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_devic
507 } 494 }
508 } 495 }
509 496
510 return ide_setup_pci_device(dev, &d); 497 return ide_pci_init_one(dev, &d, NULL);
511} 498}
512 499
513static const struct pci_device_id cmd64x_pci_tbl[] = { 500static const struct pci_device_id cmd64x_pci_tbl[] = {
@@ -523,6 +510,7 @@ static struct pci_driver driver = {
523 .name = "CMD64x_IDE", 510 .name = "CMD64x_IDE",
524 .id_table = cmd64x_pci_tbl, 511 .id_table = cmd64x_pci_tbl,
525 .probe = cmd64x_init_one, 512 .probe = cmd64x_init_one,
513 .remove = ide_pci_remove,
526}; 514};
527 515
528static int __init cmd64x_ide_init(void) 516static int __init cmd64x_ide_init(void)
@@ -530,7 +518,13 @@ static int __init cmd64x_ide_init(void)
530 return ide_pci_register_driver(&driver); 518 return ide_pci_register_driver(&driver);
531} 519}
532 520
521static void __exit cmd64x_ide_exit(void)
522{
523 pci_unregister_driver(&driver);
524}
525
533module_init(cmd64x_ide_init); 526module_init(cmd64x_ide_init);
527module_exit(cmd64x_ide_exit);
534 528
535MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick"); 529MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick");
536MODULE_DESCRIPTION("PCI driver module for CMD64x IDE"); 530MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
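
The cmd64x hunks above stop caching hwif->dma_status / hwif->dma_command and instead compute the SFF-8038i register addresses from hwif->dma_base plus the ATA_DMA_* offsets from <linux/ata.h>. The sketch below restates that addressing in a generic end-of-DMA routine modelled on cmd646_1_dma_end(); it is illustrative only, and uses the named ATA_DMA_START/ATA_DMA_INTR/ATA_DMA_ERR bits in place of the raw 0x01 and 0x06 masks seen above.

#include <linux/ata.h>
#include <linux/ide.h>
#include <asm/io.h>

static int example_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 dma_stat, dma_cmd;

        drive->waiting_for_dma = 0;

        dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
        dma_cmd  = inb(hwif->dma_base + ATA_DMA_CMD);

        /* stop the engine, then clear the interrupt and error bits */
        outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
        outb(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR,
             hwif->dma_base + ATA_DMA_STATUS);

        ide_destroy_dmatable(drive);

        /* clean finish: interrupt bit set, error and active bits clear */
        return (dma_stat & 7) != 4;
}
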
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 992b1cf8db69..c0364b287f17 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -41,6 +41,8 @@
41#include <linux/ide.h> 41#include <linux/ide.h>
42#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
43 43
44#define DRV_NAME "cs5520"
45
44struct pio_clocks 46struct pio_clocks
45{ 47{
46 int address; 48 int address;
@@ -62,8 +64,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
62 struct pci_dev *pdev = to_pci_dev(hwif->dev); 64 struct pci_dev *pdev = to_pci_dev(hwif->dev);
63 int controller = drive->dn > 1 ? 1 : 0; 65 int controller = drive->dn > 1 ? 1 : 0;
64 66
65 /* FIXME: if DMA = 1 do we need to set the DMA bit here ? */
66
67 /* 8bit CAT/CRT - 8bit command timing for channel */ 67 /* 8bit CAT/CRT - 8bit command timing for channel */
68 pci_write_config_byte(pdev, 0x62 + controller, 68 pci_write_config_byte(pdev, 0x62 + controller,
69 (cs5520_pio_clocks[pio].recovery << 4) | 69 (cs5520_pio_clocks[pio].recovery << 4) |
@@ -89,52 +89,16 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
89 cs5520_set_pio_mode(drive, 0); 89 cs5520_set_pio_mode(drive, 0);
90} 90}
91 91
92/*
93 * We wrap the DMA activate to set the vdma flag. This is needed
94 * so that the IDE DMA layer issues PIO not DMA commands over the
95 * DMA channel
96 *
97 * ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA
98 */
99
100static void cs5520_dma_host_set(ide_drive_t *drive, int on)
101{
102 drive->vdma = on;
103 ide_dma_host_set(drive, on);
104}
105
106static const struct ide_port_ops cs5520_port_ops = { 92static const struct ide_port_ops cs5520_port_ops = {
107 .set_pio_mode = cs5520_set_pio_mode, 93 .set_pio_mode = cs5520_set_pio_mode,
108 .set_dma_mode = cs5520_set_dma_mode, 94 .set_dma_mode = cs5520_set_dma_mode,
109}; 95};
110 96
111static const struct ide_dma_ops cs5520_dma_ops = { 97static const struct ide_port_info cyrix_chipset __devinitdata = {
112 .dma_host_set = cs5520_dma_host_set, 98 .name = DRV_NAME,
113 .dma_setup = ide_dma_setup, 99 .port_ops = &cs5520_port_ops,
114 .dma_exec_cmd = ide_dma_exec_cmd, 100 .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_CS5520,
115 .dma_start = ide_dma_start, 101 .pio_mask = ATA_PIO4,
116 .dma_end = __ide_dma_end,
117 .dma_test_irq = ide_dma_test_irq,
118 .dma_lost_irq = ide_dma_lost_irq,
119 .dma_timeout = ide_dma_timeout,
120};
121
122/* FIXME: VDMA is disabled because it caused system hangs */
123#define DECLARE_CS_DEV(name_str) \
124 { \
125 .name = name_str, \
126 .port_ops = &cs5520_port_ops, \
127 .dma_ops = &cs5520_dma_ops, \
128 .host_flags = IDE_HFLAG_ISA_PORTS | \
129 IDE_HFLAG_CS5520 | \
130 IDE_HFLAG_NO_ATAPI_DMA | \
131 IDE_HFLAG_ABUSE_SET_DMA_MODE, \
132 .pio_mask = ATA_PIO4, \
133 }
134
135static const struct ide_port_info cyrix_chipsets[] __devinitdata = {
136 /* 0 */ DECLARE_CS_DEV("Cyrix 5510"),
137 /* 1 */ DECLARE_CS_DEV("Cyrix 5520")
138}; 102};
139 103
140/* 104/*
@@ -145,8 +109,8 @@ static const struct ide_port_info cyrix_chipsets[] __devinitdata = {
145 109
146static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) 110static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
147{ 111{
148 const struct ide_port_info *d = &cyrix_chipsets[id->driver_data]; 112 const struct ide_port_info *d = &cyrix_chipset;
149 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 113 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
150 114
151 ide_setup_pci_noise(dev, d); 115 ide_setup_pci_noise(dev, d);
152 116
@@ -159,7 +123,8 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
159 } 123 }
160 pci_set_master(dev); 124 pci_set_master(dev);
161 if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) { 125 if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
162 printk(KERN_WARNING "cs5520: No suitable DMA available.\n"); 126 printk(KERN_WARNING "%s: No suitable DMA available.\n",
127 d->name);
163 return -ENODEV; 128 return -ENODEV;
164 } 129 }
165 130
@@ -168,11 +133,9 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
168 * do all the device setup for us 133 * do all the device setup for us
169 */ 134 */
170 135
171 ide_pci_setup_ports(dev, d, 14, &idx[0]); 136 ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);
172
173 ide_device_add(idx, d);
174 137
175 return 0; 138 return ide_host_add(d, hws, NULL);
176} 139}
177 140
178static const struct pci_device_id cs5520_pci_tbl[] = { 141static const struct pci_device_id cs5520_pci_tbl[] = {
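
cs5520 cannot take the common ide_pci_init_one() route because its ports sit at legacy ISA addresses, so the probe above fills hw[]/hws[] through ide_pci_setup_ports() with a forced legacy IRQ and registers the host itself. A condensed sketch of that sequence (error reporting and the DMA-mask check trimmed); it relies on ide_pci_setup_ports() pointing hws[] at the filled hw[] entries, which is exactly what the cs5520 code above depends on.

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ide.h>

static int __devinit example_isa_ports_probe(struct pci_dev *dev,
                                             const struct ide_port_info *d)
{
        hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };

        ide_setup_pci_noise(dev, d);

        if (pci_enable_device(dev))
                return -ENODEV;
        pci_set_master(dev);

        /* 14 = forced legacy IRQ; the helper fills hw[i] and hws[i] */
        ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);

        return ide_host_add(d, hws, NULL);
}
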
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index f5534c1ff349..f235db8c678b 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -22,6 +22,8 @@
22 22
23#include <asm/io.h> 23#include <asm/io.h>
24 24
25#define DRV_NAME "cs5530"
26
25/* 27/*
26 * Here are the standard PIO mode 0-4 timings for each "format". 28 * Here are the standard PIO mode 0-4 timings for each "format".
27 * Format-0 uses fast data reg timings, with slower command reg timings. 29 * Format-0 uses fast data reg timings, with slower command reg timings.
@@ -127,12 +129,11 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
127/** 129/**
128 * init_chipset_5530 - set up 5530 bridge 130 * init_chipset_5530 - set up 5530 bridge
129 * @dev: PCI device 131 * @dev: PCI device
130 * @name: device name
131 * 132 *
132 * Initialize the cs5530 bridge for reliable IDE DMA operation. 133 * Initialize the cs5530 bridge for reliable IDE DMA operation.
133 */ 134 */
134 135
135static unsigned int __devinit init_chipset_cs5530 (struct pci_dev *dev, const char *name) 136static unsigned int __devinit init_chipset_cs5530(struct pci_dev *dev)
136{ 137{
137 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL; 138 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
138 139
@@ -151,11 +152,11 @@ static unsigned int __devinit init_chipset_cs5530 (struct pci_dev *dev, const ch
151 } 152 }
152 } 153 }
153 if (!master_0) { 154 if (!master_0) {
154 printk(KERN_ERR "%s: unable to locate PCI MASTER function\n", name); 155 printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
155 goto out; 156 goto out;
156 } 157 }
157 if (!cs5530_0) { 158 if (!cs5530_0) {
158 printk(KERN_ERR "%s: unable to locate CS5530 LEGACY function\n", name); 159 printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
159 goto out; 160 goto out;
160 } 161 }
161 162
@@ -243,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
243}; 244};
244 245
245static const struct ide_port_info cs5530_chipset __devinitdata = { 246static const struct ide_port_info cs5530_chipset __devinitdata = {
246 .name = "CS5530", 247 .name = DRV_NAME,
247 .init_chipset = init_chipset_cs5530, 248 .init_chipset = init_chipset_cs5530,
248 .init_hwif = init_hwif_cs5530, 249 .init_hwif = init_hwif_cs5530,
249 .port_ops = &cs5530_port_ops, 250 .port_ops = &cs5530_port_ops,
@@ -256,7 +257,7 @@ static const struct ide_port_info cs5530_chipset __devinitdata = {
256 257
257static int __devinit cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id) 258static int __devinit cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
258{ 259{
259 return ide_setup_pci_device(dev, &cs5530_chipset); 260 return ide_pci_init_one(dev, &cs5530_chipset, NULL);
260} 261}
261 262
262static const struct pci_device_id cs5530_pci_tbl[] = { 263static const struct pci_device_id cs5530_pci_tbl[] = {
@@ -269,6 +270,7 @@ static struct pci_driver driver = {
269 .name = "CS5530 IDE", 270 .name = "CS5530 IDE",
270 .id_table = cs5530_pci_tbl, 271 .id_table = cs5530_pci_tbl,
271 .probe = cs5530_init_one, 272 .probe = cs5530_init_one,
273 .remove = ide_pci_remove,
272}; 274};
273 275
274static int __init cs5530_ide_init(void) 276static int __init cs5530_ide_init(void)
@@ -276,7 +278,13 @@ static int __init cs5530_ide_init(void)
276 return ide_pci_register_driver(&driver); 278 return ide_pci_register_driver(&driver);
277} 279}
278 280
281static void __exit cs5530_ide_exit(void)
282{
283 pci_unregister_driver(&driver);
284}
285
279module_init(cs5530_ide_init); 286module_init(cs5530_ide_init);
287module_exit(cs5530_ide_exit);
280 288
281MODULE_AUTHOR("Mark Lord"); 289MODULE_AUTHOR("Mark Lord");
282MODULE_DESCRIPTION("PCI driver module for Cyrix/NS 5530 IDE"); 290MODULE_DESCRIPTION("PCI driver module for Cyrix/NS 5530 IDE");
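
With init_chipset losing its name argument (messages now use DRV_NAME and pci_name()), .remove wired to ide_pci_remove() and a module_exit() added, most drivers touched by this series converge on the same skeleton. The sketch below shows that shape with placeholder names; the CS5530 PCI ID is reused purely as a stand-in, and only ide_pci_init_one(), ide_pci_remove() and ide_pci_register_driver() are the IDE-core entry points relied on above.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ide.h>

#define DRV_NAME "example"

static const struct ide_port_info example_chipset __devinitdata = {
        .name           = DRV_NAME,
        .host_flags     = IDE_HFLAG_NO_DMA,     /* placeholder capabilities */
        .pio_mask       = ATA_PIO4,
};

static int __devinit example_init_one(struct pci_dev *dev,
                                      const struct pci_device_id *id)
{
        return ide_pci_init_one(dev, &example_chipset, NULL);
}

static const struct pci_device_id example_pci_tbl[] = {
        { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), 0 },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static struct pci_driver example_pci_driver = {
        .name           = "example_IDE",
        .id_table       = example_pci_tbl,
        .probe          = example_init_one,
        .remove         = ide_pci_remove,
};

static int __init example_ide_init(void)
{
        return ide_pci_register_driver(&example_pci_driver);
}

static void __exit example_ide_exit(void)
{
        pci_unregister_driver(&example_pci_driver);
}

module_init(example_ide_init);
module_exit(example_ide_exit);
MODULE_LICENSE("GPL");
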
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index dc97c48623f3..f7b50cdeefa6 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -26,6 +26,8 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/ide.h> 27#include <linux/ide.h>
28 28
29#define DRV_NAME "cs5535"
30
29#define MSR_ATAC_BASE 0x51300000 31#define MSR_ATAC_BASE 0x51300000
30#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0) 32#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
31#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01) 33#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
@@ -169,10 +171,9 @@ static const struct ide_port_ops cs5535_port_ops = {
169}; 171};
170 172
171static const struct ide_port_info cs5535_chipset __devinitdata = { 173static const struct ide_port_info cs5535_chipset __devinitdata = {
172 .name = "CS5535", 174 .name = DRV_NAME,
173 .port_ops = &cs5535_port_ops, 175 .port_ops = &cs5535_port_ops,
174 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE | 176 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
175 IDE_HFLAG_ABUSE_SET_DMA_MODE,
176 .pio_mask = ATA_PIO4, 177 .pio_mask = ATA_PIO4,
177 .mwdma_mask = ATA_MWDMA2, 178 .mwdma_mask = ATA_MWDMA2,
178 .udma_mask = ATA_UDMA4, 179 .udma_mask = ATA_UDMA4,
@@ -181,7 +182,7 @@ static const struct ide_port_info cs5535_chipset __devinitdata = {
181static int __devinit cs5535_init_one(struct pci_dev *dev, 182static int __devinit cs5535_init_one(struct pci_dev *dev,
182 const struct pci_device_id *id) 183 const struct pci_device_id *id)
183{ 184{
184 return ide_setup_pci_device(dev, &cs5535_chipset); 185 return ide_pci_init_one(dev, &cs5535_chipset, NULL);
185} 186}
186 187
187static const struct pci_device_id cs5535_pci_tbl[] = { 188static const struct pci_device_id cs5535_pci_tbl[] = {
@@ -195,6 +196,7 @@ static struct pci_driver driver = {
195 .name = "CS5535_IDE", 196 .name = "CS5535_IDE",
196 .id_table = cs5535_pci_tbl, 197 .id_table = cs5535_pci_tbl,
197 .probe = cs5535_init_one, 198 .probe = cs5535_init_one,
199 .remove = ide_pci_remove,
198}; 200};
199 201
200static int __init cs5535_ide_init(void) 202static int __init cs5535_ide_init(void)
@@ -202,7 +204,13 @@ static int __init cs5535_ide_init(void)
202 return ide_pci_register_driver(&driver); 204 return ide_pci_register_driver(&driver);
203} 205}
204 206
207static void __exit cs5535_ide_exit(void)
208{
209 pci_unregister_driver(&driver);
210}
211
205module_init(cs5535_ide_init); 212module_init(cs5535_ide_init);
213module_exit(cs5535_ide_exit);
206 214
207MODULE_AUTHOR("AMD"); 215MODULE_AUTHOR("AMD");
208MODULE_DESCRIPTION("PCI driver module for AMD/NS CS5535 IDE"); 216MODULE_DESCRIPTION("PCI driver module for AMD/NS CS5535 IDE");
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index e14ad5530fa4..bfae2f882f48 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -48,6 +48,8 @@
48 48
49#include <asm/io.h> 49#include <asm/io.h>
50 50
51#define DRV_NAME "cy82c693"
52
51/* the current version */ 53/* the current version */
52#define CY82_VERSION "CY82C693U driver v0.34 99-13-12 Andreas S. Krebs (akrebs@altavista.net)" 54#define CY82_VERSION "CY82C693U driver v0.34 99-13-12 Andreas S. Krebs (akrebs@altavista.net)"
53 55
@@ -330,7 +332,7 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
330/* 332/*
331 * this function is called during init and is used to setup the cy82c693 chip 333 * this function is called during init and is used to setup the cy82c693 chip
332 */ 334 */
333static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const char *name) 335static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev)
334{ 336{
335 if (PCI_FUNC(dev->devfn) != 1) 337 if (PCI_FUNC(dev->devfn) != 1)
336 return 0; 338 return 0;
@@ -349,8 +351,8 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
349 data = inb(CY82_DATA_PORT); 351 data = inb(CY82_DATA_PORT);
350 352
351#if CY82C693_DEBUG_INFO 353#if CY82C693_DEBUG_INFO
352 printk(KERN_INFO "%s: Peripheral Configuration Register: 0x%X\n", 354 printk(KERN_INFO DRV_NAME ": Peripheral Configuration Register: 0x%X\n",
353 name, data); 355 data);
354#endif /* CY82C693_DEBUG_INFO */ 356#endif /* CY82C693_DEBUG_INFO */
355 357
356 /* 358 /*
@@ -371,8 +373,8 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
371 outb(data, CY82_DATA_PORT); 373 outb(data, CY82_DATA_PORT);
372 374
373#if CY82C693_DEBUG_INFO 375#if CY82C693_DEBUG_INFO
 374 printk(KERN_INFO "%s: New Peripheral Configuration Register: 0x%X\n", 376 printk(KERN_INFO DRV_NAME ": New Peripheral Configuration Register: 0x%X\n",
375 name, data); 377 data);
376#endif /* CY82C693_DEBUG_INFO */ 378#endif /* CY82C693_DEBUG_INFO */
377 379
378#endif /* CY82C693_SETDMA_CLOCK */ 380#endif /* CY82C693_SETDMA_CLOCK */
@@ -398,7 +400,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
398}; 400};
399 401
400static const struct ide_port_info cy82c693_chipset __devinitdata = { 402static const struct ide_port_info cy82c693_chipset __devinitdata = {
401 .name = "CY82C693", 403 .name = DRV_NAME,
402 .init_chipset = init_chipset_cy82c693, 404 .init_chipset = init_chipset_cy82c693,
403 .init_iops = init_iops_cy82c693, 405 .init_iops = init_iops_cy82c693,
404 .port_ops = &cy82c693_port_ops, 406 .port_ops = &cy82c693_port_ops,
@@ -419,12 +421,22 @@ static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_dev
419 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && 421 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
420 PCI_FUNC(dev->devfn) == 1) { 422 PCI_FUNC(dev->devfn) == 1) {
421 dev2 = pci_get_slot(dev->bus, dev->devfn + 1); 423 dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
422 ret = ide_setup_pci_devices(dev, dev2, &cy82c693_chipset); 424 ret = ide_pci_init_two(dev, dev2, &cy82c693_chipset, NULL);
423 /* We leak pci refs here but thats ok - we can't be unloaded */ 425 if (ret)
426 pci_dev_put(dev2);
424 } 427 }
425 return ret; 428 return ret;
426} 429}
427 430
431static void __devexit cy82c693_remove(struct pci_dev *dev)
432{
433 struct ide_host *host = pci_get_drvdata(dev);
434 struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
435
436 ide_pci_remove(dev);
437 pci_dev_put(dev2);
438}
439
428static const struct pci_device_id cy82c693_pci_tbl[] = { 440static const struct pci_device_id cy82c693_pci_tbl[] = {
429 { PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), 0 }, 441 { PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), 0 },
430 { 0, }, 442 { 0, },
@@ -435,6 +447,7 @@ static struct pci_driver driver = {
435 .name = "Cypress_IDE", 447 .name = "Cypress_IDE",
436 .id_table = cy82c693_pci_tbl, 448 .id_table = cy82c693_pci_tbl,
437 .probe = cy82c693_init_one, 449 .probe = cy82c693_init_one,
450 .remove = cy82c693_remove,
438}; 451};
439 452
440static int __init cy82c693_ide_init(void) 453static int __init cy82c693_ide_init(void)
@@ -442,7 +455,13 @@ static int __init cy82c693_ide_init(void)
442 return ide_pci_register_driver(&driver); 455 return ide_pci_register_driver(&driver);
443} 456}
444 457
458static void __exit cy82c693_ide_exit(void)
459{
460 pci_unregister_driver(&driver);
461}
462
445module_init(cy82c693_ide_init); 463module_init(cy82c693_ide_init);
464module_exit(cy82c693_ide_exit);
446 465
447MODULE_AUTHOR("Andreas Krebs, Andre Hedrick"); 466MODULE_AUTHOR("Andreas Krebs, Andre Hedrick");
448MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE"); 467MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE");
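
The CY82C693 spans two PCI functions, so the probe above goes through ide_pci_init_two() and the new remove hook must drop the reference that pci_get_slot() took on the second function. A condensed sketch of that pairing, with the function-1/class checks and error reporting trimmed:

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ide.h>

static int __devinit example_pair_probe(struct pci_dev *dev,
                                        const struct ide_port_info *d)
{
        struct pci_dev *dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
        int ret = ide_pci_init_two(dev, dev2, d, NULL);

        if (ret)
                pci_dev_put(dev2);      /* probe failed: drop our reference */
        return ret;
}

static void __devexit example_pair_remove(struct pci_dev *dev)
{
        struct ide_host *host = pci_get_drvdata(dev);
        struct pci_dev *dev2 =
                host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;

        ide_pci_remove(dev);    /* tears down both ports of the host */
        pci_dev_put(dev2);      /* balances pci_get_slot() from the probe */
}
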
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index 0106e2a2df77..f84bfb4f600f 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -56,11 +56,10 @@ static const struct ide_port_info delkin_cb_port_info = {
56static int __devinit 56static int __devinit
57delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) 57delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
58{ 58{
59 struct ide_host *host;
59 unsigned long base; 60 unsigned long base;
60 hw_regs_t hw;
61 ide_hwif_t *hwif = NULL;
62 int i, rc; 61 int i, rc;
63 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 62 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
64 63
65 rc = pci_enable_device(dev); 64 rc = pci_enable_device(dev);
66 if (rc) { 65 if (rc) {
@@ -87,34 +86,26 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
87 hw.dev = &dev->dev; 86 hw.dev = &dev->dev;
88 hw.chipset = ide_pci; /* this enables IRQ sharing */ 87 hw.chipset = ide_pci; /* this enables IRQ sharing */
89 88
90 hwif = ide_find_port(); 89 rc = ide_host_add(&delkin_cb_port_info, hws, &host);
91 if (hwif == NULL) 90 if (rc)
92 goto out_disable; 91 goto out_disable;
93 92
94 i = hwif->index; 93 pci_set_drvdata(dev, host);
95
96 ide_init_port_hw(hwif, &hw);
97
98 idx[0] = i;
99
100 ide_device_add(idx, &delkin_cb_port_info);
101
102 pci_set_drvdata(dev, hwif);
103 94
104 return 0; 95 return 0;
105 96
106out_disable: 97out_disable:
107 pci_release_regions(dev); 98 pci_release_regions(dev);
108 pci_disable_device(dev); 99 pci_disable_device(dev);
109 return -ENODEV; 100 return rc;
110} 101}
111 102
112static void 103static void
113delkin_cb_remove (struct pci_dev *dev) 104delkin_cb_remove (struct pci_dev *dev)
114{ 105{
115 ide_hwif_t *hwif = pci_get_drvdata(dev); 106 struct ide_host *host = pci_get_drvdata(dev);
116 107
117 ide_unregister(hwif); 108 ide_host_remove(host);
118 109
119 pci_release_regions(dev); 110 pci_release_regions(dev);
120 pci_disable_device(dev); 111 pci_disable_device(dev);
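
For this single-port CardBus adapter the per-hwif bookkeeping collapses to one hw_regs_t, an hws[] with a single live slot, and the struct ide_host pointer stored in PCI drvdata so that ->remove() can simply call ide_host_remove(). A sketch of the attach side; it assumes ide_std_init_ports() for the taskfile layout and uses made-up register offsets, so it is not the Delkin adapter's actual setup.

#include <linux/string.h>
#include <linux/pci.h>
#include <linux/ide.h>

static int example_single_port_attach(struct pci_dev *dev,
                                      unsigned long base)
{
        struct ide_host *host;
        hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
        int rc;

        memset(&hw, 0, sizeof(hw));
        ide_std_init_ports(&hw, base, base + 0x1c);     /* offsets are made up */
        hw.irq = dev->irq;
        hw.dev = &dev->dev;
        hw.chipset = ide_pci;           /* enables IRQ sharing */

        rc = ide_host_add(&delkin_cb_port_info, hws, &host);
        if (rc)
                return rc;

        pci_set_drvdata(dev, host);     /* delkin_cb_remove() -> ide_host_remove() */
        return 0;
}
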
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 041720e22762..b07d4f4273b3 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -27,6 +27,8 @@
27#include <linux/ide.h> 27#include <linux/ide.h>
28#include <linux/init.h> 28#include <linux/init.h>
29 29
30#define DRV_NAME "ide_pci_generic"
31
30static int ide_generic_all; /* Set to claim all devices */ 32static int ide_generic_all; /* Set to claim all devices */
31 33
32module_param_named(all_generic_ide, ide_generic_all, bool, 0444); 34module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
@@ -34,9 +36,9 @@ MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE st
34 36
35#define IDE_HFLAGS_UMC (IDE_HFLAG_NO_DMA | IDE_HFLAG_FORCE_LEGACY_IRQS) 37#define IDE_HFLAGS_UMC (IDE_HFLAG_NO_DMA | IDE_HFLAG_FORCE_LEGACY_IRQS)
36 38
37#define DECLARE_GENERIC_PCI_DEV(name_str, extra_flags) \ 39#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
38 { \ 40 { \
39 .name = name_str, \ 41 .name = DRV_NAME, \
40 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \ 42 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \
41 extra_flags, \ 43 extra_flags, \
42 .swdma_mask = ATA_SWDMA2, \ 44 .swdma_mask = ATA_SWDMA2, \
@@ -45,10 +47,11 @@ MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE st
45 } 47 }
46 48
47static const struct ide_port_info generic_chipsets[] __devinitdata = { 49static const struct ide_port_info generic_chipsets[] __devinitdata = {
48 /* 0 */ DECLARE_GENERIC_PCI_DEV("Unknown", 0), 50 /* 0: Unknown */
51 DECLARE_GENERIC_PCI_DEV(0),
49 52
50 { /* 1 */ 53 { /* 1: NS87410 */
51 .name = "NS87410", 54 .name = DRV_NAME,
52 .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} }, 55 .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} },
53 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA, 56 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
54 .swdma_mask = ATA_SWDMA2, 57 .swdma_mask = ATA_SWDMA2,
@@ -56,17 +59,15 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
56 .udma_mask = ATA_UDMA6, 59 .udma_mask = ATA_UDMA6,
57 }, 60 },
58 61
59 /* 2 */ DECLARE_GENERIC_PCI_DEV("SAMURAI", 0), 62 /* 2: SAMURAI / HT6565 / HINT_IDE */
60 /* 3 */ DECLARE_GENERIC_PCI_DEV("HT6565", 0), 63 DECLARE_GENERIC_PCI_DEV(0),
61 /* 4 */ DECLARE_GENERIC_PCI_DEV("UM8673F", IDE_HFLAGS_UMC), 64 /* 3: UM8673F / UM8886A / UM8886BF */
62 /* 5 */ DECLARE_GENERIC_PCI_DEV("UM8886A", IDE_HFLAGS_UMC), 65 DECLARE_GENERIC_PCI_DEV(IDE_HFLAGS_UMC),
63 /* 6 */ DECLARE_GENERIC_PCI_DEV("UM8886BF", IDE_HFLAGS_UMC), 66 /* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */
64 /* 7 */ DECLARE_GENERIC_PCI_DEV("HINT_IDE", 0), 67 DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA),
65 /* 8 */ DECLARE_GENERIC_PCI_DEV("VIA_IDE", IDE_HFLAG_NO_AUTODMA), 68
66 /* 9 */ DECLARE_GENERIC_PCI_DEV("OPTI621V", IDE_HFLAG_NO_AUTODMA), 69 { /* 5: VIA8237SATA */
67 70 .name = DRV_NAME,
68 { /* 10 */
69 .name = "VIA8237SATA",
70 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 71 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
71 IDE_HFLAG_OFF_BOARD, 72 IDE_HFLAG_OFF_BOARD,
72 .swdma_mask = ATA_SWDMA2, 73 .swdma_mask = ATA_SWDMA2,
@@ -74,12 +75,8 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
74 .udma_mask = ATA_UDMA6, 75 .udma_mask = ATA_UDMA6,
75 }, 76 },
76 77
77 /* 11 */ DECLARE_GENERIC_PCI_DEV("Piccolo0102", IDE_HFLAG_NO_AUTODMA), 78 { /* 6: Revolution */
78 /* 12 */ DECLARE_GENERIC_PCI_DEV("Piccolo0103", IDE_HFLAG_NO_AUTODMA), 79 .name = DRV_NAME,
79 /* 13 */ DECLARE_GENERIC_PCI_DEV("Piccolo0105", IDE_HFLAG_NO_AUTODMA),
80
81 { /* 14 */
82 .name = "Revolution",
83 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | 80 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
84 IDE_HFLAG_TRUST_BIOS_FOR_DMA | 81 IDE_HFLAG_TRUST_BIOS_FOR_DMA |
85 IDE_HFLAG_OFF_BOARD, 82 IDE_HFLAG_OFF_BOARD,
@@ -134,12 +131,12 @@ static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_devi
134 u16 command; 131 u16 command;
135 pci_read_config_word(dev, PCI_COMMAND, &command); 132 pci_read_config_word(dev, PCI_COMMAND, &command);
136 if (!(command & PCI_COMMAND_IO)) { 133 if (!(command & PCI_COMMAND_IO)) {
137 printk(KERN_INFO "Skipping disabled %s IDE " 134 printk(KERN_INFO "%s %s: skipping disabled "
138 "controller.\n", d->name); 135 "controller\n", d->name, pci_name(dev));
139 goto out; 136 goto out;
140 } 137 }
141 } 138 }
142 ret = ide_setup_pci_device(dev, d); 139 ret = ide_pci_init_one(dev, d, NULL);
143out: 140out:
144 return ret; 141 return ret;
145} 142}
@@ -147,20 +144,20 @@ out:
147static const struct pci_device_id generic_pci_tbl[] = { 144static const struct pci_device_id generic_pci_tbl[] = {
148 { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), 1 }, 145 { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), 1 },
149 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), 2 }, 146 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), 2 },
150 { PCI_VDEVICE(HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), 3 }, 147 { PCI_VDEVICE(HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), 2 },
151 { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8673F), 4 }, 148 { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8673F), 3 },
152 { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886A), 5 }, 149 { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886A), 3 },
153 { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886BF), 6 }, 150 { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886BF), 3 },
154 { PCI_VDEVICE(HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), 7 }, 151 { PCI_VDEVICE(HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), 2 },
155 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C561), 8 }, 152 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C561), 4 },
156 { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C558), 9 }, 153 { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C558), 4 },
157#ifdef CONFIG_BLK_DEV_IDE_SATA 154#ifdef CONFIG_BLK_DEV_IDE_SATA
158 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237_SATA), 10 }, 155 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237_SATA), 5 },
159#endif 156#endif
160 { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO), 11 }, 157 { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO), 4 },
161 { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), 12 }, 158 { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), 4 },
162 { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), 13 }, 159 { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), 4 },
163 { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), 14 }, 160 { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), 6 },
164 /* 161 /*
165 * Must come last. If you add entries adjust 162 * Must come last. If you add entries adjust
166 * this table and generic_chipsets[] appropriately. 163 * this table and generic_chipsets[] appropriately.
@@ -174,6 +171,7 @@ static struct pci_driver driver = {
174 .name = "PCI_IDE", 171 .name = "PCI_IDE",
175 .id_table = generic_pci_tbl, 172 .id_table = generic_pci_tbl,
176 .probe = generic_init_one, 173 .probe = generic_init_one,
174 .remove = ide_pci_remove,
177}; 175};
178 176
179static int __init generic_ide_init(void) 177static int __init generic_ide_init(void)
@@ -181,7 +179,13 @@ static int __init generic_ide_init(void)
181 return ide_pci_register_driver(&driver); 179 return ide_pci_register_driver(&driver);
182} 180}
183 181
182static void __exit generic_ide_exit(void)
183{
184 pci_unregister_driver(&driver);
185}
186
184module_init(generic_ide_init); 187module_init(generic_ide_init);
188module_exit(generic_ide_exit);
185 189
186MODULE_AUTHOR("Andre Hedrick"); 190MODULE_AUTHOR("Andre Hedrick");
187MODULE_DESCRIPTION("PCI driver module for generic PCI IDE"); 191MODULE_DESCRIPTION("PCI driver module for generic PCI IDE");
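
Collapsing the near-duplicate entries means several IDs in generic_pci_tbl[] now carry the same driver_data index, so only that table and generic_chipsets[] have to stay in step. A condensed sketch of how the index is consumed on the probe path (the disabled-controller check from the hunk above omitted):

static int __devinit example_generic_probe(struct pci_dev *dev,
                                           const struct pci_device_id *id)
{
        /* driver_data is the per-ID index set in generic_pci_tbl[] */
        const struct ide_port_info *d = &generic_chipsets[id->driver_data];

        return ide_pci_init_one(dev, d, NULL);
}
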
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 84c36c117194..6009b0b9655d 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -33,6 +33,8 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/ide.h> 34#include <linux/ide.h>
35 35
36#define DRV_NAME "hpt34x"
37
36#define HPT343_DEBUG_DRIVE_INFO 0 38#define HPT343_DEBUG_DRIVE_INFO 0
37 39
38static void hpt34x_set_mode(ide_drive_t *drive, const u8 speed) 40static void hpt34x_set_mode(ide_drive_t *drive, const u8 speed)
@@ -77,7 +79,7 @@ static void hpt34x_set_pio_mode(ide_drive_t *drive, const u8 pio)
77 */ 79 */
78#define HPT34X_PCI_INIT_REG 0x80 80#define HPT34X_PCI_INIT_REG 0x80
79 81
80static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const char *name) 82static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev)
81{ 83{
82 int i = 0; 84 int i = 0;
83 unsigned long hpt34xIoBase = pci_resource_start(dev, 4); 85 unsigned long hpt34xIoBase = pci_resource_start(dev, 4);
@@ -123,19 +125,18 @@ static const struct ide_port_ops hpt34x_port_ops = {
123#define IDE_HFLAGS_HPT34X \ 125#define IDE_HFLAGS_HPT34X \
124 (IDE_HFLAG_NO_ATAPI_DMA | \ 126 (IDE_HFLAG_NO_ATAPI_DMA | \
125 IDE_HFLAG_NO_DSC | \ 127 IDE_HFLAG_NO_DSC | \
126 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
127 IDE_HFLAG_NO_AUTODMA) 128 IDE_HFLAG_NO_AUTODMA)
128 129
129static const struct ide_port_info hpt34x_chipsets[] __devinitdata = { 130static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
130 { /* 0 */ 131 { /* 0: HPT343 */
131 .name = "HPT343", 132 .name = DRV_NAME,
132 .init_chipset = init_chipset_hpt34x, 133 .init_chipset = init_chipset_hpt34x,
133 .port_ops = &hpt34x_port_ops, 134 .port_ops = &hpt34x_port_ops,
134 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_NON_BOOTABLE, 135 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_NON_BOOTABLE,
135 .pio_mask = ATA_PIO5, 136 .pio_mask = ATA_PIO5,
136 }, 137 },
137 { /* 1 */ 138 { /* 1: HPT345 */
138 .name = "HPT345", 139 .name = DRV_NAME,
139 .init_chipset = init_chipset_hpt34x, 140 .init_chipset = init_chipset_hpt34x,
140 .port_ops = &hpt34x_port_ops, 141 .port_ops = &hpt34x_port_ops,
141 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD, 142 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD,
@@ -157,7 +158,7 @@ static int __devinit hpt34x_init_one(struct pci_dev *dev, const struct pci_devic
157 158
158 d = &hpt34x_chipsets[(pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0]; 159 d = &hpt34x_chipsets[(pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0];
159 160
160 return ide_setup_pci_device(dev, d); 161 return ide_pci_init_one(dev, d, NULL);
161} 162}
162 163
163static const struct pci_device_id hpt34x_pci_tbl[] = { 164static const struct pci_device_id hpt34x_pci_tbl[] = {
@@ -170,6 +171,7 @@ static struct pci_driver driver = {
170 .name = "HPT34x_IDE", 171 .name = "HPT34x_IDE",
171 .id_table = hpt34x_pci_tbl, 172 .id_table = hpt34x_pci_tbl,
172 .probe = hpt34x_init_one, 173 .probe = hpt34x_init_one,
174 .remove = ide_pci_remove,
173}; 175};
174 176
175static int __init hpt34x_ide_init(void) 177static int __init hpt34x_ide_init(void)
@@ -177,7 +179,13 @@ static int __init hpt34x_ide_init(void)
177 return ide_pci_register_driver(&driver); 179 return ide_pci_register_driver(&driver);
178} 180}
179 181
182static void __exit hpt34x_ide_exit(void)
183{
184 pci_unregister_driver(&driver);
185}
186
180module_init(hpt34x_ide_init); 187module_init(hpt34x_ide_init);
188module_exit(hpt34x_ide_exit);
181 189
182MODULE_AUTHOR("Andre Hedrick"); 190MODULE_AUTHOR("Andre Hedrick");
183MODULE_DESCRIPTION("PCI driver module for Highpoint 34x IDE"); 191MODULE_DESCRIPTION("PCI driver module for Highpoint 34x IDE");
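
hpt34x keeps two ide_port_info entries and picks one at probe time: if the BIOS left the function's memory space enabled the chip is treated as an HPT345, otherwise as a plain HPT343. A sketch of that selection feeding ide_pci_init_one(); the PCI_COMMAND read is assumed from context, only the array indexing and the final call appear in the hunk above.

static int __devinit example_hpt34x_probe(struct pci_dev *dev,
                                          const struct pci_device_id *id)
{
        const struct ide_port_info *d;
        u16 pcicmd = 0;

        pci_read_config_word(dev, PCI_COMMAND, &pcicmd);

        /* 1: HPT345 (memory space enabled), 0: HPT343 */
        d = &hpt34x_chipsets[(pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0];

        return ide_pci_init_one(dev, d, NULL);
}
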
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 397c6cbe953c..5271b246b88c 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -131,6 +131,8 @@
131#include <asm/uaccess.h> 131#include <asm/uaccess.h>
132#include <asm/io.h> 132#include <asm/io.h>
133 133
134#define DRV_NAME "hpt366"
135
134/* various tuning parameters */ 136/* various tuning parameters */
135#define HPT_RESET_STATE_ENGINE 137#define HPT_RESET_STATE_ENGINE
136#undef HPT_DELAY_INTERRUPT 138#undef HPT_DELAY_INTERRUPT
@@ -620,7 +622,8 @@ static u8 hpt3xx_udma_filter(ide_drive_t *drive)
620{ 622{
621 ide_hwif_t *hwif = HWIF(drive); 623 ide_hwif_t *hwif = HWIF(drive);
622 struct pci_dev *dev = to_pci_dev(hwif->dev); 624 struct pci_dev *dev = to_pci_dev(hwif->dev);
623 struct hpt_info *info = pci_get_drvdata(dev); 625 struct ide_host *host = pci_get_drvdata(dev);
 626 struct hpt_info *info = (struct hpt_info *)host->host_priv + (hwif->dev == host->dev[1]);
624 u8 mask = hwif->ultra_mask; 627 u8 mask = hwif->ultra_mask;
625 628
626 switch (info->chip_type) { 629 switch (info->chip_type) {
@@ -660,7 +663,8 @@ static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
660{ 663{
661 ide_hwif_t *hwif = HWIF(drive); 664 ide_hwif_t *hwif = HWIF(drive);
662 struct pci_dev *dev = to_pci_dev(hwif->dev); 665 struct pci_dev *dev = to_pci_dev(hwif->dev);
663 struct hpt_info *info = pci_get_drvdata(dev); 666 struct ide_host *host = pci_get_drvdata(dev);
 667 struct hpt_info *info = (struct hpt_info *)host->host_priv + (hwif->dev == host->dev[1]);
664 668
665 switch (info->chip_type) { 669 switch (info->chip_type) {
666 case HPT372 : 670 case HPT372 :
@@ -694,8 +698,10 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
694 698
695static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed) 699static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
696{ 700{
697 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 701 ide_hwif_t *hwif = drive->hwif;
698 struct hpt_info *info = pci_get_drvdata(dev); 702 struct pci_dev *dev = to_pci_dev(hwif->dev);
703 struct ide_host *host = pci_get_drvdata(dev);
 704 struct hpt_info *info = (struct hpt_info *)host->host_priv + (hwif->dev == host->dev[1]);
699 struct hpt_timings *t = info->timings; 705 struct hpt_timings *t = info->timings;
700 u8 itr_addr = 0x40 + (drive->dn * 4); 706 u8 itr_addr = 0x40 + (drive->dn * 4);
701 u32 old_itr = 0; 707 u32 old_itr = 0;
@@ -738,7 +744,8 @@ static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
738{ 744{
739 ide_hwif_t *hwif = HWIF(drive); 745 ide_hwif_t *hwif = HWIF(drive);
740 struct pci_dev *dev = to_pci_dev(hwif->dev); 746 struct pci_dev *dev = to_pci_dev(hwif->dev);
741 struct hpt_info *info = pci_get_drvdata(dev); 747 struct ide_host *host = pci_get_drvdata(dev);
 748 struct hpt_info *info = (struct hpt_info *)host->host_priv + (hwif->dev == host->dev[1]);
742 749
743 if (drive->quirk_list) { 750 if (drive->quirk_list) {
744 if (info->chip_type >= HPT370) { 751 if (info->chip_type >= HPT370) {
@@ -801,9 +808,9 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
801 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff); 808 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
802 809
803 /* get DMA command mode */ 810 /* get DMA command mode */
804 dma_cmd = inb(hwif->dma_command); 811 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
805 /* stop DMA */ 812 /* stop DMA */
806 outb(dma_cmd & ~0x1, hwif->dma_command); 813 outb(dma_cmd & ~0x1, hwif->dma_base + ATA_DMA_CMD);
807 hpt370_clear_engine(drive); 814 hpt370_clear_engine(drive);
808} 815}
809 816
@@ -818,12 +825,12 @@ static void hpt370_dma_start(ide_drive_t *drive)
818static int hpt370_dma_end(ide_drive_t *drive) 825static int hpt370_dma_end(ide_drive_t *drive)
819{ 826{
820 ide_hwif_t *hwif = HWIF(drive); 827 ide_hwif_t *hwif = HWIF(drive);
821 u8 dma_stat = inb(hwif->dma_status); 828 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
822 829
823 if (dma_stat & 0x01) { 830 if (dma_stat & 0x01) {
824 /* wait a little */ 831 /* wait a little */
825 udelay(20); 832 udelay(20);
826 dma_stat = inb(hwif->dma_status); 833 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
827 if (dma_stat & 0x01) 834 if (dma_stat & 0x01)
828 hpt370_irq_timeout(drive); 835 hpt370_irq_timeout(drive);
829 } 836 }
@@ -850,7 +857,7 @@ static int hpt374_dma_test_irq(ide_drive_t *drive)
850 return 0; 857 return 0;
851 } 858 }
852 859
853 dma_stat = inb(hwif->dma_status); 860 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
854 /* return 1 if INTR asserted */ 861 /* return 1 if INTR asserted */
855 if (dma_stat & 4) 862 if (dma_stat & 4)
856 return 1; 863 return 1;
@@ -963,24 +970,16 @@ static int __devinit hpt37x_calibrate_dpll(struct pci_dev *dev, u16 f_low, u16 f
963 return 1; 970 return 1;
964} 971}
965 972
966static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const char *name) 973static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev)
967{ 974{
968 struct hpt_info *info = kmalloc(sizeof(struct hpt_info), GFP_KERNEL);
969 unsigned long io_base = pci_resource_start(dev, 4); 975 unsigned long io_base = pci_resource_start(dev, 4);
976 struct ide_host *host = pci_get_drvdata(dev);
 977 struct hpt_info *info = (struct hpt_info *)host->host_priv + (&dev->dev == host->dev[1]);
978 const char *name = DRV_NAME;
970 u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */ 979 u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */
971 u8 chip_type; 980 u8 chip_type;
972 enum ata_clock clock; 981 enum ata_clock clock;
973 982
974 if (info == NULL) {
975 printk(KERN_ERR "%s: out of memory!\n", name);
976 return -ENOMEM;
977 }
978
979 /*
980 * Copy everything from a static "template" structure
981 * to just allocated per-chip hpt_info structure.
982 */
983 memcpy(info, pci_get_drvdata(dev), sizeof(struct hpt_info));
984 chip_type = info->chip_type; 983 chip_type = info->chip_type;
985 984
986 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); 985 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
@@ -1048,8 +1047,8 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
1048 if ((temp & 0xFFFFF000) != 0xABCDE000) { 1047 if ((temp & 0xFFFFF000) != 0xABCDE000) {
1049 int i; 1048 int i;
1050 1049
1051 printk(KERN_WARNING "%s: no clock data saved by BIOS\n", 1050 printk(KERN_WARNING "%s %s: no clock data saved by "
1052 name); 1051 "BIOS\n", name, pci_name(dev));
1053 1052
1054 /* Calculate the average value of f_CNT. */ 1053 /* Calculate the average value of f_CNT. */
1055 for (temp = i = 0; i < 128; i++) { 1054 for (temp = i = 0; i < 128; i++) {
@@ -1074,8 +1073,9 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
1074 else 1073 else
1075 pci_clk = 66; 1074 pci_clk = 66;
1076 1075
1077 printk(KERN_INFO "%s: DPLL base: %d MHz, f_CNT: %d, " 1076 printk(KERN_INFO "%s %s: DPLL base: %d MHz, f_CNT: %d, "
1078 "assuming %d MHz PCI\n", name, dpll_clk, f_cnt, pci_clk); 1077 "assuming %d MHz PCI\n", name, pci_name(dev),
1078 dpll_clk, f_cnt, pci_clk);
1079 } else { 1079 } else {
1080 u32 itr1 = 0; 1080 u32 itr1 = 0;
1081 1081
@@ -1141,8 +1141,8 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
1141 } 1141 }
1142 1142
1143 if (info->timings->clock_table[clock] == NULL) { 1143 if (info->timings->clock_table[clock] == NULL) {
1144 printk(KERN_ERR "%s: unknown bus timing!\n", name); 1144 printk(KERN_ERR "%s %s: unknown bus timing!\n",
1145 kfree(info); 1145 name, pci_name(dev));
1146 return -EIO; 1146 return -EIO;
1147 } 1147 }
1148 1148
@@ -1168,17 +1168,19 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
1168 f_low += adjust >> 1; 1168 f_low += adjust >> 1;
1169 } 1169 }
1170 if (adjust == 8) { 1170 if (adjust == 8) {
1171 printk(KERN_ERR "%s: DPLL did not stabilize!\n", name); 1171 printk(KERN_ERR "%s %s: DPLL did not stabilize!\n",
1172 kfree(info); 1172 name, pci_name(dev));
1173 return -EIO; 1173 return -EIO;
1174 } 1174 }
1175 1175
1176 printk("%s: using %d MHz DPLL clock\n", name, dpll_clk); 1176 printk(KERN_INFO "%s %s: using %d MHz DPLL clock\n",
1177 name, pci_name(dev), dpll_clk);
1177 } else { 1178 } else {
1178 /* Mark the fact that we're not using the DPLL. */ 1179 /* Mark the fact that we're not using the DPLL. */
1179 dpll_clk = 0; 1180 dpll_clk = 0;
1180 1181
1181 printk("%s: using %d MHz PCI clock\n", name, pci_clk); 1182 printk(KERN_INFO "%s %s: using %d MHz PCI clock\n",
1183 name, pci_name(dev), pci_clk);
1182 } 1184 }
1183 1185
1184 /* Store the clock frequencies. */ 1186 /* Store the clock frequencies. */
@@ -1186,9 +1188,6 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
1186 info->pci_clk = pci_clk; 1188 info->pci_clk = pci_clk;
1187 info->clock = clock; 1189 info->clock = clock;
1188 1190
1189 /* Point to this chip's own instance of the hpt_info structure. */
1190 pci_set_drvdata(dev, info);
1191
1192 if (chip_type >= HPT370) { 1191 if (chip_type >= HPT370) {
1193 u8 mcr1, mcr4; 1192 u8 mcr1, mcr4;
1194 1193
@@ -1218,7 +1217,8 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
1218static u8 __devinit hpt3xx_cable_detect(ide_hwif_t *hwif) 1217static u8 __devinit hpt3xx_cable_detect(ide_hwif_t *hwif)
1219{ 1218{
1220 struct pci_dev *dev = to_pci_dev(hwif->dev); 1219 struct pci_dev *dev = to_pci_dev(hwif->dev);
1221 struct hpt_info *info = pci_get_drvdata(dev); 1220 struct ide_host *host = pci_get_drvdata(dev);
 1221 struct hpt_info *info = (struct hpt_info *)host->host_priv + (hwif->dev == host->dev[1]);
1222 u8 chip_type = info->chip_type; 1222 u8 chip_type = info->chip_type;
1223 u8 scr1 = 0, ata66 = hwif->channel ? 0x01 : 0x02; 1223 u8 scr1 = 0, ata66 = hwif->channel ? 0x01 : 0x02;
1224 1224
@@ -1262,7 +1262,8 @@ static u8 __devinit hpt3xx_cable_detect(ide_hwif_t *hwif)
1262static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) 1262static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1263{ 1263{
1264 struct pci_dev *dev = to_pci_dev(hwif->dev); 1264 struct pci_dev *dev = to_pci_dev(hwif->dev);
1265 struct hpt_info *info = pci_get_drvdata(dev); 1265 struct ide_host *host = pci_get_drvdata(dev);
 1266 struct hpt_info *info = (struct hpt_info *)host->host_priv + (hwif->dev == host->dev[1]);
1266 int serialize = HPT_SERIALIZE_IO; 1267 int serialize = HPT_SERIALIZE_IO;
1267 u8 chip_type = info->chip_type; 1268 u8 chip_type = info->chip_type;
1268 u8 new_mcr, old_mcr = 0; 1269 u8 new_mcr, old_mcr = 0;
@@ -1320,7 +1321,15 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
1320 unsigned long flags, base = ide_pci_dma_base(hwif, d); 1321 unsigned long flags, base = ide_pci_dma_base(hwif, d);
1321 u8 dma_old, dma_new, masterdma = 0, slavedma = 0; 1322 u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
1322 1323
1323 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 1324 if (base == 0)
1325 return -1;
1326
1327 hwif->dma_base = base;
1328
1329 if (ide_pci_check_simplex(hwif, d) < 0)
1330 return -1;
1331
1332 if (ide_pci_set_master(dev, d->name) < 0)
1324 return -1; 1333 return -1;
1325 1334
1326 dma_old = inb(base + 2); 1335 dma_old = inb(base + 2);
@@ -1346,7 +1355,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
1346 if (ide_allocate_dma_engine(hwif)) 1355 if (ide_allocate_dma_engine(hwif))
1347 return -1; 1356 return -1;
1348 1357
1349 ide_setup_dma(hwif, base); 1358 hwif->dma_ops = &sff_dma_ops;
1350 1359
1351 return 0; 1360 return 0;
1352} 1361}
@@ -1356,7 +1365,8 @@ static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
1356 if (dev2->irq != dev->irq) { 1365 if (dev2->irq != dev->irq) {
1357 /* FIXME: we need a core pci_set_interrupt() */ 1366 /* FIXME: we need a core pci_set_interrupt() */
1358 dev2->irq = dev->irq; 1367 dev2->irq = dev->irq;
1359 printk(KERN_INFO "HPT374: PCI config space interrupt fixed\n"); 1368 printk(KERN_INFO DRV_NAME " %s: PCI config space interrupt "
1369 "fixed\n", pci_name(dev2));
1360 } 1370 }
1361} 1371}
1362 1372
@@ -1391,8 +1401,8 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
1391 pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2); 1401 pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2);
1392 1402
1393 if (pin1 != pin2 && dev->irq == dev2->irq) { 1403 if (pin1 != pin2 && dev->irq == dev2->irq) {
1394 printk(KERN_INFO "HPT36x: onboard version of chipset, " 1404 printk(KERN_INFO DRV_NAME " %s: onboard version of chipset, "
1395 "pin1=%d pin2=%d\n", pin1, pin2); 1405 "pin1=%d pin2=%d\n", pci_name(dev), pin1, pin2);
1396 return 1; 1406 return 1;
1397 } 1407 }
1398 1408
@@ -1401,7 +1411,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
1401 1411
1402#define IDE_HFLAGS_HPT3XX \ 1412#define IDE_HFLAGS_HPT3XX \
1403 (IDE_HFLAG_NO_ATAPI_DMA | \ 1413 (IDE_HFLAG_NO_ATAPI_DMA | \
1404 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
1405 IDE_HFLAG_OFF_BOARD) 1414 IDE_HFLAG_OFF_BOARD)
1406 1415
1407static const struct ide_port_ops hpt3xx_port_ops = { 1416static const struct ide_port_ops hpt3xx_port_ops = {
@@ -1448,8 +1457,8 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
1448}; 1457};
1449 1458
1450static const struct ide_port_info hpt366_chipsets[] __devinitdata = { 1459static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1451 { /* 0 */ 1460 { /* 0: HPT36x */
1452 .name = "HPT36x", 1461 .name = DRV_NAME,
1453 .init_chipset = init_chipset_hpt366, 1462 .init_chipset = init_chipset_hpt366,
1454 .init_hwif = init_hwif_hpt366, 1463 .init_hwif = init_hwif_hpt366,
1455 .init_dma = init_dma_hpt366, 1464 .init_dma = init_dma_hpt366,
@@ -1465,53 +1474,9 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1465 .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE, 1474 .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
1466 .pio_mask = ATA_PIO4, 1475 .pio_mask = ATA_PIO4,
1467 .mwdma_mask = ATA_MWDMA2, 1476 .mwdma_mask = ATA_MWDMA2,
1468 },{ /* 1 */ 1477 },
1469 .name = "HPT372A", 1478 { /* 1: HPT3xx */
1470 .init_chipset = init_chipset_hpt366, 1479 .name = DRV_NAME,
1471 .init_hwif = init_hwif_hpt366,
1472 .init_dma = init_dma_hpt366,
1473 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1474 .port_ops = &hpt3xx_port_ops,
1475 .dma_ops = &hpt37x_dma_ops,
1476 .host_flags = IDE_HFLAGS_HPT3XX,
1477 .pio_mask = ATA_PIO4,
1478 .mwdma_mask = ATA_MWDMA2,
1479 },{ /* 2 */
1480 .name = "HPT302",
1481 .init_chipset = init_chipset_hpt366,
1482 .init_hwif = init_hwif_hpt366,
1483 .init_dma = init_dma_hpt366,
1484 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1485 .port_ops = &hpt3xx_port_ops,
1486 .dma_ops = &hpt37x_dma_ops,
1487 .host_flags = IDE_HFLAGS_HPT3XX,
1488 .pio_mask = ATA_PIO4,
1489 .mwdma_mask = ATA_MWDMA2,
1490 },{ /* 3 */
1491 .name = "HPT371",
1492 .init_chipset = init_chipset_hpt366,
1493 .init_hwif = init_hwif_hpt366,
1494 .init_dma = init_dma_hpt366,
1495 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1496 .port_ops = &hpt3xx_port_ops,
1497 .dma_ops = &hpt37x_dma_ops,
1498 .host_flags = IDE_HFLAGS_HPT3XX,
1499 .pio_mask = ATA_PIO4,
1500 .mwdma_mask = ATA_MWDMA2,
1501 },{ /* 4 */
1502 .name = "HPT374",
1503 .init_chipset = init_chipset_hpt366,
1504 .init_hwif = init_hwif_hpt366,
1505 .init_dma = init_dma_hpt366,
1506 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1507 .udma_mask = ATA_UDMA5,
1508 .port_ops = &hpt3xx_port_ops,
1509 .dma_ops = &hpt37x_dma_ops,
1510 .host_flags = IDE_HFLAGS_HPT3XX,
1511 .pio_mask = ATA_PIO4,
1512 .mwdma_mask = ATA_MWDMA2,
1513 },{ /* 5 */
1514 .name = "HPT372N",
1515 .init_chipset = init_chipset_hpt366, 1480 .init_chipset = init_chipset_hpt366,
1516 .init_hwif = init_hwif_hpt366, 1481 .init_hwif = init_hwif_hpt366,
1517 .init_dma = init_dma_hpt366, 1482 .init_dma = init_dma_hpt366,
@@ -1535,10 +1500,12 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1535static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id) 1500static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1536{ 1501{
1537 const struct hpt_info *info = NULL; 1502 const struct hpt_info *info = NULL;
1503 struct hpt_info *dyn_info;
1538 struct pci_dev *dev2 = NULL; 1504 struct pci_dev *dev2 = NULL;
1539 struct ide_port_info d; 1505 struct ide_port_info d;
1540 u8 idx = id->driver_data; 1506 u8 idx = id->driver_data;
1541 u8 rev = dev->revision; 1507 u8 rev = dev->revision;
1508 int ret;
1542 1509
1543 if ((idx == 0 || idx == 4) && (PCI_FUNC(dev->devfn) & 1)) 1510 if ((idx == 0 || idx == 4) && (PCI_FUNC(dev->devfn) & 1))
1544 return -ENODEV; 1511 return -ENODEV;
@@ -1575,24 +1542,35 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
1575 break; 1542 break;
1576 } 1543 }
1577 1544
1578 d = hpt366_chipsets[idx]; 1545 printk(KERN_INFO DRV_NAME ": %s chipset detected\n", info->chip_name);
1546
1547 d = hpt366_chipsets[min_t(u8, idx, 1)];
1579 1548
1580 d.name = info->chip_name;
1581 d.udma_mask = info->udma_mask; 1549 d.udma_mask = info->udma_mask;
1582 1550
1583 /* fixup ->dma_ops for HPT370/HPT370A */ 1551 /* fixup ->dma_ops for HPT370/HPT370A */
1584 if (info == &hpt370 || info == &hpt370a) 1552 if (info == &hpt370 || info == &hpt370a)
1585 d.dma_ops = &hpt370_dma_ops; 1553 d.dma_ops = &hpt370_dma_ops;
1586 1554
1587 pci_set_drvdata(dev, (void *)info);
1588
1589 if (info == &hpt36x || info == &hpt374) 1555 if (info == &hpt36x || info == &hpt374)
1590 dev2 = pci_get_slot(dev->bus, dev->devfn + 1); 1556 dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
1591 1557
1592 if (dev2) { 1558 dyn_info = kzalloc(sizeof(*dyn_info) * (dev2 ? 2 : 1), GFP_KERNEL);
1593 int ret; 1559 if (dyn_info == NULL) {
1560 printk(KERN_ERR "%s %s: out of memory!\n",
1561 d.name, pci_name(dev));
1562 pci_dev_put(dev2);
1563 return -ENOMEM;
1564 }
1594 1565
1595 pci_set_drvdata(dev2, (void *)info); 1566 /*
1567 * Copy everything from a static "template" structure
1568 * to just allocated per-chip hpt_info structure.
1569 */
1570 memcpy(dyn_info, info, sizeof(*dyn_info));
1571
1572 if (dev2) {
1573 memcpy(dyn_info + 1, info, sizeof(*dyn_info));
1596 1574
1597 if (info == &hpt374) 1575 if (info == &hpt374)
1598 hpt374_init(dev, dev2); 1576 hpt374_init(dev, dev2);
@@ -1601,13 +1579,30 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
1601 d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE; 1579 d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE;
1602 } 1580 }
1603 1581
1604 ret = ide_setup_pci_devices(dev, dev2, &d); 1582 ret = ide_pci_init_two(dev, dev2, &d, dyn_info);
1605 if (ret < 0) 1583 if (ret < 0) {
1606 pci_dev_put(dev2); 1584 pci_dev_put(dev2);
1585 kfree(dyn_info);
1586 }
1607 return ret; 1587 return ret;
1608 } 1588 }
1609 1589
1610 return ide_setup_pci_device(dev, &d); 1590 ret = ide_pci_init_one(dev, &d, dyn_info);
1591 if (ret < 0)
1592 kfree(dyn_info);
1593
1594 return ret;
1595}
1596
1597static void __devexit hpt366_remove(struct pci_dev *dev)
1598{
1599 struct ide_host *host = pci_get_drvdata(dev);
 1600 struct hpt_info *info = host->host_priv;
1601 struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
1602
1603 ide_pci_remove(dev);
1604 pci_dev_put(dev2);
1605 kfree(info);
1611} 1606}
1612 1607
1613static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { 1608static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = {
@@ -1625,6 +1620,7 @@ static struct pci_driver driver = {
1625 .name = "HPT366_IDE", 1620 .name = "HPT366_IDE",
1626 .id_table = hpt366_pci_tbl, 1621 .id_table = hpt366_pci_tbl,
1627 .probe = hpt366_init_one, 1622 .probe = hpt366_init_one,
1623 .remove = hpt366_remove,
1628}; 1624};
1629 1625
1630static int __init hpt366_ide_init(void) 1626static int __init hpt366_ide_init(void)
@@ -1632,7 +1628,13 @@ static int __init hpt366_ide_init(void)
1632 return ide_pci_register_driver(&driver); 1628 return ide_pci_register_driver(&driver);
1633} 1629}
1634 1630
1631static void __exit hpt366_ide_exit(void)
1632{
1633 pci_unregister_driver(&driver);
1634}
1635
1635module_init(hpt366_ide_init); 1636module_init(hpt366_ide_init);
1637module_exit(hpt366_ide_exit);
1636 1638
1637MODULE_AUTHOR("Andre Hedrick"); 1639MODULE_AUTHOR("Andre Hedrick");
1638MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE"); 1640MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE");
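
hpt366 now keeps its per-chip hpt_info in host->host_priv while PCI drvdata holds the struct ide_host itself: the probe above allocates one element per PCI function, copies the static template into each, and passes the array to ide_pci_init_one()/ide_pci_init_two(). The sketch below shows how a port-level hook picks its element; the increment is done on a struct hpt_info * (not on the raw void * host_priv) so that stepping to the second function advances by a whole structure.

static struct hpt_info *example_hpt_info(ide_hwif_t *hwif)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        struct ide_host *host = pci_get_drvdata(dev);
        struct hpt_info *info = host->host_priv;

        /* hwifs of the second PCI function use the second array element */
        if (hwif->dev == host->dev[1])
                info++;

        return info;
}
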
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index 2b71bdf74e73..6eba8f188264 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -14,6 +14,8 @@
14#include <linux/ide.h> 14#include <linux/ide.h>
15#include <linux/init.h> 15#include <linux/init.h>
16 16
17#define DRV_NAME "it8213"
18
17/** 19/**
18 * it8213_set_pio_mode - set host controller for PIO mode 20 * it8213_set_pio_mode - set host controller for PIO mode
19 * @drive: drive 21 * @drive: drive
@@ -155,23 +157,17 @@ static const struct ide_port_ops it8213_port_ops = {
155 .cable_detect = it8213_cable_detect, 157 .cable_detect = it8213_cable_detect,
156}; 158};
157 159
158#define DECLARE_ITE_DEV(name_str) \ 160static const struct ide_port_info it8213_chipset __devinitdata = {
159 { \ 161 .name = DRV_NAME,
160 .name = name_str, \ 162 .enablebits = { {0x41, 0x80, 0x80} },
161 .enablebits = { {0x41, 0x80, 0x80} }, \ 163 .port_ops = &it8213_port_ops,
162 .port_ops = &it8213_port_ops, \ 164 .host_flags = IDE_HFLAG_SINGLE,
163 .host_flags = IDE_HFLAG_SINGLE, \ 165 .pio_mask = ATA_PIO4,
164 .pio_mask = ATA_PIO4, \ 166 .swdma_mask = ATA_SWDMA2_ONLY,
165 .swdma_mask = ATA_SWDMA2_ONLY, \ 167 .mwdma_mask = ATA_MWDMA12_ONLY,
166 .mwdma_mask = ATA_MWDMA12_ONLY, \ 168 .udma_mask = ATA_UDMA6,
167 .udma_mask = ATA_UDMA6, \
168 }
169
170static const struct ide_port_info it8213_chipsets[] __devinitdata = {
171 /* 0 */ DECLARE_ITE_DEV("IT8213"),
172}; 169};
173 170
174
175/** 171/**
176 * it8213_init_one - pci layer discovery entry 172 * it8213_init_one - pci layer discovery entry
177 * @dev: PCI device 173 * @dev: PCI device
@@ -184,7 +180,7 @@ static const struct ide_port_info it8213_chipsets[] __devinitdata = {
184 180
185static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id) 181static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
186{ 182{
187 return ide_setup_pci_device(dev, &it8213_chipsets[id->driver_data]); 183 return ide_pci_init_one(dev, &it8213_chipset, NULL);
188} 184}
189 185
190static const struct pci_device_id it8213_pci_tbl[] = { 186static const struct pci_device_id it8213_pci_tbl[] = {
@@ -198,6 +194,7 @@ static struct pci_driver driver = {
198 .name = "ITE8213_IDE", 194 .name = "ITE8213_IDE",
199 .id_table = it8213_pci_tbl, 195 .id_table = it8213_pci_tbl,
200 .probe = it8213_init_one, 196 .probe = it8213_init_one,
197 .remove = ide_pci_remove,
201}; 198};
202 199
203static int __init it8213_ide_init(void) 200static int __init it8213_ide_init(void)
@@ -205,7 +202,13 @@ static int __init it8213_ide_init(void)
205 return ide_pci_register_driver(&driver); 202 return ide_pci_register_driver(&driver);
206} 203}
207 204
205static void __exit it8213_ide_exit(void)
206{
207 pci_unregister_driver(&driver);
208}
209
208module_init(it8213_ide_init); 210module_init(it8213_ide_init);
211module_exit(it8213_ide_exit);
209 212
210MODULE_AUTHOR("Jack Lee, Alan Cox"); 213MODULE_AUTHOR("Jack Lee, Alan Cox");
211MODULE_DESCRIPTION("PCI driver module for the ITE 8213"); 214MODULE_DESCRIPTION("PCI driver module for the ITE 8213");
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index cbf647202994..e16a1d113a2a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -67,6 +67,8 @@
67#include <linux/ide.h> 67#include <linux/ide.h>
68#include <linux/init.h> 68#include <linux/init.h>
69 69
70#define DRV_NAME "it821x"
71
70struct it821x_dev 72struct it821x_dev
71{ 73{
72 unsigned int smart:1, /* Are we in smart raid mode */ 74 unsigned int smart:1, /* Are we in smart raid mode */
@@ -534,8 +536,9 @@ static struct ide_dma_ops it821x_pass_through_dma_ops = {
534static void __devinit init_hwif_it821x(ide_hwif_t *hwif) 536static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
535{ 537{
536 struct pci_dev *dev = to_pci_dev(hwif->dev); 538 struct pci_dev *dev = to_pci_dev(hwif->dev);
537 struct it821x_dev **itdevs = (struct it821x_dev **)pci_get_drvdata(dev); 539 struct ide_host *host = pci_get_drvdata(dev);
538 struct it821x_dev *idev = itdevs[hwif->channel]; 540 struct it821x_dev *itdevs = host->host_priv;
541 struct it821x_dev *idev = itdevs + hwif->channel;
539 u8 conf; 542 u8 conf;
540 543
541 ide_set_hwifdata(hwif, idev); 544 ide_set_hwifdata(hwif, idev);
@@ -568,7 +571,8 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
568 idev->timing10 = 1; 571 idev->timing10 = 1;
569 hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA; 572 hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
570 if (idev->smart == 0) 573 if (idev->smart == 0)
571 printk(KERN_WARNING "it821x: Revision 0x10, workarounds activated.\n"); 574 printk(KERN_WARNING DRV_NAME " %s: revision 0x10, "
575 "workarounds activated\n", pci_name(dev));
572 } 576 }
573 577
574 if (idev->smart == 0) { 578 if (idev->smart == 0) {
@@ -601,18 +605,20 @@ static void __devinit it8212_disable_raid(struct pci_dev *dev)
601 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20); 605 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
602} 606}
603 607
604static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const char *name) 608static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev)
605{ 609{
606 u8 conf; 610 u8 conf;
607 static char *mode[2] = { "pass through", "smart" }; 611 static char *mode[2] = { "pass through", "smart" };
608 612
609 /* Force the card into bypass mode if so requested */ 613 /* Force the card into bypass mode if so requested */
610 if (it8212_noraid) { 614 if (it8212_noraid) {
611 printk(KERN_INFO "it8212: forcing bypass mode.\n"); 615 printk(KERN_INFO DRV_NAME " %s: forcing bypass mode\n",
616 pci_name(dev));
612 it8212_disable_raid(dev); 617 it8212_disable_raid(dev);
613 } 618 }
614 pci_read_config_byte(dev, 0x50, &conf); 619 pci_read_config_byte(dev, 0x50, &conf);
615 printk(KERN_INFO "it821x: controller in %s mode.\n", mode[conf & 1]); 620 printk(KERN_INFO DRV_NAME " %s: controller in %s mode\n",
621 pci_name(dev), mode[conf & 1]);
616 return 0; 622 return 0;
617} 623}
618 624
@@ -624,17 +630,12 @@ static const struct ide_port_ops it821x_port_ops = {
624 .cable_detect = it821x_cable_detect, 630 .cable_detect = it821x_cable_detect,
625}; 631};
626 632
627#define DECLARE_ITE_DEV(name_str) \ 633static const struct ide_port_info it821x_chipset __devinitdata = {
628 { \ 634 .name = DRV_NAME,
629 .name = name_str, \ 635 .init_chipset = init_chipset_it821x,
630 .init_chipset = init_chipset_it821x, \ 636 .init_hwif = init_hwif_it821x,
631 .init_hwif = init_hwif_it821x, \ 637 .port_ops = &it821x_port_ops,
632 .port_ops = &it821x_port_ops, \ 638 .pio_mask = ATA_PIO4,
633 .pio_mask = ATA_PIO4, \
634 }
635
636static const struct ide_port_info it821x_chipsets[] __devinitdata = {
637 /* 0 */ DECLARE_ITE_DEV("IT8212"),
638}; 639};
639 640
640/** 641/**
@@ -648,23 +649,29 @@ static const struct ide_port_info it821x_chipsets[] __devinitdata = {
648 649
649static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id) 650static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
650{ 651{
651 struct it821x_dev *itdevs[2] = { NULL, NULL} , *itdev; 652 struct it821x_dev *itdevs;
652 unsigned int i; 653 int rc;
653
654 for (i = 0; i < 2; i++) {
655 itdev = kzalloc(sizeof(*itdev), GFP_KERNEL);
656 if (itdev == NULL) {
657 kfree(itdevs[0]);
658 printk(KERN_ERR "it821x: out of memory\n");
659 return -ENOMEM;
660 }
661 654
662 itdevs[i] = itdev; 655 itdevs = kzalloc(2 * sizeof(*itdevs), GFP_KERNEL);
656 if (itdevs == NULL) {
657 printk(KERN_ERR DRV_NAME " %s: out of memory\n", pci_name(dev));
658 return -ENOMEM;
663 } 659 }
664 660
665 pci_set_drvdata(dev, itdevs); 661 rc = ide_pci_init_one(dev, &it821x_chipset, itdevs);
662 if (rc)
663 kfree(itdevs);
666 664
667 return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]); 665 return rc;
666}
667
668static void __devexit it821x_remove(struct pci_dev *dev)
669{
670 struct ide_host *host = pci_get_drvdata(dev);
671 struct it821x_dev *itdevs = host->host_priv;
672
673 ide_pci_remove(dev);
674 kfree(itdevs);
668} 675}
669 676
670static const struct pci_device_id it821x_pci_tbl[] = { 677static const struct pci_device_id it821x_pci_tbl[] = {
@@ -679,6 +686,7 @@ static struct pci_driver driver = {
679 .name = "ITE821x IDE", 686 .name = "ITE821x IDE",
680 .id_table = it821x_pci_tbl, 687 .id_table = it821x_pci_tbl,
681 .probe = it821x_init_one, 688 .probe = it821x_init_one,
689 .remove = it821x_remove,
682}; 690};
683 691
684static int __init it821x_ide_init(void) 692static int __init it821x_ide_init(void)
@@ -686,7 +694,13 @@ static int __init it821x_ide_init(void)
686 return ide_pci_register_driver(&driver); 694 return ide_pci_register_driver(&driver);
687} 695}
688 696
697static void __exit it821x_ide_exit(void)
698{
699 pci_unregister_driver(&driver);
700}
701
689module_init(it821x_ide_init); 702module_init(it821x_ide_init);
703module_exit(it821x_ide_exit);
690 704
691module_param_named(noraid, it8212_noraid, int, S_IRUGO); 705module_param_named(noraid, it8212_noraid, int, S_IRUGO);
692MODULE_PARM_DESC(noraid, "Force card into bypass mode"); 706MODULE_PARM_DESC(noraid, "Force card into bypass mode");
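
A related detail from the it821x hunk: when a driver needs per-channel state, the probe routine allocates an array of two structures, passes the array as host_priv, and each hwif callback indexes it by hwif->channel. A rough sketch of the consumer side, with illustrative bar_* names standing in for the real it821x ones:

struct bar_dev {
	unsigned int smart:1;			/* illustrative per-channel flag */
};

static void __devinit init_hwif_bar(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	struct ide_host *host = pci_get_drvdata(dev);
	struct bar_dev *devs = host->host_priv;	/* array of two, one per channel */
	struct bar_dev *bdev = devs + hwif->channel;

	ide_set_hwifdata(hwif, bdev);		/* later hooks read it back with ide_get_hwifdata() */
}

The probe side allocates the pair with kzalloc(2 * sizeof(*devs), GFP_KERNEL) and frees it both on probe failure and in the ->remove hook, exactly as the it821x hunk does.
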
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index 96ef7394f283..545b6e172d9b 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -12,6 +12,8 @@
12#include <linux/ide.h> 12#include <linux/ide.h>
13#include <linux/init.h> 13#include <linux/init.h>
14 14
15#define DRV_NAME "jmicron"
16
15typedef enum { 17typedef enum {
16 PORT_PATA0 = 0, 18 PORT_PATA0 = 0,
17 PORT_PATA1 = 1, 19 PORT_PATA1 = 1,
@@ -102,7 +104,7 @@ static const struct ide_port_ops jmicron_port_ops = {
102}; 104};
103 105
104static const struct ide_port_info jmicron_chipset __devinitdata = { 106static const struct ide_port_info jmicron_chipset __devinitdata = {
105 .name = "JMB", 107 .name = DRV_NAME,
106 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } }, 108 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
107 .port_ops = &jmicron_port_ops, 109 .port_ops = &jmicron_port_ops,
108 .pio_mask = ATA_PIO5, 110 .pio_mask = ATA_PIO5,
@@ -121,7 +123,7 @@ static const struct ide_port_info jmicron_chipset __devinitdata = {
121 123
122static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id) 124static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
123{ 125{
124 return ide_setup_pci_device(dev, &jmicron_chipset); 126 return ide_pci_init_one(dev, &jmicron_chipset, NULL);
125} 127}
126 128
127/* All JMB PATA controllers have and will continue to have the same 129/* All JMB PATA controllers have and will continue to have the same
@@ -152,6 +154,7 @@ static struct pci_driver driver = {
152 .name = "JMicron IDE", 154 .name = "JMicron IDE",
153 .id_table = jmicron_pci_tbl, 155 .id_table = jmicron_pci_tbl,
154 .probe = jmicron_init_one, 156 .probe = jmicron_init_one,
157 .remove = ide_pci_remove,
155}; 158};
156 159
157static int __init jmicron_ide_init(void) 160static int __init jmicron_ide_init(void)
@@ -159,7 +162,13 @@ static int __init jmicron_ide_init(void)
159 return ide_pci_register_driver(&driver); 162 return ide_pci_register_driver(&driver);
160} 163}
161 164
165static void __exit jmicron_ide_exit(void)
166{
167 pci_unregister_driver(&driver);
168}
169
162module_init(jmicron_ide_init); 170module_init(jmicron_ide_init);
171module_exit(jmicron_ide_exit);
163 172
164MODULE_AUTHOR("Alan Cox"); 173MODULE_AUTHOR("Alan Cox");
165MODULE_DESCRIPTION("PCI driver module for the JMicron in legacy modes"); 174MODULE_DESCRIPTION("PCI driver module for the JMicron in legacy modes");
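
Every driver touched here also gains the same module boilerplate: module_init() keeps using the ide_pci_register_driver() wrapper, while the new module_exit() handler undoes it with plain pci_unregister_driver(). A compressed sketch of that pairing (corge_pci_tbl and corge_init_one are placeholders for the per-driver match table and probe routine):

static struct pci_driver corge_driver = {
	.name		= "corge_IDE",
	.id_table	= corge_pci_tbl,	/* placeholder match table */
	.probe		= corge_init_one,	/* placeholder probe routine */
	.remove		= ide_pci_remove,	/* generic teardown, no private data to free */
};

static int __init corge_ide_init(void)
{
	return ide_pci_register_driver(&corge_driver);
}

static void __exit corge_ide_exit(void)
{
	pci_unregister_driver(&corge_driver);
}

module_init(corge_ide_init);
module_exit(corge_ide_exit);
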
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index 45ba71a7182f..ffefcd15196c 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -19,6 +19,8 @@
19 19
20#include <asm/io.h> 20#include <asm/io.h>
21 21
22#define DRV_NAME "ns87415"
23
22#ifdef CONFIG_SUPERIO 24#ifdef CONFIG_SUPERIO
23/* SUPERIO 87560 is a PoS chip that NatSem denies exists. 25/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
24 * Unfortunately, it's built-in on all Astro-based PA-RISC workstations 26 * Unfortunately, it's built-in on all Astro-based PA-RISC workstations
@@ -28,10 +30,6 @@
28 */ 30 */
29#include <asm/superio.h> 31#include <asm/superio.h>
30 32
31static unsigned long superio_ide_status[2];
32static unsigned long superio_ide_select[2];
33static unsigned long superio_ide_dma_status[2];
34
35#define SUPERIO_IDE_MAX_RETRIES 25 33#define SUPERIO_IDE_MAX_RETRIES 25
36 34
37/* Because of a defect in Super I/O, all reads of the PCI DMA status 35/* Because of a defect in Super I/O, all reads of the PCI DMA status
@@ -40,27 +38,28 @@ static unsigned long superio_ide_dma_status[2];
40 */ 38 */
41static u8 superio_ide_inb (unsigned long port) 39static u8 superio_ide_inb (unsigned long port)
42{ 40{
43 if (port == superio_ide_status[0] || 41 u8 tmp;
44 port == superio_ide_status[1] || 42 int retries = SUPERIO_IDE_MAX_RETRIES;
45 port == superio_ide_select[0] ||
46 port == superio_ide_select[1] ||
47 port == superio_ide_dma_status[0] ||
48 port == superio_ide_dma_status[1]) {
49 u8 tmp;
50 int retries = SUPERIO_IDE_MAX_RETRIES;
51 43
52 /* printk(" [ reading port 0x%x with retry ] ", port); */ 44 /* printk(" [ reading port 0x%x with retry ] ", port); */
53 45
54 do { 46 do {
55 tmp = inb(port); 47 tmp = inb(port);
56 if (tmp == 0) 48 if (tmp == 0)
57 udelay(50); 49 udelay(50);
58 } while (tmp == 0 && retries-- > 0); 50 } while (tmp == 0 && retries-- > 0);
59 51
60 return tmp; 52 return tmp;
61 } 53}
62 54
63 return inb(port); 55static u8 superio_read_status(ide_hwif_t *hwif)
56{
57 return superio_ide_inb(hwif->io_ports.status_addr);
58}
59
60static u8 superio_read_sff_dma_status(ide_hwif_t *hwif)
61{
62 return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS);
64} 63}
65 64
66static void superio_tf_read(ide_drive_t *drive, ide_task_t *task) 65static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
@@ -78,6 +77,8 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
78 /* be sure we're looking at the low order bits */ 77 /* be sure we're looking at the low order bits */
79 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 78 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
80 79
80 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
81 tf->feature = inb(io_ports->feature_addr);
81 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 82 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
82 tf->nsect = inb(io_ports->nsect_addr); 83 tf->nsect = inb(io_ports->nsect_addr);
83 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 84 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -105,36 +106,32 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
105 } 106 }
106} 107}
107 108
108static void __devinit superio_ide_init_iops (struct hwif_s *hwif) 109static const struct ide_tp_ops superio_tp_ops = {
109{ 110 .exec_command = ide_exec_command,
110 struct pci_dev *pdev = to_pci_dev(hwif->dev); 111 .read_status = superio_read_status,
111 u32 base, dmabase; 112 .read_altstatus = ide_read_altstatus,
112 u8 port = hwif->channel, tmp; 113 .read_sff_dma_status = superio_read_sff_dma_status,
113
114 base = pci_resource_start(pdev, port * 2) & ~3;
115 dmabase = pci_resource_start(pdev, 4) & ~3;
116
117 superio_ide_status[port] = base + 7;
118 superio_ide_select[port] = base + 6;
119 superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
120 114
121 /* Clear error/interrupt, enable dma */ 115 .set_irq = ide_set_irq,
122 tmp = superio_ide_inb(superio_ide_dma_status[port]);
123 outb(tmp | 0x66, superio_ide_dma_status[port]);
124 116
125 hwif->tf_read = superio_tf_read; 117 .tf_load = ide_tf_load,
118 .tf_read = superio_tf_read,
126 119
127 /* We need to override inb to workaround a SuperIO errata */ 120 .input_data = ide_input_data,
128 hwif->INB = superio_ide_inb; 121 .output_data = ide_output_data,
129} 122};
130 123
131static void __devinit init_iops_ns87415(ide_hwif_t *hwif) 124static void __devinit superio_init_iops(struct hwif_s *hwif)
132{ 125{
133 struct pci_dev *dev = to_pci_dev(hwif->dev); 126 struct pci_dev *pdev = to_pci_dev(hwif->dev);
127 u32 dma_stat;
128 u8 port = hwif->channel, tmp;
134 129
135 if (PCI_SLOT(dev->devfn) == 0xE) 130 dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa);
136 /* Built-in - assume it's under superio. */ 131
137 superio_ide_init_iops(hwif); 132 /* Clear error/interrupt, enable dma */
133 tmp = superio_ide_inb(dma_stat);
134 outb(tmp | 0x66, dma_stat);
138} 135}
139#endif 136#endif
140 137
@@ -200,14 +197,14 @@ static int ns87415_dma_end(ide_drive_t *drive)
200 u8 dma_stat = 0, dma_cmd = 0; 197 u8 dma_stat = 0, dma_cmd = 0;
201 198
202 drive->waiting_for_dma = 0; 199 drive->waiting_for_dma = 0;
203 dma_stat = hwif->INB(hwif->dma_status); 200 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
204 /* get dma command mode */ 201 /* get DMA command mode */
205 dma_cmd = hwif->INB(hwif->dma_command); 202 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
206 /* stop DMA */ 203 /* stop DMA */
207 outb(dma_cmd & ~1, hwif->dma_command); 204 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
208 /* from ERRATA: clear the INTR & ERROR bits */ 205 /* from ERRATA: clear the INTR & ERROR bits */
209 dma_cmd = hwif->INB(hwif->dma_command); 206 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
210 outb(dma_cmd | 6, hwif->dma_command); 207 outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD);
211 /* and free any DMA resources */ 208 /* and free any DMA resources */
212 ide_destroy_dmatable(drive); 209 ide_destroy_dmatable(drive);
213 /* verify good DMA status */ 210 /* verify good DMA status */
@@ -276,7 +273,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
276 outb(8, hwif->io_ports.ctl_addr); 273 outb(8, hwif->io_ports.ctl_addr);
277 do { 274 do {
278 udelay(50); 275 udelay(50);
279 stat = hwif->INB(hwif->io_ports.status_addr); 276 stat = hwif->tp_ops->read_status(hwif);
280 if (stat == 0xff) 277 if (stat == 0xff)
281 break; 278 break;
282 } while ((stat & BUSY_STAT) && --timeout); 279 } while ((stat & BUSY_STAT) && --timeout);
@@ -291,7 +288,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
291 if (!hwif->dma_base) 288 if (!hwif->dma_base)
292 return; 289 return;
293 290
294 outb(0x60, hwif->dma_status); 291 outb(0x60, hwif->dma_base + ATA_DMA_STATUS);
295} 292}
296 293
297static const struct ide_port_ops ns87415_port_ops = { 294static const struct ide_port_ops ns87415_port_ops = {
@@ -310,10 +307,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
310}; 307};
311 308
312static const struct ide_port_info ns87415_chipset __devinitdata = { 309static const struct ide_port_info ns87415_chipset __devinitdata = {
313 .name = "NS87415", 310 .name = DRV_NAME,
314#ifdef CONFIG_SUPERIO
315 .init_iops = init_iops_ns87415,
316#endif
317 .init_hwif = init_hwif_ns87415, 311 .init_hwif = init_hwif_ns87415,
318 .port_ops = &ns87415_port_ops, 312 .port_ops = &ns87415_port_ops,
319 .dma_ops = &ns87415_dma_ops, 313 .dma_ops = &ns87415_dma_ops,
@@ -323,7 +317,16 @@ static const struct ide_port_info ns87415_chipset __devinitdata = {
323 317
324static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id) 318static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
325{ 319{
326 return ide_setup_pci_device(dev, &ns87415_chipset); 320 struct ide_port_info d = ns87415_chipset;
321
322#ifdef CONFIG_SUPERIO
323 if (PCI_SLOT(dev->devfn) == 0xE) {
324 /* Built-in - assume it's under superio. */
325 d.init_iops = superio_init_iops;
326 d.tp_ops = &superio_tp_ops;
327 }
328#endif
329 return ide_pci_init_one(dev, &d, NULL);
327} 330}
328 331
329static const struct pci_device_id ns87415_pci_tbl[] = { 332static const struct pci_device_id ns87415_pci_tbl[] = {
@@ -336,6 +339,7 @@ static struct pci_driver driver = {
336 .name = "NS87415_IDE", 339 .name = "NS87415_IDE",
337 .id_table = ns87415_pci_tbl, 340 .id_table = ns87415_pci_tbl,
338 .probe = ns87415_init_one, 341 .probe = ns87415_init_one,
342 .remove = ide_pci_remove,
339}; 343};
340 344
341static int __init ns87415_ide_init(void) 345static int __init ns87415_ide_init(void)
@@ -343,7 +347,13 @@ static int __init ns87415_ide_init(void)
343 return ide_pci_register_driver(&driver); 347 return ide_pci_register_driver(&driver);
344} 348}
345 349
350static void __exit ns87415_ide_exit(void)
351{
352 pci_unregister_driver(&driver);
353}
354
346module_init(ns87415_ide_init); 355module_init(ns87415_ide_init);
356module_exit(ns87415_ide_exit);
347 357
348MODULE_AUTHOR("Mark Lord, Eddie Dost, Andre Hedrick"); 358MODULE_AUTHOR("Mark Lord, Eddie Dost, Andre Hedrick");
349MODULE_DESCRIPTION("PCI driver module for NS87415 IDE"); 359MODULE_DESCRIPTION("PCI driver module for NS87415 IDE");
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c
index 725c80508d90..e28e672ddafc 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/pci/opti621.c
@@ -90,6 +90,8 @@
90 90
91#include <asm/io.h> 91#include <asm/io.h>
92 92
93#define DRV_NAME "opti621"
94
93#define READ_REG 0 /* index of Read cycle timing register */ 95#define READ_REG 0 /* index of Read cycle timing register */
94#define WRITE_REG 1 /* index of Write cycle timing register */ 96#define WRITE_REG 1 /* index of Write cycle timing register */
95#define CNTRL_REG 3 /* index of Control register */ 97#define CNTRL_REG 3 /* index of Control register */
@@ -200,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
200}; 202};
201 203
202static const struct ide_port_info opti621_chipset __devinitdata = { 204static const struct ide_port_info opti621_chipset __devinitdata = {
203 .name = "OPTI621/X", 205 .name = DRV_NAME,
204 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} }, 206 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
205 .port_ops = &opti621_port_ops, 207 .port_ops = &opti621_port_ops,
206 .host_flags = IDE_HFLAG_NO_DMA, 208 .host_flags = IDE_HFLAG_NO_DMA,
@@ -209,7 +211,7 @@ static const struct ide_port_info opti621_chipset __devinitdata = {
209 211
210static int __devinit opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id) 212static int __devinit opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id)
211{ 213{
212 return ide_setup_pci_device(dev, &opti621_chipset); 214 return ide_pci_init_one(dev, &opti621_chipset, NULL);
213} 215}
214 216
215static const struct pci_device_id opti621_pci_tbl[] = { 217static const struct pci_device_id opti621_pci_tbl[] = {
@@ -223,6 +225,7 @@ static struct pci_driver driver = {
223 .name = "Opti621_IDE", 225 .name = "Opti621_IDE",
224 .id_table = opti621_pci_tbl, 226 .id_table = opti621_pci_tbl,
225 .probe = opti621_init_one, 227 .probe = opti621_init_one,
228 .remove = ide_pci_remove,
226}; 229};
227 230
228static int __init opti621_ide_init(void) 231static int __init opti621_ide_init(void)
@@ -230,7 +233,13 @@ static int __init opti621_ide_init(void)
230 return ide_pci_register_driver(&driver); 233 return ide_pci_register_driver(&driver);
231} 234}
232 235
236static void __exit opti621_ide_exit(void)
237{
238 pci_unregister_driver(&driver);
239}
240
233module_init(opti621_ide_init); 241module_init(opti621_ide_init);
242module_exit(opti621_ide_exit);
234 243
235MODULE_AUTHOR("Jaromir Koutek, Jan Harkes, Mark Lord"); 244MODULE_AUTHOR("Jaromir Koutek, Jan Harkes, Mark Lord");
236MODULE_DESCRIPTION("PCI driver module for Opti621 IDE"); 245MODULE_DESCRIPTION("PCI driver module for Opti621 IDE");
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 070df8ab3b21..998615fa285f 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -31,6 +31,8 @@
31#include <asm/pci-bridge.h> 31#include <asm/pci-bridge.h>
32#endif 32#endif
33 33
34#define DRV_NAME "pdc202xx_new"
35
34#undef DEBUG 36#undef DEBUG
35 37
36#ifdef DEBUG 38#ifdef DEBUG
@@ -324,8 +326,9 @@ static void __devinit apple_kiwi_init(struct pci_dev *pdev)
324} 326}
325#endif /* CONFIG_PPC_PMAC */ 327#endif /* CONFIG_PPC_PMAC */
326 328
327static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const char *name) 329static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev)
328{ 330{
331 const char *name = DRV_NAME;
329 unsigned long dma_base = pci_resource_start(dev, 4); 332 unsigned long dma_base = pci_resource_start(dev, 4);
330 unsigned long sec_dma_base = dma_base + 0x08; 333 unsigned long sec_dma_base = dma_base + 0x08;
331 long pll_input, pll_output, ratio; 334 long pll_input, pll_output, ratio;
@@ -358,12 +361,13 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
358 * registers setting. 361 * registers setting.
359 */ 362 */
360 pll_input = detect_pll_input_clock(dma_base); 363 pll_input = detect_pll_input_clock(dma_base);
361 printk("%s: PLL input clock is %ld kHz\n", name, pll_input / 1000); 364 printk(KERN_INFO "%s %s: PLL input clock is %ld kHz\n",
365 name, pci_name(dev), pll_input / 1000);
362 366
363 /* Sanity check */ 367 /* Sanity check */
364 if (unlikely(pll_input < 5000000L || pll_input > 70000000L)) { 368 if (unlikely(pll_input < 5000000L || pll_input > 70000000L)) {
365 printk(KERN_ERR "%s: Bad PLL input clock %ld Hz, giving up!\n", 369 printk(KERN_ERR "%s %s: Bad PLL input clock %ld Hz, giving up!"
366 name, pll_input); 370 "\n", name, pci_name(dev), pll_input);
367 goto out; 371 goto out;
368 } 372 }
369 373
@@ -399,7 +403,8 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
399 r = 0x00; 403 r = 0x00;
400 } else { 404 } else {
401 /* Invalid ratio */ 405 /* Invalid ratio */
402 printk(KERN_ERR "%s: Bad ratio %ld, giving up!\n", name, ratio); 406 printk(KERN_ERR "%s %s: Bad ratio %ld, giving up!\n",
407 name, pci_name(dev), ratio);
403 goto out; 408 goto out;
404 } 409 }
405 410
@@ -409,7 +414,8 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
409 414
410 if (unlikely(f < 0 || f > 127)) { 415 if (unlikely(f < 0 || f > 127)) {
411 /* Invalid F */ 416 /* Invalid F */
412 printk(KERN_ERR "%s: F[%d] invalid!\n", name, f); 417 printk(KERN_ERR "%s %s: F[%d] invalid!\n",
418 name, pci_name(dev), f);
413 goto out; 419 goto out;
414 } 420 }
415 421
@@ -455,8 +461,8 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
455 461
456 if (dev2->irq != dev->irq) { 462 if (dev2->irq != dev->irq) {
457 dev2->irq = dev->irq; 463 dev2->irq = dev->irq;
458 printk(KERN_INFO "PDC20270: PCI config space " 464 printk(KERN_INFO DRV_NAME " %s: PCI config space "
459 "interrupt fixed\n"); 465 "interrupt fixed\n", pci_name(dev));
460 } 466 }
461 467
462 return dev2; 468 return dev2;
@@ -473,9 +479,9 @@ static const struct ide_port_ops pdcnew_port_ops = {
473 .cable_detect = pdcnew_cable_detect, 479 .cable_detect = pdcnew_cable_detect,
474}; 480};
475 481
476#define DECLARE_PDCNEW_DEV(name_str, udma) \ 482#define DECLARE_PDCNEW_DEV(udma) \
477 { \ 483 { \
478 .name = name_str, \ 484 .name = DRV_NAME, \
479 .init_chipset = init_chipset_pdcnew, \ 485 .init_chipset = init_chipset_pdcnew, \
480 .port_ops = &pdcnew_port_ops, \ 486 .port_ops = &pdcnew_port_ops, \
481 .host_flags = IDE_HFLAG_POST_SET_MODE | \ 487 .host_flags = IDE_HFLAG_POST_SET_MODE | \
@@ -487,13 +493,8 @@ static const struct ide_port_ops pdcnew_port_ops = {
487 } 493 }
488 494
489static const struct ide_port_info pdcnew_chipsets[] __devinitdata = { 495static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
490 /* 0 */ DECLARE_PDCNEW_DEV("PDC20268", ATA_UDMA5), 496 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
491 /* 1 */ DECLARE_PDCNEW_DEV("PDC20269", ATA_UDMA6), 497 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
492 /* 2 */ DECLARE_PDCNEW_DEV("PDC20270", ATA_UDMA5),
493 /* 3 */ DECLARE_PDCNEW_DEV("PDC20271", ATA_UDMA6),
494 /* 4 */ DECLARE_PDCNEW_DEV("PDC20275", ATA_UDMA6),
495 /* 5 */ DECLARE_PDCNEW_DEV("PDC20276", ATA_UDMA6),
496 /* 6 */ DECLARE_PDCNEW_DEV("PDC20277", ATA_UDMA6),
497}; 498};
498 499
499/** 500/**
@@ -507,13 +508,10 @@ static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
507 508
508static int __devinit pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id) 509static int __devinit pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
509{ 510{
510 const struct ide_port_info *d; 511 const struct ide_port_info *d = &pdcnew_chipsets[id->driver_data];
511 struct pci_dev *bridge = dev->bus->self; 512 struct pci_dev *bridge = dev->bus->self;
512 u8 idx = id->driver_data;
513
514 d = &pdcnew_chipsets[idx];
515 513
516 if (idx == 2 && bridge && 514 if (dev->device == PCI_DEVICE_ID_PROMISE_20270 && bridge &&
517 bridge->vendor == PCI_VENDOR_ID_DEC && 515 bridge->vendor == PCI_VENDOR_ID_DEC &&
518 bridge->device == PCI_DEVICE_ID_DEC_21150) { 516 bridge->device == PCI_DEVICE_ID_DEC_21150) {
519 struct pci_dev *dev2; 517 struct pci_dev *dev2;
@@ -524,33 +522,42 @@ static int __devinit pdc202new_init_one(struct pci_dev *dev, const struct pci_de
524 dev2 = pdc20270_get_dev2(dev); 522 dev2 = pdc20270_get_dev2(dev);
525 523
526 if (dev2) { 524 if (dev2) {
527 int ret = ide_setup_pci_devices(dev, dev2, d); 525 int ret = ide_pci_init_two(dev, dev2, d, NULL);
528 if (ret < 0) 526 if (ret < 0)
529 pci_dev_put(dev2); 527 pci_dev_put(dev2);
530 return ret; 528 return ret;
531 } 529 }
532 } 530 }
533 531
534 if (idx == 5 && bridge && 532 if (dev->device == PCI_DEVICE_ID_PROMISE_20276 && bridge &&
535 bridge->vendor == PCI_VENDOR_ID_INTEL && 533 bridge->vendor == PCI_VENDOR_ID_INTEL &&
536 (bridge->device == PCI_DEVICE_ID_INTEL_I960 || 534 (bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
537 bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) { 535 bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
538 printk(KERN_INFO "PDC20276: attached to I2O RAID controller, " 536 printk(KERN_INFO DRV_NAME " %s: attached to I2O RAID controller,"
539 "skipping\n"); 537 " skipping\n", pci_name(dev));
540 return -ENODEV; 538 return -ENODEV;
541 } 539 }
542 540
543 return ide_setup_pci_device(dev, d); 541 return ide_pci_init_one(dev, d, NULL);
542}
543
544static void __devexit pdc202new_remove(struct pci_dev *dev)
545{
546 struct ide_host *host = pci_get_drvdata(dev);
547 struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
548
549 ide_pci_remove(dev);
550 pci_dev_put(dev2);
544} 551}
545 552
546static const struct pci_device_id pdc202new_pci_tbl[] = { 553static const struct pci_device_id pdc202new_pci_tbl[] = {
547 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), 0 }, 554 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), 0 },
548 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), 1 }, 555 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), 1 },
549 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), 2 }, 556 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), 0 },
550 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), 3 }, 557 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), 1 },
551 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), 4 }, 558 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), 1 },
552 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), 5 }, 559 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), 1 },
553 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), 6 }, 560 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), 1 },
554 { 0, }, 561 { 0, },
555}; 562};
556MODULE_DEVICE_TABLE(pci, pdc202new_pci_tbl); 563MODULE_DEVICE_TABLE(pci, pdc202new_pci_tbl);
@@ -559,6 +566,7 @@ static struct pci_driver driver = {
559 .name = "Promise_IDE", 566 .name = "Promise_IDE",
560 .id_table = pdc202new_pci_tbl, 567 .id_table = pdc202new_pci_tbl,
561 .probe = pdc202new_init_one, 568 .probe = pdc202new_init_one,
569 .remove = pdc202new_remove,
562}; 570};
563 571
564static int __init pdc202new_ide_init(void) 572static int __init pdc202new_ide_init(void)
@@ -566,7 +574,13 @@ static int __init pdc202new_ide_init(void)
566 return ide_pci_register_driver(&driver); 574 return ide_pci_register_driver(&driver);
567} 575}
568 576
577static void __exit pdc202new_ide_exit(void)
578{
579 pci_unregister_driver(&driver);
580}
581
569module_init(pdc202new_ide_init); 582module_init(pdc202new_ide_init);
583module_exit(pdc202new_ide_exit);
570 584
571MODULE_AUTHOR("Andre Hedrick, Frank Tiernan"); 585MODULE_AUTHOR("Andre Hedrick, Frank Tiernan");
572MODULE_DESCRIPTION("PCI driver module for Promise PDC20268 and higher"); 586MODULE_DESCRIPTION("PCI driver module for Promise PDC20268 and higher");
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index fca89eda5c02..6ff2def58da0 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -20,6 +20,8 @@
20 20
21#include <asm/io.h> 21#include <asm/io.h>
22 22
23#define DRV_NAME "pdc202xx_old"
24
23#define PDC202XX_DEBUG_DRIVE_INFO 0 25#define PDC202XX_DEBUG_DRIVE_INFO 0
24 26
25static const char *pdc_quirk_drives[] = { 27static const char *pdc_quirk_drives[] = {
@@ -206,7 +208,7 @@ static int pdc202xx_dma_test_irq(ide_drive_t *drive)
206{ 208{
207 ide_hwif_t *hwif = HWIF(drive); 209 ide_hwif_t *hwif = HWIF(drive);
208 unsigned long high_16 = hwif->extra_base - 16; 210 unsigned long high_16 = hwif->extra_base - 16;
209 u8 dma_stat = inb(hwif->dma_status); 211 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
210 u8 sc1d = inb(high_16 + 0x001d); 212 u8 sc1d = inb(high_16 + 0x001d);
211 213
212 if (hwif->channel) { 214 if (hwif->channel) {
@@ -263,8 +265,7 @@ static void pdc202xx_dma_timeout(ide_drive_t *drive)
263 ide_dma_timeout(drive); 265 ide_dma_timeout(drive);
264} 266}
265 267
266static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev, 268static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev)
267 const char *name)
268{ 269{
269 unsigned long dmabase = pci_resource_start(dev, 4); 270 unsigned long dmabase = pci_resource_start(dev, 4);
270 u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0; 271 u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;
@@ -304,15 +305,14 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
304 if (irq != irq2) { 305 if (irq != irq2) {
305 pci_write_config_byte(dev, 306 pci_write_config_byte(dev,
306 (PCI_INTERRUPT_LINE)|0x80, irq); /* 0xbc */ 307 (PCI_INTERRUPT_LINE)|0x80, irq); /* 0xbc */
307 printk(KERN_INFO "%s: PCI config space interrupt " 308 printk(KERN_INFO "%s %s: PCI config space interrupt "
308 "mirror fixed\n", name); 309 "mirror fixed\n", name, pci_name(dev));
309 } 310 }
310 } 311 }
311} 312}
312 313
313#define IDE_HFLAGS_PDC202XX \ 314#define IDE_HFLAGS_PDC202XX \
314 (IDE_HFLAG_ERROR_STOPS_FIFO | \ 315 (IDE_HFLAG_ERROR_STOPS_FIFO | \
315 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
316 IDE_HFLAG_OFF_BOARD) 316 IDE_HFLAG_OFF_BOARD)
317 317
318static const struct ide_port_ops pdc20246_port_ops = { 318static const struct ide_port_ops pdc20246_port_ops = {
@@ -351,9 +351,9 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
351 .dma_timeout = pdc202xx_dma_timeout, 351 .dma_timeout = pdc202xx_dma_timeout,
352}; 352};
353 353
354#define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \ 354#define DECLARE_PDC2026X_DEV(udma, extra_flags) \
355 { \ 355 { \
356 .name = name_str, \ 356 .name = DRV_NAME, \
357 .init_chipset = init_chipset_pdc202xx, \ 357 .init_chipset = init_chipset_pdc202xx, \
358 .port_ops = &pdc2026x_port_ops, \ 358 .port_ops = &pdc2026x_port_ops, \
359 .dma_ops = &pdc2026x_dma_ops, \ 359 .dma_ops = &pdc2026x_dma_ops, \
@@ -364,8 +364,8 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
364 } 364 }
365 365
366static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { 366static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
367 { /* 0 */ 367 { /* 0: PDC20246 */
368 .name = "PDC20246", 368 .name = DRV_NAME,
369 .init_chipset = init_chipset_pdc202xx, 369 .init_chipset = init_chipset_pdc202xx,
370 .port_ops = &pdc20246_port_ops, 370 .port_ops = &pdc20246_port_ops,
371 .dma_ops = &pdc20246_dma_ops, 371 .dma_ops = &pdc20246_dma_ops,
@@ -375,10 +375,10 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
375 .udma_mask = ATA_UDMA2, 375 .udma_mask = ATA_UDMA2,
376 }, 376 },
377 377
378 /* 1 */ DECLARE_PDC2026X_DEV("PDC20262", ATA_UDMA4, 0), 378 /* 1: PDC2026{2,3} */
379 /* 2 */ DECLARE_PDC2026X_DEV("PDC20263", ATA_UDMA4, 0), 379 DECLARE_PDC2026X_DEV(ATA_UDMA4, 0),
380 /* 3 */ DECLARE_PDC2026X_DEV("PDC20265", ATA_UDMA5, IDE_HFLAG_RQSIZE_256), 380 /* 2: PDC2026{5,7} */
381 /* 4 */ DECLARE_PDC2026X_DEV("PDC20267", ATA_UDMA5, IDE_HFLAG_RQSIZE_256), 381 DECLARE_PDC2026X_DEV(ATA_UDMA5, IDE_HFLAG_RQSIZE_256),
382}; 382};
383 383
384/** 384/**
@@ -397,31 +397,32 @@ static int __devinit pdc202xx_init_one(struct pci_dev *dev, const struct pci_dev
397 397
398 d = &pdc202xx_chipsets[idx]; 398 d = &pdc202xx_chipsets[idx];
399 399
400 if (idx < 3) 400 if (idx < 2)
401 pdc202ata4_fixup_irq(dev, d->name); 401 pdc202ata4_fixup_irq(dev, d->name);
402 402
 403 if (idx == 3) { 403 if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
404 struct pci_dev *bridge = dev->bus->self; 404 struct pci_dev *bridge = dev->bus->self;
405 405
406 if (bridge && 406 if (bridge &&
407 bridge->vendor == PCI_VENDOR_ID_INTEL && 407 bridge->vendor == PCI_VENDOR_ID_INTEL &&
408 (bridge->device == PCI_DEVICE_ID_INTEL_I960 || 408 (bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
409 bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) { 409 bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
410 printk(KERN_INFO "ide: Skipping Promise PDC20265 " 410 printk(KERN_INFO DRV_NAME " %s: skipping Promise "
411 "attached to I2O RAID controller\n"); 411 "PDC20265 attached to I2O RAID controller\n",
412 pci_name(dev));
412 return -ENODEV; 413 return -ENODEV;
413 } 414 }
414 } 415 }
415 416
416 return ide_setup_pci_device(dev, d); 417 return ide_pci_init_one(dev, d, NULL);
417} 418}
418 419
419static const struct pci_device_id pdc202xx_pci_tbl[] = { 420static const struct pci_device_id pdc202xx_pci_tbl[] = {
420 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 }, 421 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
421 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 }, 422 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
422 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 2 }, 423 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
423 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 3 }, 424 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
424 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 4 }, 425 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
425 { 0, }, 426 { 0, },
426}; 427};
427MODULE_DEVICE_TABLE(pci, pdc202xx_pci_tbl); 428MODULE_DEVICE_TABLE(pci, pdc202xx_pci_tbl);
@@ -430,6 +431,7 @@ static struct pci_driver driver = {
430 .name = "Promise_Old_IDE", 431 .name = "Promise_Old_IDE",
431 .id_table = pdc202xx_pci_tbl, 432 .id_table = pdc202xx_pci_tbl,
432 .probe = pdc202xx_init_one, 433 .probe = pdc202xx_init_one,
434 .remove = ide_pci_remove,
433}; 435};
434 436
435static int __init pdc202xx_ide_init(void) 437static int __init pdc202xx_ide_init(void)
@@ -437,7 +439,13 @@ static int __init pdc202xx_ide_init(void)
437 return ide_pci_register_driver(&driver); 439 return ide_pci_register_driver(&driver);
438} 440}
439 441
442static void __exit pdc202xx_ide_exit(void)
443{
444 pci_unregister_driver(&driver);
445}
446
440module_init(pdc202xx_ide_init); 447module_init(pdc202xx_ide_init);
448module_exit(pdc202xx_ide_exit);
441 449
442MODULE_AUTHOR("Andre Hedrick, Frank Tiernan"); 450MODULE_AUTHOR("Andre Hedrick, Frank Tiernan");
443MODULE_DESCRIPTION("PCI driver module for older Promise IDE"); 451MODULE_DESCRIPTION("PCI driver module for older Promise IDE");
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index f04738d14a6f..7fc3022dcf68 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -54,6 +54,8 @@
54 54
55#include <asm/io.h> 55#include <asm/io.h>
56 56
57#define DRV_NAME "piix"
58
57static int no_piix_dma; 59static int no_piix_dma;
58 60
59/** 61/**
@@ -198,13 +200,12 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
198/** 200/**
199 * init_chipset_ich - set up the ICH chipset 201 * init_chipset_ich - set up the ICH chipset
200 * @dev: PCI device to set up 202 * @dev: PCI device to set up
201 * @name: Name of the device
202 * 203 *
203 * Initialize the PCI device as required. For the ICH this turns 204 * Initialize the PCI device as required. For the ICH this turns
204 * out to be nice and simple. 205 * out to be nice and simple.
205 */ 206 */
206 207
207static unsigned int __devinit init_chipset_ich(struct pci_dev *dev, const char *name) 208static unsigned int __devinit init_chipset_ich(struct pci_dev *dev)
208{ 209{
209 u32 extra = 0; 210 u32 extra = 0;
210 211
@@ -227,9 +228,9 @@ static void piix_dma_clear_irq(ide_drive_t *drive)
227 u8 dma_stat; 228 u8 dma_stat;
228 229
229 /* clear the INTR & ERROR bits */ 230 /* clear the INTR & ERROR bits */
230 dma_stat = inb(hwif->dma_status); 231 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
231 /* Should we force the bit as well ? */ 232 /* Should we force the bit as well ? */
232 outb(dma_stat, hwif->dma_status); 233 outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
233} 234}
234 235
235struct ich_laptop { 236struct ich_laptop {
@@ -314,9 +315,9 @@ static const struct ide_port_ops piix_port_ops = {
314 #define IDE_HFLAGS_PIIX 0 315 #define IDE_HFLAGS_PIIX 0
315#endif 316#endif
316 317
317#define DECLARE_PIIX_DEV(name_str, udma) \ 318#define DECLARE_PIIX_DEV(udma) \
318 { \ 319 { \
319 .name = name_str, \ 320 .name = DRV_NAME, \
320 .init_hwif = init_hwif_piix, \ 321 .init_hwif = init_hwif_piix, \
321 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 322 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
322 .port_ops = &piix_port_ops, \ 323 .port_ops = &piix_port_ops, \
@@ -327,9 +328,9 @@ static const struct ide_port_ops piix_port_ops = {
327 .udma_mask = udma, \ 328 .udma_mask = udma, \
328 } 329 }
329 330
330#define DECLARE_ICH_DEV(name_str, udma) \ 331#define DECLARE_ICH_DEV(udma) \
331 { \ 332 { \
332 .name = name_str, \ 333 .name = DRV_NAME, \
333 .init_chipset = init_chipset_ich, \ 334 .init_chipset = init_chipset_ich, \
334 .init_hwif = init_hwif_ich, \ 335 .init_hwif = init_hwif_ich, \
335 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 336 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
@@ -342,45 +343,31 @@ static const struct ide_port_ops piix_port_ops = {
342 } 343 }
343 344
344static const struct ide_port_info piix_pci_info[] __devinitdata = { 345static const struct ide_port_info piix_pci_info[] __devinitdata = {
345 /* 0 */ DECLARE_PIIX_DEV("PIIXa", 0x00), /* no udma */ 346 /* 0: MPIIX */
346 /* 1 */ DECLARE_PIIX_DEV("PIIXb", 0x00), /* no udma */
347
348 /* 2 */
349 { /* 347 { /*
350 * MPIIX actually has only a single IDE channel mapped to 348 * MPIIX actually has only a single IDE channel mapped to
351 * the primary or secondary ports depending on the value 349 * the primary or secondary ports depending on the value
352 * of the bit 14 of the IDETIM register at offset 0x6c 350 * of the bit 14 of the IDETIM register at offset 0x6c
353 */ 351 */
354 .name = "MPIIX", 352 .name = DRV_NAME,
355 .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}}, 353 .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
356 .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA | 354 .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA |
357 IDE_HFLAGS_PIIX, 355 IDE_HFLAGS_PIIX,
358 .pio_mask = ATA_PIO4, 356 .pio_mask = ATA_PIO4,
359 /* This is a painful system best to let it self tune for now */ 357 /* This is a painful system best to let it self tune for now */
360 }, 358 },
361 359 /* 1: PIIXa/PIIXb/PIIX3 */
362 /* 3 */ DECLARE_PIIX_DEV("PIIX3", 0x00), /* no udma */ 360 DECLARE_PIIX_DEV(0x00), /* no udma */
363 /* 4 */ DECLARE_PIIX_DEV("PIIX4", ATA_UDMA2), 361 /* 2: PIIX4 */
364 /* 5 */ DECLARE_ICH_DEV("ICH0", ATA_UDMA2), 362 DECLARE_PIIX_DEV(ATA_UDMA2),
365 /* 6 */ DECLARE_PIIX_DEV("PIIX4", ATA_UDMA2), 363 /* 3: ICH0 */
366 /* 7 */ DECLARE_ICH_DEV("ICH", ATA_UDMA4), 364 DECLARE_ICH_DEV(ATA_UDMA2),
367 /* 8 */ DECLARE_PIIX_DEV("PIIX4", ATA_UDMA4), 365 /* 4: ICH */
368 /* 9 */ DECLARE_PIIX_DEV("PIIX4", ATA_UDMA2), 366 DECLARE_ICH_DEV(ATA_UDMA4),
369 /* 10 */ DECLARE_ICH_DEV("ICH2", ATA_UDMA5), 367 /* 5: PIIX4 */
370 /* 11 */ DECLARE_ICH_DEV("ICH2M", ATA_UDMA5), 368 DECLARE_PIIX_DEV(ATA_UDMA4),
371 /* 12 */ DECLARE_ICH_DEV("ICH3M", ATA_UDMA5), 369 /* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
372 /* 13 */ DECLARE_ICH_DEV("ICH3", ATA_UDMA5), 370 DECLARE_ICH_DEV(ATA_UDMA5),
373 /* 14 */ DECLARE_ICH_DEV("ICH4", ATA_UDMA5),
374 /* 15 */ DECLARE_ICH_DEV("ICH5", ATA_UDMA5),
375 /* 16 */ DECLARE_ICH_DEV("C-ICH", ATA_UDMA5),
376 /* 17 */ DECLARE_ICH_DEV("ICH4", ATA_UDMA5),
377 /* 18 */ DECLARE_ICH_DEV("ICH5-SATA", ATA_UDMA5),
378 /* 19 */ DECLARE_ICH_DEV("ICH5", ATA_UDMA5),
379 /* 20 */ DECLARE_ICH_DEV("ICH6", ATA_UDMA5),
380 /* 21 */ DECLARE_ICH_DEV("ICH7", ATA_UDMA5),
381 /* 22 */ DECLARE_ICH_DEV("ICH4", ATA_UDMA5),
382 /* 23 */ DECLARE_ICH_DEV("ESB2", ATA_UDMA5),
383 /* 24 */ DECLARE_ICH_DEV("ICH8M", ATA_UDMA5),
384}; 371};
385 372
386/** 373/**
@@ -394,7 +381,7 @@ static const struct ide_port_info piix_pci_info[] __devinitdata = {
394 381
395static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id) 382static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
396{ 383{
397 return ide_setup_pci_device(dev, &piix_pci_info[id->driver_data]); 384 return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
398} 385}
399 386
400/** 387/**
@@ -421,39 +408,39 @@ static void __devinit piix_check_450nx(void)
421 no_piix_dma = 2; 408 no_piix_dma = 2;
422 } 409 }
423 if(no_piix_dma) 410 if(no_piix_dma)
424 printk(KERN_WARNING "piix: 450NX errata present, disabling IDE DMA.\n"); 411 printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n");
425 if(no_piix_dma == 2) 412 if(no_piix_dma == 2)
426 printk(KERN_WARNING "piix: A BIOS update may resolve this.\n"); 413 printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n");
427} 414}
428 415
429static const struct pci_device_id piix_pci_tbl[] = { 416static const struct pci_device_id piix_pci_tbl[] = {
430 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 0 }, 417 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 1 },
431 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 }, 418 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 },
432 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 2 }, 419 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 0 },
433 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 3 }, 420 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 1 },
434 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 4 }, 421 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 2 },
435 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 5 }, 422 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 3 },
436 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 6 }, 423 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 2 },
437 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 7 }, 424 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 4 },
438 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 8 }, 425 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 5 },
439 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 9 }, 426 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 2 },
440 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 10 }, 427 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 6 },
441 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 11 }, 428 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 6 },
442 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 12 }, 429 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 },
443 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 13 }, 430 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 },
444 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 14 }, 431 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 },
445 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 15 }, 432 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 },
446 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 16 }, 433 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 6 },
447 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 17 }, 434 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 },
448#ifdef CONFIG_BLK_DEV_IDE_SATA 435#ifdef CONFIG_BLK_DEV_IDE_SATA
449 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 18 }, 436 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 6 },
450#endif 437#endif
451 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 19 }, 438 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
452 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 20 }, 439 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
453 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 21 }, 440 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 6 },
454 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 22 }, 441 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
455 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 23 }, 442 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 6 },
456 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 24 }, 443 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
457 { 0, }, 444 { 0, },
458}; 445};
459MODULE_DEVICE_TABLE(pci, piix_pci_tbl); 446MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
@@ -462,6 +449,7 @@ static struct pci_driver driver = {
462 .name = "PIIX_IDE", 449 .name = "PIIX_IDE",
463 .id_table = piix_pci_tbl, 450 .id_table = piix_pci_tbl,
464 .probe = piix_init_one, 451 .probe = piix_init_one,
452 .remove = ide_pci_remove,
465}; 453};
466 454
467static int __init piix_ide_init(void) 455static int __init piix_ide_init(void)
@@ -470,7 +458,13 @@ static int __init piix_ide_init(void)
470 return ide_pci_register_driver(&driver); 458 return ide_pci_register_driver(&driver);
471} 459}
472 460
461static void __exit piix_ide_exit(void)
462{
463 pci_unregister_driver(&driver);
464}
465
473module_init(piix_ide_init); 466module_init(piix_ide_init);
467module_exit(piix_ide_exit);
474 468
475MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz"); 469MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz");
476MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE"); 470MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE");
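
The piix hunk above also shows how the per-name port_info entries collapse: each pci_device_id entry now stores an index in its driver_data field, several device IDs map to the same index, and the probe routine simply dereferences the array with it. A trimmed sketch of that indexing (the baz_* names are illustrative and the entries keep only the fields relevant here):

static const struct ide_port_info baz_port_info[] __devinitdata = {
	/* 0 */ { .name = "baz", .udma_mask = ATA_UDMA2 },
	/* 1 */ { .name = "baz", .udma_mask = ATA_UDMA5 },
};

static const struct pci_device_id baz_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB),   0 },	/* -> entry 0 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 1 },	/* -> entry 1 */
	{ 0, },
};

static int __devinit baz_init_one(struct pci_dev *dev,
				  const struct pci_device_id *id)
{
	/* driver_data carries the index stored in the match table */
	return ide_pci_init_one(dev, &baz_port_info[id->driver_data], NULL);
}
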
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/pci/rz1000.c
index 532154adba29..8d11ee838a2a 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/pci/rz1000.c
@@ -21,6 +21,8 @@
21#include <linux/ide.h> 21#include <linux/ide.h>
22#include <linux/init.h> 22#include <linux/init.h>
23 23
24#define DRV_NAME "rz1000"
25
24static void __devinit init_hwif_rz1000 (ide_hwif_t *hwif) 26static void __devinit init_hwif_rz1000 (ide_hwif_t *hwif)
25{ 27{
26 struct pci_dev *dev = to_pci_dev(hwif->dev); 28 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -40,7 +42,7 @@ static void __devinit init_hwif_rz1000 (ide_hwif_t *hwif)
40} 42}
41 43
42static const struct ide_port_info rz1000_chipset __devinitdata = { 44static const struct ide_port_info rz1000_chipset __devinitdata = {
43 .name = "RZ100x", 45 .name = DRV_NAME,
44 .init_hwif = init_hwif_rz1000, 46 .init_hwif = init_hwif_rz1000,
45 .chipset = ide_rz1000, 47 .chipset = ide_rz1000,
46 .host_flags = IDE_HFLAG_NO_DMA, 48 .host_flags = IDE_HFLAG_NO_DMA,
@@ -48,7 +50,7 @@ static const struct ide_port_info rz1000_chipset __devinitdata = {
48 50
49static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id) 51static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
50{ 52{
51 return ide_setup_pci_device(dev, &rz1000_chipset); 53 return ide_pci_init_one(dev, &rz1000_chipset, NULL);
52} 54}
53 55
54static const struct pci_device_id rz1000_pci_tbl[] = { 56static const struct pci_device_id rz1000_pci_tbl[] = {
@@ -62,6 +64,7 @@ static struct pci_driver driver = {
62 .name = "RZ1000_IDE", 64 .name = "RZ1000_IDE",
63 .id_table = rz1000_pci_tbl, 65 .id_table = rz1000_pci_tbl,
64 .probe = rz1000_init_one, 66 .probe = rz1000_init_one,
67 .remove = ide_pci_remove,
65}; 68};
66 69
67static int __init rz1000_ide_init(void) 70static int __init rz1000_ide_init(void)
@@ -69,7 +72,13 @@ static int __init rz1000_ide_init(void)
69 return ide_pci_register_driver(&driver); 72 return ide_pci_register_driver(&driver);
70} 73}
71 74
75static void __exit rz1000_ide_exit(void)
76{
77 pci_unregister_driver(&driver);
78}
79
72module_init(rz1000_ide_init); 80module_init(rz1000_ide_init);
81module_exit(rz1000_ide_exit);
73 82
74MODULE_AUTHOR("Andre Hedrick"); 83MODULE_AUTHOR("Andre Hedrick");
75MODULE_DESCRIPTION("PCI driver module for RZ1000 IDE"); 84MODULE_DESCRIPTION("PCI driver module for RZ1000 IDE");
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 14c787b5d95f..8efaed16fea3 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -22,6 +22,8 @@
22 22
23#include <asm/io.h> 23#include <asm/io.h>
24 24
25#define DRV_NAME "sc1200"
26
25#define SC1200_REV_A 0x00 27#define SC1200_REV_A 0x00
26#define SC1200_REV_B1 0x01 28#define SC1200_REV_B1 0x01
27#define SC1200_REV_B3 0x02 29#define SC1200_REV_B3 0x02
@@ -234,21 +236,11 @@ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
234 * we only save state when going from full power to less 236 * we only save state when going from full power to less
235 */ 237 */
236 if (state.event == PM_EVENT_ON) { 238 if (state.event == PM_EVENT_ON) {
237 struct sc1200_saved_state *ss; 239 struct ide_host *host = pci_get_drvdata(dev);
240 struct sc1200_saved_state *ss = host->host_priv;
238 unsigned int r; 241 unsigned int r;
239 242
240 /* 243 /*
241 * allocate a permanent save area, if not already allocated
242 */
243 ss = (struct sc1200_saved_state *)pci_get_drvdata(dev);
244 if (ss == NULL) {
245 ss = kmalloc(sizeof(*ss), GFP_KERNEL);
246 if (ss == NULL)
247 return -ENOMEM;
248 pci_set_drvdata(dev, ss);
249 }
250
251 /*
252 * save timing registers 244 * save timing registers
253 * (this may be unnecessary if BIOS also does it) 245 * (this may be unnecessary if BIOS also does it)
254 */ 246 */
@@ -263,7 +255,8 @@ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
263 255
264static int sc1200_resume (struct pci_dev *dev) 256static int sc1200_resume (struct pci_dev *dev)
265{ 257{
266 struct sc1200_saved_state *ss; 258 struct ide_host *host = pci_get_drvdata(dev);
259 struct sc1200_saved_state *ss = host->host_priv;
267 unsigned int r; 260 unsigned int r;
268 int i; 261 int i;
269 262
@@ -271,16 +264,12 @@ static int sc1200_resume (struct pci_dev *dev)
271 if (i) 264 if (i)
272 return i; 265 return i;
273 266
274 ss = (struct sc1200_saved_state *)pci_get_drvdata(dev);
275
276 /* 267 /*
277 * restore timing registers 268 * restore timing registers
278 * (this may be unnecessary if BIOS also does it) 269 * (this may be unnecessary if BIOS also does it)
279 */ 270 */
280 if (ss) { 271 for (r = 0; r < 8; r++)
281 for (r = 0; r < 8; r++) 272 pci_write_config_dword(dev, 0x40 + r * 4, ss->regs[r]);
282 pci_write_config_dword(dev, 0x40 + r * 4, ss->regs[r]);
283 }
284 273
285 return 0; 274 return 0;
286} 275}
@@ -304,7 +293,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
304}; 293};
305 294
306static const struct ide_port_info sc1200_chipset __devinitdata = { 295static const struct ide_port_info sc1200_chipset __devinitdata = {
307 .name = "SC1200", 296 .name = DRV_NAME,
308 .port_ops = &sc1200_port_ops, 297 .port_ops = &sc1200_port_ops,
309 .dma_ops = &sc1200_dma_ops, 298 .dma_ops = &sc1200_dma_ops,
310 .host_flags = IDE_HFLAG_SERIALIZE | 299 .host_flags = IDE_HFLAG_SERIALIZE |
@@ -317,7 +306,19 @@ static const struct ide_port_info sc1200_chipset __devinitdata = {
317 306
318static int __devinit sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) 307static int __devinit sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
319{ 308{
320 return ide_setup_pci_device(dev, &sc1200_chipset); 309 struct sc1200_saved_state *ss = NULL;
310 int rc;
311
312#ifdef CONFIG_PM
313 ss = kmalloc(sizeof(*ss), GFP_KERNEL);
314 if (ss == NULL)
315 return -ENOMEM;
316#endif
317 rc = ide_pci_init_one(dev, &sc1200_chipset, ss);
318 if (rc)
319 kfree(ss);
320
321 return rc;
321} 322}
322 323
323static const struct pci_device_id sc1200_pci_tbl[] = { 324static const struct pci_device_id sc1200_pci_tbl[] = {
@@ -330,6 +331,7 @@ static struct pci_driver driver = {
330 .name = "SC1200_IDE", 331 .name = "SC1200_IDE",
331 .id_table = sc1200_pci_tbl, 332 .id_table = sc1200_pci_tbl,
332 .probe = sc1200_init_one, 333 .probe = sc1200_init_one,
334 .remove = ide_pci_remove,
333#ifdef CONFIG_PM 335#ifdef CONFIG_PM
334 .suspend = sc1200_suspend, 336 .suspend = sc1200_suspend,
335 .resume = sc1200_resume, 337 .resume = sc1200_resume,
@@ -341,7 +343,13 @@ static int __init sc1200_ide_init(void)
341 return ide_pci_register_driver(&driver); 343 return ide_pci_register_driver(&driver);
342} 344}
343 345
346static void __exit sc1200_ide_exit(void)
347{
348 pci_unregister_driver(&driver);
349}
350
344module_init(sc1200_ide_init); 351module_init(sc1200_ide_init);
352module_exit(sc1200_ide_exit);
345 353
346MODULE_AUTHOR("Mark Lord"); 354MODULE_AUTHOR("Mark Lord");
347MODULE_DESCRIPTION("PCI driver module for NS SC1200 IDE"); 355MODULE_DESCRIPTION("PCI driver module for NS SC1200 IDE");
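
For sc1200 the same host_priv mechanism replaces the lazy allocation in the suspend path: the save area is now allocated once in the probe routine (only under CONFIG_PM) and both PM hooks reach it through host->host_priv. A rough sketch of the resume side under that assumption (qux_* names are illustrative; a real handler would also restore PCI power state as the driver above does):

struct qux_saved_state {
	u32 regs[8];				/* timing registers saved at suspend */
};

static int qux_resume(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);
	struct qux_saved_state *ss = host->host_priv;
	int i, rc;

	rc = pci_enable_device(dev);		/* minimal re-enable for the sketch */
	if (rc)
		return rc;

	/* restore the timing registers captured by the matching suspend hook */
	for (i = 0; i < 8; i++)
		pci_write_config_dword(dev, 0x40 + i * 4, ss->regs[i]);

	return 0;
}
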
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 789c66dfbde5..94a7ab864236 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -65,7 +65,7 @@
65 65
66static struct scc_ports { 66static struct scc_ports {
67 unsigned long ctl, dma; 67 unsigned long ctl, dma;
68 ide_hwif_t *hwif; /* for removing port from system */ 68 struct ide_host *host; /* for removing port from system */
69} scc_ports[MAX_HWIFS]; 69} scc_ports[MAX_HWIFS];
70 70
71/* PIO transfer mode table */ 71/* PIO transfer mode table */
@@ -126,6 +126,46 @@ static u8 scc_ide_inb(unsigned long port)
126 return (u8)data; 126 return (u8)data;
127} 127}
128 128
129static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
130{
131 out_be32((void *)hwif->io_ports.command_addr, cmd);
132 eieio();
133 in_be32((void *)(hwif->dma_base + 0x01c));
134 eieio();
135}
136
137static u8 scc_read_status(ide_hwif_t *hwif)
138{
139 return (u8)in_be32((void *)hwif->io_ports.status_addr);
140}
141
142static u8 scc_read_altstatus(ide_hwif_t *hwif)
143{
144 return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
145}
146
147static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
148{
149 return (u8)in_be32((void *)(hwif->dma_base + 4));
150}
151
152static void scc_set_irq(ide_hwif_t *hwif, int on)
153{
154 u8 ctl = ATA_DEVCTL_OBS;
155
156 if (on == 4) { /* hack for SRST */
157 ctl |= 4;
158 on &= ~4;
159 }
160
161 ctl |= on ? 0 : 2;
162
163 out_be32((void *)hwif->io_ports.ctl_addr, ctl);
164 eieio();
165 in_be32((void *)(hwif->dma_base + 0x01c));
166 eieio();
167}
168
129static void scc_ide_insw(unsigned long port, void *addr, u32 count) 169static void scc_ide_insw(unsigned long port, void *addr, u32 count)
130{ 170{
131 u16 *ptr = (u16 *)addr; 171 u16 *ptr = (u16 *)addr;
@@ -148,14 +188,6 @@ static void scc_ide_outb(u8 addr, unsigned long port)
148 out_be32((void*)port, addr); 188 out_be32((void*)port, addr);
149} 189}
150 190
151static void scc_ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
152{
153 out_be32((void*)port, addr);
154 eieio();
155 in_be32((void*)(hwif->dma_base + 0x01c));
156 eieio();
157}
158
159static void 191static void
160scc_ide_outsw(unsigned long port, void *addr, u32 count) 192scc_ide_outsw(unsigned long port, void *addr, u32 count)
161{ 193{
@@ -261,14 +293,14 @@ static void scc_dma_host_set(ide_drive_t *drive, int on)
261{ 293{
262 ide_hwif_t *hwif = drive->hwif; 294 ide_hwif_t *hwif = drive->hwif;
263 u8 unit = (drive->select.b.unit & 0x01); 295 u8 unit = (drive->select.b.unit & 0x01);
264 u8 dma_stat = scc_ide_inb(hwif->dma_status); 296 u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);
265 297
266 if (on) 298 if (on)
267 dma_stat |= (1 << (5 + unit)); 299 dma_stat |= (1 << (5 + unit));
268 else 300 else
269 dma_stat &= ~(1 << (5 + unit)); 301 dma_stat &= ~(1 << (5 + unit));
270 302
271 scc_ide_outb(dma_stat, hwif->dma_status); 303 scc_ide_outb(dma_stat, hwif->dma_base + 4);
272} 304}
273 305
274/** 306/**
@@ -304,13 +336,13 @@ static int scc_dma_setup(ide_drive_t *drive)
304 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma); 336 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
305 337
306 /* specify r/w */ 338 /* specify r/w */
307 out_be32((void __iomem *)hwif->dma_command, reading); 339 out_be32((void __iomem *)hwif->dma_base, reading);
308 340
309 /* read dma_status for INTR & ERROR flags */ 341 /* read DMA status for INTR & ERROR flags */
310 dma_stat = in_be32((void __iomem *)hwif->dma_status); 342 dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));
311 343
312 /* clear INTR & ERROR flags */ 344 /* clear INTR & ERROR flags */
313 out_be32((void __iomem *)hwif->dma_status, dma_stat|6); 345 out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
314 drive->waiting_for_dma = 1; 346 drive->waiting_for_dma = 1;
315 return 0; 347 return 0;
316} 348}
@@ -318,10 +350,10 @@ static int scc_dma_setup(ide_drive_t *drive)
318static void scc_dma_start(ide_drive_t *drive) 350static void scc_dma_start(ide_drive_t *drive)
319{ 351{
320 ide_hwif_t *hwif = drive->hwif; 352 ide_hwif_t *hwif = drive->hwif;
321 u8 dma_cmd = scc_ide_inb(hwif->dma_command); 353 u8 dma_cmd = scc_ide_inb(hwif->dma_base);
322 354
323 /* start DMA */ 355 /* start DMA */
324 scc_ide_outb(dma_cmd | 1, hwif->dma_command); 356 scc_ide_outb(dma_cmd | 1, hwif->dma_base);
325 hwif->dma = 1; 357 hwif->dma = 1;
326 wmb(); 358 wmb();
327} 359}
@@ -333,13 +365,13 @@ static int __scc_dma_end(ide_drive_t *drive)
333 365
334 drive->waiting_for_dma = 0; 366 drive->waiting_for_dma = 0;
335 /* get DMA command mode */ 367 /* get DMA command mode */
336 dma_cmd = scc_ide_inb(hwif->dma_command); 368 dma_cmd = scc_ide_inb(hwif->dma_base);
337 /* stop DMA */ 369 /* stop DMA */
338 scc_ide_outb(dma_cmd & ~1, hwif->dma_command); 370 scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
339 /* get DMA status */ 371 /* get DMA status */
340 dma_stat = scc_ide_inb(hwif->dma_status); 372 dma_stat = scc_ide_inb(hwif->dma_base + 4);
341 /* clear the INTR & ERROR bits */ 373 /* clear the INTR & ERROR bits */
342 scc_ide_outb(dma_stat | 6, hwif->dma_status); 374 scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
343 /* purge DMA mappings */ 375 /* purge DMA mappings */
344 ide_destroy_dmatable(drive); 376 ide_destroy_dmatable(drive);
345 /* verify good DMA status */ 377 /* verify good DMA status */
@@ -359,6 +391,7 @@ static int __scc_dma_end(ide_drive_t *drive)
359static int scc_dma_end(ide_drive_t *drive) 391static int scc_dma_end(ide_drive_t *drive)
360{ 392{
361 ide_hwif_t *hwif = HWIF(drive); 393 ide_hwif_t *hwif = HWIF(drive);
394 void __iomem *dma_base = (void __iomem *)hwif->dma_base;
362 unsigned long intsts_port = hwif->dma_base + 0x014; 395 unsigned long intsts_port = hwif->dma_base + 0x014;
363 u32 reg; 396 u32 reg;
364 int dma_stat, data_loss = 0; 397 int dma_stat, data_loss = 0;
@@ -397,7 +430,7 @@ static int scc_dma_end(ide_drive_t *drive)
397 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME); 430 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
398 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT); 431 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
399 432
400 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 433 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
401 continue; 434 continue;
402 } 435 }
403 436
@@ -412,7 +445,7 @@ static int scc_dma_end(ide_drive_t *drive)
412 445
413 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT); 446 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
414 447
415 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 448 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
416 continue; 449 continue;
417 } 450 }
418 451
@@ -420,12 +453,12 @@ static int scc_dma_end(ide_drive_t *drive)
420 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME); 453 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
421 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT); 454 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
422 455
423 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 456 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
424 continue; 457 continue;
425 } 458 }
426 459
427 if (reg & INTSTS_ICERR) { 460 if (reg & INTSTS_ICERR) {
428 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 461 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
429 462
430 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME); 463 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
431 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT); 464 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
@@ -553,14 +586,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
553 const struct ide_port_info *d) 586 const struct ide_port_info *d)
554{ 587{
555 struct scc_ports *ports = pci_get_drvdata(dev); 588 struct scc_ports *ports = pci_get_drvdata(dev);
556 ide_hwif_t *hwif = NULL; 589 struct ide_host *host;
557 hw_regs_t hw; 590 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
558 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 591 int i, rc;
559 int i;
560
561 hwif = ide_find_port_slot(d);
562 if (hwif == NULL)
563 return -ENOMEM;
564 592
565 memset(&hw, 0, sizeof(hw)); 593 memset(&hw, 0, sizeof(hw));
566 for (i = 0; i <= 8; i++) 594 for (i = 0; i <= 8; i++)
@@ -568,11 +596,12 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
568 hw.irq = dev->irq; 596 hw.irq = dev->irq;
569 hw.dev = &dev->dev; 597 hw.dev = &dev->dev;
570 hw.chipset = ide_pci; 598 hw.chipset = ide_pci;
571 ide_init_port_hw(hwif, &hw);
572 599
573 idx[0] = hwif->index; 600 rc = ide_host_add(d, hws, &host);
601 if (rc)
602 return rc;
574 603
575 ide_device_add(idx, d); 604 ports->host = host;
576 605
577 return 0; 606 return 0;
578} 607}
@@ -701,6 +730,8 @@ static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
701 /* be sure we're looking at the low order bits */ 730 /* be sure we're looking at the low order bits */
702 scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 731 scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
703 732
733 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
734 tf->feature = scc_ide_inb(io_ports->feature_addr);
704 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 735 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
705 tf->nsect = scc_ide_inb(io_ports->nsect_addr); 736 tf->nsect = scc_ide_inb(io_ports->nsect_addr);
706 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 737 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -774,16 +805,6 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
774 805
775 ide_set_hwifdata(hwif, ports); 806 ide_set_hwifdata(hwif, ports);
776 807
777 hwif->tf_load = scc_tf_load;
778 hwif->tf_read = scc_tf_read;
779
780 hwif->input_data = scc_input_data;
781 hwif->output_data = scc_output_data;
782
783 hwif->INB = scc_ide_inb;
784 hwif->OUTB = scc_ide_outb;
785 hwif->OUTBSYNC = scc_ide_outbsync;
786
787 hwif->dma_base = dma_base; 808 hwif->dma_base = dma_base;
788 hwif->config_data = ports->ctl; 809 hwif->config_data = ports->ctl;
789} 810}
@@ -824,11 +845,6 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
824{ 845{
825 struct scc_ports *ports = ide_get_hwifdata(hwif); 846 struct scc_ports *ports = ide_get_hwifdata(hwif);
826 847
827 ports->hwif = hwif;
828
829 hwif->dma_command = hwif->dma_base;
830 hwif->dma_status = hwif->dma_base + 0x04;
831
832 /* PTERADD */ 848 /* PTERADD */
833 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); 849 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
834 850
@@ -838,6 +854,21 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
838 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ 854 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
839} 855}
840 856
857static const struct ide_tp_ops scc_tp_ops = {
858 .exec_command = scc_exec_command,
859 .read_status = scc_read_status,
860 .read_altstatus = scc_read_altstatus,
861 .read_sff_dma_status = scc_read_sff_dma_status,
862
863 .set_irq = scc_set_irq,
864
865 .tf_load = scc_tf_load,
866 .tf_read = scc_tf_read,
867
868 .input_data = scc_input_data,
869 .output_data = scc_output_data,
870};
871
841static const struct ide_port_ops scc_port_ops = { 872static const struct ide_port_ops scc_port_ops = {
842 .set_pio_mode = scc_set_pio_mode, 873 .set_pio_mode = scc_set_pio_mode,
843 .set_dma_mode = scc_set_dma_mode, 874 .set_dma_mode = scc_set_dma_mode,
@@ -861,6 +892,7 @@ static const struct ide_dma_ops scc_dma_ops = {
861 .name = name_str, \ 892 .name = name_str, \
862 .init_iops = init_iops_scc, \ 893 .init_iops = init_iops_scc, \
863 .init_hwif = init_hwif_scc, \ 894 .init_hwif = init_hwif_scc, \
895 .tp_ops = &scc_tp_ops, \
864 .port_ops = &scc_port_ops, \ 896 .port_ops = &scc_port_ops, \
865 .dma_ops = &scc_dma_ops, \ 897 .dma_ops = &scc_dma_ops, \
866 .host_flags = IDE_HFLAG_SINGLE, \ 898 .host_flags = IDE_HFLAG_SINGLE, \
@@ -895,7 +927,8 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
895static void __devexit scc_remove(struct pci_dev *dev) 927static void __devexit scc_remove(struct pci_dev *dev)
896{ 928{
897 struct scc_ports *ports = pci_get_drvdata(dev); 929 struct scc_ports *ports = pci_get_drvdata(dev);
898 ide_hwif_t *hwif = ports->hwif; 930 struct ide_host *host = ports->host;
931 ide_hwif_t *hwif = host->ports[0];
899 932
900 if (hwif->dmatable_cpu) { 933 if (hwif->dmatable_cpu) {
901 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES, 934 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -903,7 +936,7 @@ static void __devexit scc_remove(struct pci_dev *dev)
903 hwif->dmatable_cpu = NULL; 936 hwif->dmatable_cpu = NULL;
904 } 937 }
905 938
906 ide_unregister(hwif); 939 ide_host_remove(host);
907 940
908 iounmap((void*)ports->dma); 941 iounmap((void*)ports->dma);
909 iounmap((void*)ports->ctl); 942 iounmap((void*)ports->ctl);
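
Note on the scc_pata hunks above: per-hwif method pointers (hwif->tf_load, hwif->INB, hwif->OUTBSYNC and friends) are replaced by a single const ops table (ide_tp_ops) referenced from the port information. A generic sketch of that conversion pattern, using invented type names and example register offsets rather than the real ide_tp_ops layout:

#include <linux/types.h>
#include <linux/io.h>

struct chip_ops {
        u8   (*read_status)(void __iomem *base);
        void (*exec_command)(void __iomem *base, u8 cmd);
};

static u8 mmio_read_status(void __iomem *base)
{
        return readb(base + 0x1c);              /* example offset */
}

static void mmio_exec_command(void __iomem *base, u8 cmd)
{
        writeb(cmd, base + 0x1d);               /* example offset */
}

/* one shared, read-only table instead of pointers patched into each object */
static const struct chip_ops mmio_chip_ops = {
        .read_status    = mmio_read_status,
        .exec_command   = mmio_exec_command,
};

struct chip {
        void __iomem            *base;
        const struct chip_ops   *ops;           /* points at the shared table */
};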
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index a1fb20826a5b..d173f2937722 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -38,6 +38,8 @@
38 38
39#include <asm/io.h> 39#include <asm/io.h>
40 40
41#define DRV_NAME "serverworks"
42
41#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */ 43#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
42#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */ 44#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
43 45
@@ -172,7 +174,7 @@ static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed)
172 pci_write_config_byte(dev, 0x54, ultra_enable); 174 pci_write_config_byte(dev, 0x54, ultra_enable);
173} 175}
174 176
175static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const char *name) 177static unsigned int __devinit init_chipset_svwks(struct pci_dev *dev)
176{ 178{
177 unsigned int reg; 179 unsigned int reg;
178 u8 btr; 180 u8 btr;
@@ -188,7 +190,8 @@ static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const cha
188 pci_read_config_dword(isa_dev, 0x64, &reg); 190 pci_read_config_dword(isa_dev, 0x64, &reg);
189 reg &= ~0x00002000; /* disable 600ns interrupt mask */ 191 reg &= ~0x00002000; /* disable 600ns interrupt mask */
190 if(!(reg & 0x00004000)) 192 if(!(reg & 0x00004000))
191 printk(KERN_DEBUG "%s: UDMA not BIOS enabled.\n", name); 193 printk(KERN_DEBUG DRV_NAME " %s: UDMA not BIOS "
194 "enabled.\n", pci_name(dev));
192 reg |= 0x00004000; /* enable UDMA/33 support */ 195 reg |= 0x00004000; /* enable UDMA/33 support */
193 pci_write_config_dword(isa_dev, 0x64, reg); 196 pci_write_config_dword(isa_dev, 0x64, reg);
194 } 197 }
@@ -349,45 +352,47 @@ static const struct ide_port_ops svwks_port_ops = {
349 .cable_detect = svwks_cable_detect, 352 .cable_detect = svwks_cable_detect,
350}; 353};
351 354
352#define IDE_HFLAGS_SVWKS \ 355#define IDE_HFLAGS_SVWKS IDE_HFLAG_LEGACY_IRQS
353 (IDE_HFLAG_LEGACY_IRQS | \
354 IDE_HFLAG_ABUSE_SET_DMA_MODE)
355 356
356static const struct ide_port_info serverworks_chipsets[] __devinitdata = { 357static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
357 { /* 0 */ 358 { /* 0: OSB4 */
358 .name = "SvrWks OSB4", 359 .name = DRV_NAME,
359 .init_chipset = init_chipset_svwks, 360 .init_chipset = init_chipset_svwks,
360 .port_ops = &osb4_port_ops, 361 .port_ops = &osb4_port_ops,
361 .host_flags = IDE_HFLAGS_SVWKS, 362 .host_flags = IDE_HFLAGS_SVWKS,
362 .pio_mask = ATA_PIO4, 363 .pio_mask = ATA_PIO4,
363 .mwdma_mask = ATA_MWDMA2, 364 .mwdma_mask = ATA_MWDMA2,
364 .udma_mask = 0x00, /* UDMA is problematic on OSB4 */ 365 .udma_mask = 0x00, /* UDMA is problematic on OSB4 */
365 },{ /* 1 */ 366 },
366 .name = "SvrWks CSB5", 367 { /* 1: CSB5 */
368 .name = DRV_NAME,
367 .init_chipset = init_chipset_svwks, 369 .init_chipset = init_chipset_svwks,
368 .port_ops = &svwks_port_ops, 370 .port_ops = &svwks_port_ops,
369 .host_flags = IDE_HFLAGS_SVWKS, 371 .host_flags = IDE_HFLAGS_SVWKS,
370 .pio_mask = ATA_PIO4, 372 .pio_mask = ATA_PIO4,
371 .mwdma_mask = ATA_MWDMA2, 373 .mwdma_mask = ATA_MWDMA2,
372 .udma_mask = ATA_UDMA5, 374 .udma_mask = ATA_UDMA5,
373 },{ /* 2 */ 375 },
374 .name = "SvrWks CSB6", 376 { /* 2: CSB6 */
377 .name = DRV_NAME,
375 .init_chipset = init_chipset_svwks, 378 .init_chipset = init_chipset_svwks,
376 .port_ops = &svwks_port_ops, 379 .port_ops = &svwks_port_ops,
377 .host_flags = IDE_HFLAGS_SVWKS, 380 .host_flags = IDE_HFLAGS_SVWKS,
378 .pio_mask = ATA_PIO4, 381 .pio_mask = ATA_PIO4,
379 .mwdma_mask = ATA_MWDMA2, 382 .mwdma_mask = ATA_MWDMA2,
380 .udma_mask = ATA_UDMA5, 383 .udma_mask = ATA_UDMA5,
381 },{ /* 3 */ 384 },
382 .name = "SvrWks CSB6", 385 { /* 3: CSB6-2 */
386 .name = DRV_NAME,
383 .init_chipset = init_chipset_svwks, 387 .init_chipset = init_chipset_svwks,
384 .port_ops = &svwks_port_ops, 388 .port_ops = &svwks_port_ops,
385 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, 389 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
386 .pio_mask = ATA_PIO4, 390 .pio_mask = ATA_PIO4,
387 .mwdma_mask = ATA_MWDMA2, 391 .mwdma_mask = ATA_MWDMA2,
388 .udma_mask = ATA_UDMA5, 392 .udma_mask = ATA_UDMA5,
389 },{ /* 4 */ 393 },
390 .name = "SvrWks HT1000", 394 { /* 4: HT1000 */
395 .name = DRV_NAME,
391 .init_chipset = init_chipset_svwks, 396 .init_chipset = init_chipset_svwks,
392 .port_ops = &svwks_port_ops, 397 .port_ops = &svwks_port_ops,
393 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, 398 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
@@ -424,7 +429,7 @@ static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device
424 d.host_flags &= ~IDE_HFLAG_SINGLE; 429 d.host_flags &= ~IDE_HFLAG_SINGLE;
425 } 430 }
426 431
427 return ide_setup_pci_device(dev, &d); 432 return ide_pci_init_one(dev, &d, NULL);
428} 433}
429 434
430static const struct pci_device_id svwks_pci_tbl[] = { 435static const struct pci_device_id svwks_pci_tbl[] = {
@@ -441,6 +446,7 @@ static struct pci_driver driver = {
441 .name = "Serverworks_IDE", 446 .name = "Serverworks_IDE",
442 .id_table = svwks_pci_tbl, 447 .id_table = svwks_pci_tbl,
443 .probe = svwks_init_one, 448 .probe = svwks_init_one,
449 .remove = ide_pci_remove,
444}; 450};
445 451
446static int __init svwks_ide_init(void) 452static int __init svwks_ide_init(void)
@@ -448,7 +454,13 @@ static int __init svwks_ide_init(void)
448 return ide_pci_register_driver(&driver); 454 return ide_pci_register_driver(&driver);
449} 455}
450 456
457static void __exit svwks_ide_exit(void)
458{
459 pci_unregister_driver(&driver);
460}
461
451module_init(svwks_ide_init); 462module_init(svwks_ide_init);
463module_exit(svwks_ide_exit);
452 464
453MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick"); 465MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick");
454MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE"); 466MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
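
Note on the serverworks hunks above: like most files in this series, the driver gains a .remove hook and a module_exit() so it can be unbound and unloaded cleanly, and its messages are now prefixed with DRV_NAME plus pci_name(dev). A minimal, self-contained skeleton of that registration pairing; all demo_* names are invented and the device ID is a placeholder:

#include <linux/module.h>
#include <linux/pci.h>

#define DRV_NAME "demo"

static int demo_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        printk(KERN_INFO DRV_NAME " %s: probed\n", pci_name(dev));
        return 0;
}

static void demo_remove(struct pci_dev *dev)
{
        /* undo whatever probe set up */
}

static const struct pci_device_id demo_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID) },        /* placeholder ID */
        { 0, },
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_driver = {
        .name           = DRV_NAME,
        .id_table       = demo_ids,
        .probe          = demo_probe,
        .remove         = demo_remove,
};

static int __init demo_init(void)
{
        return pci_register_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
        pci_unregister_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");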
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index c79ff5b41088..42eef19a18f1 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -127,7 +127,7 @@ sgiioc4_checkirq(ide_hwif_t * hwif)
127 return 0; 127 return 0;
128} 128}
129 129
130static u8 sgiioc4_INB(unsigned long); 130static u8 sgiioc4_read_status(ide_hwif_t *);
131 131
132static int 132static int
133sgiioc4_clearirq(ide_drive_t * drive) 133sgiioc4_clearirq(ide_drive_t * drive)
@@ -141,18 +141,19 @@ sgiioc4_clearirq(ide_drive_t * drive)
141 intr_reg = readl((void __iomem *)other_ir); 141 intr_reg = readl((void __iomem *)other_ir);
142 if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */ 142 if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
143 /* 143 /*
144 * Using sgiioc4_INB to read the Status register has a side 144 * Using sgiioc4_read_status to read the Status register has a
145 * effect of clearing the interrupt. The first read should 145 * side effect of clearing the interrupt. The first read should
146 * clear it if it is set. The second read should return 146 * clear it if it is set. The second read should return
147 * a "clear" status if it got cleared. If not, then spin 147 * a "clear" status if it got cleared. If not, then spin
148 * for a bit trying to clear it. 148 * for a bit trying to clear it.
149 */ 149 */
150 u8 stat = sgiioc4_INB(io_ports->status_addr); 150 u8 stat = sgiioc4_read_status(hwif);
151 int count = 0; 151 int count = 0;
152 stat = sgiioc4_INB(io_ports->status_addr); 152
153 stat = sgiioc4_read_status(hwif);
153 while ((stat & 0x80) && (count++ < 100)) { 154 while ((stat & 0x80) && (count++ < 100)) {
154 udelay(1); 155 udelay(1);
155 stat = sgiioc4_INB(io_ports->status_addr); 156 stat = sgiioc4_read_status(hwif);
156 } 157 }
157 158
158 if (intr_reg & 0x02) { 159 if (intr_reg & 0x02) {
@@ -304,9 +305,9 @@ sgiioc4_dma_lost_irq(ide_drive_t * drive)
304 ide_dma_lost_irq(drive); 305 ide_dma_lost_irq(drive);
305} 306}
306 307
307static u8 308static u8 sgiioc4_read_status(ide_hwif_t *hwif)
308sgiioc4_INB(unsigned long port)
309{ 309{
310 unsigned long port = hwif->io_ports.status_addr;
310 u8 reg = (u8) readb((void __iomem *) port); 311 u8 reg = (u8) readb((void __iomem *) port);
311 312
312 if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */ 313 if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */
@@ -549,6 +550,21 @@ static int sgiioc4_dma_setup(ide_drive_t *drive)
549 return 0; 550 return 0;
550} 551}
551 552
553static const struct ide_tp_ops sgiioc4_tp_ops = {
554 .exec_command = ide_exec_command,
555 .read_status = sgiioc4_read_status,
556 .read_altstatus = ide_read_altstatus,
557 .read_sff_dma_status = ide_read_sff_dma_status,
558
559 .set_irq = ide_set_irq,
560
561 .tf_load = ide_tf_load,
562 .tf_read = ide_tf_read,
563
564 .input_data = ide_input_data,
565 .output_data = ide_output_data,
566};
567
552static const struct ide_port_ops sgiioc4_port_ops = { 568static const struct ide_port_ops sgiioc4_port_ops = {
553 .set_dma_mode = sgiioc4_set_dma_mode, 569 .set_dma_mode = sgiioc4_set_dma_mode,
554 /* reset DMA engine, clear IRQs */ 570 /* reset DMA engine, clear IRQs */
@@ -571,6 +587,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = {
571 .name = DRV_NAME, 587 .name = DRV_NAME,
572 .chipset = ide_pci, 588 .chipset = ide_pci,
573 .init_dma = ide_dma_sgiioc4, 589 .init_dma = ide_dma_sgiioc4,
590 .tp_ops = &sgiioc4_tp_ops,
574 .port_ops = &sgiioc4_port_ops, 591 .port_ops = &sgiioc4_port_ops,
575 .dma_ops = &sgiioc4_dma_ops, 592 .dma_ops = &sgiioc4_dma_ops,
576 .host_flags = IDE_HFLAG_MMIO, 593 .host_flags = IDE_HFLAG_MMIO,
@@ -583,10 +600,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
583 unsigned long cmd_base, irqport; 600 unsigned long cmd_base, irqport;
584 unsigned long bar0, cmd_phys_base, ctl; 601 unsigned long bar0, cmd_phys_base, ctl;
585 void __iomem *virt_base; 602 void __iomem *virt_base;
586 ide_hwif_t *hwif; 603 struct ide_host *host;
587 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 604 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
588 hw_regs_t hw;
589 struct ide_port_info d = sgiioc4_port_info; 605 struct ide_port_info d = sgiioc4_port_info;
606 int rc;
590 607
591 /* Get the CmdBlk and CtrlBlk Base Registers */ 608 /* Get the CmdBlk and CtrlBlk Base Registers */
592 bar0 = pci_resource_start(dev, 0); 609 bar0 = pci_resource_start(dev, 0);
@@ -618,30 +635,26 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
618 hw.chipset = ide_pci; 635 hw.chipset = ide_pci;
619 hw.dev = &dev->dev; 636 hw.dev = &dev->dev;
620 637
621 hwif = ide_find_port_slot(&d);
622 if (hwif == NULL)
623 goto err;
624
625 ide_init_port_hw(hwif, &hw);
626
627 /* The IOC4 uses MMIO rather than Port IO. */
628 default_hwif_mmiops(hwif);
629
630 /* Initializing chipset IRQ Registers */ 638 /* Initializing chipset IRQ Registers */
631 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 639 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
632 640
633 hwif->INB = &sgiioc4_INB; 641 host = ide_host_alloc(&d, hws);
634 642 if (host == NULL) {
635 idx[0] = hwif->index; 643 rc = -ENOMEM;
644 goto err;
645 }
636 646
637 if (ide_device_add(idx, &d)) 647 rc = ide_host_register(host, &d, hws);
638 return -EIO; 648 if (rc)
649 goto err_free;
639 650
640 return 0; 651 return 0;
652err_free:
653 ide_host_free(host);
641err: 654err:
642 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); 655 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
643 iounmap(virt_base); 656 iounmap(virt_base);
644 return -ENOMEM; 657 return rc;
645} 658}
646 659
647static unsigned int __devinit 660static unsigned int __devinit
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 6e9d7655d89c..b8ad9ad6cf0d 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -44,6 +44,8 @@
44#include <linux/init.h> 44#include <linux/init.h>
45#include <linux/io.h> 45#include <linux/io.h>
46 46
47#define DRV_NAME "siimage"
48
47/** 49/**
48 * pdev_is_sata - check if device is SATA 50 * pdev_is_sata - check if device is SATA
49 * @pdev: PCI device to check 51 * @pdev: PCI device to check
@@ -127,9 +129,10 @@ static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
127 129
128static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr) 130static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr)
129{ 131{
132 struct ide_host *host = pci_get_drvdata(dev);
130 u8 tmp = 0; 133 u8 tmp = 0;
131 134
132 if (pci_get_drvdata(dev)) 135 if (host->host_priv)
133 tmp = readb((void __iomem *)addr); 136 tmp = readb((void __iomem *)addr);
134 else 137 else
135 pci_read_config_byte(dev, addr, &tmp); 138 pci_read_config_byte(dev, addr, &tmp);
@@ -139,9 +142,10 @@ static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr)
139 142
140static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr) 143static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr)
141{ 144{
145 struct ide_host *host = pci_get_drvdata(dev);
142 u16 tmp = 0; 146 u16 tmp = 0;
143 147
144 if (pci_get_drvdata(dev)) 148 if (host->host_priv)
145 tmp = readw((void __iomem *)addr); 149 tmp = readw((void __iomem *)addr);
146 else 150 else
147 pci_read_config_word(dev, addr, &tmp); 151 pci_read_config_word(dev, addr, &tmp);
@@ -151,7 +155,9 @@ static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr)
151 155
152static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr) 156static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr)
153{ 157{
154 if (pci_get_drvdata(dev)) 158 struct ide_host *host = pci_get_drvdata(dev);
159
160 if (host->host_priv)
155 writeb(val, (void __iomem *)addr); 161 writeb(val, (void __iomem *)addr);
156 else 162 else
157 pci_write_config_byte(dev, addr, val); 163 pci_write_config_byte(dev, addr, val);
@@ -159,7 +165,9 @@ static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr)
159 165
160static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr) 166static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr)
161{ 167{
162 if (pci_get_drvdata(dev)) 168 struct ide_host *host = pci_get_drvdata(dev);
169
170 if (host->host_priv)
163 writew(val, (void __iomem *)addr); 171 writew(val, (void __iomem *)addr);
164 else 172 else
165 pci_write_config_word(dev, addr, val); 173 pci_write_config_word(dev, addr, val);
@@ -167,7 +175,9 @@ static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr)
167 175
168static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr) 176static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr)
169{ 177{
170 if (pci_get_drvdata(dev)) 178 struct ide_host *host = pci_get_drvdata(dev);
179
180 if (host->host_priv)
171 writel(val, (void __iomem *)addr); 181 writel(val, (void __iomem *)addr);
172 else 182 else
173 pci_write_config_dword(dev, addr, val); 183 pci_write_config_dword(dev, addr, val);
@@ -334,7 +344,7 @@ static int siimage_io_dma_test_irq(ide_drive_t *drive)
334 unsigned long addr = siimage_selreg(hwif, 1); 344 unsigned long addr = siimage_selreg(hwif, 1);
335 345
336 /* return 1 if INTR asserted */ 346 /* return 1 if INTR asserted */
337 if (hwif->INB(hwif->dma_status) & 4) 347 if (inb(hwif->dma_base + ATA_DMA_STATUS) & 4)
338 return 1; 348 return 1;
339 349
340 /* return 1 if Device INTR asserted */ 350 /* return 1 if Device INTR asserted */
@@ -382,7 +392,7 @@ static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
382 } 392 }
383 393
384 /* return 1 if INTR asserted */ 394 /* return 1 if INTR asserted */
385 if (readb((void __iomem *)hwif->dma_status) & 0x04) 395 if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4)
386 return 1; 396 return 1;
387 397
388 /* return 1 if Device INTR asserted */ 398 /* return 1 if Device INTR asserted */
@@ -445,66 +455,24 @@ static void sil_sata_pre_reset(ide_drive_t *drive)
445} 455}
446 456
447/** 457/**
448 * setup_mmio_siimage - switch controller into MMIO mode
449 * @dev: PCI device we are configuring
450 * @name: device name
451 *
452 * Attempt to put the device into MMIO mode. There are some slight
453 * complications here with certain systems where the MMIO BAR isn't
454 * mapped, so we have to be sure that we can fall back to I/O.
455 */
456
457static unsigned int setup_mmio_siimage(struct pci_dev *dev, const char *name)
458{
459 resource_size_t bar5 = pci_resource_start(dev, 5);
460 unsigned long barsize = pci_resource_len(dev, 5);
461 void __iomem *ioaddr;
462
463 /*
464 * Drop back to PIO if we can't map the MMIO. Some systems
465 * seem to get terminally confused in the PCI spaces.
466 */
467 if (!request_mem_region(bar5, barsize, name)) {
468 printk(KERN_WARNING "siimage: IDE controller MMIO ports not "
469 "available.\n");
470 return 0;
471 }
472
473 ioaddr = ioremap(bar5, barsize);
474 if (ioaddr == NULL) {
475 release_mem_region(bar5, barsize);
476 return 0;
477 }
478
479 pci_set_master(dev);
480 pci_set_drvdata(dev, (void *) ioaddr);
481
482 return 1;
483}
484
485/**
486 * init_chipset_siimage - set up an SI device 458 * init_chipset_siimage - set up an SI device
487 * @dev: PCI device 459 * @dev: PCI device
488 * @name: device name
489 * 460 *
490 * Perform the initial PCI set up for this device. Attempt to switch 461 * Perform the initial PCI set up for this device. Attempt to switch
491 * to 133 MHz clocking if the system isn't already set up to do it. 462 * to 133 MHz clocking if the system isn't already set up to do it.
492 */ 463 */
493 464
494static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev, 465static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev)
495 const char *name)
496{ 466{
467 struct ide_host *host = pci_get_drvdata(dev);
468 void __iomem *ioaddr = host->host_priv;
497 unsigned long base, scsc_addr; 469 unsigned long base, scsc_addr;
498 void __iomem *ioaddr = NULL; 470 u8 rev = dev->revision, tmp;
499 u8 rev = dev->revision, tmp, BA5_EN;
500 471
501 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255); 472 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255);
502 473
503 pci_read_config_byte(dev, 0x8A, &BA5_EN); 474 if (ioaddr)
504 475 pci_set_master(dev);
505 if ((BA5_EN & 0x01) || pci_resource_start(dev, 5))
506 if (setup_mmio_siimage(dev, name))
507 ioaddr = pci_get_drvdata(dev);
508 476
509 base = (unsigned long)ioaddr; 477 base = (unsigned long)ioaddr;
510 478
@@ -571,7 +539,8 @@ static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev,
571 { "== 100", "== 133", "== 2X PCI", "DISABLED!" }; 539 { "== 100", "== 133", "== 2X PCI", "DISABLED!" };
572 540
573 tmp >>= 4; 541 tmp >>= 4;
574 printk(KERN_INFO "%s: BASE CLOCK %s\n", name, clk_str[tmp & 3]); 542 printk(KERN_INFO DRV_NAME " %s: BASE CLOCK %s\n",
543 pci_name(dev), clk_str[tmp & 3]);
575 } 544 }
576 545
577 return 0; 546 return 0;
@@ -592,7 +561,8 @@ static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev,
592static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif) 561static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
593{ 562{
594 struct pci_dev *dev = to_pci_dev(hwif->dev); 563 struct pci_dev *dev = to_pci_dev(hwif->dev);
595 void *addr = pci_get_drvdata(dev); 564 struct ide_host *host = pci_get_drvdata(dev);
565 void *addr = host->host_priv;
596 u8 ch = hwif->channel; 566 u8 ch = hwif->channel;
597 struct ide_io_ports *io_ports = &hwif->io_ports; 567 struct ide_io_ports *io_ports = &hwif->io_ports;
598 unsigned long base; 568 unsigned long base;
@@ -601,7 +571,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
601 * Fill in the basic hwif bits 571 * Fill in the basic hwif bits
602 */ 572 */
603 hwif->host_flags |= IDE_HFLAG_MMIO; 573 hwif->host_flags |= IDE_HFLAG_MMIO;
604 default_hwif_mmiops(hwif); 574
605 hwif->hwif_data = addr; 575 hwif->hwif_data = addr;
606 576
607 /* 577 /*
@@ -691,16 +661,15 @@ static void __devinit sil_quirkproc(ide_drive_t *drive)
691static void __devinit init_iops_siimage(ide_hwif_t *hwif) 661static void __devinit init_iops_siimage(ide_hwif_t *hwif)
692{ 662{
693 struct pci_dev *dev = to_pci_dev(hwif->dev); 663 struct pci_dev *dev = to_pci_dev(hwif->dev);
664 struct ide_host *host = pci_get_drvdata(dev);
694 665
695 hwif->hwif_data = NULL; 666 hwif->hwif_data = NULL;
696 667
697 /* Pessimal until we finish probing */ 668 /* Pessimal until we finish probing */
698 hwif->rqsize = 15; 669 hwif->rqsize = 15;
699 670
700 if (pci_get_drvdata(dev) == NULL) 671 if (host->host_priv)
701 return; 672 init_mmio_iops_siimage(hwif);
702
703 init_mmio_iops_siimage(hwif);
704} 673}
705 674
706/** 675/**
@@ -748,9 +717,9 @@ static const struct ide_dma_ops sil_dma_ops = {
748 .dma_lost_irq = ide_dma_lost_irq, 717 .dma_lost_irq = ide_dma_lost_irq,
749}; 718};
750 719
751#define DECLARE_SII_DEV(name_str, p_ops) \ 720#define DECLARE_SII_DEV(p_ops) \
752 { \ 721 { \
753 .name = name_str, \ 722 .name = DRV_NAME, \
754 .init_chipset = init_chipset_siimage, \ 723 .init_chipset = init_chipset_siimage, \
755 .init_iops = init_iops_siimage, \ 724 .init_iops = init_iops_siimage, \
756 .port_ops = p_ops, \ 725 .port_ops = p_ops, \
@@ -761,9 +730,8 @@ static const struct ide_dma_ops sil_dma_ops = {
761 } 730 }
762 731
763static const struct ide_port_info siimage_chipsets[] __devinitdata = { 732static const struct ide_port_info siimage_chipsets[] __devinitdata = {
764 /* 0 */ DECLARE_SII_DEV("SiI680", &sil_pata_port_ops), 733 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
765 /* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA", &sil_sata_port_ops), 734 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
766 /* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA", &sil_sata_port_ops)
767}; 735};
768 736
769/** 737/**
@@ -778,8 +746,13 @@ static const struct ide_port_info siimage_chipsets[] __devinitdata = {
778static int __devinit siimage_init_one(struct pci_dev *dev, 746static int __devinit siimage_init_one(struct pci_dev *dev,
779 const struct pci_device_id *id) 747 const struct pci_device_id *id)
780{ 748{
749 void __iomem *ioaddr = NULL;
750 resource_size_t bar5 = pci_resource_start(dev, 5);
751 unsigned long barsize = pci_resource_len(dev, 5);
752 int rc;
781 struct ide_port_info d; 753 struct ide_port_info d;
782 u8 idx = id->driver_data; 754 u8 idx = id->driver_data;
755 u8 BA5_EN;
783 756
784 d = siimage_chipsets[idx]; 757 d = siimage_chipsets[idx];
785 758
@@ -787,7 +760,7 @@ static int __devinit siimage_init_one(struct pci_dev *dev,
787 static int first = 1; 760 static int first = 1;
788 761
789 if (first) { 762 if (first) {
790 printk(KERN_INFO "siimage: For full SATA support you " 763 printk(KERN_INFO DRV_NAME ": For full SATA support you "
791 "should use the libata sata_sil module.\n"); 764 "should use the libata sata_sil module.\n");
792 first = 0; 765 first = 0;
793 } 766 }
@@ -795,14 +768,61 @@ static int __devinit siimage_init_one(struct pci_dev *dev,
795 d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA; 768 d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
796 } 769 }
797 770
798 return ide_setup_pci_device(dev, &d); 771 rc = pci_enable_device(dev);
772 if (rc)
773 return rc;
774
775 pci_read_config_byte(dev, 0x8A, &BA5_EN);
776 if ((BA5_EN & 0x01) || bar5) {
777 /*
778 * Drop back to PIO if we can't map the MMIO. Some systems
779 * seem to get terminally confused in the PCI spaces.
780 */
781 if (!request_mem_region(bar5, barsize, d.name)) {
782 printk(KERN_WARNING DRV_NAME " %s: MMIO ports not "
783 "available\n", pci_name(dev));
784 } else {
785 ioaddr = ioremap(bar5, barsize);
786 if (ioaddr == NULL)
787 release_mem_region(bar5, barsize);
788 }
789 }
790
791 rc = ide_pci_init_one(dev, &d, ioaddr);
792 if (rc) {
793 if (ioaddr) {
794 iounmap(ioaddr);
795 release_mem_region(bar5, barsize);
796 }
797 pci_disable_device(dev);
798 }
799
800 return rc;
801}
802
803static void __devexit siimage_remove(struct pci_dev *dev)
804{
805 struct ide_host *host = pci_get_drvdata(dev);
806 void __iomem *ioaddr = host->host_priv;
807
808 ide_pci_remove(dev);
809
810 if (ioaddr) {
811 resource_size_t bar5 = pci_resource_start(dev, 5);
812 unsigned long barsize = pci_resource_len(dev, 5);
813
814 iounmap(ioaddr);
815 release_mem_region(bar5, barsize);
816 }
817
818 pci_disable_device(dev);
799} 819}
800 820
801static const struct pci_device_id siimage_pci_tbl[] = { 821static const struct pci_device_id siimage_pci_tbl[] = {
802 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), 0 }, 822 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), 0 },
803#ifdef CONFIG_BLK_DEV_IDE_SATA 823#ifdef CONFIG_BLK_DEV_IDE_SATA
804 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_3112), 1 }, 824 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_3112), 1 },
805 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_1210SA), 2 }, 825 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_1210SA), 1 },
806#endif 826#endif
807 { 0, }, 827 { 0, },
808}; 828};
@@ -812,6 +832,7 @@ static struct pci_driver driver = {
812 .name = "SiI_IDE", 832 .name = "SiI_IDE",
813 .id_table = siimage_pci_tbl, 833 .id_table = siimage_pci_tbl,
814 .probe = siimage_init_one, 834 .probe = siimage_init_one,
835 .remove = siimage_remove,
815}; 836};
816 837
817static int __init siimage_ide_init(void) 838static int __init siimage_ide_init(void)
@@ -819,7 +840,13 @@ static int __init siimage_ide_init(void)
819 return ide_pci_register_driver(&driver); 840 return ide_pci_register_driver(&driver);
820} 841}
821 842
843static void __exit siimage_ide_exit(void)
844{
845 pci_unregister_driver(&driver);
846}
847
822module_init(siimage_ide_init); 848module_init(siimage_ide_init);
849module_exit(siimage_ide_exit);
823 850
824MODULE_AUTHOR("Andre Hedrick, Alan Cox"); 851MODULE_AUTHOR("Andre Hedrick, Alan Cox");
825MODULE_DESCRIPTION("PCI driver module for SiI IDE"); 852MODULE_DESCRIPTION("PCI driver module for SiI IDE");
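
Note on the siimage hunks above: the BAR5 MMIO mapping moves out of init_chipset and into the probe routine, which falls back to config-space accesses when the region cannot be requested or mapped, and the new remove path unwinds the mapping. A simplified sketch of that map-or-fall-back step, assuming BAR 5 as in the hunks above (the helper name is invented):

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *map_bar5_or_fallback(struct pci_dev *dev, const char *name)
{
        resource_size_t start = pci_resource_start(dev, 5);
        unsigned long len = pci_resource_len(dev, 5);
        void __iomem *ioaddr;

        if (!start || !len)
                return NULL;                    /* no MMIO BAR: use config space */

        if (!request_mem_region(start, len, name)) {
                dev_warn(&dev->dev, "MMIO ports not available\n");
                return NULL;                    /* fall back to PIO/config access */
        }

        ioaddr = ioremap(start, len);
        if (ioaddr == NULL)
                release_mem_region(start, len); /* mapping failed: fall back */

        return ioaddr;
}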
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 2389945ca95d..cc95f90b53b7 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -52,6 +52,8 @@
52#include <linux/init.h> 52#include <linux/init.h>
53#include <linux/ide.h> 53#include <linux/ide.h>
54 54
55#define DRV_NAME "sis5513"
56
55/* registers layout and init values are chipset family dependant */ 57/* registers layout and init values are chipset family dependant */
56 58
57#define ATA_16 0x01 59#define ATA_16 0x01
@@ -380,8 +382,9 @@ static int __devinit sis_find_family(struct pci_dev *dev)
380 } 382 }
381 pci_dev_put(host); 383 pci_dev_put(host);
382 384
383 printk(KERN_INFO "SIS5513: %s %s controller\n", 385 printk(KERN_INFO DRV_NAME " %s: %s %s controller\n",
384 SiSHostChipInfo[i].name, chipset_capability[chipset_family]); 386 pci_name(dev), SiSHostChipInfo[i].name,
387 chipset_capability[chipset_family]);
385 } 388 }
386 389
387 if (!chipset_family) { /* Belongs to pci-quirks */ 390 if (!chipset_family) { /* Belongs to pci-quirks */
@@ -396,7 +399,8 @@ static int __devinit sis_find_family(struct pci_dev *dev)
396 pci_write_config_dword(dev, 0x54, idemisc); 399 pci_write_config_dword(dev, 0x54, idemisc);
397 400
398 if (trueid == 0x5518) { 401 if (trueid == 0x5518) {
399 printk(KERN_INFO "SIS5513: SiS 962/963 MuTIOL IDE UDMA133 controller\n"); 402 printk(KERN_INFO DRV_NAME " %s: SiS 962/963 MuTIOL IDE UDMA133 controller\n",
403 pci_name(dev));
400 chipset_family = ATA_133; 404 chipset_family = ATA_133;
401 405
402 /* Check for 5513 compability mapping 406 /* Check for 5513 compability mapping
@@ -405,7 +409,8 @@ static int __devinit sis_find_family(struct pci_dev *dev)
405 */ 409 */
406 if ((idemisc & 0x40000000) == 0) { 410 if ((idemisc & 0x40000000) == 0) {
407 pci_write_config_dword(dev, 0x54, idemisc | 0x40000000); 411 pci_write_config_dword(dev, 0x54, idemisc | 0x40000000);
408 printk(KERN_INFO "SIS5513: Switching to 5513 register mapping\n"); 412 printk(KERN_INFO DRV_NAME " %s: Switching to 5513 register mapping\n",
413 pci_name(dev));
409 } 414 }
410 } 415 }
411 } 416 }
@@ -429,10 +434,12 @@ static int __devinit sis_find_family(struct pci_dev *dev)
429 pci_dev_put(lpc_bridge); 434 pci_dev_put(lpc_bridge);
430 435
431 if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) { 436 if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) {
432 printk(KERN_INFO "SIS5513: SiS 961B MuTIOL IDE UDMA133 controller\n"); 437 printk(KERN_INFO DRV_NAME " %s: SiS 961B MuTIOL IDE UDMA133 controller\n",
438 pci_name(dev));
433 chipset_family = ATA_133a; 439 chipset_family = ATA_133a;
434 } else { 440 } else {
435 printk(KERN_INFO "SIS5513: SiS 961 MuTIOL IDE UDMA100 controller\n"); 441 printk(KERN_INFO DRV_NAME " %s: SiS 961 MuTIOL IDE UDMA100 controller\n",
442 pci_name(dev));
436 chipset_family = ATA_100; 443 chipset_family = ATA_100;
437 } 444 }
438 } 445 }
@@ -441,8 +448,7 @@ static int __devinit sis_find_family(struct pci_dev *dev)
441 return chipset_family; 448 return chipset_family;
442} 449}
443 450
444static unsigned int __devinit init_chipset_sis5513(struct pci_dev *dev, 451static unsigned int __devinit init_chipset_sis5513(struct pci_dev *dev)
445 const char *name)
446{ 452{
447 /* Make general config ops here 453 /* Make general config ops here
448 1/ tell IDE channels to operate in Compatibility mode only 454 1/ tell IDE channels to operate in Compatibility mode only
@@ -555,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
555}; 561};
556 562
557static const struct ide_port_info sis5513_chipset __devinitdata = { 563static const struct ide_port_info sis5513_chipset __devinitdata = {
558 .name = "SIS5513", 564 .name = DRV_NAME,
559 .init_chipset = init_chipset_sis5513, 565 .init_chipset = init_chipset_sis5513,
560 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} }, 566 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
561 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA, 567 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA,
@@ -583,7 +589,13 @@ static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_devi
583 589
584 d.udma_mask = udma_rates[chipset_family]; 590 d.udma_mask = udma_rates[chipset_family];
585 591
586 return ide_setup_pci_device(dev, &d); 592 return ide_pci_init_one(dev, &d, NULL);
593}
594
595static void __devexit sis5513_remove(struct pci_dev *dev)
596{
597 ide_pci_remove(dev);
598 pci_disable_device(dev);
587} 599}
588 600
589static const struct pci_device_id sis5513_pci_tbl[] = { 601static const struct pci_device_id sis5513_pci_tbl[] = {
@@ -598,6 +610,7 @@ static struct pci_driver driver = {
598 .name = "SIS_IDE", 610 .name = "SIS_IDE",
599 .id_table = sis5513_pci_tbl, 611 .id_table = sis5513_pci_tbl,
600 .probe = sis5513_init_one, 612 .probe = sis5513_init_one,
613 .remove = sis5513_remove,
601}; 614};
602 615
603static int __init sis5513_ide_init(void) 616static int __init sis5513_ide_init(void)
@@ -605,7 +618,13 @@ static int __init sis5513_ide_init(void)
605 return ide_pci_register_driver(&driver); 618 return ide_pci_register_driver(&driver);
606} 619}
607 620
621static void __exit sis5513_ide_exit(void)
622{
623 pci_unregister_driver(&driver);
624}
625
608module_init(sis5513_ide_init); 626module_init(sis5513_ide_init);
627module_exit(sis5513_ide_exit);
609 628
610MODULE_AUTHOR("Lionel Bouton, L C Chang, Andre Hedrick, Vojtech Pavlik"); 629MODULE_AUTHOR("Lionel Bouton, L C Chang, Andre Hedrick, Vojtech Pavlik");
611MODULE_DESCRIPTION("PCI driver module for SIS IDE"); 630MODULE_DESCRIPTION("PCI driver module for SIS IDE");
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 6efbde297174..73905bcc08fb 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -23,6 +23,8 @@
23 23
24#include <asm/io.h> 24#include <asm/io.h>
25 25
26#define DRV_NAME "sl82c105"
27
26#undef DEBUG 28#undef DEBUG
27 29
28#ifdef DEBUG 30#ifdef DEBUG
@@ -157,9 +159,9 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
157 * Was DMA enabled? If so, disable it - we're resetting the 159 * Was DMA enabled? If so, disable it - we're resetting the
158 * host. The IDE layer will be handling the drive for us. 160 * host. The IDE layer will be handling the drive for us.
159 */ 161 */
160 dma_cmd = inb(hwif->dma_command); 162 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
161 if (dma_cmd & 1) { 163 if (dma_cmd & 1) {
162 outb(dma_cmd & ~1, hwif->dma_command); 164 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
163 printk("sl82c105: DMA was enabled\n"); 165 printk("sl82c105: DMA was enabled\n");
164 } 166 }
165 167
@@ -270,7 +272,7 @@ static u8 sl82c105_bridge_revision(struct pci_dev *dev)
270 * channel 0 here at least, but channel 1 has to be enabled by 272 * channel 0 here at least, but channel 1 has to be enabled by
271 * firmware or arch code. We still set both to 16 bits mode. 273 * firmware or arch code. We still set both to 16 bits mode.
272 */ 274 */
273static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const char *msg) 275static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev)
274{ 276{
275 u32 val; 277 u32 val;
276 278
@@ -301,7 +303,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
301}; 303};
302 304
303static const struct ide_port_info sl82c105_chipset __devinitdata = { 305static const struct ide_port_info sl82c105_chipset __devinitdata = {
304 .name = "W82C105", 306 .name = DRV_NAME,
305 .init_chipset = init_chipset_sl82c105, 307 .init_chipset = init_chipset_sl82c105,
306 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}}, 308 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
307 .port_ops = &sl82c105_port_ops, 309 .port_ops = &sl82c105_port_ops,
@@ -328,14 +330,14 @@ static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_dev
328 * Never ever EVER under any circumstances enable 330 * Never ever EVER under any circumstances enable
329 * DMA when the bridge is this old. 331 * DMA when the bridge is this old.
330 */ 332 */
331 printk(KERN_INFO "W82C105_IDE: Winbond W83C553 bridge " 333 printk(KERN_INFO DRV_NAME ": Winbond W83C553 bridge "
332 "revision %d, BM-DMA disabled\n", rev); 334 "revision %d, BM-DMA disabled\n", rev);
333 d.dma_ops = NULL; 335 d.dma_ops = NULL;
334 d.mwdma_mask = 0; 336 d.mwdma_mask = 0;
335 d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA; 337 d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
336 } 338 }
337 339
338 return ide_setup_pci_device(dev, &d); 340 return ide_pci_init_one(dev, &d, NULL);
339} 341}
340 342
341static const struct pci_device_id sl82c105_pci_tbl[] = { 343static const struct pci_device_id sl82c105_pci_tbl[] = {
@@ -348,6 +350,7 @@ static struct pci_driver driver = {
348 .name = "W82C105_IDE", 350 .name = "W82C105_IDE",
349 .id_table = sl82c105_pci_tbl, 351 .id_table = sl82c105_pci_tbl,
350 .probe = sl82c105_init_one, 352 .probe = sl82c105_init_one,
353 .remove = ide_pci_remove,
351}; 354};
352 355
353static int __init sl82c105_ide_init(void) 356static int __init sl82c105_ide_init(void)
@@ -355,7 +358,13 @@ static int __init sl82c105_ide_init(void)
355 return ide_pci_register_driver(&driver); 358 return ide_pci_register_driver(&driver);
356} 359}
357 360
361static void __exit sl82c105_ide_exit(void)
362{
363 pci_unregister_driver(&driver);
364}
365
358module_init(sl82c105_ide_init); 366module_init(sl82c105_ide_init);
367module_exit(sl82c105_ide_exit);
359 368
360MODULE_DESCRIPTION("PCI driver module for W82C105 IDE"); 369MODULE_DESCRIPTION("PCI driver module for W82C105 IDE");
361MODULE_LICENSE("GPL"); 370MODULE_LICENSE("GPL");
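
Note on the sl82c105 hunks above (tc86c001 below follows the same change): the cached hwif->dma_command / hwif->dma_status fields go away and the bus-master registers are computed from hwif->dma_base plus the standard offsets (ATA_DMA_CMD, ATA_DMA_STATUS). A small sketch of that register layout; bmdma_stop is an invented helper and the offsets assume the usual byte-spaced SFF-8038i block, not the 32-bit-spaced MMIO layout scc_pata uses:

#include <linux/types.h>
#include <linux/io.h>

#define BM_CMD          0x00    /* same value as ATA_DMA_CMD */
#define BM_STATUS       0x02    /* same value as ATA_DMA_STATUS */

static void bmdma_stop(unsigned long dma_base)
{
        u8 cmd  = inb(dma_base + BM_CMD);
        u8 stat = inb(dma_base + BM_STATUS);

        outb(cmd & ~0x01, dma_base + BM_CMD);           /* clear start/stop bit */
        outb(stat | 0x06, dma_base + BM_STATUS);        /* write 1 to clear INTR and ERROR */
}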
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index dae6e2c94d86..13d1fa491f26 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -15,6 +15,8 @@
 #include <linux/ide.h>
 #include <linux/init.h>
 
+#define DRV_NAME "slc90e66"
+
 static DEFINE_SPINLOCK(slc90e66_lock);
 
 static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -132,7 +134,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
 };
 
 static const struct ide_port_info slc90e66_chipset __devinitdata = {
-        .name = "SLC90E66",
+        .name = DRV_NAME,
         .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
         .port_ops = &slc90e66_port_ops,
         .host_flags = IDE_HFLAG_LEGACY_IRQS,
@@ -144,7 +146,7 @@ static const struct ide_port_info slc90e66_chipset __devinitdata = {
 
 static int __devinit slc90e66_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
-        return ide_setup_pci_device(dev, &slc90e66_chipset);
+        return ide_pci_init_one(dev, &slc90e66_chipset, NULL);
 }
 
 static const struct pci_device_id slc90e66_pci_tbl[] = {
@@ -157,6 +159,7 @@ static struct pci_driver driver = {
         .name = "SLC90e66_IDE",
         .id_table = slc90e66_pci_tbl,
         .probe = slc90e66_init_one,
+        .remove = ide_pci_remove,
 };
 
 static int __init slc90e66_ide_init(void)
@@ -164,7 +167,13 @@ static int __init slc90e66_ide_init(void)
         return ide_pci_register_driver(&driver);
 }
 
+static void __exit slc90e66_ide_exit(void)
+{
+        pci_unregister_driver(&driver);
+}
+
 module_init(slc90e66_ide_init);
+module_exit(slc90e66_ide_exit);
 
 MODULE_AUTHOR("Andre Hedrick");
 MODULE_DESCRIPTION("PCI driver module for SLC90E66 IDE");
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 9b4b27a4c711..b1cb8a9ce5a9 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -11,6 +11,8 @@
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/ide.h> 12#include <linux/ide.h>
13 13
14#define DRV_NAME "tc86c001"
15
14static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed) 16static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
15{ 17{
16 ide_hwif_t *hwif = HWIF(drive); 18 ide_hwif_t *hwif = HWIF(drive);
@@ -63,7 +65,7 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
63 ide_hwif_t *hwif = HWIF(drive); 65 ide_hwif_t *hwif = HWIF(drive);
64 ide_expiry_t *expiry = ide_get_hwifdata(hwif); 66 ide_expiry_t *expiry = ide_get_hwifdata(hwif);
65 ide_hwgroup_t *hwgroup = HWGROUP(drive); 67 ide_hwgroup_t *hwgroup = HWGROUP(drive);
66 u8 dma_stat = inb(hwif->dma_status); 68 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
67 69
68 /* Restore a higher level driver's expiry handler first. */ 70 /* Restore a higher level driver's expiry handler first. */
69 hwgroup->expiry = expiry; 71 hwgroup->expiry = expiry;
@@ -71,21 +73,24 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
71 if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */ 73 if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
72 unsigned long sc_base = hwif->config_data; 74 unsigned long sc_base = hwif->config_data;
73 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); 75 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
74 u8 dma_cmd = inb(hwif->dma_command); 76 u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
75 77
76 printk(KERN_WARNING "%s: DMA interrupt possibly stuck, " 78 printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
77 "attempting recovery...\n", drive->name); 79 "attempting recovery...\n", drive->name);
78 80
79 /* Stop DMA */ 81 /* Stop DMA */
80 outb(dma_cmd & ~0x01, hwif->dma_command); 82 outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD);
81 83
82 /* Setup the dummy DMA transfer */ 84 /* Setup the dummy DMA transfer */
83 outw(0, sc_base + 0x0a); /* Sector Count */ 85 outw(0, sc_base + 0x0a); /* Sector Count */
84 outw(0, twcr_port); /* Transfer Word Count 1 or 2 */ 86 outw(0, twcr_port); /* Transfer Word Count 1 or 2 */
85 87
86 /* Start the dummy DMA transfer */ 88 /* Start the dummy DMA transfer */
87 outb(0x00, hwif->dma_command); /* clear R_OR_WCTR for write */ 89
88 outb(0x01, hwif->dma_command); /* set START_STOPBM */ 90 /* clear R_OR_WCTR for write */
91 outb(0x00, hwif->dma_base + ATA_DMA_CMD);
92 /* set START_STOPBM */
93 outb(0x01, hwif->dma_base + ATA_DMA_CMD);
89 94
90 /* 95 /*
91 * If an interrupt was pending, it should come thru shortly. 96 * If an interrupt was pending, it should come thru shortly.
@@ -170,16 +175,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
170 hwif->rqsize = 0xffff; 175 hwif->rqsize = 0xffff;
171} 176}
172 177
173static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
174 const char *name)
175{
176 int err = pci_request_region(dev, 5, name);
177
178 if (err)
179 printk(KERN_ERR "%s: system control regs already in use", name);
180 return err;
181}
182
183static const struct ide_port_ops tc86c001_port_ops = { 178static const struct ide_port_ops tc86c001_port_ops = {
184 .set_pio_mode = tc86c001_set_pio_mode, 179 .set_pio_mode = tc86c001_set_pio_mode,
185 .set_dma_mode = tc86c001_set_mode, 180 .set_dma_mode = tc86c001_set_mode,
@@ -198,13 +193,11 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
198}; 193};
199 194
200static const struct ide_port_info tc86c001_chipset __devinitdata = { 195static const struct ide_port_info tc86c001_chipset __devinitdata = {
201 .name = "TC86C001", 196 .name = DRV_NAME,
202 .init_chipset = init_chipset_tc86c001,
203 .init_hwif = init_hwif_tc86c001, 197 .init_hwif = init_hwif_tc86c001,
204 .port_ops = &tc86c001_port_ops, 198 .port_ops = &tc86c001_port_ops,
205 .dma_ops = &tc86c001_dma_ops, 199 .dma_ops = &tc86c001_dma_ops,
206 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD | 200 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
207 IDE_HFLAG_ABUSE_SET_DMA_MODE,
208 .pio_mask = ATA_PIO4, 201 .pio_mask = ATA_PIO4,
209 .mwdma_mask = ATA_MWDMA2, 202 .mwdma_mask = ATA_MWDMA2,
210 .udma_mask = ATA_UDMA4, 203 .udma_mask = ATA_UDMA4,
@@ -213,7 +206,37 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = {
213static int __devinit tc86c001_init_one(struct pci_dev *dev, 206static int __devinit tc86c001_init_one(struct pci_dev *dev,
214 const struct pci_device_id *id) 207 const struct pci_device_id *id)
215{ 208{
216 return ide_setup_pci_device(dev, &tc86c001_chipset); 209 int rc;
210
211 rc = pci_enable_device(dev);
212 if (rc)
213 goto out;
214
215 rc = pci_request_region(dev, 5, DRV_NAME);
216 if (rc) {
217 printk(KERN_ERR DRV_NAME ": system control regs already in use");
218 goto out_disable;
219 }
220
221 rc = ide_pci_init_one(dev, &tc86c001_chipset, NULL);
222 if (rc)
223 goto out_release;
224
225 goto out;
226
227out_release:
228 pci_release_region(dev, 5);
229out_disable:
230 pci_disable_device(dev);
231out:
232 return rc;
233}
234
235static void __devexit tc86c001_remove(struct pci_dev *dev)
236{
237 ide_pci_remove(dev);
238 pci_release_region(dev, 5);
239 pci_disable_device(dev);
217} 240}
218 241
219static const struct pci_device_id tc86c001_pci_tbl[] = { 242static const struct pci_device_id tc86c001_pci_tbl[] = {
@@ -225,14 +248,22 @@ MODULE_DEVICE_TABLE(pci, tc86c001_pci_tbl);
225static struct pci_driver driver = { 248static struct pci_driver driver = {
226 .name = "TC86C001", 249 .name = "TC86C001",
227 .id_table = tc86c001_pci_tbl, 250 .id_table = tc86c001_pci_tbl,
228 .probe = tc86c001_init_one 251 .probe = tc86c001_init_one,
252 .remove = tc86c001_remove,
229}; 253};
230 254
231static int __init tc86c001_ide_init(void) 255static int __init tc86c001_ide_init(void)
232{ 256{
233 return ide_pci_register_driver(&driver); 257 return ide_pci_register_driver(&driver);
234} 258}
259
260static void __exit tc86c001_ide_exit(void)
261{
262 pci_unregister_driver(&driver);
263}
264
235module_init(tc86c001_ide_init); 265module_init(tc86c001_ide_init);
266module_exit(tc86c001_ide_exit);
236 267
237MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); 268MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
238MODULE_DESCRIPTION("PCI driver module for TC86C001 IDE"); 269MODULE_DESCRIPTION("PCI driver module for TC86C001 IDE");
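
The same conversion recipe repeats in the drivers that follow: define DRV_NAME, point the port_info .name at it, probe through ide_pci_init_one(), undo everything in a ->remove hook, and add a module_exit so the module can actually be unloaded. As a rough sketch (not part of the patch; the example_* identifiers are placeholders), a driver that needs no resources beyond what the core claims reduces to:

	#define DRV_NAME "example"

	static const struct ide_port_info example_chipset __devinitdata = {
		.name		= DRV_NAME,
		.port_ops	= &example_port_ops,	/* driver-specific hooks */
		.pio_mask	= ATA_PIO4,
	};

	static int __devinit example_init_one(struct pci_dev *dev,
					      const struct pci_device_id *id)
	{
		/* enables the device, claims its BARs and registers the host */
		return ide_pci_init_one(dev, &example_chipset, NULL);
	}

	static struct pci_driver driver = {
		.name		= "example_IDE",
		.id_table	= example_pci_tbl,
		.probe		= example_init_one,
		.remove		= ide_pci_remove,	/* releases what ide_pci_init_one() took */
	};

	static int __init example_ide_init(void)
	{
		return ide_pci_register_driver(&driver);
	}

	static void __exit example_ide_exit(void)
	{
		pci_unregister_driver(&driver);
	}

	module_init(example_ide_init);
	module_exit(example_ide_exit);
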
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index db65a558d4ec..b77ec35151b3 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -33,6 +33,8 @@
33#include <linux/ide.h> 33#include <linux/ide.h>
34#include <linux/init.h> 34#include <linux/init.h>
35 35
36#define DRV_NAME "triflex"
37
36static void triflex_set_mode(ide_drive_t *drive, const u8 speed) 38static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
37{ 39{
38 ide_hwif_t *hwif = HWIF(drive); 40 ide_hwif_t *hwif = HWIF(drive);
@@ -93,7 +95,7 @@ static const struct ide_port_ops triflex_port_ops = {
93}; 95};
94 96
95static const struct ide_port_info triflex_device __devinitdata = { 97static const struct ide_port_info triflex_device __devinitdata = {
96 .name = "TRIFLEX", 98 .name = DRV_NAME,
97 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}}, 99 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
98 .port_ops = &triflex_port_ops, 100 .port_ops = &triflex_port_ops,
99 .pio_mask = ATA_PIO4, 101 .pio_mask = ATA_PIO4,
@@ -104,7 +106,7 @@ static const struct ide_port_info triflex_device __devinitdata = {
104static int __devinit triflex_init_one(struct pci_dev *dev, 106static int __devinit triflex_init_one(struct pci_dev *dev,
105 const struct pci_device_id *id) 107 const struct pci_device_id *id)
106{ 108{
107 return ide_setup_pci_device(dev, &triflex_device); 109 return ide_pci_init_one(dev, &triflex_device, NULL);
108} 110}
109 111
110static const struct pci_device_id triflex_pci_tbl[] = { 112static const struct pci_device_id triflex_pci_tbl[] = {
@@ -117,6 +119,7 @@ static struct pci_driver driver = {
117 .name = "TRIFLEX_IDE", 119 .name = "TRIFLEX_IDE",
118 .id_table = triflex_pci_tbl, 120 .id_table = triflex_pci_tbl,
119 .probe = triflex_init_one, 121 .probe = triflex_init_one,
122 .remove = ide_pci_remove,
120}; 123};
121 124
122static int __init triflex_ide_init(void) 125static int __init triflex_ide_init(void)
@@ -124,7 +127,13 @@ static int __init triflex_ide_init(void)
124 return ide_pci_register_driver(&driver); 127 return ide_pci_register_driver(&driver);
125} 128}
126 129
130static void __exit triflex_ide_exit(void)
131{
132 pci_unregister_driver(&driver);
133}
134
127module_init(triflex_ide_init); 135module_init(triflex_ide_init);
136module_exit(triflex_ide_exit);
128 137
129MODULE_AUTHOR("Torben Mathiasen"); 138MODULE_AUTHOR("Torben Mathiasen");
130MODULE_DESCRIPTION("PCI driver module for Compaq Triflex IDE"); 139MODULE_DESCRIPTION("PCI driver module for Compaq Triflex IDE");
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index a8a3138682ef..fd28b49977fd 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -141,6 +141,8 @@
141 141
142#include <asm/io.h> 142#include <asm/io.h>
143 143
144#define DRV_NAME "trm290"
145
144static void trm290_prepare_drive (ide_drive_t *drive, unsigned int use_dma) 146static void trm290_prepare_drive (ide_drive_t *drive, unsigned int use_dma)
145{ 147{
146 ide_hwif_t *hwif = HWIF(drive); 148 ide_hwif_t *hwif = HWIF(drive);
@@ -245,10 +247,10 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
245 u8 reg = 0; 247 u8 reg = 0;
246 248
247 if ((dev->class & 5) && cfg_base) 249 if ((dev->class & 5) && cfg_base)
248 printk(KERN_INFO "TRM290: chip"); 250 printk(KERN_INFO DRV_NAME " %s: chip", pci_name(dev));
249 else { 251 else {
250 cfg_base = 0x3df0; 252 cfg_base = 0x3df0;
251 printk(KERN_INFO "TRM290: using default"); 253 printk(KERN_INFO DRV_NAME " %s: using default", pci_name(dev));
252 } 254 }
253 printk(KERN_CONT " config base at 0x%04x\n", cfg_base); 255 printk(KERN_CONT " config base at 0x%04x\n", cfg_base);
254 hwif->config_data = cfg_base; 256 hwif->config_data = cfg_base;
@@ -325,7 +327,7 @@ static struct ide_dma_ops trm290_dma_ops = {
325}; 327};
326 328
327static const struct ide_port_info trm290_chipset __devinitdata = { 329static const struct ide_port_info trm290_chipset __devinitdata = {
328 .name = "TRM290", 330 .name = DRV_NAME,
329 .init_hwif = init_hwif_trm290, 331 .init_hwif = init_hwif_trm290,
330 .chipset = ide_trm290, 332 .chipset = ide_trm290,
331 .port_ops = &trm290_port_ops, 333 .port_ops = &trm290_port_ops,
@@ -340,7 +342,7 @@ static const struct ide_port_info trm290_chipset __devinitdata = {
340 342
341static int __devinit trm290_init_one(struct pci_dev *dev, const struct pci_device_id *id) 343static int __devinit trm290_init_one(struct pci_dev *dev, const struct pci_device_id *id)
342{ 344{
343 return ide_setup_pci_device(dev, &trm290_chipset); 345 return ide_pci_init_one(dev, &trm290_chipset, NULL);
344} 346}
345 347
346static const struct pci_device_id trm290_pci_tbl[] = { 348static const struct pci_device_id trm290_pci_tbl[] = {
@@ -353,6 +355,7 @@ static struct pci_driver driver = {
353 .name = "TRM290_IDE", 355 .name = "TRM290_IDE",
354 .id_table = trm290_pci_tbl, 356 .id_table = trm290_pci_tbl,
355 .probe = trm290_init_one, 357 .probe = trm290_init_one,
358 .remove = ide_pci_remove,
356}; 359};
357 360
358static int __init trm290_ide_init(void) 361static int __init trm290_ide_init(void)
@@ -360,7 +363,13 @@ static int __init trm290_ide_init(void)
360 return ide_pci_register_driver(&driver); 363 return ide_pci_register_driver(&driver);
361} 364}
362 365
366static void __exit trm290_ide_exit(void)
367{
368 pci_unregister_driver(&driver);
369}
370
363module_init(trm290_ide_init); 371module_init(trm290_ide_init);
372module_exit(trm290_ide_exit);
364 373
365MODULE_AUTHOR("Mark Lord"); 374MODULE_AUTHOR("Mark Lord");
366MODULE_DESCRIPTION("PCI driver module for Tekram TRM290 IDE"); 375MODULE_DESCRIPTION("PCI driver module for Tekram TRM290 IDE");
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index e47384c70c40..454d2bf62dce 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -35,6 +35,8 @@
35#include <asm/processor.h> 35#include <asm/processor.h>
36#endif 36#endif
37 37
38#define DRV_NAME "via82cxxx"
39
38#define VIA_IDE_ENABLE 0x40 40#define VIA_IDE_ENABLE 0x40
39#define VIA_IDE_CONFIG 0x41 41#define VIA_IDE_CONFIG 0x41
40#define VIA_FIFO_CONFIG 0x43 42#define VIA_FIFO_CONFIG 0x43
@@ -113,7 +115,8 @@ struct via82cxxx_dev
113static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing) 115static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
114{ 116{
115 struct pci_dev *dev = to_pci_dev(hwif->dev); 117 struct pci_dev *dev = to_pci_dev(hwif->dev);
116 struct via82cxxx_dev *vdev = pci_get_drvdata(dev); 118 struct ide_host *host = pci_get_drvdata(dev);
119 struct via82cxxx_dev *vdev = host->host_priv;
117 u8 t; 120 u8 t;
118 121
119 if (~vdev->via_config->flags & VIA_BAD_AST) { 122 if (~vdev->via_config->flags & VIA_BAD_AST) {
@@ -153,7 +156,8 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed)
153 ide_hwif_t *hwif = drive->hwif; 156 ide_hwif_t *hwif = drive->hwif;
154 ide_drive_t *peer = hwif->drives + (~drive->dn & 1); 157 ide_drive_t *peer = hwif->drives + (~drive->dn & 1);
155 struct pci_dev *dev = to_pci_dev(hwif->dev); 158 struct pci_dev *dev = to_pci_dev(hwif->dev);
156 struct via82cxxx_dev *vdev = pci_get_drvdata(dev); 159 struct ide_host *host = pci_get_drvdata(dev);
160 struct via82cxxx_dev *vdev = host->host_priv;
157 struct ide_timing t, p; 161 struct ide_timing t, p;
158 unsigned int T, UT; 162 unsigned int T, UT;
159 163
@@ -258,37 +262,19 @@ static void __devinit via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
258/** 262/**
259 * init_chipset_via82cxxx - initialization handler 263 * init_chipset_via82cxxx - initialization handler
260 * @dev: PCI device 264 * @dev: PCI device
261 * @name: Name of interface
262 * 265 *
263 * The initialization callback. Here we determine the IDE chip type 266 * The initialization callback. Here we determine the IDE chip type
264 * and initialize its drive independent registers. 267 * and initialize its drive independent registers.
265 */ 268 */
266 269
267static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const char *name) 270static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev)
268{ 271{
269 struct pci_dev *isa = NULL; 272 struct ide_host *host = pci_get_drvdata(dev);
270 struct via82cxxx_dev *vdev; 273 struct via82cxxx_dev *vdev = host->host_priv;
271 struct via_isa_bridge *via_config; 274 struct via_isa_bridge *via_config = vdev->via_config;
272 u8 t, v; 275 u8 t, v;
273 u32 u; 276 u32 u;
274 277
275 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
276 if (!vdev) {
277 printk(KERN_ERR "VP_IDE: out of memory :(\n");
278 return -ENOMEM;
279 }
280 pci_set_drvdata(dev, vdev);
281
282 /*
283 * Find the ISA bridge to see how good the IDE is.
284 */
285 vdev->via_config = via_config = via_config_find(&isa);
286
287 /* We checked this earlier so if it fails here deeep badness
288 is involved */
289
290 BUG_ON(!via_config->id);
291
292 /* 278 /*
293 * Detect cable and configure Clk66 279 * Detect cable and configure Clk66
294 */ 280 */
@@ -334,39 +320,6 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
334 320
335 pci_write_config_byte(dev, VIA_FIFO_CONFIG, t); 321 pci_write_config_byte(dev, VIA_FIFO_CONFIG, t);
336 322
337 /*
338 * Determine system bus clock.
339 */
340
341 via_clock = (ide_pci_clk ? ide_pci_clk : 33) * 1000;
342
343 switch (via_clock) {
344 case 33000: via_clock = 33333; break;
345 case 37000: via_clock = 37500; break;
346 case 41000: via_clock = 41666; break;
347 }
348
349 if (via_clock < 20000 || via_clock > 50000) {
350 printk(KERN_WARNING "VP_IDE: User given PCI clock speed "
351 "impossible (%d), using 33 MHz instead.\n", via_clock);
352 printk(KERN_WARNING "VP_IDE: Use ide0=ata66 if you want "
353 "to assume 80-wire cable.\n");
354 via_clock = 33333;
355 }
356
357 /*
358 * Print the boot message.
359 */
360
361 printk(KERN_INFO "VP_IDE: VIA %s (rev %02x) IDE %sDMA%s "
362 "controller on pci%s\n",
363 via_config->name, isa->revision,
364 via_config->udma_mask ? "U" : "MW",
365 via_dma[via_config->udma_mask ?
366 (fls(via_config->udma_mask) - 1) : 0],
367 pci_name(dev));
368
369 pci_dev_put(isa);
370 return 0; 323 return 0;
371} 324}
372 325
@@ -402,7 +355,8 @@ static int via_cable_override(struct pci_dev *pdev)
402static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif) 355static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif)
403{ 356{
404 struct pci_dev *pdev = to_pci_dev(hwif->dev); 357 struct pci_dev *pdev = to_pci_dev(hwif->dev);
405 struct via82cxxx_dev *vdev = pci_get_drvdata(pdev); 358 struct ide_host *host = pci_get_drvdata(pdev);
359 struct via82cxxx_dev *vdev = host->host_priv;
406 360
407 if (via_cable_override(pdev)) 361 if (via_cable_override(pdev))
408 return ATA_CBL_PATA40_SHORT; 362 return ATA_CBL_PATA40_SHORT;
@@ -420,12 +374,11 @@ static const struct ide_port_ops via_port_ops = {
420}; 374};
421 375
422static const struct ide_port_info via82cxxx_chipset __devinitdata = { 376static const struct ide_port_info via82cxxx_chipset __devinitdata = {
423 .name = "VP_IDE", 377 .name = DRV_NAME,
424 .init_chipset = init_chipset_via82cxxx, 378 .init_chipset = init_chipset_via82cxxx,
425 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, 379 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
426 .port_ops = &via_port_ops, 380 .port_ops = &via_port_ops,
427 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | 381 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
428 IDE_HFLAG_ABUSE_SET_DMA_MODE |
429 IDE_HFLAG_POST_SET_MODE | 382 IDE_HFLAG_POST_SET_MODE |
430 IDE_HFLAG_IO_32BIT, 383 IDE_HFLAG_IO_32BIT,
431 .pio_mask = ATA_PIO5, 384 .pio_mask = ATA_PIO5,
@@ -437,6 +390,8 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
437{ 390{
438 struct pci_dev *isa = NULL; 391 struct pci_dev *isa = NULL;
439 struct via_isa_bridge *via_config; 392 struct via_isa_bridge *via_config;
393 struct via82cxxx_dev *vdev;
394 int rc;
440 u8 idx = id->driver_data; 395 u8 idx = id->driver_data;
441 struct ide_port_info d; 396 struct ide_port_info d;
442 397
@@ -446,12 +401,42 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
446 * Find the ISA bridge and check we know what it is. 401 * Find the ISA bridge and check we know what it is.
447 */ 402 */
448 via_config = via_config_find(&isa); 403 via_config = via_config_find(&isa);
449 pci_dev_put(isa);
450 if (!via_config->id) { 404 if (!via_config->id) {
451 printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n"); 405 printk(KERN_WARNING DRV_NAME " %s: unknown chipset, skipping\n",
406 pci_name(dev));
452 return -ENODEV; 407 return -ENODEV;
453 } 408 }
454 409
410 /*
411 * Print the boot message.
412 */
413 printk(KERN_INFO DRV_NAME " %s: VIA %s (rev %02x) IDE %sDMA%s\n",
414 pci_name(dev), via_config->name, isa->revision,
415 via_config->udma_mask ? "U" : "MW",
416 via_dma[via_config->udma_mask ?
417 (fls(via_config->udma_mask) - 1) : 0]);
418
419 pci_dev_put(isa);
420
421 /*
422 * Determine system bus clock.
423 */
424 via_clock = (ide_pci_clk ? ide_pci_clk : 33) * 1000;
425
426 switch (via_clock) {
427 case 33000: via_clock = 33333; break;
428 case 37000: via_clock = 37500; break;
429 case 41000: via_clock = 41666; break;
430 }
431
432 if (via_clock < 20000 || via_clock > 50000) {
433 printk(KERN_WARNING DRV_NAME ": User given PCI clock speed "
434 "impossible (%d), using 33 MHz instead.\n", via_clock);
435 printk(KERN_WARNING DRV_NAME ": Use ide0=ata66 if you want "
436 "to assume 80-wire cable.\n");
437 via_clock = 33333;
438 }
439
455 if (idx == 0) 440 if (idx == 0)
456 d.host_flags |= IDE_HFLAG_NO_AUTODMA; 441 d.host_flags |= IDE_HFLAG_NO_AUTODMA;
457 else 442 else
@@ -467,7 +452,29 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
467 452
468 d.udma_mask = via_config->udma_mask; 453 d.udma_mask = via_config->udma_mask;
469 454
470 return ide_setup_pci_device(dev, &d); 455 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
456 if (!vdev) {
457 printk(KERN_ERR DRV_NAME " %s: out of memory :(\n",
458 pci_name(dev));
459 return -ENOMEM;
460 }
461
462 vdev->via_config = via_config;
463
464 rc = ide_pci_init_one(dev, &d, vdev);
465 if (rc)
466 kfree(vdev);
467
468 return rc;
469}
470
471static void __devexit via_remove(struct pci_dev *dev)
472{
473 struct ide_host *host = pci_get_drvdata(dev);
474 struct via82cxxx_dev *vdev = host->host_priv;
475
476 ide_pci_remove(dev);
477 kfree(vdev);
471} 478}
472 479
473static const struct pci_device_id via_pci_tbl[] = { 480static const struct pci_device_id via_pci_tbl[] = {
@@ -484,6 +491,7 @@ static struct pci_driver driver = {
484 .name = "VIA_IDE", 491 .name = "VIA_IDE",
485 .id_table = via_pci_tbl, 492 .id_table = via_pci_tbl,
486 .probe = via_init_one, 493 .probe = via_init_one,
494 .remove = via_remove,
487}; 495};
488 496
489static int __init via_ide_init(void) 497static int __init via_ide_init(void)
@@ -491,7 +499,13 @@ static int __init via_ide_init(void)
491 return ide_pci_register_driver(&driver); 499 return ide_pci_register_driver(&driver);
492} 500}
493 501
502static void __exit via_ide_exit(void)
503{
504 pci_unregister_driver(&driver);
505}
506
494module_init(via_ide_init); 507module_init(via_ide_init);
508module_exit(via_ide_exit);
495 509
496MODULE_AUTHOR("Vojtech Pavlik, Michel Aubry, Jeff Garzik, Andre Hedrick"); 510MODULE_AUTHOR("Vojtech Pavlik, Michel Aubry, Jeff Garzik, Andre Hedrick");
497MODULE_DESCRIPTION("PCI driver module for VIA IDE"); 511MODULE_DESCRIPTION("PCI driver module for VIA IDE");
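
Note how via82cxxx now moves its per-controller state: the probe allocates vdev, hands it to ide_pci_init_one() as the third argument, and every hook gets it back through the ide_host that the core hangs off the PCI device. A minimal sketch of that access pattern (the example_* names are hypothetical; the helpers are the ones used above):

	static void example_set_dma_mode(ide_drive_t *drive, const u8 speed)
	{
		struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
		struct ide_host *host = pci_get_drvdata(dev);
		struct example_dev *edev = host->host_priv;	/* what probe passed to ide_pci_init_one() */

		/* program the controller using edev->... */
	}
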
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 93fb9067c043..c521bf6e1bf2 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -48,6 +48,8 @@
48#include <asm/mediabay.h> 48#include <asm/mediabay.h>
49#endif 49#endif
50 50
51#define DRV_NAME "ide-pmac"
52
51#undef IDE_PMAC_DEBUG 53#undef IDE_PMAC_DEBUG
52 54
53#define DMA_WAIT_TIMEOUT 50 55#define DMA_WAIT_TIMEOUT 50
@@ -424,7 +426,9 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
424static void 426static void
425pmac_ide_selectproc(ide_drive_t *drive) 427pmac_ide_selectproc(ide_drive_t *drive)
426{ 428{
427 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 429 ide_hwif_t *hwif = drive->hwif;
430 pmac_ide_hwif_t *pmif =
431 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
428 432
429 if (pmif == NULL) 433 if (pmif == NULL)
430 return; 434 return;
@@ -444,7 +448,9 @@ pmac_ide_selectproc(ide_drive_t *drive)
444static void 448static void
445pmac_ide_kauai_selectproc(ide_drive_t *drive) 449pmac_ide_kauai_selectproc(ide_drive_t *drive)
446{ 450{
447 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 451 ide_hwif_t *hwif = drive->hwif;
452 pmac_ide_hwif_t *pmif =
453 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
448 454
449 if (pmif == NULL) 455 if (pmif == NULL)
450 return; 456 return;
@@ -465,7 +471,9 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive)
465static void 471static void
466pmac_ide_do_update_timings(ide_drive_t *drive) 472pmac_ide_do_update_timings(ide_drive_t *drive)
467{ 473{
468 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 474 ide_hwif_t *hwif = drive->hwif;
475 pmac_ide_hwif_t *pmif =
476 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
469 477
470 if (pmif == NULL) 478 if (pmif == NULL)
471 return; 479 return;
@@ -478,12 +486,26 @@ pmac_ide_do_update_timings(ide_drive_t *drive)
478 pmac_ide_selectproc(drive); 486 pmac_ide_selectproc(drive);
479} 487}
480 488
481static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port) 489static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
482{ 490{
483 u32 tmp; 491 writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
484 492 (void)readl((void __iomem *)(hwif->io_ports.data_addr
485 writeb(value, (void __iomem *) port); 493 + IDE_TIMING_CONFIG));
486 tmp = readl((void __iomem *)(hwif->io_ports.data_addr 494}
495
496static void pmac_set_irq(ide_hwif_t *hwif, int on)
497{
498 u8 ctl = ATA_DEVCTL_OBS;
499
500 if (on == 4) { /* hack for SRST */
501 ctl |= 4;
502 on &= ~4;
503 }
504
505 ctl |= on ? 0 : 2;
506
507 writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
508 (void)readl((void __iomem *)(hwif->io_ports.data_addr
487 + IDE_TIMING_CONFIG)); 509 + IDE_TIMING_CONFIG));
488} 510}
489 511
@@ -493,11 +515,13 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
493static void 515static void
494pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) 516pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
495{ 517{
518 ide_hwif_t *hwif = drive->hwif;
519 pmac_ide_hwif_t *pmif =
520 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
496 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); 521 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
497 u32 *timings, t; 522 u32 *timings, t;
498 unsigned accessTicks, recTicks; 523 unsigned accessTicks, recTicks;
499 unsigned accessTime, recTime; 524 unsigned accessTime, recTime;
500 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
501 unsigned int cycle_time; 525 unsigned int cycle_time;
502 526
503 if (pmif == NULL) 527 if (pmif == NULL)
@@ -778,9 +802,11 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
778 802
779static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) 803static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
780{ 804{
805 ide_hwif_t *hwif = drive->hwif;
806 pmac_ide_hwif_t *pmif =
807 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
781 int unit = (drive->select.b.unit & 0x01); 808 int unit = (drive->select.b.unit & 0x01);
782 int ret = 0; 809 int ret = 0;
783 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
784 u32 *timings, *timings2, tl[2]; 810 u32 *timings, *timings2, tl[2];
785 811
786 timings = &pmif->timings[unit]; 812 timings = &pmif->timings[unit];
@@ -852,11 +878,8 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
852/* Suspend call back, should be called after the child devices 878/* Suspend call back, should be called after the child devices
853 * have actually been suspended 879 * have actually been suspended
854 */ 880 */
855static int 881static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
856pmac_ide_do_suspend(ide_hwif_t *hwif)
857{ 882{
858 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
859
860 /* We clear the timings */ 883 /* We clear the timings */
861 pmif->timings[0] = 0; 884 pmif->timings[0] = 0;
862 pmif->timings[1] = 0; 885 pmif->timings[1] = 0;
@@ -884,11 +907,8 @@ pmac_ide_do_suspend(ide_hwif_t *hwif)
884/* Resume call back, should be called before the child devices 907/* Resume call back, should be called before the child devices
885 * are resumed 908 * are resumed
886 */ 909 */
887static int 910static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
888pmac_ide_do_resume(ide_hwif_t *hwif)
889{ 911{
890 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
891
892 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ 912 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
893 if (!pmif->mediabay) { 913 if (!pmif->mediabay) {
894 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); 914 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
@@ -916,7 +936,8 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
916 936
917static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) 937static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
918{ 938{
919 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)ide_get_hwifdata(hwif); 939 pmac_ide_hwif_t *pmif =
940 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
920 struct device_node *np = pmif->node; 941 struct device_node *np = pmif->node;
921 const char *cable = of_get_property(np, "cable-type", NULL); 942 const char *cable = of_get_property(np, "cable-type", NULL);
922 943
@@ -936,7 +957,40 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
936 return ATA_CBL_PATA40; 957 return ATA_CBL_PATA40;
937} 958}
938 959
960static void pmac_ide_init_dev(ide_drive_t *drive)
961{
962 ide_hwif_t *hwif = drive->hwif;
963 pmac_ide_hwif_t *pmif =
964 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
965
966 if (pmif->mediabay) {
967#ifdef CONFIG_PMAC_MEDIABAY
968 if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) {
969 drive->noprobe = 0;
970 return;
971 }
972#endif
973 drive->noprobe = 1;
974 }
975}
976
977static const struct ide_tp_ops pmac_tp_ops = {
978 .exec_command = pmac_exec_command,
979 .read_status = ide_read_status,
980 .read_altstatus = ide_read_altstatus,
981 .read_sff_dma_status = ide_read_sff_dma_status,
982
983 .set_irq = pmac_set_irq,
984
985 .tf_load = ide_tf_load,
986 .tf_read = ide_tf_read,
987
988 .input_data = ide_input_data,
989 .output_data = ide_output_data,
990};
991
939static const struct ide_port_ops pmac_ide_ata6_port_ops = { 992static const struct ide_port_ops pmac_ide_ata6_port_ops = {
993 .init_dev = pmac_ide_init_dev,
940 .set_pio_mode = pmac_ide_set_pio_mode, 994 .set_pio_mode = pmac_ide_set_pio_mode,
941 .set_dma_mode = pmac_ide_set_dma_mode, 995 .set_dma_mode = pmac_ide_set_dma_mode,
942 .selectproc = pmac_ide_kauai_selectproc, 996 .selectproc = pmac_ide_kauai_selectproc,
@@ -944,6 +998,7 @@ static const struct ide_port_ops pmac_ide_ata6_port_ops = {
944}; 998};
945 999
946static const struct ide_port_ops pmac_ide_ata4_port_ops = { 1000static const struct ide_port_ops pmac_ide_ata4_port_ops = {
1001 .init_dev = pmac_ide_init_dev,
947 .set_pio_mode = pmac_ide_set_pio_mode, 1002 .set_pio_mode = pmac_ide_set_pio_mode,
948 .set_dma_mode = pmac_ide_set_dma_mode, 1003 .set_dma_mode = pmac_ide_set_dma_mode,
949 .selectproc = pmac_ide_selectproc, 1004 .selectproc = pmac_ide_selectproc,
@@ -951,6 +1006,7 @@ static const struct ide_port_ops pmac_ide_ata4_port_ops = {
951}; 1006};
952 1007
953static const struct ide_port_ops pmac_ide_port_ops = { 1008static const struct ide_port_ops pmac_ide_port_ops = {
1009 .init_dev = pmac_ide_init_dev,
954 .set_pio_mode = pmac_ide_set_pio_mode, 1010 .set_pio_mode = pmac_ide_set_pio_mode,
955 .set_dma_mode = pmac_ide_set_dma_mode, 1011 .set_dma_mode = pmac_ide_set_dma_mode,
956 .selectproc = pmac_ide_selectproc, 1012 .selectproc = pmac_ide_selectproc,
@@ -959,12 +1015,14 @@ static const struct ide_port_ops pmac_ide_port_ops = {
959static const struct ide_dma_ops pmac_dma_ops; 1015static const struct ide_dma_ops pmac_dma_ops;
960 1016
961static const struct ide_port_info pmac_port_info = { 1017static const struct ide_port_info pmac_port_info = {
1018 .name = DRV_NAME,
962 .init_dma = pmac_ide_init_dma, 1019 .init_dma = pmac_ide_init_dma,
963 .chipset = ide_pmac, 1020 .chipset = ide_pmac,
1021 .tp_ops = &pmac_tp_ops,
1022 .port_ops = &pmac_ide_port_ops,
964#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1023#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
965 .dma_ops = &pmac_dma_ops, 1024 .dma_ops = &pmac_dma_ops,
966#endif 1025#endif
967 .port_ops = &pmac_ide_port_ops,
968 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 1026 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
969 IDE_HFLAG_POST_SET_MODE | 1027 IDE_HFLAG_POST_SET_MODE |
970 IDE_HFLAG_MMIO | 1028 IDE_HFLAG_MMIO |
@@ -977,13 +1035,15 @@ static const struct ide_port_info pmac_port_info = {
977 * Setup, register & probe an IDE channel driven by this driver, this is 1035 * Setup, register & probe an IDE channel driven by this driver, this is
978 * called by one of the 2 probe functions (macio or PCI). 1036 * called by one of the 2 probe functions (macio or PCI).
979 */ 1037 */
980static int __devinit 1038static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
981pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
982{ 1039{
983 struct device_node *np = pmif->node; 1040 struct device_node *np = pmif->node;
984 const int *bidp; 1041 const int *bidp;
985 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 1042 struct ide_host *host;
1043 ide_hwif_t *hwif;
1044 hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
986 struct ide_port_info d = pmac_port_info; 1045 struct ide_port_info d = pmac_port_info;
1046 int rc;
987 1047
988 pmif->broken_dma = pmif->broken_dma_warn = 0; 1048 pmif->broken_dma = pmif->broken_dma_warn = 0;
989 if (of_device_is_compatible(np, "shasta-ata")) { 1049 if (of_device_is_compatible(np, "shasta-ata")) {
@@ -1054,31 +1114,16 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
1054 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY)); 1114 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
1055 } 1115 }
1056 1116
1057 /* Setup MMIO ops */ 1117 printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
1058 default_hwif_mmiops(hwif); 1118 "bus ID %d%s, irq %d\n", model_name[pmif->kind],
1059 hwif->OUTBSYNC = pmac_outbsync; 1119 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
1120 pmif->mediabay ? " (mediabay)" : "", hw->irq);
1060 1121
1061 hwif->hwif_data = pmif; 1122 rc = ide_host_add(&d, hws, &host);
1062 ide_init_port_hw(hwif, hw); 1123 if (rc)
1124 return rc;
1063 1125
1064 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n", 1126 hwif = host->ports[0];
1065 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
1066 pmif->mediabay ? " (mediabay)" : "", hwif->irq);
1067
1068 if (pmif->mediabay) {
1069#ifdef CONFIG_PMAC_MEDIABAY
1070 if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
1071#else
1072 if (1) {
1073#endif
1074 hwif->drives[0].noprobe = 1;
1075 hwif->drives[1].noprobe = 1;
1076 }
1077 }
1078
1079 idx[0] = hwif->index;
1080
1081 ide_device_add(idx, &d);
1082 1127
1083 return 0; 1128 return 0;
1084} 1129}
@@ -1101,7 +1146,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1101{ 1146{
1102 void __iomem *base; 1147 void __iomem *base;
1103 unsigned long regbase; 1148 unsigned long regbase;
1104 ide_hwif_t *hwif;
1105 pmac_ide_hwif_t *pmif; 1149 pmac_ide_hwif_t *pmif;
1106 int irq, rc; 1150 int irq, rc;
1107 hw_regs_t hw; 1151 hw_regs_t hw;
@@ -1110,14 +1154,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1110 if (pmif == NULL) 1154 if (pmif == NULL)
1111 return -ENOMEM; 1155 return -ENOMEM;
1112 1156
1113 hwif = ide_find_port();
1114 if (hwif == NULL) {
1115 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
1116 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
1117 rc = -ENODEV;
1118 goto out_free_pmif;
1119 }
1120
1121 if (macio_resource_count(mdev) == 0) { 1157 if (macio_resource_count(mdev) == 0) {
1122 printk(KERN_WARNING "ide-pmac: no address for %s\n", 1158 printk(KERN_WARNING "ide-pmac: no address for %s\n",
1123 mdev->ofdev.node->full_name); 1159 mdev->ofdev.node->full_name);
@@ -1164,7 +1200,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1164 } else 1200 } else
1165 pmif->dma_regs = NULL; 1201 pmif->dma_regs = NULL;
1166#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 1202#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1167 dev_set_drvdata(&mdev->ofdev.dev, hwif); 1203 dev_set_drvdata(&mdev->ofdev.dev, pmif);
1168 1204
1169 memset(&hw, 0, sizeof(hw)); 1205 memset(&hw, 0, sizeof(hw));
1170 pmac_ide_init_ports(&hw, pmif->regbase); 1206 pmac_ide_init_ports(&hw, pmif->regbase);
@@ -1172,7 +1208,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1172 hw.dev = &mdev->bus->pdev->dev; 1208 hw.dev = &mdev->bus->pdev->dev;
1173 hw.parent = &mdev->ofdev.dev; 1209 hw.parent = &mdev->ofdev.dev;
1174 1210
1175 rc = pmac_ide_setup_device(pmif, hwif, &hw); 1211 rc = pmac_ide_setup_device(pmif, &hw);
1176 if (rc != 0) { 1212 if (rc != 0) {
1177		/* The interface is released to the common IDE layer */	1213		/* The interface is released to the common IDE layer */
1178 dev_set_drvdata(&mdev->ofdev.dev, NULL); 1214 dev_set_drvdata(&mdev->ofdev.dev, NULL);
@@ -1195,12 +1231,13 @@ out_free_pmif:
1195static int 1231static int
1196pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) 1232pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1197{ 1233{
1198 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); 1234 pmac_ide_hwif_t *pmif =
1199 int rc = 0; 1235 (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1236 int rc = 0;
1200 1237
1201 if (mesg.event != mdev->ofdev.dev.power.power_state.event 1238 if (mesg.event != mdev->ofdev.dev.power.power_state.event
1202 && (mesg.event & PM_EVENT_SLEEP)) { 1239 && (mesg.event & PM_EVENT_SLEEP)) {
1203 rc = pmac_ide_do_suspend(hwif); 1240 rc = pmac_ide_do_suspend(pmif);
1204 if (rc == 0) 1241 if (rc == 0)
1205 mdev->ofdev.dev.power.power_state = mesg; 1242 mdev->ofdev.dev.power.power_state = mesg;
1206 } 1243 }
@@ -1211,11 +1248,12 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1211static int 1248static int
1212pmac_ide_macio_resume(struct macio_dev *mdev) 1249pmac_ide_macio_resume(struct macio_dev *mdev)
1213{ 1250{
1214 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); 1251 pmac_ide_hwif_t *pmif =
1215 int rc = 0; 1252 (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1216 1253 int rc = 0;
1254
1217 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) { 1255 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
1218 rc = pmac_ide_do_resume(hwif); 1256 rc = pmac_ide_do_resume(pmif);
1219 if (rc == 0) 1257 if (rc == 0)
1220 mdev->ofdev.dev.power.power_state = PMSG_ON; 1258 mdev->ofdev.dev.power.power_state = PMSG_ON;
1221 } 1259 }
@@ -1229,7 +1267,6 @@ pmac_ide_macio_resume(struct macio_dev *mdev)
1229static int __devinit 1267static int __devinit
1230pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) 1268pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1231{ 1269{
1232 ide_hwif_t *hwif;
1233 struct device_node *np; 1270 struct device_node *np;
1234 pmac_ide_hwif_t *pmif; 1271 pmac_ide_hwif_t *pmif;
1235 void __iomem *base; 1272 void __iomem *base;
@@ -1247,14 +1284,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1247 if (pmif == NULL) 1284 if (pmif == NULL)
1248 return -ENOMEM; 1285 return -ENOMEM;
1249 1286
1250 hwif = ide_find_port();
1251 if (hwif == NULL) {
1252 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
1253 printk(KERN_ERR " %s\n", np->full_name);
1254 rc = -ENODEV;
1255 goto out_free_pmif;
1256 }
1257
1258 if (pci_enable_device(pdev)) { 1287 if (pci_enable_device(pdev)) {
1259 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for " 1288 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
1260 "%s\n", np->full_name); 1289 "%s\n", np->full_name);
@@ -1284,14 +1313,14 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1284 pmif->kauai_fcr = base; 1313 pmif->kauai_fcr = base;
1285 pmif->irq = pdev->irq; 1314 pmif->irq = pdev->irq;
1286 1315
1287 pci_set_drvdata(pdev, hwif); 1316 pci_set_drvdata(pdev, pmif);
1288 1317
1289 memset(&hw, 0, sizeof(hw)); 1318 memset(&hw, 0, sizeof(hw));
1290 pmac_ide_init_ports(&hw, pmif->regbase); 1319 pmac_ide_init_ports(&hw, pmif->regbase);
1291 hw.irq = pdev->irq; 1320 hw.irq = pdev->irq;
1292 hw.dev = &pdev->dev; 1321 hw.dev = &pdev->dev;
1293 1322
1294 rc = pmac_ide_setup_device(pmif, hwif, &hw); 1323 rc = pmac_ide_setup_device(pmif, &hw);
1295 if (rc != 0) { 1324 if (rc != 0) {
1296 /* The inteface is released to the common IDE layer */ 1325 /* The inteface is released to the common IDE layer */
1297 pci_set_drvdata(pdev, NULL); 1326 pci_set_drvdata(pdev, NULL);
@@ -1310,12 +1339,12 @@ out_free_pmif:
1310static int 1339static int
1311pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) 1340pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1312{ 1341{
1313 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); 1342 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
1314 int rc = 0; 1343 int rc = 0;
1315 1344
1316 if (mesg.event != pdev->dev.power.power_state.event 1345 if (mesg.event != pdev->dev.power.power_state.event
1317 && (mesg.event & PM_EVENT_SLEEP)) { 1346 && (mesg.event & PM_EVENT_SLEEP)) {
1318 rc = pmac_ide_do_suspend(hwif); 1347 rc = pmac_ide_do_suspend(pmif);
1319 if (rc == 0) 1348 if (rc == 0)
1320 pdev->dev.power.power_state = mesg; 1349 pdev->dev.power.power_state = mesg;
1321 } 1350 }
@@ -1326,11 +1355,11 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1326static int 1355static int
1327pmac_ide_pci_resume(struct pci_dev *pdev) 1356pmac_ide_pci_resume(struct pci_dev *pdev)
1328{ 1357{
1329 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); 1358 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
1330 int rc = 0; 1359 int rc = 0;
1331 1360
1332 if (pdev->dev.power.power_state.event != PM_EVENT_ON) { 1361 if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
1333 rc = pmac_ide_do_resume(hwif); 1362 rc = pmac_ide_do_resume(pmif);
1334 if (rc == 0) 1363 if (rc == 0)
1335 pdev->dev.power.power_state = PMSG_ON; 1364 pdev->dev.power.power_state = PMSG_ON;
1336 } 1365 }
@@ -1421,10 +1450,11 @@ out:
1421static int 1450static int
1422pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq) 1451pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1423{ 1452{
1453 ide_hwif_t *hwif = drive->hwif;
1454 pmac_ide_hwif_t *pmif =
1455 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1424 struct dbdma_cmd *table; 1456 struct dbdma_cmd *table;
1425 int i, count = 0; 1457 int i, count = 0;
1426 ide_hwif_t *hwif = HWIF(drive);
1427 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1428 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; 1458 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
1429 struct scatterlist *sg; 1459 struct scatterlist *sg;
1430 int wr = (rq_data_dir(rq) == WRITE); 1460 int wr = (rq_data_dir(rq) == WRITE);
@@ -1520,7 +1550,8 @@ static int
1520pmac_ide_dma_setup(ide_drive_t *drive) 1550pmac_ide_dma_setup(ide_drive_t *drive)
1521{ 1551{
1522 ide_hwif_t *hwif = HWIF(drive); 1552 ide_hwif_t *hwif = HWIF(drive);
1523 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; 1553 pmac_ide_hwif_t *pmif =
1554 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1524 struct request *rq = HWGROUP(drive)->rq; 1555 struct request *rq = HWGROUP(drive)->rq;
1525 u8 unit = (drive->select.b.unit & 0x01); 1556 u8 unit = (drive->select.b.unit & 0x01);
1526 u8 ata4; 1557 u8 ata4;
@@ -1560,7 +1591,9 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
1560static void 1591static void
1561pmac_ide_dma_start(ide_drive_t *drive) 1592pmac_ide_dma_start(ide_drive_t *drive)
1562{ 1593{
1563 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1594 ide_hwif_t *hwif = drive->hwif;
1595 pmac_ide_hwif_t *pmif =
1596 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1564 volatile struct dbdma_regs __iomem *dma; 1597 volatile struct dbdma_regs __iomem *dma;
1565 1598
1566 dma = pmif->dma_regs; 1599 dma = pmif->dma_regs;
@@ -1576,7 +1609,9 @@ pmac_ide_dma_start(ide_drive_t *drive)
1576static int 1609static int
1577pmac_ide_dma_end (ide_drive_t *drive) 1610pmac_ide_dma_end (ide_drive_t *drive)
1578{ 1611{
1579 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1612 ide_hwif_t *hwif = drive->hwif;
1613 pmac_ide_hwif_t *pmif =
1614 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1580 volatile struct dbdma_regs __iomem *dma; 1615 volatile struct dbdma_regs __iomem *dma;
1581 u32 dstat; 1616 u32 dstat;
1582 1617
@@ -1604,7 +1639,9 @@ pmac_ide_dma_end (ide_drive_t *drive)
1604static int 1639static int
1605pmac_ide_dma_test_irq (ide_drive_t *drive) 1640pmac_ide_dma_test_irq (ide_drive_t *drive)
1606{ 1641{
1607 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1642 ide_hwif_t *hwif = drive->hwif;
1643 pmac_ide_hwif_t *pmif =
1644 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1608 volatile struct dbdma_regs __iomem *dma; 1645 volatile struct dbdma_regs __iomem *dma;
1609 unsigned long status, timeout; 1646 unsigned long status, timeout;
1610 1647
@@ -1664,7 +1701,9 @@ static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
1664static void 1701static void
1665pmac_ide_dma_lost_irq (ide_drive_t *drive) 1702pmac_ide_dma_lost_irq (ide_drive_t *drive)
1666{ 1703{
1667 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1704 ide_hwif_t *hwif = drive->hwif;
1705 pmac_ide_hwif_t *pmif =
1706 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1668 volatile struct dbdma_regs __iomem *dma; 1707 volatile struct dbdma_regs __iomem *dma;
1669 unsigned long status; 1708 unsigned long status;
1670 1709
@@ -1694,7 +1733,8 @@ static const struct ide_dma_ops pmac_dma_ops = {
1694static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif, 1733static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1695 const struct ide_port_info *d) 1734 const struct ide_port_info *d)
1696{ 1735{
1697 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data; 1736 pmac_ide_hwif_t *pmif =
1737 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1698 struct pci_dev *dev = to_pci_dev(hwif->dev); 1738 struct pci_dev *dev = to_pci_dev(hwif->dev);
1699 1739
1700 /* We won't need pci_dev if we switch to generic consistent 1740 /* We won't need pci_dev if we switch to generic consistent
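
The ide-pmac conversion replaces hwif->hwif_data with a lookup through the parent device's driver data, which both the macio and the PCI attach paths now set to pmif. Since the same cast recurs in nearly every hook, a small accessor (hypothetical, not in the patch) captures the pattern:

	static inline pmac_ide_hwif_t *pmac_pmif(ide_hwif_t *hwif)
	{
		/* the attach routines stored pmif with dev_set_drvdata()/pci_set_drvdata() */
		return (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	}

Each hook would then open with pmac_ide_hwif_t *pmif = pmac_pmif(drive->hwif); instead of repeating the cast inline.
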
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 65fc08b6b6d0..a8e9e8a69a52 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -39,17 +39,18 @@ static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
39 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) || 39 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
40 (progif & 5) != 5) { 40 (progif & 5) != 5) {
41 if ((progif & 0xa) != 0xa) { 41 if ((progif & 0xa) != 0xa) {
42 printk(KERN_INFO "%s: device not capable of full " 42 printk(KERN_INFO "%s %s: device not capable of full "
43 "native PCI mode\n", name); 43 "native PCI mode\n", name, pci_name(dev));
44 return -EOPNOTSUPP; 44 return -EOPNOTSUPP;
45 } 45 }
46 printk("%s: placing both ports into native PCI mode\n", name); 46 printk(KERN_INFO "%s %s: placing both ports into native PCI "
47 "mode\n", name, pci_name(dev));
47 (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5); 48 (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
48 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) || 49 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
49 (progif & 5) != 5) { 50 (progif & 5) != 5) {
50 printk(KERN_ERR "%s: rewrite of PROGIF failed, wanted " 51 printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
51 "0x%04x, got 0x%04x\n", 52 "wanted 0x%04x, got 0x%04x\n",
52 name, progif|5, progif); 53 name, pci_name(dev), progif | 5, progif);
53 return -EOPNOTSUPP; 54 return -EOPNOTSUPP;
54 } 55 }
55 } 56 }
@@ -57,14 +58,14 @@ static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
57} 58}
58 59
59#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 60#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
60static void ide_pci_clear_simplex(unsigned long dma_base, const char *name) 61static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
61{ 62{
62 u8 dma_stat = inb(dma_base + 2); 63 u8 dma_stat = inb(dma_base + 2);
63 64
64 outb(dma_stat & 0x60, dma_base + 2); 65 outb(dma_stat & 0x60, dma_base + 2);
65 dma_stat = inb(dma_base + 2); 66 dma_stat = inb(dma_base + 2);
66 if (dma_stat & 0x80) 67
67 printk(KERN_INFO "%s: simplex device: DMA forced\n", name); 68 return (dma_stat & 0x80) ? 1 : 0;
68} 69}
69 70
70/** 71/**
@@ -73,15 +74,12 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
73 * @d: IDE port info 74 * @d: IDE port info
74 * 75 *
75 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space. 76 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
76 * Where a device has a partner that is already in DMA mode we check
77 * and enforce IDE simplex rules.
78 */ 77 */
79 78
80unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d) 79unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
81{ 80{
82 struct pci_dev *dev = to_pci_dev(hwif->dev); 81 struct pci_dev *dev = to_pci_dev(hwif->dev);
83 unsigned long dma_base = 0; 82 unsigned long dma_base = 0;
84 u8 dma_stat = 0;
85 83
86 if (hwif->host_flags & IDE_HFLAG_MMIO) 84 if (hwif->host_flags & IDE_HFLAG_MMIO)
87 return hwif->dma_base; 85 return hwif->dma_base;
@@ -94,7 +92,8 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
94 dma_base = pci_resource_start(dev, baridx); 92 dma_base = pci_resource_start(dev, baridx);
95 93
96 if (dma_base == 0) { 94 if (dma_base == 0) {
97 printk(KERN_ERR "%s: DMA base is invalid\n", d->name); 95 printk(KERN_ERR "%s %s: DMA base is invalid\n",
96 d->name, pci_name(dev));
98 return 0; 97 return 0;
99 } 98 }
100 } 99 }
@@ -102,11 +101,22 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
102 if (hwif->channel) 101 if (hwif->channel)
103 dma_base += 8; 102 dma_base += 8;
104 103
105 if (d->host_flags & IDE_HFLAG_CS5520) 104 return dma_base;
105}
106EXPORT_SYMBOL_GPL(ide_pci_dma_base);
107
108int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
109{
110 struct pci_dev *dev = to_pci_dev(hwif->dev);
111 u8 dma_stat;
112
113 if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
106 goto out; 114 goto out;
107 115
108 if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) { 116 if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
109 ide_pci_clear_simplex(dma_base, d->name); 117 if (ide_pci_clear_simplex(hwif->dma_base, d->name))
118 printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
119 d->name, pci_name(dev));
110 goto out; 120 goto out;
111 } 121 }
112 122
@@ -120,15 +130,16 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
120 * we tune the drive then try to grab DMA ownership if we want to be 130 * we tune the drive then try to grab DMA ownership if we want to be
121 * the DMA end. This has to be become dynamic to handle hot-plug. 131 * the DMA end. This has to be become dynamic to handle hot-plug.
122 */ 132 */
123 dma_stat = hwif->INB(dma_base + 2); 133 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
124 if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) { 134 if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
125 printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name); 135 printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
126 dma_base = 0; 136 d->name, pci_name(dev));
137 return -1;
127 } 138 }
128out: 139out:
129 return dma_base; 140 return 0;
130} 141}
131EXPORT_SYMBOL_GPL(ide_pci_dma_base); 142EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
132 143
133/* 144/*
134 * Set up BM-DMA capability (PnP BIOS should have done this) 145 * Set up BM-DMA capability (PnP BIOS should have done this)
@@ -144,8 +155,8 @@ int ide_pci_set_master(struct pci_dev *dev, const char *name)
144 155
145 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) || 156 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
146 (pcicmd & PCI_COMMAND_MASTER) == 0) { 157 (pcicmd & PCI_COMMAND_MASTER) == 0) {
147 printk(KERN_ERR "%s: error updating PCICMD on %s\n", 158 printk(KERN_ERR "%s %s: error updating PCICMD\n",
148 name, pci_name(dev)); 159 name, pci_name(dev));
149 return -EIO; 160 return -EIO;
150 } 161 }
151 } 162 }
@@ -157,9 +168,9 @@ EXPORT_SYMBOL_GPL(ide_pci_set_master);
157 168
158void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d) 169void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
159{ 170{
160 printk(KERN_INFO "%s: IDE controller (0x%04x:0x%04x rev 0x%02x) at " 171 printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
161 " PCI slot %s\n", d->name, dev->vendor, dev->device, 172 d->name, pci_name(dev),
162 dev->revision, pci_name(dev)); 173 dev->vendor, dev->device, dev->revision);
163} 174}
164EXPORT_SYMBOL_GPL(ide_setup_pci_noise); 175EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
165 176
@@ -184,11 +195,12 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
184 if (pci_enable_device(dev)) { 195 if (pci_enable_device(dev)) {
185 ret = pci_enable_device_io(dev); 196 ret = pci_enable_device_io(dev);
186 if (ret < 0) { 197 if (ret < 0) {
187 printk(KERN_WARNING "%s: (ide_setup_pci_device:) " 198 printk(KERN_WARNING "%s %s: couldn't enable device\n",
188 "Could not enable device.\n", d->name); 199 d->name, pci_name(dev));
189 goto out; 200 goto out;
190 } 201 }
191 printk(KERN_WARNING "%s: BIOS configuration fixed.\n", d->name); 202 printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
203 d->name, pci_name(dev));
192 } 204 }
193 205
194 /* 206 /*
@@ -198,7 +210,8 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
198 */ 210 */
199 ret = pci_set_dma_mask(dev, DMA_32BIT_MASK); 211 ret = pci_set_dma_mask(dev, DMA_32BIT_MASK);
200 if (ret < 0) { 212 if (ret < 0) {
201 printk(KERN_ERR "%s: can't set dma mask\n", d->name); 213 printk(KERN_ERR "%s %s: can't set DMA mask\n",
214 d->name, pci_name(dev));
202 goto out; 215 goto out;
203 } 216 }
204 217
@@ -216,7 +229,8 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
216 229
217 ret = pci_request_selected_regions(dev, bars, d->name); 230 ret = pci_request_selected_regions(dev, bars, d->name);
218 if (ret < 0) 231 if (ret < 0)
219 printk(KERN_ERR "%s: can't reserve resources\n", d->name); 232 printk(KERN_ERR "%s %s: can't reserve resources\n",
233 d->name, pci_name(dev));
220out: 234out:
221 return ret; 235 return ret;
222} 236}
@@ -242,15 +256,18 @@ static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
242 */ 256 */
243 if (ide_setup_pci_baseregs(dev, d->name) || 257 if (ide_setup_pci_baseregs(dev, d->name) ||
244 pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) { 258 pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
245 printk(KERN_INFO "%s: device disabled (BIOS)\n", d->name); 259 printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
260 d->name, pci_name(dev));
246 return -ENODEV; 261 return -ENODEV;
247 } 262 }
248 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) { 263 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
249 printk(KERN_ERR "%s: error accessing PCI regs\n", d->name); 264 printk(KERN_ERR "%s %s: error accessing PCI regs\n",
265 d->name, pci_name(dev));
250 return -EIO; 266 return -EIO;
251 } 267 }
252 if (!(pcicmd & PCI_COMMAND_IO)) { 268 if (!(pcicmd & PCI_COMMAND_IO)) {
253 printk(KERN_ERR "%s: unable to enable IDE controller\n", d->name); 269 printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
270 d->name, pci_name(dev));
254 return -ENXIO; 271 return -ENXIO;
255 } 272 }
256 return 0; 273 return 0;
@@ -284,33 +301,32 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
284} 301}
285 302
286/** 303/**
287 * ide_hwif_configure - configure an IDE interface 304 * ide_hw_configure - configure a hw_regs_t instance
288 * @dev: PCI device holding interface 305 * @dev: PCI device holding interface
289 * @d: IDE port info 306 * @d: IDE port info
290 * @port: port number 307 * @port: port number
291 * @irq: PCI IRQ 308 * @irq: PCI IRQ
309 * @hw: hw_regs_t instance corresponding to this port
292 * 310 *
293 * Perform the initial set up for the hardware interface structure. This 311 * Perform the initial set up for the hardware interface structure. This
294 * is done per interface port rather than per PCI device. There may be 312 * is done per interface port rather than per PCI device. There may be
295 * more than one port per device. 313 * more than one port per device.
296 * 314 *
297 * Returns the new hardware interface structure, or NULL on a failure 315 * Returns zero on success or an error code.
298 */ 316 */
299 317
300static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, 318static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
301 const struct ide_port_info *d, 319 unsigned int port, int irq, hw_regs_t *hw)
302 unsigned int port, int irq)
303{ 320{
304 unsigned long ctl = 0, base = 0; 321 unsigned long ctl = 0, base = 0;
305 ide_hwif_t *hwif;
306 struct hw_regs_s hw;
307 322
308 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { 323 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
309 if (ide_pci_check_iomem(dev, d, 2 * port) || 324 if (ide_pci_check_iomem(dev, d, 2 * port) ||
310 ide_pci_check_iomem(dev, d, 2 * port + 1)) { 325 ide_pci_check_iomem(dev, d, 2 * port + 1)) {
311 printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported " 326 printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
312 "as MEM for port %d!\n", d->name, port); 327 "reported as MEM for port %d!\n",
313 return NULL; 328 d->name, pci_name(dev), port);
329 return -EINVAL;
314 } 330 }
315 331
316 ctl = pci_resource_start(dev, 2*port+1); 332 ctl = pci_resource_start(dev, 2*port+1);
@@ -322,24 +338,18 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
322 } 338 }
323 339
324 if (!base || !ctl) { 340 if (!base || !ctl) {
325 printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n", 341 printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
326 d->name, port); 342 d->name, pci_name(dev), port);
327 return NULL; 343 return -EINVAL;
328 } 344 }
329 345
330 hwif = ide_find_port_slot(d); 346 memset(hw, 0, sizeof(*hw));
331 if (hwif == NULL) 347 hw->irq = irq;
332 return NULL; 348 hw->dev = &dev->dev;
333 349 hw->chipset = d->chipset ? d->chipset : ide_pci;
334 memset(&hw, 0, sizeof(hw)); 350 ide_std_init_ports(hw, base, ctl | 2);
335 hw.irq = irq;
336 hw.dev = &dev->dev;
337 hw.chipset = d->chipset ? d->chipset : ide_pci;
338 ide_std_init_ports(&hw, base, ctl | 2);
339 351
340 ide_init_port_hw(hwif, &hw); 352 return 0;
341
342 return hwif;
343} 353}
344 354
345#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 355#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
@@ -362,7 +372,15 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
362 (dev->class & 0x80))) { 372 (dev->class & 0x80))) {
363 unsigned long base = ide_pci_dma_base(hwif, d); 373 unsigned long base = ide_pci_dma_base(hwif, d);
364 374
365 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 375 if (base == 0)
376 return -1;
377
378 hwif->dma_base = base;
379
380 if (ide_pci_check_simplex(hwif, d) < 0)
381 return -1;
382
383 if (ide_pci_set_master(dev, d->name) < 0)
366 return -1; 384 return -1;
367 385
368 if (hwif->host_flags & IDE_HFLAG_MMIO) 386 if (hwif->host_flags & IDE_HFLAG_MMIO)
@@ -376,7 +394,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
376 if (ide_allocate_dma_engine(hwif)) 394 if (ide_allocate_dma_engine(hwif))
377 return -1; 395 return -1;
378 396
379 ide_setup_dma(hwif, base); 397 hwif->dma_ops = &sff_dma_ops;
380 } 398 }
381 399
382 return 0; 400 return 0;
@@ -388,14 +406,14 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
388 * @dev: PCI device 406 * @dev: PCI device
389 * @d: IDE port info 407 * @d: IDE port info
390 * @noisy: verbose flag 408 * @noisy: verbose flag
391 * @config: returned as 1 if we configured the hardware
392 * 409 *
393 * Set up the PCI and controller side of the IDE interface. This brings 410 * Set up the PCI and controller side of the IDE interface. This brings
394 * up the PCI side of the device, checks that the device is enabled 411 * up the PCI side of the device, checks that the device is enabled
395 * and enables it if need be 412 * and enables it if need be
396 */ 413 */
397 414
398static int ide_setup_pci_controller(struct pci_dev *dev, const struct ide_port_info *d, int noisy, int *config) 415static int ide_setup_pci_controller(struct pci_dev *dev,
416 const struct ide_port_info *d, int noisy)
399{ 417{
400 int ret; 418 int ret;
401 u16 pcicmd; 419 u16 pcicmd;
@@ -409,15 +427,16 @@ static int ide_setup_pci_controller(struct pci_dev *dev, const struct ide_port_i
409 427
410 ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd); 428 ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
411 if (ret < 0) { 429 if (ret < 0) {
412 printk(KERN_ERR "%s: error accessing PCI regs\n", d->name); 430 printk(KERN_ERR "%s %s: error accessing PCI regs\n",
431 d->name, pci_name(dev));
413 goto out; 432 goto out;
414 } 433 }
415 if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */ 434 if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
416 ret = ide_pci_configure(dev, d); 435 ret = ide_pci_configure(dev, d);
417 if (ret < 0) 436 if (ret < 0)
418 goto out; 437 goto out;
419 *config = 1; 438 printk(KERN_INFO "%s %s: device enabled (Linux)\n",
420 printk(KERN_INFO "%s: device enabled (Linux)\n", d->name); 439 d->name, pci_name(dev));
421 } 440 }
422 441
423out: 442out:
@@ -429,7 +448,8 @@ out:
429 * @dev: PCI device 448 * @dev: PCI device
430 * @d: IDE port info 449 * @d: IDE port info
431 * @pciirq: IRQ line 450 * @pciirq: IRQ line
432 * @idx: ATA index table to update 451 * @hw: hw_regs_t instances corresponding to this PCI IDE device
452 * @hws: hw_regs_t pointers table to update
433 * 453 *
434 * Scan the interfaces attached to this device and do any 454 * Scan the interfaces attached to this device and do any
435 * necessary per port setup. Attach the devices and ask the 455 * necessary per port setup. Attach the devices and ask the
@@ -440,10 +460,10 @@ out:
440 * where the chipset setup is not the default PCI IDE one. 460 * where the chipset setup is not the default PCI IDE one.
441 */ 461 */
442 462
443void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int pciirq, u8 *idx) 463void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
464 int pciirq, hw_regs_t *hw, hw_regs_t **hws)
444{ 465{
445 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; 466 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
446 ide_hwif_t *hwif;
447 u8 tmp; 467 u8 tmp;
448 468
449 /* 469 /*
@@ -455,15 +475,15 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
455 475
456 if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) || 476 if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
457 (tmp & e->mask) != e->val)) { 477 (tmp & e->mask) != e->val)) {
458 printk(KERN_INFO "%s: IDE port disabled\n", d->name); 478 printk(KERN_INFO "%s %s: IDE port disabled\n",
479 d->name, pci_name(dev));
459 continue; /* port not enabled */ 480 continue; /* port not enabled */
460 } 481 }
461 482
462 hwif = ide_hwif_configure(dev, d, port, pciirq); 483 if (ide_hw_configure(dev, d, port, pciirq, hw + port))
463 if (hwif == NULL)
464 continue; 484 continue;
465 485
466 *(idx + port) = hwif->index; 486 *(hws + port) = hw + port;
467 } 487 }
468} 488}
469EXPORT_SYMBOL_GPL(ide_pci_setup_ports); 489EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
@@ -480,95 +500,162 @@ EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
480 */ 500 */
481static int do_ide_setup_pci_device(struct pci_dev *dev, 501static int do_ide_setup_pci_device(struct pci_dev *dev,
482 const struct ide_port_info *d, 502 const struct ide_port_info *d,
483 u8 *idx, u8 noisy) 503 u8 noisy)
484{ 504{
485 int tried_config = 0;
486 int pciirq, ret; 505 int pciirq, ret;
487 506
488 ret = ide_setup_pci_controller(dev, d, noisy, &tried_config);
489 if (ret < 0)
490 goto out;
491
492 /* 507 /*
493 * Can we trust the reported IRQ? 508 * Can we trust the reported IRQ?
494 */ 509 */
495 pciirq = dev->irq; 510 pciirq = dev->irq;
496 511
 512	/*
 513	 * This allows offboard IDE PCI cards to enable a BIOS,
 514	 * verify interrupt settings of split-mirror PCI config
 515	 * space, place the chipset into init mode, and/or preserve
 516	 * an interrupt if the card lacks native IDE support.
 517	 */
518 ret = d->init_chipset ? d->init_chipset(dev) : 0;
519 if (ret < 0)
520 goto out;
521
497 /* Is it an "IDE storage" device in non-PCI mode? */ 522 /* Is it an "IDE storage" device in non-PCI mode? */
498 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5) { 523 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5) {
499 if (noisy) 524 if (noisy)
500 printk(KERN_INFO "%s: not 100%% native mode: " 525 printk(KERN_INFO "%s %s: not 100%% native mode: will "
501 "will probe irqs later\n", d->name); 526 "probe irqs later\n", d->name, pci_name(dev));
502 /*
503 * This allows offboard ide-pci cards the enable a BIOS,
504 * verify interrupt settings of split-mirror pci-config
505 * space, place chipset into init-mode, and/or preserve
506 * an interrupt if the card is not native ide support.
507 */
508 ret = d->init_chipset ? d->init_chipset(dev, d->name) : 0;
509 if (ret < 0)
510 goto out;
511 pciirq = ret; 527 pciirq = ret;
512 } else if (tried_config) { 528 } else if (!pciirq && noisy) {
513 if (noisy) 529 printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
514 printk(KERN_INFO "%s: will probe irqs later\n", d->name); 530 d->name, pci_name(dev), pciirq);
515 pciirq = 0; 531 } else if (noisy) {
516 } else if (!pciirq) { 532 printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
517 if (noisy) 533 d->name, pci_name(dev), pciirq);
518 printk(KERN_WARNING "%s: bad irq (%d): will probe later\n",
519 d->name, pciirq);
520 pciirq = 0;
521 } else {
522 if (d->init_chipset) {
523 ret = d->init_chipset(dev, d->name);
524 if (ret < 0)
525 goto out;
526 }
527 if (noisy)
528 printk(KERN_INFO "%s: 100%% native mode on irq %d\n",
529 d->name, pciirq);
530 } 534 }
531 535
532 /* FIXME: silent failure can happen */ 536 ret = pciirq;
533
534 ide_pci_setup_ports(dev, d, pciirq, idx);
535out: 537out:
536 return ret; 538 return ret;
537} 539}
538 540
539int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d) 541int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
542 void *priv)
540{ 543{
541 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 544 struct ide_host *host;
545 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
542 int ret; 546 int ret;
543 547
544 ret = do_ide_setup_pci_device(dev, d, &idx[0], 1); 548 ret = ide_setup_pci_controller(dev, d, 1);
549 if (ret < 0)
550 goto out;
551
552 ide_pci_setup_ports(dev, d, 0, &hw[0], &hws[0]);
553
554 host = ide_host_alloc(d, hws);
555 if (host == NULL) {
556 ret = -ENOMEM;
557 goto out;
558 }
559
560 host->dev[0] = &dev->dev;
561
562 host->host_priv = priv;
563
564 pci_set_drvdata(dev, host);
565
566 ret = do_ide_setup_pci_device(dev, d, 1);
567 if (ret < 0)
568 goto out;
545 569
546 if (ret >= 0) 570 /* fixup IRQ */
547 ide_device_add(idx, d); 571 hw[1].irq = hw[0].irq = ret;
548 572
573 ret = ide_host_register(host, d, hws);
574 if (ret)
575 ide_host_free(host);
576out:
549 return ret; 577 return ret;
550} 578}
551EXPORT_SYMBOL_GPL(ide_setup_pci_device); 579EXPORT_SYMBOL_GPL(ide_pci_init_one);
552 580
553int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2, 581int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
554 const struct ide_port_info *d) 582 const struct ide_port_info *d, void *priv)
555{ 583{
556 struct pci_dev *pdev[] = { dev1, dev2 }; 584 struct pci_dev *pdev[] = { dev1, dev2 };
585 struct ide_host *host;
557 int ret, i; 586 int ret, i;
558 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 587 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
588
589 for (i = 0; i < 2; i++) {
590 ret = ide_setup_pci_controller(pdev[i], d, !i);
591 if (ret < 0)
592 goto out;
593
594 ide_pci_setup_ports(pdev[i], d, 0, &hw[i*2], &hws[i*2]);
595 }
596
597 host = ide_host_alloc(d, hws);
598 if (host == NULL) {
599 ret = -ENOMEM;
600 goto out;
601 }
602
603 host->dev[0] = &dev1->dev;
604 host->dev[1] = &dev2->dev;
605
606 host->host_priv = priv;
607
608 pci_set_drvdata(pdev[0], host);
609 pci_set_drvdata(pdev[1], host);
559 610
560 for (i = 0; i < 2; i++) { 611 for (i = 0; i < 2; i++) {
561 ret = do_ide_setup_pci_device(pdev[i], d, &idx[i*2], !i); 612 ret = do_ide_setup_pci_device(pdev[i], d, !i);
613
562 /* 614 /*
 563	 * FIXME: there is still no helper to undo		 615	 * FIXME: there is still no helper to undo
 564	 * do_ide_setup_pci_device() on the first device.	 616	 * do_ide_setup_pci_device() on the first device.
565 */ 617 */
566 if (ret < 0) 618 if (ret < 0)
567 goto out; 619 goto out;
620
621 /* fixup IRQ */
622 hw[i*2 + 1].irq = hw[i*2].irq = ret;
568 } 623 }
569 624
570 ide_device_add(idx, d); 625 ret = ide_host_register(host, d, hws);
626 if (ret)
627 ide_host_free(host);
571out: 628out:
572 return ret; 629 return ret;
573} 630}
574EXPORT_SYMBOL_GPL(ide_setup_pci_devices); 631EXPORT_SYMBOL_GPL(ide_pci_init_two);
632
633void ide_pci_remove(struct pci_dev *dev)
634{
635 struct ide_host *host = pci_get_drvdata(dev);
636 struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
637 int bars;
638
639 if (host->host_flags & IDE_HFLAG_SINGLE)
640 bars = (1 << 2) - 1;
641 else
642 bars = (1 << 4) - 1;
643
644 if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
645 if (host->host_flags & IDE_HFLAG_CS5520)
646 bars |= (1 << 2);
647 else
648 bars |= (1 << 4);
649 }
650
651 ide_host_remove(host);
652
653 if (dev2)
654 pci_release_selected_regions(dev2, bars);
655 pci_release_selected_regions(dev, bars);
656
657 if (dev2)
658 pci_disable_device(dev2);
659 pci_disable_device(dev);
660}
661EXPORT_SYMBOL_GPL(ide_pci_remove);
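The hunks above replace ide_setup_pci_device()/ide_setup_pci_devices() with ide_pci_init_one()/ide_pci_init_two(), which allocate and register an ide_host and store it as PCI drvdata so that ide_pci_remove() can undo the whole setup. A minimal sketch of the glue a PCI IDE host driver would plug into its pci_driver under this API, using only the calls and ide_port_info fields visible in the patch (ide_pci_init_one(), ide_pci_remove(), .name, .host_flags, IDE_HFLAG_SINGLE); the driver name and PCI IDs are illustrative placeholders, not taken from the patch:

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/ide.h>

    /* Illustrative port description; only .name and .host_flags are
     * fields this patch actually dereferences (d->name, d->host_flags). */
    static const struct ide_port_info example_port_info = {
            .name           = "EXAMPLE-IDE",
            .host_flags     = IDE_HFLAG_SINGLE,     /* single-channel controller */
    };

    static int example_init_one(struct pci_dev *dev, const struct pci_device_id *id)
    {
            /* Enables the device, builds hw_regs_t for each port, allocates
             * and registers the ide_host, and stores it via pci_set_drvdata(). */
            return ide_pci_init_one(dev, &example_port_info, NULL);
    }

    static void example_remove(struct pci_dev *dev)
    {
            /* Unregisters the host, then releases the BAR mask derived from
             * IDE_HFLAG_SINGLE/IDE_HFLAG_CS5520 and disables the device. */
            ide_pci_remove(dev);
    }

    static const struct pci_device_id example_pci_tbl[] = {
            { PCI_DEVICE(0x1234, 0x5678) },         /* placeholder vendor/device */
            { 0, },
    };

    static struct pci_driver example_pci_driver = {
            .name           = "EXAMPLE-IDE",
            .id_table       = example_pci_tbl,
            .probe          = example_init_one,
            .remove         = example_remove,
    };

Registering example_pci_driver with pci_register_driver() from the module init path would complete the skeleton; that boilerplate is omitted here.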
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
index 07ca35c98f96..1cf6487b65ba 100644
--- a/drivers/ieee1394/iso.c
+++ b/drivers/ieee1394/iso.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/mm.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15 16
16#include "hosts.h" 17#include "hosts.h"
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ae11d5cc74d0..e980ff3335db 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -168,6 +168,12 @@ struct cma_work {
168 struct rdma_cm_event event; 168 struct rdma_cm_event event;
169}; 169};
170 170
171struct cma_ndev_work {
172 struct work_struct work;
173 struct rdma_id_private *id;
174 struct rdma_cm_event event;
175};
176
171union cma_ip_addr { 177union cma_ip_addr {
172 struct in6_addr ip6; 178 struct in6_addr ip6;
173 struct { 179 struct {
@@ -914,7 +920,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
914 struct rdma_cm_event event; 920 struct rdma_cm_event event;
915 int ret = 0; 921 int ret = 0;
916 922
917 if (cma_disable_callback(id_priv, CMA_CONNECT)) 923 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
924 cma_disable_callback(id_priv, CMA_CONNECT)) ||
925 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
926 cma_disable_callback(id_priv, CMA_DISCONNECT)))
918 return 0; 927 return 0;
919 928
920 memset(&event, 0, sizeof event); 929 memset(&event, 0, sizeof event);
@@ -950,6 +959,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
950 event.event = RDMA_CM_EVENT_DISCONNECTED; 959 event.event = RDMA_CM_EVENT_DISCONNECTED;
951 break; 960 break;
952 case IB_CM_TIMEWAIT_EXIT: 961 case IB_CM_TIMEWAIT_EXIT:
962 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
963 break;
953 case IB_CM_MRA_RECEIVED: 964 case IB_CM_MRA_RECEIVED:
954 /* ignore event */ 965 /* ignore event */
955 goto out; 966 goto out;
@@ -1598,6 +1609,30 @@ out:
1598 kfree(work); 1609 kfree(work);
1599} 1610}
1600 1611
1612static void cma_ndev_work_handler(struct work_struct *_work)
1613{
1614 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
1615 struct rdma_id_private *id_priv = work->id;
1616 int destroy = 0;
1617
1618 mutex_lock(&id_priv->handler_mutex);
1619 if (id_priv->state == CMA_DESTROYING ||
1620 id_priv->state == CMA_DEVICE_REMOVAL)
1621 goto out;
1622
1623 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1624 cma_exch(id_priv, CMA_DESTROYING);
1625 destroy = 1;
1626 }
1627
1628out:
1629 mutex_unlock(&id_priv->handler_mutex);
1630 cma_deref_id(id_priv);
1631 if (destroy)
1632 rdma_destroy_id(&id_priv->id);
1633 kfree(work);
1634}
1635
1601static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 1636static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1602{ 1637{
1603 struct rdma_route *route = &id_priv->id.route; 1638 struct rdma_route *route = &id_priv->id.route;
@@ -2723,6 +2758,65 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
2723} 2758}
2724EXPORT_SYMBOL(rdma_leave_multicast); 2759EXPORT_SYMBOL(rdma_leave_multicast);
2725 2760
2761static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
2762{
2763 struct rdma_dev_addr *dev_addr;
2764 struct cma_ndev_work *work;
2765
2766 dev_addr = &id_priv->id.route.addr.dev_addr;
2767
2768 if ((dev_addr->src_dev == ndev) &&
2769 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
2770 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
2771 ndev->name, &id_priv->id);
2772 work = kzalloc(sizeof *work, GFP_KERNEL);
2773 if (!work)
2774 return -ENOMEM;
2775
2776 INIT_WORK(&work->work, cma_ndev_work_handler);
2777 work->id = id_priv;
2778 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
2779 atomic_inc(&id_priv->refcount);
2780 queue_work(cma_wq, &work->work);
2781 }
2782
2783 return 0;
2784}
2785
2786static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
2787 void *ctx)
2788{
2789 struct net_device *ndev = (struct net_device *)ctx;
2790 struct cma_device *cma_dev;
2791 struct rdma_id_private *id_priv;
2792 int ret = NOTIFY_DONE;
2793
2794 if (dev_net(ndev) != &init_net)
2795 return NOTIFY_DONE;
2796
2797 if (event != NETDEV_BONDING_FAILOVER)
2798 return NOTIFY_DONE;
2799
2800 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
2801 return NOTIFY_DONE;
2802
2803 mutex_lock(&lock);
2804 list_for_each_entry(cma_dev, &dev_list, list)
2805 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
2806 ret = cma_netdev_change(ndev, id_priv);
2807 if (ret)
2808 goto out;
2809 }
2810
2811out:
2812 mutex_unlock(&lock);
2813 return ret;
2814}
2815
2816static struct notifier_block cma_nb = {
2817 .notifier_call = cma_netdev_callback
2818};
2819
2726static void cma_add_one(struct ib_device *device) 2820static void cma_add_one(struct ib_device *device)
2727{ 2821{
2728 struct cma_device *cma_dev; 2822 struct cma_device *cma_dev;
@@ -2831,6 +2925,7 @@ static int cma_init(void)
2831 2925
2832 ib_sa_register_client(&sa_client); 2926 ib_sa_register_client(&sa_client);
2833 rdma_addr_register_client(&addr_client); 2927 rdma_addr_register_client(&addr_client);
2928 register_netdevice_notifier(&cma_nb);
2834 2929
2835 ret = ib_register_client(&cma_client); 2930 ret = ib_register_client(&cma_client);
2836 if (ret) 2931 if (ret)
@@ -2838,6 +2933,7 @@ static int cma_init(void)
2838 return 0; 2933 return 0;
2839 2934
2840err: 2935err:
2936 unregister_netdevice_notifier(&cma_nb);
2841 rdma_addr_unregister_client(&addr_client); 2937 rdma_addr_unregister_client(&addr_client);
2842 ib_sa_unregister_client(&sa_client); 2938 ib_sa_unregister_client(&sa_client);
2843 destroy_workqueue(cma_wq); 2939 destroy_workqueue(cma_wq);
@@ -2847,6 +2943,7 @@ err:
2847static void cma_cleanup(void) 2943static void cma_cleanup(void)
2848{ 2944{
2849 ib_unregister_client(&cma_client); 2945 ib_unregister_client(&cma_client);
2946 unregister_netdevice_notifier(&cma_nb);
2850 rdma_addr_unregister_client(&addr_client); 2947 rdma_addr_unregister_client(&addr_client);
2851 ib_sa_unregister_client(&sa_client); 2948 ib_sa_unregister_client(&sa_client);
2852 destroy_workqueue(cma_wq); 2949 destroy_workqueue(cma_wq);
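The cma.c changes add two events that reach rdma_cm consumers: RDMA_CM_EVENT_TIMEWAIT_EXIT, reported when the IB CM delivers IB_CM_TIMEWAIT_EXIT, and RDMA_CM_EVENT_ADDR_CHANGE, queued from the new netdevice notifier when a bonding failover changes the source address under an rdma_cm_id. A hedged sketch of a consumer-side event handler reacting to them, in the spirit of the iSER hunk further down, which treats an address change like a disconnect; the handler body is illustrative and only the event names come from this patch:

    #include <linux/kernel.h>
    #include <rdma/rdma_cm.h>

    /* Illustrative rdma_cm event handler; returning non-zero would ask
     * the CMA to destroy the id, so it always returns 0 here. */
    static int example_cma_handler(struct rdma_cm_id *id,
                                   struct rdma_cm_event *event)
    {
            switch (event->event) {
            case RDMA_CM_EVENT_ADDR_CHANGE:
                    /* bonding failover moved the source address: a ULP would
                     * typically tear the connection down and re-resolve */
                    printk(KERN_INFO "cma example: addr change on id %p\n", id);
                    break;
            case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                    /* the QP has left the timewait state */
                    printk(KERN_INFO "cma example: timewait exit on id %p\n", id);
                    break;
            case RDMA_CM_EVENT_DISCONNECTED:
            case RDMA_CM_EVENT_DEVICE_REMOVAL:
                    printk(KERN_INFO "cma example: disconnect on id %p\n", id);
                    break;
            default:
                    break;
            }
            return 0;
    }

Such a handler is the callback passed to rdma_create_id() when the id is created.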
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 81c9195b512a..8f9509e1ebf7 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -942,8 +942,7 @@ static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
942 case IW_CM_STATE_CONN_RECV: 942 case IW_CM_STATE_CONN_RECV:
943 case IW_CM_STATE_ESTABLISHED: 943 case IW_CM_STATE_ESTABLISHED:
944 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; 944 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
945 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | 945 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE|
946 IB_ACCESS_REMOTE_WRITE|
947 IB_ACCESS_REMOTE_READ; 946 IB_ACCESS_REMOTE_READ;
948 ret = 0; 947 ret = 0;
949 break; 948 break;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1341de793e51..7863a50d56f2 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1064,7 +1064,8 @@ static void ib_sa_remove_one(struct ib_device *device)
1064 1064
1065 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { 1065 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
1066 ib_unregister_mad_agent(sa_dev->port[i].agent); 1066 ib_unregister_mad_agent(sa_dev->port[i].agent);
1067 kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah); 1067 if (sa_dev->port[i].sm_ah)
1068 kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
1068 } 1069 }
1069 1070
1070 kfree(sa_dev); 1071 kfree(sa_dev);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1e9e99a13933..0b0618edd645 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -194,6 +194,7 @@ struct ehca_qp {
194 u32 packet_count; 194 u32 packet_count;
195 atomic_t nr_events; /* events seen */ 195 atomic_t nr_events; /* events seen */
196 wait_queue_head_t wait_completion; 196 wait_queue_head_t wait_completion;
197 int mig_armed;
197}; 198};
198 199
199#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ) 200#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index bc3b37d2070f..46288220cfbb 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -114,7 +114,9 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
114 } 114 }
115 115
116 props->max_pkeys = 16; 116 props->max_pkeys = 16;
117 props->local_ca_ack_delay = min_t(u8, rblock->local_ca_ack_delay, 255); 117 /* Some FW versions say 0 here; insert sensible value in that case */
118 props->local_ca_ack_delay = rblock->local_ca_ack_delay ?
119 min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
118 props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp); 120 props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
119 props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp); 121 props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
120 props->max_mcast_grp = limit_uint(rblock->max_mcast_grp); 122 props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d930c481..cb55be04442c 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -178,6 +178,10 @@ static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
178{ 178{
179 struct ib_event event; 179 struct ib_event event;
180 180
 181	/* A PATH_MIG event without the QP ever having been armed is a false alarm */
182 if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
183 return;
184
181 event.device = &shca->ib_device; 185 event.device = &shca->ib_device;
182 event.event = event_type; 186 event.event = event_type;
183 187
@@ -646,8 +650,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
646 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 650 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
647 651
648 spin_lock_irqsave(&pool->last_cpu_lock, flags); 652 spin_lock_irqsave(&pool->last_cpu_lock, flags);
649 cpu = next_cpu(pool->last_cpu, cpu_online_map); 653 cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
650 if (cpu == NR_CPUS) 654 if (cpu >= nr_cpu_ids)
651 cpu = first_cpu(cpu_online_map); 655 cpu = first_cpu(cpu_online_map);
652 pool->last_cpu = cpu; 656 pool->last_cpu = cpu;
653 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); 657 spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
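The find_next_online_cpu() hunk above switches the completion-pool scan from next_cpu()/NR_CPUS to next_cpu_nr()/nr_cpu_ids. A stripped-down sketch of the same round-robin pattern, using only the cpumask calls visible in the hunk; the pool structure and locking are omitted and the function name is illustrative:

    #include <linux/cpumask.h>

    /* Pick the online CPU after 'last', wrapping to the first online CPU
     * once the scan runs past the highest possible CPU id. */
    static int example_next_online_cpu(int last)
    {
            int cpu = next_cpu_nr(last, cpu_online_map);

            if (cpu >= nr_cpu_ids)
                    cpu = first_cpu(cpu_online_map);

            return cpu;
    }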
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 3f59587338ea..ea13efddf175 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1460,6 +1460,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1460 goto modify_qp_exit2; 1460 goto modify_qp_exit2;
1461 } 1461 }
1462 mqpcb->path_migration_state = attr->path_mig_state + 1; 1462 mqpcb->path_migration_state = attr->path_mig_state + 1;
1463 if (attr->path_mig_state == IB_MIG_REARM)
1464 my_qp->mig_armed = 1;
1463 update_mask |= 1465 update_mask |=
1464 EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1); 1466 EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
1465 } 1467 }
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 661f8db62706..c3a328465431 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -163,6 +163,7 @@ static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
163 163
164out: 164out:
165 ehca_err(pd->ib_pd.device, "failed to allocate small queue page"); 165 ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
166 mutex_unlock(&pd->lock);
166 return 0; 167 return 0;
167} 168}
168 169
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 299f20832ab6..0b191a4842ce 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -637,6 +637,7 @@ repoll:
637 case MLX4_OPCODE_SEND_IMM: 637 case MLX4_OPCODE_SEND_IMM:
638 wc->wc_flags |= IB_WC_WITH_IMM; 638 wc->wc_flags |= IB_WC_WITH_IMM;
639 case MLX4_OPCODE_SEND: 639 case MLX4_OPCODE_SEND:
640 case MLX4_OPCODE_SEND_INVAL:
640 wc->opcode = IB_WC_SEND; 641 wc->opcode = IB_WC_SEND;
641 break; 642 break;
642 case MLX4_OPCODE_RDMA_READ: 643 case MLX4_OPCODE_RDMA_READ:
@@ -657,6 +658,12 @@ repoll:
657 case MLX4_OPCODE_LSO: 658 case MLX4_OPCODE_LSO:
658 wc->opcode = IB_WC_LSO; 659 wc->opcode = IB_WC_LSO;
659 break; 660 break;
661 case MLX4_OPCODE_FMR:
662 wc->opcode = IB_WC_FAST_REG_MR;
663 break;
664 case MLX4_OPCODE_LOCAL_INVAL:
665 wc->opcode = IB_WC_LOCAL_INV;
666 break;
660 } 667 }
661 } else { 668 } else {
662 wc->byte_len = be32_to_cpu(cqe->byte_cnt); 669 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
@@ -667,6 +674,11 @@ repoll:
667 wc->wc_flags = IB_WC_WITH_IMM; 674 wc->wc_flags = IB_WC_WITH_IMM;
668 wc->ex.imm_data = cqe->immed_rss_invalid; 675 wc->ex.imm_data = cqe->immed_rss_invalid;
669 break; 676 break;
677 case MLX4_RECV_OPCODE_SEND_INVAL:
678 wc->opcode = IB_WC_RECV;
679 wc->wc_flags = IB_WC_WITH_INVALIDATE;
680 wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
681 break;
670 case MLX4_RECV_OPCODE_SEND: 682 case MLX4_RECV_OPCODE_SEND:
671 wc->opcode = IB_WC_RECV; 683 wc->opcode = IB_WC_RECV;
672 wc->wc_flags = 0; 684 wc->wc_flags = 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bcf50648fa18..38d6907ab521 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -104,6 +104,12 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
104 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; 104 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
105 if (dev->dev->caps.max_gso_sz) 105 if (dev->dev->caps.max_gso_sz)
106 props->device_cap_flags |= IB_DEVICE_UD_TSO; 106 props->device_cap_flags |= IB_DEVICE_UD_TSO;
107 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
108 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
109 if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
110 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
111 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
112 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
107 113
108 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 114 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
109 0xffffff; 115 0xffffff;
@@ -127,6 +133,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
127 props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs; 133 props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
128 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1; 134 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
129 props->max_srq_sge = dev->dev->caps.max_srq_sge; 135 props->max_srq_sge = dev->dev->caps.max_srq_sge;
136 props->max_fast_reg_page_list_len = PAGE_SIZE / sizeof (u64);
130 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; 137 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
131 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? 138 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
132 IB_ATOMIC_HCA : IB_ATOMIC_NONE; 139 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
@@ -565,6 +572,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
565 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); 572 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
566 ibdev->ib_dev.owner = THIS_MODULE; 573 ibdev->ib_dev.owner = THIS_MODULE;
567 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; 574 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
575 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
568 ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports; 576 ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports;
569 ibdev->ib_dev.num_comp_vectors = 1; 577 ibdev->ib_dev.num_comp_vectors = 1;
570 ibdev->ib_dev.dma_device = &dev->pdev->dev; 578 ibdev->ib_dev.dma_device = &dev->pdev->dev;
@@ -627,6 +635,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
627 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; 635 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
628 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; 636 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
629 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; 637 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
638 ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
639 ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
640 ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
630 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; 641 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
631 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; 642 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
632 ibdev->ib_dev.process_mad = mlx4_ib_process_mad; 643 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c4cf5b69eefa..d26a91317d4d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -83,6 +83,11 @@ struct mlx4_ib_mr {
83 struct ib_umem *umem; 83 struct ib_umem *umem;
84}; 84};
85 85
86struct mlx4_ib_fast_reg_page_list {
87 struct ib_fast_reg_page_list ibfrpl;
88 dma_addr_t map;
89};
90
86struct mlx4_ib_fmr { 91struct mlx4_ib_fmr {
87 struct ib_fmr ibfmr; 92 struct ib_fmr ibfmr;
88 struct mlx4_fmr mfmr; 93 struct mlx4_fmr mfmr;
@@ -199,6 +204,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
199 return container_of(ibmr, struct mlx4_ib_mr, ibmr); 204 return container_of(ibmr, struct mlx4_ib_mr, ibmr);
200} 205}
201 206
207static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
208{
209 return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
210}
211
202static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr) 212static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
203{ 213{
204 return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr); 214 return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
@@ -239,6 +249,11 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
239 u64 virt_addr, int access_flags, 249 u64 virt_addr, int access_flags,
240 struct ib_udata *udata); 250 struct ib_udata *udata);
241int mlx4_ib_dereg_mr(struct ib_mr *mr); 251int mlx4_ib_dereg_mr(struct ib_mr *mr);
252struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
253 int max_page_list_len);
254struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
255 int page_list_len);
256void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
242 257
243int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 258int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
244int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); 259int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 68e92485fc76..db2086faa4ed 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -183,6 +183,76 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
183 return 0; 183 return 0;
184} 184}
185 185
186struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
187 int max_page_list_len)
188{
189 struct mlx4_ib_dev *dev = to_mdev(pd->device);
190 struct mlx4_ib_mr *mr;
191 int err;
192
193 mr = kmalloc(sizeof *mr, GFP_KERNEL);
194 if (!mr)
195 return ERR_PTR(-ENOMEM);
196
197 err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
198 max_page_list_len, 0, &mr->mmr);
199 if (err)
200 goto err_free;
201
202 err = mlx4_mr_enable(dev->dev, &mr->mmr);
203 if (err)
204 goto err_mr;
205
206 return &mr->ibmr;
207
208err_mr:
209 mlx4_mr_free(dev->dev, &mr->mmr);
210
211err_free:
212 kfree(mr);
213 return ERR_PTR(err);
214}
215
216struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
217 int page_list_len)
218{
219 struct mlx4_ib_dev *dev = to_mdev(ibdev);
220 struct mlx4_ib_fast_reg_page_list *mfrpl;
221 int size = page_list_len * sizeof (u64);
222
223 if (size > PAGE_SIZE)
224 return ERR_PTR(-EINVAL);
225
226 mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
227 if (!mfrpl)
228 return ERR_PTR(-ENOMEM);
229
230 mfrpl->ibfrpl.page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
231 size, &mfrpl->map,
232 GFP_KERNEL);
233 if (!mfrpl->ibfrpl.page_list)
234 goto err_free;
235
236 WARN_ON(mfrpl->map & 0x3f);
237
238 return &mfrpl->ibfrpl;
239
240err_free:
241 kfree(mfrpl);
242 return ERR_PTR(-ENOMEM);
243}
244
245void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
246{
247 struct mlx4_ib_dev *dev = to_mdev(page_list->device);
248 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
249 int size = page_list->max_page_list_len * sizeof (u64);
250
251 dma_free_coherent(&dev->dev->pdev->dev, size, page_list->page_list,
252 mfrpl->map);
253 kfree(mfrpl);
254}
255
186struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc, 256struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
187 struct ib_fmr_attr *fmr_attr) 257 struct ib_fmr_attr *fmr_attr)
188{ 258{
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 89eb6cbe592e..02a99bc4442e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -78,6 +78,9 @@ static const __be32 mlx4_ib_opcode[] = {
78 [IB_WR_RDMA_READ] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ), 78 [IB_WR_RDMA_READ] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
79 [IB_WR_ATOMIC_CMP_AND_SWP] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), 79 [IB_WR_ATOMIC_CMP_AND_SWP] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
80 [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), 80 [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
81 [IB_WR_SEND_WITH_INV] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
82 [IB_WR_LOCAL_INV] = __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
83 [IB_WR_FAST_REG_MR] = __constant_cpu_to_be32(MLX4_OPCODE_FMR),
81}; 84};
82 85
83static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) 86static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -976,6 +979,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
976 context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn); 979 context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
977 context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); 980 context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
978 981
982 /* Set "fast registration enabled" for all kernel QPs */
983 if (!qp->ibqp.uobject)
984 context->params1 |= cpu_to_be32(1 << 11);
985
979 if (attr_mask & IB_QP_RNR_RETRY) { 986 if (attr_mask & IB_QP_RNR_RETRY) {
980 context->params1 |= cpu_to_be32(attr->rnr_retry << 13); 987 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
981 optpar |= MLX4_QP_OPTPAR_RNR_RETRY; 988 optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
@@ -1322,6 +1329,38 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
1322 return cur + nreq >= wq->max_post; 1329 return cur + nreq >= wq->max_post;
1323} 1330}
1324 1331
1332static __be32 convert_access(int acc)
1333{
1334 return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC) : 0) |
1335 (acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
1336 (acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ) : 0) |
1337 (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
1338 cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
1339}
1340
1341static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
1342{
1343 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
1344
1345 fseg->flags = convert_access(wr->wr.fast_reg.access_flags);
1346 fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey);
1347 fseg->buf_list = cpu_to_be64(mfrpl->map);
1348 fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1349 fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length);
1350 fseg->offset = 0; /* XXX -- is this just for ZBVA? */
1351 fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift);
1352 fseg->reserved[0] = 0;
1353 fseg->reserved[1] = 0;
1354}
1355
1356static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
1357{
1358 iseg->flags = 0;
1359 iseg->mem_key = cpu_to_be32(rkey);
1360 iseg->guest_id = 0;
1361 iseg->pa = 0;
1362}
1363
1325static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, 1364static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
1326 u64 remote_addr, u32 rkey) 1365 u64 remote_addr, u32 rkey)
1327{ 1366{
@@ -1395,7 +1434,7 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
1395 dseg->addr = cpu_to_be64(sg->addr); 1434 dseg->addr = cpu_to_be64(sg->addr);
1396} 1435}
1397 1436
1398static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr, 1437static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
1399 struct mlx4_ib_qp *qp, unsigned *lso_seg_len) 1438 struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
1400{ 1439{
1401 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); 1440 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
@@ -1423,6 +1462,21 @@ static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
1423 return 0; 1462 return 0;
1424} 1463}
1425 1464
1465static __be32 send_ieth(struct ib_send_wr *wr)
1466{
1467 switch (wr->opcode) {
1468 case IB_WR_SEND_WITH_IMM:
1469 case IB_WR_RDMA_WRITE_WITH_IMM:
1470 return wr->ex.imm_data;
1471
1472 case IB_WR_SEND_WITH_INV:
1473 return cpu_to_be32(wr->ex.invalidate_rkey);
1474
1475 default:
1476 return 0;
1477 }
1478}
1479
1426int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1480int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1427 struct ib_send_wr **bad_wr) 1481 struct ib_send_wr **bad_wr)
1428{ 1482{
@@ -1469,11 +1523,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1469 MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) | 1523 MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
1470 qp->sq_signal_bits; 1524 qp->sq_signal_bits;
1471 1525
1472 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1526 ctrl->imm = send_ieth(wr);
1473 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1474 ctrl->imm = wr->ex.imm_data;
1475 else
1476 ctrl->imm = 0;
1477 1527
1478 wqe += sizeof *ctrl; 1528 wqe += sizeof *ctrl;
1479 size = sizeof *ctrl / 16; 1529 size = sizeof *ctrl / 16;
@@ -1505,6 +1555,18 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1505 size += sizeof (struct mlx4_wqe_raddr_seg) / 16; 1555 size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
1506 break; 1556 break;
1507 1557
1558 case IB_WR_LOCAL_INV:
1559 set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
1560 wqe += sizeof (struct mlx4_wqe_local_inval_seg);
1561 size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
1562 break;
1563
1564 case IB_WR_FAST_REG_MR:
1565 set_fmr_seg(wqe, wr);
1566 wqe += sizeof (struct mlx4_wqe_fmr_seg);
1567 size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
1568 break;
1569
1508 default: 1570 default:
1509 /* No extra segments required for sends */ 1571 /* No extra segments required for sends */
1510 break; 1572 break;
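Together, the mr.c and qp.c hunks above give mlx4 the memory management extensions path: a driver-allocated fast-register MR and page list, an IB_WR_FAST_REG_MR work request built by set_fmr_seg(), and IB_WR_LOCAL_INV/IB_WR_SEND_WITH_INV carried through send_ieth(). A hedged sketch of how a kernel ULP might use that path, assuming the core exposes the provider methods through ib_alloc_fast_reg_mr()/ib_alloc_fast_reg_page_list() as is the convention for this verbs generation; everything apart from the opcode, the wr.wr.fast_reg fields shown in set_fmr_seg(), and the access flags is illustrative:

    #include <linux/err.h>
    #include <linux/string.h>
    #include <linux/mm.h>
    #include <rdma/ib_verbs.h>

    /* Register a physically scattered buffer through the fast-register
     * path and post the registration on a kernel QP. */
    static int example_fast_reg(struct ib_pd *pd, struct ib_qp *qp,
                                u64 *dma_pages, int npages, u64 iova, u32 len)
    {
            struct ib_mr *mr;
            struct ib_fast_reg_page_list *frpl;
            struct ib_send_wr wr, *bad_wr;
            int i, ret;

            mr = ib_alloc_fast_reg_mr(pd, npages);
            if (IS_ERR(mr))
                    return PTR_ERR(mr);

            frpl = ib_alloc_fast_reg_page_list(pd->device, npages);
            if (IS_ERR(frpl)) {
                    ib_dereg_mr(mr);
                    return PTR_ERR(frpl);
            }

            for (i = 0; i < npages; i++)
                    frpl->page_list[i] = dma_pages[i];      /* DMA page addresses */

            memset(&wr, 0, sizeof wr);
            wr.opcode                       = IB_WR_FAST_REG_MR;
            wr.send_flags                   = IB_SEND_SIGNALED;
            wr.wr.fast_reg.page_list        = frpl;
            wr.wr.fast_reg.page_list_len    = npages;
            wr.wr.fast_reg.page_shift       = PAGE_SHIFT;
            wr.wr.fast_reg.iova_start       = iova;
            wr.wr.fast_reg.length           = len;
            wr.wr.fast_reg.rkey             = mr->rkey;
            wr.wr.fast_reg.access_flags     = IB_ACCESS_REMOTE_READ |
                                              IB_ACCESS_REMOTE_WRITE |
                                              IB_ACCESS_LOCAL_WRITE;

            ret = ib_post_send(qp, &wr, &bad_wr);
            /* on completion the region is addressable through mr->rkey */
            return ret;
    }

A later IB_WR_LOCAL_INV with the same rkey (or a peer's send-with-invalidate, surfaced by the CQ hunk as IB_WC_WITH_INVALIDATE) takes the registration back out of service.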
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index ee4d073c889f..252590116df5 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -202,6 +202,7 @@ struct mthca_pd_table {
202 202
203struct mthca_buddy { 203struct mthca_buddy {
204 unsigned long **bits; 204 unsigned long **bits;
205 int *num_free;
205 int max_order; 206 int max_order;
206 spinlock_t lock; 207 spinlock_t lock;
207}; 208};
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 8489b1e81c0f..882e6b735915 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -89,23 +89,26 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
89 89
90 spin_lock(&buddy->lock); 90 spin_lock(&buddy->lock);
91 91
92 for (o = order; o <= buddy->max_order; ++o) { 92 for (o = order; o <= buddy->max_order; ++o)
93 m = 1 << (buddy->max_order - o); 93 if (buddy->num_free[o]) {
94 seg = find_first_bit(buddy->bits[o], m); 94 m = 1 << (buddy->max_order - o);
95 if (seg < m) 95 seg = find_first_bit(buddy->bits[o], m);
96 goto found; 96 if (seg < m)
97 } 97 goto found;
98 }
98 99
99 spin_unlock(&buddy->lock); 100 spin_unlock(&buddy->lock);
100 return -1; 101 return -1;
101 102
102 found: 103 found:
103 clear_bit(seg, buddy->bits[o]); 104 clear_bit(seg, buddy->bits[o]);
105 --buddy->num_free[o];
104 106
105 while (o > order) { 107 while (o > order) {
106 --o; 108 --o;
107 seg <<= 1; 109 seg <<= 1;
108 set_bit(seg ^ 1, buddy->bits[o]); 110 set_bit(seg ^ 1, buddy->bits[o]);
111 ++buddy->num_free[o];
109 } 112 }
110 113
111 spin_unlock(&buddy->lock); 114 spin_unlock(&buddy->lock);
@@ -123,11 +126,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
123 126
124 while (test_bit(seg ^ 1, buddy->bits[order])) { 127 while (test_bit(seg ^ 1, buddy->bits[order])) {
125 clear_bit(seg ^ 1, buddy->bits[order]); 128 clear_bit(seg ^ 1, buddy->bits[order]);
129 --buddy->num_free[order];
126 seg >>= 1; 130 seg >>= 1;
127 ++order; 131 ++order;
128 } 132 }
129 133
130 set_bit(seg, buddy->bits[order]); 134 set_bit(seg, buddy->bits[order]);
135 ++buddy->num_free[order];
131 136
132 spin_unlock(&buddy->lock); 137 spin_unlock(&buddy->lock);
133} 138}
@@ -141,7 +146,9 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
141 146
142 buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), 147 buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
143 GFP_KERNEL); 148 GFP_KERNEL);
144 if (!buddy->bits) 149 buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
150 GFP_KERNEL);
151 if (!buddy->bits || !buddy->num_free)
145 goto err_out; 152 goto err_out;
146 153
147 for (i = 0; i <= buddy->max_order; ++i) { 154 for (i = 0; i <= buddy->max_order; ++i) {
@@ -154,6 +161,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
154 } 161 }
155 162
156 set_bit(0, buddy->bits[buddy->max_order]); 163 set_bit(0, buddy->bits[buddy->max_order]);
164 buddy->num_free[buddy->max_order] = 1;
157 165
158 return 0; 166 return 0;
159 167
@@ -161,9 +169,10 @@ err_out_free:
161 for (i = 0; i <= buddy->max_order; ++i) 169 for (i = 0; i <= buddy->max_order; ++i)
162 kfree(buddy->bits[i]); 170 kfree(buddy->bits[i]);
163 171
172err_out:
164 kfree(buddy->bits); 173 kfree(buddy->bits);
174 kfree(buddy->num_free);
165 175
166err_out:
167 return -ENOMEM; 176 return -ENOMEM;
168} 177}
169 178
@@ -175,6 +184,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
175 kfree(buddy->bits[i]); 184 kfree(buddy->bits[i]);
176 185
177 kfree(buddy->bits); 186 kfree(buddy->bits);
187 kfree(buddy->num_free);
178} 188}
179 189
180static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order, 190static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
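The mthca_mr.c hunk adds a per-order num_free[] counter to the MTT buddy allocator so mthca_buddy_alloc() can skip the bitmap scan for orders that have nothing free. A simplified, self-contained sketch of that search step; locking and the split/merge bookkeeping are left out and the names are illustrative:

    #include <linux/bitops.h>

    /* Find the smallest order >= 'order' that still has a free segment,
     * consulting num_free[] before touching the bitmap at all. Returns
     * the order found, or -1 if the allocator is exhausted. */
    static int example_buddy_find(unsigned long **bits, int *num_free,
                                  int max_order, int order)
    {
            int o, m, seg;

            for (o = order; o <= max_order; ++o) {
                    if (!num_free[o])
                            continue;               /* cheap skip, no bitmap scan */
                    m = 1 << (max_order - o);
                    seg = find_first_bit(bits[o], m);
                    if (seg < m)
                            return o;               /* bit 'seg' of bits[o] is free */
            }
            return -1;
    }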
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8be9ea0436e6..f51201b17bfd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -548,7 +548,7 @@ static int path_rec_start(struct net_device *dev,
548 path_rec_completion, 548 path_rec_completion,
549 path, &path->query); 549 path, &path->query);
550 if (path->query_id < 0) { 550 if (path->query_id < 0) {
551 ipoib_warn(priv, "ib_sa_path_rec_get failed\n"); 551 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
552 path->query = NULL; 552 path->query = NULL;
553 return path->query_id; 553 return path->query_id;
554 } 554 }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 3a917c1f796f..63462ecca147 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -483,6 +483,7 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
483 break; 483 break;
484 case RDMA_CM_EVENT_DISCONNECTED: 484 case RDMA_CM_EVENT_DISCONNECTED:
485 case RDMA_CM_EVENT_DEVICE_REMOVAL: 485 case RDMA_CM_EVENT_DEVICE_REMOVAL:
486 case RDMA_CM_EVENT_ADDR_CHANGE:
486 iser_disconnected_handler(cma_id); 487 iser_disconnected_handler(cma_id);
487 break; 488 break;
488 default: 489 default:
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index adbf29f0169d..71c1971abf80 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -37,6 +37,7 @@
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/semaphore.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
41#include <linux/pci_ids.h> 42#include <linux/pci_ids.h>
42 43
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
index 94e444b4ee15..b12b7ee4b6aa 100644
--- a/drivers/input/keyboard/tosakbd.c
+++ b/drivers/input/keyboard/tosakbd.c
@@ -215,8 +215,6 @@ static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
215 unsigned long flags; 215 unsigned long flags;
216 216
217 spin_lock_irqsave(&tosakbd->lock, flags); 217 spin_lock_irqsave(&tosakbd->lock, flags);
218 PGSR1 = (PGSR1 & ~TOSA_GPIO_LOW_STROBE_BIT);
219 PGSR2 = (PGSR2 & ~TOSA_GPIO_HIGH_STROBE_BIT);
220 tosakbd->suspended = 1; 218 tosakbd->suspended = 1;
221 spin_unlock_irqrestore(&tosakbd->lock, flags); 219 spin_unlock_irqrestore(&tosakbd->lock, flags);
222 220
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 49d8abfe38fe..daa9d4220331 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -44,6 +44,7 @@
44#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
45#include <linux/poll.h> 45#include <linux/poll.h>
46#include <linux/rtc.h> 46#include <linux/rtc.h>
47#include <linux/semaphore.h>
47 48
48MODULE_AUTHOR("Brian S. Julin <bri@calyx.com>"); 49MODULE_AUTHOR("Brian S. Julin <bri@calyx.com>");
49MODULE_DESCRIPTION("HP i8042 SDC + MSM-58321 RTC Driver"); 50MODULE_DESCRIPTION("HP i8042 SDC + MSM-58321 RTC Driver");
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 7b233a492ad5..0d395979b2d1 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -67,9 +67,9 @@
67#include <linux/module.h> 67#include <linux/module.h>
68#include <linux/ioport.h> 68#include <linux/ioport.h>
69#include <linux/time.h> 69#include <linux/time.h>
70#include <linux/semaphore.h>
70#include <linux/slab.h> 71#include <linux/slab.h>
71#include <linux/hil.h> 72#include <linux/hil.h>
72#include <linux/semaphore.h>
73#include <asm/io.h> 73#include <asm/io.h>
74#include <asm/system.h> 74#include <asm/system.h>
75 75
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 091deb9d1c47..c2bd97d29273 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -575,7 +575,8 @@ int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
575 else 575 else
576 skb = iraw_encode(skb, HW_HDR_LEN, 0); 576 skb = iraw_encode(skb, HW_HDR_LEN, 0);
577 if (!skb) { 577 if (!skb) {
578 err("unable to allocate memory for encoding!\n"); 578 dev_err(bcs->cs->dev,
579 "unable to allocate memory for encoding!\n");
579 return -ENOMEM; 580 return -ENOMEM;
580 } 581 }
581 582
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 5255b5e20e13..3f11910c7ccd 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -1050,10 +1050,9 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1050 } 1050 }
1051 1051
1052 /* retrieve block of data to send */ 1052 /* retrieve block of data to send */
1053 ifd->offset = gigaset_isowbuf_getbytes(ubc->isooutbuf, 1053 rc = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
1054 ifd->length); 1054 if (rc < 0) {
1055 if (ifd->offset < 0) { 1055 if (rc == -EBUSY) {
1056 if (ifd->offset == -EBUSY) {
1057 gig_dbg(DEBUG_ISO, 1056 gig_dbg(DEBUG_ISO,
1058 "%s: buffer busy at frame %d", 1057 "%s: buffer busy at frame %d",
1059 __func__, nframe); 1058 __func__, nframe);
@@ -1062,11 +1061,12 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1062 } else { 1061 } else {
1063 dev_err(ucx->bcs->cs->dev, 1062 dev_err(ucx->bcs->cs->dev,
1064 "%s: buffer error %d at frame %d\n", 1063 "%s: buffer error %d at frame %d\n",
1065 __func__, ifd->offset, nframe); 1064 __func__, rc, nframe);
1066 return ifd->offset; 1065 return rc;
1067 } 1066 }
1068 break; 1067 break;
1069 } 1068 }
1069 ifd->offset = rc;
1070 ucx->limit = ubc->isooutbuf->nextread; 1070 ucx->limit = ubc->isooutbuf->nextread;
1071 ifd->status = 0; 1071 ifd->status = 0;
1072 ifd->actual_length = 0; 1072 ifd->actual_length = 0;
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 827c32c16795..9d3ce7718e58 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -287,7 +287,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
287 tail = cs->ev_tail; 287 tail = cs->ev_tail;
288 next = (tail + 1) % MAX_EVENTS; 288 next = (tail + 1) % MAX_EVENTS;
289 if (unlikely(next == cs->ev_head)) 289 if (unlikely(next == cs->ev_head))
290 err("event queue full"); 290 dev_err(cs->dev, "event queue full\n");
291 else { 291 else {
292 event = cs->events + tail; 292 event = cs->events + tail;
293 event->type = type; 293 event->type = type;
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index f365993161fc..003752954993 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -106,7 +106,6 @@ enum debuglevel {
106#undef err 106#undef err
107#undef info 107#undef info
108#undef warn 108#undef warn
109#undef notice
110 109
111#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \ 110#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
112 format "\n" , ## arg) 111 format "\n" , ## arg)
@@ -114,8 +113,6 @@ enum debuglevel {
114 format "\n" , ## arg) 113 format "\n" , ## arg)
115#define warn(format, arg...) printk(KERN_WARNING KBUILD_MODNAME ": " \ 114#define warn(format, arg...) printk(KERN_WARNING KBUILD_MODNAME ": " \
116 format "\n" , ## arg) 115 format "\n" , ## arg)
117#define notice(format, arg...) printk(KERN_NOTICE KBUILD_MODNAME ": " \
118 format "\n" , ## arg)
119 116
120#ifdef CONFIG_GIGASET_DEBUG 117#ifdef CONFIG_GIGASET_DEBUG
121 118
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 9e089f06a942..3c127a8cbaf2 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -46,7 +46,8 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
46 return -ENODEV; 46 return -ENODEV;
47 } 47 }
48 if (channel < 0 || channel >= cs->channels) { 48 if (channel < 0 || channel >= cs->channels) {
49 err("%s: invalid channel ID (%d)", __func__, channel); 49 dev_err(cs->dev, "%s: invalid channel ID (%d)\n",
50 __func__, channel);
50 return -ENODEV; 51 return -ENODEV;
51 } 52 }
52 bcs = &cs->bcs[channel]; 53 bcs = &cs->bcs[channel];
@@ -58,11 +59,13 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
58 59
59 if (!len) { 60 if (!len) {
60 if (ack) 61 if (ack)
61 notice("%s: not ACKing empty packet", __func__); 62 dev_notice(cs->dev, "%s: not ACKing empty packet\n",
63 __func__);
62 return 0; 64 return 0;
63 } 65 }
64 if (len > MAX_BUF_SIZE) { 66 if (len > MAX_BUF_SIZE) {
65 err("%s: packet too large (%d bytes)", __func__, len); 67 dev_err(cs->dev, "%s: packet too large (%d bytes)\n",
68 __func__, len);
66 return -EINVAL; 69 return -EINVAL;
67 } 70 }
68 71
@@ -116,8 +119,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
116 gigaset_debugdrivers(); 119 gigaset_debugdrivers();
117 120
118 if (!cs) { 121 if (!cs) {
119 warn("LL tried to access unknown device with nr. %d", 122 err("%s: invalid driver ID (%d)", __func__, cntrl->driver);
120 cntrl->driver);
121 return -ENODEV; 123 return -ENODEV;
122 } 124 }
123 125
@@ -126,7 +128,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
126 gig_dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver: %d, arg: %ld)", 128 gig_dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver: %d, arg: %ld)",
127 cntrl->driver, cntrl->arg); 129 cntrl->driver, cntrl->arg);
128 130
129 warn("ISDN_CMD_IOCTL is not supported."); 131 dev_warn(cs->dev, "ISDN_CMD_IOCTL not supported\n");
130 return -EINVAL; 132 return -EINVAL;
131 133
132 case ISDN_CMD_DIAL: 134 case ISDN_CMD_DIAL:
@@ -138,22 +140,23 @@ static int command_from_LL(isdn_ctrl *cntrl)
138 cntrl->parm.setup.si1, cntrl->parm.setup.si2); 140 cntrl->parm.setup.si1, cntrl->parm.setup.si2);
139 141
140 if (cntrl->arg >= cs->channels) { 142 if (cntrl->arg >= cs->channels) {
141 err("ISDN_CMD_DIAL: invalid channel (%d)", 143 dev_err(cs->dev,
142 (int) cntrl->arg); 144 "ISDN_CMD_DIAL: invalid channel (%d)\n",
145 (int) cntrl->arg);
143 return -EINVAL; 146 return -EINVAL;
144 } 147 }
145 148
146 bcs = cs->bcs + cntrl->arg; 149 bcs = cs->bcs + cntrl->arg;
147 150
148 if (!gigaset_get_channel(bcs)) { 151 if (!gigaset_get_channel(bcs)) {
149 err("ISDN_CMD_DIAL: channel not free"); 152 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
150 return -EBUSY; 153 return -EBUSY;
151 } 154 }
152 155
153 sp = kmalloc(sizeof *sp, GFP_ATOMIC); 156 sp = kmalloc(sizeof *sp, GFP_ATOMIC);
154 if (!sp) { 157 if (!sp) {
155 gigaset_free_channel(bcs); 158 gigaset_free_channel(bcs);
156 err("ISDN_CMD_DIAL: out of memory"); 159 dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n");
157 return -ENOMEM; 160 return -ENOMEM;
158 } 161 }
159 *sp = cntrl->parm.setup; 162 *sp = cntrl->parm.setup;
@@ -173,8 +176,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
173 gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD"); 176 gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD");
174 177
175 if (cntrl->arg >= cs->channels) { 178 if (cntrl->arg >= cs->channels) {
176 err("ISDN_CMD_ACCEPTD: invalid channel (%d)", 179 dev_err(cs->dev,
177 (int) cntrl->arg); 180 "ISDN_CMD_ACCEPTD: invalid channel (%d)\n",
181 (int) cntrl->arg);
178 return -EINVAL; 182 return -EINVAL;
179 } 183 }
180 184
@@ -196,8 +200,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
196 (int) cntrl->arg); 200 (int) cntrl->arg);
197 201
198 if (cntrl->arg >= cs->channels) { 202 if (cntrl->arg >= cs->channels) {
199 err("ISDN_CMD_HANGUP: invalid channel (%u)", 203 dev_err(cs->dev,
200 (unsigned) cntrl->arg); 204 "ISDN_CMD_HANGUP: invalid channel (%d)\n",
205 (int) cntrl->arg);
201 return -EINVAL; 206 return -EINVAL;
202 } 207 }
203 208
@@ -224,8 +229,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
224 cntrl->arg & 0xff, (cntrl->arg >> 8)); 229 cntrl->arg & 0xff, (cntrl->arg >> 8));
225 230
226 if ((cntrl->arg & 0xff) >= cs->channels) { 231 if ((cntrl->arg & 0xff) >= cs->channels) {
227 err("ISDN_CMD_SETL2: invalid channel (%u)", 232 dev_err(cs->dev,
228 (unsigned) cntrl->arg & 0xff); 233 "ISDN_CMD_SETL2: invalid channel (%d)\n",
234 (int) cntrl->arg & 0xff);
229 return -EINVAL; 235 return -EINVAL;
230 } 236 }
231 237
@@ -244,14 +250,16 @@ static int command_from_LL(isdn_ctrl *cntrl)
244 cntrl->arg & 0xff, (cntrl->arg >> 8)); 250 cntrl->arg & 0xff, (cntrl->arg >> 8));
245 251
246 if ((cntrl->arg & 0xff) >= cs->channels) { 252 if ((cntrl->arg & 0xff) >= cs->channels) {
247 err("ISDN_CMD_SETL3: invalid channel (%u)", 253 dev_err(cs->dev,
248 (unsigned) cntrl->arg & 0xff); 254 "ISDN_CMD_SETL3: invalid channel (%d)\n",
255 (int) cntrl->arg & 0xff);
249 return -EINVAL; 256 return -EINVAL;
250 } 257 }
251 258
252 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) { 259 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
253 err("ISDN_CMD_SETL3: invalid protocol %lu", 260 dev_err(cs->dev,
254 cntrl->arg >> 8); 261 "ISDN_CMD_SETL3: invalid protocol %lu\n",
262 cntrl->arg >> 8);
255 return -EINVAL; 263 return -EINVAL;
256 } 264 }
257 265
@@ -262,8 +270,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
262 case ISDN_CMD_ALERT: 270 case ISDN_CMD_ALERT:
263 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME 271 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME
264 if (cntrl->arg >= cs->channels) { 272 if (cntrl->arg >= cs->channels) {
265 err("ISDN_CMD_ALERT: invalid channel (%d)", 273 dev_err(cs->dev,
266 (int) cntrl->arg); 274 "ISDN_CMD_ALERT: invalid channel (%d)\n",
275 (int) cntrl->arg);
267 return -EINVAL; 276 return -EINVAL;
268 } 277 }
269 //bcs = cs->bcs + cntrl->arg; 278 //bcs = cs->bcs + cntrl->arg;
@@ -295,7 +304,8 @@ static int command_from_LL(isdn_ctrl *cntrl)
295 gig_dbg(DEBUG_ANY, "ISDN_CMD_GETSIL"); 304 gig_dbg(DEBUG_ANY, "ISDN_CMD_GETSIL");
296 break; 305 break;
297 default: 306 default:
298 err("unknown command %d from LL", cntrl->command); 307 dev_err(cs->dev, "unknown command %d from LL\n",
308 cntrl->command);
299 return -EINVAL; 309 return -EINVAL;
300 } 310 }
301 311
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index af195b07c191..521951a898ec 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -197,7 +197,7 @@ static void if_close(struct tty_struct *tty, struct file *filp)
197 mutex_lock(&cs->mutex); 197 mutex_lock(&cs->mutex);
198 198
199 if (!cs->open_count) 199 if (!cs->open_count)
200 warn("%s: device not opened", __func__); 200 dev_warn(cs->dev, "%s: device not opened\n", __func__);
201 else { 201 else {
202 if (!--cs->open_count) { 202 if (!--cs->open_count) {
203 spin_lock_irqsave(&cs->lock, flags); 203 spin_lock_irqsave(&cs->lock, flags);
@@ -232,7 +232,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
232 return -ERESTARTSYS; // FIXME -EINTR? 232 return -ERESTARTSYS; // FIXME -EINTR?
233 233
234 if (!cs->open_count) 234 if (!cs->open_count)
235 warn("%s: device not opened", __func__); 235 dev_warn(cs->dev, "%s: device not opened\n", __func__);
236 else { 236 else {
237 retval = 0; 237 retval = 0;
238 switch (cmd) { 238 switch (cmd) {
@@ -364,9 +364,9 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
364 return -ERESTARTSYS; // FIXME -EINTR? 364 return -ERESTARTSYS; // FIXME -EINTR?
365 365
366 if (!cs->open_count) 366 if (!cs->open_count)
367 warn("%s: device not opened", __func__); 367 dev_warn(cs->dev, "%s: device not opened\n", __func__);
368 else if (cs->mstate != MS_LOCKED) { 368 else if (cs->mstate != MS_LOCKED) {
369 warn("can't write to unlocked device"); 369 dev_warn(cs->dev, "can't write to unlocked device\n");
370 retval = -EBUSY; 370 retval = -EBUSY;
371 } else if (!cs->connected) { 371 } else if (!cs->connected) {
372 gig_dbg(DEBUG_ANY, "can't write to unplugged device"); 372 gig_dbg(DEBUG_ANY, "can't write to unplugged device");
@@ -398,9 +398,9 @@ static int if_write_room(struct tty_struct *tty)
398 return -ERESTARTSYS; // FIXME -EINTR? 398 return -ERESTARTSYS; // FIXME -EINTR?
399 399
400 if (!cs->open_count) 400 if (!cs->open_count)
401 warn("%s: device not opened", __func__); 401 dev_warn(cs->dev, "%s: device not opened\n", __func__);
402 else if (cs->mstate != MS_LOCKED) { 402 else if (cs->mstate != MS_LOCKED) {
403 warn("can't write to unlocked device"); 403 dev_warn(cs->dev, "can't write to unlocked device\n");
404 retval = -EBUSY; 404 retval = -EBUSY;
405 } else if (!cs->connected) { 405 } else if (!cs->connected) {
406 gig_dbg(DEBUG_ANY, "can't write to unplugged device"); 406 gig_dbg(DEBUG_ANY, "can't write to unplugged device");
@@ -430,9 +430,9 @@ static int if_chars_in_buffer(struct tty_struct *tty)
430 return -ERESTARTSYS; // FIXME -EINTR? 430 return -ERESTARTSYS; // FIXME -EINTR?
431 431
432 if (!cs->open_count) 432 if (!cs->open_count)
433 warn("%s: device not opened", __func__); 433 dev_warn(cs->dev, "%s: device not opened\n", __func__);
434 else if (cs->mstate != MS_LOCKED) { 434 else if (cs->mstate != MS_LOCKED) {
435 warn("can't write to unlocked device"); 435 dev_warn(cs->dev, "can't write to unlocked device\n");
436 retval = -EBUSY; 436 retval = -EBUSY;
437 } else if (!cs->connected) { 437 } else if (!cs->connected) {
438 gig_dbg(DEBUG_ANY, "can't write to unplugged device"); 438 gig_dbg(DEBUG_ANY, "can't write to unplugged device");
@@ -460,7 +460,7 @@ static void if_throttle(struct tty_struct *tty)
460 mutex_lock(&cs->mutex); 460 mutex_lock(&cs->mutex);
461 461
462 if (!cs->open_count) 462 if (!cs->open_count)
463 warn("%s: device not opened", __func__); 463 dev_warn(cs->dev, "%s: device not opened\n", __func__);
464 else { 464 else {
465 //FIXME 465 //FIXME
466 } 466 }
@@ -483,7 +483,7 @@ static void if_unthrottle(struct tty_struct *tty)
483 mutex_lock(&cs->mutex); 483 mutex_lock(&cs->mutex);
484 484
485 if (!cs->open_count) 485 if (!cs->open_count)
486 warn("%s: device not opened", __func__); 486 dev_warn(cs->dev, "%s: device not opened\n", __func__);
487 else { 487 else {
488 //FIXME 488 //FIXME
489 } 489 }
@@ -510,7 +510,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
510 mutex_lock(&cs->mutex); 510 mutex_lock(&cs->mutex);
511 511
512 if (!cs->open_count) { 512 if (!cs->open_count) {
513 warn("%s: device not opened", __func__); 513 dev_warn(cs->dev, "%s: device not opened\n", __func__);
514 goto out; 514 goto out;
515 } 515 }
516 516
@@ -623,7 +623,8 @@ void gigaset_if_init(struct cardstate *cs)
623 if (!IS_ERR(cs->tty_dev)) 623 if (!IS_ERR(cs->tty_dev))
624 dev_set_drvdata(cs->tty_dev, cs); 624 dev_set_drvdata(cs->tty_dev, cs);
625 else { 625 else {
626 warn("could not register device to the tty subsystem"); 626 dev_warn(cs->dev,
627 "could not register device to the tty subsystem\n");
627 cs->tty_dev = NULL; 628 cs->tty_dev = NULL;
628 } 629 }
629 mutex_unlock(&cs->mutex); 630 mutex_unlock(&cs->mutex);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 77d20ab0cd4d..4661830a49db 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -498,8 +498,9 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
498 498
499 if (status) { 499 if (status) {
500 ucs->busy = 0; 500 ucs->busy = 0;
501 err("could not submit urb (error %d)\n", 501 dev_err(cs->dev,
502 -status); 502 "could not submit urb (error %d)\n",
503 -status);
503 cb->len = 0; /* skip urb => remove cb+wakeup 504 cb->len = 0; /* skip urb => remove cb+wakeup
504 in next loop cycle */ 505 in next loop cycle */
505 } 506 }
@@ -670,7 +671,7 @@ static int write_modem(struct cardstate *cs)
670 spin_unlock_irqrestore(&cs->lock, flags); 671 spin_unlock_irqrestore(&cs->lock, flags);
671 672
672 if (ret) { 673 if (ret) {
673 err("could not submit urb (error %d)\n", -ret); 674 dev_err(cs->dev, "could not submit urb (error %d)\n", -ret);
674 ucs->busy = 0; 675 ucs->busy = 0;
675 } 676 }
676 677
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 2044e7173ab4..cff7a6354334 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -220,7 +220,7 @@ enum {
220#define ERR(format, arg...) \ 220#define ERR(format, arg...) \
221printk(KERN_ERR "%s:%s: " format "\n" , __FILE__, __func__ , ## arg) 221printk(KERN_ERR "%s:%s: " format "\n" , __FILE__, __func__ , ## arg)
222 222
223#define WARN(format, arg...) \ 223#define WARNING(format, arg...) \
224printk(KERN_WARNING "%s:%s: " format "\n" , __FILE__, __func__ , ## arg) 224printk(KERN_WARNING "%s:%s: " format "\n" , __FILE__, __func__ , ## arg)
225 225
226#define INFO(format, arg...) \ 226#define INFO(format, arg...) \
@@ -412,7 +412,7 @@ struct st5481_adapter {
412({ \ 412({ \
413 int status; \ 413 int status; \
414 if ((status = usb_submit_urb(urb, mem_flags)) < 0) { \ 414 if ((status = usb_submit_urb(urb, mem_flags)) < 0) { \
415 WARN("usb_submit_urb failed,status=%d", status); \ 415 WARNING("usb_submit_urb failed,status=%d", status); \
416 } \ 416 } \
417 status; \ 417 status; \
418}) 418})
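
The rename of the driver-local WARN() macro to WARNING() looks like preparation for the generic kernel-wide WARN() macro; the local one only prefixes the message with file and function name. A minimal userspace sketch of the same macro pattern, substituting printf for printk (everything else mirrors the header above; the status value is just an example):

#include <stdio.h>

/* Local warning macro in the style of st5481.h: prefix every message
 * with the file and function it came from, then append a newline. */
#define WARNING(format, arg...) \
	printf("%s:%s: " format "\n", __FILE__, __func__, ## arg)

int main(void)
{
	int status = -71;	/* e.g. a failed URB submission status */
	WARNING("usb_submit_urb failed,status=%d", status);
	return 0;
}
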
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index fa64115cd7c7..0074b600a0ef 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -180,7 +180,7 @@ static void usb_b_out_complete(struct urb *urb)
180 DBG(4,"urb killed status %d", urb->status); 180 DBG(4,"urb killed status %d", urb->status);
181 return; // Give up 181 return; // Give up
182 default: 182 default:
183 WARN("urb status %d",urb->status); 183 WARNING("urb status %d",urb->status);
184 if (b_out->busy == 0) { 184 if (b_out->busy == 0) {
185 st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2 | USB_DIR_OUT, NULL, NULL); 185 st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2 | USB_DIR_OUT, NULL, NULL);
186 } 186 }
@@ -372,6 +372,6 @@ void st5481_b_l2l1(struct hisax_if *ifc, int pr, void *arg)
372 B_L1L2(bcs, PH_DEACTIVATE | INDICATION, NULL); 372 B_L1L2(bcs, PH_DEACTIVATE | INDICATION, NULL);
373 break; 373 break;
374 default: 374 default:
375 WARN("pr %#x\n", pr); 375 WARNING("pr %#x\n", pr);
376 } 376 }
377} 377}
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index b8c4855cc889..077991c1cd05 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -389,7 +389,7 @@ static void usb_d_out_complete(struct urb *urb)
389 DBG(1,"urb killed status %d", urb->status); 389 DBG(1,"urb killed status %d", urb->status);
390 break; 390 break;
391 default: 391 default:
392 WARN("urb status %d",urb->status); 392 WARNING("urb status %d",urb->status);
393 if (d_out->busy == 0) { 393 if (d_out->busy == 0) {
394 st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter); 394 st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
395 } 395 }
@@ -420,7 +420,7 @@ static void dout_start_xmit(struct FsmInst *fsm, int event, void *arg)
420 isdnhdlc_out_init(&d_out->hdlc_state, 1, 0); 420 isdnhdlc_out_init(&d_out->hdlc_state, 1, 0);
421 421
422 if (test_and_set_bit(buf_nr, &d_out->busy)) { 422 if (test_and_set_bit(buf_nr, &d_out->busy)) {
423 WARN("ep %d urb %d busy %#lx", EP_D_OUT, buf_nr, d_out->busy); 423 WARNING("ep %d urb %d busy %#lx", EP_D_OUT, buf_nr, d_out->busy);
424 return; 424 return;
425 } 425 }
426 urb = d_out->urb[buf_nr]; 426 urb = d_out->urb[buf_nr];
@@ -601,7 +601,7 @@ void st5481_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg)
601 FsmEvent(&adapter->d_out.fsm, EV_DOUT_START_XMIT, NULL); 601 FsmEvent(&adapter->d_out.fsm, EV_DOUT_START_XMIT, NULL);
602 break; 602 break;
603 default: 603 default:
604 WARN("pr %#x\n", pr); 604 WARNING("pr %#x\n", pr);
605 break; 605 break;
606 } 606 }
607} 607}
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 427a8b0520f5..ec3c0e507669 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -66,7 +66,7 @@ static void usb_ctrl_msg(struct st5481_adapter *adapter,
66 struct ctrl_msg *ctrl_msg; 66 struct ctrl_msg *ctrl_msg;
67 67
68 if ((w_index = fifo_add(&ctrl->msg_fifo.f)) < 0) { 68 if ((w_index = fifo_add(&ctrl->msg_fifo.f)) < 0) {
69 WARN("control msg FIFO full"); 69 WARNING("control msg FIFO full");
70 return; 70 return;
71 } 71 }
72 ctrl_msg = &ctrl->msg_fifo.data[w_index]; 72 ctrl_msg = &ctrl->msg_fifo.data[w_index];
@@ -139,7 +139,7 @@ static void usb_ctrl_complete(struct urb *urb)
139 DBG(1,"urb killed status %d", urb->status); 139 DBG(1,"urb killed status %d", urb->status);
140 return; // Give up 140 return; // Give up
141 default: 141 default:
142 WARN("urb status %d",urb->status); 142 WARNING("urb status %d",urb->status);
143 break; 143 break;
144 } 144 }
145 } 145 }
@@ -198,7 +198,7 @@ static void usb_int_complete(struct urb *urb)
198 DBG(2, "urb shutting down with status: %d", urb->status); 198 DBG(2, "urb shutting down with status: %d", urb->status);
199 return; 199 return;
200 default: 200 default:
201 WARN("nonzero urb status received: %d", urb->status); 201 WARNING("nonzero urb status received: %d", urb->status);
202 goto exit; 202 goto exit;
203 } 203 }
204 204
@@ -235,7 +235,7 @@ static void usb_int_complete(struct urb *urb)
235exit: 235exit:
236 status = usb_submit_urb (urb, GFP_ATOMIC); 236 status = usb_submit_urb (urb, GFP_ATOMIC);
237 if (status) 237 if (status)
238 WARN("usb_submit_urb failed with result %d", status); 238 WARNING("usb_submit_urb failed with result %d", status);
239} 239}
240 240
241/* ====================================================================== 241/* ======================================================================
@@ -257,7 +257,7 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
257 DBG(2,""); 257 DBG(2,"");
258 258
259 if ((status = usb_reset_configuration (dev)) < 0) { 259 if ((status = usb_reset_configuration (dev)) < 0) {
260 WARN("reset_configuration failed,status=%d",status); 260 WARNING("reset_configuration failed,status=%d",status);
261 return status; 261 return status;
262 } 262 }
263 263
@@ -269,7 +269,7 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
269 269
270 // Check if the config is sane 270 // Check if the config is sane
271 if ( altsetting->desc.bNumEndpoints != 7 ) { 271 if ( altsetting->desc.bNumEndpoints != 7 ) {
272 WARN("expecting 7 got %d endpoints!", altsetting->desc.bNumEndpoints); 272 WARNING("expecting 7 got %d endpoints!", altsetting->desc.bNumEndpoints);
273 return -EINVAL; 273 return -EINVAL;
274 } 274 }
275 275
@@ -279,7 +279,7 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
279 279
280 // Use alternative setting 3 on interface 0 to have 2B+D 280 // Use alternative setting 3 on interface 0 to have 2B+D
281 if ((status = usb_set_interface (dev, 0, 3)) < 0) { 281 if ((status = usb_set_interface (dev, 0, 3)) < 0) {
282 WARN("usb_set_interface failed,status=%d",status); 282 WARNING("usb_set_interface failed,status=%d",status);
283 return status; 283 return status;
284 } 284 }
285 285
@@ -497,7 +497,7 @@ static void usb_in_complete(struct urb *urb)
497 DBG(1,"urb killed status %d", urb->status); 497 DBG(1,"urb killed status %d", urb->status);
498 return; // Give up 498 return; // Give up
499 default: 499 default:
500 WARN("urb status %d",urb->status); 500 WARNING("urb status %d",urb->status);
501 break; 501 break;
502 } 502 }
503 } 503 }
@@ -523,7 +523,7 @@ static void usb_in_complete(struct urb *urb)
523 DBG(4,"count=%d",status); 523 DBG(4,"count=%d",status);
524 DBG_PACKET(0x400, in->rcvbuf, status); 524 DBG_PACKET(0x400, in->rcvbuf, status);
525 if (!(skb = dev_alloc_skb(status))) { 525 if (!(skb = dev_alloc_skb(status))) {
526 WARN("receive out of memory\n"); 526 WARNING("receive out of memory\n");
527 break; 527 break;
528 } 528 }
529 memcpy(skb_put(skb, status), in->rcvbuf, status); 529 memcpy(skb_put(skb, status), in->rcvbuf, status);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 86a369bc57d6..9556262dda5a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -103,6 +103,14 @@ config LEDS_HP6XX
103 This option enables led support for the handheld 103 This option enables led support for the handheld
104 HP Jornada 620/660/680/690. 104 HP Jornada 620/660/680/690.
105 105
106config LEDS_PCA9532
107 tristate "LED driver for PCA9532 dimmer"
108 depends on LEDS_CLASS && I2C && INPUT && EXPERIMENTAL
109 help
 110 This option enables support for the NXP PCA9532
 111 LED controller. It is generally only useful
 112 as a platform driver.
113
106config LEDS_GPIO 114config LEDS_GPIO
107 tristate "LED Support for GPIO connected LEDs" 115 tristate "LED Support for GPIO connected LEDs"
108 depends on LEDS_CLASS && GENERIC_GPIO 116 depends on LEDS_CLASS && GENERIC_GPIO
@@ -147,6 +155,14 @@ config LEDS_CLEVO_MAIL
147 To compile this driver as a module, choose M here: the 155 To compile this driver as a module, choose M here: the
148 module will be called leds-clevo-mail. 156 module will be called leds-clevo-mail.
149 157
158config LEDS_PCA955X
159 tristate "LED Support for PCA955x I2C chips"
160 depends on LEDS_CLASS && I2C
161 help
162 This option enables support for LEDs connected to PCA955x
163 LED driver chips accessed via the I2C bus. Supported
164 devices include PCA9550, PCA9551, PCA9552, and PCA9553.
165
150comment "LED Triggers" 166comment "LED Triggers"
151 167
152config LEDS_TRIGGERS 168config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 973d626f5f4a..ff7982b44565 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -16,11 +16,13 @@ obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
16obj-$(CONFIG_LEDS_H1940) += leds-h1940.o 16obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
17obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o 17obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
18obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o 18obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
19obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
19obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 20obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
20obj-$(CONFIG_LEDS_CM_X270) += leds-cm-x270.o 21obj-$(CONFIG_LEDS_CM_X270) += leds-cm-x270.o
21obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o 22obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
22obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o 23obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
23obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 24obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
25obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
24 26
25# LED Triggers 27# LED Triggers
26obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o 28obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 0f242b3f09b6..f910eaffe3a6 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -111,16 +111,17 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
111 flags); 111 flags);
112 if (led_cdev->trigger->deactivate) 112 if (led_cdev->trigger->deactivate)
113 led_cdev->trigger->deactivate(led_cdev); 113 led_cdev->trigger->deactivate(led_cdev);
114 led_cdev->trigger = NULL;
114 led_set_brightness(led_cdev, LED_OFF); 115 led_set_brightness(led_cdev, LED_OFF);
115 } 116 }
116 if (trigger) { 117 if (trigger) {
117 write_lock_irqsave(&trigger->leddev_list_lock, flags); 118 write_lock_irqsave(&trigger->leddev_list_lock, flags);
118 list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs); 119 list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
119 write_unlock_irqrestore(&trigger->leddev_list_lock, flags); 120 write_unlock_irqrestore(&trigger->leddev_list_lock, flags);
121 led_cdev->trigger = trigger;
120 if (trigger->activate) 122 if (trigger->activate)
121 trigger->activate(led_cdev); 123 trigger->activate(led_cdev);
122 } 124 }
123 led_cdev->trigger = trigger;
124} 125}
125EXPORT_SYMBOL_GPL(led_trigger_set); 126EXPORT_SYMBOL_GPL(led_trigger_set);
126 127
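
The point of the reordering appears to be that both callbacks may look at led_cdev->trigger: clearing the pointer before the final led_set_brightness() and publishing it before activate() means each callback sees a pointer that matches the trigger it is actually attached to. A stripped-down sketch of the publish-before-callback ordering, with hypothetical struct and function names:

#include <stdio.h>
#include <stddef.h>

struct trigger;

struct led {
	struct trigger *trigger;	/* trigger currently driving this LED */
};

struct trigger {
	const char *name;
	void (*activate)(struct led *led);
};

static void demo_activate(struct led *led)
{
	/* The callback may dereference led->trigger, so it must already
	 * point at the trigger being attached. */
	printf("activated by %s\n", led->trigger->name);
}

static void led_attach(struct led *led, struct trigger *trig)
{
	led->trigger = trig;		/* publish first ... */
	if (trig->activate)
		trig->activate(led);	/* ... then let the trigger run */
}

int main(void)
{
	struct trigger t = { .name = "demo", .activate = demo_activate };
	struct led l = { .trigger = NULL };

	led_attach(&l, &t);
	return 0;
}
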
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 28db6c1444ed..52297c3ab246 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -37,7 +37,7 @@ static int __init pwmled_probe(struct platform_device *pdev)
37{ 37{
38 const struct gpio_led_platform_data *pdata; 38 const struct gpio_led_platform_data *pdata;
39 struct pwmled *leds; 39 struct pwmled *leds;
40 unsigned i; 40 int i;
41 int status; 41 int status;
42 42
43 pdata = pdev->dev.platform_data; 43 pdata = pdev->dev.platform_data;
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index bcec42230389..73c705021686 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -23,7 +23,8 @@
23/* 23/*
24 * Green led. 24 * Green led.
25 */ 25 */
26void h1940_greenled_set(struct led_classdev *led_dev, enum led_brightness value) 26static void h1940_greenled_set(struct led_classdev *led_dev,
27 enum led_brightness value)
27{ 28{
28 switch (value) { 29 switch (value) {
29 case LED_HALF: 30 case LED_HALF:
@@ -52,7 +53,8 @@ static struct led_classdev h1940_greenled = {
52/* 53/*
53 * Red led. 54 * Red led.
54 */ 55 */
55void h1940_redled_set(struct led_classdev *led_dev, enum led_brightness value) 56static void h1940_redled_set(struct led_classdev *led_dev,
57 enum led_brightness value)
56{ 58{
57 switch (value) { 59 switch (value) {
58 case LED_HALF: 60 case LED_HALF:
@@ -82,7 +84,8 @@ static struct led_classdev h1940_redled = {
82 * Blue led. 84 * Blue led.
83 * (it can only be blue flashing led) 85 * (it can only be blue flashing led)
84 */ 86 */
85void h1940_blueled_set(struct led_classdev *led_dev, enum led_brightness value) 87static void h1940_blueled_set(struct led_classdev *led_dev,
88 enum led_brightness value)
86{ 89{
87 if (value) { 90 if (value) {
88 /* flashing Blue */ 91 /* flashing Blue */
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
new file mode 100644
index 000000000000..4064d4f6b33b
--- /dev/null
+++ b/drivers/leds/leds-pca9532.c
@@ -0,0 +1,337 @@
1/*
2 * pca9532.c - 16-bit Led dimmer
3 *
4 * Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/i2c.h>
16#include <linux/leds.h>
17#include <linux/input.h>
18#include <linux/mutex.h>
19#include <linux/leds-pca9532.h>
20
21static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
22I2C_CLIENT_INSMOD_1(pca9532);
23
24#define PCA9532_REG_PSC(i) (0x2+(i)*2)
25#define PCA9532_REG_PWM(i) (0x3+(i)*2)
26#define PCA9532_REG_LS0 0x6
27#define LED_REG(led) ((led>>2)+PCA9532_REG_LS0)
28#define LED_NUM(led) (led & 0x3)
29
30#define ldev_to_led(c) container_of(c, struct pca9532_led, ldev)
31
32struct pca9532_data {
33 struct i2c_client *client;
34 struct pca9532_led leds[16];
35 struct mutex update_lock;
36 struct input_dev *idev;
37 u8 pwm[2];
38 u8 psc[2];
39};
40
41static int pca9532_probe(struct i2c_client *client,
42 const struct i2c_device_id *id);
43static int pca9532_remove(struct i2c_client *client);
44
45static const struct i2c_device_id pca9532_id[] = {
46 { "pca9532", 0 },
47 { }
48};
49
50MODULE_DEVICE_TABLE(i2c, pca9532_id);
51
52static struct i2c_driver pca9532_driver = {
53 .driver = {
54 .name = "pca9532",
55 },
56 .probe = pca9532_probe,
57 .remove = pca9532_remove,
58 .id_table = pca9532_id,
59};
60
 61/* We have two pwm/blinkers, but 16 possible LEDs to drive. Additionally,
 62 * the clever Thecus people are using one pwm to drive the beeper. So,
 63 * as a compromise we set one pwm to the average of the values requested
 64 * by all LEDs that are not ON/OFF.
 65 */
66static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink,
67 enum led_brightness value)
68{
69 int a = 0, b = 0, i = 0;
70 struct pca9532_data *data = i2c_get_clientdata(client);
71 for (i = 0; i < 16; i++) {
72 if (data->leds[i].type == PCA9532_TYPE_LED &&
73 data->leds[i].state == PCA9532_PWM0+pwm) {
74 a++;
75 b += data->leds[i].ldev.brightness;
76 }
77 }
78 if (a == 0) {
79 dev_err(&client->dev,
80 "fear of division by zero %d/%d, wanted %d\n",
81 b, a, value);
82 return -EINVAL;
83 }
84 b = b/a;
85 if (b > 0xFF)
86 return -EINVAL;
87 mutex_lock(&data->update_lock);
88 data->pwm[pwm] = b;
89 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
90 data->pwm[pwm]);
91 data->psc[pwm] = blink;
92 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
93 data->psc[pwm]);
94 mutex_unlock(&data->update_lock);
95 return 0;
96}
97
98/* Set LED routing */
99static void pca9532_setled(struct pca9532_led *led)
100{
101 struct i2c_client *client = led->client;
102 struct pca9532_data *data = i2c_get_clientdata(client);
103 char reg;
104
105 mutex_lock(&data->update_lock);
106 reg = i2c_smbus_read_byte_data(client, LED_REG(led->id));
107 /* zero led bits */
108 reg = reg & ~(0x3<<LED_NUM(led->id)*2);
109 /* set the new value */
110 reg = reg | (led->state << LED_NUM(led->id)*2);
111 i2c_smbus_write_byte_data(client, LED_REG(led->id), reg);
112 mutex_unlock(&data->update_lock);
113}
114
115static void pca9532_set_brightness(struct led_classdev *led_cdev,
116 enum led_brightness value)
117{
118 int err = 0;
119 struct pca9532_led *led = ldev_to_led(led_cdev);
120
121 if (value == LED_OFF)
122 led->state = PCA9532_OFF;
123 else if (value == LED_FULL)
124 led->state = PCA9532_ON;
125 else {
126 led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
127 err = pca9532_setpwm(led->client, 0, 0, value);
128 if (err)
129 return; /* XXX: led api doesn't allow error code? */
130 }
131 pca9532_setled(led);
132}
133
134static int pca9532_set_blink(struct led_classdev *led_cdev,
135 unsigned long *delay_on, unsigned long *delay_off)
136{
137 struct pca9532_led *led = ldev_to_led(led_cdev);
138 struct i2c_client *client = led->client;
139 int psc;
140
141 if (*delay_on == 0 && *delay_off == 0) {
 142 /* LED subsystem asks us to choose a blink rate */
143 *delay_on = 1000;
144 *delay_off = 1000;
145 }
146 if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6)
147 return -EINVAL;
148
149 /* Thecus specific: only use PSC/PWM 0 */
150 psc = (*delay_on * 152-1)/1000;
151 return pca9532_setpwm(client, 0, psc, led_cdev->brightness);
152}
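
The psc value follows from the chip's blink-period formula; the divisor of 152 in the driver suggests a period of (PSC + 1) / 152 seconds, which is an assumption read off the code rather than a statement from the datasheet. A quick check of the default 1000 ms case, using the same integer arithmetic as pca9532_set_blink():

#include <stdio.h>

int main(void)
{
	unsigned long delay_on = 1000;			/* default blink request, in ms */
	int psc = (delay_on * 152 - 1) / 1000;		/* same integer math as the driver */

	/* (1000 * 152 - 1) / 1000 = 151, so the assumed hardware period
	 * is (151 + 1) / 152 = 1 second. */
	printf("psc=%d\n", psc);
	return 0;
}
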
153
154int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code,
155 int value)
156{
157 struct pca9532_data *data = input_get_drvdata(dev);
158
 159 if (type != EV_SND || (code != SND_BELL && code != SND_TONE))
160 return -1;
161
162 /* XXX: allow different kind of beeps with psc/pwm modifications */
163 if (value > 1 && value < 32767)
164 data->pwm[1] = 127;
165 else
166 data->pwm[1] = 0;
167
 168 dev_info(&dev->dev, "setting beep to %d\n", data->pwm[1]);
169 mutex_lock(&data->update_lock);
170 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
171 data->pwm[1]);
172 mutex_unlock(&data->update_lock);
173
174 return 0;
175}
176
177static int pca9532_configure(struct i2c_client *client,
178 struct pca9532_data *data, struct pca9532_platform_data *pdata)
179{
180 int i, err = 0;
181
182 for (i = 0; i < 2; i++) {
183 data->pwm[i] = pdata->pwm[i];
184 data->psc[i] = pdata->psc[i];
185 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(i),
186 data->pwm[i]);
187 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(i),
188 data->psc[i]);
189 }
190
191 for (i = 0; i < 16; i++) {
192 struct pca9532_led *led = &data->leds[i];
193 struct pca9532_led *pled = &pdata->leds[i];
194 led->client = client;
195 led->id = i;
196 led->type = pled->type;
197 switch (led->type) {
198 case PCA9532_TYPE_NONE:
199 break;
200 case PCA9532_TYPE_LED:
201 led->state = pled->state;
202 led->name = pled->name;
203 led->ldev.name = led->name;
204 led->ldev.brightness = LED_OFF;
205 led->ldev.brightness_set = pca9532_set_brightness;
206 led->ldev.blink_set = pca9532_set_blink;
207 if (led_classdev_register(&client->dev,
208 &led->ldev) < 0) {
209 dev_err(&client->dev,
210 "couldn't register LED %s\n",
211 led->name);
212 goto exit;
213 }
214 pca9532_setled(led);
215 break;
216 case PCA9532_TYPE_N2100_BEEP:
217 BUG_ON(data->idev);
218 led->state = PCA9532_PWM1;
219 pca9532_setled(led);
220 data->idev = input_allocate_device();
221 if (data->idev == NULL) {
222 err = -ENOMEM;
223 goto exit;
224 }
225 data->idev->name = pled->name;
226 data->idev->phys = "i2c/pca9532";
227 data->idev->id.bustype = BUS_HOST;
228 data->idev->id.vendor = 0x001f;
229 data->idev->id.product = 0x0001;
230 data->idev->id.version = 0x0100;
231 data->idev->evbit[0] = BIT_MASK(EV_SND);
232 data->idev->sndbit[0] = BIT_MASK(SND_BELL) |
233 BIT_MASK(SND_TONE);
234 data->idev->event = pca9532_event;
235 input_set_drvdata(data->idev, data);
236 err = input_register_device(data->idev);
237 if (err) {
238 input_free_device(data->idev);
239 data->idev = NULL;
240 goto exit;
241 }
242 break;
243 }
244 }
245 return 0;
246
247exit:
248 if (i > 0)
249 for (i = i - 1; i >= 0; i--)
250 switch (data->leds[i].type) {
251 case PCA9532_TYPE_NONE:
252 break;
253 case PCA9532_TYPE_LED:
254 led_classdev_unregister(&data->leds[i].ldev);
255 break;
256 case PCA9532_TYPE_N2100_BEEP:
257 if (data->idev != NULL) {
258 input_unregister_device(data->idev);
259 input_free_device(data->idev);
260 data->idev = NULL;
261 }
262 break;
263 }
264
265 return err;
266
267}
268
269static int pca9532_probe(struct i2c_client *client,
270 const struct i2c_device_id *id)
271{
272 struct pca9532_data *data = i2c_get_clientdata(client);
273 struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data;
274
275 if (!i2c_check_functionality(client->adapter,
276 I2C_FUNC_SMBUS_BYTE_DATA))
277 return -EIO;
278
279 data = kzalloc(sizeof(struct pca9532_data), GFP_KERNEL);
280 if (!data)
281 return -ENOMEM;
282
283 dev_info(&client->dev, "setting platform data\n");
284 i2c_set_clientdata(client, data);
285 data->client = client;
286 mutex_init(&data->update_lock);
287
288 if (pca9532_pdata == NULL)
289 return -EIO;
290
291 pca9532_configure(client, data, pca9532_pdata);
292 return 0;
293
294}
295
296static int pca9532_remove(struct i2c_client *client)
297{
298 struct pca9532_data *data = i2c_get_clientdata(client);
299 int i;
300 for (i = 0; i < 16; i++)
301 switch (data->leds[i].type) {
302 case PCA9532_TYPE_NONE:
303 break;
304 case PCA9532_TYPE_LED:
305 led_classdev_unregister(&data->leds[i].ldev);
306 break;
307 case PCA9532_TYPE_N2100_BEEP:
308 if (data->idev != NULL) {
309 input_unregister_device(data->idev);
310 input_free_device(data->idev);
311 data->idev = NULL;
312 }
313 break;
314 }
315
316 kfree(data);
317 i2c_set_clientdata(client, NULL);
318 return 0;
319}
320
321static int __init pca9532_init(void)
322{
323 return i2c_add_driver(&pca9532_driver);
324}
325
326static void __exit pca9532_exit(void)
327{
328 i2c_del_driver(&pca9532_driver);
329}
330
331MODULE_AUTHOR("Riku Voipio <riku.voipio@movial.fi>");
332MODULE_LICENSE("GPL");
333MODULE_DESCRIPTION("PCA 9532 LED dimmer");
334
335module_init(pca9532_init);
336module_exit(pca9532_exit);
337
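
The driver expects platform data describing all 16 outputs; the fields used by pca9532_configure() above are the per-output entries (name, type, initial state) plus the two pwm/psc pairs. A hedged board-file sketch follows; every name and value here is illustrative, and the exact struct layout is whatever <linux/leds-pca9532.h> declares (this assumes leds is the in-place 16-entry array the driver indexes):

#include <linux/i2c.h>
#include <linux/leds-pca9532.h>

/* Illustrative board-side description: one plain LED on output 0,
 * the N2100-style beeper on output 1, everything else unused. */
static struct pca9532_platform_data demo_pca9532_data = {
	.leds = {
		[0] = {
			.name  = "demo:power",
			.type  = PCA9532_TYPE_LED,
			.state = PCA9532_OFF,
		},
		[1] = {
			.name  = "beeper",
			.type  = PCA9532_TYPE_N2100_BEEP,
		},
	},
	.pwm = { 0, 0 },
	.psc = { 0, 0 },
};

static struct i2c_board_info demo_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("pca9532", 0x60),
		.platform_data = &demo_pca9532_data,
	},
};
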
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
new file mode 100644
index 000000000000..146c06972863
--- /dev/null
+++ b/drivers/leds/leds-pca955x.c
@@ -0,0 +1,384 @@
1/*
2 * Copyright 2007-2008 Extreme Engineering Solutions, Inc.
3 *
4 * Author: Nate Case <ncase@xes-inc.com>
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 *
10 * LED driver for various PCA955x I2C LED drivers
11 *
12 * Supported devices:
13 *
14 * Device Description 7-bit slave address
15 * ------ ----------- -------------------
16 * PCA9550 2-bit driver 0x60 .. 0x61
17 * PCA9551 8-bit driver 0x60 .. 0x67
18 * PCA9552 16-bit driver 0x60 .. 0x67
19 * PCA9553/01 4-bit driver 0x62
20 * PCA9553/02 4-bit driver 0x63
21 *
22 * Philips PCA955x LED driver chips follow a register map as shown below:
23 *
24 * Control Register Description
25 * ---------------- -----------
26 * 0x0 Input register 0
27 * ..
28 * NUM_INPUT_REGS - 1 Last Input register X
29 *
30 * NUM_INPUT_REGS Frequency prescaler 0
31 * NUM_INPUT_REGS + 1 PWM register 0
32 * NUM_INPUT_REGS + 2 Frequency prescaler 1
33 * NUM_INPUT_REGS + 3 PWM register 1
34 *
35 * NUM_INPUT_REGS + 4 LED selector 0
36 * NUM_INPUT_REGS + 4
37 * + NUM_LED_REGS - 1 Last LED selector
38 *
39 * where NUM_INPUT_REGS and NUM_LED_REGS vary depending on how many
40 * bits the chip supports.
41 */
42
43#include <linux/module.h>
44#include <linux/delay.h>
45#include <linux/string.h>
46#include <linux/ctype.h>
47#include <linux/leds.h>
48#include <linux/err.h>
49#include <linux/i2c.h>
50#include <linux/workqueue.h>
51
52/* LED select registers determine the source that drives LED outputs */
53#define PCA955X_LS_LED_ON 0x0 /* Output LOW */
54#define PCA955X_LS_LED_OFF 0x1 /* Output HI-Z */
55#define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */
56#define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */
57
58enum pca955x_type {
59 pca9550,
60 pca9551,
61 pca9552,
62 pca9553,
63};
64
65struct pca955x_chipdef {
66 int bits;
67 u8 slv_addr; /* 7-bit slave address mask */
68 int slv_addr_shift; /* Number of bits to ignore */
69};
70
71static struct pca955x_chipdef pca955x_chipdefs[] = {
72 [pca9550] = {
73 .bits = 2,
74 .slv_addr = /* 110000x */ 0x60,
75 .slv_addr_shift = 1,
76 },
77 [pca9551] = {
78 .bits = 8,
79 .slv_addr = /* 1100xxx */ 0x60,
80 .slv_addr_shift = 3,
81 },
82 [pca9552] = {
83 .bits = 16,
84 .slv_addr = /* 1100xxx */ 0x60,
85 .slv_addr_shift = 3,
86 },
87 [pca9553] = {
88 .bits = 4,
89 .slv_addr = /* 110001x */ 0x62,
90 .slv_addr_shift = 1,
91 },
92};
93
94static const struct i2c_device_id pca955x_id[] = {
95 { "pca9550", pca9550 },
96 { "pca9551", pca9551 },
97 { "pca9552", pca9552 },
98 { "pca9553", pca9553 },
99 { }
100};
101MODULE_DEVICE_TABLE(i2c, pca955x_id);
102
103struct pca955x_led {
104 struct pca955x_chipdef *chipdef;
105 struct i2c_client *client;
106 struct work_struct work;
107 spinlock_t lock;
108 enum led_brightness brightness;
109 struct led_classdev led_cdev;
110 int led_num; /* 0 .. 15 potentially */
111 char name[32];
112};
113
114/* 8 bits per input register */
115static inline int pca95xx_num_input_regs(int bits)
116{
117 return (bits + 7) / 8;
118}
119
120/* 4 bits per LED selector register */
121static inline int pca95xx_num_led_regs(int bits)
122{
123 return (bits + 3) / 4;
124}
125
126/*
127 * Return an LED selector register value based on an existing one, with
128 * the appropriate 2-bit state value set for the given LED number (0-3).
129 */
130static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
131{
132 return (oldval & (~(0x3 << (led_num << 1)))) |
133 ((state & 0x3) << (led_num << 1));
134}
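
Each LED selector register packs four LEDs at two bits apiece, so updating one LED means masking out its two bits and OR-ing in the new state. A standalone check of the helper above; the selector constants are copied from the defines earlier in this file:

#include <assert.h>
#include <stdint.h>

#define PCA955X_LS_LED_ON	0x0	/* output LOW */
#define PCA955X_LS_LED_OFF	0x1	/* output HI-Z */

static inline uint8_t pca955x_ledsel(uint8_t oldval, int led_num, int state)
{
	return (oldval & (~(0x3 << (led_num << 1)))) |
		((state & 0x3) << (led_num << 1));
}

int main(void)
{
	/* 0x55 = all four LEDs in one selector set to OFF (0b01010101). */
	uint8_t ls = 0x55;

	/* Turn LED 2 of this selector on: bits 5:4 go from 01 to 00. */
	ls = pca955x_ledsel(ls, 2, PCA955X_LS_LED_ON);
	assert(ls == 0x45);

	return 0;
}
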
135
136/*
137 * Write to frequency prescaler register, used to program the
138 * period of the PWM output. period = (PSCx + 1) / 38
139 */
140static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
141{
142 struct pca955x_led *pca955x = i2c_get_clientdata(client);
143
144 i2c_smbus_write_byte_data(client,
145 pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
146 val);
147}
148
149/*
150 * Write to PWM register, which determines the duty cycle of the
151 * output. LED is OFF when the count is less than the value of this
152 * register, and ON when it is greater. If PWMx == 0, LED is always OFF.
153 *
154 * Duty cycle is (256 - PWMx) / 256
155 */
156static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
157{
158 struct pca955x_led *pca955x = i2c_get_clientdata(client);
159
160 i2c_smbus_write_byte_data(client,
161 pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
162 val);
163}
164
165/*
166 * Write to LED selector register, which determines the source that
167 * drives the LED output.
168 */
169static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
170{
171 struct pca955x_led *pca955x = i2c_get_clientdata(client);
172
173 i2c_smbus_write_byte_data(client,
174 pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
175 val);
176}
177
178/*
179 * Read the LED selector register, which determines the source that
180 * drives the LED output.
181 */
182static u8 pca955x_read_ls(struct i2c_client *client, int n)
183{
184 struct pca955x_led *pca955x = i2c_get_clientdata(client);
185
186 return (u8) i2c_smbus_read_byte_data(client,
187 pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
188}
189
190static void pca955x_led_work(struct work_struct *work)
191{
192 struct pca955x_led *pca955x;
193 u8 ls;
194 int chip_ls; /* which LSx to use (0-3 potentially) */
195 int ls_led; /* which set of bits within LSx to use (0-3) */
196
197 pca955x = container_of(work, struct pca955x_led, work);
198 chip_ls = pca955x->led_num / 4;
199 ls_led = pca955x->led_num % 4;
200
201 ls = pca955x_read_ls(pca955x->client, chip_ls);
202
203 switch (pca955x->brightness) {
204 case LED_FULL:
205 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
206 break;
207 case LED_OFF:
208 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_OFF);
209 break;
210 case LED_HALF:
211 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK0);
212 break;
213 default:
214 /*
215 * Use PWM1 for all other values. This has the unwanted
216 * side effect of making all LEDs on the chip share the
217 * same brightness level if set to a value other than
218 * OFF, HALF, or FULL. But, this is probably better than
219 * just turning off for all other values.
220 */
221 pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
222 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
223 break;
224 }
225
226 pca955x_write_ls(pca955x->client, chip_ls, ls);
227}
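
For the intermediate-brightness path the work function programs PWM1 with 255 - brightness and routes the LED to BLINK1; combined with the duty-cycle formula in the comment above, (256 - PWMx) / 256, a requested brightness b drives the output for roughly (b + 1) / 256 of each cycle. For example, brightness 63 gives PWM1 = 255 - 63 = 192 and a duty cycle of (256 - 192) / 256 = 25%.
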
228
229void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
230{
231 struct pca955x_led *pca955x;
232
233 pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
234
235 spin_lock(&pca955x->lock);
236 pca955x->brightness = value;
237
238 /*
239 * Must use workqueue for the actual I/O since I2C operations
240 * can sleep.
241 */
242 schedule_work(&pca955x->work);
243
244 spin_unlock(&pca955x->lock);
245}
246
247static int __devinit pca955x_probe(struct i2c_client *client,
248 const struct i2c_device_id *id)
249{
250 struct pca955x_led *pca955x;
251 int i;
252 int err = -ENODEV;
253 struct pca955x_chipdef *chip;
254 struct i2c_adapter *adapter;
255 struct led_platform_data *pdata;
256
257 chip = &pca955x_chipdefs[id->driver_data];
258 adapter = to_i2c_adapter(client->dev.parent);
259 pdata = client->dev.platform_data;
260
261 /* Make sure the slave address / chip type combo given is possible */
262 if ((client->addr & ~((1 << chip->slv_addr_shift) - 1)) !=
263 chip->slv_addr) {
264 dev_err(&client->dev, "invalid slave address %02x\n",
265 client->addr);
266 return -ENODEV;
267 }
268
269 printk(KERN_INFO "leds-pca955x: Using %s %d-bit LED driver at "
270 "slave address 0x%02x\n",
271 id->name, chip->bits, client->addr);
272
273 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
274 return -EIO;
275
276 if (pdata) {
277 if (pdata->num_leds != chip->bits) {
278 dev_err(&client->dev, "board info claims %d LEDs"
279 " on a %d-bit chip\n",
280 pdata->num_leds, chip->bits);
281 return -ENODEV;
282 }
283 }
284
285 for (i = 0; i < chip->bits; i++) {
286 pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL);
287 if (!pca955x) {
288 err = -ENOMEM;
289 goto exit;
290 }
291
292 pca955x->chipdef = chip;
293 pca955x->client = client;
294 pca955x->led_num = i;
295 /* Platform data can specify LED names and default triggers */
296 if (pdata) {
297 if (pdata->leds[i].name)
298 snprintf(pca955x->name, 32, "pca955x:%s",
299 pdata->leds[i].name);
300 if (pdata->leds[i].default_trigger)
301 pca955x->led_cdev.default_trigger =
302 pdata->leds[i].default_trigger;
303 } else {
304 snprintf(pca955x->name, 32, "pca955x:%d", i);
305 }
306 spin_lock_init(&pca955x->lock);
307
308 pca955x->led_cdev.name = pca955x->name;
309 pca955x->led_cdev.brightness_set =
310 pca955x_led_set;
311
312 /*
313 * Client data is a pointer to the _first_ pca955x_led
314 * struct
315 */
316 if (i == 0)
317 i2c_set_clientdata(client, pca955x);
318
319 INIT_WORK(&(pca955x->work), pca955x_led_work);
320
321 led_classdev_register(&client->dev, &(pca955x->led_cdev));
322 }
323
324 /* Turn off LEDs */
325 for (i = 0; i < pca95xx_num_led_regs(chip->bits); i++)
326 pca955x_write_ls(client, i, 0x55);
327
328 /* PWM0 is used for half brightness or 50% duty cycle */
329 pca955x_write_pwm(client, 0, 255-LED_HALF);
330
331 /* PWM1 is used for variable brightness, default to OFF */
332 pca955x_write_pwm(client, 1, 0);
333
334 /* Set to fast frequency so we do not see flashing */
335 pca955x_write_psc(client, 0, 0);
336 pca955x_write_psc(client, 1, 0);
337
338 return 0;
339exit:
340 return err;
341}
342
343static int __devexit pca955x_remove(struct i2c_client *client)
344{
345 struct pca955x_led *pca955x = i2c_get_clientdata(client);
346 int leds = pca955x->chipdef->bits;
347 int i;
348
349 for (i = 0; i < leds; i++) {
350 led_classdev_unregister(&(pca955x->led_cdev));
351 cancel_work_sync(&(pca955x->work));
352 kfree(pca955x);
353 pca955x = pca955x + 1;
354 }
355
356 return 0;
357}
358
359static struct i2c_driver pca955x_driver = {
360 .driver = {
361 .name = "leds-pca955x",
362 .owner = THIS_MODULE,
363 },
364 .probe = pca955x_probe,
365 .remove = __devexit_p(pca955x_remove),
366 .id_table = pca955x_id,
367};
368
369static int __init pca955x_leds_init(void)
370{
371 return i2c_add_driver(&pca955x_driver);
372}
373
374static void __exit pca955x_leds_exit(void)
375{
376 i2c_del_driver(&pca955x_driver);
377}
378
379module_init(pca955x_leds_init);
380module_exit(pca955x_leds_exit);
381
382MODULE_AUTHOR("Nate Case <ncase@xes-inc.com>");
383MODULE_DESCRIPTION("PCA955x LED driver");
384MODULE_LICENSE("GPL v2");
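
A board wires this driver up through ordinary I2C board info plus an optional led_platform_data whose num_leds must match the chip width; the probe above reads .name and .default_trigger from each entry. A hedged sketch for a 16-bit PCA9552 at slave address 0x60; the board and LED names are made up, and it assumes the per-LED entries are struct led_info from <linux/leds.h>:

#include <linux/i2c.h>
#include <linux/leds.h>

/* Illustrative: name the first two outputs and give them triggers;
 * the remaining entries are left untouched by the probe above. */
static struct led_info demo_pca9552_leds[16] = {
	[0] = { .name = "status", .default_trigger = "heartbeat" },
	[1] = { .name = "disk",   .default_trigger = "ide-disk"  },
};

static struct led_platform_data demo_pca9552_data = {
	.num_leds = 16,
	.leds     = demo_pca9552_leds,
};

static struct i2c_board_info demo_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("pca9552", 0x60),
		.platform_data = &demo_pca9552_data,
	},
};
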
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 1a8de57289eb..37344aaee22f 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -98,16 +98,20 @@ static u32 lg_get_features(struct virtio_device *vdev)
98 return features; 98 return features;
99} 99}
100 100
101static void lg_set_features(struct virtio_device *vdev, u32 features) 101static void lg_finalize_features(struct virtio_device *vdev)
102{ 102{
103 unsigned int i; 103 unsigned int i, bits;
104 struct lguest_device_desc *desc = to_lgdev(vdev)->desc; 104 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
105 /* Second half of bitmap is features we accept. */ 105 /* Second half of bitmap is features we accept. */
106 u8 *out_features = lg_features(desc) + desc->feature_len; 106 u8 *out_features = lg_features(desc) + desc->feature_len;
107 107
108 /* Give virtio_ring a chance to accept features. */
109 vring_transport_features(vdev);
110
108 memset(out_features, 0, desc->feature_len); 111 memset(out_features, 0, desc->feature_len);
109 for (i = 0; i < min(desc->feature_len * 8, 32); i++) { 112 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
110 if (features & (1 << i)) 113 for (i = 0; i < bits; i++) {
114 if (test_bit(i, vdev->features))
111 out_features[i / 8] |= (1 << (i % 8)); 115 out_features[i / 8] |= (1 << (i % 8));
112 } 116 }
113} 117}
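
The old hook copied bits out of a 32-bit argument; the new one walks the vdev->features bitmap and repacks each accepted bit i into byte i/8, bit i%8 of the descriptor. A standalone sketch of that packing, with hypothetical feature numbers and plain arrays instead of the lguest descriptor:

#include <stdio.h>
#include <string.h>

/* Pack a list of accepted feature-bit numbers into a byte array,
 * bit i landing in byte i/8, position i%8, which matches the layout
 * written to out_features[] above. */
static void pack_features(const int *accepted, int n, unsigned char *out,
			  int out_len)
{
	int i;

	memset(out, 0, out_len);
	for (i = 0; i < n; i++) {
		int bit = accepted[i];

		if (bit / 8 < out_len)
			out[bit / 8] |= 1 << (bit % 8);
	}
}

int main(void)
{
	int accepted[] = { 0, 5, 9 };	/* hypothetical feature numbers */
	unsigned char out[2];

	pack_features(accepted, 3, out, sizeof(out));
	printf("%02x %02x\n", out[0], out[1]);	/* prints "21 02" */
	return 0;
}
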
@@ -297,7 +301,7 @@ static void lg_del_vq(struct virtqueue *vq)
297/* The ops structure which hooks everything together. */ 301/* The ops structure which hooks everything together. */
298static struct virtio_config_ops lguest_config_ops = { 302static struct virtio_config_ops lguest_config_ops = {
299 .get_features = lg_get_features, 303 .get_features = lg_get_features,
300 .set_features = lg_set_features, 304 .finalize_features = lg_finalize_features,
301 .get = lg_get, 305 .get = lg_get,
302 .set = lg_set, 306 .set = lg_set,
303 .get_status = lg_get_status, 307 .get_status = lg_get_status,
diff --git a/drivers/media/video/pvrusb2/pvrusb2-dvb.c b/drivers/media/video/pvrusb2/pvrusb2-dvb.c
index 6ec4bf81fc7f..77b3c3385066 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-dvb.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/freezer.h> 22#include <linux/freezer.h>
23#include <linux/mm.h>
23#include "dvbdev.h" 24#include "dvbdev.h"
24#include "pvrusb2-debug.h" 25#include "pvrusb2-debug.h"
25#include "pvrusb2-hdw-internal.h" 26#include "pvrusb2-hdw-internal.h"
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.c b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
index 05a1376405e7..b4824782d858 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-ioread.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
@@ -22,6 +22,7 @@
22#include "pvrusb2-debug.h" 22#include "pvrusb2-debug.h"
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/mm.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/mutex.h> 27#include <linux/mutex.h>
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 7388d0cee3d4..5646a6a32939 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/version.h> 15#include <linux/version.h>
16#include <linux/mm.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/usb.h> 19#include <linux/usb.h>
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index 0a88c44ace00..b7b05842cf28 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/mm.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21 22
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 489d7c5c4965..8774c670e668 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -243,29 +243,41 @@ static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
243 243
244 /* create user entries for this device */ 244 /* create user entries for this device */
245 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); 245 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
246 if (tmp && (tmp != i2o_dev)) 246 if (tmp && (tmp != i2o_dev)) {
247 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, 247 rc = sysfs_create_link(&i2o_dev->device.kobj,
248 "user"); 248 &tmp->device.kobj, "user");
249 if (rc)
250 goto unreg_dev;
251 }
249 252
250 /* create user entries refering to this device */ 253 /* create user entries refering to this device */
251 list_for_each_entry(tmp, &c->devices, list) 254 list_for_each_entry(tmp, &c->devices, list)
252 if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid) 255 if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
253 && (tmp != i2o_dev)) 256 && (tmp != i2o_dev)) {
254 sysfs_create_link(&tmp->device.kobj, 257 rc = sysfs_create_link(&tmp->device.kobj,
255 &i2o_dev->device.kobj, "user"); 258 &i2o_dev->device.kobj, "user");
259 if (rc)
260 goto rmlink1;
261 }
256 262
257 /* create parent entries for this device */ 263 /* create parent entries for this device */
258 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); 264 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
259 if (tmp && (tmp != i2o_dev)) 265 if (tmp && (tmp != i2o_dev)) {
260 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, 266 rc = sysfs_create_link(&i2o_dev->device.kobj,
261 "parent"); 267 &tmp->device.kobj, "parent");
268 if (rc)
269 goto rmlink1;
270 }
262 271
263 /* create parent entries refering to this device */ 272 /* create parent entries refering to this device */
264 list_for_each_entry(tmp, &c->devices, list) 273 list_for_each_entry(tmp, &c->devices, list)
265 if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) 274 if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
266 && (tmp != i2o_dev)) 275 && (tmp != i2o_dev)) {
267 sysfs_create_link(&tmp->device.kobj, 276 rc = sysfs_create_link(&tmp->device.kobj,
268 &i2o_dev->device.kobj, "parent"); 277 &i2o_dev->device.kobj, "parent");
278 if (rc)
279 goto rmlink2;
280 }
269 281
270 i2o_driver_notify_device_add_all(i2o_dev); 282 i2o_driver_notify_device_add_all(i2o_dev);
271 283
@@ -273,6 +285,24 @@ static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
273 285
274 return 0; 286 return 0;
275 287
288rmlink2:
 289 /* If link creation failed halfway, we walk the whole list to clean
 290 * up. Removing a link that was never created is harmless, since
 291 * sysfs_remove_link will take care of it.
 292 */
293 list_for_each_entry(tmp, &c->devices, list) {
294 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
295 sysfs_remove_link(&tmp->device.kobj, "parent");
296 }
297 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
298rmlink1:
299 list_for_each_entry(tmp, &c->devices, list)
300 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
301 sysfs_remove_link(&tmp->device.kobj, "user");
302 sysfs_remove_link(&i2o_dev->device.kobj, "user");
303unreg_dev:
304 list_del(&i2o_dev->list);
305 device_unregister(&i2o_dev->device);
276err: 306err:
277 kfree(i2o_dev); 307 kfree(i2o_dev);
278 return rc; 308 return rc;
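
The error path follows the usual kernel unwind idiom: each later failure jumps to a label that undoes everything set up before it, and the labels run in reverse order of setup. A stripped-down userspace sketch of the same shape, with hypothetical resources:

#include <stdlib.h>

static int setup_three(void)
{
	char *a, *b, *c;
	int rc = -1;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto free_a;
	c = malloc(16);
	if (!c)
		goto free_b;

	/* ... use a, b and c ... */
	free(c);
	free(b);
	free(a);
	return 0;

free_b:			/* later failures unwind earlier setup, in reverse */
	free(b);
free_a:
	free(a);
err:
	return rc;
}

int main(void)
{
	return setup_three() ? 1 : 0;
}
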
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 260bade0a5ec..883e7ea31de2 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -5,6 +5,10 @@
5menu "Multifunction device drivers" 5menu "Multifunction device drivers"
6 depends on HAS_IOMEM 6 depends on HAS_IOMEM
7 7
8config MFD_CORE
9 tristate
10 default n
11
8config MFD_SM501 12config MFD_SM501
9 tristate "Support for Silicon Motion SM501" 13 tristate "Support for Silicon Motion SM501"
10 ---help--- 14 ---help---
@@ -15,6 +19,14 @@ config MFD_SM501
15 interface. The device may be connected by PCI or local bus with 19 interface. The device may be connected by PCI or local bus with
16 varying functions enabled. 20 varying functions enabled.
17 21
22config MFD_SM501_GPIO
23 bool "Export GPIO via GPIO layer"
24 depends on MFD_SM501 && HAVE_GPIO_LIB
25 ---help---
26 This option uses the gpio library layer to export the 64 GPIO
27 lines on the SM501. The platform data is used to supply the
28 base number for the first GPIO line to register.
29
18config MFD_ASIC3 30config MFD_ASIC3
19 bool "Support for Compaq ASIC3" 31 bool "Support for Compaq ASIC3"
20 depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM 32 depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM
@@ -24,7 +36,7 @@ config MFD_ASIC3
24 36
25config HTC_EGPIO 37config HTC_EGPIO
26 bool "HTC EGPIO support" 38 bool "HTC EGPIO support"
27 depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM 39 depends on GENERIC_HARDIRQS && GPIOLIB && ARM
28 help 40 help
29 This driver supports the CPLD egpio chip present on 41 This driver supports the CPLD egpio chip present on
30 several HTC phones. It provides basic support for input 42 several HTC phones. It provides basic support for input
@@ -38,6 +50,13 @@ config HTC_PASIC3
38 HTC Magician devices, respectively. Actual functionality is 50 HTC Magician devices, respectively. Actual functionality is
39 handled by the leds-pasic3 and ds1wm drivers. 51 handled by the leds-pasic3 and ds1wm drivers.
40 52
53config MFD_TC6393XB
54 bool "Support Toshiba TC6393XB"
55 depends on GPIOLIB && ARM
56 select MFD_CORE
57 help
58 Support for Toshiba Mobile IO Controller TC6393XB
59
41endmenu 60endmenu
42 61
43menu "Multimedia Capabilities Port drivers" 62menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index eef4e26807df..33daa2f45dd8 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_MFD_ASIC3) += asic3.o
8obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o 8obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
9obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o 9obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
10 10
11obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o
12
13obj-$(CONFIG_MFD_CORE) += mfd-core.o
14
11obj-$(CONFIG_MCP) += mcp-core.o 15obj-$(CONFIG_MCP) += mcp-core.o
12obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o 16obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
13obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o 17obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 8872cc077519..6be43172dc65 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -318,6 +318,8 @@ static int __init egpio_probe(struct platform_device *pdev)
318 ei->chip[i].dev = &(pdev->dev); 318 ei->chip[i].dev = &(pdev->dev);
319 chip = &(ei->chip[i].chip); 319 chip = &(ei->chip[i].chip);
320 chip->label = "htc-egpio"; 320 chip->label = "htc-egpio";
321 chip->dev = &pdev->dev;
322 chip->owner = THIS_MODULE;
321 chip->get = egpio_get; 323 chip->get = egpio_get;
322 chip->set = egpio_set; 324 chip->set = egpio_set;
323 chip->direction_input = egpio_direction_input; 325 chip->direction_input = egpio_direction_input;
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 633cbba072f0..91b294dcc133 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -238,6 +238,8 @@ static int pasic3_remove(struct platform_device *pdev)
238 return 0; 238 return 0;
239} 239}
240 240
241MODULE_ALIAS("platform:pasic3");
242
241static struct platform_driver pasic3_driver = { 243static struct platform_driver pasic3_driver = {
242 .driver = { 244 .driver = {
243 .name = "pasic3", 245 .name = "pasic3",
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 1eab7cffceaa..b5272b5ce3fa 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -242,6 +242,8 @@ static int mcp_sa11x0_resume(struct platform_device *dev)
242/* 242/*
243 * The driver for the SA11x0 MCP port. 243 * The driver for the SA11x0 MCP port.
244 */ 244 */
245MODULE_ALIAS("platform:sa11x0-mcp");
246
245static struct platform_driver mcp_sa11x0_driver = { 247static struct platform_driver mcp_sa11x0_driver = {
246 .probe = mcp_sa11x0_probe, 248 .probe = mcp_sa11x0_probe,
247 .remove = mcp_sa11x0_remove, 249 .remove = mcp_sa11x0_remove,
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
new file mode 100644
index 000000000000..0454be4266c1
--- /dev/null
+++ b/drivers/mfd/mfd-core.c
@@ -0,0 +1,114 @@
1/*
2 * drivers/mfd/mfd-core.c
3 *
4 * core MFD support
5 * Copyright (c) 2006 Ian Molton
6 * Copyright (c) 2007,2008 Dmitry Baryshkov
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/platform_device.h>
16#include <linux/mfd/core.h>
17
18static int mfd_add_device(struct platform_device *parent,
19 const struct mfd_cell *cell,
20 struct resource *mem_base,
21 int irq_base)
22{
23 struct resource res[cell->num_resources];
24 struct platform_device *pdev;
25 int ret = -ENOMEM;
26 int r;
27
28 pdev = platform_device_alloc(cell->name, parent->id);
29 if (!pdev)
30 goto fail_alloc;
31
32 pdev->dev.parent = &parent->dev;
33
34 ret = platform_device_add_data(pdev,
35 cell, sizeof(struct mfd_cell));
36 if (ret)
37 goto fail_device;
38
39 memset(res, 0, sizeof(res));
40 for (r = 0; r < cell->num_resources; r++) {
41 res[r].name = cell->resources[r].name;
42 res[r].flags = cell->resources[r].flags;
43
44 /* Find out base to use */
45 if (cell->resources[r].flags & IORESOURCE_MEM) {
46 res[r].parent = mem_base;
47 res[r].start = mem_base->start +
48 cell->resources[r].start;
49 res[r].end = mem_base->start +
50 cell->resources[r].end;
51 } else if (cell->resources[r].flags & IORESOURCE_IRQ) {
52 res[r].start = irq_base +
53 cell->resources[r].start;
54 res[r].end = irq_base +
55 cell->resources[r].end;
56 } else {
57 res[r].parent = cell->resources[r].parent;
58 res[r].start = cell->resources[r].start;
59 res[r].end = cell->resources[r].end;
60 }
61 }
62
63 platform_device_add_resources(pdev, res, cell->num_resources);
64
65 ret = platform_device_add(pdev);
66 if (ret)
67 goto fail_device;
68
69 return 0;
70
71/* platform_device_del(pdev); */
72fail_device:
73 platform_device_put(pdev);
74fail_alloc:
75 return ret;
76}
77
78int mfd_add_devices(
79 struct platform_device *parent,
80 const struct mfd_cell *cells, int n_devs,
81 struct resource *mem_base,
82 int irq_base)
83{
84 int i;
85 int ret = 0;
86
87 for (i = 0; i < n_devs; i++) {
88 ret = mfd_add_device(parent, cells + i, mem_base, irq_base);
89 if (ret)
90 break;
91 }
92
93 if (ret)
94 mfd_remove_devices(parent);
95
96 return ret;
97}
98EXPORT_SYMBOL(mfd_add_devices);
99
100static int mfd_remove_devices_fn(struct device *dev, void *unused)
101{
102 platform_device_unregister(
103 container_of(dev, struct platform_device, dev));
104 return 0;
105}
106
107void mfd_remove_devices(struct platform_device *parent)
108{
109 device_for_each_child(&parent->dev, NULL, mfd_remove_devices_fn);
110}
111EXPORT_SYMBOL(mfd_remove_devices);
112
113MODULE_LICENSE("GPL");
114MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
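
A multifunction parent driver would describe each subdevice as an mfd_cell (a name plus resources relative to the parent's memory window and IRQ base) and hand the whole array to mfd_add_devices() from its own probe. A hedged sketch with made-up device names; only mfd_add_devices() and the cell fields actually used above (name, num_resources, resources) come from the code:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>

/* Hypothetical subdevice: 0x100 bytes of registers at offset 0x400
 * inside the parent's window, plus the parent's second interrupt. */
static struct resource demo_cell_resources[] = {
	{
		.start = 0x400,
		.end   = 0x4ff,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = 1,
		.end   = 1,
		.flags = IORESOURCE_IRQ,
	},
};

static const struct mfd_cell demo_cells[] = {
	{
		.name          = "demo-subdev",
		.num_resources = ARRAY_SIZE(demo_cell_resources),
		.resources     = demo_cell_resources,
	},
};

/* Called from the parent's platform probe: iomem is the parent's
 * memory resource, irq_base the first IRQ handed to the children. */
static int demo_attach_cells(struct platform_device *pdev,
			     struct resource *iomem, int irq_base)
{
	return mfd_add_devices(pdev, demo_cells,
			       ARRAY_SIZE(demo_cells), iomem, irq_base);
}
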
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 2fe64734d8af..7aebad4c06ff 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -19,6 +19,7 @@
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/i2c-gpio.h>
22 23
23#include <linux/sm501.h> 24#include <linux/sm501.h>
24#include <linux/sm501-regs.h> 25#include <linux/sm501-regs.h>
@@ -31,10 +32,37 @@ struct sm501_device {
31 struct platform_device pdev; 32 struct platform_device pdev;
32}; 33};
33 34
35struct sm501_gpio;
36
37#ifdef CONFIG_MFD_SM501_GPIO
38#include <linux/gpio.h>
39
40struct sm501_gpio_chip {
41 struct gpio_chip gpio;
42 struct sm501_gpio *ourgpio; /* to get back to parent. */
43 void __iomem *regbase;
44};
45
46struct sm501_gpio {
47 struct sm501_gpio_chip low;
48 struct sm501_gpio_chip high;
49 spinlock_t lock;
50
51 unsigned int registered : 1;
52 void __iomem *regs;
53 struct resource *regs_res;
54};
55#else
56struct sm501_gpio {
57 /* no gpio support, empty definition for sm501_devdata. */
58};
59#endif
60
34struct sm501_devdata { 61struct sm501_devdata {
35 spinlock_t reg_lock; 62 spinlock_t reg_lock;
36 struct mutex clock_lock; 63 struct mutex clock_lock;
37 struct list_head devices; 64 struct list_head devices;
65 struct sm501_gpio gpio;
38 66
39 struct device *dev; 67 struct device *dev;
40 struct resource *io_res; 68 struct resource *io_res;
@@ -42,6 +70,7 @@ struct sm501_devdata {
42 struct resource *regs_claim; 70 struct resource *regs_claim;
43 struct sm501_platdata *platdata; 71 struct sm501_platdata *platdata;
44 72
73
45 unsigned int in_suspend; 74 unsigned int in_suspend;
46 unsigned long pm_misc; 75 unsigned long pm_misc;
47 76
@@ -52,6 +81,7 @@ struct sm501_devdata {
52 unsigned int rev; 81 unsigned int rev;
53}; 82};
54 83
84
55#define MHZ (1000 * 1000) 85#define MHZ (1000 * 1000)
56 86
57#ifdef DEBUG 87#ifdef DEBUG
@@ -276,58 +306,6 @@ unsigned long sm501_modify_reg(struct device *dev,
276 306
277EXPORT_SYMBOL_GPL(sm501_modify_reg); 307EXPORT_SYMBOL_GPL(sm501_modify_reg);
278 308
279unsigned long sm501_gpio_get(struct device *dev,
280 unsigned long gpio)
281{
282 struct sm501_devdata *sm = dev_get_drvdata(dev);
283 unsigned long result;
284 unsigned long reg;
285
286 reg = (gpio > 32) ? SM501_GPIO_DATA_HIGH : SM501_GPIO_DATA_LOW;
287 result = readl(sm->regs + reg);
288
289 result >>= (gpio & 31);
290 return result & 1UL;
291}
292
293EXPORT_SYMBOL_GPL(sm501_gpio_get);
294
295void sm501_gpio_set(struct device *dev,
296 unsigned long gpio,
297 unsigned int to,
298 unsigned int dir)
299{
300 struct sm501_devdata *sm = dev_get_drvdata(dev);
301
302 unsigned long bit = 1 << (gpio & 31);
303 unsigned long base;
304 unsigned long save;
305 unsigned long val;
306
307 base = (gpio > 32) ? SM501_GPIO_DATA_HIGH : SM501_GPIO_DATA_LOW;
308 base += SM501_GPIO;
309
310 spin_lock_irqsave(&sm->reg_lock, save);
311
312 val = readl(sm->regs + base) & ~bit;
313 if (to)
314 val |= bit;
315 writel(val, sm->regs + base);
316
317 val = readl(sm->regs + SM501_GPIO_DDR_LOW) & ~bit;
318 if (dir)
319 val |= bit;
320
321 writel(val, sm->regs + SM501_GPIO_DDR_LOW);
322 sm501_sync_regs(sm);
323
324 spin_unlock_irqrestore(&sm->reg_lock, save);
325
326}
327
328EXPORT_SYMBOL_GPL(sm501_gpio_set);
329
330
331/* sm501_unit_power 309/* sm501_unit_power
332 * 310 *
333 * alters the power active gate to set specific units on or off 311 * alters the power active gate to set specific units on or off
@@ -906,6 +884,313 @@ static int sm501_register_display(struct sm501_devdata *sm,
906 return sm501_register_device(sm, pdev); 884 return sm501_register_device(sm, pdev);
907} 885}
908 886
887#ifdef CONFIG_MFD_SM501_GPIO
888
889static inline struct sm501_gpio_chip *to_sm501_gpio(struct gpio_chip *gc)
890{
891 return container_of(gc, struct sm501_gpio_chip, gpio);
892}
893
894static inline struct sm501_devdata *sm501_gpio_to_dev(struct sm501_gpio *gpio)
895{
896 return container_of(gpio, struct sm501_devdata, gpio);
897}
898
899static int sm501_gpio_get(struct gpio_chip *chip, unsigned offset)
900
901{
902 struct sm501_gpio_chip *smgpio = to_sm501_gpio(chip);
903 unsigned long result;
904
905 result = readl(smgpio->regbase + SM501_GPIO_DATA_LOW);
906 result >>= offset;
907
908 return result & 1UL;
909}
910
911static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
912
913{
914 struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
915 struct sm501_gpio *smgpio = smchip->ourgpio;
916 unsigned long bit = 1 << offset;
917 void __iomem *regs = smchip->regbase;
918 unsigned long save;
919 unsigned long val;
920
921 dev_dbg(sm501_gpio_to_dev(smgpio)->dev, "%s(%p,%d)\n",
922 __func__, chip, offset);
923
924 spin_lock_irqsave(&smgpio->lock, save);
925
926 val = readl(regs + SM501_GPIO_DATA_LOW) & ~bit;
927 if (value)
928 val |= bit;
929 writel(val, regs);
930
931 sm501_sync_regs(sm501_gpio_to_dev(smgpio));
932 spin_unlock_irqrestore(&smgpio->lock, save);
933}
934
935static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
936{
937 struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
938 struct sm501_gpio *smgpio = smchip->ourgpio;
939 void __iomem *regs = smchip->regbase;
940 unsigned long bit = 1 << offset;
941 unsigned long save;
942 unsigned long ddr;
943
 944 dev_dbg(sm501_gpio_to_dev(smgpio)->dev, "%s(%p,%d)\n",
945 __func__, chip, offset);
946
947 spin_lock_irqsave(&smgpio->lock, save);
948
949 ddr = readl(regs + SM501_GPIO_DDR_LOW);
950 writel(ddr & ~bit, regs + SM501_GPIO_DDR_LOW);
951
952 sm501_sync_regs(sm501_gpio_to_dev(smgpio));
953 spin_unlock_irqrestore(&smgpio->lock, save);
954
955 return 0;
956}
957
958static int sm501_gpio_output(struct gpio_chip *chip,
959 unsigned offset, int value)
960{
961 struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
962 struct sm501_gpio *smgpio = smchip->ourgpio;
963 unsigned long bit = 1 << offset;
964 void __iomem *regs = smchip->regbase;
965 unsigned long save;
966 unsigned long val;
967 unsigned long ddr;
968
969 dev_dbg(sm501_gpio_to_dev(smgpio)->dev, "%s(%p,%d,%d)\n",
970 __func__, chip, offset, value);
971
972 spin_lock_irqsave(&smgpio->lock, save);
973
974 val = readl(regs + SM501_GPIO_DATA_LOW);
975 if (value)
976 val |= bit;
977 else
978 val &= ~bit;
979 writel(val, regs);
980
981 ddr = readl(regs + SM501_GPIO_DDR_LOW);
982 writel(ddr | bit, regs + SM501_GPIO_DDR_LOW);
983
984 sm501_sync_regs(sm501_gpio_to_dev(smgpio));
985 writel(val, regs + SM501_GPIO_DATA_LOW);
986
987 sm501_sync_regs(sm501_gpio_to_dev(smgpio));
988 spin_unlock_irqrestore(&smgpio->lock, save);
989
990 return 0;
991}
992
993static struct gpio_chip gpio_chip_template = {
994 .ngpio = 32,
995 .direction_input = sm501_gpio_input,
996 .direction_output = sm501_gpio_output,
997 .set = sm501_gpio_set,
998 .get = sm501_gpio_get,
999};
1000
1001static int __devinit sm501_gpio_register_chip(struct sm501_devdata *sm,
1002 struct sm501_gpio *gpio,
1003 struct sm501_gpio_chip *chip)
1004{
1005 struct sm501_platdata *pdata = sm->platdata;
1006 struct gpio_chip *gchip = &chip->gpio;
1007 int base = pdata->gpio_base;
1008
1009 chip->gpio = gpio_chip_template;
1010
1011 if (chip == &gpio->high) {
1012 if (base > 0)
1013 base += 32;
1014 chip->regbase = gpio->regs + SM501_GPIO_DATA_HIGH;
1015 gchip->label = "SM501-HIGH";
1016 } else {
1017 chip->regbase = gpio->regs + SM501_GPIO_DATA_LOW;
1018 gchip->label = "SM501-LOW";
1019 }
1020
1021 gchip->base = base;
1022 chip->ourgpio = gpio;
1023
1024 return gpiochip_add(gchip);
1025}
1026
1027static int sm501_register_gpio(struct sm501_devdata *sm)
1028{
1029 struct sm501_gpio *gpio = &sm->gpio;
1030 resource_size_t iobase = sm->io_res->start + SM501_GPIO;
1031 int ret;
1032 int tmp;
1033
1034 dev_dbg(sm->dev, "registering gpio block %08llx\n",
1035 (unsigned long long)iobase);
1036
1037 spin_lock_init(&gpio->lock);
1038
1039 gpio->regs_res = request_mem_region(iobase, 0x20, "sm501-gpio");
1040 if (gpio->regs_res == NULL) {
1041 dev_err(sm->dev, "gpio: failed to request region\n");
1042 return -ENXIO;
1043 }
1044
1045 gpio->regs = ioremap(iobase, 0x20);
1046 if (gpio->regs == NULL) {
1047 dev_err(sm->dev, "gpio: failed to remap registers\n");
1048 ret = -ENXIO;
1049 goto err_claimed;
1050 }
1051
1052 /* Register both our chips. */
1053
1054 ret = sm501_gpio_register_chip(sm, gpio, &gpio->low);
1055 if (ret) {
1056 dev_err(sm->dev, "failed to add low chip\n");
1057 goto err_mapped;
1058 }
1059
1060 ret = sm501_gpio_register_chip(sm, gpio, &gpio->high);
1061 if (ret) {
1062 dev_err(sm->dev, "failed to add high chip\n");
1063 goto err_low_chip;
1064 }
1065
1066 gpio->registered = 1;
1067
1068 return 0;
1069
1070 err_low_chip:
1071 tmp = gpiochip_remove(&gpio->low.gpio);
1072 if (tmp) {
1073 dev_err(sm->dev, "cannot remove low chip, cannot tidy up\n");
1074 return ret;
1075 }
1076
1077 err_mapped:
1078 iounmap(gpio->regs);
1079
1080 err_claimed:
1081 release_resource(gpio->regs_res);
1082 kfree(gpio->regs_res);
1083
1084 return ret;
1085}
1086
1087static void sm501_gpio_remove(struct sm501_devdata *sm)
1088{
1089 struct sm501_gpio *gpio = &sm->gpio;
1090 int ret;
1091
1092 if (!sm->gpio.registered)
1093 return;
1094
1095 ret = gpiochip_remove(&gpio->low.gpio);
1096 if (ret)
1097 dev_err(sm->dev, "cannot remove low chip, cannot tidy up\n");
1098
1099 ret = gpiochip_remove(&gpio->high.gpio);
1100 if (ret)
1101 dev_err(sm->dev, "cannot remove high chip, cannot tidy up\n");
1102
1103 iounmap(gpio->regs);
1104 release_resource(gpio->regs_res);
1105 kfree(gpio->regs_res);
1106}
1107
1108static inline int sm501_gpio_pin2nr(struct sm501_devdata *sm, unsigned int pin)
1109{
1110 struct sm501_gpio *gpio = &sm->gpio;
1111 int base = (pin < 32) ? gpio->low.gpio.base : gpio->high.gpio.base;
1112
1113 return (pin % 32) + base;
1114}
1115
1116static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
1117{
1118 return sm->gpio.registered;
1119}
1120#else
1121static inline int sm501_register_gpio(struct sm501_devdata *sm)
1122{
1123 return 0;
1124}
1125
1126static inline void sm501_gpio_remove(struct sm501_devdata *sm)
1127{
1128}
1129
1130static inline int sm501_gpio_pin2nr(struct sm501_devdata *sm, unsigned int pin)
1131{
1132 return -1;
1133}
1134
1135static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
1136{
1137 return 0;
1138}
1139#endif
1140
1141static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
1142 struct sm501_platdata_gpio_i2c *iic)
1143{
1144 struct i2c_gpio_platform_data *icd;
1145 struct platform_device *pdev;
1146
1147 pdev = sm501_create_subdev(sm, "i2c-gpio", 0,
1148 sizeof(struct i2c_gpio_platform_data));
1149 if (!pdev)
1150 return -ENOMEM;
1151
1152 icd = pdev->dev.platform_data;
1153
1154 /* We keep the pin_sda and pin_scl fields relative in case the
1155 * same platform data is passed to >1 SM501.
1156 */
1157
1158 icd->sda_pin = sm501_gpio_pin2nr(sm, iic->pin_sda);
1159 icd->scl_pin = sm501_gpio_pin2nr(sm, iic->pin_scl);
1160 icd->timeout = iic->timeout;
1161 icd->udelay = iic->udelay;
1162
1163 /* note, we can't use either of the pin numbers, as the i2c-gpio
1164 * driver uses the platform.id field to generate the bus number
1165	 * to register with the i2c core; the i2c core doesn't have enough
1166 * entries to deal with anything we currently use.
1167 */
1168
1169 pdev->id = iic->bus_num;
1170
1171 dev_info(sm->dev, "registering i2c-%d: sda=%d (%d), scl=%d (%d)\n",
1172 iic->bus_num,
1173 icd->sda_pin, iic->pin_sda, icd->scl_pin, iic->pin_scl);
1174
1175 return sm501_register_device(sm, pdev);
1176}
1177
1178static int sm501_register_gpio_i2c(struct sm501_devdata *sm,
1179 struct sm501_platdata *pdata)
1180{
1181 struct sm501_platdata_gpio_i2c *iic = pdata->gpio_i2c;
1182 int index;
1183 int ret;
1184
1185 for (index = 0; index < pdata->gpio_i2c_nr; index++, iic++) {
1186 ret = sm501_register_gpio_i2c_instance(sm, iic);
1187 if (ret < 0)
1188 return ret;
1189 }
1190
1191 return 0;
1192}
1193
909/* sm501_dbg_regs 1194/* sm501_dbg_regs
910 * 1195 *
911 * Debug attribute to attach to parent device to show core registers 1196 * Debug attribute to attach to parent device to show core registers
@@ -1013,6 +1298,7 @@ static unsigned int sm501_mem_local[] = {
1013static int sm501_init_dev(struct sm501_devdata *sm) 1298static int sm501_init_dev(struct sm501_devdata *sm)
1014{ 1299{
1015 struct sm501_initdata *idata; 1300 struct sm501_initdata *idata;
1301 struct sm501_platdata *pdata;
1016 resource_size_t mem_avail; 1302 resource_size_t mem_avail;
1017 unsigned long dramctrl; 1303 unsigned long dramctrl;
1018 unsigned long devid; 1304 unsigned long devid;
@@ -1051,7 +1337,9 @@ static int sm501_init_dev(struct sm501_devdata *sm)
1051 1337
1052 /* check to see if we have some device initialisation */ 1338 /* check to see if we have some device initialisation */
1053 1339
1054 idata = sm->platdata ? sm->platdata->init : NULL; 1340 pdata = sm->platdata;
1341 idata = pdata ? pdata->init : NULL;
1342
1055 if (idata) { 1343 if (idata) {
1056 sm501_init_regs(sm, idata); 1344 sm501_init_regs(sm, idata);
1057 1345
@@ -1059,6 +1347,15 @@ static int sm501_init_dev(struct sm501_devdata *sm)
1059 sm501_register_usbhost(sm, &mem_avail); 1347 sm501_register_usbhost(sm, &mem_avail);
1060 if (idata->devices & (SM501_USE_UART0 | SM501_USE_UART1)) 1348 if (idata->devices & (SM501_USE_UART0 | SM501_USE_UART1))
1061 sm501_register_uart(sm, idata->devices); 1349 sm501_register_uart(sm, idata->devices);
1350 if (idata->devices & SM501_USE_GPIO)
1351 sm501_register_gpio(sm);
1352 }
1353
1354 if (pdata->gpio_i2c != NULL && pdata->gpio_i2c_nr > 0) {
1355 if (!sm501_gpio_isregistered(sm))
1356 dev_err(sm->dev, "no gpio available for i2c gpio.\n");
1357 else
1358 sm501_register_gpio_i2c(sm, pdata);
1062 } 1359 }
1063 1360
1064 ret = sm501_check_clocks(sm); 1361 ret = sm501_check_clocks(sm);
@@ -1138,8 +1435,31 @@ static int sm501_plat_probe(struct platform_device *dev)
1138} 1435}
1139 1436
1140#ifdef CONFIG_PM 1437#ifdef CONFIG_PM
1438
1141/* power management support */ 1439/* power management support */
1142 1440
1441static void sm501_set_power(struct sm501_devdata *sm, int on)
1442{
1443 struct sm501_platdata *pd = sm->platdata;
1444
1445 if (pd == NULL)
1446 return;
1447
1448 if (pd->get_power) {
1449 if (pd->get_power(sm->dev) == on) {
1450 dev_dbg(sm->dev, "is already %d\n", on);
1451 return;
1452 }
1453 }
1454
1455 if (pd->set_power) {
1456 dev_dbg(sm->dev, "setting power to %d\n", on);
1457
1458 pd->set_power(sm->dev, on);
1459 sm501_mdelay(sm, 10);
1460 }
1461}
1462
1143static int sm501_plat_suspend(struct platform_device *pdev, pm_message_t state) 1463static int sm501_plat_suspend(struct platform_device *pdev, pm_message_t state)
1144{ 1464{
1145 struct sm501_devdata *sm = platform_get_drvdata(pdev); 1465 struct sm501_devdata *sm = platform_get_drvdata(pdev);
@@ -1148,6 +1468,12 @@ static int sm501_plat_suspend(struct platform_device *pdev, pm_message_t state)
1148 sm->pm_misc = readl(sm->regs + SM501_MISC_CONTROL); 1468 sm->pm_misc = readl(sm->regs + SM501_MISC_CONTROL);
1149 1469
1150 sm501_dump_regs(sm); 1470 sm501_dump_regs(sm);
1471
1472 if (sm->platdata) {
1473 if (sm->platdata->flags & SM501_FLAG_SUSPEND_OFF)
1474 sm501_set_power(sm, 0);
1475 }
1476
1151 return 0; 1477 return 0;
1152} 1478}
1153 1479
@@ -1155,6 +1481,8 @@ static int sm501_plat_resume(struct platform_device *pdev)
1155{ 1481{
1156 struct sm501_devdata *sm = platform_get_drvdata(pdev); 1482 struct sm501_devdata *sm = platform_get_drvdata(pdev);
1157 1483
1484 sm501_set_power(sm, 1);
1485
1158 sm501_dump_regs(sm); 1486 sm501_dump_regs(sm);
1159 sm501_dump_gate(sm); 1487 sm501_dump_gate(sm);
1160 sm501_dump_clk(sm); 1488 sm501_dump_clk(sm);
@@ -1229,6 +1557,7 @@ static struct sm501_platdata_fb sm501_fb_pdata = {
1229static struct sm501_platdata sm501_pci_platdata = { 1557static struct sm501_platdata sm501_pci_platdata = {
1230 .init = &sm501_pci_initdata, 1558 .init = &sm501_pci_initdata,
1231 .fb = &sm501_fb_pdata, 1559 .fb = &sm501_fb_pdata,
1560 .gpio_base = -1,
1232}; 1561};
1233 1562
1234static int sm501_pci_probe(struct pci_dev *dev, 1563static int sm501_pci_probe(struct pci_dev *dev,
@@ -1335,6 +1664,8 @@ static void sm501_dev_remove(struct sm501_devdata *sm)
1335 sm501_remove_sub(sm, smdev); 1664 sm501_remove_sub(sm, smdev);
1336 1665
1337 device_remove_file(sm->dev, &dev_attr_dbg_regs); 1666 device_remove_file(sm->dev, &dev_attr_dbg_regs);
1667
1668 sm501_gpio_remove(sm);
1338} 1669}
1339 1670
1340static void sm501_pci_remove(struct pci_dev *dev) 1671static void sm501_pci_remove(struct pci_dev *dev)
@@ -1378,6 +1709,8 @@ static struct pci_driver sm501_pci_drv = {
1378 .remove = sm501_pci_remove, 1709 .remove = sm501_pci_remove,
1379}; 1710};
1380 1711
1712MODULE_ALIAS("platform:sm501");
1713
1381static struct platform_driver sm501_plat_drv = { 1714static struct platform_driver sm501_plat_drv = {
1382 .driver = { 1715 .driver = {
1383 .name = "sm501", 1716 .name = "sm501",
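
A minimal board-file sketch (not from any in-tree board) of the platform data the new sm501 GPIO and gpio-i2c code above consumes. It uses only the field names visible in this patch (gpio_base, gpio_i2c, gpio_i2c_nr, SM501_USE_GPIO, and the pin_sda/pin_scl/bus_num/udelay members of sm501_platdata_gpio_i2c); the header name, pin numbers and gpio base are illustrative assumptions.

/*
 * Hedged sketch: wire one i2c-gpio bus off the SM501's on-chip GPIOs.
 * Values are placeholders, not taken from real hardware.
 */
#include <linux/kernel.h>
#include <linux/sm501.h>	/* assumed location of the platdata structs */

static struct sm501_platdata_gpio_i2c board_sm501_i2c[] = {
	[0] = {
		.bus_num	= 0,
		.pin_sda	= 1,	/* SM501 GPIO1, relative to this chip */
		.pin_scl	= 2,	/* SM501 GPIO2 */
		.udelay		= 10,
	},
};

static struct sm501_initdata board_sm501_init = {
	.devices	= SM501_USE_GPIO,
};

static struct sm501_platdata board_sm501_pdata = {
	.init		= &board_sm501_init,
	.gpio_base	= 192,	/* first gpiolib number handed to "SM501-LOW" */
	.gpio_i2c	= board_sm501_i2c,
	.gpio_i2c_nr	= ARRAY_SIZE(board_sm501_i2c),
};
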
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
new file mode 100644
index 000000000000..2d87501b6fd4
--- /dev/null
+++ b/drivers/mfd/tc6393xb.c
@@ -0,0 +1,600 @@
1/*
2 * Toshiba TC6393XB SoC support
3 *
4 * Copyright(c) 2005-2006 Chris Humbert
5 * Copyright(c) 2005 Dirk Opfer
6 * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
7 * Copyright(c) 2007 Dmitry Baryshkov
8 *
9 * Based on code written by Sharp/Lineo for 2.4 kernels
10 * Based on locomo.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/platform_device.h>
22#include <linux/fb.h>
23#include <linux/clk.h>
24#include <linux/mfd/core.h>
25#include <linux/mfd/tmio.h>
26#include <linux/mfd/tc6393xb.h>
27#include <linux/gpio.h>
28
29#define SCR_REVID 0x08 /* b Revision ID */
30#define SCR_ISR 0x50 /* b Interrupt Status */
31#define SCR_IMR 0x52 /* b Interrupt Mask */
32#define SCR_IRR 0x54 /* b Interrupt Routing */
33#define SCR_GPER 0x60 /* w GP Enable */
34#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
35#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
36#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
37#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
38#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
39#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
40#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
41#define SCR_GP_IARLCR(i)	(0x84 + (i))	/* b3 GP Internal Active Register Level Control */
42#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
43#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
44#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
45#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
46#define SCR_CCR 0x98 /* w Clock Control */
47#define SCR_PLL2CR 0x9a /* w PLL2 Control */
48#define SCR_PLL1CR 0x9c /* l PLL1 Control */
49#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
50#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
51#define SCR_FER 0xe0 /* b Function Enable */
52#define SCR_MCR 0xe4 /* w Mode Control */
53#define SCR_CONFIG 0xfc /* b Configuration Control */
54#define SCR_DEBUG 0xff /* b Debug */
55
56#define SCR_CCR_CK32K BIT(0)
57#define SCR_CCR_USBCK BIT(1)
58#define SCR_CCR_UNK1 BIT(4)
59#define SCR_CCR_MCLK_MASK (7 << 8)
60#define SCR_CCR_MCLK_OFF (0 << 8)
61#define SCR_CCR_MCLK_12 (1 << 8)
62#define SCR_CCR_MCLK_24 (2 << 8)
63#define SCR_CCR_MCLK_48 (3 << 8)
64#define SCR_CCR_HCLK_MASK (3 << 12)
65#define SCR_CCR_HCLK_24 (0 << 12)
66#define SCR_CCR_HCLK_48 (1 << 12)
67
68#define SCR_FER_USBEN BIT(0) /* USB host enable */
69#define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */
70#define SCR_FER_SLCDEN BIT(2) /* SLCD enable */
71
72#define SCR_MCR_RDY_MASK (3 << 0)
73#define SCR_MCR_RDY_OPENDRAIN (0 << 0)
74#define SCR_MCR_RDY_TRISTATE (1 << 0)
75#define SCR_MCR_RDY_PUSHPULL (2 << 0)
76#define SCR_MCR_RDY_UNK BIT(2)
77#define SCR_MCR_RDY_EN BIT(3)
78#define SCR_MCR_INT_MASK (3 << 4)
79#define SCR_MCR_INT_OPENDRAIN (0 << 4)
80#define SCR_MCR_INT_TRISTATE (1 << 4)
81#define SCR_MCR_INT_PUSHPULL (2 << 4)
82#define SCR_MCR_INT_UNK BIT(6)
83#define SCR_MCR_INT_EN BIT(7)
84/* bits 8 - 16 are unknown */
85
86#define TC_GPIO_BIT(i) (1 << (i & 0x7))
87
88/*--------------------------------------------------------------------------*/
89
90struct tc6393xb {
91 void __iomem *scr;
92
93 struct gpio_chip gpio;
94
95	struct clk		*clk; /* 3.6 MHz */
96
97 spinlock_t lock; /* protects RMW cycles */
98
99 struct {
100 u8 fer;
101 u16 ccr;
102 u8 gpi_bcr[3];
103 u8 gpo_dsr[3];
104 u8 gpo_doecr[3];
105 } suspend_state;
106
107 struct resource rscr;
108 struct resource *iomem;
109 int irq;
110 int irq_base;
111};
112
113enum {
114 TC6393XB_CELL_NAND,
115};
116
117/*--------------------------------------------------------------------------*/
118
119static int tc6393xb_nand_enable(struct platform_device *nand)
120{
121 struct platform_device *dev = to_platform_device(nand->dev.parent);
122 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
123 unsigned long flags;
124
125 spin_lock_irqsave(&tc6393xb->lock, flags);
126
127 /* SMD buffer on */
128 dev_dbg(&dev->dev, "SMD buffer on\n");
129 iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
130
131 spin_unlock_irqrestore(&tc6393xb->lock, flags);
132
133 return 0;
134}
135
136static struct resource __devinitdata tc6393xb_nand_resources[] = {
137 {
138 .name = TMIO_NAND_CONFIG,
139 .start = 0x0100,
140 .end = 0x01ff,
141 .flags = IORESOURCE_MEM,
142 },
143 {
144 .name = TMIO_NAND_CONTROL,
145 .start = 0x1000,
146 .end = 0x1007,
147 .flags = IORESOURCE_MEM,
148 },
149 {
150 .name = TMIO_NAND_IRQ,
151 .start = IRQ_TC6393_NAND,
152 .end = IRQ_TC6393_NAND,
153 .flags = IORESOURCE_IRQ,
154 },
155};
156
157static struct mfd_cell __devinitdata tc6393xb_cells[] = {
158 [TC6393XB_CELL_NAND] = {
159 .name = "tmio-nand",
160 .enable = tc6393xb_nand_enable,
161 .num_resources = ARRAY_SIZE(tc6393xb_nand_resources),
162 .resources = tc6393xb_nand_resources,
163 },
164};
165
166/*--------------------------------------------------------------------------*/
167
168static int tc6393xb_gpio_get(struct gpio_chip *chip,
169 unsigned offset)
170{
171 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
172
173 /* XXX: does dsr also represent inputs? */
174 return ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
175 & TC_GPIO_BIT(offset);
176}
177
178static void __tc6393xb_gpio_set(struct gpio_chip *chip,
179 unsigned offset, int value)
180{
181 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
182 u8 dsr;
183
184 dsr = ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));
185 if (value)
186 dsr |= TC_GPIO_BIT(offset);
187 else
188 dsr &= ~TC_GPIO_BIT(offset);
189
190 iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8));
191}
192
193static void tc6393xb_gpio_set(struct gpio_chip *chip,
194 unsigned offset, int value)
195{
196 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
197 unsigned long flags;
198
199 spin_lock_irqsave(&tc6393xb->lock, flags);
200
201 __tc6393xb_gpio_set(chip, offset, value);
202
203 spin_unlock_irqrestore(&tc6393xb->lock, flags);
204}
205
206static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
207 unsigned offset)
208{
209 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
210 unsigned long flags;
211 u8 doecr;
212
213 spin_lock_irqsave(&tc6393xb->lock, flags);
214
215 doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
216 doecr &= ~TC_GPIO_BIT(offset);
217 iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
218
219 spin_unlock_irqrestore(&tc6393xb->lock, flags);
220
221 return 0;
222}
223
224static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
225 unsigned offset, int value)
226{
227 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
228 unsigned long flags;
229 u8 doecr;
230
231 spin_lock_irqsave(&tc6393xb->lock, flags);
232
233 __tc6393xb_gpio_set(chip, offset, value);
234
235 doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
236 doecr |= TC_GPIO_BIT(offset);
237 iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
238
239 spin_unlock_irqrestore(&tc6393xb->lock, flags);
240
241 return 0;
242}
243
244static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
245{
246 tc6393xb->gpio.label = "tc6393xb";
247 tc6393xb->gpio.base = gpio_base;
248 tc6393xb->gpio.ngpio = 16;
249 tc6393xb->gpio.set = tc6393xb_gpio_set;
250 tc6393xb->gpio.get = tc6393xb_gpio_get;
251 tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
252 tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
253
254 return gpiochip_add(&tc6393xb->gpio);
255}
256
257/*--------------------------------------------------------------------------*/
258
259static void
260tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
261{
262 struct tc6393xb *tc6393xb = get_irq_data(irq);
263 unsigned int isr;
264 unsigned int i, irq_base;
265
266 irq_base = tc6393xb->irq_base;
267
268 while ((isr = ioread8(tc6393xb->scr + SCR_ISR) &
269 ~ioread8(tc6393xb->scr + SCR_IMR)))
270 for (i = 0; i < TC6393XB_NR_IRQS; i++) {
271 if (isr & (1 << i))
272 generic_handle_irq(irq_base + i);
273 }
274}
275
276static void tc6393xb_irq_ack(unsigned int irq)
277{
278}
279
280static void tc6393xb_irq_mask(unsigned int irq)
281{
282 struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
283 unsigned long flags;
284 u8 imr;
285
286 spin_lock_irqsave(&tc6393xb->lock, flags);
287 imr = ioread8(tc6393xb->scr + SCR_IMR);
288 imr |= 1 << (irq - tc6393xb->irq_base);
289 iowrite8(imr, tc6393xb->scr + SCR_IMR);
290 spin_unlock_irqrestore(&tc6393xb->lock, flags);
291}
292
293static void tc6393xb_irq_unmask(unsigned int irq)
294{
295 struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
296 unsigned long flags;
297 u8 imr;
298
299 spin_lock_irqsave(&tc6393xb->lock, flags);
300 imr = ioread8(tc6393xb->scr + SCR_IMR);
301 imr &= ~(1 << (irq - tc6393xb->irq_base));
302 iowrite8(imr, tc6393xb->scr + SCR_IMR);
303 spin_unlock_irqrestore(&tc6393xb->lock, flags);
304}
305
306static struct irq_chip tc6393xb_chip = {
307 .name = "tc6393xb",
308 .ack = tc6393xb_irq_ack,
309 .mask = tc6393xb_irq_mask,
310 .unmask = tc6393xb_irq_unmask,
311};
312
313static void tc6393xb_attach_irq(struct platform_device *dev)
314{
315 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
316 unsigned int irq, irq_base;
317
318 irq_base = tc6393xb->irq_base;
319
320 for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
321 set_irq_chip(irq, &tc6393xb_chip);
322 set_irq_chip_data(irq, tc6393xb);
323 set_irq_handler(irq, handle_edge_irq);
324 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
325 }
326
327 set_irq_type(tc6393xb->irq, IRQT_FALLING);
328 set_irq_data(tc6393xb->irq, tc6393xb);
329 set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq);
330}
331
332static void tc6393xb_detach_irq(struct platform_device *dev)
333{
334 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
335 unsigned int irq, irq_base;
336
337 set_irq_chained_handler(tc6393xb->irq, NULL);
338 set_irq_data(tc6393xb->irq, NULL);
339
340 irq_base = tc6393xb->irq_base;
341
342 for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
343 set_irq_flags(irq, 0);
344 set_irq_chip(irq, NULL);
345 set_irq_chip_data(irq, NULL);
346 }
347}
348
349/*--------------------------------------------------------------------------*/
350
351static int tc6393xb_hw_init(struct platform_device *dev)
352{
353 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
354 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
355 int i;
356
357 iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER);
358 iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
359 iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR);
360 iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
361 SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
362 BIT(15), tc6393xb->scr + SCR_MCR);
363 iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
364 iowrite8(0, tc6393xb->scr + SCR_IRR);
365 iowrite8(0xbf, tc6393xb->scr + SCR_IMR);
366
367 for (i = 0; i < 3; i++) {
368 iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
369 tc6393xb->scr + SCR_GPO_DSR(i));
370 iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
371 tc6393xb->scr + SCR_GPO_DOECR(i));
372 iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
373 tc6393xb->scr + SCR_GPI_BCR(i));
374 }
375
376 return 0;
377}
378
379static int __devinit tc6393xb_probe(struct platform_device *dev)
380{
381 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
382 struct tc6393xb *tc6393xb;
383 struct resource *iomem;
384 struct resource *rscr;
385 int retval, temp;
386 int i;
387
388 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
389 if (!iomem)
390 return -EINVAL;
391
392 tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL);
393 if (!tc6393xb) {
394 retval = -ENOMEM;
395 goto err_kzalloc;
396 }
397
398 spin_lock_init(&tc6393xb->lock);
399
400 platform_set_drvdata(dev, tc6393xb);
401 tc6393xb->iomem = iomem;
402 tc6393xb->irq = platform_get_irq(dev, 0);
403 tc6393xb->irq_base = tcpd->irq_base;
404
405 tc6393xb->clk = clk_get(&dev->dev, "GPIO27_CLK" /* "CK3P6MI" */);
406 if (IS_ERR(tc6393xb->clk)) {
407 retval = PTR_ERR(tc6393xb->clk);
408 goto err_clk_get;
409 }
410
411 rscr = &tc6393xb->rscr;
412 rscr->name = "tc6393xb-core";
413 rscr->start = iomem->start;
414 rscr->end = iomem->start + 0xff;
415 rscr->flags = IORESOURCE_MEM;
416
417 retval = request_resource(iomem, rscr);
418 if (retval)
419 goto err_request_scr;
420
421 tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
422 if (!tc6393xb->scr) {
423 retval = -ENOMEM;
424 goto err_ioremap;
425 }
426
427 retval = clk_enable(tc6393xb->clk);
428 if (retval)
429 goto err_clk_enable;
430
431 retval = tcpd->enable(dev);
432 if (retval)
433 goto err_enable;
434
435 tc6393xb->suspend_state.fer = 0;
436 for (i = 0; i < 3; i++) {
437 tc6393xb->suspend_state.gpo_dsr[i] =
438 (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff;
439 tc6393xb->suspend_state.gpo_doecr[i] =
440 (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff;
441 }
442 /*
443 * It may be necessary to change this back to
444	 * platform-dependent code
445 */
446 tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 |
447 SCR_CCR_HCLK_48;
448
449 retval = tc6393xb_hw_init(dev);
450 if (retval)
451 goto err_hw_init;
452
453 printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
454 ioread8(tc6393xb->scr + SCR_REVID),
455 (unsigned long) iomem->start, tc6393xb->irq);
456
457 tc6393xb->gpio.base = -1;
458
459 if (tcpd->gpio_base >= 0) {
460 retval = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
461 if (retval)
462 goto err_gpio_add;
463 }
464
465 if (tc6393xb->irq)
466 tc6393xb_attach_irq(dev);
467
468 tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
469
470 retval = mfd_add_devices(dev,
471 tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
472 iomem, tcpd->irq_base);
473
474 return 0;
475
476 if (tc6393xb->irq)
477 tc6393xb_detach_irq(dev);
478
479err_gpio_add:
480 if (tc6393xb->gpio.base != -1)
481 temp = gpiochip_remove(&tc6393xb->gpio);
482err_hw_init:
483 tcpd->disable(dev);
484err_clk_enable:
485 clk_disable(tc6393xb->clk);
486err_enable:
487 iounmap(tc6393xb->scr);
488err_ioremap:
489 release_resource(&tc6393xb->rscr);
490err_request_scr:
491 clk_put(tc6393xb->clk);
492err_clk_get:
493 kfree(tc6393xb);
494err_kzalloc:
495 return retval;
496}
497
498static int __devexit tc6393xb_remove(struct platform_device *dev)
499{
500 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
501 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
502 int ret;
503
504 mfd_remove_devices(dev);
505
506 if (tc6393xb->irq)
507 tc6393xb_detach_irq(dev);
508
509 if (tc6393xb->gpio.base != -1) {
510 ret = gpiochip_remove(&tc6393xb->gpio);
511 if (ret) {
512 dev_err(&dev->dev, "Can't remove gpio chip: %d\n", ret);
513 return ret;
514 }
515 }
516
517 ret = tcpd->disable(dev);
518
519 clk_disable(tc6393xb->clk);
520
521 iounmap(tc6393xb->scr);
522
523 release_resource(&tc6393xb->rscr);
524
525 platform_set_drvdata(dev, NULL);
526
527 clk_put(tc6393xb->clk);
528
529 kfree(tc6393xb);
530
531 return ret;
532}
533
534#ifdef CONFIG_PM
535static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
536{
537 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
538 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
539 int i;
540
541
542 tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR);
543 tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER);
544
545 for (i = 0; i < 3; i++) {
546 tc6393xb->suspend_state.gpo_dsr[i] =
547 ioread8(tc6393xb->scr + SCR_GPO_DSR(i));
548 tc6393xb->suspend_state.gpo_doecr[i] =
549 ioread8(tc6393xb->scr + SCR_GPO_DOECR(i));
550 tc6393xb->suspend_state.gpi_bcr[i] =
551 ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
552 }
553
554 return tcpd->suspend(dev);
555}
556
557static int tc6393xb_resume(struct platform_device *dev)
558{
559 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
560 int ret = tcpd->resume(dev);
561
562 if (ret)
563 return ret;
564
565 return tc6393xb_hw_init(dev);
566}
567#else
568#define tc6393xb_suspend NULL
569#define tc6393xb_resume NULL
570#endif
571
572static struct platform_driver tc6393xb_driver = {
573 .probe = tc6393xb_probe,
574 .remove = __devexit_p(tc6393xb_remove),
575 .suspend = tc6393xb_suspend,
576 .resume = tc6393xb_resume,
577
578 .driver = {
579 .name = "tc6393xb",
580 .owner = THIS_MODULE,
581 },
582};
583
584static int __init tc6393xb_init(void)
585{
586 return platform_driver_register(&tc6393xb_driver);
587}
588
589static void __exit tc6393xb_exit(void)
590{
591 platform_driver_unregister(&tc6393xb_driver);
592}
593
594subsys_initcall(tc6393xb_init);
595module_exit(tc6393xb_exit);
596
597MODULE_LICENSE("GPL");
598MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
599MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
600MODULE_ALIAS("platform:tc6393xb");
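
A hedged board-file sketch for the driver above. It touches only the tc6393xb_platform_data fields the probe, remove and suspend paths reference (enable/disable hooks, irq_base, gpio_base, the scr_* values and nand_data); the register settings and irq/gpio bases are illustrative placeholders, not a real machine's configuration.

#include <linux/platform_device.h>
#include <linux/mfd/tc6393xb.h>

static int board_tc6393xb_enable(struct platform_device *dev)
{
	/* power the chip up and start its 3.6 MHz clock here */
	return 0;
}

static int board_tc6393xb_disable(struct platform_device *dev)
{
	/* power the chip back down */
	return 0;
}

static struct tc6393xb_platform_data board_tc6393xb_data = {
	.scr_pll2cr	= 0x0cc1,	/* placeholder PLL2 setting */
	.scr_gper	= 0x3300,	/* placeholder GP enable mask */
	.irq_base	= 96,		/* first Linux irq handed to the chip */
	.gpio_base	= 128,		/* first gpiolib number for its 16 GPIOs */
	.enable		= board_tc6393xb_enable,
	.disable	= board_tc6393xb_disable,
	/* .suspend, .resume and .nand_data are omitted from this sketch */
};
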
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index ce67d973d349..321eb9134635 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -77,11 +77,13 @@ config IBM_ASM
77 for your IBM server. 77 for your IBM server.
78 78
79config PHANTOM 79config PHANTOM
80 tristate "Sensable PHANToM" 80 tristate "Sensable PHANToM (PCI)"
81 depends on PCI 81 depends on PCI
82 help 82 help
83 Say Y here if you want to build a driver for Sensable PHANToM device. 83 Say Y here if you want to build a driver for Sensable PHANToM device.
84 84
85 This driver is only for PCI PHANToMs.
86
85	  If you choose to build a module, its name will be phantom. If unsure,	87
86 say N here. 88 say N here.
87 89
@@ -212,6 +214,18 @@ config TC1100_WMI
212 This is a driver for the WMI extensions (wireless and bluetooth power 214 This is a driver for the WMI extensions (wireless and bluetooth power
213 control) of the HP Compaq TC1100 tablet. 215 control) of the HP Compaq TC1100 tablet.
214 216
217config HP_WMI
218 tristate "HP WMI extras"
219 depends on ACPI_WMI
220 depends on INPUT
221 depends on RFKILL
222 help
223 Say Y here if you want to support WMI-based hotkeys on HP laptops and
224 to read data from WMI such as docking or ambient light sensor state.
225
226 To compile this driver as a module, choose M here: the module will
227 be called hp-wmi.
228
215config MSI_LAPTOP 229config MSI_LAPTOP
216 tristate "MSI Laptop Extras" 230 tristate "MSI Laptop Extras"
217 depends on X86 231 depends on X86
@@ -279,6 +293,8 @@ config THINKPAD_ACPI
279 select INPUT 293 select INPUT
280 select NEW_LEDS 294 select NEW_LEDS
281 select LEDS_CLASS 295 select LEDS_CLASS
296 select NET
297 select RFKILL
282 ---help--- 298 ---help---
283 This is a driver for the IBM and Lenovo ThinkPad laptops. It adds 299 This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
284 support for Fn-Fx key combinations, Bluetooth control, video 300 support for Fn-Fx key combinations, Bluetooth control, video
@@ -422,6 +438,7 @@ config SGI_XP
422 438
423config HP_ILO 439config HP_ILO
424 tristate "Channel interface driver for HP iLO/iLO2 processor" 440 tristate "Channel interface driver for HP iLO/iLO2 processor"
441 depends on PCI
425 default n 442 default n
426 help 443 help
427 The channel interface driver allows applications to communicate 444 The channel interface driver allows applications to communicate
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 688fe76135e0..f5e273420c09 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_ACER_WMI) += acer-wmi.o
13obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o 13obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
14obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o 14obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
15obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o 15obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
16obj-$(CONFIG_HP_WMI) += hp-wmi.o
16obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o 17obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
17obj-$(CONFIG_LKDTM) += lkdtm.o 18obj-$(CONFIG_LKDTM) += lkdtm.o
18obj-$(CONFIG_TIFM_CORE) += tifm_core.o 19obj-$(CONFIG_TIFM_CORE) += tifm_core.o
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
index 5b5a14dab3d3..6aa5294dfec4 100644
--- a/drivers/misc/atmel_pwm.c
+++ b/drivers/misc/atmel_pwm.c
@@ -211,8 +211,7 @@ int pwm_clk_alloc(unsigned prescale, unsigned div)
211 if ((mr & 0xffff) == 0) { 211 if ((mr & 0xffff) == 0) {
212 mr |= val; 212 mr |= val;
213 ret = PWM_CPR_CLKA; 213 ret = PWM_CPR_CLKA;
214 } 214 } else if ((mr & (0xffff << 16)) == 0) {
215 if ((mr & (0xffff << 16)) == 0) {
216 mr |= val << 16; 215 mr |= val << 16;
217 ret = PWM_CPR_CLKB; 216 ret = PWM_CPR_CLKB;
218 } 217 }
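
The atmel_pwm change above makes the two clock slots mutually exclusive: a single pwm_clk_alloc() call now claims either CLKA or CLKB, never both, when the mode register starts out empty. A standalone model of the corrected decision (not the driver code itself; names and the return enum are invented for illustration):

#include <stdint.h>

enum { CLK_NONE, CLK_A, CLK_B };

/* mr models the 32-bit mode register: CLKA divider in the low 16 bits,
 * CLKB divider in the high 16 bits; an allocation claims exactly one. */
static int alloc_clk_slot(uint32_t *mr, uint16_t val)
{
	if ((*mr & 0xffff) == 0) {			/* CLKA free */
		*mr |= val;
		return CLK_A;
	} else if ((*mr & (0xffffu << 16)) == 0) {	/* CLKB free */
		*mr |= (uint32_t)val << 16;
		return CLK_B;
	}
	return CLK_NONE;				/* both slots in use */
}
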
diff --git a/drivers/misc/hp-wmi.c b/drivers/misc/hp-wmi.c
new file mode 100644
index 000000000000..1dbcbcb323a2
--- /dev/null
+++ b/drivers/misc/hp-wmi.c
@@ -0,0 +1,494 @@
1/*
2 * HP WMI hotkeys
3 *
4 * Copyright (C) 2008 Red Hat <mjg@redhat.com>
5 *
6 * Portions based on wistron_btns.c:
7 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
8 * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
9 * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/types.h>
30#include <linux/input.h>
31#include <acpi/acpi_drivers.h>
32#include <linux/platform_device.h>
33#include <linux/acpi.h>
34#include <linux/rfkill.h>
35#include <linux/string.h>
36
37MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
38MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
39MODULE_LICENSE("GPL");
40
41MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
42MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
43
44#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
45#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
46
47#define HPWMI_DISPLAY_QUERY 0x1
48#define HPWMI_HDDTEMP_QUERY 0x2
49#define HPWMI_ALS_QUERY 0x3
50#define HPWMI_DOCK_QUERY 0x4
51#define HPWMI_WIRELESS_QUERY 0x5
52
53static int __init hp_wmi_bios_setup(struct platform_device *device);
54static int __exit hp_wmi_bios_remove(struct platform_device *device);
55
56struct bios_args {
57 u32 signature;
58 u32 command;
59 u32 commandtype;
60 u32 datasize;
61 u32 data;
62};
63
64struct bios_return {
65 u32 sigpass;
66 u32 return_code;
67 u32 value;
68};
69
70struct key_entry {
71 char type; /* See KE_* below */
72 u8 code;
73 u16 keycode;
74};
75
76enum { KE_KEY, KE_SW, KE_END };
77
78static struct key_entry hp_wmi_keymap[] = {
79 {KE_SW, 0x01, SW_DOCK},
80 {KE_KEY, 0x02, KEY_BRIGHTNESSUP},
81 {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
82 {KE_KEY, 0x04, KEY_HELP},
83 {KE_END, 0}
84};
85
86static struct input_dev *hp_wmi_input_dev;
87static struct platform_device *hp_wmi_platform_dev;
88
89static struct rfkill *wifi_rfkill;
90static struct rfkill *bluetooth_rfkill;
91static struct rfkill *wwan_rfkill;
92
93static struct platform_driver hp_wmi_driver = {
94 .driver = {
95 .name = "hp-wmi",
96 .owner = THIS_MODULE,
97 },
98 .probe = hp_wmi_bios_setup,
99 .remove = hp_wmi_bios_remove,
100};
101
102static int hp_wmi_perform_query(int query, int write, int value)
103{
104 struct bios_return bios_return;
105 acpi_status status;
106 union acpi_object *obj;
107 struct bios_args args = {
108 .signature = 0x55434553,
109 .command = write ? 0x2 : 0x1,
110 .commandtype = query,
111 .datasize = write ? 0x4 : 0,
112 .data = value,
113 };
114 struct acpi_buffer input = { sizeof(struct bios_args), &args };
115 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
116
117 status = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output);
118
119 obj = output.pointer;
120
121 if (!obj || obj->type != ACPI_TYPE_BUFFER)
122 return -EINVAL;
123
124 bios_return = *((struct bios_return *)obj->buffer.pointer);
125 if (bios_return.return_code > 0)
126 return bios_return.return_code * -1;
127 else
128 return bios_return.value;
129}
130
131static int hp_wmi_display_state(void)
132{
133 return hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, 0);
134}
135
136static int hp_wmi_hddtemp_state(void)
137{
138 return hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, 0);
139}
140
141static int hp_wmi_als_state(void)
142{
143 return hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, 0);
144}
145
146static int hp_wmi_dock_state(void)
147{
148 return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0);
149}
150
151static int hp_wmi_wifi_set(void *data, enum rfkill_state state)
152{
153 if (state)
154 return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x101);
155 else
156 return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x100);
157}
158
159static int hp_wmi_bluetooth_set(void *data, enum rfkill_state state)
160{
161 if (state)
162 return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x202);
163 else
164 return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x200);
165}
166
167static int hp_wmi_wwan_set(void *data, enum rfkill_state state)
168{
169 if (state)
170 return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x404);
171 else
172 return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x400);
173}
174
175static int hp_wmi_wifi_state(void)
176{
177 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
178
179 if (wireless & 0x100)
180 return 1;
181 else
182 return 0;
183}
184
185static int hp_wmi_bluetooth_state(void)
186{
187 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
188
189 if (wireless & 0x10000)
190 return 1;
191 else
192 return 0;
193}
194
195static int hp_wmi_wwan_state(void)
196{
197 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
198
199 if (wireless & 0x1000000)
200 return 1;
201 else
202 return 0;
203}
204
205static ssize_t show_display(struct device *dev, struct device_attribute *attr,
206 char *buf)
207{
208 int value = hp_wmi_display_state();
209 if (value < 0)
210 return -EINVAL;
211 return sprintf(buf, "%d\n", value);
212}
213
214static ssize_t show_hddtemp(struct device *dev, struct device_attribute *attr,
215 char *buf)
216{
217 int value = hp_wmi_hddtemp_state();
218 if (value < 0)
219 return -EINVAL;
220 return sprintf(buf, "%d\n", value);
221}
222
223static ssize_t show_als(struct device *dev, struct device_attribute *attr,
224 char *buf)
225{
226 int value = hp_wmi_als_state();
227 if (value < 0)
228 return -EINVAL;
229 return sprintf(buf, "%d\n", value);
230}
231
232static ssize_t show_dock(struct device *dev, struct device_attribute *attr,
233 char *buf)
234{
235 int value = hp_wmi_dock_state();
236 if (value < 0)
237 return -EINVAL;
238 return sprintf(buf, "%d\n", value);
239}
240
241static ssize_t set_als(struct device *dev, struct device_attribute *attr,
242 const char *buf, size_t count)
243{
244 u32 tmp = simple_strtoul(buf, NULL, 10);
245 hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, tmp);
246 return count;
247}
248
249static DEVICE_ATTR(display, S_IRUGO, show_display, NULL);
250static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
251static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
252static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
253
254static struct key_entry *hp_wmi_get_entry_by_scancode(int code)
255{
256 struct key_entry *key;
257
258 for (key = hp_wmi_keymap; key->type != KE_END; key++)
259 if (code == key->code)
260 return key;
261
262 return NULL;
263}
264
265static struct key_entry *hp_wmi_get_entry_by_keycode(int keycode)
266{
267 struct key_entry *key;
268
269 for (key = hp_wmi_keymap; key->type != KE_END; key++)
270 if (key->type == KE_KEY && keycode == key->keycode)
271 return key;
272
273 return NULL;
274}
275
276static int hp_wmi_getkeycode(struct input_dev *dev, int scancode, int *keycode)
277{
278 struct key_entry *key = hp_wmi_get_entry_by_scancode(scancode);
279
280 if (key && key->type == KE_KEY) {
281 *keycode = key->keycode;
282 return 0;
283 }
284
285 return -EINVAL;
286}
287
288static int hp_wmi_setkeycode(struct input_dev *dev, int scancode, int keycode)
289{
290 struct key_entry *key;
291 int old_keycode;
292
293 if (keycode < 0 || keycode > KEY_MAX)
294 return -EINVAL;
295
296 key = hp_wmi_get_entry_by_scancode(scancode);
297 if (key && key->type == KE_KEY) {
298 old_keycode = key->keycode;
299 key->keycode = keycode;
300 set_bit(keycode, dev->keybit);
301 if (!hp_wmi_get_entry_by_keycode(old_keycode))
302 clear_bit(old_keycode, dev->keybit);
303 return 0;
304 }
305
306 return -EINVAL;
307}
308
309void hp_wmi_notify(u32 value, void *context)
310{
311 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
312 static struct key_entry *key;
313 union acpi_object *obj;
314
315 wmi_get_event_data(value, &response);
316
317 obj = (union acpi_object *)response.pointer;
318
319 if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 8) {
320 int eventcode = *((u8 *) obj->buffer.pointer);
321 key = hp_wmi_get_entry_by_scancode(eventcode);
322 if (key) {
323 switch (key->type) {
324 case KE_KEY:
325 input_report_key(hp_wmi_input_dev,
326 key->keycode, 1);
327 input_sync(hp_wmi_input_dev);
328 input_report_key(hp_wmi_input_dev,
329 key->keycode, 0);
330 input_sync(hp_wmi_input_dev);
331 break;
332 case KE_SW:
333 input_report_switch(hp_wmi_input_dev,
334 key->keycode,
335 hp_wmi_dock_state());
336 input_sync(hp_wmi_input_dev);
337 break;
338 }
339 } else if (eventcode == 0x5) {
340 if (wifi_rfkill)
341 wifi_rfkill->state = hp_wmi_wifi_state();
342 if (bluetooth_rfkill)
343 bluetooth_rfkill->state =
344 hp_wmi_bluetooth_state();
345 if (wwan_rfkill)
346 wwan_rfkill->state = hp_wmi_wwan_state();
347 } else
348 printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
349 eventcode);
350 } else
351 printk(KERN_INFO "HP WMI: Unknown response received\n");
352}
353
354static int __init hp_wmi_input_setup(void)
355{
356 struct key_entry *key;
357 int err;
358
359 hp_wmi_input_dev = input_allocate_device();
360
361 hp_wmi_input_dev->name = "HP WMI hotkeys";
362 hp_wmi_input_dev->phys = "wmi/input0";
363 hp_wmi_input_dev->id.bustype = BUS_HOST;
364 hp_wmi_input_dev->getkeycode = hp_wmi_getkeycode;
365 hp_wmi_input_dev->setkeycode = hp_wmi_setkeycode;
366
367 for (key = hp_wmi_keymap; key->type != KE_END; key++) {
368 switch (key->type) {
369 case KE_KEY:
370 set_bit(EV_KEY, hp_wmi_input_dev->evbit);
371 set_bit(key->keycode, hp_wmi_input_dev->keybit);
372 break;
373 case KE_SW:
374 set_bit(EV_SW, hp_wmi_input_dev->evbit);
375 set_bit(key->keycode, hp_wmi_input_dev->swbit);
376 break;
377 }
378 }
379
380 err = input_register_device(hp_wmi_input_dev);
381
382 if (err) {
383 input_free_device(hp_wmi_input_dev);
384 return err;
385 }
386
387 return 0;
388}
389
390static void cleanup_sysfs(struct platform_device *device)
391{
392 device_remove_file(&device->dev, &dev_attr_display);
393 device_remove_file(&device->dev, &dev_attr_hddtemp);
394 device_remove_file(&device->dev, &dev_attr_als);
395 device_remove_file(&device->dev, &dev_attr_dock);
396}
397
398static int __init hp_wmi_bios_setup(struct platform_device *device)
399{
400 int err;
401
402 err = device_create_file(&device->dev, &dev_attr_display);
403 if (err)
404 goto add_sysfs_error;
405 err = device_create_file(&device->dev, &dev_attr_hddtemp);
406 if (err)
407 goto add_sysfs_error;
408 err = device_create_file(&device->dev, &dev_attr_als);
409 if (err)
410 goto add_sysfs_error;
411 err = device_create_file(&device->dev, &dev_attr_dock);
412 if (err)
413 goto add_sysfs_error;
414
415 wifi_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN);
416 wifi_rfkill->name = "hp-wifi";
417 wifi_rfkill->state = hp_wmi_wifi_state();
418 wifi_rfkill->toggle_radio = hp_wmi_wifi_set;
419 wifi_rfkill->user_claim_unsupported = 1;
420
421 bluetooth_rfkill = rfkill_allocate(&device->dev,
422 RFKILL_TYPE_BLUETOOTH);
423 bluetooth_rfkill->name = "hp-bluetooth";
424 bluetooth_rfkill->state = hp_wmi_bluetooth_state();
425 bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set;
426 bluetooth_rfkill->user_claim_unsupported = 1;
427
428 wwan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WIMAX);
429 wwan_rfkill->name = "hp-wwan";
430 wwan_rfkill->state = hp_wmi_wwan_state();
431 wwan_rfkill->toggle_radio = hp_wmi_wwan_set;
432 wwan_rfkill->user_claim_unsupported = 1;
433
434 rfkill_register(wifi_rfkill);
435 rfkill_register(bluetooth_rfkill);
436 rfkill_register(wwan_rfkill);
437
438 return 0;
439add_sysfs_error:
440 cleanup_sysfs(device);
441 return err;
442}
443
444static int __exit hp_wmi_bios_remove(struct platform_device *device)
445{
446 cleanup_sysfs(device);
447
448 rfkill_unregister(wifi_rfkill);
449 rfkill_unregister(bluetooth_rfkill);
450 rfkill_unregister(wwan_rfkill);
451
452 return 0;
453}
454
455static int __init hp_wmi_init(void)
456{
457 int err;
458
459 if (wmi_has_guid(HPWMI_EVENT_GUID)) {
460 err = wmi_install_notify_handler(HPWMI_EVENT_GUID,
461 hp_wmi_notify, NULL);
462 if (!err)
463 hp_wmi_input_setup();
464 }
465
466 if (wmi_has_guid(HPWMI_BIOS_GUID)) {
467 err = platform_driver_register(&hp_wmi_driver);
468 if (err)
469 return 0;
470 hp_wmi_platform_dev = platform_device_alloc("hp-wmi", -1);
471 if (!hp_wmi_platform_dev) {
472 platform_driver_unregister(&hp_wmi_driver);
473 return 0;
474 }
475 platform_device_add(hp_wmi_platform_dev);
476 }
477
478 return 0;
479}
480
481static void __exit hp_wmi_exit(void)
482{
483 if (wmi_has_guid(HPWMI_EVENT_GUID)) {
484 wmi_remove_notify_handler(HPWMI_EVENT_GUID);
485 input_unregister_device(hp_wmi_input_dev);
486 }
487 if (hp_wmi_platform_dev) {
488 platform_device_del(hp_wmi_platform_dev);
489 platform_driver_unregister(&hp_wmi_driver);
490 }
491}
492
493module_init(hp_wmi_init);
494module_exit(hp_wmi_exit);
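
A hedged user-space sketch that reads the dock and ambient-light-sensor attributes the hp-wmi driver above creates. The path assumes the usual platform-device sysfs layout (/sys/devices/platform/hp-wmi/); adjust it if sysfs is laid out differently on your system.

#include <stdio.h>

/* read one integer sysfs attribute; returns 0 on success */
static int read_attr(const char *path, int *value)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (fscanf(f, "%d", value) != 1)
		*value = -1;
	fclose(f);
	return 0;
}

int main(void)
{
	int dock, als;

	if (read_attr("/sys/devices/platform/hp-wmi/dock", &dock) == 0)
		printf("dock state: %d\n", dock);
	if (read_attr("/sys/devices/platform/hp-wmi/als", &als) == 0)
		printf("ambient light sensor: %d\n", als);
	return 0;
}
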
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 4ce3bdc2f959..daf585689ce3 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -563,6 +563,6 @@ module_init(phantom_init);
563module_exit(phantom_exit); 563module_exit(phantom_exit);
564 564
565MODULE_AUTHOR("Jiri Slaby <jirislaby@gmail.com>"); 565MODULE_AUTHOR("Jiri Slaby <jirislaby@gmail.com>");
566MODULE_DESCRIPTION("Sensable Phantom driver"); 566MODULE_DESCRIPTION("Sensable Phantom driver (PCI devices)");
567MODULE_LICENSE("GPL"); 567MODULE_LICENSE("GPL");
568MODULE_VERSION(PHANTOM_VERSION); 568MODULE_VERSION(PHANTOM_VERSION);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 08256ed0d9a6..579b01ff82d4 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore)
229 int last_IRQ_count = 0; 229 int last_IRQ_count = 0;
230 int new_IRQ_count; 230 int new_IRQ_count;
231 int force_IRQ = 0; 231 int force_IRQ = 0;
232 cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
232 233
233 /* this thread was marked active by xpc_hb_init() */ 234 /* this thread was marked active by xpc_hb_init() */
234 235
235 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); 236 set_cpus_allowed_ptr(current, cpumask);
236 237
237 /* set our heartbeating to other partitions into motion */ 238 /* set our heartbeating to other partitions into motion */
238 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 239 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index b5969298f3d3..d3eb7903c346 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -21,7 +21,7 @@
21 * 02110-1301, USA. 21 * 02110-1301, USA.
22 */ 22 */
23 23
24#define TPACPI_VERSION "0.20" 24#define TPACPI_VERSION "0.21"
25#define TPACPI_SYSFS_VERSION 0x020200 25#define TPACPI_SYSFS_VERSION 0x020200
26 26
27/* 27/*
@@ -68,6 +68,7 @@
68#include <linux/hwmon-sysfs.h> 68#include <linux/hwmon-sysfs.h>
69#include <linux/input.h> 69#include <linux/input.h>
70#include <linux/leds.h> 70#include <linux/leds.h>
71#include <linux/rfkill.h>
71#include <asm/uaccess.h> 72#include <asm/uaccess.h>
72 73
73#include <linux/dmi.h> 74#include <linux/dmi.h>
@@ -144,6 +145,12 @@ enum {
144 145
145#define TPACPI_MAX_ACPI_ARGS 3 146#define TPACPI_MAX_ACPI_ARGS 3
146 147
148/* rfkill switches */
149enum {
150 TPACPI_RFK_BLUETOOTH_SW_ID = 0,
151 TPACPI_RFK_WWAN_SW_ID,
152};
153
147/* Debugging */ 154/* Debugging */
148#define TPACPI_LOG TPACPI_FILE ": " 155#define TPACPI_LOG TPACPI_FILE ": "
149#define TPACPI_ERR KERN_ERR TPACPI_LOG 156#define TPACPI_ERR KERN_ERR TPACPI_LOG
@@ -905,6 +912,43 @@ static int __init tpacpi_check_std_acpi_brightness_support(void)
905 return 0; 912 return 0;
906} 913}
907 914
915static int __init tpacpi_new_rfkill(const unsigned int id,
916 struct rfkill **rfk,
917 const enum rfkill_type rfktype,
918 const char *name,
919 int (*toggle_radio)(void *, enum rfkill_state),
920 int (*get_state)(void *, enum rfkill_state *))
921{
922 int res;
923 enum rfkill_state initial_state;
924
925 *rfk = rfkill_allocate(&tpacpi_pdev->dev, rfktype);
926 if (!*rfk) {
927 printk(TPACPI_ERR
928 "failed to allocate memory for rfkill class\n");
929 return -ENOMEM;
930 }
931
932 (*rfk)->name = name;
933 (*rfk)->get_state = get_state;
934 (*rfk)->toggle_radio = toggle_radio;
935
936 if (!get_state(NULL, &initial_state))
937 (*rfk)->state = initial_state;
938
939 res = rfkill_register(*rfk);
940 if (res < 0) {
941 printk(TPACPI_ERR
942 "failed to register %s rfkill switch: %d\n",
943 name, res);
944 rfkill_free(*rfk);
945 *rfk = NULL;
946 return res;
947 }
948
949 return 0;
950}
951
908/************************************************************************* 952/*************************************************************************
909 * thinkpad-acpi driver attributes 953 * thinkpad-acpi driver attributes
910 */ 954 */
@@ -1285,21 +1329,6 @@ static int hotkey_status_set(int status)
1285 return 0; 1329 return 0;
1286} 1330}
1287 1331
1288static void tpacpi_input_send_radiosw(void)
1289{
1290 int wlsw;
1291
1292 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
1293 mutex_lock(&tpacpi_inputdev_send_mutex);
1294
1295 input_report_switch(tpacpi_inputdev,
1296 SW_RFKILL_ALL, !!wlsw);
1297 input_sync(tpacpi_inputdev);
1298
1299 mutex_unlock(&tpacpi_inputdev_send_mutex);
1300 }
1301}
1302
1303static void tpacpi_input_send_tabletsw(void) 1332static void tpacpi_input_send_tabletsw(void)
1304{ 1333{
1305 int state; 1334 int state;
@@ -1921,6 +1950,30 @@ static struct attribute *hotkey_mask_attributes[] __initdata = {
1921 &dev_attr_hotkey_wakeup_hotunplug_complete.attr, 1950 &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
1922}; 1951};
1923 1952
1953static void bluetooth_update_rfk(void);
1954static void wan_update_rfk(void);
1955static void tpacpi_send_radiosw_update(void)
1956{
1957 int wlsw;
1958
1959 /* Sync these BEFORE sending any rfkill events */
1960 if (tp_features.bluetooth)
1961 bluetooth_update_rfk();
1962 if (tp_features.wan)
1963 wan_update_rfk();
1964
1965 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
1966 mutex_lock(&tpacpi_inputdev_send_mutex);
1967
1968 input_report_switch(tpacpi_inputdev,
1969 SW_RFKILL_ALL, !!wlsw);
1970 input_sync(tpacpi_inputdev);
1971
1972 mutex_unlock(&tpacpi_inputdev_send_mutex);
1973 }
1974 hotkey_radio_sw_notify_change();
1975}
1976
1924static void hotkey_exit(void) 1977static void hotkey_exit(void)
1925{ 1978{
1926#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 1979#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
@@ -2167,9 +2220,10 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2167 printk(TPACPI_INFO 2220 printk(TPACPI_INFO
2168 "radio switch found; radios are %s\n", 2221 "radio switch found; radios are %s\n",
2169 enabled(status, 0)); 2222 enabled(status, 0));
2223 }
2224 if (tp_features.hotkey_wlsw)
2170 res = add_to_attr_set(hotkey_dev_attributes, 2225 res = add_to_attr_set(hotkey_dev_attributes,
2171 &dev_attr_hotkey_radio_sw.attr); 2226 &dev_attr_hotkey_radio_sw.attr);
2172 }
2173 2227
2174 /* For X41t, X60t, X61t Tablets... */ 2228 /* For X41t, X60t, X61t Tablets... */
2175 if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) { 2229 if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
@@ -2287,7 +2341,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2287 tpacpi_inputdev->close = &hotkey_inputdev_close; 2341 tpacpi_inputdev->close = &hotkey_inputdev_close;
2288 2342
2289 hotkey_poll_setup_safe(1); 2343 hotkey_poll_setup_safe(1);
2290 tpacpi_input_send_radiosw(); 2344 tpacpi_send_radiosw_update();
2291 tpacpi_input_send_tabletsw(); 2345 tpacpi_input_send_tabletsw();
2292 2346
2293 return 0; 2347 return 0;
@@ -2419,8 +2473,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
2419 case 7: 2473 case 7:
2420 /* 0x7000-0x7FFF: misc */ 2474 /* 0x7000-0x7FFF: misc */
2421 if (tp_features.hotkey_wlsw && hkey == 0x7000) { 2475 if (tp_features.hotkey_wlsw && hkey == 0x7000) {
2422 tpacpi_input_send_radiosw(); 2476 tpacpi_send_radiosw_update();
2423 hotkey_radio_sw_notify_change();
2424 send_acpi_ev = 0; 2477 send_acpi_ev = 0;
2425 break; 2478 break;
2426 } 2479 }
@@ -2463,8 +2516,7 @@ static void hotkey_resume(void)
2463 printk(TPACPI_ERR 2516 printk(TPACPI_ERR
2464 "error while trying to read hot key mask " 2517 "error while trying to read hot key mask "
2465 "from firmware\n"); 2518 "from firmware\n");
2466 tpacpi_input_send_radiosw(); 2519 tpacpi_send_radiosw_update();
2467 hotkey_radio_sw_notify_change();
2468 hotkey_tablet_mode_notify_change(); 2520 hotkey_tablet_mode_notify_change();
2469 hotkey_wakeup_reason_notify_change(); 2521 hotkey_wakeup_reason_notify_change();
2470 hotkey_wakeup_hotunplug_complete_notify_change(); 2522 hotkey_wakeup_hotunplug_complete_notify_change();
@@ -2581,8 +2633,66 @@ enum {
2581 TP_ACPI_BLUETOOTH_UNK = 0x04, /* unknown function */ 2633 TP_ACPI_BLUETOOTH_UNK = 0x04, /* unknown function */
2582}; 2634};
2583 2635
2584static int bluetooth_get_radiosw(void); 2636static struct rfkill *tpacpi_bluetooth_rfkill;
2585static int bluetooth_set_radiosw(int radio_on); 2637
2638static int bluetooth_get_radiosw(void)
2639{
2640 int status;
2641
2642 if (!tp_features.bluetooth)
2643 return -ENODEV;
2644
2645 /* WLSW overrides bluetooth in firmware/hardware, reflect that */
2646 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
2647 return RFKILL_STATE_HARD_BLOCKED;
2648
2649 if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
2650 return -EIO;
2651
2652 return ((status & TP_ACPI_BLUETOOTH_RADIOSSW) != 0) ?
2653 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
2654}
2655
2656static void bluetooth_update_rfk(void)
2657{
2658 int status;
2659
2660 if (!tpacpi_bluetooth_rfkill)
2661 return;
2662
2663 status = bluetooth_get_radiosw();
2664 if (status < 0)
2665 return;
2666 rfkill_force_state(tpacpi_bluetooth_rfkill, status);
2667}
2668
2669static int bluetooth_set_radiosw(int radio_on, int update_rfk)
2670{
2671 int status;
2672
2673 if (!tp_features.bluetooth)
2674 return -ENODEV;
2675
2676 /* WLSW overrides bluetooth in firmware/hardware, but there is no
2677 * reason to risk weird behaviour. */
2678 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
2679 && radio_on)
2680 return -EPERM;
2681
2682 if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
2683 return -EIO;
2684 if (radio_on)
2685 status |= TP_ACPI_BLUETOOTH_RADIOSSW;
2686 else
2687 status &= ~TP_ACPI_BLUETOOTH_RADIOSSW;
2688 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
2689 return -EIO;
2690
2691 if (update_rfk)
2692 bluetooth_update_rfk();
2693
2694 return 0;
2695}
2586 2696
2587/* sysfs bluetooth enable ---------------------------------------------- */ 2697/* sysfs bluetooth enable ---------------------------------------------- */
2588static ssize_t bluetooth_enable_show(struct device *dev, 2698static ssize_t bluetooth_enable_show(struct device *dev,
@@ -2595,7 +2705,8 @@ static ssize_t bluetooth_enable_show(struct device *dev,
2595 if (status < 0) 2705 if (status < 0)
2596 return status; 2706 return status;
2597 2707
2598 return snprintf(buf, PAGE_SIZE, "%d\n", status ? 1 : 0); 2708 return snprintf(buf, PAGE_SIZE, "%d\n",
2709 (status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
2599} 2710}
2600 2711
2601static ssize_t bluetooth_enable_store(struct device *dev, 2712static ssize_t bluetooth_enable_store(struct device *dev,
@@ -2608,7 +2719,7 @@ static ssize_t bluetooth_enable_store(struct device *dev,
2608 if (parse_strtoul(buf, 1, &t)) 2719 if (parse_strtoul(buf, 1, &t))
2609 return -EINVAL; 2720 return -EINVAL;
2610 2721
2611 res = bluetooth_set_radiosw(t); 2722 res = bluetooth_set_radiosw(t, 1);
2612 2723
2613 return (res) ? res : count; 2724 return (res) ? res : count;
2614} 2725}
@@ -2628,6 +2739,31 @@ static const struct attribute_group bluetooth_attr_group = {
2628 .attrs = bluetooth_attributes, 2739 .attrs = bluetooth_attributes,
2629}; 2740};
2630 2741
2742static int tpacpi_bluetooth_rfk_get(void *data, enum rfkill_state *state)
2743{
2744 int bts = bluetooth_get_radiosw();
2745
2746 if (bts < 0)
2747 return bts;
2748
2749 *state = bts;
2750 return 0;
2751}
2752
2753static int tpacpi_bluetooth_rfk_set(void *data, enum rfkill_state state)
2754{
2755 return bluetooth_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
2756}
2757
2758static void bluetooth_exit(void)
2759{
2760 if (tpacpi_bluetooth_rfkill)
2761 rfkill_unregister(tpacpi_bluetooth_rfkill);
2762
2763 sysfs_remove_group(&tpacpi_pdev->dev.kobj,
2764 &bluetooth_attr_group);
2765}
2766
2631static int __init bluetooth_init(struct ibm_init_struct *iibm) 2767static int __init bluetooth_init(struct ibm_init_struct *iibm)
2632{ 2768{
2633 int res; 2769 int res;
@@ -2646,57 +2782,32 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
2646 str_supported(tp_features.bluetooth), 2782 str_supported(tp_features.bluetooth),
2647 status); 2783 status);
2648 2784
2649 if (tp_features.bluetooth) { 2785 if (tp_features.bluetooth &&
2650 if (!(status & TP_ACPI_BLUETOOTH_HWPRESENT)) { 2786 !(status & TP_ACPI_BLUETOOTH_HWPRESENT)) {
2651 /* no bluetooth hardware present in system */ 2787 /* no bluetooth hardware present in system */
2652 tp_features.bluetooth = 0; 2788 tp_features.bluetooth = 0;
2653 dbg_printk(TPACPI_DBG_INIT, 2789 dbg_printk(TPACPI_DBG_INIT,
2654 "bluetooth hardware not installed\n"); 2790 "bluetooth hardware not installed\n");
2655 } else {
2656 res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
2657 &bluetooth_attr_group);
2658 if (res)
2659 return res;
2660 }
2661 } 2791 }
2662 2792
2663 return (tp_features.bluetooth)? 0 : 1;
2664}
2665
2666static void bluetooth_exit(void)
2667{
2668 sysfs_remove_group(&tpacpi_pdev->dev.kobj,
2669 &bluetooth_attr_group);
2670}
2671
2672static int bluetooth_get_radiosw(void)
2673{
2674 int status;
2675
2676 if (!tp_features.bluetooth) 2793 if (!tp_features.bluetooth)
2677 return -ENODEV; 2794 return 1;
2678
2679 if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
2680 return -EIO;
2681
2682 return ((status & TP_ACPI_BLUETOOTH_RADIOSSW) != 0);
2683}
2684
2685static int bluetooth_set_radiosw(int radio_on)
2686{
2687 int status;
2688 2795
2689 if (!tp_features.bluetooth) 2796 res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
2690 return -ENODEV; 2797 &bluetooth_attr_group);
2798 if (res)
2799 return res;
2691 2800
2692 if (!acpi_evalf(hkey_handle, &status, "GBDC", "d")) 2801 res = tpacpi_new_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID,
2693 return -EIO; 2802 &tpacpi_bluetooth_rfkill,
2694 if (radio_on) 2803 RFKILL_TYPE_BLUETOOTH,
2695 status |= TP_ACPI_BLUETOOTH_RADIOSSW; 2804 "tpacpi_bluetooth_sw",
2696 else 2805 tpacpi_bluetooth_rfk_set,
2697 status &= ~TP_ACPI_BLUETOOTH_RADIOSSW; 2806 tpacpi_bluetooth_rfk_get);
2698 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status)) 2807 if (res) {
2699 return -EIO; 2808 bluetooth_exit();
2809 return res;
2810 }
2700 2811
2701 return 0; 2812 return 0;
2702} 2813}
@@ -2711,7 +2822,8 @@ static int bluetooth_read(char *p)
2711 len += sprintf(p + len, "status:\t\tnot supported\n"); 2822 len += sprintf(p + len, "status:\t\tnot supported\n");
2712 else { 2823 else {
2713 len += sprintf(p + len, "status:\t\t%s\n", 2824 len += sprintf(p + len, "status:\t\t%s\n",
2714 (status)? "enabled" : "disabled"); 2825 (status == RFKILL_STATE_UNBLOCKED) ?
2826 "enabled" : "disabled");
2715 len += sprintf(p + len, "commands:\tenable, disable\n"); 2827 len += sprintf(p + len, "commands:\tenable, disable\n");
2716 } 2828 }
2717 2829
@@ -2727,9 +2839,9 @@ static int bluetooth_write(char *buf)
2727 2839
2728 while ((cmd = next_cmd(&buf))) { 2840 while ((cmd = next_cmd(&buf))) {
2729 if (strlencmp(cmd, "enable") == 0) { 2841 if (strlencmp(cmd, "enable") == 0) {
2730 bluetooth_set_radiosw(1); 2842 bluetooth_set_radiosw(1, 1);
2731 } else if (strlencmp(cmd, "disable") == 0) { 2843 } else if (strlencmp(cmd, "disable") == 0) {
2732 bluetooth_set_radiosw(0); 2844 bluetooth_set_radiosw(0, 1);
2733 } else 2845 } else
2734 return -EINVAL; 2846 return -EINVAL;
2735 } 2847 }
@@ -2755,8 +2867,66 @@ enum {
2755 TP_ACPI_WANCARD_UNK = 0x04, /* unknown function */ 2867 TP_ACPI_WANCARD_UNK = 0x04, /* unknown function */
2756}; 2868};
2757 2869
2758static int wan_get_radiosw(void); 2870static struct rfkill *tpacpi_wan_rfkill;
2759static int wan_set_radiosw(int radio_on); 2871
2872static int wan_get_radiosw(void)
2873{
2874 int status;
2875
2876 if (!tp_features.wan)
2877 return -ENODEV;
2878
2879 /* WLSW overrides WWAN in firmware/hardware, reflect that */
2880 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
2881 return RFKILL_STATE_HARD_BLOCKED;
2882
2883 if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
2884 return -EIO;
2885
2886 return ((status & TP_ACPI_WANCARD_RADIOSSW) != 0) ?
2887 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
2888}
2889
2890static void wan_update_rfk(void)
2891{
2892 int status;
2893
2894 if (!tpacpi_wan_rfkill)
2895 return;
2896
2897 status = wan_get_radiosw();
2898 if (status < 0)
2899 return;
2900 rfkill_force_state(tpacpi_wan_rfkill, status);
2901}
2902
2903static int wan_set_radiosw(int radio_on, int update_rfk)
2904{
2905 int status;
2906
2907 if (!tp_features.wan)
2908 return -ENODEV;
2909
2910 /* WLSW overrides WWAN in firmware/hardware, but there is no
2911 * reason to risk weird behaviour. */
2912 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
2913 && radio_on)
2914 return -EPERM;
2915
2916 if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
2917 return -EIO;
2918 if (radio_on)
2919 status |= TP_ACPI_WANCARD_RADIOSSW;
2920 else
2921 status &= ~TP_ACPI_WANCARD_RADIOSSW;
2922 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
2923 return -EIO;
2924
2925 if (update_rfk)
2926 wan_update_rfk();
2927
2928 return 0;
2929}
2760 2930
2761/* sysfs wan enable ---------------------------------------------------- */ 2931/* sysfs wan enable ---------------------------------------------------- */
2762static ssize_t wan_enable_show(struct device *dev, 2932static ssize_t wan_enable_show(struct device *dev,
@@ -2769,7 +2939,8 @@ static ssize_t wan_enable_show(struct device *dev,
2769 if (status < 0) 2939 if (status < 0)
2770 return status; 2940 return status;
2771 2941
2772 return snprintf(buf, PAGE_SIZE, "%d\n", status ? 1 : 0); 2942 return snprintf(buf, PAGE_SIZE, "%d\n",
2943 (status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
2773} 2944}
2774 2945
2775static ssize_t wan_enable_store(struct device *dev, 2946static ssize_t wan_enable_store(struct device *dev,
@@ -2782,7 +2953,7 @@ static ssize_t wan_enable_store(struct device *dev,
2782 if (parse_strtoul(buf, 1, &t)) 2953 if (parse_strtoul(buf, 1, &t))
2783 return -EINVAL; 2954 return -EINVAL;
2784 2955
2785 res = wan_set_radiosw(t); 2956 res = wan_set_radiosw(t, 1);
2786 2957
2787 return (res) ? res : count; 2958 return (res) ? res : count;
2788} 2959}
@@ -2802,6 +2973,31 @@ static const struct attribute_group wan_attr_group = {
2802 .attrs = wan_attributes, 2973 .attrs = wan_attributes,
2803}; 2974};
2804 2975
2976static int tpacpi_wan_rfk_get(void *data, enum rfkill_state *state)
2977{
2978 int wans = wan_get_radiosw();
2979
2980 if (wans < 0)
2981 return wans;
2982
2983 *state = wans;
2984 return 0;
2985}
2986
2987static int tpacpi_wan_rfk_set(void *data, enum rfkill_state state)
2988{
2989 return wan_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
2990}
2991
2992static void wan_exit(void)
2993{
2994 if (tpacpi_wan_rfkill)
2995 rfkill_unregister(tpacpi_wan_rfkill);
2996
2997 sysfs_remove_group(&tpacpi_pdev->dev.kobj,
2998 &wan_attr_group);
2999}
3000
2805static int __init wan_init(struct ibm_init_struct *iibm) 3001static int __init wan_init(struct ibm_init_struct *iibm)
2806{ 3002{
2807 int res; 3003 int res;
@@ -2818,57 +3014,32 @@ static int __init wan_init(struct ibm_init_struct *iibm)
2818 str_supported(tp_features.wan), 3014 str_supported(tp_features.wan),
2819 status); 3015 status);
2820 3016
2821 if (tp_features.wan) { 3017 if (tp_features.wan &&
2822 if (!(status & TP_ACPI_WANCARD_HWPRESENT)) { 3018 !(status & TP_ACPI_WANCARD_HWPRESENT)) {
2823 /* no wan hardware present in system */ 3019 /* no wan hardware present in system */
2824 tp_features.wan = 0; 3020 tp_features.wan = 0;
2825 dbg_printk(TPACPI_DBG_INIT, 3021 dbg_printk(TPACPI_DBG_INIT,
2826 "wan hardware not installed\n"); 3022 "wan hardware not installed\n");
2827 } else {
2828 res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
2829 &wan_attr_group);
2830 if (res)
2831 return res;
2832 }
2833 } 3023 }
2834 3024
2835 return (tp_features.wan)? 0 : 1;
2836}
2837
2838static void wan_exit(void)
2839{
2840 sysfs_remove_group(&tpacpi_pdev->dev.kobj,
2841 &wan_attr_group);
2842}
2843
2844static int wan_get_radiosw(void)
2845{
2846 int status;
2847
2848 if (!tp_features.wan) 3025 if (!tp_features.wan)
2849 return -ENODEV; 3026 return 1;
2850
2851 if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
2852 return -EIO;
2853
2854 return ((status & TP_ACPI_WANCARD_RADIOSSW) != 0);
2855}
2856
2857static int wan_set_radiosw(int radio_on)
2858{
2859 int status;
2860 3027
2861 if (!tp_features.wan) 3028 res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
2862 return -ENODEV; 3029 &wan_attr_group);
3030 if (res)
3031 return res;
2863 3032
2864 if (!acpi_evalf(hkey_handle, &status, "GWAN", "d")) 3033 res = tpacpi_new_rfkill(TPACPI_RFK_WWAN_SW_ID,
2865 return -EIO; 3034 &tpacpi_wan_rfkill,
2866 if (radio_on) 3035 RFKILL_TYPE_WWAN,
2867 status |= TP_ACPI_WANCARD_RADIOSSW; 3036 "tpacpi_wwan_sw",
2868 else 3037 tpacpi_wan_rfk_set,
2869 status &= ~TP_ACPI_WANCARD_RADIOSSW; 3038 tpacpi_wan_rfk_get);
2870 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status)) 3039 if (res) {
2871 return -EIO; 3040 wan_exit();
3041 return res;
3042 }
2872 3043
2873 return 0; 3044 return 0;
2874} 3045}
@@ -2883,7 +3054,8 @@ static int wan_read(char *p)
2883 len += sprintf(p + len, "status:\t\tnot supported\n"); 3054 len += sprintf(p + len, "status:\t\tnot supported\n");
2884 else { 3055 else {
2885 len += sprintf(p + len, "status:\t\t%s\n", 3056 len += sprintf(p + len, "status:\t\t%s\n",
2886 (status)? "enabled" : "disabled"); 3057 (status == RFKILL_STATE_UNBLOCKED) ?
3058 "enabled" : "disabled");
2887 len += sprintf(p + len, "commands:\tenable, disable\n"); 3059 len += sprintf(p + len, "commands:\tenable, disable\n");
2888 } 3060 }
2889 3061
@@ -2899,9 +3071,9 @@ static int wan_write(char *buf)
2899 3071
2900 while ((cmd = next_cmd(&buf))) { 3072 while ((cmd = next_cmd(&buf))) {
2901 if (strlencmp(cmd, "enable") == 0) { 3073 if (strlencmp(cmd, "enable") == 0) {
2902 wan_set_radiosw(1); 3074 wan_set_radiosw(1, 1);
2903 } else if (strlencmp(cmd, "disable") == 0) { 3075 } else if (strlencmp(cmd, "disable") == 0) {
2904 wan_set_radiosw(0); 3076 wan_set_radiosw(0, 1);
2905 } else 3077 } else
2906 return -EINVAL; 3078 return -EINVAL;
2907 } 3079 }
@@ -6168,13 +6340,18 @@ err_out:
6168 6340
6169/* Probing */ 6341/* Probing */
6170 6342
6171static void __init get_thinkpad_model_data(struct thinkpad_id_data *tp) 6343/* returns 0 - probe ok, or < 0 - probe error.
6344 * Probe ok doesn't mean thinkpad found.
6345 * On error, kfree() cleanup on tp->* is not performed, caller must do it */
6346static int __must_check __init get_thinkpad_model_data(
6347 struct thinkpad_id_data *tp)
6172{ 6348{
6173 const struct dmi_device *dev = NULL; 6349 const struct dmi_device *dev = NULL;
6174 char ec_fw_string[18]; 6350 char ec_fw_string[18];
6351 char const *s;
6175 6352
6176 if (!tp) 6353 if (!tp)
6177 return; 6354 return -EINVAL;
6178 6355
6179 memset(tp, 0, sizeof(*tp)); 6356 memset(tp, 0, sizeof(*tp));
6180 6357
@@ -6183,12 +6360,14 @@ static void __init get_thinkpad_model_data(struct thinkpad_id_data *tp)
6183 else if (dmi_name_in_vendors("LENOVO")) 6360 else if (dmi_name_in_vendors("LENOVO"))
6184 tp->vendor = PCI_VENDOR_ID_LENOVO; 6361 tp->vendor = PCI_VENDOR_ID_LENOVO;
6185 else 6362 else
6186 return; 6363 return 0;
6187 6364
6188 tp->bios_version_str = kstrdup(dmi_get_system_info(DMI_BIOS_VERSION), 6365 s = dmi_get_system_info(DMI_BIOS_VERSION);
6189 GFP_KERNEL); 6366 tp->bios_version_str = kstrdup(s, GFP_KERNEL);
6367 if (s && !tp->bios_version_str)
6368 return -ENOMEM;
6190 if (!tp->bios_version_str) 6369 if (!tp->bios_version_str)
6191 return; 6370 return 0;
6192 tp->bios_model = tp->bios_version_str[0] 6371 tp->bios_model = tp->bios_version_str[0]
6193 | (tp->bios_version_str[1] << 8); 6372 | (tp->bios_version_str[1] << 8);
6194 6373
@@ -6207,21 +6386,27 @@ static void __init get_thinkpad_model_data(struct thinkpad_id_data *tp)
6207 ec_fw_string[strcspn(ec_fw_string, " ]")] = 0; 6386 ec_fw_string[strcspn(ec_fw_string, " ]")] = 0;
6208 6387
6209 tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL); 6388 tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
6389 if (!tp->ec_version_str)
6390 return -ENOMEM;
6210 tp->ec_model = ec_fw_string[0] 6391 tp->ec_model = ec_fw_string[0]
6211 | (ec_fw_string[1] << 8); 6392 | (ec_fw_string[1] << 8);
6212 break; 6393 break;
6213 } 6394 }
6214 } 6395 }
6215 6396
6216 tp->model_str = kstrdup(dmi_get_system_info(DMI_PRODUCT_VERSION), 6397 s = dmi_get_system_info(DMI_PRODUCT_VERSION);
6217 GFP_KERNEL); 6398 if (s && !strnicmp(s, "ThinkPad", 8)) {
6218 if (tp->model_str && strnicmp(tp->model_str, "ThinkPad", 8) != 0) { 6399 tp->model_str = kstrdup(s, GFP_KERNEL);
6219 kfree(tp->model_str); 6400 if (!tp->model_str)
6220 tp->model_str = NULL; 6401 return -ENOMEM;
6221 } 6402 }
6222 6403
6223 tp->nummodel_str = kstrdup(dmi_get_system_info(DMI_PRODUCT_NAME), 6404 s = dmi_get_system_info(DMI_PRODUCT_NAME);
6224 GFP_KERNEL); 6405 tp->nummodel_str = kstrdup(s, GFP_KERNEL);
6406 if (s && !tp->nummodel_str)
6407 return -ENOMEM;
6408
6409 return 0;
6225} 6410}
6226 6411
6227static int __init probe_for_thinkpad(void) 6412static int __init probe_for_thinkpad(void)
@@ -6484,7 +6669,13 @@ static int __init thinkpad_acpi_module_init(void)
6484 6669
6485 /* Driver-level probe */ 6670 /* Driver-level probe */
6486 6671
6487 get_thinkpad_model_data(&thinkpad_id); 6672 ret = get_thinkpad_model_data(&thinkpad_id);
6673 if (ret) {
6674 printk(TPACPI_ERR
6675 "unable to get DMI data: %d\n", ret);
6676 thinkpad_acpi_module_exit();
6677 return ret;
6678 }
6488 ret = probe_for_thinkpad(); 6679 ret = probe_for_thinkpad();
6489 if (ret) { 6680 if (ret) {
6490 thinkpad_acpi_module_exit(); 6681 thinkpad_acpi_module_exit();
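The thinkpad-acpi hunks above convert the bluetooth and WWAN radio controls to rfkill: the *_get_radiosw() helpers now return rfkill states (reporting RFKILL_STATE_HARD_BLOCKED when the WLSW hardware kill switch is off), the *_set_radiosw() helpers gain an update_rfk flag so sysfs/procfs writes also refresh the registered switch via rfkill_force_state(), and the init paths register the switches through a driver-local tpacpi_new_rfkill() helper whose body is not part of these hunks. The sketch below is one plausible shape for such a helper against the rfkill API of this kernel generation (rfkill_allocate()/rfkill_register() with toggle_radio and get_state callbacks); everything beyond what the diff shows is an assumption, not the driver's actual code.

/* Sketch only -- a stand-in for the tpacpi_new_rfkill() helper referenced
 * above; the real helper lives elsewhere in this patch. */
#include <linux/device.h>
#include <linux/rfkill.h>

static int example_new_rfkill(struct device *parent, struct rfkill **rfk,
			      enum rfkill_type type, const char *name,
			      int (*toggle_radio)(void *, enum rfkill_state),
			      int (*get_state)(void *, enum rfkill_state *))
{
	enum rfkill_state initial = RFKILL_STATE_UNBLOCKED;
	int res;

	*rfk = rfkill_allocate(parent, type);
	if (!*rfk)
		return -ENOMEM;

	(*rfk)->name = name;
	(*rfk)->toggle_radio = toggle_radio;
	(*rfk)->get_state = get_state;

	/* seed the state from firmware before registering */
	if (get_state && !get_state(NULL, &initial))
		(*rfk)->state = initial;

	res = rfkill_register(*rfk);
	if (res) {
		rfkill_free(*rfk);
		*rfk = NULL;
	}
	return res;
}

bluetooth_init() and wan_init() would then pass tpacpi_bluetooth_rfk_set/get and tpacpi_wan_rfk_set/get as the two callbacks, matching the calls shown in the diff.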
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index d6b9b486417c..a067fe436301 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -21,13 +21,17 @@
21#define RESULT_UNSUP_HOST 2 21#define RESULT_UNSUP_HOST 2
22#define RESULT_UNSUP_CARD 3 22#define RESULT_UNSUP_CARD 3
23 23
24#define BUFFER_SIZE (PAGE_SIZE * 4) 24#define BUFFER_ORDER 2
25#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
25 26
26struct mmc_test_card { 27struct mmc_test_card {
27 struct mmc_card *card; 28 struct mmc_card *card;
28 29
29 u8 scratch[BUFFER_SIZE]; 30 u8 scratch[BUFFER_SIZE];
30 u8 *buffer; 31 u8 *buffer;
32#ifdef CONFIG_HIGHMEM
33 struct page *highmem;
34#endif
31}; 35};
32 36
33/*******************************************************************/ 37/*******************************************************************/
@@ -384,14 +388,16 @@ static int mmc_test_transfer(struct mmc_test_card *test,
384 int ret, i; 388 int ret, i;
385 unsigned long flags; 389 unsigned long flags;
386 390
391 BUG_ON(blocks * blksz > BUFFER_SIZE);
392
387 if (write) { 393 if (write) {
388 for (i = 0;i < blocks * blksz;i++) 394 for (i = 0;i < blocks * blksz;i++)
389 test->scratch[i] = i; 395 test->scratch[i] = i;
390 } else { 396 } else {
391 memset(test->scratch, 0, BUFFER_SIZE); 397 memset(test->scratch, 0, blocks * blksz);
392 } 398 }
393 local_irq_save(flags); 399 local_irq_save(flags);
394 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 400 sg_copy_from_buffer(sg, sg_len, test->scratch, blocks * blksz);
395 local_irq_restore(flags); 401 local_irq_restore(flags);
396 402
397 ret = mmc_test_set_blksize(test, blksz); 403 ret = mmc_test_set_blksize(test, blksz);
@@ -438,7 +444,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
438 } 444 }
439 } else { 445 } else {
440 local_irq_save(flags); 446 local_irq_save(flags);
441 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 447 sg_copy_to_buffer(sg, sg_len, test->scratch, blocks * blksz);
442 local_irq_restore(flags); 448 local_irq_restore(flags);
443 for (i = 0;i < blocks * blksz;i++) { 449 for (i = 0;i < blocks * blksz;i++) {
444 if (test->scratch[i] != (u8)i) 450 if (test->scratch[i] != (u8)i)
@@ -799,6 +805,157 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
799 return 0; 805 return 0;
800} 806}
801 807
808static int mmc_test_bigsg_write(struct mmc_test_card *test)
809{
810 int ret;
811 unsigned int size;
812 struct scatterlist sg;
813
814 if (test->card->host->max_blk_count == 1)
815 return RESULT_UNSUP_HOST;
816
817 size = PAGE_SIZE * 2;
818 size = min(size, test->card->host->max_req_size);
819 size = min(size, test->card->host->max_seg_size);
820 size = min(size, test->card->host->max_blk_count * 512);
821
822 memset(test->buffer, 0, BUFFER_SIZE);
823
824 if (size < 1024)
825 return RESULT_UNSUP_HOST;
826
827 sg_init_table(&sg, 1);
828 sg_init_one(&sg, test->buffer, BUFFER_SIZE);
829
830 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
831 if (ret)
832 return ret;
833
834 return 0;
835}
836
837static int mmc_test_bigsg_read(struct mmc_test_card *test)
838{
839 int ret, i;
840 unsigned int size;
841 struct scatterlist sg;
842
843 if (test->card->host->max_blk_count == 1)
844 return RESULT_UNSUP_HOST;
845
846 size = PAGE_SIZE * 2;
847 size = min(size, test->card->host->max_req_size);
848 size = min(size, test->card->host->max_seg_size);
849 size = min(size, test->card->host->max_blk_count * 512);
850
851 if (size < 1024)
852 return RESULT_UNSUP_HOST;
853
854 memset(test->buffer, 0xCD, BUFFER_SIZE);
855
856 sg_init_table(&sg, 1);
857 sg_init_one(&sg, test->buffer, BUFFER_SIZE);
858 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
859 if (ret)
860 return ret;
861
862 /* mmc_test_transfer() doesn't check for read overflows */
863 for (i = size;i < BUFFER_SIZE;i++) {
864 if (test->buffer[i] != 0xCD)
865 return RESULT_FAIL;
866 }
867
868 return 0;
869}
870
871#ifdef CONFIG_HIGHMEM
872
873static int mmc_test_write_high(struct mmc_test_card *test)
874{
875 int ret;
876 struct scatterlist sg;
877
878 sg_init_table(&sg, 1);
879 sg_set_page(&sg, test->highmem, 512, 0);
880
881 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
882 if (ret)
883 return ret;
884
885 return 0;
886}
887
888static int mmc_test_read_high(struct mmc_test_card *test)
889{
890 int ret;
891 struct scatterlist sg;
892
893 sg_init_table(&sg, 1);
894 sg_set_page(&sg, test->highmem, 512, 0);
895
896 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
897 if (ret)
898 return ret;
899
900 return 0;
901}
902
903static int mmc_test_multi_write_high(struct mmc_test_card *test)
904{
905 int ret;
906 unsigned int size;
907 struct scatterlist sg;
908
909 if (test->card->host->max_blk_count == 1)
910 return RESULT_UNSUP_HOST;
911
912 size = PAGE_SIZE * 2;
913 size = min(size, test->card->host->max_req_size);
914 size = min(size, test->card->host->max_seg_size);
915 size = min(size, test->card->host->max_blk_count * 512);
916
917 if (size < 1024)
918 return RESULT_UNSUP_HOST;
919
920 sg_init_table(&sg, 1);
921 sg_set_page(&sg, test->highmem, size, 0);
922
923 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
924 if (ret)
925 return ret;
926
927 return 0;
928}
929
930static int mmc_test_multi_read_high(struct mmc_test_card *test)
931{
932 int ret;
933 unsigned int size;
934 struct scatterlist sg;
935
936 if (test->card->host->max_blk_count == 1)
937 return RESULT_UNSUP_HOST;
938
939 size = PAGE_SIZE * 2;
940 size = min(size, test->card->host->max_req_size);
941 size = min(size, test->card->host->max_seg_size);
942 size = min(size, test->card->host->max_blk_count * 512);
943
944 if (size < 1024)
945 return RESULT_UNSUP_HOST;
946
947 sg_init_table(&sg, 1);
948 sg_set_page(&sg, test->highmem, size, 0);
949
950 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
951 if (ret)
952 return ret;
953
954 return 0;
955}
956
957#endif /* CONFIG_HIGHMEM */
958
802static const struct mmc_test_case mmc_test_cases[] = { 959static const struct mmc_test_case mmc_test_cases[] = {
803 { 960 {
804 .name = "Basic write (no data verification)", 961 .name = "Basic write (no data verification)",
@@ -913,6 +1070,53 @@ static const struct mmc_test_case mmc_test_cases[] = {
913 .name = "Correct xfer_size at read (midway failure)", 1070 .name = "Correct xfer_size at read (midway failure)",
914 .run = mmc_test_multi_xfersize_read, 1071 .run = mmc_test_multi_xfersize_read,
915 }, 1072 },
1073
1074 {
1075 .name = "Over-sized SG list write",
1076 .prepare = mmc_test_prepare_write,
1077 .run = mmc_test_bigsg_write,
1078 .cleanup = mmc_test_cleanup,
1079 },
1080
1081 {
1082 .name = "Over-sized SG list read",
1083 .prepare = mmc_test_prepare_read,
1084 .run = mmc_test_bigsg_read,
1085 .cleanup = mmc_test_cleanup,
1086 },
1087
1088#ifdef CONFIG_HIGHMEM
1089
1090 {
1091 .name = "Highmem write",
1092 .prepare = mmc_test_prepare_write,
1093 .run = mmc_test_write_high,
1094 .cleanup = mmc_test_cleanup,
1095 },
1096
1097 {
1098 .name = "Highmem read",
1099 .prepare = mmc_test_prepare_read,
1100 .run = mmc_test_read_high,
1101 .cleanup = mmc_test_cleanup,
1102 },
1103
1104 {
1105 .name = "Multi-block highmem write",
1106 .prepare = mmc_test_prepare_write,
1107 .run = mmc_test_multi_write_high,
1108 .cleanup = mmc_test_cleanup,
1109 },
1110
1111 {
1112 .name = "Multi-block highmem read",
1113 .prepare = mmc_test_prepare_read,
1114 .run = mmc_test_multi_read_high,
1115 .cleanup = mmc_test_cleanup,
1116 },
1117
1118#endif /* CONFIG_HIGHMEM */
1119
916}; 1120};
917 1121
918static struct mutex mmc_test_lock; 1122static struct mutex mmc_test_lock;
@@ -1014,12 +1218,23 @@ static ssize_t mmc_test_store(struct device *dev,
1014 test->card = card; 1218 test->card = card;
1015 1219
1016 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 1220 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
1221#ifdef CONFIG_HIGHMEM
1222 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
1223#endif
1224
1225#ifdef CONFIG_HIGHMEM
1226 if (test->buffer && test->highmem) {
1227#else
1017 if (test->buffer) { 1228 if (test->buffer) {
1229#endif
1018 mutex_lock(&mmc_test_lock); 1230 mutex_lock(&mmc_test_lock);
1019 mmc_test_run(test, testcase); 1231 mmc_test_run(test, testcase);
1020 mutex_unlock(&mmc_test_lock); 1232 mutex_unlock(&mmc_test_lock);
1021 } 1233 }
1022 1234
1235#ifdef CONFIG_HIGHMEM
1236 __free_pages(test->highmem, BUFFER_ORDER);
1237#endif
1023 kfree(test->buffer); 1238 kfree(test->buffer);
1024 kfree(test); 1239 kfree(test);
1025 1240
@@ -1041,6 +1256,8 @@ static int mmc_test_probe(struct mmc_card *card)
1041 if (ret) 1256 if (ret)
1042 return ret; 1257 return ret;
1043 1258
1259 dev_info(&card->dev, "Card claimed for testing.\n");
1260
1044 return 0; 1261 return 0;
1045} 1262}
1046 1263
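The new mmc_test cases exercise memory the test code cannot address directly: the highmem page set is allocated with alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER) and handed to mmc_test_transfer() via sg_set_page(), so only the host driver decides how to reach it (DMA, bounce buffer, or a temporary kernel mapping). Below is a minimal sketch of the two sides of that pattern, assuming a 2.6.2x-era kmap_atomic() that still takes a KM slot; the names are illustrative, not from the patch.

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* CPU access to a page that may have no permanent kernel mapping */
static void example_fill_page(struct page *page, u8 pattern)
{
	u8 *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, pattern, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}

/* Describe the same page for the MMC layer without mapping it */
static void example_sg_for_page(struct scatterlist *sg, struct page *page,
				unsigned int len)
{
	sg_init_table(sg, 1);
	sg_set_page(sg, page, len, 0);	/* offset 0, len bytes */
}

This is also why mmc_test_transfer() gains the BUG_ON(blocks * blksz > BUFFER_SIZE) guard: the scratch comparison buffer is still a fixed-size lowmem array.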
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731ddefdc1b..3dee97e7d165 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
148 printk(KERN_WARNING "%s: unable to allocate " 148 printk(KERN_WARNING "%s: unable to allocate "
149 "bounce buffer\n", mmc_card_name(card)); 149 "bounce buffer\n", mmc_card_name(card));
150 } else { 150 } else {
151 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); 151 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
152 blk_queue_max_sectors(mq->queue, bouncesz / 512); 152 blk_queue_max_sectors(mq->queue, bouncesz / 512);
153 blk_queue_max_phys_segments(mq->queue, bouncesz / 512); 153 blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
154 blk_queue_max_hw_segments(mq->queue, bouncesz / 512); 154 blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
290 } 290 }
291} 291}
292 292
293static void copy_sg(struct scatterlist *dst, unsigned int dst_len, 293/*
294 struct scatterlist *src, unsigned int src_len) 294 * Prepare the sg list(s) to be handed off to the host driver
295{ 295 */
296 unsigned int chunk;
297 char *dst_buf, *src_buf;
298 unsigned int dst_size, src_size;
299
300 dst_buf = NULL;
301 src_buf = NULL;
302 dst_size = 0;
303 src_size = 0;
304
305 while (src_len) {
306 BUG_ON(dst_len == 0);
307
308 if (dst_size == 0) {
309 dst_buf = sg_virt(dst);
310 dst_size = dst->length;
311 }
312
313 if (src_size == 0) {
314 src_buf = sg_virt(src);
315 src_size = src->length;
316 }
317
318 chunk = min(dst_size, src_size);
319
320 memcpy(dst_buf, src_buf, chunk);
321
322 dst_buf += chunk;
323 src_buf += chunk;
324 dst_size -= chunk;
325 src_size -= chunk;
326
327 if (dst_size == 0) {
328 dst++;
329 dst_len--;
330 }
331
332 if (src_size == 0) {
333 src++;
334 src_len--;
335 }
336 }
337}
338
339unsigned int mmc_queue_map_sg(struct mmc_queue *mq) 296unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
340{ 297{
341 unsigned int sg_len; 298 unsigned int sg_len;
299 size_t buflen;
300 struct scatterlist *sg;
301 int i;
342 302
343 if (!mq->bounce_buf) 303 if (!mq->bounce_buf)
344 return blk_rq_map_sg(mq->queue, mq->req, mq->sg); 304 return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
349 309
350 mq->bounce_sg_len = sg_len; 310 mq->bounce_sg_len = sg_len;
351 311
352 /* 312 buflen = 0;
353 * Shortcut in the event we only get a single entry. 313 for_each_sg(mq->bounce_sg, sg, sg_len, i)
354 */ 314 buflen += sg->length;
355 if (sg_len == 1) {
356 memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
357 return 1;
358 }
359 315
360 sg_init_one(mq->sg, mq->bounce_buf, 0); 316 sg_init_one(mq->sg, mq->bounce_buf, buflen);
361
362 while (sg_len) {
363 mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
364 sg_len--;
365 }
366 317
367 return 1; 318 return 1;
368} 319}
369 320
321/*
322 * If writing, bounce the data to the buffer before the request
323 * is sent to the host driver
324 */
370void mmc_queue_bounce_pre(struct mmc_queue *mq) 325void mmc_queue_bounce_pre(struct mmc_queue *mq)
371{ 326{
327 unsigned long flags;
328
372 if (!mq->bounce_buf) 329 if (!mq->bounce_buf)
373 return; 330 return;
374 331
375 if (mq->bounce_sg_len == 1)
376 return;
377 if (rq_data_dir(mq->req) != WRITE) 332 if (rq_data_dir(mq->req) != WRITE)
378 return; 333 return;
379 334
380 copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len); 335 local_irq_save(flags);
336 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
337 mq->bounce_buf, mq->sg[0].length);
338 local_irq_restore(flags);
381} 339}
382 340
341/*
342 * If reading, bounce the data from the buffer after the request
343 * has been handled by the host driver
344 */
383void mmc_queue_bounce_post(struct mmc_queue *mq) 345void mmc_queue_bounce_post(struct mmc_queue *mq)
384{ 346{
347 unsigned long flags;
348
385 if (!mq->bounce_buf) 349 if (!mq->bounce_buf)
386 return; 350 return;
387 351
388 if (mq->bounce_sg_len == 1)
389 return;
390 if (rq_data_dir(mq->req) != READ) 352 if (rq_data_dir(mq->req) != READ)
391 return; 353 return;
392 354
393 copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1); 355 local_irq_save(flags);
356 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
357 mq->bounce_buf, mq->sg[0].length);
358 local_irq_restore(flags);
394} 359}
395 360
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 3f15eb204895..99b20917cc0f 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1043,7 +1043,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
1043 goto out6; 1043 goto out6;
1044 } 1044 }
1045 1045
1046 platform_set_drvdata(pdev, mmc); 1046 platform_set_drvdata(pdev, host);
1047 1047
1048 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" 1048 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
1049 " (mode=%s)\n", pdev->id, host->iobase, 1049 " (mode=%s)\n", pdev->id, host->iobase,
@@ -1087,13 +1087,10 @@ out0:
1087 1087
1088static int __devexit au1xmmc_remove(struct platform_device *pdev) 1088static int __devexit au1xmmc_remove(struct platform_device *pdev)
1089{ 1089{
1090 struct mmc_host *mmc = platform_get_drvdata(pdev); 1090 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1091 struct au1xmmc_host *host;
1092
1093 if (mmc) {
1094 host = mmc_priv(mmc);
1095 1091
1096 mmc_remove_host(mmc); 1092 if (host) {
1093 mmc_remove_host(host->mmc);
1097 1094
1098#ifdef CONFIG_LEDS_CLASS 1095#ifdef CONFIG_LEDS_CLASS
1099 if (host->platdata && host->platdata->led) 1096 if (host->platdata && host->platdata->led)
@@ -1101,8 +1098,8 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
1101#endif 1098#endif
1102 1099
1103 if (host->platdata && host->platdata->cd_setup && 1100 if (host->platdata && host->platdata->cd_setup &&
1104 !(mmc->caps & MMC_CAP_NEEDS_POLL)) 1101 !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
1105 host->platdata->cd_setup(mmc, 0); 1102 host->platdata->cd_setup(host->mmc, 0);
1106 1103
1107 au_writel(0, HOST_ENABLE(host)); 1104 au_writel(0, HOST_ENABLE(host));
1108 au_writel(0, HOST_CONFIG(host)); 1105 au_writel(0, HOST_CONFIG(host));
@@ -1122,16 +1119,49 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
1122 release_resource(host->ioarea); 1119 release_resource(host->ioarea);
1123 kfree(host->ioarea); 1120 kfree(host->ioarea);
1124 1121
1125 mmc_free_host(mmc); 1122 mmc_free_host(host->mmc);
1123 platform_set_drvdata(pdev, NULL);
1126 } 1124 }
1127 return 0; 1125 return 0;
1128} 1126}
1129 1127
1128#ifdef CONFIG_PM
1129static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1130{
1131 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1132 int ret;
1133
1134 ret = mmc_suspend_host(host->mmc, state);
1135 if (ret)
1136 return ret;
1137
1138 au_writel(0, HOST_CONFIG2(host));
1139 au_writel(0, HOST_CONFIG(host));
1140 au_writel(0xffffffff, HOST_STATUS(host));
1141 au_writel(0, HOST_ENABLE(host));
1142 au_sync();
1143
1144 return 0;
1145}
1146
1147static int au1xmmc_resume(struct platform_device *pdev)
1148{
1149 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1150
1151 au1xmmc_reset_controller(host);
1152
1153 return mmc_resume_host(host->mmc);
1154}
1155#else
1156#define au1xmmc_suspend NULL
1157#define au1xmmc_resume NULL
1158#endif
1159
1130static struct platform_driver au1xmmc_driver = { 1160static struct platform_driver au1xmmc_driver = {
1131 .probe = au1xmmc_probe, 1161 .probe = au1xmmc_probe,
1132 .remove = au1xmmc_remove, 1162 .remove = au1xmmc_remove,
1133 .suspend = NULL, 1163 .suspend = au1xmmc_suspend,
1134 .resume = NULL, 1164 .resume = au1xmmc_resume,
1135 .driver = { 1165 .driver = {
1136 .name = DRIVER_NAME, 1166 .name = DRIVER_NAME,
1137 .owner = THIS_MODULE, 1167 .owner = THIS_MODULE,
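The au1xmmc changes switch the drvdata convention from the mmc_host to the driver's own au1xmmc_host, which is what makes the new suspend/resume handlers straightforward: they fetch the host with platform_get_drvdata() and reach the MMC core through host->mmc. The generic shape of that pairing is sketched below with placeholder quiesce/restore steps; the actual register writes are driver-specific and shown in the diff itself.

#include <linux/platform_device.h>
#include <linux/mmc/host.h>

struct example_host {
	struct mmc_host *mmc;
	/* controller-specific state ... */
};

static int example_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct example_host *host = platform_get_drvdata(pdev);
	int ret;

	ret = mmc_suspend_host(host->mmc, state);	/* 2.6.2x signature */
	if (ret)
		return ret;

	/* quiesce the controller: mask IRQs, stop clocks, etc. */
	return 0;
}

static int example_resume(struct platform_device *pdev)
{
	struct example_host *host = platform_get_drvdata(pdev);

	/* bring the controller back to a known state */
	return mmc_resume_host(host->mmc);
}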
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index d39f59738866..a8e18fe53077 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -177,7 +177,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
177 if (dalgn) 177 if (dalgn)
178 DALGN |= (1 << host->dma); 178 DALGN |= (1 << host->dma);
179 else 179 else
180 DALGN &= (1 << host->dma); 180 DALGN &= ~(1 << host->dma);
181 DDADR(host->dma) = host->sg_dma; 181 DDADR(host->dma) = host->sg_dma;
182 DCSR(host->dma) = DCSR_RUN; 182 DCSR(host->dma) = DCSR_RUN;
183} 183}
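The pxamci one-liner is a classic bit-manipulation fix: "DALGN &= (1 << host->dma)" keeps only this channel's alignment bit and wipes every other channel's, whereas the intent is the opposite, to clear just this channel's bit. The usual idiom, for reference:

#define EXAMPLE_BIT(n)	(1u << (n))

/* set bit n, leave the others alone */
static inline unsigned int example_set(unsigned int reg, int n)
{
	return reg | EXAMPLE_BIT(n);
}

/* clear bit n, leave the others alone (note the complement) */
static inline unsigned int example_clear(unsigned int reg, int n)
{
	return reg & ~EXAMPLE_BIT(n);
}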
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 6a1e4994b724..be550c26da68 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1331,21 +1331,30 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1331 return ret; 1331 return ret;
1332} 1332}
1333 1333
1334static void s3cmci_shutdown(struct platform_device *pdev)
1335{
1336 struct mmc_host *mmc = platform_get_drvdata(pdev);
1337 struct s3cmci_host *host = mmc_priv(mmc);
1338
1339 if (host->irq_cd >= 0)
1340 free_irq(host->irq_cd, host);
1341
1342 mmc_remove_host(mmc);
1343 clk_disable(host->clk);
1344}
1345
1334static int __devexit s3cmci_remove(struct platform_device *pdev) 1346static int __devexit s3cmci_remove(struct platform_device *pdev)
1335{ 1347{
1336 struct mmc_host *mmc = platform_get_drvdata(pdev); 1348 struct mmc_host *mmc = platform_get_drvdata(pdev);
1337 struct s3cmci_host *host = mmc_priv(mmc); 1349 struct s3cmci_host *host = mmc_priv(mmc);
1338 1350
1339 mmc_remove_host(mmc); 1351 s3cmci_shutdown(pdev);
1340 1352
1341 clk_disable(host->clk);
1342 clk_put(host->clk); 1353 clk_put(host->clk);
1343 1354
1344 tasklet_disable(&host->pio_tasklet); 1355 tasklet_disable(&host->pio_tasklet);
1345 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); 1356 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);
1346 1357
1347 if (host->irq_cd >= 0)
1348 free_irq(host->irq_cd, host);
1349 free_irq(host->irq, host); 1358 free_irq(host->irq, host);
1350 1359
1351 iounmap(host->base); 1360 iounmap(host->base);
@@ -1355,17 +1364,17 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
1355 return 0; 1364 return 0;
1356} 1365}
1357 1366
1358static int __devinit s3cmci_probe_2410(struct platform_device *dev) 1367static int __devinit s3cmci_2410_probe(struct platform_device *dev)
1359{ 1368{
1360 return s3cmci_probe(dev, 0); 1369 return s3cmci_probe(dev, 0);
1361} 1370}
1362 1371
1363static int __devinit s3cmci_probe_2412(struct platform_device *dev) 1372static int __devinit s3cmci_2412_probe(struct platform_device *dev)
1364{ 1373{
1365 return s3cmci_probe(dev, 1); 1374 return s3cmci_probe(dev, 1);
1366} 1375}
1367 1376
1368static int __devinit s3cmci_probe_2440(struct platform_device *dev) 1377static int __devinit s3cmci_2440_probe(struct platform_device *dev)
1369{ 1378{
1370 return s3cmci_probe(dev, 1); 1379 return s3cmci_probe(dev, 1);
1371} 1380}
@@ -1392,29 +1401,32 @@ static int s3cmci_resume(struct platform_device *dev)
1392#endif /* CONFIG_PM */ 1401#endif /* CONFIG_PM */
1393 1402
1394 1403
1395static struct platform_driver s3cmci_driver_2410 = { 1404static struct platform_driver s3cmci_2410_driver = {
1396 .driver.name = "s3c2410-sdi", 1405 .driver.name = "s3c2410-sdi",
1397 .driver.owner = THIS_MODULE, 1406 .driver.owner = THIS_MODULE,
1398 .probe = s3cmci_probe_2410, 1407 .probe = s3cmci_2410_probe,
1399 .remove = __devexit_p(s3cmci_remove), 1408 .remove = __devexit_p(s3cmci_remove),
1409 .shutdown = s3cmci_shutdown,
1400 .suspend = s3cmci_suspend, 1410 .suspend = s3cmci_suspend,
1401 .resume = s3cmci_resume, 1411 .resume = s3cmci_resume,
1402}; 1412};
1403 1413
1404static struct platform_driver s3cmci_driver_2412 = { 1414static struct platform_driver s3cmci_2412_driver = {
1405 .driver.name = "s3c2412-sdi", 1415 .driver.name = "s3c2412-sdi",
1406 .driver.owner = THIS_MODULE, 1416 .driver.owner = THIS_MODULE,
1407 .probe = s3cmci_probe_2412, 1417 .probe = s3cmci_2412_probe,
1408 .remove = __devexit_p(s3cmci_remove), 1418 .remove = __devexit_p(s3cmci_remove),
1419 .shutdown = s3cmci_shutdown,
1409 .suspend = s3cmci_suspend, 1420 .suspend = s3cmci_suspend,
1410 .resume = s3cmci_resume, 1421 .resume = s3cmci_resume,
1411}; 1422};
1412 1423
1413static struct platform_driver s3cmci_driver_2440 = { 1424static struct platform_driver s3cmci_2440_driver = {
1414 .driver.name = "s3c2440-sdi", 1425 .driver.name = "s3c2440-sdi",
1415 .driver.owner = THIS_MODULE, 1426 .driver.owner = THIS_MODULE,
1416 .probe = s3cmci_probe_2440, 1427 .probe = s3cmci_2440_probe,
1417 .remove = __devexit_p(s3cmci_remove), 1428 .remove = __devexit_p(s3cmci_remove),
1429 .shutdown = s3cmci_shutdown,
1418 .suspend = s3cmci_suspend, 1430 .suspend = s3cmci_suspend,
1419 .resume = s3cmci_resume, 1431 .resume = s3cmci_resume,
1420}; 1432};
@@ -1422,17 +1434,17 @@ static struct platform_driver s3cmci_driver_2440 = {
1422 1434
1423static int __init s3cmci_init(void) 1435static int __init s3cmci_init(void)
1424{ 1436{
1425 platform_driver_register(&s3cmci_driver_2410); 1437 platform_driver_register(&s3cmci_2410_driver);
1426 platform_driver_register(&s3cmci_driver_2412); 1438 platform_driver_register(&s3cmci_2412_driver);
1427 platform_driver_register(&s3cmci_driver_2440); 1439 platform_driver_register(&s3cmci_2440_driver);
1428 return 0; 1440 return 0;
1429} 1441}
1430 1442
1431static void __exit s3cmci_exit(void) 1443static void __exit s3cmci_exit(void)
1432{ 1444{
1433 platform_driver_unregister(&s3cmci_driver_2410); 1445 platform_driver_unregister(&s3cmci_2410_driver);
1434 platform_driver_unregister(&s3cmci_driver_2412); 1446 platform_driver_unregister(&s3cmci_2412_driver);
1435 platform_driver_unregister(&s3cmci_driver_2440); 1447 platform_driver_unregister(&s3cmci_2440_driver);
1436} 1448}
1437 1449
1438module_init(s3cmci_init); 1450module_init(s3cmci_init);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 17701c3da733..c3a5db72ddd7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -173,119 +173,95 @@ static void sdhci_led_control(struct led_classdev *led,
173 * * 173 * *
174\*****************************************************************************/ 174\*****************************************************************************/
175 175
176static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
177{
178 return sg_virt(host->cur_sg);
179}
180
181static inline int sdhci_next_sg(struct sdhci_host* host)
182{
183 /*
184 * Skip to next SG entry.
185 */
186 host->cur_sg++;
187 host->num_sg--;
188
189 /*
190 * Any entries left?
191 */
192 if (host->num_sg > 0) {
193 host->offset = 0;
194 host->remain = host->cur_sg->length;
195 }
196
197 return host->num_sg;
198}
199
200static void sdhci_read_block_pio(struct sdhci_host *host) 176static void sdhci_read_block_pio(struct sdhci_host *host)
201{ 177{
202 int blksize, chunk_remain; 178 unsigned long flags;
203 u32 data; 179 size_t blksize, len, chunk;
204 char *buffer; 180 u32 scratch;
205 int size; 181 u8 *buf;
206 182
207 DBG("PIO reading\n"); 183 DBG("PIO reading\n");
208 184
209 blksize = host->data->blksz; 185 blksize = host->data->blksz;
210 chunk_remain = 0; 186 chunk = 0;
211 data = 0;
212 187
213 buffer = sdhci_sg_to_buffer(host) + host->offset; 188 local_irq_save(flags);
214 189
215 while (blksize) { 190 while (blksize) {
216 if (chunk_remain == 0) { 191 if (!sg_miter_next(&host->sg_miter))
217 data = readl(host->ioaddr + SDHCI_BUFFER); 192 BUG();
218 chunk_remain = min(blksize, 4);
219 }
220 193
221 size = min(host->remain, chunk_remain); 194 len = min(host->sg_miter.length, blksize);
222 195
223 chunk_remain -= size; 196 blksize -= len;
224 blksize -= size; 197 host->sg_miter.consumed = len;
225 host->offset += size;
226 host->remain -= size;
227 198
228 while (size) { 199 buf = host->sg_miter.addr;
229 *buffer = data & 0xFF;
230 buffer++;
231 data >>= 8;
232 size--;
233 }
234 200
235 if (host->remain == 0) { 201 while (len) {
236 if (sdhci_next_sg(host) == 0) { 202 if (chunk == 0) {
237 BUG_ON(blksize != 0); 203 scratch = readl(host->ioaddr + SDHCI_BUFFER);
238 return; 204 chunk = 4;
239 } 205 }
240 buffer = sdhci_sg_to_buffer(host); 206
207 *buf = scratch & 0xFF;
208
209 buf++;
210 scratch >>= 8;
211 chunk--;
212 len--;
241 } 213 }
242 } 214 }
215
216 sg_miter_stop(&host->sg_miter);
217
218 local_irq_restore(flags);
243} 219}
244 220
245static void sdhci_write_block_pio(struct sdhci_host *host) 221static void sdhci_write_block_pio(struct sdhci_host *host)
246{ 222{
247 int blksize, chunk_remain; 223 unsigned long flags;
248 u32 data; 224 size_t blksize, len, chunk;
249 char *buffer; 225 u32 scratch;
250 int bytes, size; 226 u8 *buf;
251 227
252 DBG("PIO writing\n"); 228 DBG("PIO writing\n");
253 229
254 blksize = host->data->blksz; 230 blksize = host->data->blksz;
255 chunk_remain = 4; 231 chunk = 0;
256 data = 0; 232 scratch = 0;
257 233
258 bytes = 0; 234 local_irq_save(flags);
259 buffer = sdhci_sg_to_buffer(host) + host->offset;
260 235
261 while (blksize) { 236 while (blksize) {
262 size = min(host->remain, chunk_remain); 237 if (!sg_miter_next(&host->sg_miter))
263 238 BUG();
264 chunk_remain -= size;
265 blksize -= size;
266 host->offset += size;
267 host->remain -= size;
268
269 while (size) {
270 data >>= 8;
271 data |= (u32)*buffer << 24;
272 buffer++;
273 size--;
274 }
275 239
276 if (chunk_remain == 0) { 240 len = min(host->sg_miter.length, blksize);
277 writel(data, host->ioaddr + SDHCI_BUFFER); 241
278 chunk_remain = min(blksize, 4); 242 blksize -= len;
279 } 243 host->sg_miter.consumed = len;
244
245 buf = host->sg_miter.addr;
280 246
281 if (host->remain == 0) { 247 while (len) {
282 if (sdhci_next_sg(host) == 0) { 248 scratch |= (u32)*buf << (chunk * 8);
283 BUG_ON(blksize != 0); 249
284 return; 250 buf++;
251 chunk++;
252 len--;
253
254 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
255 writel(scratch, host->ioaddr + SDHCI_BUFFER);
256 chunk = 0;
257 scratch = 0;
285 } 258 }
286 buffer = sdhci_sg_to_buffer(host);
287 } 259 }
288 } 260 }
261
262 sg_miter_stop(&host->sg_miter);
263
264 local_irq_restore(flags);
289} 265}
290 266
291static void sdhci_transfer_pio(struct sdhci_host *host) 267static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -294,7 +270,7 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
294 270
295 BUG_ON(!host->data); 271 BUG_ON(!host->data);
296 272
297 if (host->num_sg == 0) 273 if (host->blocks == 0)
298 return; 274 return;
299 275
300 if (host->data->flags & MMC_DATA_READ) 276 if (host->data->flags & MMC_DATA_READ)
@@ -308,7 +284,8 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
308 else 284 else
309 sdhci_write_block_pio(host); 285 sdhci_write_block_pio(host);
310 286
311 if (host->num_sg == 0) 287 host->blocks--;
288 if (host->blocks == 0)
312 break; 289 break;
313 } 290 }
314 291
@@ -389,6 +366,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
389 if (offset) { 366 if (offset) {
390 if (data->flags & MMC_DATA_WRITE) { 367 if (data->flags & MMC_DATA_WRITE) {
391 buffer = sdhci_kmap_atomic(sg, &flags); 368 buffer = sdhci_kmap_atomic(sg, &flags);
369 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
392 memcpy(align, buffer, offset); 370 memcpy(align, buffer, offset);
393 sdhci_kunmap_atomic(buffer, &flags); 371 sdhci_kunmap_atomic(buffer, &flags);
394 } 372 }
@@ -510,6 +488,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
510 size = 4 - (sg_dma_address(sg) & 0x3); 488 size = 4 - (sg_dma_address(sg) & 0x3);
511 489
512 buffer = sdhci_kmap_atomic(sg, &flags); 490 buffer = sdhci_kmap_atomic(sg, &flags);
491 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
513 memcpy(buffer, align, size); 492 memcpy(buffer, align, size);
514 sdhci_kunmap_atomic(buffer, &flags); 493 sdhci_kunmap_atomic(buffer, &flags);
515 494
@@ -687,7 +666,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
687 WARN_ON(1); 666 WARN_ON(1);
688 host->flags &= ~SDHCI_USE_DMA; 667 host->flags &= ~SDHCI_USE_DMA;
689 } else { 668 } else {
690 WARN_ON(count != 1); 669 WARN_ON(sg_cnt != 1);
691 writel(sg_dma_address(data->sg), 670 writel(sg_dma_address(data->sg),
692 host->ioaddr + SDHCI_DMA_ADDRESS); 671 host->ioaddr + SDHCI_DMA_ADDRESS);
693 } 672 }
@@ -711,11 +690,9 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
711 } 690 }
712 691
713 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 692 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
714 host->cur_sg = data->sg; 693 sg_miter_start(&host->sg_miter,
715 host->num_sg = data->sg_len; 694 data->sg, data->sg_len, SG_MITER_ATOMIC);
716 695 host->blocks = data->blocks;
717 host->offset = 0;
718 host->remain = host->cur_sg->length;
719 } 696 }
720 697
721 /* We do not handle DMA boundaries, so set it to max (512 KiB) */ 698 /* We do not handle DMA boundaries, so set it to max (512 KiB) */
@@ -1581,9 +1558,15 @@ int sdhci_add_host(struct sdhci_host *host)
1581 } 1558 }
1582 } 1559 }
1583 1560
1584 /* XXX: Hack to get MMC layer to avoid highmem */ 1561 /*
1585 if (!(host->flags & SDHCI_USE_DMA)) 1562 * If we use DMA, then it's up to the caller to set the DMA
1586 mmc_dev(host->mmc)->dma_mask = NULL; 1563 * mask, but PIO does not need the hw shim so we set a new
1564 * mask here in that case.
1565 */
1566 if (!(host->flags & SDHCI_USE_DMA)) {
1567 host->dma_mask = DMA_BIT_MASK(64);
1568 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1569 }
1587 1570
1588 host->max_clk = 1571 host->max_clk =
1589 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1572 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
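The sdhci PIO rewrite replaces the hand-rolled cur_sg/offset/remain bookkeeping with an sg_mapping_iter, which maps one segment at a time; with SG_MITER_ATOMIC it uses atomic kmaps, hence the local_irq_save() around each block. A condensed sketch of that walk pattern as used by the new read/write paths; the function and buffer names here are illustrative only.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void example_fill_sg(struct scatterlist *sgl, unsigned int nents,
			    u8 pattern, size_t count)
{
	struct sg_mapping_iter miter;
	unsigned long flags;
	size_t len;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (count && sg_miter_next(&miter)) {
		len = min(miter.length, count);
		memset(miter.addr, pattern, len);	/* mapped segment */
		miter.consumed = len;	/* report how much was used */
		count -= len;
	}
	sg_miter_stop(&miter);	/* flush and unmap the last segment */
	local_irq_restore(flags);
}

The host->blocks counter in the diff replaces num_sg as the loop terminator because the iterator, not the driver, now tracks position within the list.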
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 5bb355281765..a06bf8b89343 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -212,6 +212,7 @@ struct sdhci_host {
212 212
213 /* Internal data */ 213 /* Internal data */
214 struct mmc_host *mmc; /* MMC structure */ 214 struct mmc_host *mmc; /* MMC structure */
215 u64 dma_mask; /* custom DMA mask */
215 216
216#ifdef CONFIG_LEDS_CLASS 217#ifdef CONFIG_LEDS_CLASS
217 struct led_classdev led; /* LED control */ 218 struct led_classdev led; /* LED control */
@@ -238,10 +239,8 @@ struct sdhci_host {
238 struct mmc_data *data; /* Current data request */ 239 struct mmc_data *data; /* Current data request */
239 unsigned int data_early:1; /* Data finished before cmd */ 240 unsigned int data_early:1; /* Data finished before cmd */
240 241
241 struct scatterlist *cur_sg; /* We're working on this */ 242 struct sg_mapping_iter sg_miter; /* SG state for PIO */
242 int num_sg; /* Entries left */ 243 unsigned int blocks; /* remaining PIO blocks */
243 int offset; /* Offset into current sg */
244 int remain; /* Bytes left in current */
245 244
246 int sg_count; /* Mapped sg entries */ 245 int sg_count; /* Mapped sg entries */
247 246
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index c42f4b83f686..3fcf92130aa4 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/mm.h>
18#include <linux/major.h> 19#include <linux/major.h>
19#include <linux/mtd/mtd.h> 20#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h> 21#include <linux/mtd/map.h>
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index cb663ef245d5..fc8529bedfdf 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -20,9 +20,11 @@
20 20
21#include <linux/mtd/nand.h> 21#include <linux/mtd/nand.h>
22#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/gpio.h>
23 24
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/mach-types.h>
26 28
27#include <asm/arch/hardware.h> 29#include <asm/arch/hardware.h>
28#include <asm/arch/pxa-regs.h> 30#include <asm/arch/pxa-regs.h>
@@ -30,20 +32,6 @@
30#define GPIO_NAND_CS (11) 32#define GPIO_NAND_CS (11)
31#define GPIO_NAND_RB (89) 33#define GPIO_NAND_RB (89)
32 34
33/* This macro needed to ensure in-order operation of GPIO and local
34 * bus. Without both asm command and dummy uncached read there're
35 * states when NAND access is broken. I've looked for such macro(s) in
36 * include/asm-arm but found nothing approptiate.
37 * dmac_clean_range is close, but is makes cache invalidation
38 * unnecessary here and it cannot be used in module
39 */
40#define DRAIN_WB() \
41 do { \
42 unsigned char dummy; \
43 asm volatile ("mcr p15, 0, r0, c7, c10, 4":::"r0"); \
44 dummy=*((unsigned char*)UNCACHED_ADDR); \
45 } while(0)
46
47/* MTD structure for CM-X270 board */ 35/* MTD structure for CM-X270 board */
48static struct mtd_info *cmx270_nand_mtd; 36static struct mtd_info *cmx270_nand_mtd;
49 37
@@ -103,14 +91,14 @@ static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
103 91
104static inline void nand_cs_on(void) 92static inline void nand_cs_on(void)
105{ 93{
106 GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); 94 gpio_set_value(GPIO_NAND_CS, 0);
107} 95}
108 96
109static void nand_cs_off(void) 97static void nand_cs_off(void)
110{ 98{
111 DRAIN_WB(); 99 dsb();
112 100
113 GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); 101 gpio_set_value(GPIO_NAND_CS, 1);
114} 102}
115 103
116/* 104/*
@@ -122,7 +110,7 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
122 struct nand_chip* this = mtd->priv; 110 struct nand_chip* this = mtd->priv;
123 unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; 111 unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
124 112
125 DRAIN_WB(); 113 dsb();
126 114
127 if (ctrl & NAND_CTRL_CHANGE) { 115 if (ctrl & NAND_CTRL_CHANGE) {
128 if ( ctrl & NAND_ALE ) 116 if ( ctrl & NAND_ALE )
@@ -139,12 +127,12 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
139 nand_cs_off(); 127 nand_cs_off();
140 } 128 }
141 129
142 DRAIN_WB(); 130 dsb();
143 this->IO_ADDR_W = (void __iomem*)nandaddr; 131 this->IO_ADDR_W = (void __iomem*)nandaddr;
144 if (dat != NAND_CMD_NONE) 132 if (dat != NAND_CMD_NONE)
145 writel((dat << 16), this->IO_ADDR_W); 133 writel((dat << 16), this->IO_ADDR_W);
146 134
147 DRAIN_WB(); 135 dsb();
148} 136}
149 137
150/* 138/*
@@ -152,9 +140,9 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
152 */ 140 */
153static int cmx270_device_ready(struct mtd_info *mtd) 141static int cmx270_device_ready(struct mtd_info *mtd)
154{ 142{
155 DRAIN_WB(); 143 dsb();
156 144
157 return (GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB)); 145 return (gpio_get_value(GPIO_NAND_RB));
158} 146}
159 147
160/* 148/*
@@ -168,20 +156,40 @@ static int cmx270_init(void)
168 int mtd_parts_nb = 0; 156 int mtd_parts_nb = 0;
169 int ret; 157 int ret;
170 158
159 if (!machine_is_armcore())
160 return -ENODEV;
161
162 ret = gpio_request(GPIO_NAND_CS, "NAND CS");
163 if (ret) {
164 pr_warning("CM-X270: failed to request NAND CS gpio\n");
165 return ret;
166 }
167
168 gpio_direction_output(GPIO_NAND_CS, 1);
169
170 ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
171 if (ret) {
172 pr_warning("CM-X270: failed to request NAND R/B gpio\n");
173 goto err_gpio_request;
174 }
175
176 gpio_direction_input(GPIO_NAND_RB);
177
171 /* Allocate memory for MTD device structure and private data */ 178 /* Allocate memory for MTD device structure and private data */
172 cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + 179 cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) +
173 sizeof(struct nand_chip), 180 sizeof(struct nand_chip),
174 GFP_KERNEL); 181 GFP_KERNEL);
175 if (!cmx270_nand_mtd) { 182 if (!cmx270_nand_mtd) {
176 printk("Unable to allocate CM-X270 NAND MTD device structure.\n"); 183 pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
177 return -ENOMEM; 184 ret = -ENOMEM;
185 goto err_kzalloc;
178 } 186 }
179 187
180 cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); 188 cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
181 if (!cmx270_nand_io) { 189 if (!cmx270_nand_io) {
182 printk("Unable to ioremap NAND device\n"); 190 pr_debug("Unable to ioremap NAND device\n");
183 ret = -EINVAL; 191 ret = -EINVAL;
184 goto err1; 192 goto err_ioremap;
185 } 193 }
186 194
187 /* Get pointer to private data */ 195 /* Get pointer to private data */
@@ -209,9 +217,9 @@ static int cmx270_init(void)
209 217
210 /* Scan to find existence of the device */ 218 /* Scan to find existence of the device */
211 if (nand_scan (cmx270_nand_mtd, 1)) { 219 if (nand_scan (cmx270_nand_mtd, 1)) {
212 printk(KERN_NOTICE "No NAND device\n"); 220 pr_notice("No NAND device\n");
213 ret = -ENXIO; 221 ret = -ENXIO;
214 goto err2; 222 goto err_scan;
215 } 223 }
216 224
217#ifdef CONFIG_MTD_CMDLINE_PARTS 225#ifdef CONFIG_MTD_CMDLINE_PARTS
@@ -229,18 +237,22 @@ static int cmx270_init(void)
229 } 237 }
230 238
231 /* Register the partitions */ 239 /* Register the partitions */
232 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 240 pr_notice("Using %s partition definition\n", part_type);
233 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); 241 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
234 if (ret) 242 if (ret)
235 goto err2; 243 goto err_scan;
236 244
237 /* Return happy */ 245 /* Return happy */
238 return 0; 246 return 0;
239 247
240err2: 248err_scan:
241 iounmap(cmx270_nand_io); 249 iounmap(cmx270_nand_io);
242err1: 250err_ioremap:
243 kfree(cmx270_nand_mtd); 251 kfree(cmx270_nand_mtd);
252err_kzalloc:
253 gpio_free(GPIO_NAND_RB);
254err_gpio_request:
255 gpio_free(GPIO_NAND_CS);
244 256
245 return ret; 257 return ret;
246 258
@@ -255,6 +267,9 @@ static void cmx270_cleanup(void)
255 /* Release resources, unregister device */ 267 /* Release resources, unregister device */
256 nand_release(cmx270_nand_mtd); 268 nand_release(cmx270_nand_mtd);
257 269
270 gpio_free(GPIO_NAND_RB);
271 gpio_free(GPIO_NAND_CS);
272
258 iounmap(cmx270_nand_io); 273 iounmap(cmx270_nand_io);
259 274
260 /* Free the MTD device structure */ 275 /* Free the MTD device structure */
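The cmx270_nand conversion swaps direct GPxR register pokes and the home-grown DRAIN_WB() barrier for gpiolib calls plus dsb(), and adds proper request/free of the two lines on both the error and exit paths. The gpiolib half of that pattern looks like the sketch below, with placeholder GPIO numbers mirroring the ones in the driver.

#include <linux/gpio.h>

#define EXAMPLE_GPIO_CS	11	/* placeholder, mirrors GPIO_NAND_CS */
#define EXAMPLE_GPIO_RB	89	/* placeholder, mirrors GPIO_NAND_RB */

static int example_claim_nand_gpios(void)
{
	int ret;

	ret = gpio_request(EXAMPLE_GPIO_CS, "NAND CS");
	if (ret)
		return ret;
	gpio_direction_output(EXAMPLE_GPIO_CS, 1);	/* deasserted (high) */

	ret = gpio_request(EXAMPLE_GPIO_RB, "NAND R/B");
	if (ret) {
		gpio_free(EXAMPLE_GPIO_CS);
		return ret;
	}
	gpio_direction_input(EXAMPLE_GPIO_RB);

	return 0;
}

static void example_release_nand_gpios(void)
{
	gpio_free(EXAMPLE_GPIO_RB);
	gpio_free(EXAMPLE_GPIO_CS);
}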
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 961416ac0616..c7630a228310 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -51,14 +51,13 @@
51 * @name: MTD device name or number string 51 * @name: MTD device name or number string
52 * @vid_hdr_offs: VID header offset 52 * @vid_hdr_offs: VID header offset
53 */ 53 */
54struct mtd_dev_param 54struct mtd_dev_param {
55{
56 char name[MTD_PARAM_LEN_MAX]; 55 char name[MTD_PARAM_LEN_MAX];
57 int vid_hdr_offs; 56 int vid_hdr_offs;
58}; 57};
59 58
60/* Numbers of elements set in the @mtd_dev_param array */ 59/* Numbers of elements set in the @mtd_dev_param array */
61static int mtd_devs = 0; 60static int mtd_devs;
62 61
63/* MTD devices specification parameters */ 62/* MTD devices specification parameters */
64static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; 63static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
@@ -160,8 +159,7 @@ void ubi_put_device(struct ubi_device *ubi)
160} 159}
161 160
162/** 161/**
163 * ubi_get_by_major - get UBI device description object by character device 162 * ubi_get_by_major - get UBI device by character device major number.
164 * major number.
165 * @major: major number 163 * @major: major number
166 * 164 *
167 * This function is similar to 'ubi_get_device()', but it searches the device 165 * This function is similar to 'ubi_get_device()', but it searches the device
@@ -355,15 +353,34 @@ static void kill_volumes(struct ubi_device *ubi)
355} 353}
356 354
357/** 355/**
356 * free_user_volumes - free all user volumes.
357 * @ubi: UBI device description object
358 *
359 * Normally the volumes are freed at the release function of the volume device
360 * objects. However, on error paths the volumes have to be freed before the
361 * device objects have been initialized.
362 */
363static void free_user_volumes(struct ubi_device *ubi)
364{
365 int i;
366
367 for (i = 0; i < ubi->vtbl_slots; i++)
368 if (ubi->volumes[i]) {
369 kfree(ubi->volumes[i]->eba_tbl);
370 kfree(ubi->volumes[i]);
371 }
372}
373
374/**
358 * uif_init - initialize user interfaces for an UBI device. 375 * uif_init - initialize user interfaces for an UBI device.
359 * @ubi: UBI device description object 376 * @ubi: UBI device description object
360 * 377 *
361 * This function returns zero in case of success and a negative error code in 378 * This function returns zero in case of success and a negative error code in
362 * case of failure. 379 * case of failure. Note, this function destroys all volumes if it fails.
363 */ 380 */
364static int uif_init(struct ubi_device *ubi) 381static int uif_init(struct ubi_device *ubi)
365{ 382{
366 int i, err; 383 int i, err, do_free = 0;
367 dev_t dev; 384 dev_t dev;
368 385
369 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); 386 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
@@ -384,7 +401,7 @@ static int uif_init(struct ubi_device *ubi)
384 401
385 ubi_assert(MINOR(dev) == 0); 402 ubi_assert(MINOR(dev) == 0);
386 cdev_init(&ubi->cdev, &ubi_cdev_operations); 403 cdev_init(&ubi->cdev, &ubi_cdev_operations);
387 dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev)); 404 dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
388 ubi->cdev.owner = THIS_MODULE; 405 ubi->cdev.owner = THIS_MODULE;
389 406
390 err = cdev_add(&ubi->cdev, dev, 1); 407 err = cdev_add(&ubi->cdev, dev, 1);
@@ -410,10 +427,13 @@ static int uif_init(struct ubi_device *ubi)
410 427
411out_volumes: 428out_volumes:
412 kill_volumes(ubi); 429 kill_volumes(ubi);
430 do_free = 0;
413out_sysfs: 431out_sysfs:
414 ubi_sysfs_close(ubi); 432 ubi_sysfs_close(ubi);
415 cdev_del(&ubi->cdev); 433 cdev_del(&ubi->cdev);
416out_unreg: 434out_unreg:
435 if (do_free)
436 free_user_volumes(ubi);
417 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 437 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
418 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); 438 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
419 return err; 439 return err;
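The do_free flag threaded through uif_init() is the interesting part of this hunk: once kill_volumes() has run, the volume device release callbacks own the per-volume memory, so the manual free_user_volumes() call at out_unreg must be skipped. A condensed, abridged sketch of that flow, with the real setup steps collapsed into hypothetical helpers (setup_sysfs_and_cdev, add_volume_devices are stand-ins, not driver functions):

/* Condensed sketch of the uif_init() error flow introduced above. */
static int uif_init_sketch(struct ubi_device *ubi)
{
        int err, do_free = 1;

        err = setup_sysfs_and_cdev(ubi);        /* hypothetical stand-in */
        if (err)
                goto out_unreg;         /* no device objects yet: free volumes by hand */

        err = add_volume_devices(ubi);          /* hypothetical stand-in */
        if (err)
                goto out_volumes;       /* device objects exist: let release free them */

        return 0;

out_volumes:
        kill_volumes(ubi);
        do_free = 0;            /* release callbacks now own the volume memory */
out_unreg:
        if (do_free)
                free_user_volumes(ubi); /* only when no device object was created */
        return err;
}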
@@ -422,6 +442,10 @@ out_unreg:
422/** 442/**
423 * uif_close - close user interfaces for an UBI device. 443 * uif_close - close user interfaces for an UBI device.
424 * @ubi: UBI device description object 444 * @ubi: UBI device description object
445 *
446 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 447 * the memory allocated for the volumes is freed as well (in the release
448 * function).
425 */ 449 */
426static void uif_close(struct ubi_device *ubi) 450static void uif_close(struct ubi_device *ubi)
427{ 451{
@@ -432,6 +456,21 @@ static void uif_close(struct ubi_device *ubi)
432} 456}
433 457
434/** 458/**
459 * free_internal_volumes - free internal volumes.
460 * @ubi: UBI device description object
461 */
462static void free_internal_volumes(struct ubi_device *ubi)
463{
464 int i;
465
466 for (i = ubi->vtbl_slots;
467 i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
468 kfree(ubi->volumes[i]->eba_tbl);
469 kfree(ubi->volumes[i]);
470 }
471}
472
473/**
435 * attach_by_scanning - attach an MTD device using scanning method. 474 * attach_by_scanning - attach an MTD device using scanning method.
436 * @ubi: UBI device descriptor 475 * @ubi: UBI device descriptor
437 * 476 *
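free_internal_volumes() only walks the tail of the ubi->volumes[] array: slots 0 .. vtbl_slots - 1 hold user volumes, and the following UBI_INT_VOL_COUNT slots hold the internal (layout) volumes. A hypothetical helper, using only fields that appear in the hunks above, makes the split explicit:

/* Hypothetical helper: count populated slots in each region of volumes[]. */
static void count_volumes(const struct ubi_device *ubi)
{
        int i, user = 0, internal = 0;

        for (i = 0; i < ubi->vtbl_slots; i++)           /* user volumes */
                if (ubi->volumes[i])
                        user++;

        for (i = ubi->vtbl_slots;
             i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)      /* internal volumes */
                if (ubi->volumes[i])
                        internal++;

        dbg_gen("%d user and %d internal volumes", user, internal);
}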
@@ -475,6 +514,7 @@ static int attach_by_scanning(struct ubi_device *ubi)
475out_wl: 514out_wl:
476 ubi_wl_close(ubi); 515 ubi_wl_close(ubi);
477out_vtbl: 516out_vtbl:
517 free_internal_volumes(ubi);
478 vfree(ubi->vtbl); 518 vfree(ubi->vtbl);
479out_si: 519out_si:
480 ubi_scan_destroy_si(si); 520 ubi_scan_destroy_si(si);
@@ -482,7 +522,7 @@ out_si:
482} 522}
483 523
484/** 524/**
485 * io_init - initialize I/O unit for a given UBI device. 525 * io_init - initialize I/O sub-system for a given UBI device.
486 * @ubi: UBI device description object 526 * @ubi: UBI device description object
487 * 527 *
488 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are 528 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
@@ -530,7 +570,11 @@ static int io_init(struct ubi_device *ubi)
530 ubi->min_io_size = ubi->mtd->writesize; 570 ubi->min_io_size = ubi->mtd->writesize;
531 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; 571 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
532 572
533 /* Make sure minimal I/O unit is power of 2 */ 573 /*
574 * Make sure minimal I/O unit is power of 2. Note, there is no
575 * fundamental reason for this assumption. It is just an optimization
576 * which allows us to avoid costly division operations.
577 */
534 if (!is_power_of_2(ubi->min_io_size)) { 578 if (!is_power_of_2(ubi->min_io_size)) {
535 ubi_err("min. I/O unit (%d) is not power of 2", 579 ubi_err("min. I/O unit (%d) is not power of 2",
536 ubi->min_io_size); 580 ubi->min_io_size);
@@ -581,7 +625,7 @@ static int io_init(struct ubi_device *ubi)
581 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || 625 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
582 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || 626 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
583 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || 627 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
584 ubi->leb_start % ubi->min_io_size) { 628 ubi->leb_start & (ubi->min_io_size - 1)) {
585 ubi_err("bad VID header (%d) or data offsets (%d)", 629 ubi_err("bad VID header (%d) or data offsets (%d)",
586 ubi->vid_hdr_offset, ubi->leb_start); 630 ubi->vid_hdr_offset, ubi->leb_start);
587 return -EINVAL; 631 return -EINVAL;
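Replacing 'leb_start % min_io_size' with 'leb_start & (min_io_size - 1)' is safe only because io_init() has just verified that min_io_size is a power of two; for such an n, x % n and x & (n - 1) are identical. A small userspace check of the equivalence (2048 is just an example unit size):

#include <assert.h>

/* For a power-of-two n, the low bits select the remainder directly. */
static unsigned int mod_pow2(unsigned int x, unsigned int n)
{
        return x & (n - 1);     /* valid only when n is a power of 2 */
}

int main(void)
{
        /* e.g. a 2048-byte minimal I/O unit */
        assert(mod_pow2(4096, 2048) == 4096 % 2048);    /* 0   -> aligned   */
        assert(mod_pow2(4600, 2048) == 4600 % 2048);    /* 504 -> unaligned */
        return 0;
}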
@@ -646,7 +690,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
646 690
647 /* 691 /*
648 * Clear the auto-resize flag in the volume in-memory copy of the 692 * Clear the auto-resize flag in the volume in-memory copy of the
649 * volume table, and 'ubi_resize_volume()' will propogate this change 693 * volume table, and 'ubi_resize_volume()' will propagate this change
650 * to the flash. 694 * to the flash.
651 */ 695 */
652 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; 696 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
@@ -655,7 +699,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
655 struct ubi_vtbl_record vtbl_rec; 699 struct ubi_vtbl_record vtbl_rec;
656 700
657 /* 701 /*
658 * No avalilable PEBs to re-size the volume, clear the flag on 702 * No available PEBs to re-size the volume, clear the flag on
659 * flash and exit. 703 * flash and exit.
660 */ 704 */
661 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], 705 memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
@@ -682,13 +726,13 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
682 726
683/** 727/**
684 * ubi_attach_mtd_dev - attach an MTD device. 728 * ubi_attach_mtd_dev - attach an MTD device.
685 * @mtd_dev: MTD device description object 729 * @mtd: MTD device description object
686 * @ubi_num: number to assign to the new UBI device 730 * @ubi_num: number to assign to the new UBI device
687 * @vid_hdr_offset: VID header offset 731 * @vid_hdr_offset: VID header offset
688 * 732 *
689 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number 733 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
690 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in 734 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
691 * which case this function finds a vacant device nubert and assings it 735 * which case this function finds a vacant device number and assigns it
692 * automatically. Returns the new UBI device number in case of success and a 736 * automatically. Returns the new UBI device number in case of success and a
693 * negative error code in case of failure. 737 * negative error code in case of failure.
694 * 738 *
@@ -698,7 +742,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
698int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) 742int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
699{ 743{
700 struct ubi_device *ubi; 744 struct ubi_device *ubi;
701 int i, err; 745 int i, err, do_free = 1;
702 746
703 /* 747 /*
704 * Check if we already have the same MTD device attached. 748 * Check if we already have the same MTD device attached.
@@ -735,7 +779,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
735 if (!ubi_devices[ubi_num]) 779 if (!ubi_devices[ubi_num])
736 break; 780 break;
737 if (ubi_num == UBI_MAX_DEVICES) { 781 if (ubi_num == UBI_MAX_DEVICES) {
738 dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES); 782 dbg_err("only %d UBI devices may be created",
783 UBI_MAX_DEVICES);
739 return -ENFILE; 784 return -ENFILE;
740 } 785 }
741 } else { 786 } else {
@@ -760,6 +805,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
760 805
761 mutex_init(&ubi->buf_mutex); 806 mutex_init(&ubi->buf_mutex);
762 mutex_init(&ubi->ckvol_mutex); 807 mutex_init(&ubi->ckvol_mutex);
808 mutex_init(&ubi->mult_mutex);
763 mutex_init(&ubi->volumes_mutex); 809 mutex_init(&ubi->volumes_mutex);
764 spin_lock_init(&ubi->volumes_lock); 810 spin_lock_init(&ubi->volumes_lock);
765 811
@@ -798,7 +844,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
798 844
799 err = uif_init(ubi); 845 err = uif_init(ubi);
800 if (err) 846 if (err)
801 goto out_detach; 847 goto out_nofree;
802 848
803 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); 849 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
804 if (IS_ERR(ubi->bgt_thread)) { 850 if (IS_ERR(ubi->bgt_thread)) {
@@ -824,20 +870,22 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
824 ubi->beb_rsvd_pebs); 870 ubi->beb_rsvd_pebs);
825 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); 871 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
826 872
827 /* Enable the background thread */ 873 if (!DBG_DISABLE_BGT)
828 if (!DBG_DISABLE_BGT) {
829 ubi->thread_enabled = 1; 874 ubi->thread_enabled = 1;
830 wake_up_process(ubi->bgt_thread); 875 wake_up_process(ubi->bgt_thread);
831 }
832 876
833 ubi_devices[ubi_num] = ubi; 877 ubi_devices[ubi_num] = ubi;
834 return ubi_num; 878 return ubi_num;
835 879
836out_uif: 880out_uif:
837 uif_close(ubi); 881 uif_close(ubi);
882out_nofree:
883 do_free = 0;
838out_detach: 884out_detach:
839 ubi_eba_close(ubi);
840 ubi_wl_close(ubi); 885 ubi_wl_close(ubi);
886 if (do_free)
887 free_user_volumes(ubi);
888 free_internal_volumes(ubi);
841 vfree(ubi->vtbl); 889 vfree(ubi->vtbl);
842out_free: 890out_free:
843 vfree(ubi->peb_buf1); 891 vfree(ubi->peb_buf1);
@@ -899,8 +947,8 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
899 kthread_stop(ubi->bgt_thread); 947 kthread_stop(ubi->bgt_thread);
900 948
901 uif_close(ubi); 949 uif_close(ubi);
902 ubi_eba_close(ubi);
903 ubi_wl_close(ubi); 950 ubi_wl_close(ubi);
951 free_internal_volumes(ubi);
904 vfree(ubi->vtbl); 952 vfree(ubi->vtbl);
905 put_mtd_device(ubi->mtd); 953 put_mtd_device(ubi->mtd);
906 vfree(ubi->peb_buf1); 954 vfree(ubi->peb_buf1);
@@ -1044,8 +1092,7 @@ static void __exit ubi_exit(void)
1044module_exit(ubi_exit); 1092module_exit(ubi_exit);
1045 1093
1046/** 1094/**
1047 * bytes_str_to_int - convert a string representing number of bytes to an 1095 * bytes_str_to_int - convert a number of bytes string into an integer.
1048 * integer.
1049 * @str: the string to convert 1096 * @str: the string to convert
1050 * 1097 *
1051 * This function returns positive resulting integer in case of success and a 1098 * This function returns positive resulting integer in case of success and a
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 89193ba9451e..03c759b4eeb5 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -39,9 +39,9 @@
39#include <linux/stat.h> 39#include <linux/stat.h>
40#include <linux/ioctl.h> 40#include <linux/ioctl.h>
41#include <linux/capability.h> 41#include <linux/capability.h>
42#include <linux/uaccess.h>
42#include <linux/smp_lock.h> 43#include <linux/smp_lock.h>
43#include <mtd/ubi-user.h> 44#include <mtd/ubi-user.h>
44#include <asm/uaccess.h>
45#include <asm/div64.h> 45#include <asm/div64.h>
46#include "ubi.h" 46#include "ubi.h"
47 47
@@ -116,7 +116,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
116 else 116 else
117 mode = UBI_READONLY; 117 mode = UBI_READONLY;
118 118
119 dbg_msg("open volume %d, mode %d", vol_id, mode); 119 dbg_gen("open volume %d, mode %d", vol_id, mode);
120 120
121 desc = ubi_open_volume(ubi_num, vol_id, mode); 121 desc = ubi_open_volume(ubi_num, vol_id, mode);
122 unlock_kernel(); 122 unlock_kernel();
@@ -132,7 +132,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
132 struct ubi_volume_desc *desc = file->private_data; 132 struct ubi_volume_desc *desc = file->private_data;
133 struct ubi_volume *vol = desc->vol; 133 struct ubi_volume *vol = desc->vol;
134 134
135 dbg_msg("release volume %d, mode %d", vol->vol_id, desc->mode); 135 dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode);
136 136
137 if (vol->updating) { 137 if (vol->updating) {
138 ubi_warn("update of volume %d not finished, volume is damaged", 138 ubi_warn("update of volume %d not finished, volume is damaged",
@@ -141,7 +141,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
141 vol->updating = 0; 141 vol->updating = 0;
142 vfree(vol->upd_buf); 142 vfree(vol->upd_buf);
143 } else if (vol->changing_leb) { 143 } else if (vol->changing_leb) {
144 dbg_msg("only %lld of %lld bytes received for atomic LEB change" 144 dbg_gen("only %lld of %lld bytes received for atomic LEB change"
145 " for volume %d:%d, cancel", vol->upd_received, 145 " for volume %d:%d, cancel", vol->upd_received,
146 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); 146 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
147 vol->changing_leb = 0; 147 vol->changing_leb = 0;
@@ -183,7 +183,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
183 return -EINVAL; 183 return -EINVAL;
184 } 184 }
185 185
186 dbg_msg("seek volume %d, offset %lld, origin %d, new offset %lld", 186 dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
187 vol->vol_id, offset, origin, new_offset); 187 vol->vol_id, offset, origin, new_offset);
188 188
189 file->f_pos = new_offset; 189 file->f_pos = new_offset;
@@ -201,7 +201,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
201 void *tbuf; 201 void *tbuf;
202 uint64_t tmp; 202 uint64_t tmp;
203 203
204 dbg_msg("read %zd bytes from offset %lld of volume %d", 204 dbg_gen("read %zd bytes from offset %lld of volume %d",
205 count, *offp, vol->vol_id); 205 count, *offp, vol->vol_id);
206 206
207 if (vol->updating) { 207 if (vol->updating) {
@@ -216,7 +216,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
216 return 0; 216 return 0;
217 217
218 if (vol->corrupted) 218 if (vol->corrupted)
219 dbg_msg("read from corrupted volume %d", vol->vol_id); 219 dbg_gen("read from corrupted volume %d", vol->vol_id);
220 220
221 if (*offp + count > vol->used_bytes) 221 if (*offp + count > vol->used_bytes)
222 count_save = count = vol->used_bytes - *offp; 222 count_save = count = vol->used_bytes - *offp;
@@ -285,7 +285,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
285 char *tbuf; 285 char *tbuf;
286 uint64_t tmp; 286 uint64_t tmp;
287 287
288 dbg_msg("requested: write %zd bytes to offset %lld of volume %u", 288 dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
289 count, *offp, vol->vol_id); 289 count, *offp, vol->vol_id);
290 290
291 if (vol->vol_type == UBI_STATIC_VOLUME) 291 if (vol->vol_type == UBI_STATIC_VOLUME)
@@ -295,7 +295,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
295 off = do_div(tmp, vol->usable_leb_size); 295 off = do_div(tmp, vol->usable_leb_size);
296 lnum = tmp; 296 lnum = tmp;
297 297
298 if (off % ubi->min_io_size) { 298 if (off & (ubi->min_io_size - 1)) {
299 dbg_err("unaligned position"); 299 dbg_err("unaligned position");
300 return -EINVAL; 300 return -EINVAL;
301 } 301 }
@@ -304,7 +304,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
304 count_save = count = vol->used_bytes - *offp; 304 count_save = count = vol->used_bytes - *offp;
305 305
306 /* We can write only in fractions of the minimum I/O unit */ 306 /* We can write only in fractions of the minimum I/O unit */
307 if (count % ubi->min_io_size) { 307 if (count & (ubi->min_io_size - 1)) {
308 dbg_err("unaligned write length"); 308 dbg_err("unaligned write length");
309 return -EINVAL; 309 return -EINVAL;
310 } 310 }
@@ -352,7 +352,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
352} 352}
353 353
354#else 354#else
355#define vol_cdev_direct_write(file, buf, count, offp) -EPERM 355#define vol_cdev_direct_write(file, buf, count, offp) (-EPERM)
356#endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ 356#endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */
357 357
358static ssize_t vol_cdev_write(struct file *file, const char __user *buf, 358static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
@@ -437,7 +437,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
437 break; 437 break;
438 } 438 }
439 439
440 rsvd_bytes = vol->reserved_pebs * (ubi->leb_size-vol->data_pad); 440 rsvd_bytes = (long long)vol->reserved_pebs *
441 ubi->leb_size-vol->data_pad;
441 if (bytes < 0 || bytes > rsvd_bytes) { 442 if (bytes < 0 || bytes > rsvd_bytes) {
442 err = -EINVAL; 443 err = -EINVAL;
443 break; 444 break;
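The cast added to the rsvd_bytes calculation widens the arithmetic to 64 bits before the multiplication happens; with both operands plain ints, a large volume can overflow the 32-bit product. A standalone illustration with made-up but plausible numbers:

#include <stdio.h>

int main(void)
{
        int reserved_pebs = 20000;      /* hypothetical large volume         */
        int leb_size = 126976;          /* a typical 128 KiB NAND LEB payload */

        /* 32-bit multiply wraps (formally undefined for signed int) */
        long long wrong = reserved_pebs * leb_size;
        /* widened before the multiply, so the full value survives */
        long long right = (long long)reserved_pebs * leb_size;

        printf("wrong=%lld right=%lld\n", wrong, right);
        return 0;
}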
@@ -513,7 +514,7 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
513 break; 514 break;
514 } 515 }
515 516
516 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 517 dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
517 err = ubi_eba_unmap_leb(ubi, vol, lnum); 518 err = ubi_eba_unmap_leb(ubi, vol, lnum);
518 if (err) 519 if (err)
519 break; 520 break;
@@ -564,7 +565,7 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
564 if (req->alignment > ubi->leb_size) 565 if (req->alignment > ubi->leb_size)
565 goto bad; 566 goto bad;
566 567
567 n = req->alignment % ubi->min_io_size; 568 n = req->alignment & (ubi->min_io_size - 1);
568 if (req->alignment != 1 && n) 569 if (req->alignment != 1 && n)
569 goto bad; 570 goto bad;
570 571
@@ -573,6 +574,10 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
573 goto bad; 574 goto bad;
574 } 575 }
575 576
577 n = strnlen(req->name, req->name_len + 1);
578 if (n != req->name_len)
579 goto bad;
580
576 return 0; 581 return 0;
577 582
578bad: 583bad:
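The new strnlen() check in verify_mkvol_req() relies on the caller having terminated the buffer at name_len (the ioctl handler now does that before validation): the declared length is accepted only if no earlier '\0' is found. A small userspace model of the test:

#include <assert.h>
#include <string.h>

/* Returns 1 when the first name_len bytes contain no '\0' and
 * name[name_len] is the terminator, i.e. the declared length is honest. */
static int name_len_ok(const char *name, int name_len)
{
        return strnlen(name, name_len + 1) == (size_t)name_len;
}

int main(void)
{
        assert(name_len_ok("rootfs", 6));       /* consistent request          */
        assert(!name_len_ok("root", 6));        /* '\0' before the declared end */
        return 0;
}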
@@ -600,6 +605,166 @@ static int verify_rsvol_req(const struct ubi_device *ubi,
600 return 0; 605 return 0;
601} 606}
602 607
608/**
609 * rename_volumes - rename UBI volumes.
610 * @ubi: UBI device description object
611 * @req: volumes re-name request
612 *
 613 * This is a helper function for the volume re-name IOCTL, which validates
 614 * the request, opens the volumes and calls the corresponding volume management
615 * function. Returns zero in case of success and a negative error code in case
616 * of failure.
617 */
618static int rename_volumes(struct ubi_device *ubi,
619 struct ubi_rnvol_req *req)
620{
621 int i, n, err;
622 struct list_head rename_list;
623 struct ubi_rename_entry *re, *re1;
624
625 if (req->count < 0 || req->count > UBI_MAX_RNVOL)
626 return -EINVAL;
627
628 if (req->count == 0)
629 return 0;
630
631 /* Validate volume IDs and names in the request */
632 for (i = 0; i < req->count; i++) {
633 if (req->ents[i].vol_id < 0 ||
634 req->ents[i].vol_id >= ubi->vtbl_slots)
635 return -EINVAL;
636 if (req->ents[i].name_len < 0)
637 return -EINVAL;
638 if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
639 return -ENAMETOOLONG;
640 req->ents[i].name[req->ents[i].name_len] = '\0';
641 n = strlen(req->ents[i].name);
642 if (n != req->ents[i].name_len)
643 err = -EINVAL;
644 }
645
646 /* Make sure volume IDs and names are unique */
647 for (i = 0; i < req->count - 1; i++) {
648 for (n = i + 1; n < req->count; n++) {
649 if (req->ents[i].vol_id == req->ents[n].vol_id) {
650 dbg_err("duplicated volume id %d",
651 req->ents[i].vol_id);
652 return -EINVAL;
653 }
654 if (!strcmp(req->ents[i].name, req->ents[n].name)) {
655 dbg_err("duplicated volume name \"%s\"",
656 req->ents[i].name);
657 return -EINVAL;
658 }
659 }
660 }
661
662 /* Create the re-name list */
663 INIT_LIST_HEAD(&rename_list);
664 for (i = 0; i < req->count; i++) {
665 int vol_id = req->ents[i].vol_id;
666 int name_len = req->ents[i].name_len;
667 const char *name = req->ents[i].name;
668
669 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
670 if (!re) {
671 err = -ENOMEM;
672 goto out_free;
673 }
674
675 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
676 if (IS_ERR(re->desc)) {
677 err = PTR_ERR(re->desc);
678 dbg_err("cannot open volume %d, error %d", vol_id, err);
679 kfree(re);
680 goto out_free;
681 }
682
683 /* Skip this re-naming if the name does not really change */
684 if (re->desc->vol->name_len == name_len &&
685 !memcmp(re->desc->vol->name, name, name_len)) {
686 ubi_close_volume(re->desc);
687 kfree(re);
688 continue;
689 }
690
691 re->new_name_len = name_len;
692 memcpy(re->new_name, name, name_len);
693 list_add_tail(&re->list, &rename_list);
694 dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
695 vol_id, re->desc->vol->name, name);
696 }
697
698 if (list_empty(&rename_list))
699 return 0;
700
701 /* Find out the volumes which have to be removed */
702 list_for_each_entry(re, &rename_list, list) {
703 struct ubi_volume_desc *desc;
704 int no_remove_needed = 0;
705
706 /*
707 * Volume @re->vol_id is going to be re-named to
708 * @re->new_name, while its current name is @name. If a volume
709 * with name @re->new_name currently exists, it has to be
710 * removed, unless it is also re-named in the request (@req).
711 */
712 list_for_each_entry(re1, &rename_list, list) {
713 if (re->new_name_len == re1->desc->vol->name_len &&
714 !memcmp(re->new_name, re1->desc->vol->name,
715 re1->desc->vol->name_len)) {
716 no_remove_needed = 1;
717 break;
718 }
719 }
720
721 if (no_remove_needed)
722 continue;
723
724 /*
725 * It seems we need to remove volume with name @re->new_name,
726 * if it exists.
727 */
728 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE);
729 if (IS_ERR(desc)) {
730 err = PTR_ERR(desc);
731 if (err == -ENODEV)
732 /* Re-naming into a non-existing volume name */
733 continue;
734
 735 /* The volume exists but is busy, or an error occurred */
736 dbg_err("cannot open volume \"%s\", error %d",
737 re->new_name, err);
738 goto out_free;
739 }
740
741 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
742 if (!re) {
743 err = -ENOMEM;
744 ubi_close_volume(desc);
745 goto out_free;
746 }
747
748 re->remove = 1;
749 re->desc = desc;
750 list_add(&re->list, &rename_list);
751 dbg_msg("will remove volume %d, name \"%s\"",
752 re->desc->vol->vol_id, re->desc->vol->name);
753 }
754
755 mutex_lock(&ubi->volumes_mutex);
756 err = ubi_rename_volumes(ubi, &rename_list);
757 mutex_unlock(&ubi->volumes_mutex);
758
759out_free:
760 list_for_each_entry_safe(re, re1, &rename_list, list) {
761 ubi_close_volume(re->desc);
762 list_del(&re->list);
763 kfree(re);
764 }
765 return err;
766}
767
603static int ubi_cdev_ioctl(struct inode *inode, struct file *file, 768static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
604 unsigned int cmd, unsigned long arg) 769 unsigned int cmd, unsigned long arg)
605{ 770{
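rename_volumes() builds a temporary list of ubi_rename_entry objects, each holding its volume open exclusively, and drains the list with list_for_each_entry_safe() regardless of the outcome. A minimal sketch of that build-then-drain idiom, with a hypothetical entry type standing in for ubi_rename_entry:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct tmp_entry {                      /* hypothetical stand-in */
        struct list_head list;
        int vol_id;
};

static int build_and_drain(const int *ids, int count)
{
        struct tmp_entry *e, *tmp;
        struct list_head work;
        int i, err = 0;

        INIT_LIST_HEAD(&work);

        for (i = 0; i < count; i++) {
                e = kzalloc(sizeof(*e), GFP_KERNEL);
                if (!e) {
                        err = -ENOMEM;
                        goto out_free;  /* entries added so far still get freed */
                }
                e->vol_id = ids[i];
                list_add_tail(&e->list, &work);
        }

        /* ... act on the whole list here ... */

out_free:
        /* _safe variant: the current node may be freed while iterating */
        list_for_each_entry_safe(e, tmp, &work, list) {
                list_del(&e->list);
                kfree(e);
        }
        return err;
}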
@@ -621,19 +786,18 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
621 { 786 {
622 struct ubi_mkvol_req req; 787 struct ubi_mkvol_req req;
623 788
624 dbg_msg("create volume"); 789 dbg_gen("create volume");
625 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); 790 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
626 if (err) { 791 if (err) {
627 err = -EFAULT; 792 err = -EFAULT;
628 break; 793 break;
629 } 794 }
630 795
796 req.name[req.name_len] = '\0';
631 err = verify_mkvol_req(ubi, &req); 797 err = verify_mkvol_req(ubi, &req);
632 if (err) 798 if (err)
633 break; 799 break;
634 800
635 req.name[req.name_len] = '\0';
636
637 mutex_lock(&ubi->volumes_mutex); 801 mutex_lock(&ubi->volumes_mutex);
638 err = ubi_create_volume(ubi, &req); 802 err = ubi_create_volume(ubi, &req);
639 mutex_unlock(&ubi->volumes_mutex); 803 mutex_unlock(&ubi->volumes_mutex);
@@ -652,7 +816,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
652 { 816 {
653 int vol_id; 817 int vol_id;
654 818
655 dbg_msg("remove volume"); 819 dbg_gen("remove volume");
656 err = get_user(vol_id, (__user int32_t *)argp); 820 err = get_user(vol_id, (__user int32_t *)argp);
657 if (err) { 821 if (err) {
658 err = -EFAULT; 822 err = -EFAULT;
@@ -666,7 +830,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
666 } 830 }
667 831
668 mutex_lock(&ubi->volumes_mutex); 832 mutex_lock(&ubi->volumes_mutex);
669 err = ubi_remove_volume(desc); 833 err = ubi_remove_volume(desc, 0);
670 mutex_unlock(&ubi->volumes_mutex); 834 mutex_unlock(&ubi->volumes_mutex);
671 835
672 /* 836 /*
@@ -685,7 +849,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
685 uint64_t tmp; 849 uint64_t tmp;
686 struct ubi_rsvol_req req; 850 struct ubi_rsvol_req req;
687 851
688 dbg_msg("re-size volume"); 852 dbg_gen("re-size volume");
689 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); 853 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
690 if (err) { 854 if (err) {
691 err = -EFAULT; 855 err = -EFAULT;
@@ -713,6 +877,32 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
713 break; 877 break;
714 } 878 }
715 879
880 /* Re-name volumes command */
881 case UBI_IOCRNVOL:
882 {
883 struct ubi_rnvol_req *req;
884
885 dbg_msg("re-name volumes");
886 req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
887 if (!req) {
888 err = -ENOMEM;
889 break;
890 };
891
892 err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
893 if (err) {
894 err = -EFAULT;
895 kfree(req);
896 break;
897 }
898
899 mutex_lock(&ubi->mult_mutex);
900 err = rename_volumes(ubi, req);
901 mutex_unlock(&ubi->mult_mutex);
902 kfree(req);
903 break;
904 }
905
716 default: 906 default:
717 err = -ENOTTY; 907 err = -ENOTTY;
718 break; 908 break;
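From user space the new ioctl is issued on the UBI device node; the request layout matches the fields validated above (count, and per-entry vol_id, name_len and name). A hedged example, assuming the usual /dev/ubi0 node, the exported <mtd/ubi-user.h> header, and an existing volume with ID 0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>       /* UBI_IOCRNVOL, struct ubi_rnvol_req */

int main(void)
{
        struct ubi_rnvol_req req;
        int fd, err;

        memset(&req, 0, sizeof(req));
        req.count = 1;                          /* one rename in this batch      */
        req.ents[0].vol_id = 0;                 /* volume to rename (assumed ID) */
        req.ents[0].name_len = strlen("rootfs_new");
        strcpy(req.ents[0].name, "rootfs_new");

        fd = open("/dev/ubi0", O_RDWR);         /* UBI device node (assumed path) */
        if (fd < 0)
                return 1;

        err = ioctl(fd, UBI_IOCRNVOL, &req);
        if (err)
                perror("UBI_IOCRNVOL");

        close(fd);
        return err ? 1 : 0;
}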
@@ -738,7 +928,7 @@ static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
738 struct ubi_attach_req req; 928 struct ubi_attach_req req;
739 struct mtd_info *mtd; 929 struct mtd_info *mtd;
740 930
741 dbg_msg("attach MTD device"); 931 dbg_gen("attach MTD device");
742 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); 932 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
743 if (err) { 933 if (err) {
744 err = -EFAULT; 934 err = -EFAULT;
@@ -778,7 +968,7 @@ static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
778 { 968 {
779 int ubi_num; 969 int ubi_num;
780 970
781 dbg_msg("dettach MTD device"); 971 dbg_gen("dettach MTD device");
782 err = get_user(ubi_num, (__user int32_t *)argp); 972 err = get_user(ubi_num, (__user int32_t *)argp);
783 if (err) { 973 if (err) {
784 err = -EFAULT; 974 err = -EFAULT;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 56956ec2845f..c0ed60e8ade9 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -24,7 +24,7 @@
24 * changes. 24 * changes.
25 */ 25 */
26 26
27#ifdef CONFIG_MTD_UBI_DEBUG_MSG 27#ifdef CONFIG_MTD_UBI_DEBUG
28 28
29#include "ubi.h" 29#include "ubi.h"
30 30
@@ -34,14 +34,19 @@
34 */ 34 */
35void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) 35void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
36{ 36{
37 dbg_msg("erase counter header dump:"); 37 printk(KERN_DEBUG "Erase counter header dump:\n");
38 dbg_msg("magic %#08x", be32_to_cpu(ec_hdr->magic)); 38 printk(KERN_DEBUG "\tmagic %#08x\n",
39 dbg_msg("version %d", (int)ec_hdr->version); 39 be32_to_cpu(ec_hdr->magic));
40 dbg_msg("ec %llu", (long long)be64_to_cpu(ec_hdr->ec)); 40 printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version);
41 dbg_msg("vid_hdr_offset %d", be32_to_cpu(ec_hdr->vid_hdr_offset)); 41 printk(KERN_DEBUG "\tec %llu\n",
42 dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset)); 42 (long long)be64_to_cpu(ec_hdr->ec));
43 dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc)); 43 printk(KERN_DEBUG "\tvid_hdr_offset %d\n",
44 dbg_msg("erase counter header hexdump:"); 44 be32_to_cpu(ec_hdr->vid_hdr_offset));
45 printk(KERN_DEBUG "\tdata_offset %d\n",
46 be32_to_cpu(ec_hdr->data_offset));
47 printk(KERN_DEBUG "\thdr_crc %#08x\n",
48 be32_to_cpu(ec_hdr->hdr_crc));
49 printk(KERN_DEBUG "erase counter header hexdump:\n");
45 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 50 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
46 ec_hdr, UBI_EC_HDR_SIZE, 1); 51 ec_hdr, UBI_EC_HDR_SIZE, 1);
47} 52}
@@ -52,22 +57,23 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
52 */ 57 */
53void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) 58void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
54{ 59{
55 dbg_msg("volume identifier header dump:"); 60 printk(KERN_DEBUG "Volume identifier header dump:\n");
56 dbg_msg("magic %08x", be32_to_cpu(vid_hdr->magic)); 61 printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
57 dbg_msg("version %d", (int)vid_hdr->version); 62 printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version);
58 dbg_msg("vol_type %d", (int)vid_hdr->vol_type); 63 printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type);
59 dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag); 64 printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
60 dbg_msg("compat %d", (int)vid_hdr->compat); 65 printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat);
61 dbg_msg("vol_id %d", be32_to_cpu(vid_hdr->vol_id)); 66 printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
62 dbg_msg("lnum %d", be32_to_cpu(vid_hdr->lnum)); 67 printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
63 dbg_msg("leb_ver %u", be32_to_cpu(vid_hdr->leb_ver)); 68 printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
64 dbg_msg("data_size %d", be32_to_cpu(vid_hdr->data_size)); 69 printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
65 dbg_msg("used_ebs %d", be32_to_cpu(vid_hdr->used_ebs)); 70 printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
66 dbg_msg("data_pad %d", be32_to_cpu(vid_hdr->data_pad)); 71 printk(KERN_DEBUG "\tsqnum %llu\n",
67 dbg_msg("sqnum %llu",
68 (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); 72 (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
69 dbg_msg("hdr_crc %08x", be32_to_cpu(vid_hdr->hdr_crc)); 73 printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
70 dbg_msg("volume identifier header hexdump:"); 74 printk(KERN_DEBUG "Volume identifier header hexdump:\n");
75 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
76 vid_hdr, UBI_VID_HDR_SIZE, 1);
71} 77}
72 78
73/** 79/**
@@ -76,27 +82,27 @@ void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
76 */ 82 */
77void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) 83void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
78{ 84{
79 dbg_msg("volume information dump:"); 85 printk(KERN_DEBUG "Volume information dump:\n");
80 dbg_msg("vol_id %d", vol->vol_id); 86 printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id);
81 dbg_msg("reserved_pebs %d", vol->reserved_pebs); 87 printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs);
82 dbg_msg("alignment %d", vol->alignment); 88 printk(KERN_DEBUG "\talignment %d\n", vol->alignment);
83 dbg_msg("data_pad %d", vol->data_pad); 89 printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad);
84 dbg_msg("vol_type %d", vol->vol_type); 90 printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type);
85 dbg_msg("name_len %d", vol->name_len); 91 printk(KERN_DEBUG "\tname_len %d\n", vol->name_len);
86 dbg_msg("usable_leb_size %d", vol->usable_leb_size); 92 printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size);
87 dbg_msg("used_ebs %d", vol->used_ebs); 93 printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs);
88 dbg_msg("used_bytes %lld", vol->used_bytes); 94 printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes);
89 dbg_msg("last_eb_bytes %d", vol->last_eb_bytes); 95 printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes);
90 dbg_msg("corrupted %d", vol->corrupted); 96 printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted);
91 dbg_msg("upd_marker %d", vol->upd_marker); 97 printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker);
92 98
93 if (vol->name_len <= UBI_VOL_NAME_MAX && 99 if (vol->name_len <= UBI_VOL_NAME_MAX &&
94 strnlen(vol->name, vol->name_len + 1) == vol->name_len) { 100 strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
95 dbg_msg("name %s", vol->name); 101 printk(KERN_DEBUG "\tname %s\n", vol->name);
96 } else { 102 } else {
97 dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c", 103 printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
98 vol->name[0], vol->name[1], vol->name[2], 104 vol->name[0], vol->name[1], vol->name[2],
99 vol->name[3], vol->name[4]); 105 vol->name[3], vol->name[4]);
100 } 106 }
101} 107}
102 108
@@ -109,28 +115,29 @@ void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
109{ 115{
110 int name_len = be16_to_cpu(r->name_len); 116 int name_len = be16_to_cpu(r->name_len);
111 117
112 dbg_msg("volume table record %d dump:", idx); 118 printk(KERN_DEBUG "Volume table record %d dump:\n", idx);
113 dbg_msg("reserved_pebs %d", be32_to_cpu(r->reserved_pebs)); 119 printk(KERN_DEBUG "\treserved_pebs %d\n",
114 dbg_msg("alignment %d", be32_to_cpu(r->alignment)); 120 be32_to_cpu(r->reserved_pebs));
115 dbg_msg("data_pad %d", be32_to_cpu(r->data_pad)); 121 printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment));
116 dbg_msg("vol_type %d", (int)r->vol_type); 122 printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad));
117 dbg_msg("upd_marker %d", (int)r->upd_marker); 123 printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type);
118 dbg_msg("name_len %d", name_len); 124 printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker);
125 printk(KERN_DEBUG "\tname_len %d\n", name_len);
119 126
120 if (r->name[0] == '\0') { 127 if (r->name[0] == '\0') {
121 dbg_msg("name NULL"); 128 printk(KERN_DEBUG "\tname NULL\n");
122 return; 129 return;
123 } 130 }
124 131
125 if (name_len <= UBI_VOL_NAME_MAX && 132 if (name_len <= UBI_VOL_NAME_MAX &&
126 strnlen(&r->name[0], name_len + 1) == name_len) { 133 strnlen(&r->name[0], name_len + 1) == name_len) {
127 dbg_msg("name %s", &r->name[0]); 134 printk(KERN_DEBUG "\tname %s\n", &r->name[0]);
128 } else { 135 } else {
129 dbg_msg("1st 5 characters of the name: %c%c%c%c%c", 136 printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
130 r->name[0], r->name[1], r->name[2], r->name[3], 137 r->name[0], r->name[1], r->name[2], r->name[3],
131 r->name[4]); 138 r->name[4]);
132 } 139 }
133 dbg_msg("crc %#08x", be32_to_cpu(r->crc)); 140 printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc));
134} 141}
135 142
136/** 143/**
@@ -139,15 +146,15 @@ void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
139 */ 146 */
140void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) 147void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
141{ 148{
142 dbg_msg("volume scanning information dump:"); 149 printk(KERN_DEBUG "Volume scanning information dump:\n");
143 dbg_msg("vol_id %d", sv->vol_id); 150 printk(KERN_DEBUG "\tvol_id %d\n", sv->vol_id);
144 dbg_msg("highest_lnum %d", sv->highest_lnum); 151 printk(KERN_DEBUG "\thighest_lnum %d\n", sv->highest_lnum);
145 dbg_msg("leb_count %d", sv->leb_count); 152 printk(KERN_DEBUG "\tleb_count %d\n", sv->leb_count);
146 dbg_msg("compat %d", sv->compat); 153 printk(KERN_DEBUG "\tcompat %d\n", sv->compat);
147 dbg_msg("vol_type %d", sv->vol_type); 154 printk(KERN_DEBUG "\tvol_type %d\n", sv->vol_type);
148 dbg_msg("used_ebs %d", sv->used_ebs); 155 printk(KERN_DEBUG "\tused_ebs %d\n", sv->used_ebs);
149 dbg_msg("last_data_size %d", sv->last_data_size); 156 printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size);
150 dbg_msg("data_pad %d", sv->data_pad); 157 printk(KERN_DEBUG "\tdata_pad %d\n", sv->data_pad);
151} 158}
152 159
153/** 160/**
@@ -157,14 +164,13 @@ void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
157 */ 164 */
158void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) 165void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
159{ 166{
160 dbg_msg("eraseblock scanning information dump:"); 167 printk(KERN_DEBUG "eraseblock scanning information dump:\n");
161 dbg_msg("ec %d", seb->ec); 168 printk(KERN_DEBUG "\tec %d\n", seb->ec);
162 dbg_msg("pnum %d", seb->pnum); 169 printk(KERN_DEBUG "\tpnum %d\n", seb->pnum);
163 if (type == 0) { 170 if (type == 0) {
164 dbg_msg("lnum %d", seb->lnum); 171 printk(KERN_DEBUG "\tlnum %d\n", seb->lnum);
165 dbg_msg("scrub %d", seb->scrub); 172 printk(KERN_DEBUG "\tscrub %d\n", seb->scrub);
166 dbg_msg("sqnum %llu", seb->sqnum); 173 printk(KERN_DEBUG "\tsqnum %llu\n", seb->sqnum);
167 dbg_msg("leb_ver %u", seb->leb_ver);
168 } 174 }
169} 175}
170 176
@@ -176,16 +182,16 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
176{ 182{
177 char nm[17]; 183 char nm[17];
178 184
179 dbg_msg("volume creation request dump:"); 185 printk(KERN_DEBUG "Volume creation request dump:\n");
180 dbg_msg("vol_id %d", req->vol_id); 186 printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id);
181 dbg_msg("alignment %d", req->alignment); 187 printk(KERN_DEBUG "\talignment %d\n", req->alignment);
182 dbg_msg("bytes %lld", (long long)req->bytes); 188 printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes);
183 dbg_msg("vol_type %d", req->vol_type); 189 printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type);
184 dbg_msg("name_len %d", req->name_len); 190 printk(KERN_DEBUG "\tname_len %d\n", req->name_len);
185 191
186 memcpy(nm, req->name, 16); 192 memcpy(nm, req->name, 16);
187 nm[16] = 0; 193 nm[16] = 0;
188 dbg_msg("the 1st 16 characters of the name: %s", nm); 194 printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm);
189} 195}
190 196
191#endif /* CONFIG_MTD_UBI_DEBUG_MSG */ 197#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 8ea99d8c9e1f..78e914d23ece 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -24,21 +24,16 @@
24#ifdef CONFIG_MTD_UBI_DEBUG 24#ifdef CONFIG_MTD_UBI_DEBUG
25#include <linux/random.h> 25#include <linux/random.h>
26 26
27#define ubi_assert(expr) BUG_ON(!(expr))
28#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) 27#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
29#else
30#define ubi_assert(expr) ({})
31#define dbg_err(fmt, ...) ({})
32#endif
33 28
34#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT 29#define ubi_assert(expr) do { \
35#define DBG_DISABLE_BGT 1 30 if (unlikely(!(expr))) { \
36#else 31 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
37#define DBG_DISABLE_BGT 0 32 __func__, __LINE__, current->pid); \
38#endif 33 ubi_dbg_dump_stack(); \
34 } \
35} while (0)
39 36
40#ifdef CONFIG_MTD_UBI_DEBUG_MSG
41/* Generic debugging message */
42#define dbg_msg(fmt, ...) \ 37#define dbg_msg(fmt, ...) \
43 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ 38 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
44 current->pid, __func__, ##__VA_ARGS__) 39 current->pid, __func__, ##__VA_ARGS__)
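ubi_assert() is now non-fatal: instead of BUG_ON() it prints a message and dumps the stack, and the do { } while (0) wrapper keeps the macro safe in unbraced if/else bodies. A userspace analogue of the same construction (the soft_assert name is hypothetical):

#include <stdio.h>

/* Report-and-continue assert; do { } while (0) makes it behave like a
 * single statement, so it can sit safely in an unbraced if/else. */
#define soft_assert(expr)                                               \
        do {                                                            \
                if (!(expr))                                            \
                        fprintf(stderr, "assert failed in %s at %u\n",  \
                                __func__, __LINE__);                    \
        } while (0)

int main(void)
{
        int x = 3;

        if (x > 0)
                soft_assert(x < 2);     /* prints a warning, keeps running */
        else
                soft_assert(x == 0);

        return 0;
}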
@@ -61,36 +56,29 @@ void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
61void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); 56void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
62void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); 57void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
63 58
59#ifdef CONFIG_MTD_UBI_DEBUG_MSG
60/* General debugging messages */
61#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
64#else 62#else
65 63#define dbg_gen(fmt, ...) ({})
66#define dbg_msg(fmt, ...) ({}) 64#endif
67#define ubi_dbg_dump_stack() ({})
68#define ubi_dbg_dump_ec_hdr(ec_hdr) ({})
69#define ubi_dbg_dump_vid_hdr(vid_hdr) ({})
70#define ubi_dbg_dump_vol_info(vol) ({})
71#define ubi_dbg_dump_vtbl_record(r, idx) ({})
72#define ubi_dbg_dump_sv(sv) ({})
73#define ubi_dbg_dump_seb(seb, type) ({})
74#define ubi_dbg_dump_mkvol_req(req) ({})
75
76#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
77 65
78#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 66#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
79/* Messages from the eraseblock association unit */ 67/* Messages from the eraseblock association sub-system */
80#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 68#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
81#else 69#else
82#define dbg_eba(fmt, ...) ({}) 70#define dbg_eba(fmt, ...) ({})
83#endif 71#endif
84 72
85#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 73#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
86/* Messages from the wear-leveling unit */ 74/* Messages from the wear-leveling sub-system */
87#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 75#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
88#else 76#else
89#define dbg_wl(fmt, ...) ({}) 77#define dbg_wl(fmt, ...) ({})
90#endif 78#endif
91 79
92#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 80#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
93/* Messages from the input/output unit */ 81/* Messages from the input/output sub-system */
94#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 82#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
95#else 83#else
96#define dbg_io(fmt, ...) ({}) 84#define dbg_io(fmt, ...) ({})
@@ -105,6 +93,12 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
105#define UBI_IO_DEBUG 0 93#define UBI_IO_DEBUG 0
106#endif 94#endif
107 95
96#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
97#define DBG_DISABLE_BGT 1
98#else
99#define DBG_DISABLE_BGT 0
100#endif
101
108#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS 102#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
109/** 103/**
110 * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip. 104 * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
@@ -149,4 +143,30 @@ static inline int ubi_dbg_is_erase_failure(void)
149#define ubi_dbg_is_erase_failure() 0 143#define ubi_dbg_is_erase_failure() 0
150#endif 144#endif
151 145
146#else
147
148#define ubi_assert(expr) ({})
149#define dbg_err(fmt, ...) ({})
150#define dbg_msg(fmt, ...) ({})
151#define dbg_gen(fmt, ...) ({})
152#define dbg_eba(fmt, ...) ({})
153#define dbg_wl(fmt, ...) ({})
154#define dbg_io(fmt, ...) ({})
155#define dbg_bld(fmt, ...) ({})
156#define ubi_dbg_dump_stack() ({})
157#define ubi_dbg_dump_ec_hdr(ec_hdr) ({})
158#define ubi_dbg_dump_vid_hdr(vid_hdr) ({})
159#define ubi_dbg_dump_vol_info(vol) ({})
160#define ubi_dbg_dump_vtbl_record(r, idx) ({})
161#define ubi_dbg_dump_sv(sv) ({})
162#define ubi_dbg_dump_seb(seb, type) ({})
163#define ubi_dbg_dump_mkvol_req(req) ({})
164
165#define UBI_IO_DEBUG 0
166#define DBG_DISABLE_BGT 0
167#define ubi_dbg_is_bitflip() 0
168#define ubi_dbg_is_write_failure() 0
169#define ubi_dbg_is_erase_failure() 0
170
171#endif /* !CONFIG_MTD_UBI_DEBUG */
152#endif /* !__UBI_DEBUG_H__ */ 172#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 7ce91ca742b1..e04bcf1dff87 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -19,20 +19,20 @@
19 */ 19 */
20 20
21/* 21/*
22 * The UBI Eraseblock Association (EBA) unit. 22 * The UBI Eraseblock Association (EBA) sub-system.
23 * 23 *
24 * This unit is responsible for I/O to/from logical eraseblock. 24 * This sub-system is responsible for I/O to/from logical eraseblock.
25 * 25 *
26 * Although in this implementation the EBA table is fully kept and managed in 26 * Although in this implementation the EBA table is fully kept and managed in
27 * RAM, which assumes poor scalability, it might be (partially) maintained on 27 * RAM, which assumes poor scalability, it might be (partially) maintained on
28 * flash in future implementations. 28 * flash in future implementations.
29 * 29 *
30 * The EBA unit implements per-logical eraseblock locking. Before accessing a 30 * The EBA sub-system implements per-logical eraseblock locking. Before
31 * logical eraseblock it is locked for reading or writing. The per-logical 31 * accessing a logical eraseblock it is locked for reading or writing. The
32 * eraseblock locking is implemented by means of the lock tree. The lock tree 32 * per-logical eraseblock locking is implemented by means of the lock tree. The
33 * is an RB-tree which refers all the currently locked logical eraseblocks. The 33 * lock tree is an RB-tree which refers all the currently locked logical
34 * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by 34 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
35 * (@vol_id, @lnum) pairs. 35 * They are indexed by (@vol_id, @lnum) pairs.
36 * 36 *
37 * EBA also maintains the global sequence counter which is incremented each 37 * EBA also maintains the global sequence counter which is incremented each
38 * time a logical eraseblock is mapped to a physical eraseblock and it is 38 * time a logical eraseblock is mapped to a physical eraseblock and it is
@@ -189,9 +189,7 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
189 le->users += 1; 189 le->users += 1;
190 spin_unlock(&ubi->ltree_lock); 190 spin_unlock(&ubi->ltree_lock);
191 191
192 if (le_free) 192 kfree(le_free);
193 kfree(le_free);
194
195 return le; 193 return le;
196} 194}
197 195
@@ -223,22 +221,18 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
223 */ 221 */
224static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 222static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
225{ 223{
226 int free = 0;
227 struct ubi_ltree_entry *le; 224 struct ubi_ltree_entry *le;
228 225
229 spin_lock(&ubi->ltree_lock); 226 spin_lock(&ubi->ltree_lock);
230 le = ltree_lookup(ubi, vol_id, lnum); 227 le = ltree_lookup(ubi, vol_id, lnum);
231 le->users -= 1; 228 le->users -= 1;
232 ubi_assert(le->users >= 0); 229 ubi_assert(le->users >= 0);
230 up_read(&le->mutex);
233 if (le->users == 0) { 231 if (le->users == 0) {
234 rb_erase(&le->rb, &ubi->ltree); 232 rb_erase(&le->rb, &ubi->ltree);
235 free = 1; 233 kfree(le);
236 } 234 }
237 spin_unlock(&ubi->ltree_lock); 235 spin_unlock(&ubi->ltree_lock);
238
239 up_read(&le->mutex);
240 if (free)
241 kfree(le);
242} 236}
243 237
244/** 238/**
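The locking rework in the three ltree unlock paths is easier to follow with the new read-unlock variant reassembled from the interleaved hunk above: the rwsem is dropped and the entry freed while ltree_lock is still held, so no other CPU can look the entry up between its last put and the kfree().

static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
        struct ubi_ltree_entry *le;

        spin_lock(&ubi->ltree_lock);
        le = ltree_lookup(ubi, vol_id, lnum);
        le->users -= 1;
        ubi_assert(le->users >= 0);
        up_read(&le->mutex);                    /* drop the rwsem first...            */
        if (le->users == 0) {
                rb_erase(&le->rb, &ubi->ltree);
                kfree(le);                      /* ...then free; kfree() never sleeps */
        }
        spin_unlock(&ubi->ltree_lock);          /* entry was unreachable throughout   */
}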
@@ -274,7 +268,6 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
274 */ 268 */
275static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) 269static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
276{ 270{
277 int free;
278 struct ubi_ltree_entry *le; 271 struct ubi_ltree_entry *le;
279 272
280 le = ltree_add_entry(ubi, vol_id, lnum); 273 le = ltree_add_entry(ubi, vol_id, lnum);
@@ -289,12 +282,9 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
289 ubi_assert(le->users >= 0); 282 ubi_assert(le->users >= 0);
290 if (le->users == 0) { 283 if (le->users == 0) {
291 rb_erase(&le->rb, &ubi->ltree); 284 rb_erase(&le->rb, &ubi->ltree);
292 free = 1;
293 } else
294 free = 0;
295 spin_unlock(&ubi->ltree_lock);
296 if (free)
297 kfree(le); 285 kfree(le);
286 }
287 spin_unlock(&ubi->ltree_lock);
298 288
299 return 1; 289 return 1;
300} 290}
@@ -307,23 +297,18 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
307 */ 297 */
308static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 298static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
309{ 299{
310 int free;
311 struct ubi_ltree_entry *le; 300 struct ubi_ltree_entry *le;
312 301
313 spin_lock(&ubi->ltree_lock); 302 spin_lock(&ubi->ltree_lock);
314 le = ltree_lookup(ubi, vol_id, lnum); 303 le = ltree_lookup(ubi, vol_id, lnum);
315 le->users -= 1; 304 le->users -= 1;
316 ubi_assert(le->users >= 0); 305 ubi_assert(le->users >= 0);
306 up_write(&le->mutex);
317 if (le->users == 0) { 307 if (le->users == 0) {
318 rb_erase(&le->rb, &ubi->ltree); 308 rb_erase(&le->rb, &ubi->ltree);
319 free = 1;
320 } else
321 free = 0;
322 spin_unlock(&ubi->ltree_lock);
323
324 up_write(&le->mutex);
325 if (free)
326 kfree(le); 309 kfree(le);
310 }
311 spin_unlock(&ubi->ltree_lock);
327} 312}
328 313
329/** 314/**
@@ -516,9 +501,8 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
516 struct ubi_vid_hdr *vid_hdr; 501 struct ubi_vid_hdr *vid_hdr;
517 502
518 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 503 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
519 if (!vid_hdr) { 504 if (!vid_hdr)
520 return -ENOMEM; 505 return -ENOMEM;
521 }
522 506
523 mutex_lock(&ubi->buf_mutex); 507 mutex_lock(&ubi->buf_mutex);
524 508
@@ -752,7 +736,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
752 /* If this is the last LEB @len may be unaligned */ 736 /* If this is the last LEB @len may be unaligned */
753 len = ALIGN(data_size, ubi->min_io_size); 737 len = ALIGN(data_size, ubi->min_io_size);
754 else 738 else
755 ubi_assert(len % ubi->min_io_size == 0); 739 ubi_assert(!(len & (ubi->min_io_size - 1)));
756 740
757 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 741 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
758 if (!vid_hdr) 742 if (!vid_hdr)
@@ -919,7 +903,7 @@ retry:
919 } 903 }
920 904
921 if (vol->eba_tbl[lnum] >= 0) { 905 if (vol->eba_tbl[lnum] >= 0) {
922 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1); 906 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
923 if (err) 907 if (err)
924 goto out_leb_unlock; 908 goto out_leb_unlock;
925 } 909 }
@@ -1141,7 +1125,7 @@ out_unlock_leb:
1141} 1125}
1142 1126
1143/** 1127/**
1144 * ubi_eba_init_scan - initialize the EBA unit using scanning information. 1128 * ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
1145 * @ubi: UBI device description object 1129 * @ubi: UBI device description object
1146 * @si: scanning information 1130 * @si: scanning information
1147 * 1131 *
@@ -1156,7 +1140,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1156 struct ubi_scan_leb *seb; 1140 struct ubi_scan_leb *seb;
1157 struct rb_node *rb; 1141 struct rb_node *rb;
1158 1142
1159 dbg_eba("initialize EBA unit"); 1143 dbg_eba("initialize EBA sub-system");
1160 1144
1161 spin_lock_init(&ubi->ltree_lock); 1145 spin_lock_init(&ubi->ltree_lock);
1162 mutex_init(&ubi->alc_mutex); 1146 mutex_init(&ubi->alc_mutex);
@@ -1222,7 +1206,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1222 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1206 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1223 } 1207 }
1224 1208
1225 dbg_eba("EBA unit is initialized"); 1209 dbg_eba("EBA sub-system is initialized");
1226 return 0; 1210 return 0;
1227 1211
1228out_free: 1212out_free:
@@ -1233,20 +1217,3 @@ out_free:
1233 } 1217 }
1234 return err; 1218 return err;
1235} 1219}
1236
1237/**
1238 * ubi_eba_close - close EBA unit.
1239 * @ubi: UBI device description object
1240 */
1241void ubi_eba_close(const struct ubi_device *ubi)
1242{
1243 int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1244
1245 dbg_eba("close EBA unit");
1246
1247 for (i = 0; i < num_volumes; i++) {
1248 if (!ubi->volumes[i])
1249 continue;
1250 kfree(ubi->volumes[i]->eba_tbl);
1251 }
1252}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index e909b390069a..605812bb0b1a 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -111,7 +111,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
111 struct ubi_device *ubi; 111 struct ubi_device *ubi;
112 uint64_t tmp = from; 112 uint64_t tmp = from;
113 113
114 dbg_msg("read %zd bytes from offset %lld", len, from); 114 dbg_gen("read %zd bytes from offset %lld", len, from);
115 115
116 if (len < 0 || from < 0 || from + len > mtd->size) 116 if (len < 0 || from < 0 || from + len > mtd->size)
117 return -EINVAL; 117 return -EINVAL;
@@ -162,7 +162,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
162 struct ubi_device *ubi; 162 struct ubi_device *ubi;
163 uint64_t tmp = to; 163 uint64_t tmp = to;
164 164
165 dbg_msg("write %zd bytes to offset %lld", len, to); 165 dbg_gen("write %zd bytes to offset %lld", len, to);
166 166
167 if (len < 0 || to < 0 || len + to > mtd->size) 167 if (len < 0 || to < 0 || len + to > mtd->size)
168 return -EINVAL; 168 return -EINVAL;
@@ -215,7 +215,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
215 struct ubi_volume *vol; 215 struct ubi_volume *vol;
216 struct ubi_device *ubi; 216 struct ubi_device *ubi;
217 217
218 dbg_msg("erase %u bytes at offset %u", instr->len, instr->addr); 218 dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr);
219 219
220 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) 220 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
221 return -EINVAL; 221 return -EINVAL;
@@ -249,8 +249,8 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
249 if (err) 249 if (err)
250 goto out_err; 250 goto out_err;
251 251
252 instr->state = MTD_ERASE_DONE; 252 instr->state = MTD_ERASE_DONE;
253 mtd_erase_callback(instr); 253 mtd_erase_callback(instr);
254 return 0; 254 return 0;
255 255
256out_err: 256out_err:
@@ -299,12 +299,12 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
299 mtd->size = vol->used_bytes; 299 mtd->size = vol->used_bytes;
300 300
301 if (add_mtd_device(mtd)) { 301 if (add_mtd_device(mtd)) {
302 ubi_err("cannot not add MTD device\n"); 302 ubi_err("cannot not add MTD device");
303 kfree(mtd->name); 303 kfree(mtd->name);
304 return -ENFILE; 304 return -ENFILE;
305 } 305 }
306 306
307 dbg_msg("added mtd%d (\"%s\"), size %u, EB size %u", 307 dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u",
308 mtd->index, mtd->name, mtd->size, mtd->erasesize); 308 mtd->index, mtd->name, mtd->size, mtd->erasesize);
309 return 0; 309 return 0;
310} 310}
@@ -322,7 +322,7 @@ int ubi_destroy_gluebi(struct ubi_volume *vol)
322 int err; 322 int err;
323 struct mtd_info *mtd = &vol->gluebi_mtd; 323 struct mtd_info *mtd = &vol->gluebi_mtd;
324 324
325 dbg_msg("remove mtd%d", mtd->index); 325 dbg_gen("remove mtd%d", mtd->index);
326 err = del_mtd_device(mtd); 326 err = del_mtd_device(mtd);
327 if (err) 327 if (err)
328 return err; 328 return err;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 4ac11df7b048..2fb64be44f1b 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -20,15 +20,15 @@
20 */ 20 */
21 21
22/* 22/*
23 * UBI input/output unit. 23 * UBI input/output sub-system.
24 * 24 *
25 * This unit provides a uniform way to work with all kinds of the underlying 25 * This sub-system provides a uniform way to work with all kinds of the
26 * MTD devices. It also implements handy functions for reading and writing UBI 26 * underlying MTD devices. It also implements handy functions for reading and
27 * headers. 27 * writing UBI headers.
28 * 28 *
29 * We are trying to have a paranoid mindset and not to trust to what we read 29 * We are trying to have a paranoid mindset and not to trust to what we read
30 * from the flash media in order to be more secure and robust. So this unit 30 * from the flash media in order to be more secure and robust. So this
31 * validates every single header it reads from the flash media. 31 * sub-system validates every single header it reads from the flash media.
32 * 32 *
33 * Some words about how the eraseblock headers are stored. 33 * Some words about how the eraseblock headers are stored.
34 * 34 *
@@ -79,11 +79,11 @@
79 * 512-byte chunks, we have to allocate one more buffer and copy our VID header 79 * 512-byte chunks, we have to allocate one more buffer and copy our VID header
80 * to offset 448 of this buffer. 80 * to offset 448 of this buffer.
81 * 81 *
82 * The I/O unit does the following trick in order to avoid this extra copy. 82 * The I/O sub-system does the following trick in order to avoid this extra
83 * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header 83 * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
84 * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the 84 * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
85 * VID header is being written out, it shifts the VID header pointer back and 85 * When the VID header is being written out, it shifts the VID header pointer
86 * writes the whole sub-page. 86 * back and writes the whole sub-page.
87 */ 87 */
88 88
89#include <linux/crc32.h> 89#include <linux/crc32.h>
@@ -156,15 +156,19 @@ retry:
156 /* 156 /*
157 * -EUCLEAN is reported if there was a bit-flip which 157 * -EUCLEAN is reported if there was a bit-flip which
158 * was corrected, so this is harmless. 158 * was corrected, so this is harmless.
159 *
160 * We do not report about it here unless debugging is
161 * enabled. A corresponding message will be printed
162 * later, when it is has been scrubbed.
159 */ 163 */
160 ubi_msg("fixable bit-flip detected at PEB %d", pnum); 164 dbg_msg("fixable bit-flip detected at PEB %d", pnum);
161 ubi_assert(len == read); 165 ubi_assert(len == read);
162 return UBI_IO_BITFLIPS; 166 return UBI_IO_BITFLIPS;
163 } 167 }
164 168
165 if (read != len && retries++ < UBI_IO_RETRIES) { 169 if (read != len && retries++ < UBI_IO_RETRIES) {
166 dbg_io("error %d while reading %d bytes from PEB %d:%d, " 170 dbg_io("error %d while reading %d bytes from PEB %d:%d,"
167 "read only %zd bytes, retry", 171 " read only %zd bytes, retry",
168 err, len, pnum, offset, read); 172 err, len, pnum, offset, read);
169 yield(); 173 yield();
170 goto retry; 174 goto retry;
@@ -187,7 +191,7 @@ retry:
187 ubi_assert(len == read); 191 ubi_assert(len == read);
188 192
189 if (ubi_dbg_is_bitflip()) { 193 if (ubi_dbg_is_bitflip()) {
190 dbg_msg("bit-flip (emulated)"); 194 dbg_gen("bit-flip (emulated)");
191 err = UBI_IO_BITFLIPS; 195 err = UBI_IO_BITFLIPS;
192 } 196 }
193 } 197 }
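
A corrected bit-flip is deliberately not treated as a failure here: ubi_io_read() returns the positive code UBI_IO_BITFLIPS and leaves it to the caller to schedule scrubbing. A minimal sketch of that calling convention, assuming kernel context with "ubi.h" in scope; read_leb_data() itself is hypothetical, while ubi_io_read() and ubi_wl_scrub_peb() are the existing UBI helpers:

	/* Hypothetical caller: treat a corrected bit-flip as a scrub hint. */
	static int read_leb_data(struct ubi_device *ubi, int pnum, void *buf,
				 int offset, int len)
	{
		int err, scrub = 0;

		err = ubi_io_read(ubi, buf, pnum, offset, len);
		if (err == UBI_IO_BITFLIPS) {
			scrub = 1;	/* data is intact, but the PEB is ageing */
			err = 0;
		} else if (err)
			return err;	/* a real read failure */

		if (scrub)
			err = ubi_wl_scrub_peb(ubi, pnum);
		return err;
	}
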
@@ -391,6 +395,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
391{ 395{
392 int err, i, patt_count; 396 int err, i, patt_count;
393 397
398 ubi_msg("run torture test for PEB %d", pnum);
394 patt_count = ARRAY_SIZE(patterns); 399 patt_count = ARRAY_SIZE(patterns);
395 ubi_assert(patt_count > 0); 400 ubi_assert(patt_count > 0);
396 401
@@ -434,6 +439,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
434 } 439 }
435 440
436 err = patt_count; 441 err = patt_count;
442 ubi_msg("PEB %d passed torture test, do not mark it a bad", pnum);
437 443
438out: 444out:
439 mutex_unlock(&ubi->buf_mutex); 445 mutex_unlock(&ubi->buf_mutex);
@@ -699,8 +705,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
699 705
700 if (hdr_crc != crc) { 706 if (hdr_crc != crc) {
701 if (verbose) { 707 if (verbose) {
702 ubi_warn("bad EC header CRC at PEB %d, calculated %#08x," 708 ubi_warn("bad EC header CRC at PEB %d, calculated "
703 " read %#08x", pnum, crc, hdr_crc); 709 "%#08x, read %#08x", pnum, crc, hdr_crc);
704 ubi_dbg_dump_ec_hdr(ec_hdr); 710 ubi_dbg_dump_ec_hdr(ec_hdr);
705 } 711 }
706 return UBI_IO_BAD_EC_HDR; 712 return UBI_IO_BAD_EC_HDR;
@@ -1095,8 +1101,7 @@ fail:
1095} 1101}
1096 1102
1097/** 1103/**
1098 * paranoid_check_peb_ec_hdr - check that the erase counter header of a 1104 * paranoid_check_peb_ec_hdr - check erase counter header.
1099 * physical eraseblock is in-place and is all right.
1100 * @ubi: UBI device description object 1105 * @ubi: UBI device description object
1101 * @pnum: the physical eraseblock number to check 1106 * @pnum: the physical eraseblock number to check
1102 * 1107 *
@@ -1174,8 +1179,7 @@ fail:
1174} 1179}
1175 1180
1176/** 1181/**
1177 * paranoid_check_peb_vid_hdr - check that the volume identifier header of a 1182 * paranoid_check_peb_vid_hdr - check volume identifier header.
1178 * physical eraseblock is in-place and is all right.
1179 * @ubi: UBI device description object 1183 * @ubi: UBI device description object
1180 * @pnum: the physical eraseblock number to check 1184 * @pnum: the physical eraseblock number to check
1181 * 1185 *
@@ -1256,7 +1260,7 @@ static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
1256 1260
1257fail: 1261fail:
1258 ubi_err("paranoid check failed for PEB %d", pnum); 1262 ubi_err("paranoid check failed for PEB %d", pnum);
1259 dbg_msg("hex dump of the %d-%d region", offset, offset + len); 1263 ubi_msg("hex dump of the %d-%d region", offset, offset + len);
1260 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 1264 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1261 ubi->dbg_peb_buf, len, 1); 1265 ubi->dbg_peb_buf, len, 1);
1262 err = 1; 1266 err = 1;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index a70d58823f8d..5d9bcf109c13 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -106,7 +106,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
106 struct ubi_device *ubi; 106 struct ubi_device *ubi;
107 struct ubi_volume *vol; 107 struct ubi_volume *vol;
108 108
109 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 109 dbg_gen("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
110 110
111 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 111 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
112 return ERR_PTR(-EINVAL); 112 return ERR_PTR(-EINVAL);
@@ -215,7 +215,7 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
215 struct ubi_device *ubi; 215 struct ubi_device *ubi;
216 struct ubi_volume_desc *ret; 216 struct ubi_volume_desc *ret;
217 217
218 dbg_msg("open volume %s, mode %d", name, mode); 218 dbg_gen("open volume %s, mode %d", name, mode);
219 219
220 if (!name) 220 if (!name)
221 return ERR_PTR(-EINVAL); 221 return ERR_PTR(-EINVAL);
@@ -266,7 +266,7 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
266 struct ubi_volume *vol = desc->vol; 266 struct ubi_volume *vol = desc->vol;
267 struct ubi_device *ubi = vol->ubi; 267 struct ubi_device *ubi = vol->ubi;
268 268
269 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); 269 dbg_gen("close volume %d, mode %d", vol->vol_id, desc->mode);
270 270
271 spin_lock(&ubi->volumes_lock); 271 spin_lock(&ubi->volumes_lock);
272 switch (desc->mode) { 272 switch (desc->mode) {
@@ -323,7 +323,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
323 struct ubi_device *ubi = vol->ubi; 323 struct ubi_device *ubi = vol->ubi;
324 int err, vol_id = vol->vol_id; 324 int err, vol_id = vol->vol_id;
325 325
326 dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); 326 dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
327 327
328 if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || 328 if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
329 lnum >= vol->used_ebs || offset < 0 || len < 0 || 329 lnum >= vol->used_ebs || offset < 0 || len < 0 ||
@@ -388,7 +388,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
388 struct ubi_device *ubi = vol->ubi; 388 struct ubi_device *ubi = vol->ubi;
389 int vol_id = vol->vol_id; 389 int vol_id = vol->vol_id;
390 390
391 dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); 391 dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
392 392
393 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) 393 if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
394 return -EINVAL; 394 return -EINVAL;
@@ -397,8 +397,8 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
397 return -EROFS; 397 return -EROFS;
398 398
399 if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || 399 if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
400 offset + len > vol->usable_leb_size || offset % ubi->min_io_size || 400 offset + len > vol->usable_leb_size ||
401 len % ubi->min_io_size) 401 offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
402 return -EINVAL; 402 return -EINVAL;
403 403
404 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && 404 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
@@ -438,7 +438,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
438 struct ubi_device *ubi = vol->ubi; 438 struct ubi_device *ubi = vol->ubi;
439 int vol_id = vol->vol_id; 439 int vol_id = vol->vol_id;
440 440
441 dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); 441 dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
442 442
443 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) 443 if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
444 return -EINVAL; 444 return -EINVAL;
@@ -447,7 +447,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
447 return -EROFS; 447 return -EROFS;
448 448
449 if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || 449 if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
450 len > vol->usable_leb_size || len % ubi->min_io_size) 450 len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
451 return -EINVAL; 451 return -EINVAL;
452 452
453 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && 453 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
@@ -482,7 +482,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
482 struct ubi_device *ubi = vol->ubi; 482 struct ubi_device *ubi = vol->ubi;
483 int err; 483 int err;
484 484
485 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 485 dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
486 486
487 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 487 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
488 return -EROFS; 488 return -EROFS;
@@ -542,7 +542,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
542 struct ubi_volume *vol = desc->vol; 542 struct ubi_volume *vol = desc->vol;
543 struct ubi_device *ubi = vol->ubi; 543 struct ubi_device *ubi = vol->ubi;
544 544
545 dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); 545 dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
546 546
547 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 547 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
548 return -EROFS; 548 return -EROFS;
@@ -579,7 +579,7 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
579 struct ubi_volume *vol = desc->vol; 579 struct ubi_volume *vol = desc->vol;
580 struct ubi_device *ubi = vol->ubi; 580 struct ubi_device *ubi = vol->ubi;
581 581
582 dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); 582 dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
583 583
584 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 584 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
585 return -EROFS; 585 return -EROFS;
@@ -621,7 +621,7 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
621{ 621{
622 struct ubi_volume *vol = desc->vol; 622 struct ubi_volume *vol = desc->vol;
623 623
624 dbg_msg("test LEB %d:%d", vol->vol_id, lnum); 624 dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
625 625
626 if (lnum < 0 || lnum >= vol->reserved_pebs) 626 if (lnum < 0 || lnum >= vol->reserved_pebs)
627 return -EINVAL; 627 return -EINVAL;
@@ -632,3 +632,27 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
632 return vol->eba_tbl[lnum] >= 0; 632 return vol->eba_tbl[lnum] >= 0;
633} 633}
634EXPORT_SYMBOL_GPL(ubi_is_mapped); 634EXPORT_SYMBOL_GPL(ubi_is_mapped);
635
636/**
637 * ubi_sync - synchronize UBI device buffers.
638 * @ubi_num: UBI device to synchronize
639 *
640 * The underlying MTD device may cache data in hardware or in software. This
641 * function ensures the caches are flushed. Returns zero in case of success and
642 * a negative error code in case of failure.
643 */
644int ubi_sync(int ubi_num)
645{
646 struct ubi_device *ubi;
647
648 ubi = ubi_get_device(ubi_num);
649 if (!ubi)
650 return -ENODEV;
651
652 if (ubi->mtd->sync)
653 ubi->mtd->sync(ubi->mtd);
654
655 ubi_put_device(ubi);
656 return 0;
657}
658EXPORT_SYMBOL_GPL(ubi_sync);
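
The new ubi_sync() call gives in-kernel UBI users a way to push data through any hardware or software caches of the underlying MTD device. A hedged usage sketch from a hypothetical client; ubi_leb_write(), UBI_UNKNOWN and ubi_sync() are the exported UBI interfaces, while the wrapper itself is illustrative and assumes len is already aligned to the minimal I/O unit as the checks above require:

	/* Hypothetical client: write LEB 0 of an open volume, then flush caches. */
	static int write_and_sync(struct ubi_volume_desc *desc, int ubi_num,
				  const void *buf, int len)
	{
		int err;

		err = ubi_leb_write(desc, 0, buf, 0, len, UBI_UNKNOWN);
		if (err)
			return err;

		/* Make sure the data really reached the flash device */
		return ubi_sync(ubi_num);
	}
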
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 93e052812012..22ad31402945 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -37,7 +37,7 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
37{ 37{
38 int i; 38 int i;
39 39
40 ubi_assert(length % ubi->min_io_size == 0); 40 ubi_assert(!(length & (ubi->min_io_size - 1)));
41 41
42 for (i = length - 1; i >= 0; i--) 42 for (i = length - 1; i >= 0; i--)
43 if (((const uint8_t *)buf)[i] != 0xFF) 43 if (((const uint8_t *)buf)[i] != 0xFF)
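
Both the ubi_leb_write()/ubi_leb_change() checks above and this assertion switch from the modulo operator to a bit-mask. The two forms agree only because min_io_size (the minimal flash I/O unit) is a power of two, and the mask avoids a division. A small stand-alone illustration of the equivalence; the helper name is made up:

	#include <assert.h>

	/* x % align == 0  is the same as  !(x & (align - 1))  for power-of-two align */
	static int is_aligned(unsigned int x, unsigned int align)
	{
		assert(align && !(align & (align - 1)));	/* power of two */
		return !(x & (align - 1));
	}

	int main(void)
	{
		assert(is_aligned(4096, 512));
		assert(!is_aligned(4097, 512));
		return 0;
	}
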
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 96d410e106ab..967bb4406df9 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -19,9 +19,9 @@
19 */ 19 */
20 20
21/* 21/*
22 * UBI scanning unit. 22 * UBI scanning sub-system.
23 * 23 *
24 * This unit is responsible for scanning the flash media, checking UBI 24 * This sub-system is responsible for scanning the flash media, checking UBI
25 * headers and providing complete information about the UBI flash image. 25 * headers and providing complete information about the UBI flash image.
26 * 26 *
27 * The scanning information is represented by a &struct ubi_scan_info' object. 27 * The scanning information is represented by a &struct ubi_scan_info' object.
@@ -93,8 +93,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
93} 93}
94 94
95/** 95/**
96 * validate_vid_hdr - check that volume identifier header is correct and 96 * validate_vid_hdr - check volume identifier header.
97 * consistent.
98 * @vid_hdr: the volume identifier header to check 97 * @vid_hdr: the volume identifier header to check
99 * @sv: information about the volume this logical eraseblock belongs to 98 * @sv: information about the volume this logical eraseblock belongs to
100 * @pnum: physical eraseblock number the VID header came from 99 * @pnum: physical eraseblock number the VID header came from
@@ -103,7 +102,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
103 * non-zero if an inconsistency was found and zero if not. 102 * non-zero if an inconsistency was found and zero if not.
104 * 103 *
105 * Note, UBI does sanity check of everything it reads from the flash media. 104 * Note, UBI does sanity check of everything it reads from the flash media.
106 * Most of the checks are done in the I/O unit. Here we check that the 105 * Most of the checks are done in the I/O sub-system. Here we check that the
107 * information in the VID header is consistent to the information in other VID 106 * information in the VID header is consistent to the information in other VID
108 * headers of the same volume. 107 * headers of the same volume.
109 */ 108 */
@@ -247,45 +246,21 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
247 struct ubi_vid_hdr *vh = NULL; 246 struct ubi_vid_hdr *vh = NULL;
248 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); 247 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
249 248
250 if (seb->sqnum == 0 && sqnum2 == 0) { 249 if (sqnum2 == seb->sqnum) {
251 long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
252
253 /* 250 /*
254 * UBI constantly increases the logical eraseblock version 251 * This must be a really ancient UBI image which has been
255 * number and it can overflow. Thus, we have to bear in mind 252 * created before sequence numbers support has been added. At
256 * that versions that are close to %0xFFFFFFFF are less then 253 * that times we used 32-bit LEB versions stored in logical
257 * versions that are close to %0. 254 * eraseblocks. That was before UBI got into mainline. We do not
258 * 255 * support these images anymore. Well, those images will work
259 * The UBI WL unit guarantees that the number of pending tasks 256 * still work, but only if no unclean reboots happened.
260 * is not greater then %0x7FFFFFFF. So, if the difference
261 * between any two versions is greater or equivalent to
262 * %0x7FFFFFFF, there was an overflow and the logical
263 * eraseblock with lower version is actually newer then the one
264 * with higher version.
265 *
266 * FIXME: but this is anyway obsolete and will be removed at
267 * some point.
268 */ 257 */
269 dbg_bld("using old crappy leb_ver stuff"); 258 ubi_err("unsupported on-flash UBI format\n");
270 259 return -EINVAL;
271 if (v1 == v2) { 260 }
272 ubi_err("PEB %d and PEB %d have the same version %lld",
273 seb->pnum, pnum, v1);
274 return -EINVAL;
275 }
276
277 abs = v1 - v2;
278 if (abs < 0)
279 abs = -abs;
280 261
281 if (abs < 0x7FFFFFFF) 262 /* Obviously the LEB with lower sequence counter is older */
282 /* Non-overflow situation */ 263 second_is_newer = !!(sqnum2 > seb->sqnum);
283 second_is_newer = (v2 > v1);
284 else
285 second_is_newer = (v2 < v1);
286 } else
287 /* Obviously the LEB with lower sequence counter is older */
288 second_is_newer = sqnum2 > seb->sqnum;
289 264
290 /* 265 /*
291 * Now we know which copy is newer. If the copy flag of the PEB with 266 * Now we know which copy is newer. If the copy flag of the PEB with
@@ -293,7 +268,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
293 * check data CRC. For the second PEB we already have the VID header, 268 * check data CRC. For the second PEB we already have the VID header,
294 * for the first one - we'll need to re-read it from flash. 269 * for the first one - we'll need to re-read it from flash.
295 * 270 *
296 * FIXME: this may be optimized so that we wouldn't read twice. 271 * Note: this may be optimized so that we wouldn't read twice.
297 */ 272 */
298 273
299 if (second_is_newer) { 274 if (second_is_newer) {
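
With leb_ver gone, compare_lebs() orders the two copies of a logical eraseblock purely by the 64-bit sequence number; the counter is not expected to wrap, so none of the old 32-bit overflow gymnastics is needed. The decision boils down to a sketch like the following (the helper name is illustrative, the logic mirrors the hunk above):

	/* A copy with a larger 64-bit sequence number is newer. */
	static int second_copy_is_newer(unsigned long long sqnum1,
					unsigned long long sqnum2)
	{
		if (sqnum1 == sqnum2)
			return -1;	/* pre-sqnum (ancient) image, now rejected */
		return sqnum2 > sqnum1;
	}
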
@@ -379,8 +354,7 @@ out_free_vidh:
379} 354}
380 355
381/** 356/**
382 * ubi_scan_add_used - add information about a physical eraseblock to the 357 * ubi_scan_add_used - add physical eraseblock to the scanning information.
383 * scanning information.
384 * @ubi: UBI device description object 358 * @ubi: UBI device description object
385 * @si: scanning information 359 * @si: scanning information
386 * @pnum: the physical eraseblock number 360 * @pnum: the physical eraseblock number
@@ -400,7 +374,6 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
400 int bitflips) 374 int bitflips)
401{ 375{
402 int err, vol_id, lnum; 376 int err, vol_id, lnum;
403 uint32_t leb_ver;
404 unsigned long long sqnum; 377 unsigned long long sqnum;
405 struct ubi_scan_volume *sv; 378 struct ubi_scan_volume *sv;
406 struct ubi_scan_leb *seb; 379 struct ubi_scan_leb *seb;
@@ -409,10 +382,9 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
409 vol_id = be32_to_cpu(vid_hdr->vol_id); 382 vol_id = be32_to_cpu(vid_hdr->vol_id);
410 lnum = be32_to_cpu(vid_hdr->lnum); 383 lnum = be32_to_cpu(vid_hdr->lnum);
411 sqnum = be64_to_cpu(vid_hdr->sqnum); 384 sqnum = be64_to_cpu(vid_hdr->sqnum);
412 leb_ver = be32_to_cpu(vid_hdr->leb_ver);
413 385
414 dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d", 386 dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
415 pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips); 387 pnum, vol_id, lnum, ec, sqnum, bitflips);
416 388
417 sv = add_volume(si, vol_id, pnum, vid_hdr); 389 sv = add_volume(si, vol_id, pnum, vid_hdr);
418 if (IS_ERR(sv) < 0) 390 if (IS_ERR(sv) < 0)
@@ -445,25 +417,20 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
445 */ 417 */
446 418
447 dbg_bld("this LEB already exists: PEB %d, sqnum %llu, " 419 dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
448 "LEB ver %u, EC %d", seb->pnum, seb->sqnum, 420 "EC %d", seb->pnum, seb->sqnum, seb->ec);
449 seb->leb_ver, seb->ec);
450
451 /*
452 * Make sure that the logical eraseblocks have different
453 * versions. Otherwise the image is bad.
454 */
455 if (seb->leb_ver == leb_ver && leb_ver != 0) {
456 ubi_err("two LEBs with same version %u", leb_ver);
457 ubi_dbg_dump_seb(seb, 0);
458 ubi_dbg_dump_vid_hdr(vid_hdr);
459 return -EINVAL;
460 }
461 421
462 /* 422 /*
463 * Make sure that the logical eraseblocks have different 423 * Make sure that the logical eraseblocks have different
464 * sequence numbers. Otherwise the image is bad. 424 * sequence numbers. Otherwise the image is bad.
465 * 425 *
466 * FIXME: remove 'sqnum != 0' check when leb_ver is removed. 426 * However, if the sequence number is zero, we assume it must
427 * be an ancient UBI image from the era when UBI did not have
428 * sequence numbers. We still can attach these images, unless
429 * there is a need to distinguish between old and new
430 * eraseblocks, in which case we'll refuse the image in
431 * 'compare_lebs()'. In other words, we attach old clean
432 * images, but refuse attaching old images with duplicated
433 * logical eraseblocks because there was an unclean reboot.
467 */ 434 */
468 if (seb->sqnum == sqnum && sqnum != 0) { 435 if (seb->sqnum == sqnum && sqnum != 0) {
469 ubi_err("two LEBs with same sequence number %llu", 436 ubi_err("two LEBs with same sequence number %llu",
@@ -503,7 +470,6 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
503 seb->pnum = pnum; 470 seb->pnum = pnum;
504 seb->scrub = ((cmp_res & 2) || bitflips); 471 seb->scrub = ((cmp_res & 2) || bitflips);
505 seb->sqnum = sqnum; 472 seb->sqnum = sqnum;
506 seb->leb_ver = leb_ver;
507 473
508 if (sv->highest_lnum == lnum) 474 if (sv->highest_lnum == lnum)
509 sv->last_data_size = 475 sv->last_data_size =
@@ -540,7 +506,6 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
540 seb->lnum = lnum; 506 seb->lnum = lnum;
541 seb->sqnum = sqnum; 507 seb->sqnum = sqnum;
542 seb->scrub = bitflips; 508 seb->scrub = bitflips;
543 seb->leb_ver = leb_ver;
544 509
545 if (sv->highest_lnum <= lnum) { 510 if (sv->highest_lnum <= lnum) {
546 sv->highest_lnum = lnum; 511 sv->highest_lnum = lnum;
@@ -554,8 +519,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
554} 519}
555 520
556/** 521/**
557 * ubi_scan_find_sv - find information about a particular volume in the 522 * ubi_scan_find_sv - find volume in the scanning information.
558 * scanning information.
559 * @si: scanning information 523 * @si: scanning information
560 * @vol_id: the requested volume ID 524 * @vol_id: the requested volume ID
561 * 525 *
@@ -584,8 +548,7 @@ struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
584} 548}
585 549
586/** 550/**
587 * ubi_scan_find_seb - find information about a particular logical 551 * ubi_scan_find_seb - find LEB in the volume scanning information.
588 * eraseblock in the volume scanning information.
589 * @sv: a pointer to the volume scanning information 552 * @sv: a pointer to the volume scanning information
590 * @lnum: the requested logical eraseblock 553 * @lnum: the requested logical eraseblock
591 * 554 *
@@ -645,9 +608,9 @@ void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
645 * 608 *
646 * This function erases physical eraseblock 'pnum', and writes the erase 609 * This function erases physical eraseblock 'pnum', and writes the erase
647 * counter header to it. This function should only be used on UBI device 610 * counter header to it. This function should only be used on UBI device
648 * initialization stages, when the EBA unit had not been yet initialized. This 611 * initialization stages, when the EBA sub-system had not been yet initialized.
649 * function returns zero in case of success and a negative error code in case 612 * This function returns zero in case of success and a negative error code in
650 * of failure. 613 * case of failure.
651 */ 614 */
652int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si, 615int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
653 int pnum, int ec) 616 int pnum, int ec)
@@ -687,9 +650,10 @@ out_free:
687 * @si: scanning information 650 * @si: scanning information
688 * 651 *
689 * This function returns a free physical eraseblock. It is supposed to be 652 * This function returns a free physical eraseblock. It is supposed to be
690 * called on the UBI initialization stages when the wear-leveling unit is not 653 * called on the UBI initialization stages when the wear-leveling sub-system is
691 * initialized yet. This function picks a physical eraseblocks from one of the 654 * not initialized yet. This function picks a physical eraseblocks from one of
692 * lists, writes the EC header if it is needed, and removes it from the list. 655 * the lists, writes the EC header if it is needed, and removes it from the
656 * list.
693 * 657 *
694 * This function returns scanning physical eraseblock information in case of 658 * This function returns scanning physical eraseblock information in case of
695 * success and an error code in case of failure. 659 * success and an error code in case of failure.
@@ -742,8 +706,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
742} 706}
743 707
744/** 708/**
745 * process_eb - read UBI headers, check them and add corresponding data 709 * process_eb - read, check UBI headers, and add them to scanning information.
746 * to the scanning information.
747 * @ubi: UBI device description object 710 * @ubi: UBI device description object
748 * @si: scanning information 711 * @si: scanning information
749 * @pnum: the physical eraseblock number 712 * @pnum: the physical eraseblock number
@@ -751,7 +714,8 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
751 * This function returns a zero if the physical eraseblock was successfully 714 * This function returns a zero if the physical eraseblock was successfully
752 * handled and a negative error code in case of failure. 715 * handled and a negative error code in case of failure.
753 */ 716 */
754static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 717static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
718 int pnum)
755{ 719{
756 long long uninitialized_var(ec); 720 long long uninitialized_var(ec);
757 int err, bitflips = 0, vol_id, ec_corr = 0; 721 int err, bitflips = 0, vol_id, ec_corr = 0;
@@ -764,8 +728,9 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
764 return err; 728 return err;
765 else if (err) { 729 else if (err) {
766 /* 730 /*
767 * FIXME: this is actually duty of the I/O unit to initialize 731 * FIXME: this is actually duty of the I/O sub-system to
768 * this, but MTD does not provide enough information. 732 * initialize this, but MTD does not provide enough
733 * information.
769 */ 734 */
770 si->bad_peb_count += 1; 735 si->bad_peb_count += 1;
771 return 0; 736 return 0;
@@ -930,7 +895,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
930 for (pnum = 0; pnum < ubi->peb_count; pnum++) { 895 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
931 cond_resched(); 896 cond_resched();
932 897
933 dbg_msg("process PEB %d", pnum); 898 dbg_gen("process PEB %d", pnum);
934 err = process_eb(ubi, si, pnum); 899 err = process_eb(ubi, si, pnum);
935 if (err < 0) 900 if (err < 0)
936 goto out_vidh; 901 goto out_vidh;
@@ -1079,8 +1044,7 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
1079#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 1044#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1080 1045
1081/** 1046/**
1082 * paranoid_check_si - check if the scanning information is correct and 1047 * paranoid_check_si - check the scanning information.
1083 * consistent.
1084 * @ubi: UBI device description object 1048 * @ubi: UBI device description object
1085 * @si: scanning information 1049 * @si: scanning information
1086 * 1050 *
@@ -1265,11 +1229,6 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
1265 ubi_err("bad data_pad %d", sv->data_pad); 1229 ubi_err("bad data_pad %d", sv->data_pad);
1266 goto bad_vid_hdr; 1230 goto bad_vid_hdr;
1267 } 1231 }
1268
1269 if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) {
1270 ubi_err("bad leb_ver %u", seb->leb_ver);
1271 goto bad_vid_hdr;
1272 }
1273 } 1232 }
1274 1233
1275 if (!last_seb) 1234 if (!last_seb)
@@ -1299,8 +1258,7 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
1299 if (err < 0) { 1258 if (err < 0) {
1300 kfree(buf); 1259 kfree(buf);
1301 return err; 1260 return err;
1302 } 1261 } else if (err)
1303 else if (err)
1304 buf[pnum] = 1; 1262 buf[pnum] = 1;
1305 } 1263 }
1306 1264
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 966b9b682a42..61df208e2f20 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -34,7 +34,6 @@
34 * @u: unions RB-tree or @list links 34 * @u: unions RB-tree or @list links
35 * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects 35 * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
36 * @u.list: link in one of the eraseblock lists 36 * @u.list: link in one of the eraseblock lists
37 * @leb_ver: logical eraseblock version (obsolete)
38 * 37 *
39 * One object of this type is allocated for each physical eraseblock during 38 * One object of this type is allocated for each physical eraseblock during
40 * scanning. 39 * scanning.
@@ -49,7 +48,6 @@ struct ubi_scan_leb {
49 struct rb_node rb; 48 struct rb_node rb;
50 struct list_head list; 49 struct list_head list;
51 } u; 50 } u;
52 uint32_t leb_ver;
53}; 51};
54 52
55/** 53/**
@@ -59,16 +57,16 @@ struct ubi_scan_leb {
59 * @leb_count: number of logical eraseblocks in this volume 57 * @leb_count: number of logical eraseblocks in this volume
60 * @vol_type: volume type 58 * @vol_type: volume type
61 * @used_ebs: number of used logical eraseblocks in this volume (only for 59 * @used_ebs: number of used logical eraseblocks in this volume (only for
62 * static volumes) 60 * static volumes)
63 * @last_data_size: amount of data in the last logical eraseblock of this 61 * @last_data_size: amount of data in the last logical eraseblock of this
64 * volume (always equivalent to the usable logical eraseblock size in case of 62 * volume (always equivalent to the usable logical eraseblock
65 * dynamic volumes) 63 * size in case of dynamic volumes)
66 * @data_pad: how many bytes at the end of logical eraseblocks of this volume 64 * @data_pad: how many bytes at the end of logical eraseblocks of this volume
67 * are not used (due to volume alignment) 65 * are not used (due to volume alignment)
68 * @compat: compatibility flags of this volume 66 * @compat: compatibility flags of this volume
69 * @rb: link in the volume RB-tree 67 * @rb: link in the volume RB-tree
70 * @root: root of the RB-tree containing all the eraseblock belonging to this 68 * @root: root of the RB-tree containing all the eraseblock belonging to this
71 * volume (&struct ubi_scan_leb objects) 69 * volume (&struct ubi_scan_leb objects)
72 * 70 *
73 * One object of this type is allocated for each volume during scanning. 71 * One object of this type is allocated for each volume during scanning.
74 */ 72 */
@@ -92,8 +90,8 @@ struct ubi_scan_volume {
92 * @free: list of free physical eraseblocks 90 * @free: list of free physical eraseblocks
93 * @erase: list of physical eraseblocks which have to be erased 91 * @erase: list of physical eraseblocks which have to be erased
94 * @alien: list of physical eraseblocks which should not be used by UBI (e.g., 92 * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
93 * those belonging to "preserve"-compatible internal volumes)
95 * @bad_peb_count: count of bad physical eraseblocks 94 * @bad_peb_count: count of bad physical eraseblocks
96 * those belonging to "preserve"-compatible internal volumes)
97 * @vols_found: number of volumes found during scanning 95 * @vols_found: number of volumes found during scanning
98 * @highest_vol_id: highest volume ID 96 * @highest_vol_id: highest volume ID
99 * @alien_peb_count: count of physical eraseblocks in the @alien list 97 * @alien_peb_count: count of physical eraseblocks in the @alien list
@@ -106,8 +104,8 @@ struct ubi_scan_volume {
106 * @ec_count: a temporary variable used when calculating @mean_ec 104 * @ec_count: a temporary variable used when calculating @mean_ec
107 * 105 *
108 * This data structure contains the result of scanning and may be used by other 106 * This data structure contains the result of scanning and may be used by other
109 * UBI units to build final UBI data structures, further error-recovery and so 107 * UBI sub-systems to build final UBI data structures, further error-recovery
110 * on. 108 * and so on.
111 */ 109 */
112struct ubi_scan_info { 110struct ubi_scan_info {
113 struct rb_root volumes; 111 struct rb_root volumes;
@@ -132,8 +130,7 @@ struct ubi_device;
132struct ubi_vid_hdr; 130struct ubi_vid_hdr;
133 131
134/* 132/*
135 * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a 133 * ubi_scan_move_to_list - move a PEB from the volume tree to a list.
136 * list.
137 * 134 *
138 * @sv: volume scanning information 135 * @sv: volume scanning information
 139 * @seb: scanning eraseblock information 136 * @seb: scanning eraseblock information
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index c3185d9fd048..2ad940409053 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -98,10 +98,11 @@ enum {
98 * Compatibility constants used by internal volumes. 98 * Compatibility constants used by internal volumes.
99 * 99 *
100 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written 100 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
101 * to the flash 101 * to the flash
102 * @UBI_COMPAT_RO: attach this device in read-only mode 102 * @UBI_COMPAT_RO: attach this device in read-only mode
103 * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its 103 * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
104 * physical eraseblocks, don't allow the wear-leveling unit to move them 104 * physical eraseblocks, don't allow the wear-leveling
105 * sub-system to move them
105 * @UBI_COMPAT_REJECT: reject this UBI image 106 * @UBI_COMPAT_REJECT: reject this UBI image
106 */ 107 */
107enum { 108enum {
@@ -123,7 +124,7 @@ enum {
123 * struct ubi_ec_hdr - UBI erase counter header. 124 * struct ubi_ec_hdr - UBI erase counter header.
124 * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC) 125 * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
125 * @version: version of UBI implementation which is supposed to accept this 126 * @version: version of UBI implementation which is supposed to accept this
126 * UBI image 127 * UBI image
127 * @padding1: reserved for future, zeroes 128 * @padding1: reserved for future, zeroes
128 * @ec: the erase counter 129 * @ec: the erase counter
129 * @vid_hdr_offset: where the VID header starts 130 * @vid_hdr_offset: where the VID header starts
@@ -159,24 +160,23 @@ struct ubi_ec_hdr {
159 * struct ubi_vid_hdr - on-flash UBI volume identifier header. 160 * struct ubi_vid_hdr - on-flash UBI volume identifier header.
160 * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC) 161 * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
161 * @version: UBI implementation version which is supposed to accept this UBI 162 * @version: UBI implementation version which is supposed to accept this UBI
162 * image (%UBI_VERSION) 163 * image (%UBI_VERSION)
163 * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC) 164 * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
164 * @copy_flag: if this logical eraseblock was copied from another physical 165 * @copy_flag: if this logical eraseblock was copied from another physical
165 * eraseblock (for wear-leveling reasons) 166 * eraseblock (for wear-leveling reasons)
166 * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE, 167 * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
167 * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT) 168 * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
168 * @vol_id: ID of this volume 169 * @vol_id: ID of this volume
169 * @lnum: logical eraseblock number 170 * @lnum: logical eraseblock number
170 * @leb_ver: version of this logical eraseblock (IMPORTANT: obsolete, to be 171 * @padding1: reserved for future, zeroes
171 * removed, kept only for not breaking older UBI users)
172 * @data_size: how many bytes of data this logical eraseblock contains 172 * @data_size: how many bytes of data this logical eraseblock contains
173 * @used_ebs: total number of used logical eraseblocks in this volume 173 * @used_ebs: total number of used logical eraseblocks in this volume
174 * @data_pad: how many bytes at the end of this physical eraseblock are not 174 * @data_pad: how many bytes at the end of this physical eraseblock are not
175 * used 175 * used
176 * @data_crc: CRC checksum of the data stored in this logical eraseblock 176 * @data_crc: CRC checksum of the data stored in this logical eraseblock
177 * @padding1: reserved for future, zeroes
178 * @sqnum: sequence number
179 * @padding2: reserved for future, zeroes 177 * @padding2: reserved for future, zeroes
178 * @sqnum: sequence number
179 * @padding3: reserved for future, zeroes
180 * @hdr_crc: volume identifier header CRC checksum 180 * @hdr_crc: volume identifier header CRC checksum
181 * 181 *
182 * The @sqnum is the value of the global sequence counter at the time when this 182 * The @sqnum is the value of the global sequence counter at the time when this
@@ -224,10 +224,6 @@ struct ubi_ec_hdr {
224 * checksum is correct, this physical eraseblock is selected (P1). Otherwise 224 * checksum is correct, this physical eraseblock is selected (P1). Otherwise
225 * the older one (P) is selected. 225 * the older one (P) is selected.
226 * 226 *
227 * Note, there is an obsolete @leb_ver field which was used instead of @sqnum
228 * in the past. But it is not used anymore and we keep it in order to be able
229 * to deal with old UBI images. It will be removed at some point.
230 *
231 * There are 2 sorts of volumes in UBI: user volumes and internal volumes. 227 * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
232 * Internal volumes are not seen from outside and are used for various internal 228 * Internal volumes are not seen from outside and are used for various internal
233 * UBI purposes. In this implementation there is only one internal volume - the 229 * UBI purposes. In this implementation there is only one internal volume - the
@@ -248,9 +244,9 @@ struct ubi_ec_hdr {
248 * The @data_crc field contains the CRC checksum of the contents of the logical 244 * The @data_crc field contains the CRC checksum of the contents of the logical
249 * eraseblock if this is a static volume. In case of dynamic volumes, it does 245 * eraseblock if this is a static volume. In case of dynamic volumes, it does
250 * not contain the CRC checksum as a rule. The only exception is when the 246 * not contain the CRC checksum as a rule. The only exception is when the
251 * data of the physical eraseblock was moved by the wear-leveling unit, then 247 * data of the physical eraseblock was moved by the wear-leveling sub-system,
252 * the wear-leveling unit calculates the data CRC and stores it in the 248 * then the wear-leveling sub-system calculates the data CRC and stores it in
 253 * @data_crc field. And of course, the @copy_flag is %1 in this case. 249 * the @data_crc field. And of course, the @copy_flag is %1 in this case.
254 * 250 *
255 * The @data_size field is used only for static volumes because UBI has to know 251 * The @data_size field is used only for static volumes because UBI has to know
256 * how many bytes of data are stored in this eraseblock. For dynamic volumes, 252 * how many bytes of data are stored in this eraseblock. For dynamic volumes,
@@ -277,14 +273,14 @@ struct ubi_vid_hdr {
277 __u8 compat; 273 __u8 compat;
278 __be32 vol_id; 274 __be32 vol_id;
279 __be32 lnum; 275 __be32 lnum;
280 __be32 leb_ver; /* obsolete, to be removed, don't use */ 276 __u8 padding1[4];
281 __be32 data_size; 277 __be32 data_size;
282 __be32 used_ebs; 278 __be32 used_ebs;
283 __be32 data_pad; 279 __be32 data_pad;
284 __be32 data_crc; 280 __be32 data_crc;
285 __u8 padding1[4]; 281 __u8 padding2[4];
286 __be64 sqnum; 282 __be64 sqnum;
287 __u8 padding2[12]; 283 __u8 padding3[12];
288 __be32 hdr_crc; 284 __be32 hdr_crc;
289} __attribute__ ((packed)); 285} __attribute__ ((packed));
290 286
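
Because leb_ver is replaced by padding1[4] and the old padding fields are only renamed, the on-flash VID header keeps its size and every field offset; only the meaning of those four bytes changes. A hedged compile-time check of that invariant, assuming the kernel type definitions are in scope and the documented 64-byte on-flash header size; the kernel itself would express this with BUILD_BUG_ON() rather than C11 _Static_assert:

	#include <stddef.h>
	#include "ubi-media.h"	/* struct ubi_vid_hdr as patched above */

	_Static_assert(sizeof(struct ubi_vid_hdr) == 64,
		       "on-flash VID header must stay 64 bytes");
	_Static_assert(offsetof(struct ubi_vid_hdr, data_size) == 20,
		       "padding1 must cover exactly the bytes leb_ver used");
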
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 67dcbd11c15c..1c3fa18c26a7 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -74,15 +74,15 @@
74#define UBI_IO_RETRIES 3 74#define UBI_IO_RETRIES 3
75 75
76/* 76/*
77 * Error codes returned by the I/O unit. 77 * Error codes returned by the I/O sub-system.
78 * 78 *
79 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only 79 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
80 * 0xFF bytes 80 * %0xFF bytes
81 * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a 81 * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a
82 * valid erase counter header, and the rest are %0xFF bytes 82 * valid erase counter header, and the rest are %0xFF bytes
83 * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC) 83 * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC)
84 * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or 84 * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or
85 * CRC) 85 * CRC)
86 * UBI_IO_BITFLIPS: bit-flips were detected and corrected 86 * UBI_IO_BITFLIPS: bit-flips were detected and corrected
87 */ 87 */
88enum { 88enum {
@@ -99,9 +99,9 @@ enum {
99 * @ec: erase counter 99 * @ec: erase counter
100 * @pnum: physical eraseblock number 100 * @pnum: physical eraseblock number
101 * 101 *
102 * This data structure is used in the WL unit. Each physical eraseblock has a 102 * This data structure is used in the WL sub-system. Each physical eraseblock
103 * corresponding &struct wl_entry object which may be kept in different 103 * has a corresponding &struct wl_entry object which may be kept in different
104 * RB-trees. See WL unit for details. 104 * RB-trees. See WL sub-system for details.
105 */ 105 */
106struct ubi_wl_entry { 106struct ubi_wl_entry {
107 struct rb_node rb; 107 struct rb_node rb;
@@ -118,10 +118,10 @@ struct ubi_wl_entry {
118 * @mutex: read/write mutex to implement read/write access serialization to 118 * @mutex: read/write mutex to implement read/write access serialization to
119 * the (@vol_id, @lnum) logical eraseblock 119 * the (@vol_id, @lnum) logical eraseblock
120 * 120 *
121 * This data structure is used in the EBA unit to implement per-LEB locking. 121 * This data structure is used in the EBA sub-system to implement per-LEB
122 * When a logical eraseblock is being locked - corresponding 122 * locking. When a logical eraseblock is being locked - corresponding
123 * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree). 123 * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
124 * See EBA unit for details. 124 * See EBA sub-system for details.
125 */ 125 */
126struct ubi_ltree_entry { 126struct ubi_ltree_entry {
127 struct rb_node rb; 127 struct rb_node rb;
@@ -131,6 +131,27 @@ struct ubi_ltree_entry {
131 struct rw_semaphore mutex; 131 struct rw_semaphore mutex;
132}; 132};
133 133
134/**
135 * struct ubi_rename_entry - volume re-name description data structure.
136 * @new_name_len: new volume name length
137 * @new_name: new volume name
138 * @remove: if not zero, this volume should be removed, not re-named
139 * @desc: descriptor of the volume
140 * @list: links re-name entries into a list
141 *
142 * This data structure is utilized in the multiple volume re-name code. Namely,
143 * UBI first creates a list of &struct ubi_rename_entry objects from the
144 * &struct ubi_rnvol_req request object, and then utilizes this list to do all
 145 * the work.
146 */
147struct ubi_rename_entry {
148 int new_name_len;
149 char new_name[UBI_VOL_NAME_MAX + 1];
150 int remove;
151 struct ubi_volume_desc *desc;
152 struct list_head list;
153};
154
134struct ubi_volume_desc; 155struct ubi_volume_desc;
135 156
136/** 157/**
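
The new struct ubi_rename_entry describes one re-name (or removal) per volume; the multi-volume re-name code builds a list of these entries from the &struct ubi_rnvol_req request and then walks it. A hedged sketch of the walking side, assuming kernel context; only struct ubi_rename_entry and its @list member come from the header above, the function itself is illustrative:

	#include <linux/list.h>
	#include <linux/kernel.h>
	#include "ubi.h"

	/* Hypothetical consumer of a re-name list: log what would be done. */
	static void dump_rename_list(struct list_head *rename_list)
	{
		struct ubi_rename_entry *re;

		list_for_each_entry(re, rename_list, list) {
			if (re->remove)
				printk(KERN_INFO "remove volume %d\n",
				       re->desc->vol->vol_id);
			else
				printk(KERN_INFO "re-name volume %d to \"%s\"\n",
				       re->desc->vol->vol_id, re->new_name);
		}
	}
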
@@ -206,7 +227,7 @@ struct ubi_volume {
206 int alignment; 227 int alignment;
207 int data_pad; 228 int data_pad;
208 int name_len; 229 int name_len;
209 char name[UBI_VOL_NAME_MAX+1]; 230 char name[UBI_VOL_NAME_MAX + 1];
210 231
211 int upd_ebs; 232 int upd_ebs;
212 int ch_lnum; 233 int ch_lnum;
@@ -225,7 +246,7 @@ struct ubi_volume {
225#ifdef CONFIG_MTD_UBI_GLUEBI 246#ifdef CONFIG_MTD_UBI_GLUEBI
226 /* 247 /*
227 * Gluebi-related stuff may be compiled out. 248 * Gluebi-related stuff may be compiled out.
228 * TODO: this should not be built into UBI but should be a separate 249 * Note: this should not be built into UBI but should be a separate
229 * ubimtd driver which works on top of UBI and emulates MTD devices. 250 * ubimtd driver which works on top of UBI and emulates MTD devices.
230 */ 251 */
231 struct ubi_volume_desc *gluebi_desc; 252 struct ubi_volume_desc *gluebi_desc;
@@ -235,8 +256,7 @@ struct ubi_volume {
235}; 256};
236 257
237/** 258/**
238 * struct ubi_volume_desc - descriptor of the UBI volume returned when it is 259 * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
239 * opened.
240 * @vol: reference to the corresponding volume description object 260 * @vol: reference to the corresponding volume description object
241 * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) 261 * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE)
242 */ 262 */
@@ -273,7 +293,7 @@ struct ubi_wl_entry;
273 * @vtbl_size: size of the volume table in bytes 293 * @vtbl_size: size of the volume table in bytes
274 * @vtbl: in-RAM volume table copy 294 * @vtbl: in-RAM volume table copy
275 * @volumes_mutex: protects on-flash volume table and serializes volume 295 * @volumes_mutex: protects on-flash volume table and serializes volume
276 * changes, like creation, deletion, update, resize 296 * changes, like creation, deletion, update, re-size and re-name
277 * 297 *
278 * @max_ec: current highest erase counter value 298 * @max_ec: current highest erase counter value
279 * @mean_ec: current mean erase counter value 299 * @mean_ec: current mean erase counter value
@@ -293,6 +313,7 @@ struct ubi_wl_entry;
293 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 313 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
294 * fields 314 * fields
295 * @move_mutex: serializes eraseblock moves 315 * @move_mutex: serializes eraseblock moves
 316 * @work_sem: synchronizes the WL worker with user tasks
296 * @wl_scheduled: non-zero if the wear-leveling was scheduled 317 * @wl_scheduled: non-zero if the wear-leveling was scheduled
297 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 318 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
298 * physical eraseblock 319 * physical eraseblock
@@ -316,11 +337,11 @@ struct ubi_wl_entry;
316 * @ro_mode: if the UBI device is in read-only mode 337 * @ro_mode: if the UBI device is in read-only mode
317 * @leb_size: logical eraseblock size 338 * @leb_size: logical eraseblock size
318 * @leb_start: starting offset of logical eraseblocks within physical 339 * @leb_start: starting offset of logical eraseblocks within physical
319 * eraseblocks 340 * eraseblocks
320 * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size 341 * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
321 * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size 342 * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
322 * @vid_hdr_offset: starting offset of the volume identifier header (might be 343 * @vid_hdr_offset: starting offset of the volume identifier header (might be
323 * unaligned) 344 * unaligned)
324 * @vid_hdr_aloffset: starting offset of the VID header aligned to 345 * @vid_hdr_aloffset: starting offset of the VID header aligned to
325 * @hdrs_min_io_size 346 * @hdrs_min_io_size
326 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset 347 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
@@ -331,6 +352,8 @@ struct ubi_wl_entry;
331 * @peb_buf1: a buffer of PEB size used for different purposes 352 * @peb_buf1: a buffer of PEB size used for different purposes
332 * @peb_buf2: another buffer of PEB size used for different purposes 353 * @peb_buf2: another buffer of PEB size used for different purposes
 331 * @peb_buf1: a buffer of PEB size used for different purposes 352 * @peb_buf1: a buffer of PEB size used for different purposes 333 * @buf_mutex: protects @peb_buf1 and @peb_buf2 354 * @buf_mutex: protects @peb_buf1 and @peb_buf2
355 * @ckvol_mutex: serializes static volume checking when opening
 356 * @mult_mutex: serializes operations on multiple volumes, like re-naming
334 * @dbg_peb_buf: buffer of PEB size used for debugging 357 * @dbg_peb_buf: buffer of PEB size used for debugging
 335 * @dbg_buf_mutex: protects @dbg_peb_buf 358 * @dbg_buf_mutex: protects @dbg_peb_buf
336 */ 359 */
@@ -356,16 +379,16 @@ struct ubi_device {
356 struct mutex volumes_mutex; 379 struct mutex volumes_mutex;
357 380
358 int max_ec; 381 int max_ec;
359 /* TODO: mean_ec is not updated run-time, fix */ 382 /* Note, mean_ec is not updated run-time - should be fixed */
360 int mean_ec; 383 int mean_ec;
361 384
362 /* EBA unit's stuff */ 385 /* EBA sub-system's stuff */
363 unsigned long long global_sqnum; 386 unsigned long long global_sqnum;
364 spinlock_t ltree_lock; 387 spinlock_t ltree_lock;
365 struct rb_root ltree; 388 struct rb_root ltree;
366 struct mutex alc_mutex; 389 struct mutex alc_mutex;
367 390
368 /* Wear-leveling unit's stuff */ 391 /* Wear-leveling sub-system's stuff */
369 struct rb_root used; 392 struct rb_root used;
370 struct rb_root free; 393 struct rb_root free;
371 struct rb_root scrub; 394 struct rb_root scrub;
@@ -388,7 +411,7 @@ struct ubi_device {
388 int thread_enabled; 411 int thread_enabled;
389 char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; 412 char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
390 413
391 /* I/O unit's stuff */ 414 /* I/O sub-system's stuff */
392 long long flash_size; 415 long long flash_size;
393 int peb_count; 416 int peb_count;
394 int peb_size; 417 int peb_size;
@@ -411,6 +434,7 @@ struct ubi_device {
411 void *peb_buf2; 434 void *peb_buf2;
412 struct mutex buf_mutex; 435 struct mutex buf_mutex;
413 struct mutex ckvol_mutex; 436 struct mutex ckvol_mutex;
437 struct mutex mult_mutex;
414#ifdef CONFIG_MTD_UBI_DEBUG 438#ifdef CONFIG_MTD_UBI_DEBUG
415 void *dbg_peb_buf; 439 void *dbg_peb_buf;
416 struct mutex dbg_buf_mutex; 440 struct mutex dbg_buf_mutex;
@@ -427,12 +451,15 @@ extern struct mutex ubi_devices_mutex;
427/* vtbl.c */ 451/* vtbl.c */
428int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, 452int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
429 struct ubi_vtbl_record *vtbl_rec); 453 struct ubi_vtbl_record *vtbl_rec);
454int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
455 struct list_head *rename_list);
430int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); 456int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
431 457
432/* vmt.c */ 458/* vmt.c */
433int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 459int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
434int ubi_remove_volume(struct ubi_volume_desc *desc); 460int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl);
435int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 461int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
462int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list);
436int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); 463int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
437void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); 464void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
438 465
@@ -447,7 +474,8 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
447 const void __user *buf, int count); 474 const void __user *buf, int count);
448 475
449/* misc.c */ 476/* misc.c */
450int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); 477int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
478 int length);
451int ubi_check_volume(struct ubi_device *ubi, int vol_id); 479int ubi_check_volume(struct ubi_device *ubi, int vol_id);
452void ubi_calculate_reserved(struct ubi_device *ubi); 480void ubi_calculate_reserved(struct ubi_device *ubi);
453 481
@@ -477,7 +505,6 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
477int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 505int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
478 struct ubi_vid_hdr *vid_hdr); 506 struct ubi_vid_hdr *vid_hdr);
479int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 507int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
480void ubi_eba_close(const struct ubi_device *ubi);
481 508
482/* wl.c */ 509/* wl.c */
483int ubi_wl_get_peb(struct ubi_device *ubi, int dtype); 510int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index ddaa1a56cc69..8b89cc18ff0b 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -39,7 +39,7 @@
39 */ 39 */
40 40
41#include <linux/err.h> 41#include <linux/err.h>
42#include <asm/uaccess.h> 42#include <linux/uaccess.h>
43#include <asm/div64.h> 43#include <asm/div64.h>
44#include "ubi.h" 44#include "ubi.h"
45 45
@@ -56,11 +56,11 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
56 int err; 56 int err;
57 struct ubi_vtbl_record vtbl_rec; 57 struct ubi_vtbl_record vtbl_rec;
58 58
59 dbg_msg("set update marker for volume %d", vol->vol_id); 59 dbg_gen("set update marker for volume %d", vol->vol_id);
60 60
61 if (vol->upd_marker) { 61 if (vol->upd_marker) {
62 ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); 62 ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
63 dbg_msg("already set"); 63 dbg_gen("already set");
64 return 0; 64 return 0;
65 } 65 }
66 66
@@ -92,7 +92,7 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
92 uint64_t tmp; 92 uint64_t tmp;
93 struct ubi_vtbl_record vtbl_rec; 93 struct ubi_vtbl_record vtbl_rec;
94 94
95 dbg_msg("clear update marker for volume %d", vol->vol_id); 95 dbg_gen("clear update marker for volume %d", vol->vol_id);
96 96
97 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], 97 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
98 sizeof(struct ubi_vtbl_record)); 98 sizeof(struct ubi_vtbl_record));
@@ -133,7 +133,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
133 int i, err; 133 int i, err;
134 uint64_t tmp; 134 uint64_t tmp;
135 135
136 dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes); 136 dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
137 ubi_assert(!vol->updating && !vol->changing_leb); 137 ubi_assert(!vol->updating && !vol->changing_leb);
138 vol->updating = 1; 138 vol->updating = 1;
139 139
@@ -183,7 +183,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
183{ 183{
184 ubi_assert(!vol->updating && !vol->changing_leb); 184 ubi_assert(!vol->updating && !vol->changing_leb);
185 185
186 dbg_msg("start changing LEB %d:%d, %u bytes", 186 dbg_gen("start changing LEB %d:%d, %u bytes",
187 vol->vol_id, req->lnum, req->bytes); 187 vol->vol_id, req->lnum, req->bytes);
188 if (req->bytes == 0) 188 if (req->bytes == 0)
189 return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, 189 return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
@@ -237,16 +237,17 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
237 int err; 237 int err;
238 238
239 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 239 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
240 len = ALIGN(len, ubi->min_io_size); 240 int l = ALIGN(len, ubi->min_io_size);
241 memset(buf + len, 0xFF, len - len);
242 241
243 len = ubi_calc_data_len(ubi, buf, len); 242 memset(buf + len, 0xFF, l - len);
243 len = ubi_calc_data_len(ubi, buf, l);
244 if (len == 0) { 244 if (len == 0) {
245 dbg_msg("all %d bytes contain 0xFF - skip", len); 245 dbg_gen("all %d bytes contain 0xFF - skip", len);
246 return 0; 246 return 0;
247 } 247 }
248 248
249 err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN); 249 err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
250 UBI_UNKNOWN);
250 } else { 251 } else {
251 /* 252 /*
252 * When writing static volume, and this is the last logical 253 * When writing static volume, and this is the last logical
@@ -267,6 +268,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
267 268
268/** 269/**
269 * ubi_more_update_data - write more update data. 270 * ubi_more_update_data - write more update data.
271 * @ubi: UBI device description object
270 * @vol: volume description object 272 * @vol: volume description object
271 * @buf: write data (user-space memory buffer) 273 * @buf: write data (user-space memory buffer)
272 * @count: how much bytes to write 274 * @count: how much bytes to write
@@ -283,7 +285,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
283 uint64_t tmp; 285 uint64_t tmp;
284 int lnum, offs, err = 0, len, to_write = count; 286 int lnum, offs, err = 0, len, to_write = count;
285 287
286 dbg_msg("write %d of %lld bytes, %lld already passed", 288 dbg_gen("write %d of %lld bytes, %lld already passed",
287 count, vol->upd_bytes, vol->upd_received); 289 count, vol->upd_bytes, vol->upd_received);
288 290
289 if (ubi->ro_mode) 291 if (ubi->ro_mode)
@@ -384,6 +386,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
384 386
385/** 387/**
386 * ubi_more_leb_change_data - accept more data for atomic LEB change. 388 * ubi_more_leb_change_data - accept more data for atomic LEB change.
389 * @ubi: UBI device description object
387 * @vol: volume description object 390 * @vol: volume description object
388 * @buf: write data (user-space memory buffer) 391 * @buf: write data (user-space memory buffer)
389 * @count: how much bytes to write 392 * @count: how much bytes to write
@@ -400,7 +403,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
400{ 403{
401 int err; 404 int err;
402 405
403 dbg_msg("write %d of %lld bytes, %lld already passed", 406 dbg_gen("write %d of %lld bytes, %lld already passed",
404 count, vol->upd_bytes, vol->upd_received); 407 count, vol->upd_bytes, vol->upd_received);
405 408
406 if (ubi->ro_mode) 409 if (ubi->ro_mode)
@@ -418,7 +421,8 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
418 if (vol->upd_received == vol->upd_bytes) { 421 if (vol->upd_received == vol->upd_bytes) {
419 int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); 422 int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
420 423
421 memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes); 424 memset(vol->upd_buf + vol->upd_bytes, 0xFF,
425 len - vol->upd_bytes);
422 len = ubi_calc_data_len(ubi, vol->upd_buf, len); 426 len = ubi_calc_data_len(ubi, vol->upd_buf, len);
423 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, 427 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
424 vol->upd_buf, len, UBI_UNKNOWN); 428 vol->upd_buf, len, UBI_UNKNOWN);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 5be58d85c639..3531ca9a1e24 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -28,9 +28,9 @@
28#include "ubi.h" 28#include "ubi.h"
29 29
30#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 30#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
31static void paranoid_check_volumes(struct ubi_device *ubi); 31static int paranoid_check_volumes(struct ubi_device *ubi);
32#else 32#else
33#define paranoid_check_volumes(ubi) 33#define paranoid_check_volumes(ubi) 0
34#endif 34#endif
35 35
36static ssize_t vol_attribute_show(struct device *dev, 36static ssize_t vol_attribute_show(struct device *dev,
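The changed forward declaration follows a common idiom for debug-only checks: when the paranoid code is compiled out, the replacement macro must still evaluate to a value (0, meaning success) so that call sites such as "err = paranoid_check_volumes(ubi);" keep compiling. A hedged sketch of the pattern, with a stand-in config symbol:

#ifdef CONFIG_MY_PARANOID_CHECKS	/* stand-in for CONFIG_MTD_UBI_DEBUG_PARANOID */
static int paranoid_check_volumes(struct ubi_device *ubi);
#else
/* Expands to 0 so "err = paranoid_check_volumes(ubi);" still compiles and succeeds. */
#define paranoid_check_volumes(ubi) 0
#endif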
@@ -127,6 +127,7 @@ static void vol_release(struct device *dev)
127{ 127{
128 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 128 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
129 129
130 kfree(vol->eba_tbl);
130 kfree(vol); 131 kfree(vol);
131} 132}
132 133
@@ -201,7 +202,7 @@ static void volume_sysfs_close(struct ubi_volume *vol)
201 */ 202 */
202int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 203int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
203{ 204{
204 int i, err, vol_id = req->vol_id, dont_free = 0; 205 int i, err, vol_id = req->vol_id, do_free = 1;
205 struct ubi_volume *vol; 206 struct ubi_volume *vol;
206 struct ubi_vtbl_record vtbl_rec; 207 struct ubi_vtbl_record vtbl_rec;
207 uint64_t bytes; 208 uint64_t bytes;
@@ -217,7 +218,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
217 spin_lock(&ubi->volumes_lock); 218 spin_lock(&ubi->volumes_lock);
218 if (vol_id == UBI_VOL_NUM_AUTO) { 219 if (vol_id == UBI_VOL_NUM_AUTO) {
219 /* Find unused volume ID */ 220 /* Find unused volume ID */
220 dbg_msg("search for vacant volume ID"); 221 dbg_gen("search for vacant volume ID");
221 for (i = 0; i < ubi->vtbl_slots; i++) 222 for (i = 0; i < ubi->vtbl_slots; i++)
222 if (!ubi->volumes[i]) { 223 if (!ubi->volumes[i]) {
223 vol_id = i; 224 vol_id = i;
@@ -232,7 +233,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
232 req->vol_id = vol_id; 233 req->vol_id = vol_id;
233 } 234 }
234 235
235 dbg_msg("volume ID %d, %llu bytes, type %d, name %s", 236 dbg_gen("volume ID %d, %llu bytes, type %d, name %s",
236 vol_id, (unsigned long long)req->bytes, 237 vol_id, (unsigned long long)req->bytes,
237 (int)req->vol_type, req->name); 238 (int)req->vol_type, req->name);
238 239
@@ -252,7 +253,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
252 goto out_unlock; 253 goto out_unlock;
253 } 254 }
254 255
255 /* Calculate how many eraseblocks are requested */ 256 /* Calculate how many eraseblocks are requested */
256 vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; 257 vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
257 bytes = req->bytes; 258 bytes = req->bytes;
258 if (do_div(bytes, vol->usable_leb_size)) 259 if (do_div(bytes, vol->usable_leb_size))
@@ -274,7 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
274 vol->data_pad = ubi->leb_size % vol->alignment; 275 vol->data_pad = ubi->leb_size % vol->alignment;
275 vol->vol_type = req->vol_type; 276 vol->vol_type = req->vol_type;
276 vol->name_len = req->name_len; 277 vol->name_len = req->name_len;
277 memcpy(vol->name, req->name, vol->name_len + 1); 278 memcpy(vol->name, req->name, vol->name_len);
278 vol->ubi = ubi; 279 vol->ubi = ubi;
279 280
280 /* 281 /*
@@ -349,7 +350,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
349 vtbl_rec.vol_type = UBI_VID_DYNAMIC; 350 vtbl_rec.vol_type = UBI_VID_DYNAMIC;
350 else 351 else
351 vtbl_rec.vol_type = UBI_VID_STATIC; 352 vtbl_rec.vol_type = UBI_VID_STATIC;
352 memcpy(vtbl_rec.name, vol->name, vol->name_len + 1); 353 memcpy(vtbl_rec.name, vol->name, vol->name_len);
353 354
354 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 355 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
355 if (err) 356 if (err)
@@ -360,19 +361,19 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
360 ubi->vol_count += 1; 361 ubi->vol_count += 1;
361 spin_unlock(&ubi->volumes_lock); 362 spin_unlock(&ubi->volumes_lock);
362 363
363 paranoid_check_volumes(ubi); 364 err = paranoid_check_volumes(ubi);
364 return 0; 365 return err;
365 366
366out_sysfs: 367out_sysfs:
367 /* 368 /*
368 * We have registered our device, we should not free the volume* 369 * We have registered our device, we should not free the volume
369 * description object in this function in case of an error - it is 370 * description object in this function in case of an error - it is
370 * freed by the release function. 371 * freed by the release function.
371 * 372 *
372 * Get device reference to prevent the release function from being 373 * Get device reference to prevent the release function from being
373 * called just after sysfs has been closed. 374 * called just after sysfs has been closed.
374 */ 375 */
375 dont_free = 1; 376 do_free = 0;
376 get_device(&vol->dev); 377 get_device(&vol->dev);
377 volume_sysfs_close(vol); 378 volume_sysfs_close(vol);
378out_gluebi: 379out_gluebi:
@@ -382,17 +383,18 @@ out_gluebi:
382out_cdev: 383out_cdev:
383 cdev_del(&vol->cdev); 384 cdev_del(&vol->cdev);
384out_mapping: 385out_mapping:
385 kfree(vol->eba_tbl); 386 if (do_free)
387 kfree(vol->eba_tbl);
386out_acc: 388out_acc:
387 spin_lock(&ubi->volumes_lock); 389 spin_lock(&ubi->volumes_lock);
388 ubi->rsvd_pebs -= vol->reserved_pebs; 390 ubi->rsvd_pebs -= vol->reserved_pebs;
389 ubi->avail_pebs += vol->reserved_pebs; 391 ubi->avail_pebs += vol->reserved_pebs;
390out_unlock: 392out_unlock:
391 spin_unlock(&ubi->volumes_lock); 393 spin_unlock(&ubi->volumes_lock);
392 if (dont_free) 394 if (do_free)
393 put_device(&vol->dev);
394 else
395 kfree(vol); 395 kfree(vol);
396 else
397 put_device(&vol->dev);
396 ubi_err("cannot create volume %d, error %d", vol_id, err); 398 ubi_err("cannot create volume %d, error %d", vol_id, err);
397 return err; 399 return err;
398} 400}
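The do_free rework above is about ownership of vol (and now vol->eba_tbl): once the device has been registered, the release callback frees the object, so the error path must drop a reference rather than kfree() it. A hedged sketch of the hand-off, with names taken from this diff and the intermediate labels elided:

	do_free = 1;			/* nothing registered yet: we own the memory     */
	...
	do_free = 0;			/* device registered: vol_release() owns it      */
	get_device(&vol->dev);		/* keep vol alive while we unwind                */
	volume_sysfs_close(vol);
	...
out_unlock:
	spin_unlock(&ubi->volumes_lock);
	if (do_free)
		kfree(vol);		/* never registered - free it ourselves          */
	else
		put_device(&vol->dev);	/* registered - let the release callback free it */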
@@ -400,19 +402,20 @@ out_unlock:
400/** 402/**
401 * ubi_remove_volume - remove volume. 403 * ubi_remove_volume - remove volume.
402 * @desc: volume descriptor 404 * @desc: volume descriptor
405 * @no_vtbl: do not change volume table if not zero
403 * 406 *
404 * This function removes volume described by @desc. The volume has to be opened 407 * This function removes volume described by @desc. The volume has to be opened
405 * in "exclusive" mode. Returns zero in case of success and a negative error 408 * in "exclusive" mode. Returns zero in case of success and a negative error
406 * code in case of failure. The caller has to have the @ubi->volumes_mutex 409 * code in case of failure. The caller has to have the @ubi->volumes_mutex
407 * locked. 410 * locked.
408 */ 411 */
409int ubi_remove_volume(struct ubi_volume_desc *desc) 412int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
410{ 413{
411 struct ubi_volume *vol = desc->vol; 414 struct ubi_volume *vol = desc->vol;
412 struct ubi_device *ubi = vol->ubi; 415 struct ubi_device *ubi = vol->ubi;
413 int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; 416 int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
414 417
415 dbg_msg("remove UBI volume %d", vol_id); 418 dbg_gen("remove UBI volume %d", vol_id);
416 ubi_assert(desc->mode == UBI_EXCLUSIVE); 419 ubi_assert(desc->mode == UBI_EXCLUSIVE);
417 ubi_assert(vol == ubi->volumes[vol_id]); 420 ubi_assert(vol == ubi->volumes[vol_id]);
418 421
@@ -435,9 +438,11 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
435 if (err) 438 if (err)
436 goto out_err; 439 goto out_err;
437 440
438 err = ubi_change_vtbl_record(ubi, vol_id, NULL); 441 if (!no_vtbl) {
439 if (err) 442 err = ubi_change_vtbl_record(ubi, vol_id, NULL);
440 goto out_err; 443 if (err)
444 goto out_err;
445 }
441 446
442 for (i = 0; i < vol->reserved_pebs; i++) { 447 for (i = 0; i < vol->reserved_pebs; i++) {
443 err = ubi_eba_unmap_leb(ubi, vol, i); 448 err = ubi_eba_unmap_leb(ubi, vol, i);
@@ -445,8 +450,6 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
445 goto out_err; 450 goto out_err;
446 } 451 }
447 452
448 kfree(vol->eba_tbl);
449 vol->eba_tbl = NULL;
450 cdev_del(&vol->cdev); 453 cdev_del(&vol->cdev);
451 volume_sysfs_close(vol); 454 volume_sysfs_close(vol);
452 455
@@ -465,8 +468,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
465 ubi->vol_count -= 1; 468 ubi->vol_count -= 1;
466 spin_unlock(&ubi->volumes_lock); 469 spin_unlock(&ubi->volumes_lock);
467 470
468 paranoid_check_volumes(ubi); 471 if (!no_vtbl)
469 return 0; 472 err = paranoid_check_volumes(ubi);
473 return err;
470 474
471out_err: 475out_err:
472 ubi_err("cannot remove volume %d, error %d", vol_id, err); 476 ubi_err("cannot remove volume %d, error %d", vol_id, err);
@@ -497,7 +501,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
497 if (ubi->ro_mode) 501 if (ubi->ro_mode)
498 return -EROFS; 502 return -EROFS;
499 503
500 dbg_msg("re-size volume %d to from %d to %d PEBs", 504 dbg_gen("re-size volume %d to from %d to %d PEBs",
501 vol_id, vol->reserved_pebs, reserved_pebs); 505 vol_id, vol->reserved_pebs, reserved_pebs);
502 506
503 if (vol->vol_type == UBI_STATIC_VOLUME && 507 if (vol->vol_type == UBI_STATIC_VOLUME &&
@@ -586,8 +590,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
586 (long long)vol->used_ebs * vol->usable_leb_size; 590 (long long)vol->used_ebs * vol->usable_leb_size;
587 } 591 }
588 592
589 paranoid_check_volumes(ubi); 593 err = paranoid_check_volumes(ubi);
590 return 0; 594 return err;
591 595
592out_acc: 596out_acc:
593 if (pebs > 0) { 597 if (pebs > 0) {
@@ -602,6 +606,44 @@ out_free:
602} 606}
603 607
604/** 608/**
609 * ubi_rename_volumes - re-name UBI volumes.
610 * @ubi: UBI device description object
611 * @rename_list: list of &struct ubi_rename_entry objects
612 *
613 * This function re-names or removes volumes specified in the re-name list.
614 * Returns zero in case of success and a negative error code in case of
615 * failure.
616 */
617int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
618{
619 int err;
620 struct ubi_rename_entry *re;
621
622 err = ubi_vtbl_rename_volumes(ubi, rename_list);
623 if (err)
624 return err;
625
626 list_for_each_entry(re, rename_list, list) {
627 if (re->remove) {
628 err = ubi_remove_volume(re->desc, 1);
629 if (err)
630 break;
631 } else {
632 struct ubi_volume *vol = re->desc->vol;
633
634 spin_lock(&ubi->volumes_lock);
635 vol->name_len = re->new_name_len;
636 memcpy(vol->name, re->new_name, re->new_name_len + 1);
637 spin_unlock(&ubi->volumes_lock);
638 }
639 }
640
641 if (!err)
642 err = paranoid_check_volumes(ubi);
643 return err;
644}
645
646/**
605 * ubi_add_volume - add volume. 647 * ubi_add_volume - add volume.
606 * @ubi: UBI device description object 648 * @ubi: UBI device description object
607 * @vol: volume description object 649 * @vol: volume description object
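For context, the new ubi_rename_volumes() walks a caller-built list of ubi_rename_entry items: entries flagged 'remove' are deleted with no_vtbl=1 because ubi_vtbl_rename_volumes() has already rewritten the volume table, and the rest just get their in-RAM name updated. A hedged sketch of how a caller might build such a list - the entry's field layout is inferred from how this diff uses it, and the name and descriptor are placeholders:

	LIST_HEAD(rename_list);
	struct ubi_rename_entry *re;
	int err;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return -ENOMEM;
	re->desc = desc;			/* volume opened in exclusive mode */
	re->remove = 0;				/* rename, do not remove           */
	re->new_name_len = strlen("newvol");
	memcpy(re->new_name, "newvol", re->new_name_len + 1);
	list_add_tail(&re->list, &rename_list);

	err = ubi_rename_volumes(ubi, &rename_list);
	/* the caller frees the list entries afterwards */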
@@ -615,8 +657,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
615 int err, vol_id = vol->vol_id; 657 int err, vol_id = vol->vol_id;
616 dev_t dev; 658 dev_t dev;
617 659
618 dbg_msg("add volume %d", vol_id); 660 dbg_gen("add volume %d", vol_id);
619 ubi_dbg_dump_vol_info(vol);
620 661
621 /* Register character device for the volume */ 662 /* Register character device for the volume */
622 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 663 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
@@ -650,8 +691,8 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
650 return err; 691 return err;
651 } 692 }
652 693
653 paranoid_check_volumes(ubi); 694 err = paranoid_check_volumes(ubi);
654 return 0; 695 return err;
655 696
656out_gluebi: 697out_gluebi:
657 err = ubi_destroy_gluebi(vol); 698 err = ubi_destroy_gluebi(vol);
@@ -672,7 +713,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
672{ 713{
673 int err; 714 int err;
674 715
675 dbg_msg("free volume %d", vol->vol_id); 716 dbg_gen("free volume %d", vol->vol_id);
676 717
677 ubi->volumes[vol->vol_id] = NULL; 718 ubi->volumes[vol->vol_id] = NULL;
678 err = ubi_destroy_gluebi(vol); 719 err = ubi_destroy_gluebi(vol);
@@ -686,8 +727,10 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
686 * paranoid_check_volume - check volume information. 727 * paranoid_check_volume - check volume information.
687 * @ubi: UBI device description object 728 * @ubi: UBI device description object
688 * @vol_id: volume ID 729 * @vol_id: volume ID
730 *
731 * Returns zero if volume is all right and a negative error code if not.
689 */ 732 */
690static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) 733static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
691{ 734{
692 int idx = vol_id2idx(ubi, vol_id); 735 int idx = vol_id2idx(ubi, vol_id);
693 int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; 736 int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -705,16 +748,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
705 goto fail; 748 goto fail;
706 } 749 }
707 spin_unlock(&ubi->volumes_lock); 750 spin_unlock(&ubi->volumes_lock);
708 return; 751 return 0;
709 }
710
711 if (vol->exclusive) {
712 /*
713 * The volume may be being created at the moment, do not check
714 * it (e.g., it may be in the middle of ubi_create_volume().
715 */
716 spin_unlock(&ubi->volumes_lock);
717 return;
718 } 752 }
719 753
720 if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || 754 if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
@@ -727,7 +761,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
727 goto fail; 761 goto fail;
728 } 762 }
729 763
730 n = vol->alignment % ubi->min_io_size; 764 n = vol->alignment & (ubi->min_io_size - 1);
731 if (vol->alignment != 1 && n) { 765 if (vol->alignment != 1 && n) {
732 ubi_err("alignment is not multiple of min I/O unit"); 766 ubi_err("alignment is not multiple of min I/O unit");
733 goto fail; 767 goto fail;
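The switch from '% ubi->min_io_size' to '& (ubi->min_io_size - 1)' is valid only because the min I/O unit of flash is a power of two; for such p, x % p == x & (p - 1). A tiny standalone check of that identity (p is a made-up value):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int p = 512;		/* hypothetical power-of-two min I/O unit */
	unsigned int x;

	for (x = 0; x < 4096; x++)
		assert((x % p) == (x & (p - 1)));	/* holds only for power-of-two p */
	printf("x %% %u == x & (%u - 1) for all tested x\n", p, p);
	return 0;
}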
@@ -824,31 +858,39 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
824 858
825 if (alignment != vol->alignment || data_pad != vol->data_pad || 859 if (alignment != vol->alignment || data_pad != vol->data_pad ||
826 upd_marker != vol->upd_marker || vol_type != vol->vol_type || 860 upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
827 name_len!= vol->name_len || strncmp(name, vol->name, name_len)) { 861 name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
828 ubi_err("volume info is different"); 862 ubi_err("volume info is different");
829 goto fail; 863 goto fail;
830 } 864 }
831 865
832 spin_unlock(&ubi->volumes_lock); 866 spin_unlock(&ubi->volumes_lock);
833 return; 867 return 0;
834 868
835fail: 869fail:
836 ubi_err("paranoid check failed for volume %d", vol_id); 870 ubi_err("paranoid check failed for volume %d", vol_id);
837 ubi_dbg_dump_vol_info(vol); 871 if (vol)
872 ubi_dbg_dump_vol_info(vol);
838 ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); 873 ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
839 spin_unlock(&ubi->volumes_lock); 874 spin_unlock(&ubi->volumes_lock);
840 BUG(); 875 return -EINVAL;
841} 876}
842 877
843/** 878/**
844 * paranoid_check_volumes - check information about all volumes. 879 * paranoid_check_volumes - check information about all volumes.
845 * @ubi: UBI device description object 880 * @ubi: UBI device description object
881 *
882 * Returns zero if volumes are all right and a negative error code if not.
846 */ 883 */
847static void paranoid_check_volumes(struct ubi_device *ubi) 884static int paranoid_check_volumes(struct ubi_device *ubi)
848{ 885{
849 int i; 886 int i, err = 0;
850 887
851 for (i = 0; i < ubi->vtbl_slots; i++) 888 for (i = 0; i < ubi->vtbl_slots; i++) {
852 paranoid_check_volume(ubi, i); 889 err = paranoid_check_volume(ubi, i);
890 if (err)
891 break;
892 }
893
894 return err;
853} 895}
854#endif 896#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index af36b12be278..217d0e111b2a 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -115,8 +115,58 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
115} 115}
116 116
117/** 117/**
118 * vtbl_check - check if volume table is not corrupted and contains sensible 118 * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table.
119 * data. 119 * @ubi: UBI device description object
120 * @rename_list: list of &struct ubi_rename_entry objects
121 *
122 * This function re-names multiple volumes specified in @rename_list in the volume
123 * table. Returns zero in case of success and a negative error code in case of
124 * failure.
125 */
126int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
127 struct list_head *rename_list)
128{
129 int i, err;
130 struct ubi_rename_entry *re;
131 struct ubi_volume *layout_vol;
132
133 list_for_each_entry(re, rename_list, list) {
134 uint32_t crc;
135 struct ubi_volume *vol = re->desc->vol;
136 struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];
137
138 if (re->remove) {
139 memcpy(vtbl_rec, &empty_vtbl_record,
140 sizeof(struct ubi_vtbl_record));
141 continue;
142 }
143
144 vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
145 memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
146 memset(vtbl_rec->name + re->new_name_len, 0,
147 UBI_VOL_NAME_MAX + 1 - re->new_name_len);
148 crc = crc32(UBI_CRC32_INIT, vtbl_rec,
149 UBI_VTBL_RECORD_SIZE_CRC);
150 vtbl_rec->crc = cpu_to_be32(crc);
151 }
152
153 layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
154 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
155 err = ubi_eba_unmap_leb(ubi, layout_vol, i);
156 if (err)
157 return err;
158
159 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
160 ubi->vtbl_size, UBI_LONGTERM);
161 if (err)
162 return err;
163 }
164
165 return 0;
166}
167
168/**
169 * vtbl_check - check if volume table is not corrupted and sensible.
120 * @ubi: UBI device description object 170 * @ubi: UBI device description object
121 * @vtbl: volume table 171 * @vtbl: volume table
122 * 172 *
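The heart of the new ubi_vtbl_rename_volumes() is rewriting each volume table record in place and refreshing its CRC before the table is flushed to both copies in the layout volume. A hedged, annotated sketch of the per-record update, using the constants and endian helpers exactly as this diff does:

	vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
	memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
	/* clear the rest of the fixed-size name field */
	memset(vtbl_rec->name + re->new_name_len, 0,
	       UBI_VOL_NAME_MAX + 1 - re->new_name_len);
	/* the CRC covers the record up to, but not including, the crc field */
	crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
	vtbl_rec->crc = cpu_to_be32(crc);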
@@ -127,7 +177,7 @@ static int vtbl_check(const struct ubi_device *ubi,
127 const struct ubi_vtbl_record *vtbl) 177 const struct ubi_vtbl_record *vtbl)
128{ 178{
129 int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; 179 int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
130 int upd_marker; 180 int upd_marker, err;
131 uint32_t crc; 181 uint32_t crc;
132 const char *name; 182 const char *name;
133 183
@@ -153,7 +203,7 @@ static int vtbl_check(const struct ubi_device *ubi,
153 if (reserved_pebs == 0) { 203 if (reserved_pebs == 0) {
154 if (memcmp(&vtbl[i], &empty_vtbl_record, 204 if (memcmp(&vtbl[i], &empty_vtbl_record,
155 UBI_VTBL_RECORD_SIZE)) { 205 UBI_VTBL_RECORD_SIZE)) {
156 dbg_err("bad empty record"); 206 err = 2;
157 goto bad; 207 goto bad;
158 } 208 }
159 continue; 209 continue;
@@ -161,56 +211,57 @@ static int vtbl_check(const struct ubi_device *ubi,
161 211
162 if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || 212 if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
163 name_len < 0) { 213 name_len < 0) {
164 dbg_err("negative values"); 214 err = 3;
165 goto bad; 215 goto bad;
166 } 216 }
167 217
168 if (alignment > ubi->leb_size || alignment == 0) { 218 if (alignment > ubi->leb_size || alignment == 0) {
169 dbg_err("bad alignment"); 219 err = 4;
170 goto bad; 220 goto bad;
171 } 221 }
172 222
173 n = alignment % ubi->min_io_size; 223 n = alignment & (ubi->min_io_size - 1);
174 if (alignment != 1 && n) { 224 if (alignment != 1 && n) {
175 dbg_err("alignment is not multiple of min I/O unit"); 225 err = 5;
176 goto bad; 226 goto bad;
177 } 227 }
178 228
179 n = ubi->leb_size % alignment; 229 n = ubi->leb_size % alignment;
180 if (data_pad != n) { 230 if (data_pad != n) {
181 dbg_err("bad data_pad, has to be %d", n); 231 dbg_err("bad data_pad, has to be %d", n);
232 err = 6;
182 goto bad; 233 goto bad;
183 } 234 }
184 235
185 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { 236 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
186 dbg_err("bad vol_type"); 237 err = 7;
187 goto bad; 238 goto bad;
188 } 239 }
189 240
190 if (upd_marker != 0 && upd_marker != 1) { 241 if (upd_marker != 0 && upd_marker != 1) {
191 dbg_err("bad upd_marker"); 242 err = 8;
192 goto bad; 243 goto bad;
193 } 244 }
194 245
195 if (reserved_pebs > ubi->good_peb_count) { 246 if (reserved_pebs > ubi->good_peb_count) {
196 dbg_err("too large reserved_pebs, good PEBs %d", 247 dbg_err("too large reserved_pebs, good PEBs %d",
197 ubi->good_peb_count); 248 ubi->good_peb_count);
249 err = 9;
198 goto bad; 250 goto bad;
199 } 251 }
200 252
201 if (name_len > UBI_VOL_NAME_MAX) { 253 if (name_len > UBI_VOL_NAME_MAX) {
202 dbg_err("too long volume name, max %d", 254 err = 10;
203 UBI_VOL_NAME_MAX);
204 goto bad; 255 goto bad;
205 } 256 }
206 257
207 if (name[0] == '\0') { 258 if (name[0] == '\0') {
208 dbg_err("NULL volume name"); 259 err = 11;
209 goto bad; 260 goto bad;
210 } 261 }
211 262
212 if (name_len != strnlen(name, name_len + 1)) { 263 if (name_len != strnlen(name, name_len + 1)) {
213 dbg_err("bad name_len"); 264 err = 12;
214 goto bad; 265 goto bad;
215 } 266 }
216 } 267 }
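vtbl_check() now tags each failing test with a small number and reports it once at the bad: label, so the check still pinpoints which test fired in builds where the dbg_err() strings are compiled out. A hedged sketch of the shape (only two of the checks shown):

	if (alignment > ubi->leb_size || alignment == 0) {
		err = 4;			/* each test gets its own number */
		goto bad;
	}
	if (name_len > UBI_VOL_NAME_MAX) {
		err = 10;
		goto bad;
	}
	...
bad:
	ubi_err("volume table check failed: record %d, error %d", i, err);
	ubi_dbg_dump_vtbl_record(&vtbl[i], i);
	return -EINVAL;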
@@ -235,7 +286,7 @@ static int vtbl_check(const struct ubi_device *ubi,
235 return 0; 286 return 0;
236 287
237bad: 288bad:
238 ubi_err("volume table check failed, record %d", i); 289 ubi_err("volume table check failed: record %d, error %d", i, err);
239 ubi_dbg_dump_vtbl_record(&vtbl[i], i); 290 ubi_dbg_dump_vtbl_record(&vtbl[i], i);
240 return -EINVAL; 291 return -EINVAL;
241} 292}
@@ -287,7 +338,6 @@ retry:
287 vid_hdr->data_pad = cpu_to_be32(0); 338 vid_hdr->data_pad = cpu_to_be32(0);
288 vid_hdr->lnum = cpu_to_be32(copy); 339 vid_hdr->lnum = cpu_to_be32(copy);
289 vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum); 340 vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
290 vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0);
291 341
292 /* The EC header is already there, write the VID header */ 342 /* The EC header is already there, write the VID header */
293 err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr); 343 err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
@@ -370,7 +420,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
370 * to LEB 0. 420 * to LEB 0.
371 */ 421 */
372 422
373 dbg_msg("check layout volume"); 423 dbg_gen("check layout volume");
374 424
375 /* Read both LEB 0 and LEB 1 into memory */ 425 /* Read both LEB 0 and LEB 1 into memory */
376 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { 426 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
@@ -384,7 +434,16 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
384 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, 434 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
385 ubi->vtbl_size); 435 ubi->vtbl_size);
386 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) 436 if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
387 /* Scrub the PEB later */ 437 /*
438 * Scrub the PEB later. Note, -EBADMSG indicates an
439 * uncorrectable ECC error, but we have our own CRC and
440 * the data will be checked later. If the data is OK,
441 * the PEB will be scrubbed (because we set
442 * seb->scrub). If the data is not OK, the contents of
443 * the PEB will be recovered from the second copy, and
444 * seb->scrub will be cleared in
445 * 'ubi_scan_add_used()'.
446 */
388 seb->scrub = 1; 447 seb->scrub = 1;
389 else if (err) 448 else if (err)
390 goto out_free; 449 goto out_free;
@@ -400,7 +459,8 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
400 if (!leb_corrupted[0]) { 459 if (!leb_corrupted[0]) {
401 /* LEB 0 is OK */ 460 /* LEB 0 is OK */
402 if (leb[1]) 461 if (leb[1])
403 leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size); 462 leb_corrupted[1] = memcmp(leb[0], leb[1],
463 ubi->vtbl_size);
404 if (leb_corrupted[1]) { 464 if (leb_corrupted[1]) {
405 ubi_warn("volume table copy #2 is corrupted"); 465 ubi_warn("volume table copy #2 is corrupted");
406 err = create_vtbl(ubi, si, 1, leb[0]); 466 err = create_vtbl(ubi, si, 1, leb[0]);
@@ -620,30 +680,32 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
620static int check_sv(const struct ubi_volume *vol, 680static int check_sv(const struct ubi_volume *vol,
621 const struct ubi_scan_volume *sv) 681 const struct ubi_scan_volume *sv)
622{ 682{
683 int err;
684
623 if (sv->highest_lnum >= vol->reserved_pebs) { 685 if (sv->highest_lnum >= vol->reserved_pebs) {
624 dbg_err("bad highest_lnum"); 686 err = 1;
625 goto bad; 687 goto bad;
626 } 688 }
627 if (sv->leb_count > vol->reserved_pebs) { 689 if (sv->leb_count > vol->reserved_pebs) {
628 dbg_err("bad leb_count"); 690 err = 2;
629 goto bad; 691 goto bad;
630 } 692 }
631 if (sv->vol_type != vol->vol_type) { 693 if (sv->vol_type != vol->vol_type) {
632 dbg_err("bad vol_type"); 694 err = 3;
633 goto bad; 695 goto bad;
634 } 696 }
635 if (sv->used_ebs > vol->reserved_pebs) { 697 if (sv->used_ebs > vol->reserved_pebs) {
636 dbg_err("bad used_ebs"); 698 err = 4;
637 goto bad; 699 goto bad;
638 } 700 }
639 if (sv->data_pad != vol->data_pad) { 701 if (sv->data_pad != vol->data_pad) {
640 dbg_err("bad data_pad"); 702 err = 5;
641 goto bad; 703 goto bad;
642 } 704 }
643 return 0; 705 return 0;
644 706
645bad: 707bad:
646 ubi_err("bad scanning information"); 708 ubi_err("bad scanning information, error %d", err);
647 ubi_dbg_dump_sv(sv); 709 ubi_dbg_dump_sv(sv);
648 ubi_dbg_dump_vol_info(vol); 710 ubi_dbg_dump_vol_info(vol);
649 return -EINVAL; 711 return -EINVAL;
@@ -672,14 +734,13 @@ static int check_scanning_info(const struct ubi_device *ubi,
672 return -EINVAL; 734 return -EINVAL;
673 } 735 }
674 736
675 if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT&& 737 if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
676 si->highest_vol_id < UBI_INTERNAL_VOL_START) { 738 si->highest_vol_id < UBI_INTERNAL_VOL_START) {
677 ubi_err("too large volume ID %d found by scanning", 739 ubi_err("too large volume ID %d found by scanning",
678 si->highest_vol_id); 740 si->highest_vol_id);
679 return -EINVAL; 741 return -EINVAL;
680 } 742 }
681 743
682
683 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { 744 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
684 cond_resched(); 745 cond_resched();
685 746
@@ -717,8 +778,7 @@ static int check_scanning_info(const struct ubi_device *ubi,
717} 778}
718 779
719/** 780/**
720 * ubi_read_volume_table - read volume table. 781 * ubi_read_volume_table - read the volume table.
721 * information.
722 * @ubi: UBI device description object 782 * @ubi: UBI device description object
723 * @si: scanning information 783 * @si: scanning information
724 * 784 *
@@ -797,11 +857,10 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
797 857
798out_free: 858out_free:
799 vfree(ubi->vtbl); 859 vfree(ubi->vtbl);
800 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) 860 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
801 if (ubi->volumes[i]) { 861 kfree(ubi->volumes[i]);
802 kfree(ubi->volumes[i]); 862 ubi->volumes[i] = NULL;
803 ubi->volumes[i] = NULL; 863 }
804 }
805 return err; 864 return err;
806} 865}
807 866
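The simplified out_free loop above works because kfree(NULL) is defined to be a no-op, so the per-slot NULL check can go; resetting the slot afterwards keeps any later teardown from freeing it twice. The idiom in miniature:

	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]);		/* kfree(NULL) does nothing       */
		ubi->volumes[i] = NULL;		/* later cleanup sees an empty slot */
	}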
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index a471a491f0ab..05d70937b543 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -19,22 +19,22 @@
19 */ 19 */
20 20
21/* 21/*
22 * UBI wear-leveling unit. 22 * UBI wear-leveling sub-system.
23 * 23 *
24 * This unit is responsible for wear-leveling. It works in terms of physical 24 * This sub-system is responsible for wear-leveling. It works in terms of
25 * eraseblocks and erase counters and knows nothing about logical eraseblocks, 25 * physical* eraseblocks and erase counters and knows nothing about logical
26 * volumes, etc. From this unit's perspective all physical eraseblocks are of 26 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
27 * two types - used and free. Used physical eraseblocks are those that were 27 * eraseblocks are of two types - used and free. Used physical eraseblocks are
28 * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are 28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
29 * those that were put by the 'ubi_wl_put_peb()' function. 29 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
30 * 30 *
31 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter 31 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter
32 * header. The rest of the physical eraseblock contains only 0xFF bytes. 32 * header. The rest of the physical eraseblock contains only %0xFF bytes.
33 * 33 *
34 * When physical eraseblocks are returned to the WL unit by means of the 34 * When physical eraseblocks are returned to the WL sub-system by means of the
35 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is 35 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
36 * done asynchronously in context of the per-UBI device background thread, 36 * done asynchronously in context of the per-UBI device background thread,
37 * which is also managed by the WL unit. 37 * which is also managed by the WL sub-system.
38 * 38 *
39 * The wear-leveling is ensured by means of moving the contents of used 39 * The wear-leveling is ensured by means of moving the contents of used
40 * physical eraseblocks with low erase counter to free physical eraseblocks 40 * physical eraseblocks with low erase counter to free physical eraseblocks
@@ -43,34 +43,36 @@
43 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick 43 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
44 * an "optimal" physical eraseblock. For example, when it is known that the 44 * an "optimal" physical eraseblock. For example, when it is known that the
45 * physical eraseblock will be "put" soon because it contains short-term data, 45 * physical eraseblock will be "put" soon because it contains short-term data,
46 * the WL unit may pick a free physical eraseblock with low erase counter, and 46 * the WL sub-system may pick a free physical eraseblock with low erase
47 * so forth. 47 * counter, and so forth.
48 * 48 *
49 * If the WL unit fails to erase a physical eraseblock, it marks it as bad. 49 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
50 * bad.
50 * 51 *
51 * This unit is also responsible for scrubbing. If a bit-flip is detected in a 52 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
52 * physical eraseblock, it has to be moved. Technically this is the same as 53 * in a physical eraseblock, it has to be moved. Technically this is the same
53 * moving it for wear-leveling reasons. 54 * as moving it for wear-leveling reasons.
54 * 55 *
55 * As it was said, for the UBI unit all physical eraseblocks are either "free" 56 * As it was said, for the UBI sub-system all physical eraseblocks are either
56 * or "used". Free eraseblock are kept in the @wl->free RB-tree, while used 57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
57 * eraseblocks are kept in a set of different RB-trees: @wl->used, 58 * used eraseblocks are kept in a set of different RB-trees: @wl->used,
58 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. 59 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
59 * 60 *
60 * Note, in this implementation, we keep a small in-RAM object for each physical 61 * Note, in this implementation, we keep a small in-RAM object for each physical
61 * eraseblock. This is surely not a scalable solution. But it appears to be good 62 * eraseblock. This is surely not a scalable solution. But it appears to be good
62 * enough for moderately large flashes and it is simple. In future, one may 63 * enough for moderately large flashes and it is simple. In future, one may
63 * re-work this unit and make it more scalable. 64 * re-work this sub-system and make it more scalable.
64 * 65 *
65 * At the moment this unit does not utilize the sequence number, which was 66 * At the moment this sub-system does not utilize the sequence number, which
66 * introduced relatively recently. But it would be wise to do this because the 67 * was introduced relatively recently. But it would be wise to do this because
67 * sequence number of a logical eraseblock characterizes how old it is. For 68 * the sequence number of a logical eraseblock characterizes how old it is. For
68 * example, when we move a PEB with low erase counter, and we need to pick the 69 * example, when we move a PEB with low erase counter, and we need to pick the
69 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we 70 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
70 * pick target PEB with an average EC if our PEB is not very "old". This is a 71 * pick target PEB with an average EC if our PEB is not very "old". This is a
71 * room for future re-works of the WL unit. 72 * room for future re-works of the WL sub-system.
72 * 73 *
73 * FIXME: looks too complex, should be simplified (later). 74 * Note: the stuff with protection trees looks too complex and is difficult to
75 * understand. Should be fixed.
74 */ 76 */
75 77
76#include <linux/slab.h> 78#include <linux/slab.h>
@@ -92,20 +94,21 @@
92 94
93/* 95/*
94 * Maximum difference between two erase counters. If this threshold is 96 * Maximum difference between two erase counters. If this threshold is
95 * exceeded, the WL unit starts moving data from used physical eraseblocks with 97 * exceeded, the WL sub-system starts moving data from used physical
96 * low erase counter to free physical eraseblocks with high erase counter. 98 * eraseblocks with low erase counter to free physical eraseblocks with high
99 * erase counter.
97 */ 100 */
98#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD 101#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
99 102
100/* 103/*
101 * When a physical eraseblock is moved, the WL unit has to pick the target 104 * When a physical eraseblock is moved, the WL sub-system has to pick the target
102 * physical eraseblock to move to. The simplest way would be just to pick the 105 * physical eraseblock to move to. The simplest way would be just to pick the
103 * one with the highest erase counter. But in certain workloads this could lead 106 * one with the highest erase counter. But in certain workloads this could lead
104 * to an unlimited wear of one or few physical eraseblock. Indeed, imagine a 107 * to an unlimited wear of one or few physical eraseblock. Indeed, imagine a
105 * situation when the picked physical eraseblock is constantly erased after the 108 * situation when the picked physical eraseblock is constantly erased after the
106 * data is written to it. So, we have a constant which limits the highest erase 109 * data is written to it. So, we have a constant which limits the highest erase
107 * counter of the free physical eraseblock to pick. Namely, the WL unit does 110 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
108 * not pick eraseblocks with erase counter greater than the lowest erase 111 * does not pick eraseblocks with erase counter greater than the lowest erase
109 * counter plus %WL_FREE_MAX_DIFF. 112 * counter plus %WL_FREE_MAX_DIFF.
110 */ 113 */
111#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) 114#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
@@ -123,11 +126,11 @@
123 * @abs_ec: the absolute erase counter value when the protection ends 126 * @abs_ec: the absolute erase counter value when the protection ends
124 * @e: the wear-leveling entry of the physical eraseblock under protection 127 * @e: the wear-leveling entry of the physical eraseblock under protection
125 * 128 *
126 * When the WL unit returns a physical eraseblock, the physical eraseblock is 129 * When the WL sub-system returns a physical eraseblock, the physical
127 * protected from being moved for some "time". For this reason, the physical 130 * eraseblock is protected from being moved for some "time". For this reason,
128 * eraseblock is not directly moved from the @wl->free tree to the @wl->used 131 * the physical eraseblock is not directly moved from the @wl->free tree to the
129 * tree. There is one more tree in between where this physical eraseblock is 132 * @wl->used tree. There is one more tree in between where this physical
130 * temporarily stored (@wl->prot). 133 * eraseblock is temporarily stored (@wl->prot).
131 * 134 *
132 * All this protection stuff is needed because: 135 * All this protection stuff is needed because:
133 * o we don't want to move physical eraseblocks just after we have given them 136 * o we don't want to move physical eraseblocks just after we have given them
@@ -175,7 +178,6 @@ struct ubi_wl_prot_entry {
175 * @list: a link in the list of pending works 178 * @list: a link in the list of pending works
176 * @func: worker function 179 * @func: worker function
177 * @priv: private data of the worker function 180 * @priv: private data of the worker function
178 *
179 * @e: physical eraseblock to erase 181 * @e: physical eraseblock to erase
180 * @torture: if the physical eraseblock has to be tortured 182 * @torture: if the physical eraseblock has to be tortured
181 * 183 *
@@ -473,52 +475,47 @@ retry:
473 } 475 }
474 476
475 switch (dtype) { 477 switch (dtype) {
476 case UBI_LONGTERM: 478 case UBI_LONGTERM:
477 /* 479 /*
478 * For long term data we pick a physical eraseblock 480 * For long term data we pick a physical eraseblock with high
479 * with high erase counter. But the highest erase 481 * erase counter. But the highest erase counter we can pick is
480 * counter we can pick is bounded by the lowest 482 * bounded by the lowest erase counter plus
481 * erase counter plus %WL_FREE_MAX_DIFF. 483 * %WL_FREE_MAX_DIFF.
482 */ 484 */
483 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 485 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
484 protect = LT_PROTECTION; 486 protect = LT_PROTECTION;
485 break; 487 break;
486 case UBI_UNKNOWN: 488 case UBI_UNKNOWN:
487 /* 489 /*
488 * For unknown data we pick a physical eraseblock with 490 * For unknown data we pick a physical eraseblock with medium
489 * medium erase counter. But we by no means can pick a 491 * erase counter. But we by no means can pick a physical
490 * physical eraseblock with erase counter greater than or 492 * eraseblock with erase counter greater than or equivalent to the
491 * equivalent to the lowest erase counter plus 493 * lowest erase counter plus %WL_FREE_MAX_DIFF.
492 * %WL_FREE_MAX_DIFF. 494 */
493 */ 495 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
494 first = rb_entry(rb_first(&ubi->free), 496 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb);
495 struct ubi_wl_entry, rb);
496 last = rb_entry(rb_last(&ubi->free),
497 struct ubi_wl_entry, rb);
498 497
499 if (last->ec - first->ec < WL_FREE_MAX_DIFF) 498 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
500 e = rb_entry(ubi->free.rb_node, 499 e = rb_entry(ubi->free.rb_node,
501 struct ubi_wl_entry, rb); 500 struct ubi_wl_entry, rb);
502 else { 501 else {
503 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; 502 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
504 e = find_wl_entry(&ubi->free, medium_ec); 503 e = find_wl_entry(&ubi->free, medium_ec);
505 } 504 }
506 protect = U_PROTECTION; 505 protect = U_PROTECTION;
507 break; 506 break;
508 case UBI_SHORTTERM: 507 case UBI_SHORTTERM:
509 /* 508 /*
510 * For short term data we pick a physical eraseblock 509 * For short term data we pick a physical eraseblock with the
511 * with the lowest erase counter as we expect it will 510 * lowest erase counter as we expect it will be erased soon.
512 * be erased soon. 511 */
513 */ 512 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
514 e = rb_entry(rb_first(&ubi->free), 513 protect = ST_PROTECTION;
515 struct ubi_wl_entry, rb); 514 break;
516 protect = ST_PROTECTION; 515 default:
517 break; 516 protect = 0;
518 default: 517 e = NULL;
519 protect = 0; 518 BUG();
520 e = NULL;
521 BUG();
522 } 519 }
523 520
524 /* 521 /*
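The re-flowed switch is the allocation policy of ubi_wl_get_peb(): long-term data gets a high erase counter PEB (capped at the lowest EC plus WL_FREE_MAX_DIFF), short-term data gets the lowest-EC PEB, and unknown data gets something in between. An annotated sketch of just the UBI_UNKNOWN branch, with the rb-tree helpers as this diff uses them:

	first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
	last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF)
		/* the free PEBs are already close in wear - take the tree root */
		e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, rb);
	else {
		/* aim at a "medium" erase counter, still within the cap */
		medium_ec = (first->ec + WL_FREE_MAX_DIFF) / 2;
		e = find_wl_entry(&ubi->free, medium_ec);
	}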
@@ -582,7 +579,8 @@ found:
582 * This function returns zero in case of success and a negative error code in 579 * This function returns zero in case of success and a negative error code in
583 * case of failure. 580 * case of failure.
584 */ 581 */
585static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) 582static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
583 int torture)
586{ 584{
587 int err; 585 int err;
588 struct ubi_ec_hdr *ec_hdr; 586 struct ubi_ec_hdr *ec_hdr;
@@ -634,8 +632,7 @@ out_free:
634} 632}
635 633
636/** 634/**
637 * check_protection_over - check if it is time to stop protecting some 635 * check_protection_over - check if it is time to stop protecting some PEBs.
638 * physical eraseblocks.
639 * @ubi: UBI device description object 636 * @ubi: UBI device description object
640 * 637 *
641 * This function is called after each erase operation, when the absolute erase 638 * This function is called after each erase operation, when the absolute erase
@@ -871,6 +868,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
871 } 868 }
872 869
873 ubi_free_vid_hdr(ubi, vid_hdr); 870 ubi_free_vid_hdr(ubi, vid_hdr);
871 if (scrubbing && !protect)
872 ubi_msg("scrubbed PEB %d, data moved to PEB %d",
873 e1->pnum, e2->pnum);
874
874 spin_lock(&ubi->wl_lock); 875 spin_lock(&ubi->wl_lock);
875 if (protect) 876 if (protect)
876 prot_tree_add(ubi, e1, pe, protect); 877 prot_tree_add(ubi, e1, pe, protect);
@@ -1054,8 +1055,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1054 spin_unlock(&ubi->wl_lock); 1055 spin_unlock(&ubi->wl_lock);
1055 1056
1056 /* 1057 /*
1057 * One more erase operation has happened, take care about protected 1058 * One more erase operation has happened, take care about
1058 * physical eraseblocks. 1059 * protected physical eraseblocks.
1059 */ 1060 */
1060 check_protection_over(ubi); 1061 check_protection_over(ubi);
1061 1062
@@ -1136,7 +1137,7 @@ out_ro:
1136} 1137}
1137 1138
1138/** 1139/**
1139 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit. 1140 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1140 * @ubi: UBI device description object 1141 * @ubi: UBI device description object
1141 * @pnum: physical eraseblock to return 1142 * @pnum: physical eraseblock to return
1142 * @torture: if this physical eraseblock has to be tortured 1143 * @torture: if this physical eraseblock has to be tortured
@@ -1175,11 +1176,11 @@ retry:
1175 /* 1176 /*
1176 * User is putting the physical eraseblock which was selected 1177 * User is putting the physical eraseblock which was selected
1177 * as the target the data is moved to. It may happen if the EBA 1178 * as the target the data is moved to. It may happen if the EBA
1178 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but 1179 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1179 * the WL unit has not put the PEB to the "used" tree yet, but 1180 * but the WL sub-system has not put the PEB to the "used" tree
1180 * it is about to do this. So we just set a flag which will 1181 * yet, but it is about to do this. So we just set a flag which
1181 * tell the WL worker that the PEB is not needed anymore and 1182 * will tell the WL worker that the PEB is not needed anymore
1182 * should be scheduled for erasure. 1183 * and should be scheduled for erasure.
1183 */ 1184 */
1184 dbg_wl("PEB %d is the target of data moving", pnum); 1185 dbg_wl("PEB %d is the target of data moving", pnum);
1185 ubi_assert(!ubi->move_to_put); 1186 ubi_assert(!ubi->move_to_put);
@@ -1229,7 +1230,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1229{ 1230{
1230 struct ubi_wl_entry *e; 1231 struct ubi_wl_entry *e;
1231 1232
1232 ubi_msg("schedule PEB %d for scrubbing", pnum); 1233 dbg_msg("schedule PEB %d for scrubbing", pnum);
1233 1234
1234retry: 1235retry:
1235 spin_lock(&ubi->wl_lock); 1236 spin_lock(&ubi->wl_lock);
@@ -1368,7 +1369,7 @@ int ubi_thread(void *u)
1368 int err; 1369 int err;
1369 1370
1370 if (kthread_should_stop()) 1371 if (kthread_should_stop())
1371 goto out; 1372 break;
1372 1373
1373 if (try_to_freeze()) 1374 if (try_to_freeze())
1374 continue; 1375 continue;
@@ -1403,7 +1404,6 @@ int ubi_thread(void *u)
1403 cond_resched(); 1404 cond_resched();
1404 } 1405 }
1405 1406
1406out:
1407 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); 1407 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1408 return 0; 1408 return 0;
1409} 1409}
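The goto-to-break change is loop hygiene: nothing sits between the loop and the final dbg_wl(), so a plain break reaches the same code and the out: label can go. A hedged skeleton of the background-thread loop shape - kthread_should_stop(), try_to_freeze() and cond_resched() are real kernel calls, the work step is a placeholder:

	for (;;) {
		if (kthread_should_stop())
			break;			/* was "goto out"; same destination */

		if (try_to_freeze())
			continue;

		/* ... take one pending work item off the list and run it ... */
		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;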
@@ -1426,8 +1426,7 @@ static void cancel_pending(struct ubi_device *ubi)
1426} 1426}
1427 1427
1428/** 1428/**
1429 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning 1429 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
1430 * information.
1431 * @ubi: UBI device description object 1430 * @ubi: UBI device description object
1432 * @si: scanning information 1431 * @si: scanning information
1433 * 1432 *
@@ -1584,13 +1583,12 @@ static void protection_trees_destroy(struct ubi_device *ubi)
1584} 1583}
1585 1584
1586/** 1585/**
1587 * ubi_wl_close - close the wear-leveling unit. 1586 * ubi_wl_close - close the wear-leveling sub-system.
1588 * @ubi: UBI device description object 1587 * @ubi: UBI device description object
1589 */ 1588 */
1590void ubi_wl_close(struct ubi_device *ubi) 1589void ubi_wl_close(struct ubi_device *ubi)
1591{ 1590{
1592 dbg_wl("close the UBI wear-leveling unit"); 1591 dbg_wl("close the WL sub-system");
1593
1594 cancel_pending(ubi); 1592 cancel_pending(ubi);
1595 protection_trees_destroy(ubi); 1593 protection_trees_destroy(ubi);
1596 tree_destroy(&ubi->used); 1594 tree_destroy(&ubi->used);
@@ -1602,8 +1600,7 @@ void ubi_wl_close(struct ubi_device *ubi)
1602#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 1600#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1603 1601
1604/** 1602/**
1605 * paranoid_check_ec - make sure that the erase counter of a physical eraseblock 1603 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
1606 * is correct.
1607 * @ubi: UBI device description object 1604 * @ubi: UBI device description object
1608 * @pnum: the physical eraseblock number to check 1605 * @pnum: the physical eraseblock number to check
1609 * @ec: the erase counter to check 1606 * @ec: the erase counter to check
@@ -1644,13 +1641,12 @@ out_free:
1644} 1641}
1645 1642
1646/** 1643/**
1647 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present 1644 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1648 * in a WL RB-tree.
1649 * @e: the wear-leveling entry to check 1645 * @e: the wear-leveling entry to check
1650 * @root: the root of the tree 1646 * @root: the root of the tree
1651 * 1647 *
1652 * This function returns zero if @e is in the @root RB-tree and %1 if it 1648 * This function returns zero if @e is in the @root RB-tree and %1 if it is
1653 * is not. 1649 * not.
1654 */ 1650 */
1655static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, 1651static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1656 struct rb_root *root) 1652 struct rb_root *root)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 32a4f17d35fc..ecd5c71a7a8a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -2,12 +2,6 @@
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) 3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
4 * 4 *
5 * This version of the driver is specific to the FADS implementation,
6 * since the board contains control registers external to the processor
7 * for the control of the LevelOne LXT970 transceiver. The MPC860T manual
8 * describes connections using the internal parallel port I/O, which
9 * is basically all of Port D.
10 *
11 * Right now, I am very wasteful with the buffers. I allocate memory 5 * Right now, I am very wasteful with the buffers. I allocate memory
12 * pages and then divide them into 2K frame buffers. This way I know I 6 * pages and then divide them into 2K frame buffers. This way I know I
13 * have buffers large enough to hold one frame within one buffer descriptor. 7 * have buffers large enough to hold one frame within one buffer descriptor.
@@ -49,17 +43,9 @@
49#include <asm/pgtable.h> 43#include <asm/pgtable.h>
50#include <asm/cacheflush.h> 44#include <asm/cacheflush.h>
51 45
52#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
53 defined(CONFIG_M5272) || defined(CONFIG_M528x) || \
54 defined(CONFIG_M520x) || defined(CONFIG_M532x)
55#include <asm/coldfire.h> 46#include <asm/coldfire.h>
56#include <asm/mcfsim.h> 47#include <asm/mcfsim.h>
57#include "fec.h" 48#include "fec.h"
58#else
59#include <asm/8xx_immap.h>
60#include <asm/mpc8xx.h>
61#include "commproc.h"
62#endif
63 49
64#if defined(CONFIG_FEC2) 50#if defined(CONFIG_FEC2)
65#define FEC_MAX_PORTS 2 51#define FEC_MAX_PORTS 2
@@ -67,7 +53,7 @@
67#define FEC_MAX_PORTS 1 53#define FEC_MAX_PORTS 1
68#endif 54#endif
69 55
70#if defined(CONFIG_FADS) || defined(CONFIG_RPXCLASSIC) || defined(CONFIG_M5272) 56#if defined(CONFIG_M5272)
71#define HAVE_mii_link_interrupt 57#define HAVE_mii_link_interrupt
72#endif 58#endif
73 59
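With the FADS and RPXclassic code paths removed, only one board still wires up the PHY link-change interrupt, so the feature now hangs off a single config symbol. The guard pattern the hunk leaves behind, lightly annotated:

#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt		/* only this platform routes the PHY IRQ */
#endif

#ifdef HAVE_mii_link_interrupt
static irqreturn_t mii_link_interrupt(int irq, void *dev_id);
#endif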
@@ -1235,14 +1221,9 @@ static phy_info_t const * const phy_info[] = {
1235 1221
1236/* ------------------------------------------------------------------------- */ 1222/* ------------------------------------------------------------------------- */
1237#ifdef HAVE_mii_link_interrupt 1223#ifdef HAVE_mii_link_interrupt
1238#ifdef CONFIG_RPXCLASSIC
1239static void
1240mii_link_interrupt(void *dev_id);
1241#else
1242static irqreturn_t 1224static irqreturn_t
1243mii_link_interrupt(int irq, void * dev_id); 1225mii_link_interrupt(int irq, void * dev_id);
1244#endif 1226#endif
1245#endif
1246 1227
1247#if defined(CONFIG_M5272) 1228#if defined(CONFIG_M5272)
1248/* 1229/*
@@ -1795,24 +1776,6 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1795 1776
1796 if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0) 1777 if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
1797 panic("Could not allocate FEC IRQ!"); 1778 panic("Could not allocate FEC IRQ!");
1798
1799#ifdef CONFIG_RPXCLASSIC
1800 /* Make Port C, bit 15 an input that causes interrupts.
1801 */
1802 immap->im_ioport.iop_pcpar &= ~0x0001;
1803 immap->im_ioport.iop_pcdir &= ~0x0001;
1804 immap->im_ioport.iop_pcso &= ~0x0001;
1805 immap->im_ioport.iop_pcint |= 0x0001;
1806 cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);
1807
1808 /* Make LEDS reflect Link status.
1809 */
1810 *((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
1811#endif
1812#ifdef CONFIG_FADS
1813 if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0)
1814 panic("Could not allocate MII IRQ!");
1815#endif
1816} 1779}
1817 1780
1818static void __inline__ fec_get_mac(struct net_device *dev) 1781static void __inline__ fec_get_mac(struct net_device *dev)
@@ -1821,16 +1784,6 @@ static void __inline__ fec_get_mac(struct net_device *dev)
1821 1784
1822 bd = (bd_t *)__res; 1785 bd = (bd_t *)__res;
1823 memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN); 1786 memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN);
1824
1825#ifdef CONFIG_RPXCLASSIC
1826 /* The Embedded Planet boards have only one MAC address in
1827 * the EEPROM, but can have two Ethernet ports. For the
1828 * FEC port, we create another address by setting one of
1829 * the address bits above something that would have (up to
1830 * now) been allocated.
1831 */
1832 dev->dev_addr[3] |= 0x80;
1833#endif
1834} 1787}
1835 1788
1836static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) 1789static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
@@ -2109,13 +2062,8 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
2109/* This interrupt occurs when the PHY detects a link change. 2062/* This interrupt occurs when the PHY detects a link change.
2110*/ 2063*/
2111#ifdef HAVE_mii_link_interrupt 2064#ifdef HAVE_mii_link_interrupt
2112#ifdef CONFIG_RPXCLASSIC
2113static void
2114mii_link_interrupt(void *dev_id)
2115#else
2116static irqreturn_t 2065static irqreturn_t
2117mii_link_interrupt(int irq, void * dev_id) 2066mii_link_interrupt(int irq, void * dev_id)
2118#endif
2119{ 2067{
2120 struct net_device *dev = dev_id; 2068 struct net_device *dev = dev_id;
2121 struct fec_enet_private *fep = netdev_priv(dev); 2069 struct fec_enet_private *fep = netdev_priv(dev);
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 00527805e4f1..e5a6e2e84540 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
33*/ 33*/
34 34
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/moduleparam.h>
36#include <linux/types.h> 37#include <linux/types.h>
37#include <linux/errno.h> 38#include <linux/errno.h>
38#include <linux/ioport.h> 39#include <linux/ioport.h>
@@ -52,7 +53,9 @@
52#include <asm/hvcall.h> 53#include <asm/hvcall.h>
53#include <asm/atomic.h> 54#include <asm/atomic.h>
54#include <asm/vio.h> 55#include <asm/vio.h>
56#include <asm/iommu.h>
55#include <asm/uaccess.h> 57#include <asm/uaccess.h>
58#include <asm/firmware.h>
56#include <linux/seq_file.h> 59#include <linux/seq_file.h>
57 60
58#include "ibmveth.h" 61#include "ibmveth.h"
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
94static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); 97static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
95static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); 98static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
96static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); 99static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
100static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
97static struct kobj_type ktype_veth_pool; 101static struct kobj_type ktype_veth_pool;
98 102
103
99#ifdef CONFIG_PROC_FS 104#ifdef CONFIG_PROC_FS
100#define IBMVETH_PROC_DIR "ibmveth" 105#define IBMVETH_PROC_DIR "ibmveth"
101static struct proc_dir_entry *ibmveth_proc_dir; 106static struct proc_dir_entry *ibmveth_proc_dir;
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
226 u32 i; 231 u32 i;
227 u32 count = pool->size - atomic_read(&pool->available); 232 u32 count = pool->size - atomic_read(&pool->available);
228 u32 buffers_added = 0; 233 u32 buffers_added = 0;
234 struct sk_buff *skb;
235 unsigned int free_index, index;
236 u64 correlator;
237 unsigned long lpar_rc;
238 dma_addr_t dma_addr;
229 239
230 mb(); 240 mb();
231 241
232 for(i = 0; i < count; ++i) { 242 for(i = 0; i < count; ++i) {
233 struct sk_buff *skb;
234 unsigned int free_index, index;
235 u64 correlator;
236 union ibmveth_buf_desc desc; 243 union ibmveth_buf_desc desc;
237 unsigned long lpar_rc;
238 dma_addr_t dma_addr;
239 244
240 skb = alloc_skb(pool->buff_size, GFP_ATOMIC); 245 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
241 246
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
255 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 260 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
256 pool->buff_size, DMA_FROM_DEVICE); 261 pool->buff_size, DMA_FROM_DEVICE);
257 262
263 if (dma_mapping_error(dma_addr))
264 goto failure;
265
258 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; 266 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
259 pool->dma_addr[index] = dma_addr; 267 pool->dma_addr[index] = dma_addr;
260 pool->skbuff[index] = skb; 268 pool->skbuff[index] = skb;
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
267 275
268 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 276 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
269 277
270 if(lpar_rc != H_SUCCESS) { 278 if (lpar_rc != H_SUCCESS)
271 pool->free_map[free_index] = index; 279 goto failure;
272 pool->skbuff[index] = NULL; 280 else {
273 if (pool->consumer_index == 0)
274 pool->consumer_index = pool->size - 1;
275 else
276 pool->consumer_index--;
277 dma_unmap_single(&adapter->vdev->dev,
278 pool->dma_addr[index], pool->buff_size,
279 DMA_FROM_DEVICE);
280 dev_kfree_skb_any(skb);
281 adapter->replenish_add_buff_failure++;
282 break;
283 } else {
284 buffers_added++; 281 buffers_added++;
285 adapter->replenish_add_buff_success++; 282 adapter->replenish_add_buff_success++;
286 } 283 }
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
288 285
289 mb(); 286 mb();
290 atomic_add(buffers_added, &(pool->available)); 287 atomic_add(buffers_added, &(pool->available));
288 return;
289
290failure:
291 pool->free_map[free_index] = index;
292 pool->skbuff[index] = NULL;
293 if (pool->consumer_index == 0)
294 pool->consumer_index = pool->size - 1;
295 else
296 pool->consumer_index--;
297 if (!dma_mapping_error(dma_addr))
298 dma_unmap_single(&adapter->vdev->dev,
299 pool->dma_addr[index], pool->buff_size,
300 DMA_FROM_DEVICE);
301 dev_kfree_skb_any(skb);
302 adapter->replenish_add_buff_failure++;
303
304 mb();
305 atomic_add(buffers_added, &(pool->available));
291} 306}
292 307
293/* replenish routine */ 308/* replenish routine */
@@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
297 312
298 adapter->replenish_task_cycles++; 313 adapter->replenish_task_cycles++;
299 314
300 for(i = 0; i < IbmVethNumBufferPools; i++) 315 for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
301 if(adapter->rx_buff_pool[i].active) 316 if(adapter->rx_buff_pool[i].active)
302 ibmveth_replenish_buffer_pool(adapter, 317 ibmveth_replenish_buffer_pool(adapter,
303 &adapter->rx_buff_pool[i]); 318 &adapter->rx_buff_pool[i]);
@@ -472,6 +487,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
472 if (adapter->rx_buff_pool[i].active) 487 if (adapter->rx_buff_pool[i].active)
473 ibmveth_free_buffer_pool(adapter, 488 ibmveth_free_buffer_pool(adapter,
474 &adapter->rx_buff_pool[i]); 489 &adapter->rx_buff_pool[i]);
490
491 if (adapter->bounce_buffer != NULL) {
492 if (!dma_mapping_error(adapter->bounce_buffer_dma)) {
493 dma_unmap_single(&adapter->vdev->dev,
494 adapter->bounce_buffer_dma,
495 adapter->netdev->mtu + IBMVETH_BUFF_OH,
496 DMA_BIDIRECTIONAL);
497 adapter->bounce_buffer_dma = DMA_ERROR_CODE;
498 }
499 kfree(adapter->bounce_buffer);
500 adapter->bounce_buffer = NULL;
501 }
475} 502}
476 503
477static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, 504static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
@@ -607,6 +634,24 @@ static int ibmveth_open(struct net_device *netdev)
607 return rc; 634 return rc;
608 } 635 }
609 636
637 adapter->bounce_buffer =
638 kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
639 if (!adapter->bounce_buffer) {
640 ibmveth_error_printk("unable to allocate bounce buffer\n");
641 ibmveth_cleanup(adapter);
642 napi_disable(&adapter->napi);
643 return -ENOMEM;
644 }
645 adapter->bounce_buffer_dma =
646 dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
647 netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
648 if (dma_mapping_error(adapter->bounce_buffer_dma)) {
649 ibmveth_error_printk("unable to map bounce buffer\n");
650 ibmveth_cleanup(adapter);
651 napi_disable(&adapter->napi);
652 return -ENOMEM;
653 }
654
610 ibmveth_debug_printk("initial replenish cycle\n"); 655 ibmveth_debug_printk("initial replenish cycle\n");
611 ibmveth_interrupt(netdev->irq, netdev); 656 ibmveth_interrupt(netdev->irq, netdev);
612 657
@@ -853,10 +898,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
853 unsigned int tx_packets = 0; 898 unsigned int tx_packets = 0;
854 unsigned int tx_send_failed = 0; 899 unsigned int tx_send_failed = 0;
855 unsigned int tx_map_failed = 0; 900 unsigned int tx_map_failed = 0;
901 int used_bounce = 0;
902 unsigned long data_dma_addr;
856 903
857 desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; 904 desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
858 desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data, 905 data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
859 skb->len, DMA_TO_DEVICE); 906 skb->len, DMA_TO_DEVICE);
860 907
861 if (skb->ip_summed == CHECKSUM_PARTIAL && 908 if (skb->ip_summed == CHECKSUM_PARTIAL &&
862 ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { 909 ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -875,12 +922,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
875 buf[1] = 0; 922 buf[1] = 0;
876 } 923 }
877 924
878 if (dma_mapping_error(desc.fields.address)) { 925 if (dma_mapping_error(data_dma_addr)) {
879 ibmveth_error_printk("tx: unable to map xmit buffer\n"); 926 if (!firmware_has_feature(FW_FEATURE_CMO))
927 ibmveth_error_printk("tx: unable to map xmit buffer\n");
928 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
929 skb->len);
930 desc.fields.address = adapter->bounce_buffer_dma;
880 tx_map_failed++; 931 tx_map_failed++;
881 tx_dropped++; 932 used_bounce = 1;
882 goto out; 933 } else
883 } 934 desc.fields.address = data_dma_addr;
884 935
885 /* send the frame. Arbitrarily set retrycount to 1024 */ 936 /* send the frame. Arbitrarily set retrycount to 1024 */
886 correlator = 0; 937 correlator = 0;
@@ -904,8 +955,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
904 netdev->trans_start = jiffies; 955 netdev->trans_start = jiffies;
905 } 956 }
906 957
907 dma_unmap_single(&adapter->vdev->dev, desc.fields.address, 958 if (!used_bounce)
908 skb->len, DMA_TO_DEVICE); 959 dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
960 skb->len, DMA_TO_DEVICE);
909 961
910out: spin_lock_irqsave(&adapter->stats_lock, flags); 962out: spin_lock_irqsave(&adapter->stats_lock, flags);
911 netdev->stats.tx_dropped += tx_dropped; 963 netdev->stats.tx_dropped += tx_dropped;
@@ -1053,9 +1105,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1053static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) 1105static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1054{ 1106{
1055 struct ibmveth_adapter *adapter = dev->priv; 1107 struct ibmveth_adapter *adapter = dev->priv;
1108 struct vio_dev *viodev = adapter->vdev;
1056 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; 1109 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1057 int reinit = 0; 1110 int i;
1058 int i, rc;
1059 1111
1060 if (new_mtu < IBMVETH_MAX_MTU) 1112 if (new_mtu < IBMVETH_MAX_MTU)
1061 return -EINVAL; 1113 return -EINVAL;
@@ -1067,23 +1119,34 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1067 if (i == IbmVethNumBufferPools) 1119 if (i == IbmVethNumBufferPools)
1068 return -EINVAL; 1120 return -EINVAL;
1069 1121
1122 /* Deactivate all the buffer pools so that the next loop can activate
1123 only the buffer pools necessary to hold the new MTU */
1124 for (i = 0; i < IbmVethNumBufferPools; i++)
1125 if (adapter->rx_buff_pool[i].active) {
1126 ibmveth_free_buffer_pool(adapter,
1127 &adapter->rx_buff_pool[i]);
1128 adapter->rx_buff_pool[i].active = 0;
1129 }
1130
1070 /* Look for an active buffer pool that can hold the new MTU */ 1131 /* Look for an active buffer pool that can hold the new MTU */
1071 for(i = 0; i<IbmVethNumBufferPools; i++) { 1132 for(i = 0; i<IbmVethNumBufferPools; i++) {
1072 if (!adapter->rx_buff_pool[i].active) { 1133 adapter->rx_buff_pool[i].active = 1;
1073 adapter->rx_buff_pool[i].active = 1;
1074 reinit = 1;
1075 }
1076 1134
1077 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { 1135 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
1078 if (reinit && netif_running(adapter->netdev)) { 1136 if (netif_running(adapter->netdev)) {
1079 adapter->pool_config = 1; 1137 adapter->pool_config = 1;
1080 ibmveth_close(adapter->netdev); 1138 ibmveth_close(adapter->netdev);
1081 adapter->pool_config = 0; 1139 adapter->pool_config = 0;
1082 dev->mtu = new_mtu; 1140 dev->mtu = new_mtu;
1083 if ((rc = ibmveth_open(adapter->netdev))) 1141 vio_cmo_set_dev_desired(viodev,
1084 return rc; 1142 ibmveth_get_desired_dma
1085 } else 1143 (viodev));
1086 dev->mtu = new_mtu; 1144 return ibmveth_open(adapter->netdev);
1145 }
1146 dev->mtu = new_mtu;
1147 vio_cmo_set_dev_desired(viodev,
1148 ibmveth_get_desired_dma
1149 (viodev));
1087 return 0; 1150 return 0;
1088 } 1151 }
1089 } 1152 }
@@ -1098,6 +1161,46 @@ static void ibmveth_poll_controller(struct net_device *dev)
1098} 1161}
1099#endif 1162#endif
1100 1163
1164/**
1165 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
1166 *
1167 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
1168 *
1169 * Return value:
1170 * Number of bytes of IO data the driver will need to perform well.
1171 */
1172static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1173{
1174 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1175 struct ibmveth_adapter *adapter;
1176 unsigned long ret;
1177 int i;
1178 int rxqentries = 1;
1179
1180 /* netdev inits at probe time along with the structures we need below*/
1181 if (netdev == NULL)
1182 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
1183
1184 adapter = netdev_priv(netdev);
1185
1186 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1187 ret += IOMMU_PAGE_ALIGN(netdev->mtu);
1188
1189 for (i = 0; i < IbmVethNumBufferPools; i++) {
1190 /* add the size of the active receive buffers */
1191 if (adapter->rx_buff_pool[i].active)
1192 ret +=
1193 adapter->rx_buff_pool[i].size *
1194 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1195 buff_size);
1196 rxqentries += adapter->rx_buff_pool[i].size;
1197 }
1198 /* add the size of the receive queue entries */
1199 ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
1200
1201 return ret;
1202}
1203
1101static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 1204static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1102{ 1205{
1103 int rc, i; 1206 int rc, i;
@@ -1242,6 +1345,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
1242 ibmveth_proc_unregister_adapter(adapter); 1345 ibmveth_proc_unregister_adapter(adapter);
1243 1346
1244 free_netdev(netdev); 1347 free_netdev(netdev);
1348 dev_set_drvdata(&dev->dev, NULL);
1349
1245 return 0; 1350 return 0;
1246} 1351}
1247 1352
@@ -1402,14 +1507,15 @@ const char * buf, size_t count)
1402 return -EPERM; 1507 return -EPERM;
1403 } 1508 }
1404 1509
1405 pool->active = 0;
1406 if (netif_running(netdev)) { 1510 if (netif_running(netdev)) {
1407 adapter->pool_config = 1; 1511 adapter->pool_config = 1;
1408 ibmveth_close(netdev); 1512 ibmveth_close(netdev);
1513 pool->active = 0;
1409 adapter->pool_config = 0; 1514 adapter->pool_config = 0;
1410 if ((rc = ibmveth_open(netdev))) 1515 if ((rc = ibmveth_open(netdev)))
1411 return rc; 1516 return rc;
1412 } 1517 }
1518 pool->active = 0;
1413 } 1519 }
1414 } else if (attr == &veth_num_attr) { 1520 } else if (attr == &veth_num_attr) {
1415 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) 1521 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
@@ -1485,6 +1591,7 @@ static struct vio_driver ibmveth_driver = {
1485 .id_table = ibmveth_device_table, 1591 .id_table = ibmveth_device_table,
1486 .probe = ibmveth_probe, 1592 .probe = ibmveth_probe,
1487 .remove = ibmveth_remove, 1593 .remove = ibmveth_remove,
1594 .get_desired_dma = ibmveth_get_desired_dma,
1488 .driver = { 1595 .driver = {
1489 .name = ibmveth_driver_name, 1596 .name = ibmveth_driver_name,
1490 .owner = THIS_MODULE, 1597 .owner = THIS_MODULE,
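
The ibmveth changes above allocate a bounce buffer at open time, map it once for the life of the device, and fall back to it in start_xmit() whenever the per-packet dma_map_single() fails (expected under Cooperative Memory Overcommitment when the partition's IO entitlement runs short). Below is a minimal user-space sketch of that fallback pattern; map_packet(), BOUNCE_SIZE and the demo failure rule are illustrative stand-ins, not driver code.

```c
/* Sketch of the "bounce buffer on mapping failure" pattern.
 * map_packet() stands in for dma_map_single(); it is assumed to fail
 * occasionally, in which case the payload is copied into a buffer that
 * was mapped once at "open" time and stays mapped afterwards. */
#include <stdio.h>
#include <string.h>

#define BOUNCE_SIZE 1522                      /* MTU + overhead, assumed */

static char bounce_buffer[BOUNCE_SIZE];       /* pre-mapped at open time */

/* Pretend DMA mapping: returns NULL on failure (every 3rd length here). */
static void *map_packet(const char *data, size_t len)
{
	return (len % 3 == 0) ? NULL : (void *)data;
}

static int start_xmit(const char *data, size_t len)
{
	int used_bounce = 0;
	void *addr = map_packet(data, len);

	if (!addr) {
		/* Mapping failed: fall back to the pre-mapped bounce
		 * buffer instead of dropping the packet. */
		if (len > BOUNCE_SIZE)
			return -1;
		memcpy(bounce_buffer, data, len);
		addr = bounce_buffer;
		used_bounce = 1;
	}

	/* "Transmit" from addr; in the driver only the direct per-packet
	 * mapping is unmapped afterwards -- the bounce buffer stays
	 * mapped until the device is closed. */
	printf("sent %zu bytes from %p via %s\n", len, addr,
	       used_bounce ? "bounce buffer" : "direct mapping");
	return 0;
}

int main(void)
{
	start_xmit("abcdef", 6);	/* forced onto the bounce buffer */
	start_xmit("abcdefg", 7);	/* mapped directly */
	return 0;
}
```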
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 41f61cd18852..d28186948752 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -93,9 +93,12 @@ static inline long h_illan_attributes(unsigned long unit_address,
93 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) 93 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
94 94
95#define IbmVethNumBufferPools 5 95#define IbmVethNumBufferPools 5
96#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
96#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ 97#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
97#define IBMVETH_MAX_MTU 68 98#define IBMVETH_MAX_MTU 68
98#define IBMVETH_MAX_POOL_COUNT 4096 99#define IBMVETH_MAX_POOL_COUNT 4096
100#define IBMVETH_BUFF_LIST_SIZE 4096
101#define IBMVETH_FILT_LIST_SIZE 4096
99#define IBMVETH_MAX_BUF_SIZE (1024 * 128) 102#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
100 103
101static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; 104static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
@@ -143,6 +146,8 @@ struct ibmveth_adapter {
143 struct ibmveth_rx_q rx_queue; 146 struct ibmveth_rx_q rx_queue;
144 int pool_config; 147 int pool_config;
145 int rx_csum; 148 int rx_csum;
149 void *bounce_buffer;
150 dma_addr_t bounce_buffer_dma;
146 151
147 /* adapter specific stats */ 152 /* adapter specific stats */
148 u64 replenish_task_cycles; 153 u64 replenish_task_cycles;
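
Together with the ibmveth.c hunk above, the new IBMVETH_BUFF_LIST_SIZE and IBMVETH_FILT_LIST_SIZE constants feed ibmveth_get_desired_dma(): the desired IO entitlement is the buffer list plus the filter list plus one MTU-sized mapping plus every active receive pool plus the receive queue, each rounded up to the IOMMU page size. A rough stand-alone version of that arithmetic is sketched below; the pool counts, the 4 KiB IOMMU page and the receive-queue entry size are assumptions chosen for illustration.

```c
/* Back-of-the-envelope version of the ibmveth_get_desired_dma() sum.
 * Pool counts, IOMMU page size and rx-queue entry size are assumed. */
#include <stdio.h>

#define IOMMU_PAGE	4096UL
#define ALIGN_UP(x)	((((x) + IOMMU_PAGE - 1) / IOMMU_PAGE) * IOMMU_PAGE)

#define BUFF_LIST_SIZE	4096UL
#define FILT_LIST_SIZE	4096UL
#define RXQ_ENTRY_SIZE	16UL		/* assumed sizeof(rx queue entry) */

struct pool { unsigned long size, buff_size; int active; };

int main(void)
{
	/* Buffer sizes follow pool_size[] above; counts are assumed. */
	struct pool pools[] = {
		{ 512, 512, 1 }, { 1024, 2048, 1 }, { 256, 16384, 0 },
		{ 256, 32768, 0 }, { 64, 65536, 0 },
	};
	unsigned long mtu = 1500, ret, rxqentries = 1;
	unsigned int i;

	ret = BUFF_LIST_SIZE + FILT_LIST_SIZE + ALIGN_UP(mtu);
	for (i = 0; i < sizeof(pools) / sizeof(pools[0]); i++) {
		if (pools[i].active)
			ret += pools[i].size * ALIGN_UP(pools[i].buff_size);
		rxqentries += pools[i].size;
	}
	ret += ALIGN_UP(rxqentries * RXQ_ENTRY_SIZE);

	printf("desired DMA: %lu bytes\n", ret);
	return 0;
}
```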
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 70dff94a8bc6..04d5bc69a6f8 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -67,6 +67,8 @@ enum {
67 CMD_STAT_BAD_INDEX = 0x0a, 67 CMD_STAT_BAD_INDEX = 0x0a,
68 /* FW image corrupted: */ 68 /* FW image corrupted: */
69 CMD_STAT_BAD_NVMEM = 0x0b, 69 CMD_STAT_BAD_NVMEM = 0x0b,
70 /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
71 CMD_STAT_ICM_ERROR = 0x0c,
70 /* Attempt to modify a QP/EE which is not in the presumed state: */ 72 /* Attempt to modify a QP/EE which is not in the presumed state: */
71 CMD_STAT_BAD_QP_STATE = 0x10, 73 CMD_STAT_BAD_QP_STATE = 0x10,
72 /* Bad segment parameters (Address/Size): */ 74 /* Bad segment parameters (Address/Size): */
@@ -119,6 +121,7 @@ static int mlx4_status_to_errno(u8 status)
119 [CMD_STAT_BAD_RES_STATE] = -EBADF, 121 [CMD_STAT_BAD_RES_STATE] = -EBADF,
120 [CMD_STAT_BAD_INDEX] = -EBADF, 122 [CMD_STAT_BAD_INDEX] = -EBADF,
121 [CMD_STAT_BAD_NVMEM] = -EFAULT, 123 [CMD_STAT_BAD_NVMEM] = -EFAULT,
124 [CMD_STAT_ICM_ERROR] = -ENFILE,
122 [CMD_STAT_BAD_QP_STATE] = -EINVAL, 125 [CMD_STAT_BAD_QP_STATE] = -EINVAL,
123 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, 126 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
124 [CMD_STAT_REG_BOUND] = -EBUSY, 127 [CMD_STAT_REG_BOUND] = -EBUSY,
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index e141a1513f07..ea3a09aaa844 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/mm.h>
36#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
37 38
38#include <linux/mlx4/cmd.h> 39#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 2b5006b9be67..57278224ba1e 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -46,6 +46,10 @@ enum {
46extern void __buggy_use_of_MLX4_GET(void); 46extern void __buggy_use_of_MLX4_GET(void);
47extern void __buggy_use_of_MLX4_PUT(void); 47extern void __buggy_use_of_MLX4_PUT(void);
48 48
49static int enable_qos;
50module_param(enable_qos, bool, 0444);
51MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
52
49#define MLX4_GET(dest, source, offset) \ 53#define MLX4_GET(dest, source, offset) \
50 do { \ 54 do { \
51 void *__p = (char *) (source) + (offset); \ 55 void *__p = (char *) (source) + (offset); \
@@ -198,7 +202,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
198#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e 202#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
199#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 203#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
200#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 204#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
201#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97 205#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
202#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 206#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
203#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 207#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
204 208
@@ -373,12 +377,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
373 } 377 }
374 } 378 }
375 379
376 if (dev_cap->bmme_flags & 1) 380 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
377 mlx4_dbg(dev, "Base MM extensions: yes " 381 dev_cap->bmme_flags, dev_cap->reserved_lkey);
378 "(flags %d, rsvd L_Key %08x)\n",
379 dev_cap->bmme_flags, dev_cap->reserved_lkey);
380 else
381 mlx4_dbg(dev, "Base MM extensions: no\n");
382 382
383 /* 383 /*
384 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then 384 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
@@ -737,6 +737,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
737 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) 737 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
738 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); 738 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
739 739
740 /* Enable QoS support if module parameter set */
741 if (enable_qos)
742 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
743
740 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 744 /* QPC/EEC/CQC/EQC/RDMARC attributes */
741 745
742 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 746 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index a0e046c149b7..fbf0e22be122 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -98,7 +98,7 @@ struct mlx4_dev_cap {
98 int cmpt_entry_sz; 98 int cmpt_entry_sz;
99 int mtt_entry_sz; 99 int mtt_entry_sz;
100 int resize_srq; 100 int resize_srq;
101 u8 bmme_flags; 101 u32 bmme_flags;
102 u32 reserved_lkey; 102 u32 reserved_lkey;
103 u64 max_icm_sz; 103 u64 max_icm_sz;
104 int max_gso_sz; 104 int max_gso_sz;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index d3736013fe9b..8e1d24cda1b0 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -158,6 +158,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
158 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 158 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
159 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 159 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
160 dev->caps.flags = dev_cap->flags; 160 dev->caps.flags = dev_cap->flags;
161 dev->caps.bmme_flags = dev_cap->bmme_flags;
162 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
161 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 163 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
162 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 164 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
163 165
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index a4023c2dd050..78038499cff5 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -118,6 +118,7 @@ struct mlx4_bitmap {
118 118
119struct mlx4_buddy { 119struct mlx4_buddy {
120 unsigned long **bits; 120 unsigned long **bits;
121 unsigned int *num_free;
121 int max_order; 122 int max_order;
122 spinlock_t lock; 123 spinlock_t lock;
123}; 124};
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 03a9abcce524..a3c04c5f12c2 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -47,7 +47,7 @@ struct mlx4_mpt_entry {
47 __be32 flags; 47 __be32 flags;
48 __be32 qpn; 48 __be32 qpn;
49 __be32 key; 49 __be32 key;
50 __be32 pd; 50 __be32 pd_flags;
51 __be64 start; 51 __be64 start;
52 __be64 length; 52 __be64 length;
53 __be32 lkey; 53 __be32 lkey;
@@ -61,11 +61,15 @@ struct mlx4_mpt_entry {
61} __attribute__((packed)); 61} __attribute__((packed));
62 62
63#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) 63#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
64#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
64#define MLX4_MPT_FLAG_MIO (1 << 17) 65#define MLX4_MPT_FLAG_MIO (1 << 17)
65#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) 66#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
66#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) 67#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
67#define MLX4_MPT_FLAG_REGION (1 << 8) 68#define MLX4_MPT_FLAG_REGION (1 << 8)
68 69
70#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 26)
71#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
72
69#define MLX4_MTT_FLAG_PRESENT 1 73#define MLX4_MTT_FLAG_PRESENT 1
70 74
71#define MLX4_MPT_STATUS_SW 0xF0 75#define MLX4_MPT_STATUS_SW 0xF0
@@ -79,23 +83,26 @@ static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
79 83
80 spin_lock(&buddy->lock); 84 spin_lock(&buddy->lock);
81 85
82 for (o = order; o <= buddy->max_order; ++o) { 86 for (o = order; o <= buddy->max_order; ++o)
83 m = 1 << (buddy->max_order - o); 87 if (buddy->num_free[o]) {
84 seg = find_first_bit(buddy->bits[o], m); 88 m = 1 << (buddy->max_order - o);
85 if (seg < m) 89 seg = find_first_bit(buddy->bits[o], m);
86 goto found; 90 if (seg < m)
87 } 91 goto found;
92 }
88 93
89 spin_unlock(&buddy->lock); 94 spin_unlock(&buddy->lock);
90 return -1; 95 return -1;
91 96
92 found: 97 found:
93 clear_bit(seg, buddy->bits[o]); 98 clear_bit(seg, buddy->bits[o]);
99 --buddy->num_free[o];
94 100
95 while (o > order) { 101 while (o > order) {
96 --o; 102 --o;
97 seg <<= 1; 103 seg <<= 1;
98 set_bit(seg ^ 1, buddy->bits[o]); 104 set_bit(seg ^ 1, buddy->bits[o]);
105 ++buddy->num_free[o];
99 } 106 }
100 107
101 spin_unlock(&buddy->lock); 108 spin_unlock(&buddy->lock);
@@ -113,11 +120,13 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
113 120
114 while (test_bit(seg ^ 1, buddy->bits[order])) { 121 while (test_bit(seg ^ 1, buddy->bits[order])) {
115 clear_bit(seg ^ 1, buddy->bits[order]); 122 clear_bit(seg ^ 1, buddy->bits[order]);
123 --buddy->num_free[order];
116 seg >>= 1; 124 seg >>= 1;
117 ++order; 125 ++order;
118 } 126 }
119 127
120 set_bit(seg, buddy->bits[order]); 128 set_bit(seg, buddy->bits[order]);
129 ++buddy->num_free[order];
121 130
122 spin_unlock(&buddy->lock); 131 spin_unlock(&buddy->lock);
123} 132}
@@ -131,7 +140,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
131 140
132 buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), 141 buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
133 GFP_KERNEL); 142 GFP_KERNEL);
134 if (!buddy->bits) 143 buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
144 GFP_KERNEL);
145 if (!buddy->bits || !buddy->num_free)
135 goto err_out; 146 goto err_out;
136 147
137 for (i = 0; i <= buddy->max_order; ++i) { 148 for (i = 0; i <= buddy->max_order; ++i) {
@@ -143,6 +154,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
143 } 154 }
144 155
145 set_bit(0, buddy->bits[buddy->max_order]); 156 set_bit(0, buddy->bits[buddy->max_order]);
157 buddy->num_free[buddy->max_order] = 1;
146 158
147 return 0; 159 return 0;
148 160
@@ -150,9 +162,10 @@ err_out_free:
150 for (i = 0; i <= buddy->max_order; ++i) 162 for (i = 0; i <= buddy->max_order; ++i)
151 kfree(buddy->bits[i]); 163 kfree(buddy->bits[i]);
152 164
165err_out:
153 kfree(buddy->bits); 166 kfree(buddy->bits);
167 kfree(buddy->num_free);
154 168
155err_out:
156 return -ENOMEM; 169 return -ENOMEM;
157} 170}
158 171
@@ -164,6 +177,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
164 kfree(buddy->bits[i]); 177 kfree(buddy->bits[i]);
165 178
166 kfree(buddy->bits); 179 kfree(buddy->bits);
180 kfree(buddy->num_free);
167} 181}
168 182
169static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) 183static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
@@ -314,21 +328,30 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
314 328
315 memset(mpt_entry, 0, sizeof *mpt_entry); 329 memset(mpt_entry, 0, sizeof *mpt_entry);
316 330
317 mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS | 331 mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
318 MLX4_MPT_FLAG_MIO |
319 MLX4_MPT_FLAG_REGION | 332 MLX4_MPT_FLAG_REGION |
320 mr->access); 333 mr->access);
321 334
322 mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); 335 mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
323 mpt_entry->pd = cpu_to_be32(mr->pd); 336 mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
324 mpt_entry->start = cpu_to_be64(mr->iova); 337 mpt_entry->start = cpu_to_be64(mr->iova);
325 mpt_entry->length = cpu_to_be64(mr->size); 338 mpt_entry->length = cpu_to_be64(mr->size);
326 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); 339 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
340
327 if (mr->mtt.order < 0) { 341 if (mr->mtt.order < 0) {
328 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); 342 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
329 mpt_entry->mtt_seg = 0; 343 mpt_entry->mtt_seg = 0;
330 } else 344 } else {
331 mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); 345 mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
346 }
347
348 if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
349 /* fast register MR in free state */
350 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
351 mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG);
352 } else {
353 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
354 }
332 355
333 err = mlx4_SW2HW_MPT(dev, mailbox, 356 err = mlx4_SW2HW_MPT(dev, mailbox,
334 key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); 357 key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
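
The mr.c change keeps a num_free[] counter per order so mlx4_buddy_alloc() only calls find_first_bit() on orders that actually have a free segment, instead of scanning an empty bitmap at every level. A compact user-space buddy allocator demonstrating the same bookkeeping (decrement on allocation, increment on each split and on free/merge) is sketched below; it uses plain byte arrays rather than the kernel bitmap helpers.

```c
/* Minimal buddy allocator illustrating the num_free[] fast path:
 * an order is scanned only when its free counter is non-zero. */
#include <stdio.h>
#include <string.h>

#define MAX_ORDER 4				/* 16 minimum-size segments */
#define SEGS(o)   (1 << (MAX_ORDER - (o)))

static unsigned char bits[MAX_ORDER + 1][1 << MAX_ORDER];	/* 1 = free */
static unsigned int  num_free[MAX_ORDER + 1];

static void buddy_init(void)
{
	memset(bits, 0, sizeof(bits));
	memset(num_free, 0, sizeof(num_free));
	bits[MAX_ORDER][0] = 1;			/* one maximal free block */
	num_free[MAX_ORDER] = 1;
}

static int buddy_alloc(int order)
{
	int o, seg;

	for (o = order; o <= MAX_ORDER; ++o) {
		if (!num_free[o])		/* skip empty orders */
			continue;
		for (seg = 0; seg < SEGS(o); ++seg)
			if (bits[o][seg])
				goto found;
	}
	return -1;

found:
	bits[o][seg] = 0;
	--num_free[o];
	while (o > order) {			/* split, freeing the buddy */
		--o;
		seg <<= 1;
		bits[o][seg ^ 1] = 1;
		++num_free[o];
	}
	return seg;
}

static void buddy_free(int seg, int order)
{
	while (order < MAX_ORDER && bits[order][seg ^ 1]) {
		bits[order][seg ^ 1] = 0;	/* merge with the buddy */
		--num_free[order];
		seg >>= 1;
		++order;
	}
	bits[order][seg] = 1;
	++num_free[order];
}

int main(void)
{
	buddy_init();
	int a = buddy_alloc(1), b = buddy_alloc(0);
	printf("alloc order1 -> %d, order0 -> %d\n", a, b);
	buddy_free(b, 0);
	buddy_free(a, 1);
	printf("top order free count back to %u\n", num_free[MAX_ORDER]);
	return 0;
}
```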
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 3a93c5f0f7ab..aa616892d09c 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -91,6 +91,13 @@ EXPORT_SYMBOL_GPL(mlx4_uar_free);
91 91
92int mlx4_init_uar_table(struct mlx4_dev *dev) 92int mlx4_init_uar_table(struct mlx4_dev *dev)
93{ 93{
94 if (dev->caps.num_uars <= 128) {
95 mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
96 dev->caps.num_uars);
97 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
98 return -ENODEV;
99 }
100
94 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, 101 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
95 dev->caps.num_uars, dev->caps.num_uars - 1, 102 dev->caps.num_uars, dev->caps.num_uars - 1,
96 max(128, dev->caps.reserved_uars)); 103 max(128, dev->caps.reserved_uars));
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index f2051b209da2..2040965d7724 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -308,7 +308,7 @@ static void smc_reset(struct net_device *dev)
308 * can't handle it then there will be no recovery except for 308 * can't handle it then there will be no recovery except for
309 * a hard reset or power cycle 309 * a hard reset or power cycle
310 */ 310 */
311 if (nowait) 311 if (lp->cfg.flags & SMC91X_NOWAIT)
312 cfg |= CONFIG_NO_WAIT; 312 cfg |= CONFIG_NO_WAIT;
313 313
314 /* 314 /*
@@ -1939,8 +1939,11 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1939 if (retval) 1939 if (retval)
1940 goto err_out; 1940 goto err_out;
1941 1941
1942#ifdef SMC_USE_PXA_DMA 1942#ifdef CONFIG_ARCH_PXA
1943 { 1943# ifdef SMC_USE_PXA_DMA
1944 lp->cfg.flags |= SMC91X_USE_DMA;
1945# endif
1946 if (lp->cfg.flags & SMC91X_USE_DMA) {
1944 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, 1947 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
1945 smc_pxa_dma_irq, NULL); 1948 smc_pxa_dma_irq, NULL);
1946 if (dma >= 0) 1949 if (dma >= 0)
@@ -1980,7 +1983,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1980 } 1983 }
1981 1984
1982err_out: 1985err_out:
1983#ifdef SMC_USE_PXA_DMA 1986#ifdef CONFIG_ARCH_PXA
1984 if (retval && dev->dma != (unsigned char)-1) 1987 if (retval && dev->dma != (unsigned char)-1)
1985 pxa_free_dma(dev->dma); 1988 pxa_free_dma(dev->dma);
1986#endif 1989#endif
@@ -2050,9 +2053,11 @@ static int smc_enable_device(struct platform_device *pdev)
2050 return 0; 2053 return 0;
2051} 2054}
2052 2055
2053static int smc_request_attrib(struct platform_device *pdev) 2056static int smc_request_attrib(struct platform_device *pdev,
2057 struct net_device *ndev)
2054{ 2058{
2055 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2059 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2060 struct smc_local *lp = netdev_priv(ndev);
2056 2061
2057 if (!res) 2062 if (!res)
2058 return 0; 2063 return 0;
@@ -2063,9 +2068,11 @@ static int smc_request_attrib(struct platform_device *pdev)
2063 return 0; 2068 return 0;
2064} 2069}
2065 2070
2066static void smc_release_attrib(struct platform_device *pdev) 2071static void smc_release_attrib(struct platform_device *pdev,
2072 struct net_device *ndev)
2067{ 2073{
2068 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2074 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2075 struct smc_local *lp = netdev_priv(ndev);
2069 2076
2070 if (res) 2077 if (res)
2071 release_mem_region(res->start, ATTRIB_SIZE); 2078 release_mem_region(res->start, ATTRIB_SIZE);
@@ -2123,27 +2130,14 @@ static int smc_drv_probe(struct platform_device *pdev)
2123 struct net_device *ndev; 2130 struct net_device *ndev;
2124 struct resource *res, *ires; 2131 struct resource *res, *ires;
2125 unsigned int __iomem *addr; 2132 unsigned int __iomem *addr;
2133 unsigned long irq_flags = SMC_IRQ_FLAGS;
2126 int ret; 2134 int ret;
2127 2135
2128 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2129 if (!res)
2130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2131 if (!res) {
2132 ret = -ENODEV;
2133 goto out;
2134 }
2135
2136
2137 if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
2138 ret = -EBUSY;
2139 goto out;
2140 }
2141
2142 ndev = alloc_etherdev(sizeof(struct smc_local)); 2136 ndev = alloc_etherdev(sizeof(struct smc_local));
2143 if (!ndev) { 2137 if (!ndev) {
2144 printk("%s: could not allocate device.\n", CARDNAME); 2138 printk("%s: could not allocate device.\n", CARDNAME);
2145 ret = -ENOMEM; 2139 ret = -ENOMEM;
2146 goto out_release_io; 2140 goto out;
2147 } 2141 }
2148 SET_NETDEV_DEV(ndev, &pdev->dev); 2142 SET_NETDEV_DEV(ndev, &pdev->dev);
2149 2143
@@ -2152,37 +2146,47 @@ static int smc_drv_probe(struct platform_device *pdev)
2152 */ 2146 */
2153 2147
2154 lp = netdev_priv(ndev); 2148 lp = netdev_priv(ndev);
2155 lp->cfg.irq_flags = SMC_IRQ_FLAGS;
2156 2149
2157#ifdef SMC_DYNAMIC_BUS_CONFIG 2150 if (pd) {
2158 if (pd)
2159 memcpy(&lp->cfg, pd, sizeof(lp->cfg)); 2151 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2160 else { 2152 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
2161 lp->cfg.flags = SMC91X_USE_8BIT; 2153 } else {
2162 lp->cfg.flags |= SMC91X_USE_16BIT; 2154 lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
2163 lp->cfg.flags |= SMC91X_USE_32BIT; 2155 lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
2156 lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
2157 lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
2164 } 2158 }
2165 2159
2166 lp->cfg.flags &= ~(SMC_CAN_USE_8BIT ? 0 : SMC91X_USE_8BIT);
2167 lp->cfg.flags &= ~(SMC_CAN_USE_16BIT ? 0 : SMC91X_USE_16BIT);
2168 lp->cfg.flags &= ~(SMC_CAN_USE_32BIT ? 0 : SMC91X_USE_32BIT);
2169#endif
2170
2171 ndev->dma = (unsigned char)-1; 2160 ndev->dma = (unsigned char)-1;
2172 2161
2162 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2163 if (!res)
2164 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2165 if (!res) {
2166 ret = -ENODEV;
2167 goto out_free_netdev;
2168 }
2169
2170
2171 if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
2172 ret = -EBUSY;
2173 goto out_free_netdev;
2174 }
2175
2173 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2176 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2174 if (!ires) { 2177 if (!ires) {
2175 ret = -ENODEV; 2178 ret = -ENODEV;
2176 goto out_free_netdev; 2179 goto out_release_io;
2177 } 2180 }
2178 2181
2179 ndev->irq = ires->start; 2182 ndev->irq = ires->start;
2180 if (SMC_IRQ_FLAGS == -1)
2181 lp->cfg.irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2182 2183
2183 ret = smc_request_attrib(pdev); 2184 if (ires->flags & IRQF_TRIGGER_MASK)
2185 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2186
2187 ret = smc_request_attrib(pdev, ndev);
2184 if (ret) 2188 if (ret)
2185 goto out_free_netdev; 2189 goto out_release_io;
2186#if defined(CONFIG_SA1100_ASSABET) 2190#if defined(CONFIG_SA1100_ASSABET)
2187 NCR_0 |= NCR_ENET_OSC_EN; 2191 NCR_0 |= NCR_ENET_OSC_EN;
2188#endif 2192#endif
@@ -2197,7 +2201,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2197 goto out_release_attrib; 2201 goto out_release_attrib;
2198 } 2202 }
2199 2203
2200#ifdef SMC_USE_PXA_DMA 2204#ifdef CONFIG_ARCH_PXA
2201 { 2205 {
2202 struct smc_local *lp = netdev_priv(ndev); 2206 struct smc_local *lp = netdev_priv(ndev);
2203 lp->device = &pdev->dev; 2207 lp->device = &pdev->dev;
@@ -2205,7 +2209,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2205 } 2209 }
2206#endif 2210#endif
2207 2211
2208 ret = smc_probe(ndev, addr, lp->cfg.irq_flags); 2212 ret = smc_probe(ndev, addr, irq_flags);
2209 if (ret != 0) 2213 if (ret != 0)
2210 goto out_iounmap; 2214 goto out_iounmap;
2211 2215
@@ -2217,11 +2221,11 @@ static int smc_drv_probe(struct platform_device *pdev)
2217 platform_set_drvdata(pdev, NULL); 2221 platform_set_drvdata(pdev, NULL);
2218 iounmap(addr); 2222 iounmap(addr);
2219 out_release_attrib: 2223 out_release_attrib:
2220 smc_release_attrib(pdev); 2224 smc_release_attrib(pdev, ndev);
2221 out_free_netdev:
2222 free_netdev(ndev);
2223 out_release_io: 2225 out_release_io:
2224 release_mem_region(res->start, SMC_IO_EXTENT); 2226 release_mem_region(res->start, SMC_IO_EXTENT);
2227 out_free_netdev:
2228 free_netdev(ndev);
2225 out: 2229 out:
2226 printk("%s: not found (%d).\n", CARDNAME, ret); 2230 printk("%s: not found (%d).\n", CARDNAME, ret);
2227 2231
@@ -2240,14 +2244,14 @@ static int smc_drv_remove(struct platform_device *pdev)
2240 2244
2241 free_irq(ndev->irq, ndev); 2245 free_irq(ndev->irq, ndev);
2242 2246
2243#ifdef SMC_USE_PXA_DMA 2247#ifdef CONFIG_ARCH_PXA
2244 if (ndev->dma != (unsigned char)-1) 2248 if (ndev->dma != (unsigned char)-1)
2245 pxa_free_dma(ndev->dma); 2249 pxa_free_dma(ndev->dma);
2246#endif 2250#endif
2247 iounmap(lp->base); 2251 iounmap(lp->base);
2248 2252
2249 smc_release_datacs(pdev,ndev); 2253 smc_release_datacs(pdev,ndev);
2250 smc_release_attrib(pdev); 2254 smc_release_attrib(pdev,ndev);
2251 2255
2252 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); 2256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2253 if (!res) 2257 if (!res)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8606818653f8..22209b6f1405 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -40,23 +40,46 @@
40 * Define your architecture specific bus configuration parameters here. 40 * Define your architecture specific bus configuration parameters here.
41 */ 41 */
42 42
43#if defined(CONFIG_ARCH_LUBBOCK) 43#if defined(CONFIG_ARCH_LUBBOCK) ||\
44 defined(CONFIG_MACH_MAINSTONE) ||\
45 defined(CONFIG_MACH_ZYLONITE) ||\
46 defined(CONFIG_MACH_LITTLETON)
44 47
45/* We can only do 16-bit reads and writes in the static memory space. */ 48#include <asm/mach-types.h>
46#define SMC_CAN_USE_8BIT 0 49
50/* Now the bus width is specified in the platform data
51 * pretend here to support all I/O access types
52 */
53#define SMC_CAN_USE_8BIT 1
47#define SMC_CAN_USE_16BIT 1 54#define SMC_CAN_USE_16BIT 1
48#define SMC_CAN_USE_32BIT 0 55#define SMC_CAN_USE_32BIT 1
49#define SMC_NOWAIT 1 56#define SMC_NOWAIT 1
50 57
51/* The first two address lines aren't connected... */ 58#define SMC_IO_SHIFT (lp->io_shift)
52#define SMC_IO_SHIFT 2
53 59
60#define SMC_inb(a, r) readb((a) + (r))
54#define SMC_inw(a, r) readw((a) + (r)) 61#define SMC_inw(a, r) readw((a) + (r))
55#define SMC_outw(v, a, r) writew(v, (a) + (r)) 62#define SMC_inl(a, r) readl((a) + (r))
63#define SMC_outb(v, a, r) writeb(v, (a) + (r))
64#define SMC_outl(v, a, r) writel(v, (a) + (r))
56#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 65#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
57#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 66#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
67#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
68#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
58#define SMC_IRQ_FLAGS (-1) /* from resource */ 69#define SMC_IRQ_FLAGS (-1) /* from resource */
59 70
71/* We actually can't write halfwords properly if not word aligned */
72static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
73{
74 if (machine_is_mainstone() && reg & 2) {
75 unsigned int v = val << 16;
76 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
77 writel(v, ioaddr + (reg & ~2));
78 } else {
79 writew(val, ioaddr + reg);
80 }
81}
82
60#elif defined(CONFIG_BLACKFIN) 83#elif defined(CONFIG_BLACKFIN)
61 84
62#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH 85#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
@@ -195,7 +218,6 @@
195#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 218#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
196 219
197#elif defined(CONFIG_ARCH_INNOKOM) || \ 220#elif defined(CONFIG_ARCH_INNOKOM) || \
198 defined(CONFIG_MACH_MAINSTONE) || \
199 defined(CONFIG_ARCH_PXA_IDP) || \ 221 defined(CONFIG_ARCH_PXA_IDP) || \
200 defined(CONFIG_ARCH_RAMSES) || \ 222 defined(CONFIG_ARCH_RAMSES) || \
201 defined(CONFIG_ARCH_PCM027) 223 defined(CONFIG_ARCH_PCM027)
@@ -229,22 +251,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
229 } 251 }
230} 252}
231 253
232#elif defined(CONFIG_MACH_ZYLONITE)
233
234#define SMC_CAN_USE_8BIT 1
235#define SMC_CAN_USE_16BIT 1
236#define SMC_CAN_USE_32BIT 0
237#define SMC_IO_SHIFT 0
238#define SMC_NOWAIT 1
239#define SMC_USE_PXA_DMA 1
240#define SMC_inb(a, r) readb((a) + (r))
241#define SMC_inw(a, r) readw((a) + (r))
242#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
243#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
244#define SMC_outb(v, a, r) writeb(v, (a) + (r))
245#define SMC_outw(v, a, r) writew(v, (a) + (r))
246#define SMC_IRQ_FLAGS (-1) /* from resource */
247
248#elif defined(CONFIG_ARCH_OMAP) 254#elif defined(CONFIG_ARCH_OMAP)
249 255
250/* We can only do 16-bit reads and writes in the static memory space. */ 256/* We can only do 16-bit reads and writes in the static memory space. */
@@ -454,7 +460,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
454#define RPC_LSA_DEFAULT RPC_LED_100_10 460#define RPC_LSA_DEFAULT RPC_LED_100_10
455#define RPC_LSB_DEFAULT RPC_LED_TX_RX 461#define RPC_LSB_DEFAULT RPC_LED_TX_RX
456 462
457#define SMC_DYNAMIC_BUS_CONFIG
458#endif 463#endif
459 464
460 465
@@ -493,7 +498,7 @@ struct smc_local {
493 498
494 spinlock_t lock; 499 spinlock_t lock;
495 500
496#ifdef SMC_USE_PXA_DMA 501#ifdef CONFIG_ARCH_PXA
497 /* DMA needs the physical address of the chip */ 502 /* DMA needs the physical address of the chip */
498 u_long physaddr; 503 u_long physaddr;
499 struct device *device; 504 struct device *device;
@@ -501,20 +506,17 @@ struct smc_local {
501 void __iomem *base; 506 void __iomem *base;
502 void __iomem *datacs; 507 void __iomem *datacs;
503 508
509 /* the low address lines on some platforms aren't connected... */
510 int io_shift;
511
504 struct smc91x_platdata cfg; 512 struct smc91x_platdata cfg;
505}; 513};
506 514
507#ifdef SMC_DYNAMIC_BUS_CONFIG 515#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
508#define SMC_8BIT(p) (((p)->cfg.flags & SMC91X_USE_8BIT) && SMC_CAN_USE_8BIT) 516#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
509#define SMC_16BIT(p) (((p)->cfg.flags & SMC91X_USE_16BIT) && SMC_CAN_USE_16BIT) 517#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)
510#define SMC_32BIT(p) (((p)->cfg.flags & SMC91X_USE_32BIT) && SMC_CAN_USE_32BIT)
511#else
512#define SMC_8BIT(p) SMC_CAN_USE_8BIT
513#define SMC_16BIT(p) SMC_CAN_USE_16BIT
514#define SMC_32BIT(p) SMC_CAN_USE_32BIT
515#endif
516 518
517#ifdef SMC_USE_PXA_DMA 519#ifdef CONFIG_ARCH_PXA
518/* 520/*
519 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is 521 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
520 * always happening in irq context so no need to worry about races. TX is 522 * always happening in irq context so no need to worry about races. TX is
@@ -608,7 +610,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
608{ 610{
609 DCSR(dma) = 0; 611 DCSR(dma) = 0;
610} 612}
611#endif /* SMC_USE_PXA_DMA */ 613#endif /* CONFIG_ARCH_PXA */
612 614
613 615
614/* 616/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c28d7cb2035b..0196a0df9021 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -19,6 +19,7 @@
19//#define DEBUG 19//#define DEBUG
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
22#include <linux/ethtool.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/virtio.h> 24#include <linux/virtio.h>
24#include <linux/virtio_net.h> 25#include <linux/virtio_net.h>
@@ -54,9 +55,15 @@ struct virtnet_info
54 struct tasklet_struct tasklet; 55 struct tasklet_struct tasklet;
55 bool free_in_tasklet; 56 bool free_in_tasklet;
56 57
58 /* I like... big packets and I cannot lie! */
59 bool big_packets;
60
57 /* Receive & send queues. */ 61 /* Receive & send queues. */
58 struct sk_buff_head recv; 62 struct sk_buff_head recv;
59 struct sk_buff_head send; 63 struct sk_buff_head send;
64
65 /* Chain pages by the private ptr. */
66 struct page *pages;
60}; 67};
61 68
62static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb) 69static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
@@ -69,6 +76,23 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
69 sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr)); 76 sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
70} 77}
71 78
79static void give_a_page(struct virtnet_info *vi, struct page *page)
80{
81 page->private = (unsigned long)vi->pages;
82 vi->pages = page;
83}
84
85static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
86{
87 struct page *p = vi->pages;
88
89 if (p)
90 vi->pages = (struct page *)p->private;
91 else
92 p = alloc_page(gfp_mask);
93 return p;
94}
95
72static void skb_xmit_done(struct virtqueue *svq) 96static void skb_xmit_done(struct virtqueue *svq)
73{ 97{
74 struct virtnet_info *vi = svq->vdev->priv; 98 struct virtnet_info *vi = svq->vdev->priv;
@@ -88,6 +112,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
88 unsigned len) 112 unsigned len)
89{ 113{
90 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb); 114 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
115 int err;
91 116
92 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { 117 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
93 pr_debug("%s: short packet %i\n", dev->name, len); 118 pr_debug("%s: short packet %i\n", dev->name, len);
@@ -95,10 +120,23 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
95 goto drop; 120 goto drop;
96 } 121 }
97 len -= sizeof(struct virtio_net_hdr); 122 len -= sizeof(struct virtio_net_hdr);
98 BUG_ON(len > MAX_PACKET_LEN);
99 123
100 skb_trim(skb, len); 124 if (len <= MAX_PACKET_LEN) {
125 unsigned int i;
101 126
127 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
128 give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page);
129 skb->data_len = 0;
130 skb_shinfo(skb)->nr_frags = 0;
131 }
132
133 err = pskb_trim(skb, len);
134 if (err) {
135 pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
136 dev->stats.rx_dropped++;
137 goto drop;
138 }
139 skb->truesize += skb->data_len;
102 dev->stats.rx_bytes += skb->len; 140 dev->stats.rx_bytes += skb->len;
103 dev->stats.rx_packets++; 141 dev->stats.rx_packets++;
104 142
@@ -160,7 +198,7 @@ static void try_fill_recv(struct virtnet_info *vi)
160{ 198{
161 struct sk_buff *skb; 199 struct sk_buff *skb;
162 struct scatterlist sg[2+MAX_SKB_FRAGS]; 200 struct scatterlist sg[2+MAX_SKB_FRAGS];
163 int num, err; 201 int num, err, i;
164 202
165 sg_init_table(sg, 2+MAX_SKB_FRAGS); 203 sg_init_table(sg, 2+MAX_SKB_FRAGS);
166 for (;;) { 204 for (;;) {
@@ -170,6 +208,24 @@ static void try_fill_recv(struct virtnet_info *vi)
170 208
171 skb_put(skb, MAX_PACKET_LEN); 209 skb_put(skb, MAX_PACKET_LEN);
172 vnet_hdr_to_sg(sg, skb); 210 vnet_hdr_to_sg(sg, skb);
211
212 if (vi->big_packets) {
213 for (i = 0; i < MAX_SKB_FRAGS; i++) {
214 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
215 f->page = get_a_page(vi, GFP_ATOMIC);
216 if (!f->page)
217 break;
218
219 f->page_offset = 0;
220 f->size = PAGE_SIZE;
221
222 skb->data_len += PAGE_SIZE;
223 skb->len += PAGE_SIZE;
224
225 skb_shinfo(skb)->nr_frags++;
226 }
227 }
228
173 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 229 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
174 skb_queue_head(&vi->recv, skb); 230 skb_queue_head(&vi->recv, skb);
175 231
@@ -335,16 +391,11 @@ again:
335 free_old_xmit_skbs(vi); 391 free_old_xmit_skbs(vi);
336 392
337 /* If we have a buffer left over from last time, send it now. */ 393
338 if (unlikely(vi->last_xmit_skb)) { 394 if (unlikely(vi->last_xmit_skb) &&
339 if (xmit_skb(vi, vi->last_xmit_skb) != 0) { 395 xmit_skb(vi, vi->last_xmit_skb) != 0)
340 /* Drop this skb: we only queue one. */ 396 goto stop_queue;
341 vi->dev->stats.tx_dropped++; 397
342 kfree_skb(skb); 398 vi->last_xmit_skb = NULL;
343 skb = NULL;
344 goto stop_queue;
345 }
346 vi->last_xmit_skb = NULL;
347 }
348 399
349 /* Put new one in send queue and do transmit */ 400 /* Put new one in send queue and do transmit */
350 if (likely(skb)) { 401 if (likely(skb)) {
@@ -370,6 +421,11 @@ stop_queue:
370 netif_start_queue(dev); 421 netif_start_queue(dev);
371 goto again; 422 goto again;
372 } 423 }
424 if (skb) {
425 /* Drop this skb: we only queue one. */
426 vi->dev->stats.tx_dropped++;
427 kfree_skb(skb);
428 }
373 goto done; 429 goto done;
374} 430}
375 431
@@ -408,6 +464,22 @@ static int virtnet_close(struct net_device *dev)
408 return 0; 464 return 0;
409} 465}
410 466
467static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
468{
469 struct virtnet_info *vi = netdev_priv(dev);
470 struct virtio_device *vdev = vi->vdev;
471
472 if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
473 return -ENOSYS;
474
475 return ethtool_op_set_tx_hw_csum(dev, data);
476}
477
478static struct ethtool_ops virtnet_ethtool_ops = {
479 .set_tx_csum = virtnet_set_tx_csum,
480 .set_sg = ethtool_op_set_sg,
481};
482
411static int virtnet_probe(struct virtio_device *vdev) 483static int virtnet_probe(struct virtio_device *vdev)
412{ 484{
413 int err; 485 int err;
@@ -427,6 +499,7 @@ static int virtnet_probe(struct virtio_device *vdev)
427#ifdef CONFIG_NET_POLL_CONTROLLER 499#ifdef CONFIG_NET_POLL_CONTROLLER
428 dev->poll_controller = virtnet_netpoll; 500 dev->poll_controller = virtnet_netpoll;
429#endif 501#endif
502 SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
430 SET_NETDEV_DEV(dev, &vdev->dev); 503 SET_NETDEV_DEV(dev, &vdev->dev);
431 504
432 /* Do we support "hardware" checksums? */ 505 /* Do we support "hardware" checksums? */
@@ -462,11 +535,18 @@ static int virtnet_probe(struct virtio_device *vdev)
462 vi->dev = dev; 535 vi->dev = dev;
463 vi->vdev = vdev; 536 vi->vdev = vdev;
464 vdev->priv = vi; 537 vdev->priv = vi;
538 vi->pages = NULL;
465 539
466 /* If they give us a callback when all buffers are done, we don't need 540 /* If they give us a callback when all buffers are done, we don't need
467 * the timer. */ 541 * the timer. */
468 vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY); 542 vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
469 543
544 /* If we can receive ANY GSO packets, we must allocate large ones. */
545 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
546 || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
547 || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
548 vi->big_packets = true;
549
470 /* We expect two virtqueues, receive then send. */ 550 /* We expect two virtqueues, receive then send. */
471 vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); 551 vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
472 if (IS_ERR(vi->rvq)) { 552 if (IS_ERR(vi->rvq)) {
@@ -541,6 +621,10 @@ static void virtnet_remove(struct virtio_device *vdev)
541 vdev->config->del_vq(vi->svq); 621 vdev->config->del_vq(vi->svq);
542 vdev->config->del_vq(vi->rvq); 622 vdev->config->del_vq(vi->rvq);
543 unregister_netdev(vi->dev); 623 unregister_netdev(vi->dev);
624
625 while (vi->pages)
626 __free_pages(get_a_page(vi, GFP_KERNEL), 0);
627
544 free_netdev(vi->dev); 628 free_netdev(vi->dev);
545} 629}
546 630
@@ -553,7 +637,9 @@ static unsigned int features[] = {
553 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 637 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
554 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 638 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
555 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 639 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
556 VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY, 640 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
641 VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
642 VIRTIO_F_NOTIFY_ON_EMPTY,
557}; 643};
558 644
559static struct virtio_driver virtio_net = { 645static struct virtio_driver virtio_net = {
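
To support big (GSO-sized) receive buffers, virtio_net now chains spare pages through page->private so a fragment page released in receive_skb() can be handed straight back to try_fill_recv() without a round trip through the page allocator. The same freelist idea is sketched below in plain C, with an explicit next pointer standing in for page->private and malloc() standing in for alloc_page().

```c
/* Sketch of the give_a_page()/get_a_page() freelist: recycled pages are
 * pushed onto a singly linked list and only allocated fresh when the
 * list is empty. */
#include <stdio.h>
#include <stdlib.h>

struct page { struct page *next; char data[4096]; };

static struct page *pages;		/* the per-device chain */

static void give_a_page(struct page *p)
{
	p->next = pages;		/* page->private in the driver */
	pages = p;
}

static struct page *get_a_page(void)
{
	struct page *p = pages;

	if (p)
		pages = p->next;	/* reuse a recycled page */
	else
		p = malloc(sizeof(*p));	/* alloc_page() in the driver */
	return p;
}

int main(void)
{
	struct page *a = get_a_page();	/* freshly allocated */
	give_a_page(a);
	struct page *b = get_a_page();	/* recycled: b == a */
	printf("recycled: %s\n", a == b ? "yes" : "no");
	free(b);
	return 0;
}
```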
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 3a7a11a75fb4..1d7ec3129349 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -4,7 +4,7 @@ config OF_DEVICE
4 4
5config OF_GPIO 5config OF_GPIO
6 def_bool y 6 def_bool y
7 depends on OF && PPC_OF && HAVE_GPIO_LIB 7 depends on OF && PPC_OF && GPIOLIB
8 help 8 help
9 OpenFirmware GPIO accessors 9 OpenFirmware GPIO accessors
10 10
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 5c015d310d4a..344e1b03dd8b 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -91,8 +91,6 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
91 } 91 }
92 92
93 info.irq = irq_of_parse_and_map(node, 0); 93 info.irq = irq_of_parse_and_map(node, 0);
94 if (info.irq == NO_IRQ)
95 info.irq = -1;
96 94
97 if (of_find_i2c_driver(node, &info) < 0) { 95 if (of_find_i2c_driver(node, &info) < 0) {
98 irq_dispose_mapping(info.irq); 96 irq_dispose_mapping(info.irq);
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
index 4ec220b2eae7..6938d2e9f18f 100644
--- a/drivers/parport/parport_ax88796.c
+++ b/drivers/parport/parport_ax88796.c
@@ -406,6 +406,8 @@ static int parport_ax88796_resume(struct platform_device *dev)
406#define parport_ax88796_resume NULL 406#define parport_ax88796_resume NULL
407#endif 407#endif
408 408
409MODULE_ALIAS("platform:ax88796-pp");
410
409static struct platform_driver axdrv = { 411static struct platform_driver axdrv = {
410 .driver = { 412 .driver = {
411 .name = "ax88796-pp", 413 .name = "ax88796-pp",
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f941f609dbf3..8bf86ae2333f 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -317,10 +317,8 @@ int __init dmar_table_init(void)
317 return -ENODEV; 317 return -ENODEV;
318 } 318 }
319 319
320 if (list_empty(&dmar_rmrr_units)) { 320 if (list_empty(&dmar_rmrr_units))
321 printk(KERN_INFO PREFIX "No RMRR found\n"); 321 printk(KERN_INFO PREFIX "No RMRR found\n");
322 return -ENODEV;
323 }
324 322
325 return 0; 323 return 0;
326} 324}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d00f0e0d8453..e9c356236d27 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1040,7 +1040,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1040 * @dev: PCI device to handle. 1040 * @dev: PCI device to handle.
1041 * @state: PCI state from which device will issue PME#. 1041 * @state: PCI state from which device will issue PME#.
1042 */ 1042 */
1043static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) 1043bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1044{ 1044{
1045 if (!dev->pm_cap) 1045 if (!dev->pm_cap)
1046 return false; 1046 return false;
@@ -1123,17 +1123,10 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
1123} 1123}
1124 1124
1125/** 1125/**
1126 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1127 * @dev: Device to handle.
1128 *
1129 * Choose the power state appropriate for the device depending on whether
1130 * it can wake up the system and/or is power manageable by the platform
1131 * (PCI_D3hot is the default) and put the device into that state.
1132 */ 1126 */
1133int pci_prepare_to_sleep(struct pci_dev *dev) 1127pci_power_t pci_target_state(struct pci_dev *dev)
1134{ 1128{
1135 pci_power_t target_state = PCI_D3hot; 1129 pci_power_t target_state = PCI_D3hot;
1136 int error;
1137 1130
1138 if (platform_pci_power_manageable(dev)) { 1131 if (platform_pci_power_manageable(dev)) {
1139 /* 1132 /*
@@ -1160,7 +1153,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1160 * to generate PME#. 1153 * to generate PME#.
1161 */ 1154 */
1162 if (!dev->pm_cap) 1155 if (!dev->pm_cap)
1163 return -EIO; 1156 return PCI_POWER_ERROR;
1164 1157
1165 if (dev->pme_support) { 1158 if (dev->pme_support) {
1166 while (target_state 1159 while (target_state
@@ -1169,6 +1162,25 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1169 } 1162 }
1170 } 1163 }
1171 1164
1165 return target_state;
1166}
1167
1168/**
1169 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1170 * @dev: Device to handle.
1171 *
1172 * Choose the power state appropriate for the device depending on whether
1173 * it can wake up the system and/or is power manageable by the platform
1174 * (PCI_D3hot is the default) and put the device into that state.
1175 */
1176int pci_prepare_to_sleep(struct pci_dev *dev)
1177{
1178 pci_power_t target_state = pci_target_state(dev);
1179 int error;
1180
1181 if (target_state == PCI_POWER_ERROR)
1182 return -EIO;
1183
1172 pci_enable_wake(dev, target_state, true); 1184 pci_enable_wake(dev, target_state, true);
1173 1185
1174 error = pci_set_power_state(dev, target_state); 1186 error = pci_set_power_state(dev, target_state);
@@ -1918,7 +1930,9 @@ EXPORT_SYMBOL(pci_select_bars);
1918EXPORT_SYMBOL(pci_set_power_state); 1930EXPORT_SYMBOL(pci_set_power_state);
1919EXPORT_SYMBOL(pci_save_state); 1931EXPORT_SYMBOL(pci_save_state);
1920EXPORT_SYMBOL(pci_restore_state); 1932EXPORT_SYMBOL(pci_restore_state);
1933EXPORT_SYMBOL(pci_pme_capable);
1921EXPORT_SYMBOL(pci_enable_wake); 1934EXPORT_SYMBOL(pci_enable_wake);
1935EXPORT_SYMBOL(pci_target_state);
1922EXPORT_SYMBOL(pci_prepare_to_sleep); 1936EXPORT_SYMBOL(pci_prepare_to_sleep);
1923EXPORT_SYMBOL(pci_back_from_sleep); 1937EXPORT_SYMBOL(pci_back_from_sleep);
1924EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); 1938EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
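The pci.c hunk above factors the state-selection half of pci_prepare_to_sleep() out into pci_target_state() and exports it together with pci_pme_capable(). A minimal sketch of how a PCI driver's legacy suspend/resume hooks might use the newly exported helpers; the foo_* names are hypothetical, only the pci_* calls come from this patch:

#include <linux/pci.h>

/*
 * pci_prepare_to_sleep() picks the deepest usable power state (PCI_D3hot
 * by default), arms PME# if the device supports it in that state, and
 * enters it; pci_back_from_sleep() reverses both steps on resume.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);

	/* Purely informational: would the chosen state still allow wakeup? */
	if (pci_pme_capable(pdev, pci_target_state(pdev)))
		dev_dbg(&pdev->dev, "can generate PME# from target state\n");

	return pci_prepare_to_sleep(pdev);
}

static int foo_resume(struct pci_dev *pdev)
{
	pci_back_from_sleep(pdev);
	pci_restore_state(pdev);
	return 0;
}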
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 4400dffbd93a..e1098c302c45 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -88,7 +88,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
88 if ((pos & 3) && cnt > 2) { 88 if ((pos & 3) && cnt > 2) {
89 unsigned short val; 89 unsigned short val;
90 pci_user_read_config_word(dev, pos, &val); 90 pci_user_read_config_word(dev, pos, &val);
91 __put_user(cpu_to_le16(val), (unsigned short __user *) buf); 91 __put_user(cpu_to_le16(val), (__le16 __user *) buf);
92 buf += 2; 92 buf += 2;
93 pos += 2; 93 pos += 2;
94 cnt -= 2; 94 cnt -= 2;
@@ -97,7 +97,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
97 while (cnt >= 4) { 97 while (cnt >= 4) {
98 unsigned int val; 98 unsigned int val;
99 pci_user_read_config_dword(dev, pos, &val); 99 pci_user_read_config_dword(dev, pos, &val);
100 __put_user(cpu_to_le32(val), (unsigned int __user *) buf); 100 __put_user(cpu_to_le32(val), (__le32 __user *) buf);
101 buf += 4; 101 buf += 4;
102 pos += 4; 102 pos += 4;
103 cnt -= 4; 103 cnt -= 4;
@@ -106,7 +106,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
106 if (cnt >= 2) { 106 if (cnt >= 2) {
107 unsigned short val; 107 unsigned short val;
108 pci_user_read_config_word(dev, pos, &val); 108 pci_user_read_config_word(dev, pos, &val);
109 __put_user(cpu_to_le16(val), (unsigned short __user *) buf); 109 __put_user(cpu_to_le16(val), (__le16 __user *) buf);
110 buf += 2; 110 buf += 2;
111 pos += 2; 111 pos += 2;
112 cnt -= 2; 112 cnt -= 2;
@@ -156,8 +156,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
156 } 156 }
157 157
158 if ((pos & 3) && cnt > 2) { 158 if ((pos & 3) && cnt > 2) {
159 unsigned short val; 159 __le16 val;
160 __get_user(val, (unsigned short __user *) buf); 160 __get_user(val, (__le16 __user *) buf);
161 pci_user_write_config_word(dev, pos, le16_to_cpu(val)); 161 pci_user_write_config_word(dev, pos, le16_to_cpu(val));
162 buf += 2; 162 buf += 2;
163 pos += 2; 163 pos += 2;
@@ -165,8 +165,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
165 } 165 }
166 166
167 while (cnt >= 4) { 167 while (cnt >= 4) {
168 unsigned int val; 168 __le32 val;
169 __get_user(val, (unsigned int __user *) buf); 169 __get_user(val, (__le32 __user *) buf);
170 pci_user_write_config_dword(dev, pos, le32_to_cpu(val)); 170 pci_user_write_config_dword(dev, pos, le32_to_cpu(val));
171 buf += 4; 171 buf += 4;
172 pos += 4; 172 pos += 4;
@@ -174,8 +174,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
174 } 174 }
175 175
176 if (cnt >= 2) { 176 if (cnt >= 2) {
177 unsigned short val; 177 __le16 val;
178 __get_user(val, (unsigned short __user *) buf); 178 __get_user(val, (__le16 __user *) buf);
179 pci_user_write_config_word(dev, pos, le16_to_cpu(val)); 179 pci_user_write_config_word(dev, pos, le16_to_cpu(val));
180 buf += 2; 180 buf += 2;
181 pos += 2; 181 pos += 2;
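The proc.c changes are sparse-annotation fixes only: the bytes exchanged with user space are little-endian, so the casts and temporaries now carry the __le16/__le32 types that cpu_to_le16()/le16_to_cpu() expect. The underlying idiom, as a small sketch with invented helper names:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Keeping the __le16 type on anything in wire/file byte order lets
 * "make C=1" (sparse) catch accidental mixing with CPU-order values.
 */
static inline void example_store_le16(u16 cpu_val, __le16 *slot)
{
	*slot = cpu_to_le16(cpu_val);		/* CPU order -> little endian */
}

static inline u16 example_load_le16(const __le16 *slot)
{
	return le16_to_cpu(*slot);		/* little endian -> CPU order */
}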
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index e45402adac3f..e0f884034c9f 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -219,7 +219,8 @@ config PCMCIA_SA1111
219config PCMCIA_PXA2XX 219config PCMCIA_PXA2XX
220 tristate "PXA2xx support" 220 tristate "PXA2xx support"
221 depends on ARM && ARCH_PXA && PCMCIA 221 depends on ARM && ARCH_PXA && PCMCIA
222 depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE 222 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
223 || MACH_ARMCORE || ARCH_PXA_PALM)
223 help 224 help
224 Say Y here to include support for the PXA2xx PCMCIA controller 225 Say Y here to include support for the PXA2xx PCMCIA controller
225 226
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 85c6cc931f97..269a9e913ba2 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -72,4 +72,5 @@ pxa2xx_cs-$(CONFIG_ARCH_LUBBOCK) += pxa2xx_lubbock.o sa1111_generic.o
72pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o 72pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
73pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o 73pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
74pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o 74pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o
75pxa2xx_cs-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
75 76
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index c21f9a9c3e3f..a34284b1482a 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/mm.h>
31#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
32#include <linux/of_platform.h> 33#include <linux/of_platform.h>
33 34
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index f123fce65f2e..bb95db7d2b76 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -5,83 +5,60 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Compulab Ltd., 2003, 2007 8 * Compulab Ltd., 2003, 2007, 2008
9 * Mike Rapoport <mike@compulab.co.il> 9 * Mike Rapoport <mike@compulab.co.il>
10 * 10 *
11 */ 11 */
12 12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/platform_device.h> 13#include <linux/platform_device.h>
16#include <linux/irq.h> 14#include <linux/irq.h>
17#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gpio.h>
18 17
19#include <pcmcia/ss.h>
20#include <asm/hardware.h>
21#include <asm/mach-types.h> 18#include <asm/mach-types.h>
22
23#include <asm/arch/pxa-regs.h> 19#include <asm/arch/pxa-regs.h>
24#include <asm/arch/pxa2xx-gpio.h>
25#include <asm/arch/cm-x270.h>
26 20
27#include "soc_common.h" 21#include "soc_common.h"
28 22
23#define GPIO_PCMCIA_S0_CD_VALID (84)
24#define GPIO_PCMCIA_S0_RDYINT (82)
25#define GPIO_PCMCIA_RESET (53)
26
27#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
28#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
29
30
29static struct pcmcia_irqs irqs[] = { 31static struct pcmcia_irqs irqs[] = {
30 { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" }, 32 { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
31 { 1, PCMCIA_S1_CD_VALID, "PCMCIA1 CD" },
32}; 33};
33 34
34static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 35static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
35{ 36{
36 GPSR(GPIO48_nPOE) = GPIO_bit(GPIO48_nPOE) | 37 int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset");
37 GPIO_bit(GPIO49_nPWE) | 38 if (ret)
38 GPIO_bit(GPIO50_nPIOR) | 39 return ret;
39 GPIO_bit(GPIO51_nPIOW) | 40 gpio_direction_output(GPIO_PCMCIA_RESET, 0);
40 GPIO_bit(GPIO85_nPCE_1) | 41
41 GPIO_bit(GPIO54_nPCE_2); 42 skt->irq = PCMCIA_S0_RDYINT;
42 43 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
43 pxa_gpio_mode(GPIO48_nPOE_MD); 44 if (!ret)
44 pxa_gpio_mode(GPIO49_nPWE_MD); 45 gpio_free(GPIO_PCMCIA_RESET);
45 pxa_gpio_mode(GPIO50_nPIOR_MD); 46
46 pxa_gpio_mode(GPIO51_nPIOW_MD); 47 return ret;
47 pxa_gpio_mode(GPIO85_nPCE_1_MD);
48 pxa_gpio_mode(GPIO54_nPCE_2_MD);
49 pxa_gpio_mode(GPIO55_nPREG_MD);
50 pxa_gpio_mode(GPIO56_nPWAIT_MD);
51 pxa_gpio_mode(GPIO57_nIOIS16_MD);
52
53 /* Reset signal */
54 pxa_gpio_mode(GPIO53_nPCE_2 | GPIO_OUT);
55 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
56
57 set_irq_type(PCMCIA_S0_CD_VALID, IRQ_TYPE_EDGE_BOTH);
58 set_irq_type(PCMCIA_S1_CD_VALID, IRQ_TYPE_EDGE_BOTH);
59
60 /* irq's for slots: */
61 set_irq_type(PCMCIA_S0_RDYINT, IRQ_TYPE_EDGE_FALLING);
62 set_irq_type(PCMCIA_S1_RDYINT, IRQ_TYPE_EDGE_FALLING);
63
64 skt->irq = (skt->nr == 0) ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
65 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
66} 48}
67 49
68static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt) 50static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt)
69{ 51{
70 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); 52 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
71 53 gpio_free(GPIO_PCMCIA_RESET);
72 set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_CD_VALID), IRQ_TYPE_NONE);
73 set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_CD_VALID), IRQ_TYPE_NONE);
74
75 set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_RDYINT), IRQ_TYPE_NONE);
76 set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_RDYINT), IRQ_TYPE_NONE);
77} 54}
78 55
79 56
80static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt, 57static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
81 struct pcmcia_state *state) 58 struct pcmcia_state *state)
82{ 59{
83 state->detect = (PCC_DETECT(skt->nr) == 0) ? 1 : 0; 60 state->detect = (gpio_get_value(GPIO_PCMCIA_S0_CD_VALID) == 0) ? 1 : 0;
84 state->ready = (PCC_READY(skt->nr) == 0) ? 0 : 1; 61 state->ready = (gpio_get_value(GPIO_PCMCIA_S0_RDYINT) == 0) ? 0 : 1;
85 state->bvd1 = 1; 62 state->bvd1 = 1;
86 state->bvd2 = 1; 63 state->bvd2 = 1;
87 state->vs_3v = 0; 64 state->vs_3v = 0;
@@ -93,32 +70,16 @@ static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
93static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, 70static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
94 const socket_state_t *state) 71 const socket_state_t *state)
95{ 72{
96 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
97 pxa_gpio_mode(GPIO49_nPWE | GPIO_OUT);
98
99 switch (skt->nr) { 73 switch (skt->nr) {
100 case 0: 74 case 0:
101 if (state->flags & SS_RESET) { 75 if (state->flags & SS_RESET) {
102 GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE); 76 gpio_set_value(GPIO_PCMCIA_RESET, 1);
103 GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
104 udelay(10);
105 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
106 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
107 }
108 break;
109 case 1:
110 if (state->flags & SS_RESET) {
111 GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
112 GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
113 udelay(10); 77 udelay(10);
114 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2); 78 gpio_set_value(GPIO_PCMCIA_RESET, 0);
115 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
116 } 79 }
117 break; 80 break;
118 } 81 }
119 82
120 pxa_gpio_mode(GPIO49_nPWE_MD);
121
122 return 0; 83 return 0;
123} 84}
124 85
@@ -139,7 +100,7 @@ static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
139 .configure_socket = cmx270_pcmcia_configure_socket, 100 .configure_socket = cmx270_pcmcia_configure_socket,
140 .socket_init = cmx270_pcmcia_socket_init, 101 .socket_init = cmx270_pcmcia_socket_init,
141 .socket_suspend = cmx270_pcmcia_socket_suspend, 102 .socket_suspend = cmx270_pcmcia_socket_suspend,
142 .nr = 2, 103 .nr = 1,
143}; 104};
144 105
145static struct platform_device *cmx270_pcmcia_device; 106static struct platform_device *cmx270_pcmcia_device;
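The cm-x270 rewrite above replaces direct PXA register pokes (GPSR/GPCR writes, pxa_gpio_mode()) with the generic GPIO API: claim the line once, set its direction, then drive it with gpio_set_value(). The idiom on its own, with an invented GPIO number and label:

#include <linux/gpio.h>
#include <linux/delay.h>

#define EXAMPLE_GPIO_RESET	53	/* board specific, illustrative only */

static int example_reset_init(void)
{
	int ret = gpio_request(EXAMPLE_GPIO_RESET, "card reset");

	if (ret)
		return ret;		/* line busy or invalid */

	/* configure as output, deasserted */
	return gpio_direction_output(EXAMPLE_GPIO_RESET, 0);
}

static void example_reset_pulse(void)
{
	gpio_set_value(EXAMPLE_GPIO_RESET, 1);	/* assert reset */
	udelay(10);
	gpio_set_value(EXAMPLE_GPIO_RESET, 0);	/* release */
}

static void example_reset_exit(void)
{
	gpio_free(EXAMPLE_GPIO_RESET);
}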
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
new file mode 100644
index 000000000000..4abde190c1f5
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -0,0 +1,118 @@
1/*
2 * linux/drivers/pcmcia/pxa2xx_palmtx.c
3 *
4 * Driver for Palm T|X PCMCIA
5 *
6 * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16
17#include <asm/mach-types.h>
18
19#include <asm/arch/gpio.h>
20#include <asm/arch/palmtx.h>
21
22#include "soc_common.h"
23
24static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
25{
26 skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY);
27 return 0;
28}
29
30static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
31{
32}
33
34static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
35 struct pcmcia_state *state)
36{
37 state->detect = 1; /* always inserted */
38 state->ready = !!gpio_get_value(GPIO_NR_PALMTX_PCMCIA_READY);
39 state->bvd1 = 1;
40 state->bvd2 = 1;
41 state->wrprot = 0;
42 state->vs_3v = 1;
43 state->vs_Xv = 0;
44}
45
46static int
47palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
48 const socket_state_t *state)
49{
50 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER1, 1);
51 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER2, 1);
52 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_RESET,
53 !!(state->flags & SS_RESET));
54
55 return 0;
56}
57
58static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
59{
60}
61
62static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
63{
64}
65
66static struct pcmcia_low_level palmtx_pcmcia_ops = {
67 .owner = THIS_MODULE,
68
69 .first = 0,
70 .nr = 1,
71
72 .hw_init = palmtx_pcmcia_hw_init,
73 .hw_shutdown = palmtx_pcmcia_hw_shutdown,
74
75 .socket_state = palmtx_pcmcia_socket_state,
76 .configure_socket = palmtx_pcmcia_configure_socket,
77
78 .socket_init = palmtx_pcmcia_socket_init,
79 .socket_suspend = palmtx_pcmcia_socket_suspend,
80};
81
82static struct platform_device *palmtx_pcmcia_device;
83
84static int __init palmtx_pcmcia_init(void)
85{
86 int ret;
87
88 if (!machine_is_palmtx())
89 return -ENODEV;
90
91 palmtx_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
92 if (!palmtx_pcmcia_device)
93 return -ENOMEM;
94
95 ret = platform_device_add_data(palmtx_pcmcia_device, &palmtx_pcmcia_ops,
96 sizeof(palmtx_pcmcia_ops));
97
98 if (!ret)
99 ret = platform_device_add(palmtx_pcmcia_device);
100
101 if (ret)
102 platform_device_put(palmtx_pcmcia_device);
103
104 return ret;
105}
106
107static void __exit palmtx_pcmcia_exit(void)
108{
109 platform_device_unregister(palmtx_pcmcia_device);
110}
111
112fs_initcall(palmtx_pcmcia_init);
113module_exit(palmtx_pcmcia_exit);
114
115MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
116MODULE_DESCRIPTION("PCMCIA support for Palm T|X");
117MODULE_ALIAS("platform:pxa2xx-pcmcia");
118MODULE_LICENSE("GPL");
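Like the other PXA socket glue files, the new Palm T|X driver never touches the PCMCIA controller itself: it fills in a struct pcmcia_low_level and passes it as platform data to the generic "pxa2xx-pcmcia" driver. The registration shape in isolation; the example_ops structure is a stand-in for something like palmtx_pcmcia_ops above:

#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/init.h>

struct example_ops {
	int first;
	int nr;
};

static struct example_ops example_ops = {
	.first	= 0,
	.nr	= 1,
};

static struct platform_device *example_pdev;

static int __init example_register(void)
{
	int ret;

	example_pdev = platform_device_alloc("pxa2xx-pcmcia", -1);
	if (!example_pdev)
		return -ENOMEM;

	/* copies example_ops into the device's platform_data */
	ret = platform_device_add_data(example_pdev, &example_ops,
				       sizeof(example_ops));
	if (!ret)
		ret = platform_device_add(example_pdev);
	if (ret)
		platform_device_put(example_pdev);	/* drop on failure */

	return ret;
}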
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 58c806e9c58a..4d17d384578d 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -49,4 +49,10 @@ config BATTERY_OLPC
49 help 49 help
50 Say Y to enable support for the battery on the OLPC laptop. 50 Say Y to enable support for the battery on the OLPC laptop.
51 51
52config BATTERY_PALMTX
53 tristate "Palm T|X battery"
54 depends on MACH_PALMTX
55 help
56 Say Y to enable support for the battery in Palm T|X.
57
52endif # POWER_SUPPLY 58endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 6413ded5fe5f..6f43a54ee420 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_APM_POWER) += apm_power.o
20obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o 20obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
21obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o 21obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
22obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o 22obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
23obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 71be36f18709..308ddb201b66 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -433,6 +433,8 @@ static int ds2760_battery_resume(struct platform_device *pdev)
433 433
434#endif /* CONFIG_PM */ 434#endif /* CONFIG_PM */
435 435
436MODULE_ALIAS("platform:ds2760-battery");
437
436static struct platform_driver ds2760_battery_driver = { 438static struct platform_driver ds2760_battery_driver = {
437 .driver = { 439 .driver = {
438 .name = "ds2760-battery", 440 .name = "ds2760-battery",
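This MODULE_ALIAS() line (and the matching one added to pda_power.c further down) exists for module autoloading: registering a platform_device named "ds2760-battery" emits a uevent carrying MODALIAS=platform:ds2760-battery, which modprobe resolves to this module. The pattern with an invented driver, where the alias must match the platform_device name prefixed by "platform:":

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>

MODULE_ALIAS("platform:example-widget");

static int __devinit example_probe(struct platform_device *pdev)
{
	return 0;	/* claim resources, register subsystems, ... */
}

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example-widget",	/* must match the alias */
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");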
diff --git a/drivers/power/palmtx_battery.c b/drivers/power/palmtx_battery.c
new file mode 100644
index 000000000000..244bb273a637
--- /dev/null
+++ b/drivers/power/palmtx_battery.c
@@ -0,0 +1,198 @@
1/*
2 * linux/drivers/power/palmtx_battery.c
3 *
4 * Battery measurement code for Palm T|X Handheld computer
5 *
6 * based on tosa_battery.c
7 *
8 * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/power_supply.h>
18#include <linux/wm97xx.h>
19#include <linux/delay.h>
20#include <linux/spinlock.h>
21#include <linux/interrupt.h>
22#include <linux/gpio.h>
23
24#include <asm/mach-types.h>
25#include <asm/arch/palmtx.h>
26
27static DEFINE_MUTEX(bat_lock);
28static struct work_struct bat_work;
29struct mutex work_lock;
30int bat_status = POWER_SUPPLY_STATUS_DISCHARGING;
31
32static unsigned long palmtx_read_bat(struct power_supply *bat_ps)
33{
34 return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data,
35 WM97XX_AUX_ID3) * 1000 / 414;
36}
37
38static unsigned long palmtx_read_temp(struct power_supply *bat_ps)
39{
40 return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data,
41 WM97XX_AUX_ID2);
42}
43
44static int palmtx_bat_get_property(struct power_supply *bat_ps,
45 enum power_supply_property psp,
46 union power_supply_propval *val)
47{
48 switch (psp) {
49 case POWER_SUPPLY_PROP_STATUS:
50 val->intval = bat_status;
51 break;
52 case POWER_SUPPLY_PROP_TECHNOLOGY:
53 val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
54 break;
55 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
56 val->intval = palmtx_read_bat(bat_ps);
57 break;
58 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
59 case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
60 val->intval = PALMTX_BAT_MAX_VOLTAGE;
61 break;
62 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
63 val->intval = PALMTX_BAT_MIN_VOLTAGE;
64 break;
65 case POWER_SUPPLY_PROP_TEMP:
66 val->intval = palmtx_read_temp(bat_ps);
67 break;
68 case POWER_SUPPLY_PROP_PRESENT:
69 val->intval = 1;
70 break;
71 default:
72 return -EINVAL;
73 }
74 return 0;
75}
76
77static void palmtx_bat_external_power_changed(struct power_supply *bat_ps)
78{
79 schedule_work(&bat_work);
80}
81
82static char *status_text[] = {
83 [POWER_SUPPLY_STATUS_UNKNOWN] = "Unknown",
84 [POWER_SUPPLY_STATUS_CHARGING] = "Charging",
85 [POWER_SUPPLY_STATUS_DISCHARGING] = "Discharging",
86};
87
88static void palmtx_bat_update(struct power_supply *bat_ps)
89{
90 int old_status = bat_status;
91
92 mutex_lock(&work_lock);
93
94 bat_status = gpio_get_value(GPIO_NR_PALMTX_POWER_DETECT) ?
95 POWER_SUPPLY_STATUS_CHARGING :
96 POWER_SUPPLY_STATUS_DISCHARGING;
97
98 if (old_status != bat_status) {
99 pr_debug("%s %s -> %s\n", bat_ps->name,
100 status_text[old_status],
101 status_text[bat_status]);
102 power_supply_changed(bat_ps);
103 }
104
105 mutex_unlock(&work_lock);
106}
107
108static enum power_supply_property palmtx_bat_main_props[] = {
109 POWER_SUPPLY_PROP_STATUS,
110 POWER_SUPPLY_PROP_TECHNOLOGY,
111 POWER_SUPPLY_PROP_VOLTAGE_NOW,
112 POWER_SUPPLY_PROP_VOLTAGE_MAX,
113 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
114 POWER_SUPPLY_PROP_TEMP,
115 POWER_SUPPLY_PROP_PRESENT,
116};
117
118struct power_supply bat_ps = {
119 .name = "main-battery",
120 .type = POWER_SUPPLY_TYPE_BATTERY,
121 .properties = palmtx_bat_main_props,
122 .num_properties = ARRAY_SIZE(palmtx_bat_main_props),
123 .get_property = palmtx_bat_get_property,
124 .external_power_changed = palmtx_bat_external_power_changed,
125 .use_for_apm = 1,
126};
127
128static void palmtx_bat_work(struct work_struct *work)
129{
130 palmtx_bat_update(&bat_ps);
131}
132
133#ifdef CONFIG_PM
134static int palmtx_bat_suspend(struct platform_device *dev, pm_message_t state)
135{
136 flush_scheduled_work();
137 return 0;
138}
139
140static int palmtx_bat_resume(struct platform_device *dev)
141{
142 schedule_work(&bat_work);
143 return 0;
144}
145#else
146#define palmtx_bat_suspend NULL
147#define palmtx_bat_resume NULL
148#endif
149
150static int __devinit palmtx_bat_probe(struct platform_device *dev)
151{
152 int ret = 0;
153
154 if (!machine_is_palmtx())
155 return -ENODEV;
156
157 mutex_init(&work_lock);
158
159 INIT_WORK(&bat_work, palmtx_bat_work);
160
161 ret = power_supply_register(&dev->dev, &bat_ps);
162 if (!ret)
163 schedule_work(&bat_work);
164
165 return ret;
166}
167
168static int __devexit palmtx_bat_remove(struct platform_device *dev)
169{
170 power_supply_unregister(&bat_ps);
171 return 0;
172}
173
174static struct platform_driver palmtx_bat_driver = {
175 .driver.name = "wm97xx-battery",
176 .driver.owner = THIS_MODULE,
177 .probe = palmtx_bat_probe,
178 .remove = __devexit_p(palmtx_bat_remove),
179 .suspend = palmtx_bat_suspend,
180 .resume = palmtx_bat_resume,
181};
182
183static int __init palmtx_bat_init(void)
184{
185 return platform_driver_register(&palmtx_bat_driver);
186}
187
188static void __exit palmtx_bat_exit(void)
189{
190 platform_driver_unregister(&palmtx_bat_driver);
191}
192
193module_init(palmtx_bat_init);
194module_exit(palmtx_bat_exit);
195
196MODULE_LICENSE("GPL");
197MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
198MODULE_DESCRIPTION("Palm T|X battery driver");
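Once palmtx_bat_probe() has called power_supply_register(), the properties listed in palmtx_bat_main_props appear as attributes under /sys/class/power_supply/main-battery/. A tiny userspace sketch that reads the charge status, assuming sysfs is mounted at /sys and the "main-battery" name used above:

#include <stdio.h>

int main(void)
{
	char status[32];
	FILE *f = fopen("/sys/class/power_supply/main-battery/status", "r");

	if (!f) {
		perror("main-battery status");
		return 1;
	}
	if (fgets(status, sizeof(status), f))
		printf("battery status: %s", status);	/* e.g. "Charging" */
	fclose(f);
	return 0;
}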
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index 82810b7bff9c..0471ec743ab9 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -362,6 +362,8 @@ static int pda_power_resume(struct platform_device *pdev)
362#define pda_power_resume NULL 362#define pda_power_resume NULL
363#endif /* CONFIG_PM */ 363#endif /* CONFIG_PM */
364 364
365MODULE_ALIAS("platform:pda-power");
366
365static struct platform_driver pda_power_pdrv = { 367static struct platform_driver pda_power_pdrv = {
366 .driver = { 368 .driver = {
367 .name = "pda-power", 369 .name = "pda-power",
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index fc85bf2e4a97..90ab73825401 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -273,6 +273,25 @@ comment "SPI RTC drivers"
273 273
274if SPI_MASTER 274if SPI_MASTER
275 275
276config RTC_DRV_M41T94
277 tristate "ST M41T94"
278 help
279 If you say yes here you will get support for the
280 ST M41T94 SPI RTC chip.
281
282 This driver can also be built as a module. If so, the module
283 will be called rtc-m41t94.
284
285config RTC_DRV_DS1305
286 tristate "Dallas/Maxim DS1305/DS1306"
287 help
288 Select this driver to get support for the Dallas/Maxim DS1305
289 and DS1306 real time clock chips. These support a trickle
290 charger, alarms, and NVRAM in addition to the clock.
291
292 This driver can also be built as a module. If so, the module
293 will be called rtc-ds1305.
294
276config RTC_DRV_MAX6902 295config RTC_DRV_MAX6902
277 tristate "Maxim MAX6902" 296 tristate "Maxim MAX6902"
278 help 297 help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index b5d9d67df887..18622ef84cab 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
24obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o 24obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
25obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o 25obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
26obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o 26obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
27obj-$(CONFIG_RTC_DRV_DS1305) += rtc-ds1305.o
27obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o 28obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
28obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o 29obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o
29obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o 30obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
34obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o 35obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
35obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o 36obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
36obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o 37obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
38obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
37obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o 39obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
38obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o 40obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
39obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o 41obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 9c3db934cc24..cd32d05db773 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -171,8 +171,10 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
171 | BIN2BCD(tm.tm_mday) << 24 171 | BIN2BCD(tm.tm_mday) << 24
172 | AT91_RTC_DATEEN | AT91_RTC_MTHEN); 172 | AT91_RTC_DATEEN | AT91_RTC_MTHEN);
173 173
174 if (alrm->enabled) 174 if (alrm->enabled) {
175 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
175 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); 176 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
177 }
176 178
177 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 179 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
178 at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, 180 at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
@@ -191,28 +193,22 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
191 193
192 pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg); 194 pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);
193 195
196 /* important: scrub old status before enabling IRQs */
194 switch (cmd) { 197 switch (cmd) {
195 case RTC_AIE_OFF: /* alarm off */ 198 case RTC_AIE_OFF: /* alarm off */
196 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM); 199 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
197 break; 200 break;
198 case RTC_AIE_ON: /* alarm on */ 201 case RTC_AIE_ON: /* alarm on */
202 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
199 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); 203 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
200 break; 204 break;
201 case RTC_UIE_OFF: /* update off */ 205 case RTC_UIE_OFF: /* update off */
202 case RTC_PIE_OFF: /* periodic off */
203 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); 206 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
204 break; 207 break;
205 case RTC_UIE_ON: /* update on */ 208 case RTC_UIE_ON: /* update on */
206 case RTC_PIE_ON: /* periodic on */ 209 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
207 at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV); 210 at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
208 break; 211 break;
209 case RTC_IRQP_READ: /* read periodic alarm frequency */
210 ret = put_user(AT91_RTC_FREQ, (unsigned long *) arg);
211 break;
212 case RTC_IRQP_SET: /* set periodic alarm frequency */
213 if (arg != AT91_RTC_FREQ)
214 ret = -EINVAL;
215 break;
216 default: 212 default:
217 ret = -ENOIOCTLCMD; 213 ret = -ENOIOCTLCMD;
218 break; 214 break;
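The at91 fix is small but worth spelling out: before re-enabling the alarm or second-event interrupt it now writes the same bit to the status clear register (AT91_RTC_SCCR), so a stale latched event cannot fire the moment the IER write lands. The general clear-then-unmask idiom, with invented register offsets and a placeholder base pointer:

#include <linux/io.h>

static void __iomem *example_base;	/* ioremap()ed elsewhere */

#define EXAMPLE_STATUS_CLEAR	0x04	/* illustrative offsets only */
#define EXAMPLE_IRQ_ENABLE	0x08
#define EXAMPLE_EVENT		(1 << 0)

static void example_enable_event_irq(void)
{
	/* scrub any stale, already-latched event first ... */
	writel(EXAMPLE_EVENT, example_base + EXAMPLE_STATUS_CLEAR);
	/* ... then unmask, so only new events raise the interrupt */
	writel(EXAMPLE_EVENT, example_base + EXAMPLE_IRQ_ENABLE);
}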
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index d7bb9bac71df..6ea349aba3ba 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,25 +36,9 @@
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/mod_devicetable.h> 37#include <linux/mod_devicetable.h>
38 38
39#ifdef CONFIG_HPET_EMULATE_RTC
40#include <asm/hpet.h>
41#endif
42
43/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ 39/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
44#include <asm-generic/rtc.h> 40#include <asm-generic/rtc.h>
45 41
46#ifndef CONFIG_HPET_EMULATE_RTC
47#define is_hpet_enabled() 0
48#define hpet_set_alarm_time(hrs, min, sec) do { } while (0)
49#define hpet_set_periodic_freq(arg) 0
50#define hpet_mask_rtc_irq_bit(arg) do { } while (0)
51#define hpet_set_rtc_irq_bit(arg) do { } while (0)
52#define hpet_rtc_timer_init() do { } while (0)
53#define hpet_register_irq_handler(h) 0
54#define hpet_unregister_irq_handler(h) do { } while (0)
55extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
56#endif
57
58struct cmos_rtc { 42struct cmos_rtc {
59 struct rtc_device *rtc; 43 struct rtc_device *rtc;
60 struct device *dev; 44 struct device *dev;
@@ -93,6 +77,72 @@ static inline int is_intr(u8 rtc_intr)
93 77
94/*----------------------------------------------------------------*/ 78/*----------------------------------------------------------------*/
95 79
80/* Much modern x86 hardware has HPETs (10+ MHz timers) which, because
81 * many BIOS programmers don't set up "sane mode" IRQ routing, are mostly
82 * used in a broken "legacy replacement" mode. The breakage includes
83 * HPET #1 hijacking the IRQ for this RTC, and being unavailable for
84 * other (better) use.
85 *
86 * When that broken mode is in use, platform glue provides a partial
87 * emulation of hardware RTC IRQ facilities using HPET #1. We don't
88 * want to use HPET for anything except those IRQs though...
89 */
90#ifdef CONFIG_HPET_EMULATE_RTC
91#include <asm/hpet.h>
92#else
93
94static inline int is_hpet_enabled(void)
95{
96 return 0;
97}
98
99static inline int hpet_mask_rtc_irq_bit(unsigned long mask)
100{
101 return 0;
102}
103
104static inline int hpet_set_rtc_irq_bit(unsigned long mask)
105{
106 return 0;
107}
108
109static inline int
110hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
111{
112 return 0;
113}
114
115static inline int hpet_set_periodic_freq(unsigned long freq)
116{
117 return 0;
118}
119
120static inline int hpet_rtc_dropped_irq(void)
121{
122 return 0;
123}
124
125static inline int hpet_rtc_timer_init(void)
126{
127 return 0;
128}
129
130extern irq_handler_t hpet_rtc_interrupt;
131
132static inline int hpet_register_irq_handler(irq_handler_t handler)
133{
134 return 0;
135}
136
137static inline int hpet_unregister_irq_handler(irq_handler_t handler)
138{
139 return 0;
140}
141
142#endif
143
144/*----------------------------------------------------------------*/
145
96static int cmos_read_time(struct device *dev, struct rtc_time *t) 146static int cmos_read_time(struct device *dev, struct rtc_time *t)
97{ 147{
98 /* REVISIT: if the clock has a "century" register, use 148 /* REVISIT: if the clock has a "century" register, use
@@ -185,11 +235,56 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
185 return 0; 235 return 0;
186} 236}
187 237
238static void cmos_checkintr(struct cmos_rtc *cmos, unsigned char rtc_control)
239{
240 unsigned char rtc_intr;
241
242 /* NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
243 * allegedly some older rtcs need that to handle irqs properly
244 */
245 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
246
247 if (is_hpet_enabled())
248 return;
249
250 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
251 if (is_intr(rtc_intr))
252 rtc_update_irq(cmos->rtc, 1, rtc_intr);
253}
254
255static void cmos_irq_enable(struct cmos_rtc *cmos, unsigned char mask)
256{
257 unsigned char rtc_control;
258
259 /* flush any pending IRQ status, notably for update irqs,
260 * before we enable new IRQs
261 */
262 rtc_control = CMOS_READ(RTC_CONTROL);
263 cmos_checkintr(cmos, rtc_control);
264
265 rtc_control |= mask;
266 CMOS_WRITE(rtc_control, RTC_CONTROL);
267 hpet_set_rtc_irq_bit(mask);
268
269 cmos_checkintr(cmos, rtc_control);
270}
271
272static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
273{
274 unsigned char rtc_control;
275
276 rtc_control = CMOS_READ(RTC_CONTROL);
277 rtc_control &= ~mask;
278 CMOS_WRITE(rtc_control, RTC_CONTROL);
279 hpet_mask_rtc_irq_bit(mask);
280
281 cmos_checkintr(cmos, rtc_control);
282}
283
188static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) 284static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
189{ 285{
190 struct cmos_rtc *cmos = dev_get_drvdata(dev); 286 struct cmos_rtc *cmos = dev_get_drvdata(dev);
191 unsigned char mon, mday, hrs, min, sec; 287 unsigned char mon, mday, hrs, min, sec;
192 unsigned char rtc_control, rtc_intr;
193 288
194 if (!is_valid_irq(cmos->irq)) 289 if (!is_valid_irq(cmos->irq))
195 return -EIO; 290 return -EIO;
@@ -213,17 +308,10 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
213 sec = t->time.tm_sec; 308 sec = t->time.tm_sec;
214 sec = (sec < 60) ? BIN2BCD(sec) : 0xff; 309 sec = (sec < 60) ? BIN2BCD(sec) : 0xff;
215 310
216 hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec);
217 spin_lock_irq(&rtc_lock); 311 spin_lock_irq(&rtc_lock);
218 312
219 /* next rtc irq must not be from previous alarm setting */ 313 /* next rtc irq must not be from previous alarm setting */
220 rtc_control = CMOS_READ(RTC_CONTROL); 314 cmos_irq_disable(cmos, RTC_AIE);
221 rtc_control &= ~RTC_AIE;
222 CMOS_WRITE(rtc_control, RTC_CONTROL);
223 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
224 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
225 if (is_intr(rtc_intr))
226 rtc_update_irq(cmos->rtc, 1, rtc_intr);
227 315
228 /* update alarm */ 316 /* update alarm */
229 CMOS_WRITE(hrs, RTC_HOURS_ALARM); 317 CMOS_WRITE(hrs, RTC_HOURS_ALARM);
@@ -237,14 +325,13 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
237 CMOS_WRITE(mon, cmos->mon_alrm); 325 CMOS_WRITE(mon, cmos->mon_alrm);
238 } 326 }
239 327
240 if (t->enabled) { 328 /* FIXME the HPET alarm glue currently ignores day_alrm
241 rtc_control |= RTC_AIE; 329 * and mon_alrm ...
242 CMOS_WRITE(rtc_control, RTC_CONTROL); 330 */
243 rtc_intr = CMOS_READ(RTC_INTR_FLAGS); 331 hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec);
244 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; 332
245 if (is_intr(rtc_intr)) 333 if (t->enabled)
246 rtc_update_irq(cmos->rtc, 1, rtc_intr); 334 cmos_irq_enable(cmos, RTC_AIE);
247 }
248 335
249 spin_unlock_irq(&rtc_lock); 336 spin_unlock_irq(&rtc_lock);
250 337
@@ -267,8 +354,8 @@ static int cmos_irq_set_freq(struct device *dev, int freq)
267 f = 16 - f; 354 f = 16 - f;
268 355
269 spin_lock_irqsave(&rtc_lock, flags); 356 spin_lock_irqsave(&rtc_lock, flags);
270 if (!hpet_set_periodic_freq(freq)) 357 hpet_set_periodic_freq(freq);
271 CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT); 358 CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
272 spin_unlock_irqrestore(&rtc_lock, flags); 359 spin_unlock_irqrestore(&rtc_lock, flags);
273 360
274 return 0; 361 return 0;
@@ -277,26 +364,17 @@ static int cmos_irq_set_freq(struct device *dev, int freq)
277static int cmos_irq_set_state(struct device *dev, int enabled) 364static int cmos_irq_set_state(struct device *dev, int enabled)
278{ 365{
279 struct cmos_rtc *cmos = dev_get_drvdata(dev); 366 struct cmos_rtc *cmos = dev_get_drvdata(dev);
280 unsigned char rtc_control, rtc_intr;
281 unsigned long flags; 367 unsigned long flags;
282 368
283 if (!is_valid_irq(cmos->irq)) 369 if (!is_valid_irq(cmos->irq))
284 return -ENXIO; 370 return -ENXIO;
285 371
286 spin_lock_irqsave(&rtc_lock, flags); 372 spin_lock_irqsave(&rtc_lock, flags);
287 rtc_control = CMOS_READ(RTC_CONTROL);
288 373
289 if (enabled) 374 if (enabled)
290 rtc_control |= RTC_PIE; 375 cmos_irq_enable(cmos, RTC_PIE);
291 else 376 else
292 rtc_control &= ~RTC_PIE; 377 cmos_irq_disable(cmos, RTC_PIE);
293
294 CMOS_WRITE(rtc_control, RTC_CONTROL);
295
296 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
297 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
298 if (is_intr(rtc_intr))
299 rtc_update_irq(cmos->rtc, 1, rtc_intr);
300 378
301 spin_unlock_irqrestore(&rtc_lock, flags); 379 spin_unlock_irqrestore(&rtc_lock, flags);
302 return 0; 380 return 0;
@@ -308,7 +386,6 @@ static int
308cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 386cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
309{ 387{
310 struct cmos_rtc *cmos = dev_get_drvdata(dev); 388 struct cmos_rtc *cmos = dev_get_drvdata(dev);
311 unsigned char rtc_control, rtc_intr;
312 unsigned long flags; 389 unsigned long flags;
313 390
314 switch (cmd) { 391 switch (cmd) {
@@ -316,51 +393,29 @@ cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
316 case RTC_AIE_ON: 393 case RTC_AIE_ON:
317 case RTC_UIE_OFF: 394 case RTC_UIE_OFF:
318 case RTC_UIE_ON: 395 case RTC_UIE_ON:
319 case RTC_PIE_OFF:
320 case RTC_PIE_ON:
321 if (!is_valid_irq(cmos->irq)) 396 if (!is_valid_irq(cmos->irq))
322 return -EINVAL; 397 return -EINVAL;
323 break; 398 break;
399 /* PIE ON/OFF is handled by cmos_irq_set_state() */
324 default: 400 default:
325 return -ENOIOCTLCMD; 401 return -ENOIOCTLCMD;
326 } 402 }
327 403
328 spin_lock_irqsave(&rtc_lock, flags); 404 spin_lock_irqsave(&rtc_lock, flags);
329 rtc_control = CMOS_READ(RTC_CONTROL);
330 switch (cmd) { 405 switch (cmd) {
331 case RTC_AIE_OFF: /* alarm off */ 406 case RTC_AIE_OFF: /* alarm off */
332 rtc_control &= ~RTC_AIE; 407 cmos_irq_disable(cmos, RTC_AIE);
333 hpet_mask_rtc_irq_bit(RTC_AIE);
334 break; 408 break;
335 case RTC_AIE_ON: /* alarm on */ 409 case RTC_AIE_ON: /* alarm on */
336 rtc_control |= RTC_AIE; 410 cmos_irq_enable(cmos, RTC_AIE);
337 hpet_set_rtc_irq_bit(RTC_AIE);
338 break; 411 break;
339 case RTC_UIE_OFF: /* update off */ 412 case RTC_UIE_OFF: /* update off */
340 rtc_control &= ~RTC_UIE; 413 cmos_irq_disable(cmos, RTC_UIE);
341 hpet_mask_rtc_irq_bit(RTC_UIE);
342 break; 414 break;
343 case RTC_UIE_ON: /* update on */ 415 case RTC_UIE_ON: /* update on */
344 rtc_control |= RTC_UIE; 416 cmos_irq_enable(cmos, RTC_UIE);
345 hpet_set_rtc_irq_bit(RTC_UIE);
346 break;
347 case RTC_PIE_OFF: /* periodic off */
348 rtc_control &= ~RTC_PIE;
349 hpet_mask_rtc_irq_bit(RTC_PIE);
350 break;
351 case RTC_PIE_ON: /* periodic on */
352 rtc_control |= RTC_PIE;
353 hpet_set_rtc_irq_bit(RTC_PIE);
354 break; 417 break;
355 } 418 }
356 if (!is_hpet_enabled())
357 CMOS_WRITE(rtc_control, RTC_CONTROL);
358
359 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
360 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
361 if (is_intr(rtc_intr))
362 rtc_update_irq(cmos->rtc, 1, rtc_intr);
363
364 spin_unlock_irqrestore(&rtc_lock, flags); 419 spin_unlock_irqrestore(&rtc_lock, flags);
365 return 0; 420 return 0;
366} 421}
@@ -502,27 +557,29 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
502 u8 rtc_control; 557 u8 rtc_control;
503 558
504 spin_lock(&rtc_lock); 559 spin_lock(&rtc_lock);
505 /* 560
506 * In this case it is HPET RTC interrupt handler 561 /* When the HPET interrupt handler calls us, the interrupt
507 * calling us, with the interrupt information 562 * status is passed as arg1 instead of the irq number. But
508 * passed as arg1, instead of irq. 563 * always clear irq status, even when HPET is in the way.
564 *
565 * Note that HPET and RTC are almost certainly out of phase,
566 * giving different IRQ status ...
509 */ 567 */
568 irqstat = CMOS_READ(RTC_INTR_FLAGS);
569 rtc_control = CMOS_READ(RTC_CONTROL);
510 if (is_hpet_enabled()) 570 if (is_hpet_enabled())
511 irqstat = (unsigned long)irq & 0xF0; 571 irqstat = (unsigned long)irq & 0xF0;
512 else { 572 irqstat &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
513 irqstat = CMOS_READ(RTC_INTR_FLAGS);
514 rtc_control = CMOS_READ(RTC_CONTROL);
515 irqstat &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
516 }
517 573
518 /* All Linux RTC alarms should be treated as if they were oneshot. 574 /* All Linux RTC alarms should be treated as if they were oneshot.
519 * Similar code may be needed in system wakeup paths, in case the 575 * Similar code may be needed in system wakeup paths, in case the
520 * alarm woke the system. 576 * alarm woke the system.
521 */ 577 */
522 if (irqstat & RTC_AIE) { 578 if (irqstat & RTC_AIE) {
523 rtc_control = CMOS_READ(RTC_CONTROL);
524 rtc_control &= ~RTC_AIE; 579 rtc_control &= ~RTC_AIE;
525 CMOS_WRITE(rtc_control, RTC_CONTROL); 580 CMOS_WRITE(rtc_control, RTC_CONTROL);
581 hpet_mask_rtc_irq_bit(RTC_AIE);
582
526 CMOS_READ(RTC_INTR_FLAGS); 583 CMOS_READ(RTC_INTR_FLAGS);
527 } 584 }
528 spin_unlock(&rtc_lock); 585 spin_unlock(&rtc_lock);
@@ -629,18 +686,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
629 * do something about other clock frequencies. 686 * do something about other clock frequencies.
630 */ 687 */
631 cmos_rtc.rtc->irq_freq = 1024; 688 cmos_rtc.rtc->irq_freq = 1024;
632 if (!hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq)) 689 hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
633 CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT); 690 CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
691
692 /* disable irqs */
693 cmos_irq_disable(&cmos_rtc, RTC_PIE | RTC_AIE | RTC_UIE);
634 694
635 /* disable irqs.
636 *
637 * NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
638 * allegedly some older rtcs need that to handle irqs properly
639 */
640 rtc_control = CMOS_READ(RTC_CONTROL); 695 rtc_control = CMOS_READ(RTC_CONTROL);
641 rtc_control &= ~(RTC_PIE | RTC_AIE | RTC_UIE);
642 CMOS_WRITE(rtc_control, RTC_CONTROL);
643 CMOS_READ(RTC_INTR_FLAGS);
644 696
645 spin_unlock_irq(&rtc_lock); 697 spin_unlock_irq(&rtc_lock);
646 698
@@ -687,7 +739,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
687 goto cleanup2; 739 goto cleanup2;
688 } 740 }
689 741
690 pr_info("%s: alarms up to one %s%s\n", 742 pr_info("%s: alarms up to one %s%s%s\n",
691 cmos_rtc.rtc->dev.bus_id, 743 cmos_rtc.rtc->dev.bus_id,
692 is_valid_irq(rtc_irq) 744 is_valid_irq(rtc_irq)
693 ? (cmos_rtc.mon_alrm 745 ? (cmos_rtc.mon_alrm
@@ -695,8 +747,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
695 : (cmos_rtc.day_alrm 747 : (cmos_rtc.day_alrm
696 ? "month" : "day")) 748 ? "month" : "day"))
697 : "no", 749 : "no",
698 cmos_rtc.century ? ", y3k" : "" 750 cmos_rtc.century ? ", y3k" : "",
699 ); 751 is_hpet_enabled() ? ", hpet irqs" : "");
700 752
701 return 0; 753 return 0;
702 754
@@ -713,13 +765,8 @@ cleanup0:
713 765
714static void cmos_do_shutdown(void) 766static void cmos_do_shutdown(void)
715{ 767{
716 unsigned char rtc_control;
717
718 spin_lock_irq(&rtc_lock); 768 spin_lock_irq(&rtc_lock);
719 rtc_control = CMOS_READ(RTC_CONTROL); 769 cmos_irq_disable(&cmos_rtc, RTC_IRQMASK);
720 rtc_control &= ~(RTC_PIE|RTC_AIE|RTC_UIE);
721 CMOS_WRITE(rtc_control, RTC_CONTROL);
722 CMOS_READ(RTC_INTR_FLAGS);
723 spin_unlock_irq(&rtc_lock); 770 spin_unlock_irq(&rtc_lock);
724} 771}
725 772
@@ -760,17 +807,17 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
760 spin_lock_irq(&rtc_lock); 807 spin_lock_irq(&rtc_lock);
761 cmos->suspend_ctrl = tmp = CMOS_READ(RTC_CONTROL); 808 cmos->suspend_ctrl = tmp = CMOS_READ(RTC_CONTROL);
762 if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) { 809 if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) {
763 unsigned char irqstat; 810 unsigned char mask;
764 811
765 if (do_wake) 812 if (do_wake)
766 tmp &= ~(RTC_PIE|RTC_UIE); 813 mask = RTC_IRQMASK & ~RTC_AIE;
767 else 814 else
768 tmp &= ~(RTC_PIE|RTC_AIE|RTC_UIE); 815 mask = RTC_IRQMASK;
816 tmp &= ~mask;
769 CMOS_WRITE(tmp, RTC_CONTROL); 817 CMOS_WRITE(tmp, RTC_CONTROL);
770 irqstat = CMOS_READ(RTC_INTR_FLAGS); 818 hpet_mask_rtc_irq_bit(mask);
771 irqstat &= (tmp & RTC_IRQMASK) | RTC_IRQF; 819
772 if (is_intr(irqstat)) 820 cmos_checkintr(cmos, tmp);
773 rtc_update_irq(cmos->rtc, 1, irqstat);
774 } 821 }
775 spin_unlock_irq(&rtc_lock); 822 spin_unlock_irq(&rtc_lock);
776 823
@@ -796,7 +843,8 @@ static int cmos_resume(struct device *dev)
796 unsigned char tmp = cmos->suspend_ctrl; 843 unsigned char tmp = cmos->suspend_ctrl;
797 844
798 /* re-enable any irqs previously active */ 845 /* re-enable any irqs previously active */
799 if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) { 846 if (tmp & RTC_IRQMASK) {
847 unsigned char mask;
800 848
801 if (cmos->enabled_wake) { 849 if (cmos->enabled_wake) {
802 if (cmos->wake_off) 850 if (cmos->wake_off)
@@ -807,18 +855,28 @@ static int cmos_resume(struct device *dev)
807 } 855 }
808 856
809 spin_lock_irq(&rtc_lock); 857 spin_lock_irq(&rtc_lock);
810 CMOS_WRITE(tmp, RTC_CONTROL); 858 do {
811 tmp = CMOS_READ(RTC_INTR_FLAGS); 859 CMOS_WRITE(tmp, RTC_CONTROL);
812 tmp &= (cmos->suspend_ctrl & RTC_IRQMASK) | RTC_IRQF; 860 hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
813 if (is_intr(tmp)) 861
814 rtc_update_irq(cmos->rtc, 1, tmp); 862 mask = CMOS_READ(RTC_INTR_FLAGS);
863 mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
864 if (!is_hpet_enabled() || !is_intr(mask))
865 break;
866
867 /* force one-shot behavior if HPET blocked
868 * the wake alarm's irq
869 */
870 rtc_update_irq(cmos->rtc, 1, mask);
871 tmp &= ~RTC_AIE;
872 hpet_mask_rtc_irq_bit(RTC_AIE);
873 } while (mask & RTC_AIE);
815 spin_unlock_irq(&rtc_lock); 874 spin_unlock_irq(&rtc_lock);
816 } 875 }
817 876
818 pr_debug("%s: resume, ctrl %02x\n", 877 pr_debug("%s: resume, ctrl %02x\n",
819 cmos_rtc.rtc->dev.bus_id, 878 cmos_rtc.rtc->dev.bus_id,
820 cmos->suspend_ctrl); 879 tmp);
821
822 880
823 return 0; 881 return 0;
824} 882}
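Two idioms in the rtc-cmos rework deserve a note. All RTC_xIE changes now funnel through cmos_irq_enable()/cmos_irq_disable(), which flush pending interrupt status around the mask update, and the HPET glue is stubbed with typed static inline functions rather than #define no-ops, so callers are type-checked even when CONFIG_HPET_EMULATE_RTC is off. The stub idiom in generic form, with an invented config option and functions:

#ifdef CONFIG_EXAMPLE_FEATURE
/* the feature's own header supplies the real declarations */
#else
/*
 * Typed no-op fallbacks: unlike #define stubs they still check their
 * arguments and can be used inside expressions such as if (...).
 */
static inline int example_feature_enabled(void)
{
	return 0;
}

static inline int example_feature_set_freq(unsigned long freq)
{
	return 0;
}
#endif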
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0114a78b7cbb..0a870b7e5c32 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -209,7 +209,7 @@ static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
209 return (data != 0) ? (POLLIN | POLLRDNORM) : 0; 209 return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
210} 210}
211 211
212static int rtc_dev_ioctl(struct inode *inode, struct file *file, 212static long rtc_dev_ioctl(struct file *file,
213 unsigned int cmd, unsigned long arg) 213 unsigned int cmd, unsigned long arg)
214{ 214{
215 int err = 0; 215 int err = 0;
@@ -219,6 +219,10 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
219 struct rtc_wkalrm alarm; 219 struct rtc_wkalrm alarm;
220 void __user *uarg = (void __user *) arg; 220 void __user *uarg = (void __user *) arg;
221 221
222 err = mutex_lock_interruptible(&rtc->ops_lock);
223 if (err)
224 return -EBUSY;
225
222 /* check that the calling task has appropriate permissions 226 /* check that the calling task has appropriate permissions
223 * for certain ioctls. doing this check here is useful 227 * for certain ioctls. doing this check here is useful
224 * to avoid duplicate code in each driver. 228 * to avoid duplicate code in each driver.
@@ -227,26 +231,31 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
227 case RTC_EPOCH_SET: 231 case RTC_EPOCH_SET:
228 case RTC_SET_TIME: 232 case RTC_SET_TIME:
229 if (!capable(CAP_SYS_TIME)) 233 if (!capable(CAP_SYS_TIME))
230 return -EACCES; 234 err = -EACCES;
231 break; 235 break;
232 236
233 case RTC_IRQP_SET: 237 case RTC_IRQP_SET:
234 if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE)) 238 if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
235 return -EACCES; 239 err = -EACCES;
236 break; 240 break;
237 241
238 case RTC_PIE_ON: 242 case RTC_PIE_ON:
239 if (rtc->irq_freq > rtc->max_user_freq && 243 if (rtc->irq_freq > rtc->max_user_freq &&
240 !capable(CAP_SYS_RESOURCE)) 244 !capable(CAP_SYS_RESOURCE))
241 return -EACCES; 245 err = -EACCES;
242 break; 246 break;
243 } 247 }
244 248
249 if (err)
250 goto done;
251
245 /* try the driver's ioctl interface */ 252 /* try the driver's ioctl interface */
246 if (ops->ioctl) { 253 if (ops->ioctl) {
247 err = ops->ioctl(rtc->dev.parent, cmd, arg); 254 err = ops->ioctl(rtc->dev.parent, cmd, arg);
248 if (err != -ENOIOCTLCMD) 255 if (err != -ENOIOCTLCMD) {
256 mutex_unlock(&rtc->ops_lock);
249 return err; 257 return err;
258 }
250 } 259 }
251 260
252 /* if the driver does not provide the ioctl interface 261 /* if the driver does not provide the ioctl interface
@@ -265,15 +274,19 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
265 274
266 switch (cmd) { 275 switch (cmd) {
267 case RTC_ALM_READ: 276 case RTC_ALM_READ:
277 mutex_unlock(&rtc->ops_lock);
278
268 err = rtc_read_alarm(rtc, &alarm); 279 err = rtc_read_alarm(rtc, &alarm);
269 if (err < 0) 280 if (err < 0)
270 return err; 281 return err;
271 282
272 if (copy_to_user(uarg, &alarm.time, sizeof(tm))) 283 if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
273 return -EFAULT; 284 err = -EFAULT;
274 break; 285 return err;
275 286
276 case RTC_ALM_SET: 287 case RTC_ALM_SET:
288 mutex_unlock(&rtc->ops_lock);
289
277 if (copy_from_user(&alarm.time, uarg, sizeof(tm))) 290 if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
278 return -EFAULT; 291 return -EFAULT;
279 292
@@ -321,24 +334,26 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
321 } 334 }
322 } 335 }
323 336
324 err = rtc_set_alarm(rtc, &alarm); 337 return rtc_set_alarm(rtc, &alarm);
325 break;
326 338
327 case RTC_RD_TIME: 339 case RTC_RD_TIME:
340 mutex_unlock(&rtc->ops_lock);
341
328 err = rtc_read_time(rtc, &tm); 342 err = rtc_read_time(rtc, &tm);
329 if (err < 0) 343 if (err < 0)
330 return err; 344 return err;
331 345
332 if (copy_to_user(uarg, &tm, sizeof(tm))) 346 if (copy_to_user(uarg, &tm, sizeof(tm)))
333 return -EFAULT; 347 err = -EFAULT;
334 break; 348 return err;
335 349
336 case RTC_SET_TIME: 350 case RTC_SET_TIME:
351 mutex_unlock(&rtc->ops_lock);
352
337 if (copy_from_user(&tm, uarg, sizeof(tm))) 353 if (copy_from_user(&tm, uarg, sizeof(tm)))
338 return -EFAULT; 354 return -EFAULT;
339 355
340 err = rtc_set_time(rtc, &tm); 356 return rtc_set_time(rtc, &tm);
341 break;
342 357
343 case RTC_PIE_ON: 358 case RTC_PIE_ON:
344 err = rtc_irq_set_state(rtc, NULL, 1); 359 err = rtc_irq_set_state(rtc, NULL, 1);
@@ -376,34 +391,37 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
376 break; 391 break;
377#endif 392#endif
378 case RTC_WKALM_SET: 393 case RTC_WKALM_SET:
394 mutex_unlock(&rtc->ops_lock);
379 if (copy_from_user(&alarm, uarg, sizeof(alarm))) 395 if (copy_from_user(&alarm, uarg, sizeof(alarm)))
380 return -EFAULT; 396 return -EFAULT;
381 397
382 err = rtc_set_alarm(rtc, &alarm); 398 return rtc_set_alarm(rtc, &alarm);
383 break;
384 399
385 case RTC_WKALM_RD: 400 case RTC_WKALM_RD:
401 mutex_unlock(&rtc->ops_lock);
386 err = rtc_read_alarm(rtc, &alarm); 402 err = rtc_read_alarm(rtc, &alarm);
387 if (err < 0) 403 if (err < 0)
388 return err; 404 return err;
389 405
390 if (copy_to_user(uarg, &alarm, sizeof(alarm))) 406 if (copy_to_user(uarg, &alarm, sizeof(alarm)))
391 return -EFAULT; 407 err = -EFAULT;
392 break; 408 return err;
393 409
394#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 410#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
395 case RTC_UIE_OFF: 411 case RTC_UIE_OFF:
396 clear_uie(rtc); 412 clear_uie(rtc);
397 return 0; 413 break;
398 414
399 case RTC_UIE_ON: 415 case RTC_UIE_ON:
400 return set_uie(rtc); 416 err = set_uie(rtc);
401#endif 417#endif
402 default: 418 default:
403 err = -ENOTTY; 419 err = -ENOTTY;
404 break; 420 break;
405 } 421 }
406 422
423done:
424 mutex_unlock(&rtc->ops_lock);
407 return err; 425 return err;
408} 426}
409 427
@@ -432,7 +450,7 @@ static const struct file_operations rtc_dev_fops = {
432 .llseek = no_llseek, 450 .llseek = no_llseek,
433 .read = rtc_dev_read, 451 .read = rtc_dev_read,
434 .poll = rtc_dev_poll, 452 .poll = rtc_dev_poll,
435 .ioctl = rtc_dev_ioctl, 453 .unlocked_ioctl = rtc_dev_ioctl,
436 .open = rtc_dev_open, 454 .open = rtc_dev_open,
437 .release = rtc_dev_release, 455 .release = rtc_dev_release,
438 .fasync = rtc_dev_fasync, 456 .fasync = rtc_dev_fasync,
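rtc-dev.c switches from the BKL-covered .ioctl to .unlocked_ioctl, so the handler is responsible for its own locking: it takes rtc->ops_lock up front and releases it before anything that might block elsewhere or return early. The overall shape, reduced to a hypothetical character device with its own mutex (the command number is made up):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(example_lock);
static long example_state;

static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	long err;

	if (mutex_lock_interruptible(&example_lock))
		return -EBUSY;	/* rtc-dev maps interruption to -EBUSY too */

	switch (cmd) {
	case 0x1234:		/* illustrative command */
		err = put_user(example_state, (long __user *)arg);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	mutex_unlock(&example_lock);
	return err;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
};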
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
new file mode 100644
index 000000000000..b91d02a3ace9
--- /dev/null
+++ b/drivers/rtc/rtc-ds1305.c
@@ -0,0 +1,847 @@
1/*
2 * rtc-ds1305.c -- driver for DS1305 and DS1306 SPI RTC chips
3 *
4 * Copyright (C) 2008 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/bcd.h>
14#include <linux/rtc.h>
15#include <linux/workqueue.h>
16
17#include <linux/spi/spi.h>
18#include <linux/spi/ds1305.h>
19
20
21/*
22 * Registers ... mask DS1305_WRITE into register address to write,
23 * otherwise you're reading it. All non-bitmask values are BCD.
24 */
25#define DS1305_WRITE 0x80
26
27
28/* RTC date/time ... the main special cases are that we:
29 * - Need fancy "hours" encoding in 12hour mode
30 * - Don't rely on the "day-of-week" field (or tm_wday)
31 * - Are a 21st-century clock (2000 <= year < 2100)
32 */
33#define DS1305_RTC_LEN 7 /* bytes for RTC regs */
34
35#define DS1305_SEC 0x00 /* register addresses */
36#define DS1305_MIN 0x01
37#define DS1305_HOUR 0x02
38# define DS1305_HR_12 0x40 /* set == 12 hr mode */
39# define DS1305_HR_PM 0x20 /* set == PM (12hr mode) */
40#define DS1305_WDAY 0x03
41#define DS1305_MDAY 0x04
42#define DS1305_MON 0x05
43#define DS1305_YEAR 0x06
44
45
46/* The two alarms have only sec/min/hour/wday fields (ALM_LEN).
47 * DS1305_ALM_DISABLE disables a match field (some combos are bad).
48 *
49 * NOTE that since we don't use WDAY, we limit ourselves to alarms
50 * only one day into the future (vs potentially up to a week).
51 *
52 * NOTE ALSO that while we could generate once-a-second IRQs (UIE), we
53 * don't currently support them. We'd either need to do it only when
54 * no alarm is pending (not the standard model), or to use the second
55 * alarm (implying that this is a DS1305 not DS1306, *and* that either
56 * it's wired up a second IRQ we know, or that INTCN is set)
57 */
58#define DS1305_ALM_LEN 4 /* bytes for ALM regs */
59#define DS1305_ALM_DISABLE 0x80
60
61#define DS1305_ALM0(r) (0x07 + (r)) /* register addresses */
62#define DS1305_ALM1(r) (0x0b + (r))
63
64
65/* three control registers */
66#define DS1305_CONTROL_LEN 3 /* bytes of control regs */
67
68#define DS1305_CONTROL 0x0f /* register addresses */
69# define DS1305_nEOSC 0x80 /* low enables oscillator */
70# define DS1305_WP 0x40 /* write protect */
71# define DS1305_INTCN 0x04 /* clear == only int0 used */
72# define DS1306_1HZ 0x04 /* enable 1Hz output */
73# define DS1305_AEI1 0x02 /* enable ALM1 IRQ */
74# define DS1305_AEI0 0x01 /* enable ALM0 IRQ */
75#define DS1305_STATUS 0x10
76/* status has just AEIx bits, mirrored as IRQFx */
77#define DS1305_TRICKLE 0x11
78/* trickle bits are defined in <linux/spi/ds1305.h> */
79
80/* a bunch of NVRAM */
81#define DS1305_NVRAM_LEN 96 /* bytes of NVRAM */
82
83#define DS1305_NVRAM 0x20 /* register addresses */
84
85
86struct ds1305 {
87 struct spi_device *spi;
88 struct rtc_device *rtc;
89
90 struct work_struct work;
91
92 unsigned long flags;
93#define FLAG_EXITING 0
94
95 bool hr12;
96 u8 ctrl[DS1305_CONTROL_LEN];
97};
98
99
100/*----------------------------------------------------------------------*/
101
102/*
103 * Utilities ... tolerate 12-hour AM/PM notation in case of non-Linux
104 * software (like a bootloader) which may require it.
105 */
106
107static unsigned bcd2hour(u8 bcd)
108{
109 if (bcd & DS1305_HR_12) {
110 unsigned hour = 0;
111
112 bcd &= ~DS1305_HR_12;
113 if (bcd & DS1305_HR_PM) {
114 hour = 12;
115 bcd &= ~DS1305_HR_PM;
116 }
117 hour += BCD2BIN(bcd);
118 return hour - 1;
119 }
120 return BCD2BIN(bcd);
121}
122
123static u8 hour2bcd(bool hr12, int hour)
124{
125 if (hr12) {
126 hour++;
127 if (hour <= 12)
128 return DS1305_HR_12 | BIN2BCD(hour);
129 hour -= 12;
130 return DS1305_HR_12 | DS1305_HR_PM | BIN2BCD(hour);
131 }
132 return BIN2BCD(hour);
133}
134
135/*----------------------------------------------------------------------*/
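For reference, a concrete round trip through the two helpers above (illustration only, computed from the code as posted):

/*
 * With hr12 set, hour2bcd(true, 14) returns
 * DS1305_HR_12 | DS1305_HR_PM | BIN2BCD(3) == 0x63, and bcd2hour(0x63)
 * recovers 14, so tm_hour values survive a pass through the chip's
 * AM/PM representation unchanged.
 */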
136
137/*
138 * Interface to RTC framework
139 */
140
141#ifdef CONFIG_RTC_INTF_DEV
142
143/*
144 * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
145 */
146static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
147{
148 struct ds1305 *ds1305 = dev_get_drvdata(dev);
149 u8 buf[2];
150 int status = -ENOIOCTLCMD;
151
152 buf[0] = DS1305_WRITE | DS1305_CONTROL;
153 buf[1] = ds1305->ctrl[0];
154
155 switch (cmd) {
156 case RTC_AIE_OFF:
157 status = 0;
158 if (!(buf[1] & DS1305_AEI0))
159 goto done;
160 buf[1] &= ~DS1305_AEI0;
161 break;
162
163 case RTC_AIE_ON:
164 status = 0;
165 if (ds1305->ctrl[0] & DS1305_AEI0)
166 goto done;
167 buf[1] |= DS1305_AEI0;
168 break;
169 }
170 if (status == 0) {
171 status = spi_write_then_read(ds1305->spi, buf, sizeof buf,
172 NULL, 0);
173 if (status >= 0)
174 ds1305->ctrl[0] = buf[1];
175 }
176
177done:
178 return status;
179}
180
181#else
182#define ds1305_ioctl NULL
183#endif
184
185/*
186 * Get/set of date and time is pretty normal.
187 */
188
189static int ds1305_get_time(struct device *dev, struct rtc_time *time)
190{
191 struct ds1305 *ds1305 = dev_get_drvdata(dev);
192 u8 addr = DS1305_SEC;
193 u8 buf[DS1305_RTC_LEN];
194 int status;
195
196 /* Use write-then-read to get all the date/time registers
197 * since dma from stack is nonportable
198 */
199 status = spi_write_then_read(ds1305->spi, &addr, sizeof addr,
200 buf, sizeof buf);
201 if (status < 0)
202 return status;
203
204 dev_vdbg(dev, "%s: %02x %02x %02x, %02x %02x %02x %02x\n",
205 "read", buf[0], buf[1], buf[2], buf[3],
206 buf[4], buf[5], buf[6]);
207
208 /* Decode the registers */
209 time->tm_sec = BCD2BIN(buf[DS1305_SEC]);
210 time->tm_min = BCD2BIN(buf[DS1305_MIN]);
211 time->tm_hour = bcd2hour(buf[DS1305_HOUR]);
212 time->tm_wday = buf[DS1305_WDAY] - 1;
213 time->tm_mday = BCD2BIN(buf[DS1305_MDAY]);
214 time->tm_mon = BCD2BIN(buf[DS1305_MON]) - 1;
215 time->tm_year = BCD2BIN(buf[DS1305_YEAR]) + 100;
216
217 dev_vdbg(dev, "%s secs=%d, mins=%d, "
218 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
219 "read", time->tm_sec, time->tm_min,
220 time->tm_hour, time->tm_mday,
221 time->tm_mon, time->tm_year, time->tm_wday);
222
223 /* Time may not be set */
224 return rtc_valid_tm(time);
225}
226
227static int ds1305_set_time(struct device *dev, struct rtc_time *time)
228{
229 struct ds1305 *ds1305 = dev_get_drvdata(dev);
230 u8 buf[1 + DS1305_RTC_LEN];
231 u8 *bp = buf;
232
233 dev_vdbg(dev, "%s secs=%d, mins=%d, "
234 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
235 "write", time->tm_sec, time->tm_min,
236 time->tm_hour, time->tm_mday,
237 time->tm_mon, time->tm_year, time->tm_wday);
238
239 /* Write registers starting at the first time/date address. */
240 *bp++ = DS1305_WRITE | DS1305_SEC;
241
242 *bp++ = BIN2BCD(time->tm_sec);
243 *bp++ = BIN2BCD(time->tm_min);
244 *bp++ = hour2bcd(ds1305->hr12, time->tm_hour);
245 *bp++ = (time->tm_wday < 7) ? (time->tm_wday + 1) : 1;
246 *bp++ = BIN2BCD(time->tm_mday);
247 *bp++ = BIN2BCD(time->tm_mon + 1);
248 *bp++ = BIN2BCD(time->tm_year - 100);
249
250 dev_dbg(dev, "%s: %02x %02x %02x, %02x %02x %02x %02x\n",
251 "write", buf[1], buf[2], buf[3],
252 buf[4], buf[5], buf[6], buf[7]);
253
254 /* use write-then-read since dma from stack is nonportable */
255 return spi_write_then_read(ds1305->spi, buf, sizeof buf,
256 NULL, 0);
257}
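/* Note on the weekday fields above: the chip counts days of the week as
 * 1..7, while struct rtc_time uses 0..6, hence the "- 1" when reading
 * and the "+ 1" (with a fallback to 1) when writing.
 */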
258
259/*
260 * Get/set of alarm is a bit funky:
261 *
262 * - First there's the inherent raciness of getting the (partitioned)
263 * status of an alarm that could trigger while we're reading parts
264 * of that status.
265 *
266 * - Second there's its limited range (we could increase it a bit by
267 * relying on WDAY), which means it will easily roll over.
268 *
269 * - Third there's the choice of two alarms and alarm signals.
270 * Here we use ALM0 and expect that nINT0 (open drain) is used;
271 * that's the only real option for DS1306 runtime alarms, and is
272 * natural on DS1305.
273 *
274 * - Fourth, there's also ALM1, and a second interrupt signal:
275 * + On DS1305 ALM1 uses nINT1 (when INTCN=1) else nINT0;
276 * + On DS1306 ALM1 only uses INT1 (an active high pulse)
277 * and it won't work when VCC1 is active.
278 *
279 * So to be most general, we should probably set both alarms to the
280 * same value, letting ALM1 be the wakeup event source on DS1306
281 * and handling several wiring options on DS1305.
282 *
283 * - Fifth, we support the polled mode (as well as possible; why not?)
284 * even when no interrupt line is wired to an IRQ.
285 */
286
287/*
288 * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
289 */
290static int ds1305_get_alarm(struct device *dev, struct rtc_wkalrm *alm)
291{
292 struct ds1305 *ds1305 = dev_get_drvdata(dev);
293 struct spi_device *spi = ds1305->spi;
294 u8 addr;
295 int status;
296 u8 buf[DS1305_ALM_LEN];
297
298 /* Refresh control register cache BEFORE reading ALM0 registers,
299 * since reading alarm registers acks any pending IRQ. That
300 * makes returning "pending" status a bit of a lie, but that bit
301 * of status is at best fragile anyway (given IRQ handlers).
302 */
303 addr = DS1305_CONTROL;
304 status = spi_write_then_read(spi, &addr, sizeof addr,
305 ds1305->ctrl, sizeof ds1305->ctrl);
306 if (status < 0)
307 return status;
308
309 alm->enabled = !!(ds1305->ctrl[0] & DS1305_AEI0);
310 alm->pending = !!(ds1305->ctrl[1] & DS1305_AEI0);
311
312 /* get and check ALM0 registers */
313 addr = DS1305_ALM0(DS1305_SEC);
314 status = spi_write_then_read(spi, &addr, sizeof addr,
315 buf, sizeof buf);
316 if (status < 0)
317 return status;
318
319 dev_vdbg(dev, "%s: %02x %02x %02x %02x\n",
320 "alm0 read", buf[DS1305_SEC], buf[DS1305_MIN],
321 buf[DS1305_HOUR], buf[DS1305_WDAY]);
322
323 if ((DS1305_ALM_DISABLE & buf[DS1305_SEC])
324 || (DS1305_ALM_DISABLE & buf[DS1305_MIN])
325 || (DS1305_ALM_DISABLE & buf[DS1305_HOUR]))
326 return -EIO;
327
328 /* Stuff these values into alm->time and let RTC framework code
329 * fill in the rest ... and also handle rollover to tomorrow when
330 * that's needed.
331 */
332 alm->time.tm_sec = BCD2BIN(buf[DS1305_SEC]);
333 alm->time.tm_min = BCD2BIN(buf[DS1305_MIN]);
334 alm->time.tm_hour = bcd2hour(buf[DS1305_HOUR]);
335 alm->time.tm_mday = -1;
336 alm->time.tm_mon = -1;
337 alm->time.tm_year = -1;
338 /* next three fields are unused by Linux */
339 alm->time.tm_wday = -1;
340	alm->time.tm_yday = -1;
341 alm->time.tm_isdst = -1;
342
343 return 0;
344}
345
346/*
347 * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
348 */
349static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
350{
351 struct ds1305 *ds1305 = dev_get_drvdata(dev);
352 struct spi_device *spi = ds1305->spi;
353 unsigned long now, later;
354 struct rtc_time tm;
355 int status;
356 u8 buf[1 + DS1305_ALM_LEN];
357
358 /* convert desired alarm to time_t */
359 status = rtc_tm_to_time(&alm->time, &later);
360 if (status < 0)
361 return status;
362
363 /* Read current time as time_t */
364 status = ds1305_get_time(dev, &tm);
365 if (status < 0)
366 return status;
367 status = rtc_tm_to_time(&tm, &now);
368 if (status < 0)
369 return status;
370
371 /* make sure alarm fires within the next 24 hours */
372 if (later <= now)
373 return -EINVAL;
374 if ((later - now) > 24 * 60 * 60)
375 return -EDOM;
376
377 /* disable alarm if needed */
378 if (ds1305->ctrl[0] & DS1305_AEI0) {
379 ds1305->ctrl[0] &= ~DS1305_AEI0;
380
381 buf[0] = DS1305_WRITE | DS1305_CONTROL;
382 buf[1] = ds1305->ctrl[0];
383 status = spi_write_then_read(ds1305->spi, buf, 2, NULL, 0);
384 if (status < 0)
385 return status;
386 }
387
388 /* write alarm */
389 buf[0] = DS1305_WRITE | DS1305_ALM0(DS1305_SEC);
390 buf[1 + DS1305_SEC] = BIN2BCD(alm->time.tm_sec);
391 buf[1 + DS1305_MIN] = BIN2BCD(alm->time.tm_min);
392 buf[1 + DS1305_HOUR] = hour2bcd(ds1305->hr12, alm->time.tm_hour);
393 buf[1 + DS1305_WDAY] = DS1305_ALM_DISABLE;
394
395 dev_dbg(dev, "%s: %02x %02x %02x %02x\n",
396 "alm0 write", buf[1 + DS1305_SEC], buf[1 + DS1305_MIN],
397 buf[1 + DS1305_HOUR], buf[1 + DS1305_WDAY]);
398
399 status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0);
400 if (status < 0)
401 return status;
402
403 /* enable alarm if requested */
404 if (alm->enabled) {
405 ds1305->ctrl[0] |= DS1305_AEI0;
406
407 buf[0] = DS1305_WRITE | DS1305_CONTROL;
408 buf[1] = ds1305->ctrl[0];
409 status = spi_write_then_read(ds1305->spi, buf, 2, NULL, 0);
410 }
411
412 return status;
413}
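/* Example of the window enforced above (hypothetical numbers): with the
 * clock at 08:00:00, an alarm at 07:59:00 the next day is accepted
 * (under 24 hours away), one at 09:00:00 two days out fails with -EDOM,
 * and one in the past fails with -EINVAL.
 */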
414
415#ifdef CONFIG_PROC_FS
416
417static int ds1305_proc(struct device *dev, struct seq_file *seq)
418{
419 struct ds1305 *ds1305 = dev_get_drvdata(dev);
420 char *diodes = "no";
421 char *resistors = "";
422
423 /* ctrl[2] is treated as read-only; no locking needed */
424 if ((ds1305->ctrl[2] & 0xf0) == DS1305_TRICKLE_MAGIC) {
425 switch (ds1305->ctrl[2] & 0x0c) {
426 case DS1305_TRICKLE_DS2:
427 diodes = "2 diodes, ";
428 break;
429 case DS1305_TRICKLE_DS1:
430 diodes = "1 diode, ";
431 break;
432 default:
433 goto done;
434 }
435 switch (ds1305->ctrl[2] & 0x03) {
436 case DS1305_TRICKLE_2K:
437 resistors = "2k Ohm";
438 break;
439 case DS1305_TRICKLE_4K:
440 resistors = "4k Ohm";
441 break;
442 case DS1305_TRICKLE_8K:
443 resistors = "8k Ohm";
444 break;
445 default:
446 diodes = "no";
447 break;
448 }
449 }
450
451done:
452 return seq_printf(seq,
453 "trickle_charge\t: %s%s\n",
454 diodes, resistors);
455}
456
457#else
458#define ds1305_proc NULL
459#endif
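/* The hook above adds one line to /proc/driver/rtc, for example
 * "trickle_charge	: 2 diodes, 2k Ohm" when the trickle charger is
 * programmed, or "trickle_charge	: no" when it is not.
 */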
460
461static const struct rtc_class_ops ds1305_ops = {
462 .ioctl = ds1305_ioctl,
463 .read_time = ds1305_get_time,
464 .set_time = ds1305_set_time,
465 .read_alarm = ds1305_get_alarm,
466 .set_alarm = ds1305_set_alarm,
467 .proc = ds1305_proc,
468};
469
470static void ds1305_work(struct work_struct *work)
471{
472 struct ds1305 *ds1305 = container_of(work, struct ds1305, work);
473 struct mutex *lock = &ds1305->rtc->ops_lock;
474 struct spi_device *spi = ds1305->spi;
475 u8 buf[3];
476 int status;
477
478 /* lock to protect ds1305->ctrl */
479 mutex_lock(lock);
480
481 /* Disable the IRQ, and clear its status ... for now, we "know"
482 * that if more than one alarm is active, they're in sync.
483 * Note that reading ALM data registers also clears IRQ status.
484 */
485 ds1305->ctrl[0] &= ~(DS1305_AEI1 | DS1305_AEI0);
486 ds1305->ctrl[1] = 0;
487
488 buf[0] = DS1305_WRITE | DS1305_CONTROL;
489 buf[1] = ds1305->ctrl[0];
490 buf[2] = 0;
491
492 status = spi_write_then_read(spi, buf, sizeof buf,
493 NULL, 0);
494 if (status < 0)
495 dev_dbg(&spi->dev, "clear irq --> %d\n", status);
496
497 mutex_unlock(lock);
498
499 if (!test_bit(FLAG_EXITING, &ds1305->flags))
500 enable_irq(spi->irq);
501
502 /* rtc_update_irq() requires an IRQ-disabled context */
503 local_irq_disable();
504 rtc_update_irq(ds1305->rtc, 1, RTC_AF | RTC_IRQF);
505 local_irq_enable();
506}
507
508/*
509 * This "real" IRQ handler hands off to a workqueue mostly to allow
510 * mutex locking for ds1305->ctrl ... unlike I2C, we could issue async
511 * I/O requests in IRQ context (to clear the IRQ status).
512 */
513static irqreturn_t ds1305_irq(int irq, void *p)
514{
515 struct ds1305 *ds1305 = p;
516
517 disable_irq(irq);
518 schedule_work(&ds1305->work);
519 return IRQ_HANDLED;
520}
521
522/*----------------------------------------------------------------------*/
523
524/*
525 * Interface for NVRAM
526 */
527
528static void msg_init(struct spi_message *m, struct spi_transfer *x,
529 u8 *addr, size_t count, char *tx, char *rx)
530{
531 spi_message_init(m);
532 memset(x, 0, 2 * sizeof(*x));
533
534 x->tx_buf = addr;
535 x->len = 1;
536 spi_message_add_tail(x, m);
537
538 x++;
539
540 x->tx_buf = tx;
541 x->rx_buf = rx;
542 x->len = count;
543 spi_message_add_tail(x, m);
544}
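/* msg_init() builds the usual two-transfer SPI message for this chip:
 * transfer 1 writes the one-byte register address, transfer 2 moves the
 * payload. The NVRAM read below passes (..., NULL, buf) so the second
 * transfer only receives; the write passes (..., buf, NULL) so it only
 * transmits.
 */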
545
546static ssize_t
547ds1305_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
548 char *buf, loff_t off, size_t count)
549{
550 struct spi_device *spi;
551 u8 addr;
552 struct spi_message m;
553 struct spi_transfer x[2];
554 int status;
555
556 spi = container_of(kobj, struct spi_device, dev.kobj);
557
558 if (unlikely(off >= DS1305_NVRAM_LEN))
559 return 0;
560 if (count >= DS1305_NVRAM_LEN)
561 count = DS1305_NVRAM_LEN;
562 if ((off + count) > DS1305_NVRAM_LEN)
563 count = DS1305_NVRAM_LEN - off;
564 if (unlikely(!count))
565 return count;
566
567 addr = DS1305_NVRAM + off;
568 msg_init(&m, x, &addr, count, NULL, buf);
569
570 status = spi_sync(spi, &m);
571 if (status < 0)
572 dev_err(&spi->dev, "nvram %s error %d\n", "read", status);
573 return (status < 0) ? status : count;
574}
575
576static ssize_t
577ds1305_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
578 char *buf, loff_t off, size_t count)
579{
580 struct spi_device *spi;
581 u8 addr;
582 struct spi_message m;
583 struct spi_transfer x[2];
584 int status;
585
586 spi = container_of(kobj, struct spi_device, dev.kobj);
587
588 if (unlikely(off >= DS1305_NVRAM_LEN))
589 return -EFBIG;
590 if (count >= DS1305_NVRAM_LEN)
591 count = DS1305_NVRAM_LEN;
592 if ((off + count) > DS1305_NVRAM_LEN)
593 count = DS1305_NVRAM_LEN - off;
594 if (unlikely(!count))
595 return count;
596
597 addr = (DS1305_WRITE | DS1305_NVRAM) + off;
598 msg_init(&m, x, &addr, count, buf, NULL);
599
600 status = spi_sync(spi, &m);
601 if (status < 0)
602 dev_err(&spi->dev, "nvram %s error %d\n", "write", status);
603 return (status < 0) ? status : count;
604}
605
606static struct bin_attribute nvram = {
607 .attr.name = "nvram",
608 .attr.mode = S_IRUGO | S_IWUSR,
609 .attr.owner = THIS_MODULE,
610 .read = ds1305_nvram_read,
611 .write = ds1305_nvram_write,
612 .size = DS1305_NVRAM_LEN,
613};
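/* This exposes the 96 bytes of NVRAM as a binary sysfs attribute on the
 * SPI device. A usage sketch (the exact path depends on the board):
 *
 *	hexdump -C /sys/bus/spi/devices/spi1.0/nvram
 */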
614
615/*----------------------------------------------------------------------*/
616
617/*
618 * Interface to SPI stack
619 */
620
621static int __devinit ds1305_probe(struct spi_device *spi)
622{
623 struct ds1305 *ds1305;
624 struct rtc_device *rtc;
625 int status;
626 u8 addr, value;
627 struct ds1305_platform_data *pdata = spi->dev.platform_data;
628 bool write_ctrl = false;
629
630 /* Sanity check board setup data. This may be hooked up
631 * in 3wire mode, but we don't care. Note that unless
632 * there's an inverter in place, this needs SPI_CS_HIGH!
633 */
634 if ((spi->bits_per_word && spi->bits_per_word != 8)
635 || (spi->max_speed_hz > 2000000)
636 || !(spi->mode & SPI_CPHA))
637 return -EINVAL;
638
639 /* set up driver data */
640 ds1305 = kzalloc(sizeof *ds1305, GFP_KERNEL);
641 if (!ds1305)
642 return -ENOMEM;
643 ds1305->spi = spi;
644 spi_set_drvdata(spi, ds1305);
645
646 /* read and cache control registers */
647 addr = DS1305_CONTROL;
648 status = spi_write_then_read(spi, &addr, sizeof addr,
649 ds1305->ctrl, sizeof ds1305->ctrl);
650 if (status < 0) {
651 dev_dbg(&spi->dev, "can't %s, %d\n",
652 "read", status);
653 goto fail0;
654 }
655
656 dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n",
657 "read", ds1305->ctrl[0],
658 ds1305->ctrl[1], ds1305->ctrl[2]);
659
660 /* Sanity check register values ... partially compensating for the
661 * fact that SPI has no device handshake. A pullup on MISO would
662 * make these tests fail; but not all systems will have one. If
663 * some register is neither 0x00 nor 0xff, a chip is likely there.
664 */
665 if ((ds1305->ctrl[0] & 0x38) != 0 || (ds1305->ctrl[1] & 0xfc) != 0) {
666 dev_dbg(&spi->dev, "RTC chip is not present\n");
667 status = -ENODEV;
668 goto fail0;
669 }
670 if (ds1305->ctrl[2] == 0)
671 dev_dbg(&spi->dev, "chip may not be present\n");
672
673 /* enable writes if needed ... if we were paranoid it would
674 * make sense to enable them only when absolutely necessary.
675 */
676 if (ds1305->ctrl[0] & DS1305_WP) {
677 u8 buf[2];
678
679 ds1305->ctrl[0] &= ~DS1305_WP;
680
681 buf[0] = DS1305_WRITE | DS1305_CONTROL;
682 buf[1] = ds1305->ctrl[0];
683 status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0);
684
685 dev_dbg(&spi->dev, "clear WP --> %d\n", status);
686 if (status < 0)
687 goto fail0;
688 }
689
690 /* on DS1305, maybe start oscillator; like most low power
691 * oscillators, it may take a second to stabilize
692 */
693 if (ds1305->ctrl[0] & DS1305_nEOSC) {
694 ds1305->ctrl[0] &= ~DS1305_nEOSC;
695 write_ctrl = true;
696 dev_warn(&spi->dev, "SET TIME!\n");
697 }
698
699 /* ack any pending IRQs */
700 if (ds1305->ctrl[1]) {
701 ds1305->ctrl[1] = 0;
702 write_ctrl = true;
703 }
704
705 /* this may need one-time (re)init */
706 if (pdata) {
707 /* maybe enable trickle charge */
708 if (((ds1305->ctrl[2] & 0xf0) != DS1305_TRICKLE_MAGIC)) {
709 ds1305->ctrl[2] = DS1305_TRICKLE_MAGIC
710 | pdata->trickle;
711 write_ctrl = true;
712 }
713
714 /* on DS1306, configure 1 Hz signal */
715 if (pdata->is_ds1306) {
716 if (pdata->en_1hz) {
717 if (!(ds1305->ctrl[0] & DS1306_1HZ)) {
718 ds1305->ctrl[0] |= DS1306_1HZ;
719 write_ctrl = true;
720 }
721 } else {
722 if (ds1305->ctrl[0] & DS1306_1HZ) {
723 ds1305->ctrl[0] &= ~DS1306_1HZ;
724 write_ctrl = true;
725 }
726 }
727 }
728 }
729
730 if (write_ctrl) {
731 u8 buf[4];
732
733 buf[0] = DS1305_WRITE | DS1305_CONTROL;
734 buf[1] = ds1305->ctrl[0];
735 buf[2] = ds1305->ctrl[1];
736 buf[3] = ds1305->ctrl[2];
737 status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0);
738 if (status < 0) {
739 dev_dbg(&spi->dev, "can't %s, %d\n",
740 "write", status);
741 goto fail0;
742 }
743
744 dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n",
745 "write", ds1305->ctrl[0],
746 ds1305->ctrl[1], ds1305->ctrl[2]);
747 }
748
749 /* see if non-Linux software set up AM/PM mode */
750 addr = DS1305_HOUR;
751 status = spi_write_then_read(spi, &addr, sizeof addr,
752 &value, sizeof value);
753 if (status < 0) {
754 dev_dbg(&spi->dev, "read HOUR --> %d\n", status);
755 goto fail0;
756 }
757
758 ds1305->hr12 = (DS1305_HR_12 & value) != 0;
759 if (ds1305->hr12)
760 dev_dbg(&spi->dev, "AM/PM\n");
761
762 /* register RTC ... from here on, ds1305->ctrl needs locking */
763 rtc = rtc_device_register("ds1305", &spi->dev,
764 &ds1305_ops, THIS_MODULE);
765 if (IS_ERR(rtc)) {
766 status = PTR_ERR(rtc);
767 dev_dbg(&spi->dev, "register rtc --> %d\n", status);
768 goto fail0;
769 }
770 ds1305->rtc = rtc;
771
772 /* Maybe set up alarm IRQ; be ready to handle it triggering right
773 * away. NOTE that we don't share this. The signal is active low,
774 * and we can't ack it before a SPI message delay. We temporarily
775 * disable the IRQ until it's acked, which lets us work with more
776 * IRQ trigger modes (not all IRQ controllers can do falling edge).
777 */
778 if (spi->irq) {
779 INIT_WORK(&ds1305->work, ds1305_work);
780 status = request_irq(spi->irq, ds1305_irq,
781 0, dev_name(&rtc->dev), ds1305);
782 if (status < 0) {
783 dev_dbg(&spi->dev, "request_irq %d --> %d\n",
784 spi->irq, status);
785 goto fail1;
786 }
787 }
788
789 /* export NVRAM */
790 status = sysfs_create_bin_file(&spi->dev.kobj, &nvram);
791 if (status < 0) {
792 dev_dbg(&spi->dev, "register nvram --> %d\n", status);
793 goto fail2;
794 }
795
796 return 0;
797
798fail2:
799	if (spi->irq)
		free_irq(spi->irq, ds1305);
800fail1:
801 rtc_device_unregister(rtc);
802fail0:
803 kfree(ds1305);
804 return status;
805}
806
807static int __devexit ds1305_remove(struct spi_device *spi)
808{
809 struct ds1305 *ds1305 = spi_get_drvdata(spi);
810
811 sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
812
813 /* carefully shut down irq and workqueue, if present */
814 if (spi->irq) {
815 set_bit(FLAG_EXITING, &ds1305->flags);
816 free_irq(spi->irq, ds1305);
817 flush_scheduled_work();
818 }
819
820 rtc_device_unregister(ds1305->rtc);
821 spi_set_drvdata(spi, NULL);
822 kfree(ds1305);
823 return 0;
824}
825
826static struct spi_driver ds1305_driver = {
827 .driver.name = "rtc-ds1305",
828 .driver.owner = THIS_MODULE,
829 .probe = ds1305_probe,
830 .remove = __devexit_p(ds1305_remove),
831 /* REVISIT add suspend/resume */
832};
833
834static int __init ds1305_init(void)
835{
836 return spi_register_driver(&ds1305_driver);
837}
838module_init(ds1305_init);
839
840static void __exit ds1305_exit(void)
841{
842 spi_unregister_driver(&ds1305_driver);
843}
844module_exit(ds1305_exit);
845
846MODULE_DESCRIPTION("RTC driver for DS1305 and DS1306 chips");
847MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 0a19c06019be..24bc1689fc74 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -13,21 +13,21 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/module.h> 16#include <linux/bcd.h>
17#include <linux/i2c.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/rtc.h>
19#include <linux/slab.h> 22#include <linux/slab.h>
20#include <linux/smp_lock.h> 23#include <linux/smp_lock.h>
21#include <linux/string.h> 24#include <linux/string.h>
22#include <linux/i2c.h>
23#include <linux/rtc.h>
24#include <linux/bcd.h>
25#ifdef CONFIG_RTC_DRV_M41T80_WDT 25#ifdef CONFIG_RTC_DRV_M41T80_WDT
26#include <linux/miscdevice.h>
27#include <linux/watchdog.h>
28#include <linux/reboot.h>
29#include <linux/fs.h> 26#include <linux/fs.h>
30#include <linux/ioctl.h> 27#include <linux/ioctl.h>
28#include <linux/miscdevice.h>
29#include <linux/reboot.h>
30#include <linux/watchdog.h>
31#endif 31#endif
32 32
33#define M41T80_REG_SSEC 0 33#define M41T80_REG_SSEC 0
@@ -631,14 +631,12 @@ static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
631 return -EFAULT; 631 return -EFAULT;
632 632
633 if (rv & WDIOS_DISABLECARD) { 633 if (rv & WDIOS_DISABLECARD) {
634 printk(KERN_INFO 634 pr_info("rtc-m41t80: disable watchdog\n");
635 "rtc-m41t80: disable watchdog\n");
636 wdt_disable(); 635 wdt_disable();
637 } 636 }
638 637
639 if (rv & WDIOS_ENABLECARD) { 638 if (rv & WDIOS_ENABLECARD) {
640 printk(KERN_INFO 639 pr_info("rtc-m41t80: enable watchdog\n");
641 "rtc-m41t80: enable watchdog\n");
642 wdt_ping(); 640 wdt_ping();
643 } 641 }
644 642
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
new file mode 100644
index 000000000000..9b19499c829e
--- /dev/null
+++ b/drivers/rtc/rtc-m41t94.c
@@ -0,0 +1,173 @@
1/*
2 * Driver for ST M41T94 SPI RTC
3 *
4 * Copyright (C) 2008 Kim B. Heino
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/platform_device.h>
14#include <linux/rtc.h>
15#include <linux/spi/spi.h>
16#include <linux/bcd.h>
17
18#define M41T94_REG_SECONDS 0x01
19#define M41T94_REG_MINUTES 0x02
20#define M41T94_REG_HOURS 0x03
21#define M41T94_REG_WDAY 0x04
22#define M41T94_REG_DAY 0x05
23#define M41T94_REG_MONTH 0x06
24#define M41T94_REG_YEAR 0x07
25#define M41T94_REG_HT 0x0c
26
27#define M41T94_BIT_HALT 0x40
28#define M41T94_BIT_STOP 0x80
29#define M41T94_BIT_CB 0x40
30#define M41T94_BIT_CEB 0x80
31
32static int m41t94_set_time(struct device *dev, struct rtc_time *tm)
33{
34 struct spi_device *spi = to_spi_device(dev);
35 u8 buf[8]; /* write cmd + 7 registers */
36
37 dev_dbg(dev, "%s secs=%d, mins=%d, "
38 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
39 "write", tm->tm_sec, tm->tm_min,
40 tm->tm_hour, tm->tm_mday,
41 tm->tm_mon, tm->tm_year, tm->tm_wday);
42
43 buf[0] = 0x80 | M41T94_REG_SECONDS; /* write time + date */
44 buf[M41T94_REG_SECONDS] = BIN2BCD(tm->tm_sec);
45 buf[M41T94_REG_MINUTES] = BIN2BCD(tm->tm_min);
46 buf[M41T94_REG_HOURS] = BIN2BCD(tm->tm_hour);
47 buf[M41T94_REG_WDAY] = BIN2BCD(tm->tm_wday + 1);
48 buf[M41T94_REG_DAY] = BIN2BCD(tm->tm_mday);
49 buf[M41T94_REG_MONTH] = BIN2BCD(tm->tm_mon + 1);
50
51 buf[M41T94_REG_HOURS] |= M41T94_BIT_CEB;
52 if (tm->tm_year >= 100)
53 buf[M41T94_REG_HOURS] |= M41T94_BIT_CB;
54 buf[M41T94_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
55
56 return spi_write(spi, buf, 8);
57}
58
59static int m41t94_read_time(struct device *dev, struct rtc_time *tm)
60{
61 struct spi_device *spi = to_spi_device(dev);
62 u8 buf[2];
63 int ret, hour;
64
65 /* clear halt update bit */
66 ret = spi_w8r8(spi, M41T94_REG_HT);
67 if (ret < 0)
68 return ret;
69 if (ret & M41T94_BIT_HALT) {
70 buf[0] = 0x80 | M41T94_REG_HT;
71 buf[1] = ret & ~M41T94_BIT_HALT;
72 spi_write(spi, buf, 2);
73 }
74
75 /* clear stop bit */
76 ret = spi_w8r8(spi, M41T94_REG_SECONDS);
77 if (ret < 0)
78 return ret;
79 if (ret & M41T94_BIT_STOP) {
80 buf[0] = 0x80 | M41T94_REG_SECONDS;
81 buf[1] = ret & ~M41T94_BIT_STOP;
82 spi_write(spi, buf, 2);
83 }
84
85 tm->tm_sec = BCD2BIN(spi_w8r8(spi, M41T94_REG_SECONDS));
86 tm->tm_min = BCD2BIN(spi_w8r8(spi, M41T94_REG_MINUTES));
87 hour = spi_w8r8(spi, M41T94_REG_HOURS);
88 tm->tm_hour = BCD2BIN(hour & 0x3f);
89 tm->tm_wday = BCD2BIN(spi_w8r8(spi, M41T94_REG_WDAY)) - 1;
90 tm->tm_mday = BCD2BIN(spi_w8r8(spi, M41T94_REG_DAY));
91 tm->tm_mon = BCD2BIN(spi_w8r8(spi, M41T94_REG_MONTH)) - 1;
92 tm->tm_year = BCD2BIN(spi_w8r8(spi, M41T94_REG_YEAR));
93 if ((hour & M41T94_BIT_CB) || !(hour & M41T94_BIT_CEB))
94 tm->tm_year += 100;
95
96 dev_dbg(dev, "%s secs=%d, mins=%d, "
97 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
98 "read", tm->tm_sec, tm->tm_min,
99 tm->tm_hour, tm->tm_mday,
100 tm->tm_mon, tm->tm_year, tm->tm_wday);
101
102 /* initial clock setting can be undefined */
103 return rtc_valid_tm(tm);
104}
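/* Century handling in the two functions above: set_time() always sets
 * CEB (century enable) and sets CB for years >= 2000; read_time() adds
 * 100 to tm_year when CB is set, or when CEB is clear and the century
 * is therefore unknown and assumed to be 20xx.
 */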
105
106static const struct rtc_class_ops m41t94_rtc_ops = {
107 .read_time = m41t94_read_time,
108 .set_time = m41t94_set_time,
109};
110
111static struct spi_driver m41t94_driver;
112
113static int __devinit m41t94_probe(struct spi_device *spi)
114{
115 struct rtc_device *rtc;
116 int res;
117
118 spi->bits_per_word = 8;
119 spi_setup(spi);
120
121 res = spi_w8r8(spi, M41T94_REG_SECONDS);
122 if (res < 0) {
123 dev_err(&spi->dev, "not found.\n");
124 return res;
125 }
126
127 rtc = rtc_device_register(m41t94_driver.driver.name,
128 &spi->dev, &m41t94_rtc_ops, THIS_MODULE);
129 if (IS_ERR(rtc))
130 return PTR_ERR(rtc);
131
132 dev_set_drvdata(&spi->dev, rtc);
133
134 return 0;
135}
136
137static int __devexit m41t94_remove(struct spi_device *spi)
138{
139	struct rtc_device *rtc = spi_get_drvdata(spi);
140
141 if (rtc)
142 rtc_device_unregister(rtc);
143
144 return 0;
145}
146
147static struct spi_driver m41t94_driver = {
148 .driver = {
149 .name = "rtc-m41t94",
150 .bus = &spi_bus_type,
151 .owner = THIS_MODULE,
152 },
153 .probe = m41t94_probe,
154 .remove = __devexit_p(m41t94_remove),
155};
156
157static __init int m41t94_init(void)
158{
159 return spi_register_driver(&m41t94_driver);
160}
161
162module_init(m41t94_init);
163
164static __exit void m41t94_exit(void)
165{
166 spi_unregister_driver(&m41t94_driver);
167}
168
169module_exit(m41t94_exit);
170
171MODULE_AUTHOR("Kim B. Heino <Kim.Heino@bluegiga.com>");
172MODULE_DESCRIPTION("Driver for ST M41T94 SPI RTC");
173MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index eb23d8423f42..8876605d4d4b 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -92,18 +92,6 @@
92#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr)) 92#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr))
93 93
94 94
95/* platform_bus isn't hotpluggable, so for static linkage it'd be safe
96 * to get rid of probe() and remove() code ... too bad the driver struct
97 * remembers probe(), that's about 25% of the runtime footprint!!
98 */
99#ifndef MODULE
100#undef __devexit
101#undef __devexit_p
102#define __devexit __exit
103#define __devexit_p __exit_p
104#endif
105
106
107/* we rely on the rtc framework to handle locking (rtc->ops_lock), 95/* we rely on the rtc framework to handle locking (rtc->ops_lock),
108 * so the only other requirement is that register accesses which 96 * so the only other requirement is that register accesses which
109 * require BUSY to be clear are made with IRQs locally disabled 97 * require BUSY to be clear are made with IRQs locally disabled
@@ -324,7 +312,7 @@ static struct rtc_class_ops omap_rtc_ops = {
324static int omap_rtc_alarm; 312static int omap_rtc_alarm;
325static int omap_rtc_timer; 313static int omap_rtc_timer;
326 314
327static int __devinit omap_rtc_probe(struct platform_device *pdev) 315static int __init omap_rtc_probe(struct platform_device *pdev)
328{ 316{
329 struct resource *res, *mem; 317 struct resource *res, *mem;
330 struct rtc_device *rtc; 318 struct rtc_device *rtc;
@@ -440,7 +428,7 @@ fail:
440 return -EIO; 428 return -EIO;
441} 429}
442 430
443static int __devexit omap_rtc_remove(struct platform_device *pdev) 431static int __exit omap_rtc_remove(struct platform_device *pdev)
444{ 432{
445 struct rtc_device *rtc = platform_get_drvdata(pdev);; 433 struct rtc_device *rtc = platform_get_drvdata(pdev);;
446 434
@@ -498,8 +486,7 @@ static void omap_rtc_shutdown(struct platform_device *pdev)
498 486
499MODULE_ALIAS("platform:omap_rtc"); 487MODULE_ALIAS("platform:omap_rtc");
500static struct platform_driver omap_rtc_driver = { 488static struct platform_driver omap_rtc_driver = {
501 .probe = omap_rtc_probe, 489 .remove = __exit_p(omap_rtc_remove),
502 .remove = __devexit_p(omap_rtc_remove),
503 .suspend = omap_rtc_suspend, 490 .suspend = omap_rtc_suspend,
504 .resume = omap_rtc_resume, 491 .resume = omap_rtc_resume,
505 .shutdown = omap_rtc_shutdown, 492 .shutdown = omap_rtc_shutdown,
@@ -511,7 +498,7 @@ static struct platform_driver omap_rtc_driver = {
511 498
512static int __init rtc_init(void) 499static int __init rtc_init(void)
513{ 500{
514 return platform_driver_register(&omap_rtc_driver); 501 return platform_driver_probe(&omap_rtc_driver, omap_rtc_probe);
515} 502}
516module_init(rtc_init); 503module_init(rtc_init);
517 504
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 3d09d8f0b1f0..d388c662bf4b 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -2,6 +2,7 @@
2 * drivers/rtc/rtc-pcf8583.c 2 * drivers/rtc/rtc-pcf8583.c
3 * 3 *
4 * Copyright (C) 2000 Russell King 4 * Copyright (C) 2000 Russell King
5 * Copyright (C) 2008 Wolfram Sang & Juergen Beisert, Pengutronix
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -14,7 +15,6 @@
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/i2c.h> 16#include <linux/i2c.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/string.h>
18#include <linux/rtc.h> 18#include <linux/rtc.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
@@ -27,7 +27,6 @@ struct rtc_mem {
27}; 27};
28 28
29struct pcf8583 { 29struct pcf8583 {
30 struct i2c_client client;
31 struct rtc_device *rtc; 30 struct rtc_device *rtc;
32 unsigned char ctrl; 31 unsigned char ctrl;
33}; 32};
@@ -40,10 +39,6 @@ struct pcf8583 {
40#define CTRL_ALARM 0x02 39#define CTRL_ALARM 0x02
41#define CTRL_TIMER 0x01 40#define CTRL_TIMER 0x01
42 41
43static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END };
44
45/* Module parameters */
46I2C_CLIENT_INSMOD;
47 42
48static struct i2c_driver pcf8583_driver; 43static struct i2c_driver pcf8583_driver;
49 44
@@ -269,106 +264,60 @@ static const struct rtc_class_ops pcf8583_rtc_ops = {
269 .set_time = pcf8583_rtc_set_time, 264 .set_time = pcf8583_rtc_set_time,
270}; 265};
271 266
272static int pcf8583_probe(struct i2c_adapter *adap, int addr, int kind); 267static int pcf8583_probe(struct i2c_client *client,
273 268 const struct i2c_device_id *id)
274static int pcf8583_attach(struct i2c_adapter *adap)
275{
276 return i2c_probe(adap, &addr_data, pcf8583_probe);
277}
278
279static int pcf8583_detach(struct i2c_client *client)
280{
281 int err;
282 struct pcf8583 *pcf = i2c_get_clientdata(client);
283 struct rtc_device *rtc = pcf->rtc;
284
285 if (rtc)
286 rtc_device_unregister(rtc);
287
288 if ((err = i2c_detach_client(client)))
289 return err;
290
291 kfree(pcf);
292 return 0;
293}
294
295static struct i2c_driver pcf8583_driver = {
296 .driver = {
297 .name = "pcf8583",
298 },
299 .id = I2C_DRIVERID_PCF8583,
300 .attach_adapter = pcf8583_attach,
301 .detach_client = pcf8583_detach,
302};
303
304static int pcf8583_probe(struct i2c_adapter *adap, int addr, int kind)
305{ 269{
306 struct pcf8583 *pcf; 270 struct pcf8583 *pcf8583;
307 struct i2c_client *client;
308 struct rtc_device *rtc;
309 unsigned char buf[1], ad[1] = { 0 };
310 int err; 271 int err;
311 struct i2c_msg msgs[2] = {
312 {
313 .addr = addr,
314 .flags = 0,
315 .len = 1,
316 .buf = ad,
317 }, {
318 .addr = addr,
319 .flags = I2C_M_RD,
320 .len = 1,
321 .buf = buf,
322 }
323 };
324 272
325 if (!i2c_check_functionality(adap, I2C_FUNC_I2C)) 273 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
326 return 0; 274 return -ENODEV;
327 275
328 pcf = kzalloc(sizeof(*pcf), GFP_KERNEL); 276 pcf8583 = kzalloc(sizeof(struct pcf8583), GFP_KERNEL);
329 if (!pcf) 277 if (!pcf8583)
330 return -ENOMEM; 278 return -ENOMEM;
331 279
332 client = &pcf->client; 280 pcf8583->rtc = rtc_device_register(pcf8583_driver.driver.name,
281 &client->dev, &pcf8583_rtc_ops, THIS_MODULE);
333 282
334 client->addr = addr; 283 if (IS_ERR(pcf8583->rtc)) {
335 client->adapter = adap; 284 err = PTR_ERR(pcf8583->rtc);
336 client->driver = &pcf8583_driver;
337
338 strlcpy(client->name, pcf8583_driver.driver.name, I2C_NAME_SIZE);
339
340 if (i2c_transfer(client->adapter, msgs, 2) != 2) {
341 err = -EIO;
342 goto exit_kfree; 285 goto exit_kfree;
343 } 286 }
344 287
345 err = i2c_attach_client(client); 288 i2c_set_clientdata(client, pcf8583);
346 289 return 0;
347 if (err)
348 goto exit_kfree;
349
350 rtc = rtc_device_register(pcf8583_driver.driver.name, &client->dev,
351 &pcf8583_rtc_ops, THIS_MODULE);
352 290
353 if (IS_ERR(rtc)) { 291exit_kfree:
354 err = PTR_ERR(rtc); 292 kfree(pcf8583);
355 goto exit_detach; 293 return err;
356 } 294}
357 295
358 pcf->rtc = rtc; 296static int __devexit pcf8583_remove(struct i2c_client *client)
359 i2c_set_clientdata(client, pcf); 297{
360 set_ctrl(client, buf[0]); 298 struct pcf8583 *pcf8583 = i2c_get_clientdata(client);
361 299
300 if (pcf8583->rtc)
301 rtc_device_unregister(pcf8583->rtc);
302 kfree(pcf8583);
362 return 0; 303 return 0;
304}
363 305
364exit_detach: 306static const struct i2c_device_id pcf8583_id[] = {
365 i2c_detach_client(client); 307 { "pcf8583", 0 },
366 308 { }
367exit_kfree: 309};
368 kfree(pcf); 310MODULE_DEVICE_TABLE(i2c, pcf8583_id);
369 311
370 return err; 312static struct i2c_driver pcf8583_driver = {
371} 313 .driver = {
314 .name = "pcf8583",
315 .owner = THIS_MODULE,
316 },
317 .probe = pcf8583_probe,
318 .remove = __devexit_p(pcf8583_remove),
319 .id_table = pcf8583_id,
320};
372 321
373static __init int pcf8583_init(void) 322static __init int pcf8583_init(void)
374{ 323{
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index fed86e507fdf..54b1ebb01502 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -36,10 +36,8 @@ static struct resource *s3c_rtc_mem;
36static void __iomem *s3c_rtc_base; 36static void __iomem *s3c_rtc_base;
37static int s3c_rtc_alarmno = NO_IRQ; 37static int s3c_rtc_alarmno = NO_IRQ;
38static int s3c_rtc_tickno = NO_IRQ; 38static int s3c_rtc_tickno = NO_IRQ;
39static int s3c_rtc_freq = 1;
40 39
41static DEFINE_SPINLOCK(s3c_rtc_pie_lock); 40static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
42static unsigned int tick_count;
43 41
44/* IRQ Handlers */ 42/* IRQ Handlers */
45 43
@@ -55,7 +53,7 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
55{ 53{
56 struct rtc_device *rdev = id; 54 struct rtc_device *rdev = id;
57 55
58 rtc_update_irq(rdev, tick_count++, RTC_PF | RTC_IRQF); 56 rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
59 return IRQ_HANDLED; 57 return IRQ_HANDLED;
60} 58}
61 59
@@ -74,35 +72,37 @@ static void s3c_rtc_setaie(int to)
74 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 72 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
75} 73}
76 74
77static void s3c_rtc_setpie(int to) 75static int s3c_rtc_setpie(struct device *dev, int enabled)
78{ 76{
79 unsigned int tmp; 77 unsigned int tmp;
80 78
81 pr_debug("%s: pie=%d\n", __func__, to); 79 pr_debug("%s: pie=%d\n", __func__, enabled);
82 80
83 spin_lock_irq(&s3c_rtc_pie_lock); 81 spin_lock_irq(&s3c_rtc_pie_lock);
84 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; 82 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
85 83
86 if (to) 84 if (enabled)
87 tmp |= S3C2410_TICNT_ENABLE; 85 tmp |= S3C2410_TICNT_ENABLE;
88 86
89 writeb(tmp, s3c_rtc_base + S3C2410_TICNT); 87 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
90 spin_unlock_irq(&s3c_rtc_pie_lock); 88 spin_unlock_irq(&s3c_rtc_pie_lock);
89
90 return 0;
91} 91}
92 92
93static void s3c_rtc_setfreq(int freq) 93static int s3c_rtc_setfreq(struct device *dev, int freq)
94{ 94{
95 unsigned int tmp; 95 unsigned int tmp;
96 96
97 spin_lock_irq(&s3c_rtc_pie_lock); 97 spin_lock_irq(&s3c_rtc_pie_lock);
98 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
99
100 s3c_rtc_freq = freq;
101 98
99 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
102 tmp |= (128 / freq)-1; 100 tmp |= (128 / freq)-1;
103 101
104 writeb(tmp, s3c_rtc_base + S3C2410_TICNT); 102 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
105 spin_unlock_irq(&s3c_rtc_pie_lock); 103 spin_unlock_irq(&s3c_rtc_pie_lock);
104
105 return 0;
106} 106}
107 107
108/* Time read/write */ 108/* Time read/write */
@@ -267,12 +267,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
267 267
268 writeb(alrm_en, base + S3C2410_RTCALM); 268 writeb(alrm_en, base + S3C2410_RTCALM);
269 269
270 if (0) { 270 s3c_rtc_setaie(alrm->enabled);
271 alrm_en = readb(base + S3C2410_RTCALM);
272 alrm_en &= ~S3C2410_RTCALM_ALMEN;
273 writeb(alrm_en, base + S3C2410_RTCALM);
274 disable_irq_wake(s3c_rtc_alarmno);
275 }
276 271
277 if (alrm->enabled) 272 if (alrm->enabled)
278 enable_irq_wake(s3c_rtc_alarmno); 273 enable_irq_wake(s3c_rtc_alarmno);
@@ -282,59 +277,12 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
282 return 0; 277 return 0;
283} 278}
284 279
285static int s3c_rtc_ioctl(struct device *dev,
286 unsigned int cmd, unsigned long arg)
287{
288 unsigned int ret = -ENOIOCTLCMD;
289
290 switch (cmd) {
291 case RTC_AIE_OFF:
292 case RTC_AIE_ON:
293 s3c_rtc_setaie((cmd == RTC_AIE_ON) ? 1 : 0);
294 ret = 0;
295 break;
296
297 case RTC_PIE_OFF:
298 case RTC_PIE_ON:
299 tick_count = 0;
300 s3c_rtc_setpie((cmd == RTC_PIE_ON) ? 1 : 0);
301 ret = 0;
302 break;
303
304 case RTC_IRQP_READ:
305 ret = put_user(s3c_rtc_freq, (unsigned long __user *)arg);
306 break;
307
308 case RTC_IRQP_SET:
309 if (!is_power_of_2(arg)) {
310 ret = -EINVAL;
311 goto exit;
312 }
313
314 pr_debug("s3c2410_rtc: setting frequency %ld\n", arg);
315
316 s3c_rtc_setfreq(arg);
317 ret = 0;
318 break;
319
320 case RTC_UIE_ON:
321 case RTC_UIE_OFF:
322 ret = -EINVAL;
323 }
324
325 exit:
326 return ret;
327}
328
329static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) 280static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
330{ 281{
331 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); 282 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
332 283
333 seq_printf(seq, "periodic_IRQ\t: %s\n", 284 seq_printf(seq, "periodic_IRQ\t: %s\n",
334 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" ); 285 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" );
335
336 seq_printf(seq, "periodic_freq\t: %d\n", s3c_rtc_freq);
337
338 return 0; 286 return 0;
339} 287}
340 288
@@ -374,7 +322,7 @@ static void s3c_rtc_release(struct device *dev)
374 322
375 /* do not clear AIE here, it may be needed for wake */ 323 /* do not clear AIE here, it may be needed for wake */
376 324
377 s3c_rtc_setpie(0); 325 s3c_rtc_setpie(dev, 0);
378 free_irq(s3c_rtc_alarmno, rtc_dev); 326 free_irq(s3c_rtc_alarmno, rtc_dev);
379 free_irq(s3c_rtc_tickno, rtc_dev); 327 free_irq(s3c_rtc_tickno, rtc_dev);
380} 328}
@@ -382,11 +330,12 @@ static void s3c_rtc_release(struct device *dev)
382static const struct rtc_class_ops s3c_rtcops = { 330static const struct rtc_class_ops s3c_rtcops = {
383 .open = s3c_rtc_open, 331 .open = s3c_rtc_open,
384 .release = s3c_rtc_release, 332 .release = s3c_rtc_release,
385 .ioctl = s3c_rtc_ioctl,
386 .read_time = s3c_rtc_gettime, 333 .read_time = s3c_rtc_gettime,
387 .set_time = s3c_rtc_settime, 334 .set_time = s3c_rtc_settime,
388 .read_alarm = s3c_rtc_getalarm, 335 .read_alarm = s3c_rtc_getalarm,
389 .set_alarm = s3c_rtc_setalarm, 336 .set_alarm = s3c_rtc_setalarm,
337 .irq_set_freq = s3c_rtc_setfreq,
338 .irq_set_state = s3c_rtc_setpie,
390 .proc = s3c_rtc_proc, 339 .proc = s3c_rtc_proc,
391}; 340};
392 341
@@ -430,14 +379,14 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
430 } 379 }
431} 380}
432 381
433static int s3c_rtc_remove(struct platform_device *dev) 382static int __devexit s3c_rtc_remove(struct platform_device *dev)
434{ 383{
435 struct rtc_device *rtc = platform_get_drvdata(dev); 384 struct rtc_device *rtc = platform_get_drvdata(dev);
436 385
437 platform_set_drvdata(dev, NULL); 386 platform_set_drvdata(dev, NULL);
438 rtc_device_unregister(rtc); 387 rtc_device_unregister(rtc);
439 388
440 s3c_rtc_setpie(0); 389 s3c_rtc_setpie(&dev->dev, 0);
441 s3c_rtc_setaie(0); 390 s3c_rtc_setaie(0);
442 391
443 iounmap(s3c_rtc_base); 392 iounmap(s3c_rtc_base);
@@ -447,7 +396,7 @@ static int s3c_rtc_remove(struct platform_device *dev)
447 return 0; 396 return 0;
448} 397}
449 398
450static int s3c_rtc_probe(struct platform_device *pdev) 399static int __devinit s3c_rtc_probe(struct platform_device *pdev)
451{ 400{
452 struct rtc_device *rtc; 401 struct rtc_device *rtc;
453 struct resource *res; 402 struct resource *res;
@@ -504,7 +453,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
504 pr_debug("s3c2410_rtc: RTCCON=%02x\n", 453 pr_debug("s3c2410_rtc: RTCCON=%02x\n",
505 readb(s3c_rtc_base + S3C2410_RTCCON)); 454 readb(s3c_rtc_base + S3C2410_RTCCON));
506 455
507 s3c_rtc_setfreq(s3c_rtc_freq); 456 s3c_rtc_setfreq(&pdev->dev, 1);
508 457
509 /* register RTC and exit */ 458 /* register RTC and exit */
510 459
@@ -560,7 +509,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
560 509
561static struct platform_driver s3c2410_rtcdrv = { 510static struct platform_driver s3c2410_rtcdrv = {
562 .probe = s3c_rtc_probe, 511 .probe = s3c_rtc_probe,
563 .remove = s3c_rtc_remove, 512 .remove = __devexit_p(s3c_rtc_remove),
564 .suspend = s3c_rtc_suspend, 513 .suspend = s3c_rtc_suspend,
565 .resume = s3c_rtc_resume, 514 .resume = s3c_rtc_resume,
566 .driver = { 515 .driver = {
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index be9c70d0b193..884b635f028b 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for NEC VR4100 series Real Time Clock unit. 2 * Driver for NEC VR4100 series Real Time Clock unit.
3 * 3 *
4 * Copyright (C) 2003-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -34,7 +34,7 @@
34 34
35MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 35MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
36MODULE_DESCRIPTION("NEC VR4100 series RTC driver"); 36MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL v2");
38 38
39/* RTC 1 registers */ 39/* RTC 1 registers */
40#define ETIMELREG 0x00 40#define ETIMELREG 0x00
@@ -82,7 +82,6 @@ static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
82 82
83static DEFINE_SPINLOCK(rtc_lock); 83static DEFINE_SPINLOCK(rtc_lock);
84static char rtc_name[] = "RTC"; 84static char rtc_name[] = "RTC";
85static unsigned long periodic_frequency;
86static unsigned long periodic_count; 85static unsigned long periodic_count;
87static unsigned int alarm_enabled; 86static unsigned int alarm_enabled;
88static int aie_irq = -1; 87static int aie_irq = -1;
@@ -207,10 +206,37 @@ static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
207 return 0; 206 return 0;
208} 207}
209 208
210static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 209static int vr41xx_rtc_irq_set_freq(struct device *dev, int freq)
211{ 210{
212 unsigned long count; 211 unsigned long count;
213 212
213 count = RTC_FREQUENCY;
214 do_div(count, freq);
215
216 periodic_count = count;
217
218 spin_lock_irq(&rtc_lock);
219
220 rtc1_write(RTCL1LREG, count);
221 rtc1_write(RTCL1HREG, count >> 16);
222
223 spin_unlock_irq(&rtc_lock);
224
225 return 0;
226}
227
228static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
229{
230 if (enabled)
231 enable_irq(pie_irq);
232 else
233 disable_irq(pie_irq);
234
235 return 0;
236}
237
238static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
239{
214 switch (cmd) { 240 switch (cmd) {
215 case RTC_AIE_ON: 241 case RTC_AIE_ON:
216 spin_lock_irq(&rtc_lock); 242 spin_lock_irq(&rtc_lock);
@@ -232,33 +258,6 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
232 258
233 spin_unlock_irq(&rtc_lock); 259 spin_unlock_irq(&rtc_lock);
234 break; 260 break;
235 case RTC_PIE_ON:
236 enable_irq(pie_irq);
237 break;
238 case RTC_PIE_OFF:
239 disable_irq(pie_irq);
240 break;
241 case RTC_IRQP_READ:
242 return put_user(periodic_frequency, (unsigned long __user *)arg);
243 break;
244 case RTC_IRQP_SET:
245 if (arg > MAX_PERIODIC_RATE)
246 return -EINVAL;
247
248 periodic_frequency = arg;
249
250 count = RTC_FREQUENCY;
251 do_div(count, arg);
252
253 periodic_count = count;
254
255 spin_lock_irq(&rtc_lock);
256
257 rtc1_write(RTCL1LREG, count);
258 rtc1_write(RTCL1HREG, count >> 16);
259
260 spin_unlock_irq(&rtc_lock);
261 break;
262 case RTC_EPOCH_READ: 261 case RTC_EPOCH_READ:
263 return put_user(epoch, (unsigned long __user *)arg); 262 return put_user(epoch, (unsigned long __user *)arg);
264 case RTC_EPOCH_SET: 263 case RTC_EPOCH_SET:
@@ -309,6 +308,8 @@ static const struct rtc_class_ops vr41xx_rtc_ops = {
309 .set_time = vr41xx_rtc_set_time, 308 .set_time = vr41xx_rtc_set_time,
310 .read_alarm = vr41xx_rtc_read_alarm, 309 .read_alarm = vr41xx_rtc_read_alarm,
311 .set_alarm = vr41xx_rtc_set_alarm, 310 .set_alarm = vr41xx_rtc_set_alarm,
311 .irq_set_freq = vr41xx_rtc_irq_set_freq,
312 .irq_set_state = vr41xx_rtc_irq_set_state,
312}; 313};
313 314
314static int __devinit rtc_probe(struct platform_device *pdev) 315static int __devinit rtc_probe(struct platform_device *pdev)
@@ -346,6 +347,8 @@ static int __devinit rtc_probe(struct platform_device *pdev)
346 goto err_iounmap_all; 347 goto err_iounmap_all;
347 } 348 }
348 349
350 rtc->max_user_freq = MAX_PERIODIC_RATE;
351
349 spin_lock_irq(&rtc_lock); 352 spin_lock_irq(&rtc_lock);
350 353
351 rtc1_write(ECMPLREG, 0); 354 rtc1_write(ECMPLREG, 0);
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 5ab34340919b..79954bd6bfa5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -15,6 +15,7 @@
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/virtio.h> 16#include <linux/virtio.h>
17#include <linux/virtio_config.h> 17#include <linux/virtio_config.h>
18#include <linux/virtio_console.h>
18#include <linux/interrupt.h> 19#include <linux/interrupt.h>
19#include <linux/virtio_ring.h> 20#include <linux/virtio_ring.h>
20#include <linux/pfn.h> 21#include <linux/pfn.h>
@@ -87,16 +88,20 @@ static u32 kvm_get_features(struct virtio_device *vdev)
87 return features; 88 return features;
88} 89}
89 90
90static void kvm_set_features(struct virtio_device *vdev, u32 features) 91static void kvm_finalize_features(struct virtio_device *vdev)
91{ 92{
92 unsigned int i; 93 unsigned int i, bits;
93 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; 94 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
94 /* Second half of bitmap is features we accept. */ 95 /* Second half of bitmap is features we accept. */
95 u8 *out_features = kvm_vq_features(desc) + desc->feature_len; 96 u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
96 97
98 /* Give virtio_ring a chance to accept features. */
99 vring_transport_features(vdev);
100
97 memset(out_features, 0, desc->feature_len); 101 memset(out_features, 0, desc->feature_len);
98 for (i = 0; i < min(desc->feature_len * 8, 32); i++) { 102 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
99 if (features & (1 << i)) 103 for (i = 0; i < bits; i++) {
104 if (test_bit(i, vdev->features))
100 out_features[i / 8] |= (1 << (i % 8)); 105 out_features[i / 8] |= (1 << (i % 8));
101 } 106 }
102} 107}
@@ -222,7 +227,7 @@ static void kvm_del_vq(struct virtqueue *vq)
222 */ 227 */
223static struct virtio_config_ops kvm_vq_configspace_ops = { 228static struct virtio_config_ops kvm_vq_configspace_ops = {
224 .get_features = kvm_get_features, 229 .get_features = kvm_get_features,
225 .set_features = kvm_set_features, 230 .finalize_features = kvm_finalize_features,
226 .get = kvm_get, 231 .get = kvm_get,
227 .set = kvm_set, 232 .set = kvm_set,
228 .get_status = kvm_get_status, 233 .get_status = kvm_get_status,
@@ -333,6 +338,25 @@ static int __init kvm_devices_init(void)
333 return 0; 338 return 0;
334} 339}
335 340
341/* code for early console output with virtio_console */
342static __init int early_put_chars(u32 vtermno, const char *buf, int count)
343{
344 char scratch[17];
345 unsigned int len = count;
346
347 if (len > sizeof(scratch) - 1)
348 len = sizeof(scratch) - 1;
349 scratch[len] = '\0';
350 memcpy(scratch, buf, len);
351 kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch));
352 return len;
353}
354
355void s390_virtio_console_init(void)
356{
357 virtio_cons_early_init(early_put_chars);
358}
359
336/* 360/*
337 * We do this after core stuff, but before the drivers. 361 * We do this after core stuff, but before the drivers.
338 */ 362 */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index eb702b96d57c..c4a7c06793c5 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3819,6 +3819,20 @@ static int ibmvfc_remove(struct vio_dev *vdev)
3819 return 0; 3819 return 0;
3820} 3820}
3821 3821
3822/**
3823 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
3824 * @vdev: vio device struct
3825 *
3826 * Return value:
3827 * Number of bytes the driver will need to DMA map at the same time in
3828 * order to perform well.
3829 */
3830static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
3831{
3832 unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
3833 return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
3834}
3835
3822static struct vio_device_id ibmvfc_device_table[] __devinitdata = { 3836static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
3823 {"fcp", "IBM,vfc-client"}, 3837 {"fcp", "IBM,vfc-client"},
3824 { "", "" } 3838 { "", "" }
@@ -3829,6 +3843,7 @@ static struct vio_driver ibmvfc_driver = {
3829 .id_table = ibmvfc_device_table, 3843 .id_table = ibmvfc_device_table,
3830 .probe = ibmvfc_probe, 3844 .probe = ibmvfc_probe,
3831 .remove = ibmvfc_remove, 3845 .remove = ibmvfc_remove,
3846 .get_desired_dma = ibmvfc_get_desired_dma,
3832 .driver = { 3847 .driver = {
3833 .name = IBMVFC_NAME, 3848 .name = IBMVFC_NAME,
3834 .owner = THIS_MODULE, 3849 .owner = THIS_MODULE,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5d23368a1bce..20000ec79b04 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -72,6 +72,7 @@
72#include <linux/delay.h> 72#include <linux/delay.h>
73#include <asm/firmware.h> 73#include <asm/firmware.h>
74#include <asm/vio.h> 74#include <asm/vio.h>
75#include <asm/firmware.h>
75#include <scsi/scsi.h> 76#include <scsi/scsi.h>
76#include <scsi/scsi_cmnd.h> 77#include <scsi/scsi_cmnd.h>
77#include <scsi/scsi_host.h> 78#include <scsi/scsi_host.h>
@@ -426,8 +427,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
426 SG_ALL * sizeof(struct srp_direct_buf), 427 SG_ALL * sizeof(struct srp_direct_buf),
427 &evt_struct->ext_list_token, 0); 428 &evt_struct->ext_list_token, 0);
428 if (!evt_struct->ext_list) { 429 if (!evt_struct->ext_list) {
429 sdev_printk(KERN_ERR, cmd->device, 430 if (!firmware_has_feature(FW_FEATURE_CMO))
430 "Can't allocate memory for indirect table\n"); 431 sdev_printk(KERN_ERR, cmd->device,
432 "Can't allocate memory "
433 "for indirect table\n");
431 return 0; 434 return 0;
432 } 435 }
433 } 436 }
@@ -743,7 +746,9 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
743 srp_cmd->lun = ((u64) lun) << 48; 746 srp_cmd->lun = ((u64) lun) << 48;
744 747
745 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { 748 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
746 sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n"); 749 if (!firmware_has_feature(FW_FEATURE_CMO))
750 sdev_printk(KERN_ERR, cmnd->device,
751 "couldn't convert cmd to srp_cmd\n");
747 free_event_struct(&hostdata->pool, evt_struct); 752 free_event_struct(&hostdata->pool, evt_struct);
748 return SCSI_MLQUEUE_HOST_BUSY; 753 return SCSI_MLQUEUE_HOST_BUSY;
749 } 754 }
@@ -855,7 +860,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
855 DMA_BIDIRECTIONAL); 860 DMA_BIDIRECTIONAL);
856 861
857 if (dma_mapping_error(req->buffer)) { 862 if (dma_mapping_error(req->buffer)) {
858 dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n"); 863 if (!firmware_has_feature(FW_FEATURE_CMO))
864 dev_err(hostdata->dev,
865 "Unable to map request_buffer for "
866 "adapter_info!\n");
859 free_event_struct(&hostdata->pool, evt_struct); 867 free_event_struct(&hostdata->pool, evt_struct);
860 return; 868 return;
861 } 869 }
@@ -1400,7 +1408,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1400 DMA_BIDIRECTIONAL); 1408 DMA_BIDIRECTIONAL);
1401 1409
1402 if (dma_mapping_error(host_config->buffer)) { 1410 if (dma_mapping_error(host_config->buffer)) {
1403 dev_err(hostdata->dev, "dma_mapping error getting host config\n"); 1411 if (!firmware_has_feature(FW_FEATURE_CMO))
1412 dev_err(hostdata->dev,
1413 "dma_mapping error getting host config\n");
1404 free_event_struct(&hostdata->pool, evt_struct); 1414 free_event_struct(&hostdata->pool, evt_struct);
1405 return -1; 1415 return -1;
1406 } 1416 }
@@ -1604,7 +1614,7 @@ static struct scsi_host_template driver_template = {
1604 .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler, 1614 .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
1605 .slave_configure = ibmvscsi_slave_configure, 1615 .slave_configure = ibmvscsi_slave_configure,
1606 .change_queue_depth = ibmvscsi_change_queue_depth, 1616 .change_queue_depth = ibmvscsi_change_queue_depth,
1607 .cmd_per_lun = 16, 1617 .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
1608 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT, 1618 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
1609 .this_id = -1, 1619 .this_id = -1,
1610 .sg_tablesize = SG_ALL, 1620 .sg_tablesize = SG_ALL,
@@ -1613,6 +1623,26 @@ static struct scsi_host_template driver_template = {
1613}; 1623};
1614 1624
1615/** 1625/**
1626 * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
1627 *
1628 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
1629 *
1630 * Return value:
1631 * Number of bytes of IO data the driver will need to perform well.
1632 */
1633static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
1634{
1635 /* iu_storage data allocated in initialize_event_pool */
1636 unsigned long desired_io = max_requests * sizeof(union viosrp_iu);
1637
1638 /* add io space for sg data */
1639 desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT *
1640 IBMVSCSI_CMDS_PER_LUN_DEFAULT);
1641
1642 return desired_io;
1643}
1644
1645/**
1616 * Called by bus code for each adapter 1646 * Called by bus code for each adapter
1617 */ 1647 */
1618static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) 1648static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -1641,7 +1671,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1641 hostdata->host = host; 1671 hostdata->host = host;
1642 hostdata->dev = dev; 1672 hostdata->dev = dev;
1643 atomic_set(&hostdata->request_limit, -1); 1673 atomic_set(&hostdata->request_limit, -1);
1644 hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */ 1674 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
1645 1675
1646 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests); 1676 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests);
1647 if (rc != 0 && rc != H_RESOURCE) { 1677 if (rc != 0 && rc != H_RESOURCE) {
@@ -1735,6 +1765,7 @@ static struct vio_driver ibmvscsi_driver = {
1735 .id_table = ibmvscsi_device_table, 1765 .id_table = ibmvscsi_device_table,
1736 .probe = ibmvscsi_probe, 1766 .probe = ibmvscsi_probe,
1737 .remove = ibmvscsi_remove, 1767 .remove = ibmvscsi_remove,
1768 .get_desired_dma = ibmvscsi_get_desired_dma,
1738 .driver = { 1769 .driver = {
1739 .name = "ibmvscsi", 1770 .name = "ibmvscsi",
1740 .owner = THIS_MODULE, 1771 .owner = THIS_MODULE,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 46e850e302c7..2d4339d5e16e 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -45,6 +45,8 @@ struct Scsi_Host;
45#define MAX_INDIRECT_BUFS 10 45#define MAX_INDIRECT_BUFS 10
46 46
47#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100 47#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
48#define IBMVSCSI_CMDS_PER_LUN_DEFAULT 16
49#define IBMVSCSI_MAX_SECTORS_DEFAULT 256 /* 32 * 8 = default max I/O 32 pages */
48#define IBMVSCSI_MAX_CMDS_PER_LUN 64 50#define IBMVSCSI_MAX_CMDS_PER_LUN 64
49 51
50/* ------------------------------------------------------------ 52/* ------------------------------------------------------------
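
For reference, ibmvscsi_get_desired_dma() above combines the two defaults introduced here: the scatter-gather term works out to IBMVSCSI_MAX_SECTORS_DEFAULT * IBMVSCSI_CMDS_PER_LUN_DEFAULT = 256 * 16 = 4096, while the IU term is max_requests * sizeof(union viosrp_iu), a size defined elsewhere in the driver and not shown in this patch. A minimal sketch of the same arithmetic, with the IU size as a labelled stand-in:

	/* Illustrative only -- mirrors ibmvscsi_get_desired_dma() with the
	 * defaults from this patch.  EXAMPLE_VIOSRP_IU_SIZE is an assumed
	 * stand-in for sizeof(union viosrp_iu), not a value from the source.
	 */
	#define EXAMPLE_MAX_SECTORS_DEFAULT	256
	#define EXAMPLE_CMDS_PER_LUN_DEFAULT	16
	#define EXAMPLE_VIOSRP_IU_SIZE		128	/* assumption */

	static unsigned long example_desired_dma(unsigned long max_requests)
	{
		unsigned long desired_io = max_requests * EXAMPLE_VIOSRP_IU_SIZE;

		/* scatter-gather buffers: 256 * 16 = 4096 */
		desired_io += EXAMPLE_MAX_SECTORS_DEFAULT *
			      EXAMPLE_CMDS_PER_LUN_DEFAULT;

		return desired_io;
	}
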
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f843c1383a4b..b40a673985aa 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -84,7 +84,6 @@ typedef struct ide_scsi_obj {
84 struct Scsi_Host *host; 84 struct Scsi_Host *host;
85 85
86 struct ide_atapi_pc *pc; /* Current packet command */ 86 struct ide_atapi_pc *pc; /* Current packet command */
87 unsigned long flags; /* Status/Action flags */
88 unsigned long transform; /* SCSI cmd translation layer */ 87 unsigned long transform; /* SCSI cmd translation layer */
89 unsigned long log; /* log flags */ 88 unsigned long log; /* log flags */
90} idescsi_scsi_t; 89} idescsi_scsi_t;
@@ -102,8 +101,13 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk)
102 101
103 mutex_lock(&idescsi_ref_mutex); 102 mutex_lock(&idescsi_ref_mutex);
104 scsi = ide_scsi_g(disk); 103 scsi = ide_scsi_g(disk);
105 if (scsi) 104 if (scsi) {
106 scsi_host_get(scsi->host); 105 scsi_host_get(scsi->host);
106 if (ide_device_get(scsi->drive)) {
107 scsi_host_put(scsi->host);
108 scsi = NULL;
109 }
110 }
107 mutex_unlock(&idescsi_ref_mutex); 111 mutex_unlock(&idescsi_ref_mutex);
108 return scsi; 112 return scsi;
109} 113}
@@ -111,6 +115,7 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk)
111static void ide_scsi_put(struct ide_scsi_obj *scsi) 115static void ide_scsi_put(struct ide_scsi_obj *scsi)
112{ 116{
113 mutex_lock(&idescsi_ref_mutex); 117 mutex_lock(&idescsi_ref_mutex);
118 ide_device_put(scsi->drive);
114 scsi_host_put(scsi->host); 119 scsi_host_put(scsi->host);
115 mutex_unlock(&idescsi_ref_mutex); 120 mutex_unlock(&idescsi_ref_mutex);
116} 121}
@@ -126,23 +131,14 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
126} 131}
127 132
128/* 133/*
129 * Per ATAPI device status bits.
130 */
131#define IDESCSI_DRQ_INTERRUPT 0 /* DRQ interrupt device */
132
133/*
134 * ide-scsi requests.
135 */
136#define IDESCSI_PC_RQ 90
137
138/*
139 * PIO data transfer routine using the scatter gather table. 134 * PIO data transfer routine using the scatter gather table.
140 */ 135 */
141static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, 136static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
142 unsigned int bcount, int write) 137 unsigned int bcount, int write)
143{ 138{
144 ide_hwif_t *hwif = drive->hwif; 139 ide_hwif_t *hwif = drive->hwif;
145 xfer_func_t *xf = write ? hwif->output_data : hwif->input_data; 140 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
141 xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
146 char *buf; 142 char *buf;
147 int count; 143 int count;
148 144
@@ -211,15 +207,15 @@ static int idescsi_check_condition(ide_drive_t *drive,
211 207
212 /* stuff a sense request in front of our current request */ 208 /* stuff a sense request in front of our current request */
213 pc = kzalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC); 209 pc = kzalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
214 rq = kmalloc(sizeof(struct request), GFP_ATOMIC); 210 rq = blk_get_request(drive->queue, READ, GFP_ATOMIC);
215 buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC); 211 buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC);
216 if (!pc || !rq || !buf) { 212 if (!pc || !rq || !buf) {
217 kfree(buf); 213 kfree(buf);
218 kfree(rq); 214 if (rq)
215 blk_put_request(rq);
219 kfree(pc); 216 kfree(pc);
220 return -ENOMEM; 217 return -ENOMEM;
221 } 218 }
222 blk_rq_init(NULL, rq);
223 rq->special = (char *) pc; 219 rq->special = (char *) pc;
224 pc->rq = rq; 220 pc->rq = rq;
225 pc->buf = buf; 221 pc->buf = buf;
@@ -228,7 +224,6 @@ static int idescsi_check_condition(ide_drive_t *drive,
228 rq->cmd_type = REQ_TYPE_SENSE; 224 rq->cmd_type = REQ_TYPE_SENSE;
229 rq->cmd_flags |= REQ_PREEMPT; 225 rq->cmd_flags |= REQ_PREEMPT;
230 pc->timeout = jiffies + WAIT_READY; 226 pc->timeout = jiffies + WAIT_READY;
231 pc->callback = ide_scsi_callback;
232 /* NOTE! Save the failed packet command in "rq->buffer" */ 227 /* NOTE! Save the failed packet command in "rq->buffer" */
233 rq->buffer = (void *) failed_cmd->special; 228 rq->buffer = (void *) failed_cmd->special;
234 pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd; 229 pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
@@ -237,6 +232,8 @@ static int idescsi_check_condition(ide_drive_t *drive,
237 ide_scsi_hex_dump(pc->c, 6); 232 ide_scsi_hex_dump(pc->c, 6);
238 } 233 }
239 rq->rq_disk = scsi->disk; 234 rq->rq_disk = scsi->disk;
235 rq->ref_count++;
236 memcpy(rq->cmd, pc->c, 12);
240 ide_do_drive_cmd(drive, rq); 237 ide_do_drive_cmd(drive, rq);
241 return 0; 238 return 0;
242} 239}
@@ -246,10 +243,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
246{ 243{
247 ide_hwif_t *hwif = drive->hwif; 244 ide_hwif_t *hwif = drive->hwif;
248 245
249 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 246 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
250 /* force an abort */ 247 /* force an abort */
251 hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, 248 hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
252 hwif->io_ports.command_addr);
253 249
254 rq->errors++; 250 rq->errors++;
255 251
@@ -283,7 +279,7 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
283 SCSI_SENSE_BUFFERSIZE); 279 SCSI_SENSE_BUFFERSIZE);
284 kfree(pc->buf); 280 kfree(pc->buf);
285 kfree(pc); 281 kfree(pc);
286 kfree(rq); 282 blk_put_request(rq);
287 pc = opc; 283 pc = opc;
288 rq = pc->rq; 284 rq = pc->rq;
289 pc->scsi_cmd->result = (CHECK_CONDITION << 1) | 285 pc->scsi_cmd->result = (CHECK_CONDITION << 1) |
@@ -314,7 +310,7 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
314 pc->done(pc->scsi_cmd); 310 pc->done(pc->scsi_cmd);
315 spin_unlock_irqrestore(host->host_lock, flags); 311 spin_unlock_irqrestore(host->host_lock, flags);
316 kfree(pc); 312 kfree(pc);
317 kfree(rq); 313 blk_put_request(rq);
318 scsi->pc = NULL; 314 scsi->pc = NULL;
319 return 0; 315 return 0;
320} 316}
@@ -421,10 +417,6 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
421 417
422 if (blk_sense_request(rq) || blk_special_request(rq)) { 418 if (blk_sense_request(rq) || blk_special_request(rq)) {
423 struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special; 419 struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;
424 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
425
426 if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags))
427 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
428 420
429 if (drive->using_dma && !idescsi_map_sg(drive, pc)) 421 if (drive->using_dma && !idescsi_map_sg(drive, pc))
430 pc->flags |= PC_FLAG_DMA_OK; 422 pc->flags |= PC_FLAG_DMA_OK;
@@ -460,11 +452,14 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
460static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi) 452static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
461{ 453{
462 if (drive->id && (drive->id->config & 0x0060) == 0x20) 454 if (drive->id && (drive->id->config & 0x0060) == 0x20)
463 set_bit (IDESCSI_DRQ_INTERRUPT, &scsi->flags); 455 set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
464 clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); 456 clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
465#if IDESCSI_DEBUG_LOG 457#if IDESCSI_DEBUG_LOG
466 set_bit(IDESCSI_LOG_CMD, &scsi->log); 458 set_bit(IDESCSI_LOG_CMD, &scsi->log);
467#endif /* IDESCSI_DEBUG_LOG */ 459#endif /* IDESCSI_DEBUG_LOG */
460
461 drive->pc_callback = ide_scsi_callback;
462
468 idescsi_add_settings(drive); 463 idescsi_add_settings(drive);
469} 464}
470 465
@@ -589,6 +584,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
589 ide_drive_t *drive = scsi->drive; 584 ide_drive_t *drive = scsi->drive;
590 struct request *rq = NULL; 585 struct request *rq = NULL;
591 struct ide_atapi_pc *pc = NULL; 586 struct ide_atapi_pc *pc = NULL;
587 int write = cmd->sc_data_direction == DMA_TO_DEVICE;
592 588
593 if (!drive) { 589 if (!drive) {
594 scmd_printk (KERN_ERR, cmd, "drive not present\n"); 590 scmd_printk (KERN_ERR, cmd, "drive not present\n");
@@ -596,7 +592,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
596 } 592 }
597 scsi = drive_to_idescsi(drive); 593 scsi = drive_to_idescsi(drive);
598 pc = kmalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC); 594 pc = kmalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
599 rq = kmalloc(sizeof(struct request), GFP_ATOMIC); 595 rq = blk_get_request(drive->queue, write, GFP_ATOMIC);
600 if (rq == NULL || pc == NULL) { 596 if (rq == NULL || pc == NULL) {
601 printk (KERN_ERR "ide-scsi: %s: out of memory\n", drive->name); 597 printk (KERN_ERR "ide-scsi: %s: out of memory\n", drive->name);
602 goto abort; 598 goto abort;
@@ -616,7 +612,6 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
616 pc->scsi_cmd = cmd; 612 pc->scsi_cmd = cmd;
617 pc->done = done; 613 pc->done = done;
618 pc->timeout = jiffies + cmd->timeout_per_command; 614 pc->timeout = jiffies + cmd->timeout_per_command;
619 pc->callback = ide_scsi_callback;
620 615
621 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { 616 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
622 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); 617 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -627,16 +622,18 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
627 } 622 }
628 } 623 }
629 624
630 blk_rq_init(NULL, rq);
631 rq->special = (char *) pc; 625 rq->special = (char *) pc;
632 rq->cmd_type = REQ_TYPE_SPECIAL; 626 rq->cmd_type = REQ_TYPE_SPECIAL;
633 spin_unlock_irq(host->host_lock); 627 spin_unlock_irq(host->host_lock);
628 rq->ref_count++;
629 memcpy(rq->cmd, pc->c, 12);
634 blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL); 630 blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
635 spin_lock_irq(host->host_lock); 631 spin_lock_irq(host->host_lock);
636 return 0; 632 return 0;
637abort: 633abort:
638 kfree (pc); 634 kfree (pc);
639 kfree (rq); 635 if (rq)
636 blk_put_request(rq);
640 cmd->result = DID_ERROR << 16; 637 cmd->result = DID_ERROR << 16;
641 done(cmd); 638 done(cmd);
642 return 0; 639 return 0;
@@ -684,7 +681,9 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
684 681
685 if (blk_sense_request(scsi->pc->rq)) 682 if (blk_sense_request(scsi->pc->rq))
686 kfree(scsi->pc->buf); 683 kfree(scsi->pc->buf);
687 kfree(scsi->pc->rq); 684 /* we need to call blk_put_request twice. */
685 blk_put_request(scsi->pc->rq);
686 blk_put_request(scsi->pc->rq);
688 kfree(scsi->pc); 687 kfree(scsi->pc);
689 scsi->pc = NULL; 688 scsi->pc = NULL;
690 689
@@ -736,7 +735,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
736 kfree(scsi->pc->buf); 735 kfree(scsi->pc->buf);
737 kfree(scsi->pc); 736 kfree(scsi->pc);
738 scsi->pc = NULL; 737 scsi->pc = NULL;
739 kfree(req); 738 blk_put_request(req);
740 739
741 /* now nuke the drive queue */ 740 /* now nuke the drive queue */
742 while ((req = elv_next_request(drive->queue))) { 741 while ((req = elv_next_request(drive->queue))) {
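
The ide-scsi conversion above replaces hand-rolled kmalloc()/kfree() of struct request with the block layer's blk_get_request()/blk_put_request(), which hand back a properly initialised, reference-counted request tied to the drive's queue. A minimal sketch of that allocation pattern under the same era's block API (the helper name and payload handling are illustrative, not the driver's code):

	#include <linux/blkdev.h>

	/* Allocate a driver-private request on @q; the caller must balance a
	 * successful call with blk_put_request(), as the hunks above now do
	 * on every exit path.
	 */
	static struct request *example_get_special_rq(struct request_queue *q,
						      void *payload)
	{
		struct request *rq = blk_get_request(q, READ, GFP_ATOMIC);

		if (!rq)
			return NULL;

		rq->cmd_type = REQ_TYPE_SPECIAL;
		rq->special = payload;
		return rq;
	}
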
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 2c87db98cdfb..f9cf70151366 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -7,6 +7,7 @@
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/mm.h>
10#include <linux/init.h> 11#include <linux/init.h>
11 12
12#include <asm/irq.h> 13#include <asm/irq.h>
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 27f34a9f9cb7..a97f1ae11f78 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1293,7 +1293,18 @@ receive_chars(struct uart_8250_port *up, unsigned int *status)
1293 char flag; 1293 char flag;
1294 1294
1295 do { 1295 do {
1296 ch = serial_inp(up, UART_RX); 1296 if (likely(lsr & UART_LSR_DR))
1297 ch = serial_inp(up, UART_RX);
1298 else
1299 /*
                                     1300 * Intel 82571 has a Serial Over LAN device that will
1301 * set UART_LSR_BI without setting UART_LSR_DR when
1302 * it receives a break. To avoid reading from the
1303 * receive buffer without UART_LSR_DR bit set, we
1304 * just force the read character to be 0
1305 */
1306 ch = 0;
1307
1297 flag = TTY_NORMAL; 1308 flag = TTY_NORMAL;
1298 up->port.icount.rx++; 1309 up->port.icount.rx++;
1299 1310
@@ -1342,7 +1353,7 @@ receive_chars(struct uart_8250_port *up, unsigned int *status)
1342 1353
1343ignore_char: 1354ignore_char:
1344 lsr = serial_inp(up, UART_LSR); 1355 lsr = serial_inp(up, UART_LSR);
1345 } while ((lsr & UART_LSR_DR) && (max_count-- > 0)); 1356 } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
1346 spin_unlock(&up->port.lock); 1357 spin_unlock(&up->port.lock);
1347 tty_flip_buffer_push(tty); 1358 tty_flip_buffer_push(tty);
1348 spin_lock(&up->port.lock); 1359 spin_lock(&up->port.lock);
@@ -1425,7 +1436,7 @@ serial8250_handle_port(struct uart_8250_port *up)
1425 1436
1426 DEBUG_INTR("status = %x...", status); 1437 DEBUG_INTR("status = %x...", status);
1427 1438
1428 if (status & UART_LSR_DR) 1439 if (status & (UART_LSR_DR | UART_LSR_BI))
1429 receive_chars(up, &status); 1440 receive_chars(up, &status);
1430 check_modem_status(up); 1441 check_modem_status(up);
1431 if (status & UART_LSR_THRE) 1442 if (status & UART_LSR_THRE)
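
The 8250 hunks above let receive_chars() run when only UART_LSR_BI is set (a break with no data, as seen on the Intel 82571 Serial over LAN port) while making sure the RX register is read only when UART_LSR_DR is also set. A condensed sketch of that guard in isolation; read_rx() stands in for serial_inp(up, UART_RX):

	#include <linux/kernel.h>
	#include <linux/serial_reg.h>

	/* Return the next character, or a forced 0 when a break is pending
	 * without data ready, so the FIFO is never popped spuriously.
	 */
	static unsigned char example_rx_char(unsigned int lsr,
					     unsigned char (*read_rx)(void))
	{
		if (likely(lsr & UART_LSR_DR))
			return read_rx();
		return 0;
	}
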
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c
index 4eb7437a404a..0416ad3bc127 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/serial/8250_gsc.c
@@ -119,3 +119,5 @@ int __init probe_serial_gsc(void)
119} 119}
120 120
121module_init(probe_serial_gsc); 121module_init(probe_serial_gsc);
122
123MODULE_LICENSE("GPL");
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 1b36087665a2..c2f23933155b 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -767,6 +767,9 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
767#define PCI_SUBDEVICE_ID_POCTAL232 0x0308 767#define PCI_SUBDEVICE_ID_POCTAL232 0x0308
768#define PCI_SUBDEVICE_ID_POCTAL422 0x0408 768#define PCI_SUBDEVICE_ID_POCTAL422 0x0408
769 769
770/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
771#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
772
770/* 773/*
771 * Master list of serial port init/setup/exit quirks. 774 * Master list of serial port init/setup/exit quirks.
772 * This does not describe the general nature of the port. 775 * This does not describe the general nature of the port.
@@ -882,6 +885,15 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
882 }, 885 },
883 { 886 {
884 .vendor = PCI_VENDOR_ID_PLX, 887 .vendor = PCI_VENDOR_ID_PLX,
888 .device = PCI_DEVICE_ID_PLX_9050,
889 .subvendor = PCI_VENDOR_ID_PLX,
890 .subdevice = PCI_SUBDEVICE_ID_UNKNOWN_0x1584,
891 .init = pci_plx9050_init,
892 .setup = pci_default_setup,
893 .exit = __devexit_p(pci_plx9050_exit),
894 },
895 {
896 .vendor = PCI_VENDOR_ID_PLX,
885 .device = PCI_DEVICE_ID_PLX_ROMULUS, 897 .device = PCI_DEVICE_ID_PLX_ROMULUS,
886 .subvendor = PCI_VENDOR_ID_PLX, 898 .subvendor = PCI_VENDOR_ID_PLX,
887 .subdevice = PCI_DEVICE_ID_PLX_ROMULUS, 899 .subdevice = PCI_DEVICE_ID_PLX_ROMULUS,
@@ -2197,6 +2209,11 @@ static struct pci_device_id serial_pci_tbl[] = {
2197 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077, 2209 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077,
2198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2210 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2199 pbn_b2_4_921600 }, 2211 pbn_b2_4_921600 },
2212 /* Unknown card - subdevice 0x1584 */
2213 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2214 PCI_VENDOR_ID_PLX,
2215 PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0,
2216 pbn_b0_4_115200 },
2200 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, 2217 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2201 PCI_SUBVENDOR_ID_KEYSPAN, 2218 PCI_SUBVENDOR_ID_KEYSPAN,
2202 PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0, 2219 PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0,
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 8fc7451c0049..3b4a14e355c1 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -942,22 +942,6 @@ config SERIAL_IP22_ZILOG_CONSOLE
942 depends on SERIAL_IP22_ZILOG=y 942 depends on SERIAL_IP22_ZILOG=y
943 select SERIAL_CORE_CONSOLE 943 select SERIAL_CORE_CONSOLE
944 944
945config V850E_UART
946 bool "NEC V850E on-chip UART support"
947 depends on V850E_MA1 || V850E_ME2 || V850E_TEG || V850E2_ANNA || V850E_AS85EP1
948 select SERIAL_CORE
949 default y
950
951config V850E_UARTB
952 bool
953 depends on V850E_UART && V850E_ME2
954 default y
955
956config V850E_UART_CONSOLE
957 bool "Use NEC V850E on-chip UART for console"
958 depends on V850E_UART
959 select SERIAL_CORE_CONSOLE
960
961config SERIAL_SH_SCI 945config SERIAL_SH_SCI
962 tristate "SuperH SCI(F) serial port support" 946 tristate "SuperH SCI(F) serial port support"
963 depends on SUPERH || H8300 947 depends on SUPERH || H8300
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 93e407ee08b9..a4f86927a74b 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -201,6 +201,10 @@ static void cpm_uart_int_tx(struct uart_port *port)
201 cpm_uart_tx_pump(port); 201 cpm_uart_tx_pump(port);
202} 202}
203 203
204#ifdef CONFIG_CONSOLE_POLL
205static int serial_polled;
206#endif
207
204/* 208/*
205 * Receive characters 209 * Receive characters
206 */ 210 */
@@ -222,6 +226,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
222 */ 226 */
223 bdp = pinfo->rx_cur; 227 bdp = pinfo->rx_cur;
224 for (;;) { 228 for (;;) {
229#ifdef CONFIG_CONSOLE_POLL
230 if (unlikely(serial_polled)) {
231 serial_polled = 0;
232 return;
233 }
234#endif
225 /* get status */ 235 /* get status */
226 status = in_be16(&bdp->cbd_sc); 236 status = in_be16(&bdp->cbd_sc);
227 /* If this one is empty, return happy */ 237 /* If this one is empty, return happy */
@@ -253,7 +263,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
253 goto handle_error; 263 goto handle_error;
254 if (uart_handle_sysrq_char(port, ch)) 264 if (uart_handle_sysrq_char(port, ch))
255 continue; 265 continue;
256 266#ifdef CONFIG_CONSOLE_POLL
267 if (unlikely(serial_polled)) {
268 serial_polled = 0;
269 return;
270 }
271#endif
257 error_return: 272 error_return:
258 tty_insert_flip_char(tty, ch, flg); 273 tty_insert_flip_char(tty, ch, flg);
259 274
@@ -420,10 +435,13 @@ static void cpm_uart_shutdown(struct uart_port *port)
420 } 435 }
421 436
422 /* Shut them really down and reinit buffer descriptors */ 437 /* Shut them really down and reinit buffer descriptors */
423 if (IS_SMC(pinfo)) 438 if (IS_SMC(pinfo)) {
439 out_be16(&pinfo->smcup->smc_brkcr, 0);
424 cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX); 440 cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
425 else 441 } else {
442 out_be16(&pinfo->sccup->scc_brkcr, 0);
426 cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX); 443 cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX);
444 }
427 445
428 cpm_uart_initbd(pinfo); 446 cpm_uart_initbd(pinfo);
429 } 447 }
@@ -539,9 +557,11 @@ static void cpm_uart_set_termios(struct uart_port *port,
539 * enables, because we want to put them back if they were 557 * enables, because we want to put them back if they were
540 * present. 558 * present.
541 */ 559 */
542 prev_mode = in_be16(&smcp->smc_smcmr); 560 prev_mode = in_be16(&smcp->smc_smcmr) & (SMCMR_REN | SMCMR_TEN);
543 out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval | SMCMR_SM_UART); 561 /* Output in *one* operation, so we don't interrupt RX/TX if they
544 setbits16(&smcp->smc_smcmr, (prev_mode & (SMCMR_REN | SMCMR_TEN))); 562 * were already enabled. */
563 out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval |
564 SMCMR_SM_UART | prev_mode);
545 } else { 565 } else {
546 out_be16(&sccp->scc_psmr, (sbits << 12) | scval); 566 out_be16(&sccp->scc_psmr, (sbits << 12) | scval);
547 } 567 }
@@ -865,6 +885,80 @@ static void cpm_uart_config_port(struct uart_port *port, int flags)
865 cpm_uart_request_port(port); 885 cpm_uart_request_port(port);
866 } 886 }
867} 887}
888
889#ifdef CONFIG_CONSOLE_POLL
890/* Serial polling routines for writing and reading from the uart while
891 * in an interrupt or debug context.
892 */
893
894#define GDB_BUF_SIZE 512 /* power of 2, please */
895
896static char poll_buf[GDB_BUF_SIZE];
897static char *pollp;
898static int poll_chars;
899
900static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
901{
902 u_char c, *cp;
903 volatile cbd_t *bdp;
904 int i;
905
906 /* Get the address of the host memory buffer.
907 */
908 bdp = pinfo->rx_cur;
909 while (bdp->cbd_sc & BD_SC_EMPTY)
910 ;
911
912 /* If the buffer address is in the CPM DPRAM, don't
913 * convert it.
914 */
915 cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
916
917 if (obuf) {
918 i = c = bdp->cbd_datlen;
919 while (i-- > 0)
920 *obuf++ = *cp++;
921 } else
922 c = *cp;
923 bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID);
924 bdp->cbd_sc |= BD_SC_EMPTY;
925
926 if (bdp->cbd_sc & BD_SC_WRAP)
927 bdp = pinfo->rx_bd_base;
928 else
929 bdp++;
930 pinfo->rx_cur = (cbd_t *)bdp;
931
932 return (int)c;
933}
934
935static int cpm_get_poll_char(struct uart_port *port)
936{
937 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
938
939 if (!serial_polled) {
940 serial_polled = 1;
941 poll_chars = 0;
942 }
943 if (poll_chars <= 0) {
944 poll_chars = poll_wait_key(poll_buf, pinfo);
945 pollp = poll_buf;
946 }
947 poll_chars--;
948 return *pollp++;
949}
950
951static void cpm_put_poll_char(struct uart_port *port,
952 unsigned char c)
953{
954 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
955 static char ch[2];
956
957 ch[0] = (char)c;
958 cpm_uart_early_write(pinfo->port.line, ch, 1);
959}
960#endif /* CONFIG_CONSOLE_POLL */
961
868static struct uart_ops cpm_uart_pops = { 962static struct uart_ops cpm_uart_pops = {
869 .tx_empty = cpm_uart_tx_empty, 963 .tx_empty = cpm_uart_tx_empty,
870 .set_mctrl = cpm_uart_set_mctrl, 964 .set_mctrl = cpm_uart_set_mctrl,
@@ -882,6 +976,10 @@ static struct uart_ops cpm_uart_pops = {
882 .request_port = cpm_uart_request_port, 976 .request_port = cpm_uart_request_port,
883 .config_port = cpm_uart_config_port, 977 .config_port = cpm_uart_config_port,
884 .verify_port = cpm_uart_verify_port, 978 .verify_port = cpm_uart_verify_port,
979#ifdef CONFIG_CONSOLE_POLL
980 .poll_get_char = cpm_get_poll_char,
981 .poll_put_char = cpm_put_poll_char,
982#endif
885}; 983};
886 984
887struct uart_cpm_port cpm_uart_ports[UART_NR]; 985struct uart_cpm_port cpm_uart_ports[UART_NR];
@@ -1105,12 +1203,14 @@ static int __init cpm_uart_console_setup(struct console *co, char *options)
1105 udbg_putc = NULL; 1203 udbg_putc = NULL;
1106#endif 1204#endif
1107 1205
1108 cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
1109
1110 if (IS_SMC(pinfo)) { 1206 if (IS_SMC(pinfo)) {
1207 out_be16(&pinfo->smcup->smc_brkcr, 0);
1208 cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
1111 clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX); 1209 clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX);
1112 clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN); 1210 clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN);
1113 } else { 1211 } else {
1212 out_be16(&pinfo->sccup->scc_brkcr, 0);
1213 cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX);
1114 clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX); 1214 clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
1115 clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 1215 clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
1116 } 1216 }
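
Both the CPM UART above and the MPSC driver further down gain CONFIG_CONSOLE_POLL hooks so kgdb can drive the console with interrupts disabled. The uart_ops wiring is identical in the two drivers; a stripped-down sketch of just that shape (stub bodies only, names illustrative):

	#include <linux/serial_core.h>

	#ifdef CONFIG_CONSOLE_POLL
	static int example_poll_get_char(struct uart_port *port)
	{
		return 0;	/* real drivers busy-wait on the RX hardware */
	}

	static void example_poll_put_char(struct uart_port *port, unsigned char c)
	{
		/* real drivers busy-wait until the TX path accepts @c */
	}
	#endif

	static struct uart_ops example_uart_ops = {
		/* ...the usual tx_empty/startup/shutdown/set_termios hooks... */
	#ifdef CONFIG_CONSOLE_POLL
		.poll_get_char	= example_poll_get_char,
		.poll_put_char	= example_poll_put_char,
	#endif
	};
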
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index a81d2c2ff8a2..6042b87797a1 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -642,6 +642,26 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
642 spin_unlock_irqrestore(&dport->port.lock, flags); 642 spin_unlock_irqrestore(&dport->port.lock, flags);
643} 643}
644 644
645/*
646 * Hack alert!
647 * Required solely so that the initial PROM-based console
648 * works undisturbed in parallel with this one.
649 */
650static void dz_pm(struct uart_port *uport, unsigned int state,
651 unsigned int oldstate)
652{
653 struct dz_port *dport = to_dport(uport);
654 unsigned long flags;
655
656 spin_lock_irqsave(&dport->port.lock, flags);
657 if (state < 3)
658 dz_start_tx(&dport->port);
659 else
660 dz_stop_tx(&dport->port);
661 spin_unlock_irqrestore(&dport->port.lock, flags);
662}
663
664
645static const char *dz_type(struct uart_port *uport) 665static const char *dz_type(struct uart_port *uport)
646{ 666{
647 return "DZ"; 667 return "DZ";
@@ -738,6 +758,7 @@ static struct uart_ops dz_ops = {
738 .startup = dz_startup, 758 .startup = dz_startup,
739 .shutdown = dz_shutdown, 759 .shutdown = dz_shutdown,
740 .set_termios = dz_set_termios, 760 .set_termios = dz_set_termios,
761 .pm = dz_pm,
741 .type = dz_type, 762 .type = dz_type,
742 .release_port = dz_release_port, 763 .release_port = dz_release_port,
743 .request_port = dz_request_port, 764 .request_port = dz_request_port,
@@ -861,7 +882,10 @@ static int __init dz_console_setup(struct console *co, char *options)
861 if (ret) 882 if (ret)
862 return ret; 883 return ret;
863 884
885 spin_lock_init(&dport->port.lock); /* For dz_pm(). */
886
864 dz_reset(dport); 887 dz_reset(dport);
888 dz_pm(uport, 0, -1);
865 889
866 if (options) 890 if (options)
867 uart_parse_options(options, &baud, &parity, &bits, &flow); 891 uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index c9f53e71f252..61d3ade5286c 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -921,6 +921,10 @@ static int mpsc_make_ready(struct mpsc_port_info *pi)
921 return 0; 921 return 0;
922} 922}
923 923
924#ifdef CONFIG_CONSOLE_POLL
925static int serial_polled;
926#endif
927
924/* 928/*
925 ****************************************************************************** 929 ******************************************************************************
926 * 930 *
@@ -956,7 +960,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
956 while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) 960 while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
957 & SDMA_DESC_CMDSTAT_O)) { 961 & SDMA_DESC_CMDSTAT_O)) {
958 bytes_in = be16_to_cpu(rxre->bytecnt); 962 bytes_in = be16_to_cpu(rxre->bytecnt);
959 963#ifdef CONFIG_CONSOLE_POLL
964 if (unlikely(serial_polled)) {
965 serial_polled = 0;
966 return 0;
967 }
968#endif
960 /* Following use of tty struct directly is deprecated */ 969 /* Following use of tty struct directly is deprecated */
961 if (unlikely(tty_buffer_request_room(tty, bytes_in) 970 if (unlikely(tty_buffer_request_room(tty, bytes_in)
962 < bytes_in)) { 971 < bytes_in)) {
@@ -1017,6 +1026,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
1017 if (uart_handle_sysrq_char(&pi->port, *bp)) { 1026 if (uart_handle_sysrq_char(&pi->port, *bp)) {
1018 bp++; 1027 bp++;
1019 bytes_in--; 1028 bytes_in--;
1029#ifdef CONFIG_CONSOLE_POLL
1030 if (unlikely(serial_polled)) {
1031 serial_polled = 0;
1032 return 0;
1033 }
1034#endif
1020 goto next_frame; 1035 goto next_frame;
1021 } 1036 }
1022 1037
@@ -1519,6 +1534,133 @@ static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1519 1534
1520 return rc; 1535 return rc;
1521} 1536}
1537#ifdef CONFIG_CONSOLE_POLL
1538/* Serial polling routines for writing and reading from the uart while
1539 * in an interrupt or debug context.
1540 */
1541
1542static char poll_buf[2048];
1543static int poll_ptr;
1544static int poll_cnt;
1545static void mpsc_put_poll_char(struct uart_port *port,
1546 unsigned char c);
1547
1548static int mpsc_get_poll_char(struct uart_port *port)
1549{
1550 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1551 struct mpsc_rx_desc *rxre;
1552 u32 cmdstat, bytes_in, i;
1553 u8 *bp;
1554
1555 if (!serial_polled)
1556 serial_polled = 1;
1557
1558 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
1559
1560 if (poll_cnt) {
1561 poll_cnt--;
1562 return poll_buf[poll_ptr++];
1563 }
1564 poll_ptr = 0;
1565 poll_cnt = 0;
1566
1567 while (poll_cnt == 0) {
1568 rxre = (struct mpsc_rx_desc *)(pi->rxr +
1569 (pi->rxr_posn*MPSC_RXRE_SIZE));
1570 dma_cache_sync(pi->port.dev, (void *)rxre,
1571 MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
1572#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1573 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1574 invalidate_dcache_range((ulong)rxre,
1575 (ulong)rxre + MPSC_RXRE_SIZE);
1576#endif
1577 /*
1578 * Loop through Rx descriptors handling ones that have
1579 * been completed.
1580 */
1581 while (poll_cnt == 0 &&
1582 !((cmdstat = be32_to_cpu(rxre->cmdstat)) &
1583 SDMA_DESC_CMDSTAT_O)){
1584 bytes_in = be16_to_cpu(rxre->bytecnt);
1585 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
1586 dma_cache_sync(pi->port.dev, (void *) bp,
1587 MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
1588#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1589 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1590 invalidate_dcache_range((ulong)bp,
1591 (ulong)bp + MPSC_RXBE_SIZE);
1592#endif
1593 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
1594 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
1595 !(cmdstat & pi->port.ignore_status_mask)) {
1596 poll_buf[poll_cnt] = *bp;
1597 poll_cnt++;
1598 } else {
1599 for (i = 0; i < bytes_in; i++) {
1600 poll_buf[poll_cnt] = *bp++;
1601 poll_cnt++;
1602 }
1603 pi->port.icount.rx += bytes_in;
1604 }
1605 rxre->bytecnt = cpu_to_be16(0);
1606 wmb();
1607 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
1608 SDMA_DESC_CMDSTAT_EI |
1609 SDMA_DESC_CMDSTAT_F |
1610 SDMA_DESC_CMDSTAT_L);
1611 wmb();
1612 dma_cache_sync(pi->port.dev, (void *)rxre,
1613 MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
1614#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1615 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1616 flush_dcache_range((ulong)rxre,
1617 (ulong)rxre + MPSC_RXRE_SIZE);
1618#endif
1619
1620 /* Advance to next descriptor */
1621 pi->rxr_posn = (pi->rxr_posn + 1) &
1622 (MPSC_RXR_ENTRIES - 1);
1623 rxre = (struct mpsc_rx_desc *)(pi->rxr +
1624 (pi->rxr_posn * MPSC_RXRE_SIZE));
1625 dma_cache_sync(pi->port.dev, (void *)rxre,
1626 MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
1627#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1628 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1629 invalidate_dcache_range((ulong)rxre,
1630 (ulong)rxre + MPSC_RXRE_SIZE);
1631#endif
1632 }
1633
1634 /* Restart rx engine, if its stopped */
1635 if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
1636 mpsc_start_rx(pi);
1637 }
1638 if (poll_cnt) {
1639 poll_cnt--;
1640 return poll_buf[poll_ptr++];
1641 }
1642
1643 return 0;
1644}
1645
1646
1647static void mpsc_put_poll_char(struct uart_port *port,
1648 unsigned char c)
1649{
1650 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1651 u32 data;
1652
1653 data = readl(pi->mpsc_base + MPSC_MPCR);
1654 writeb(c, pi->mpsc_base + MPSC_CHR_1);
1655 mb();
1656 data = readl(pi->mpsc_base + MPSC_CHR_2);
1657 data |= MPSC_CHR_2_TTCS;
1658 writel(data, pi->mpsc_base + MPSC_CHR_2);
1659 mb();
1660
1661 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
1662}
1663#endif
1522 1664
1523static struct uart_ops mpsc_pops = { 1665static struct uart_ops mpsc_pops = {
1524 .tx_empty = mpsc_tx_empty, 1666 .tx_empty = mpsc_tx_empty,
@@ -1537,6 +1679,10 @@ static struct uart_ops mpsc_pops = {
1537 .request_port = mpsc_request_port, 1679 .request_port = mpsc_request_port,
1538 .config_port = mpsc_config_port, 1680 .config_port = mpsc_config_port,
1539 .verify_port = mpsc_verify_port, 1681 .verify_port = mpsc_verify_port,
1682#ifdef CONFIG_CONSOLE_POLL
1683 .poll_get_char = mpsc_get_poll_char,
1684 .poll_put_char = mpsc_put_poll_char,
1685#endif
1540}; 1686};
1541 1687
1542/* 1688/*
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
index bd45b6230fd8..9e6a873f8203 100644
--- a/drivers/serial/zs.c
+++ b/drivers/serial/zs.c
@@ -787,7 +787,6 @@ static int zs_startup(struct uart_port *uport)
787 zport->regs[1] &= ~RxINT_MASK; 787 zport->regs[1] &= ~RxINT_MASK;
788 zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB; 788 zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB;
789 zport->regs[3] |= RxENABLE; 789 zport->regs[3] |= RxENABLE;
790 zport->regs[5] |= TxENAB;
791 zport->regs[15] |= BRKIE; 790 zport->regs[15] |= BRKIE;
792 write_zsreg(zport, R1, zport->regs[1]); 791 write_zsreg(zport, R1, zport->regs[1]);
793 write_zsreg(zport, R3, zport->regs[3]); 792 write_zsreg(zport, R3, zport->regs[3]);
@@ -814,7 +813,6 @@ static void zs_shutdown(struct uart_port *uport)
814 813
815 spin_lock_irqsave(&scc->zlock, flags); 814 spin_lock_irqsave(&scc->zlock, flags);
816 815
817 zport->regs[5] &= ~TxENAB;
818 zport->regs[3] &= ~RxENABLE; 816 zport->regs[3] &= ~RxENABLE;
819 write_zsreg(zport, R5, zport->regs[5]); 817 write_zsreg(zport, R5, zport->regs[5]);
820 write_zsreg(zport, R3, zport->regs[3]); 818 write_zsreg(zport, R3, zport->regs[3]);
@@ -959,6 +957,23 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
959 spin_unlock_irqrestore(&scc->zlock, flags); 957 spin_unlock_irqrestore(&scc->zlock, flags);
960} 958}
961 959
960/*
961 * Hack alert!
962 * Required solely so that the initial PROM-based console
963 * works undisturbed in parallel with this one.
964 */
965static void zs_pm(struct uart_port *uport, unsigned int state,
966 unsigned int oldstate)
967{
968 struct zs_port *zport = to_zport(uport);
969
970 if (state < 3)
971 zport->regs[5] |= TxENAB;
972 else
973 zport->regs[5] &= ~TxENAB;
974 write_zsreg(zport, R5, zport->regs[5]);
975}
976
962 977
963static const char *zs_type(struct uart_port *uport) 978static const char *zs_type(struct uart_port *uport)
964{ 979{
@@ -1041,6 +1056,7 @@ static struct uart_ops zs_ops = {
1041 .startup = zs_startup, 1056 .startup = zs_startup,
1042 .shutdown = zs_shutdown, 1057 .shutdown = zs_shutdown,
1043 .set_termios = zs_set_termios, 1058 .set_termios = zs_set_termios,
1059 .pm = zs_pm,
1044 .type = zs_type, 1060 .type = zs_type,
1045 .release_port = zs_release_port, 1061 .release_port = zs_release_port,
1046 .request_port = zs_request_port, 1062 .request_port = zs_request_port,
@@ -1190,6 +1206,7 @@ static int __init zs_console_setup(struct console *co, char *options)
1190 return ret; 1206 return ret;
1191 1207
1192 zs_reset(zport); 1208 zs_reset(zport);
1209 zs_pm(uport, 0, -1);
1193 1210
1194 if (options) 1211 if (options)
1195 uart_parse_options(options, &baud, &parity, &bits, &flow); 1212 uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 66ec5d8808de..2303521b4f09 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -49,25 +49,26 @@ config SPI_MASTER
49 controller and the protocol drivers for the SPI slave chips 49 controller and the protocol drivers for the SPI slave chips
50 that are connected. 50 that are connected.
51 51
52if SPI_MASTER
53
52comment "SPI Master Controller Drivers" 54comment "SPI Master Controller Drivers"
53 depends on SPI_MASTER
54 55
55config SPI_ATMEL 56config SPI_ATMEL
56 tristate "Atmel SPI Controller" 57 tristate "Atmel SPI Controller"
57 depends on (ARCH_AT91 || AVR32) && SPI_MASTER 58 depends on (ARCH_AT91 || AVR32)
58 help 59 help
59 This selects a driver for the Atmel SPI Controller, present on 60 This selects a driver for the Atmel SPI Controller, present on
60 many AT32 (AVR32) and AT91 (ARM) chips. 61 many AT32 (AVR32) and AT91 (ARM) chips.
61 62
62config SPI_BFIN 63config SPI_BFIN
63 tristate "SPI controller driver for ADI Blackfin5xx" 64 tristate "SPI controller driver for ADI Blackfin5xx"
64 depends on SPI_MASTER && BLACKFIN 65 depends on BLACKFIN
65 help 66 help
66 This is the SPI controller master driver for Blackfin 5xx processor. 67 This is the SPI controller master driver for Blackfin 5xx processor.
67 68
68config SPI_AU1550 69config SPI_AU1550
69 tristate "Au1550/Au12x0 SPI Controller" 70 tristate "Au1550/Au12x0 SPI Controller"
70 depends on SPI_MASTER && (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL 71 depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
71 select SPI_BITBANG 72 select SPI_BITBANG
72 help 73 help
73 If you say yes to this option, support will be included for the 74 If you say yes to this option, support will be included for the
@@ -78,7 +79,6 @@ config SPI_AU1550
78 79
79config SPI_BITBANG 80config SPI_BITBANG
80 tristate "Bitbanging SPI master" 81 tristate "Bitbanging SPI master"
81 depends on SPI_MASTER && EXPERIMENTAL
82 help 82 help
83 With a few GPIO pins, your system can bitbang the SPI protocol. 83 With a few GPIO pins, your system can bitbang the SPI protocol.
84 Select this to get SPI support through I/O pins (GPIO, parallel 84 Select this to get SPI support through I/O pins (GPIO, parallel
@@ -92,7 +92,7 @@ config SPI_BITBANG
92 92
93config SPI_BUTTERFLY 93config SPI_BUTTERFLY
94 tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)" 94 tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)"
95 depends on SPI_MASTER && PARPORT && EXPERIMENTAL 95 depends on PARPORT
96 select SPI_BITBANG 96 select SPI_BITBANG
97 help 97 help
98 This uses a custom parallel port cable to connect to an AVR 98 This uses a custom parallel port cable to connect to an AVR
@@ -102,14 +102,14 @@ config SPI_BUTTERFLY
102 102
103config SPI_IMX 103config SPI_IMX
104 tristate "Freescale iMX SPI controller" 104 tristate "Freescale iMX SPI controller"
105 depends on SPI_MASTER && ARCH_IMX && EXPERIMENTAL 105 depends on ARCH_IMX && EXPERIMENTAL
106 help 106 help
107 This enables using the Freescale iMX SPI controller in master 107 This enables using the Freescale iMX SPI controller in master
108 mode. 108 mode.
109 109
110config SPI_LM70_LLP 110config SPI_LM70_LLP
111 tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)" 111 tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
112 depends on SPI_MASTER && PARPORT && EXPERIMENTAL 112 depends on PARPORT && EXPERIMENTAL
113 select SPI_BITBANG 113 select SPI_BITBANG
114 help 114 help
115 This driver supports the NS LM70 LLP Evaluation Board, 115 This driver supports the NS LM70 LLP Evaluation Board,
@@ -118,14 +118,14 @@ config SPI_LM70_LLP
118 118
119config SPI_MPC52xx_PSC 119config SPI_MPC52xx_PSC
120 tristate "Freescale MPC52xx PSC SPI controller" 120 tristate "Freescale MPC52xx PSC SPI controller"
121 depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL 121 depends on PPC_MPC52xx && EXPERIMENTAL
122 help 122 help
123 This enables using the Freescale MPC52xx Programmable Serial 123 This enables using the Freescale MPC52xx Programmable Serial
124 Controller in master SPI mode. 124 Controller in master SPI mode.
125 125
126config SPI_MPC83xx 126config SPI_MPC83xx
127 tristate "Freescale MPC83xx/QUICC Engine SPI controller" 127 tristate "Freescale MPC83xx/QUICC Engine SPI controller"
128 depends on SPI_MASTER && (PPC_83xx || QUICC_ENGINE) && EXPERIMENTAL 128 depends on (PPC_83xx || QUICC_ENGINE) && EXPERIMENTAL
129 help 129 help
130 This enables using the Freescale MPC83xx and QUICC Engine SPI 130 This enables using the Freescale MPC83xx and QUICC Engine SPI
131 controllers in master mode. 131 controllers in master mode.
@@ -137,21 +137,21 @@ config SPI_MPC83xx
137 137
138config SPI_OMAP_UWIRE 138config SPI_OMAP_UWIRE
139 tristate "OMAP1 MicroWire" 139 tristate "OMAP1 MicroWire"
140 depends on SPI_MASTER && ARCH_OMAP1 140 depends on ARCH_OMAP1
141 select SPI_BITBANG 141 select SPI_BITBANG
142 help 142 help
143 This hooks up to the MicroWire controller on OMAP1 chips. 143 This hooks up to the MicroWire controller on OMAP1 chips.
144 144
145config SPI_OMAP24XX 145config SPI_OMAP24XX
146 tristate "McSPI driver for OMAP24xx/OMAP34xx" 146 tristate "McSPI driver for OMAP24xx/OMAP34xx"
147 depends on SPI_MASTER && (ARCH_OMAP24XX || ARCH_OMAP34XX) 147 depends on ARCH_OMAP24XX || ARCH_OMAP34XX
148 help 148 help
149 SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI 149 SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
150 (McSPI) modules. 150 (McSPI) modules.
151 151
152config SPI_PXA2XX 152config SPI_PXA2XX
153 tristate "PXA2xx SSP SPI master" 153 tristate "PXA2xx SSP SPI master"
154 depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL 154 depends on ARCH_PXA && EXPERIMENTAL
155 select PXA_SSP 155 select PXA_SSP
156 help 156 help
157 This enables using a PXA2xx SSP port as a SPI master controller. 157 This enables using a PXA2xx SSP port as a SPI master controller.
@@ -160,14 +160,14 @@ config SPI_PXA2XX
160 160
161config SPI_S3C24XX 161config SPI_S3C24XX
162 tristate "Samsung S3C24XX series SPI" 162 tristate "Samsung S3C24XX series SPI"
163 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL 163 depends on ARCH_S3C2410 && EXPERIMENTAL
164 select SPI_BITBANG 164 select SPI_BITBANG
165 help 165 help
166 SPI driver for Samsung S3C24XX series ARM SoCs 166 SPI driver for Samsung S3C24XX series ARM SoCs
167 167
168config SPI_S3C24XX_GPIO 168config SPI_S3C24XX_GPIO
169 tristate "Samsung S3C24XX series SPI by GPIO" 169 tristate "Samsung S3C24XX series SPI by GPIO"
170 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL 170 depends on ARCH_S3C2410 && EXPERIMENTAL
171 select SPI_BITBANG 171 select SPI_BITBANG
172 help 172 help
173 SPI driver for Samsung S3C24XX series ARM SoCs using 173 SPI driver for Samsung S3C24XX series ARM SoCs using
@@ -177,20 +177,20 @@ config SPI_S3C24XX_GPIO
177 177
178config SPI_SH_SCI 178config SPI_SH_SCI
179 tristate "SuperH SCI SPI controller" 179 tristate "SuperH SCI SPI controller"
180 depends on SPI_MASTER && SUPERH 180 depends on SUPERH
181 select SPI_BITBANG 181 select SPI_BITBANG
182 help 182 help
183 SPI driver for SuperH SCI blocks. 183 SPI driver for SuperH SCI blocks.
184 184
185config SPI_TXX9 185config SPI_TXX9
186 tristate "Toshiba TXx9 SPI controller" 186 tristate "Toshiba TXx9 SPI controller"
187 depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX 187 depends on GENERIC_GPIO && CPU_TX49XX
188 help 188 help
189 SPI driver for Toshiba TXx9 MIPS SoCs 189 SPI driver for Toshiba TXx9 MIPS SoCs
190 190
191config SPI_XILINX 191config SPI_XILINX
192 tristate "Xilinx SPI controller" 192 tristate "Xilinx SPI controller"
193 depends on SPI_MASTER && XILINX_VIRTEX && EXPERIMENTAL 193 depends on XILINX_VIRTEX && EXPERIMENTAL
194 select SPI_BITBANG 194 select SPI_BITBANG
195 help 195 help
196 This exposes the SPI controller IP from the Xilinx EDK. 196 This exposes the SPI controller IP from the Xilinx EDK.
@@ -207,11 +207,10 @@ config SPI_XILINX
207# being probably the most widely used ones. 207# being probably the most widely used ones.
208# 208#
209comment "SPI Protocol Masters" 209comment "SPI Protocol Masters"
210 depends on SPI_MASTER
211 210
212config SPI_AT25 211config SPI_AT25
213 tristate "SPI EEPROMs from most vendors" 212 tristate "SPI EEPROMs from most vendors"
214 depends on SPI_MASTER && SYSFS 213 depends on SYSFS
215 help 214 help
216 Enable this driver to get read/write support to most SPI EEPROMs, 215 Enable this driver to get read/write support to most SPI EEPROMs,
217 after you configure the board init code to know about each eeprom 216 after you configure the board init code to know about each eeprom
@@ -222,7 +221,7 @@ config SPI_AT25
222 221
223config SPI_SPIDEV 222config SPI_SPIDEV
224 tristate "User mode SPI device driver support" 223 tristate "User mode SPI device driver support"
225 depends on SPI_MASTER && EXPERIMENTAL 224 depends on EXPERIMENTAL
226 help 225 help
227 This supports user mode SPI protocol drivers. 226 This supports user mode SPI protocol drivers.
228 227
@@ -231,7 +230,7 @@ config SPI_SPIDEV
231 230
232config SPI_TLE62X0 231config SPI_TLE62X0
233 tristate "Infineon TLE62X0 (for power switching)" 232 tristate "Infineon TLE62X0 (for power switching)"
234 depends on SPI_MASTER && SYSFS 233 depends on SYSFS
235 help 234 help
236 SPI driver for Infineon TLE62X0 series line driver chips, 235 SPI driver for Infineon TLE62X0 series line driver chips,
237 such as the TLE6220, TLE6230 and TLE6240. This provides a 236 such as the TLE6220, TLE6230 and TLE6240. This provides a
@@ -242,6 +241,8 @@ config SPI_TLE62X0
242# Add new SPI protocol masters in alphabetical order above this line 241# Add new SPI protocol masters in alphabetical order above this line
243# 242#
244 243
244endif # SPI_MASTER
245
245# (slave support would go here) 246# (slave support would go here)
246 247
247endif # SPI 248endif # SPI
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 072c4a595334..9149689c79d9 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -26,6 +26,7 @@
26#include <linux/errno.h> 26#include <linux/errno.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/resource.h>
29#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
30#include <linux/spi/spi_bitbang.h> 31#include <linux/spi/spi_bitbang.h>
31#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
@@ -81,6 +82,7 @@ struct au1550_spi {
81 struct spi_master *master; 82 struct spi_master *master;
82 struct device *dev; 83 struct device *dev;
83 struct au1550_spi_info *pdata; 84 struct au1550_spi_info *pdata;
85 struct resource *ioarea;
84}; 86};
85 87
86 88
@@ -96,6 +98,8 @@ static dbdev_tab_t au1550_spi_mem_dbdev =
96 .dev_intpolarity = 0 98 .dev_intpolarity = 0
97}; 99};
98 100
101static int ddma_memid; /* id to above mem dma device */
102
99static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw); 103static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);
100 104
101 105
@@ -480,9 +484,13 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
480 au1xxx_dbdma_reset(hw->dma_tx_ch); 484 au1xxx_dbdma_reset(hw->dma_tx_ch);
481 au1550_spi_reset_fifos(hw); 485 au1550_spi_reset_fifos(hw);
482 486
483 dev_err(hw->dev, 487 if (evnt == PSC_SPIEVNT_RO)
484 "Unexpected SPI error: event=0x%x stat=0x%x!\n", 488 dev_err(hw->dev,
485 evnt, stat); 489 "dma transfer: receive FIFO overflow!\n");
490 else
491 dev_err(hw->dev,
492 "dma transfer: unexpected SPI error "
493 "(event=0x%x stat=0x%x)!\n", evnt, stat);
486 494
487 complete(&hw->master_done); 495 complete(&hw->master_done);
488 return IRQ_HANDLED; 496 return IRQ_HANDLED;
@@ -592,17 +600,17 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
592 600
593 if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO 601 if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
594 | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO 602 | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
595 | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD)) 603 | PSC_SPIEVNT_SD))
596 != 0) { 604 != 0) {
597 dev_err(hw->dev,
598 "Unexpected SPI error: event=0x%x stat=0x%x!\n",
599 evnt, stat);
600 /* 605 /*
601 * due to an error we consider transfer as done, 606 * due to an error we consider transfer as done,
602 * so mask all events until before next transfer start 607 * so mask all events until before next transfer start
603 */ 608 */
604 au1550_spi_mask_ack_all(hw); 609 au1550_spi_mask_ack_all(hw);
605 au1550_spi_reset_fifos(hw); 610 au1550_spi_reset_fifos(hw);
611 dev_err(hw->dev,
612 "pio transfer: unexpected SPI error "
613 "(event=0x%x stat=0x%x)!\n", evnt, stat);
606 complete(&hw->master_done); 614 complete(&hw->master_done);
607 return IRQ_HANDLED; 615 return IRQ_HANDLED;
608 } 616 }
@@ -616,27 +624,50 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
616 stat = hw->regs->psc_spistat; 624 stat = hw->regs->psc_spistat;
617 au_sync(); 625 au_sync();
618 626
619 if ((stat & PSC_SPISTAT_RE) == 0 && hw->rx_count < hw->len) { 627 /*
628 * Take care to not let the Rx FIFO overflow.
629 *
630 * We only write a byte if we have read one at least. Initially,
631 * the write fifo is full, so we should read from the read fifo
632 * first.
633 * In case we miss a word from the read fifo, we should get a
634 * RO event and should back out.
635 */
636 if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) {
620 hw->rx_word(hw); 637 hw->rx_word(hw);
621 /* ack the receive request event */
622 hw->regs->psc_spievent = PSC_SPIEVNT_RR;
623 au_sync();
624 busy = 1; 638 busy = 1;
625 }
626 639
627 if ((stat & PSC_SPISTAT_TF) == 0 && hw->tx_count < hw->len) { 640 if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len)
628 hw->tx_word(hw); 641 hw->tx_word(hw);
629 /* ack the transmit request event */
630 hw->regs->psc_spievent = PSC_SPIEVNT_TR;
631 au_sync();
632 busy = 1;
633 } 642 }
634 } while (busy); 643 } while (busy);
635 644
636 evnt = hw->regs->psc_spievent; 645 hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
637 au_sync(); 646 au_sync();
638 647
639 if (hw->rx_count >= hw->len || (evnt & PSC_SPIEVNT_MD) != 0) { 648 /*
649 * Restart the SPI transmission in case of a transmit underflow.
650 * This seems to work despite the notes in the Au1550 data book
651 * of Figure 8-4 with flowchart for SPI master operation:
652 *
653 * """Note 1: An XFR Error Interrupt occurs, unless masked,
654 * for any of the following events: Tx FIFO Underflow,
655 * Rx FIFO Overflow, or Multiple-master Error
656 * Note 2: In case of a Tx Underflow Error, all zeroes are
657 * transmitted."""
658 *
659 * By simply restarting the spi transfer on Tx Underflow Error,
 660 * we assume that the spi transfer is paused instead of the zeroes
 661 * transmission mentioned in Note 2 of the Au1550 data book.
662 */
663 if (evnt & PSC_SPIEVNT_TU) {
664 hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
665 au_sync();
666 hw->regs->psc_spipcr = PSC_SPIPCR_MS;
667 au_sync();
668 }
669
670 if (hw->rx_count >= hw->len) {
640 /* transfer completed successfully */ 671 /* transfer completed successfully */
641 au1550_spi_mask_ack_all(hw); 672 au1550_spi_mask_ack_all(hw);
642 complete(&hw->master_done); 673 complete(&hw->master_done);
@@ -725,6 +756,8 @@ static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
725 stat = hw->regs->psc_spistat; 756 stat = hw->regs->psc_spistat;
726 au_sync(); 757 au_sync();
727 } while ((stat & PSC_SPISTAT_DR) == 0); 758 } while ((stat & PSC_SPISTAT_DR) == 0);
759
760 au1550_spi_reset_fifos(hw);
728} 761}
729 762
730 763
@@ -732,6 +765,7 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
732{ 765{
733 struct au1550_spi *hw; 766 struct au1550_spi *hw;
734 struct spi_master *master; 767 struct spi_master *master;
768 struct resource *r;
735 int err = 0; 769 int err = 0;
736 770
737 master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi)); 771 master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi));
@@ -753,76 +787,64 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
753 goto err_no_pdata; 787 goto err_no_pdata;
754 } 788 }
755 789
756 platform_set_drvdata(pdev, hw); 790 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
757 791 if (!r) {
758 init_completion(&hw->master_done); 792 dev_err(&pdev->dev, "no IRQ\n");
759 793 err = -ENODEV;
760 hw->bitbang.master = hw->master; 794 goto err_no_iores;
761 hw->bitbang.setup_transfer = au1550_spi_setupxfer; 795 }
762 hw->bitbang.chipselect = au1550_spi_chipsel; 796 hw->irq = r->start;
763 hw->bitbang.master->setup = au1550_spi_setup; 797
764 hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs; 798 hw->usedma = 0;
799 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
800 if (r) {
801 hw->dma_tx_id = r->start;
802 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
803 if (r) {
804 hw->dma_rx_id = r->start;
805 if (usedma && ddma_memid) {
806 if (pdev->dev.dma_mask == NULL)
807 dev_warn(&pdev->dev, "no dma mask\n");
808 else
809 hw->usedma = 1;
810 }
811 }
812 }
765 813
766 switch (hw->pdata->bus_num) { 814 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
767 case 0: 815 if (!r) {
768 hw->irq = AU1550_PSC0_INT; 816 dev_err(&pdev->dev, "no mmio resource\n");
769 hw->regs = (volatile psc_spi_t *)PSC0_BASE_ADDR; 817 err = -ENODEV;
770 hw->dma_rx_id = DSCR_CMD0_PSC0_RX; 818 goto err_no_iores;
771 hw->dma_tx_id = DSCR_CMD0_PSC0_TX;
772 break;
773 case 1:
774 hw->irq = AU1550_PSC1_INT;
775 hw->regs = (volatile psc_spi_t *)PSC1_BASE_ADDR;
776 hw->dma_rx_id = DSCR_CMD0_PSC1_RX;
777 hw->dma_tx_id = DSCR_CMD0_PSC1_TX;
778 break;
779 case 2:
780 hw->irq = AU1550_PSC2_INT;
781 hw->regs = (volatile psc_spi_t *)PSC2_BASE_ADDR;
782 hw->dma_rx_id = DSCR_CMD0_PSC2_RX;
783 hw->dma_tx_id = DSCR_CMD0_PSC2_TX;
784 break;
785 case 3:
786 hw->irq = AU1550_PSC3_INT;
787 hw->regs = (volatile psc_spi_t *)PSC3_BASE_ADDR;
788 hw->dma_rx_id = DSCR_CMD0_PSC3_RX;
789 hw->dma_tx_id = DSCR_CMD0_PSC3_TX;
790 break;
791 default:
792 dev_err(&pdev->dev, "Wrong bus_num of SPI\n");
793 err = -ENOENT;
794 goto err_no_pdata;
795 } 819 }
796 820
797 if (request_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t), 821 hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t),
798 pdev->name) == NULL) { 822 pdev->name);
823 if (!hw->ioarea) {
799 dev_err(&pdev->dev, "Cannot reserve iomem region\n"); 824 dev_err(&pdev->dev, "Cannot reserve iomem region\n");
800 err = -ENXIO; 825 err = -ENXIO;
801 goto err_no_iores; 826 goto err_no_iores;
802 } 827 }
803 828
804 829 hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t));
805 if (usedma) { 830 if (!hw->regs) {
806 if (pdev->dev.dma_mask == NULL) 831 dev_err(&pdev->dev, "cannot ioremap\n");
807 dev_warn(&pdev->dev, "no dma mask\n"); 832 err = -ENXIO;
808 else 833 goto err_ioremap;
809 hw->usedma = 1;
810 } 834 }
811 835
812 if (hw->usedma) { 836 platform_set_drvdata(pdev, hw);
813 /* 837
814 * create memory device with 8 bits dev_devwidth 838 init_completion(&hw->master_done);
815 * needed for proper byte ordering to spi fifo 839
816 */ 840 hw->bitbang.master = hw->master;
817 int memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev); 841 hw->bitbang.setup_transfer = au1550_spi_setupxfer;
818 if (!memid) { 842 hw->bitbang.chipselect = au1550_spi_chipsel;
819 dev_err(&pdev->dev, 843 hw->bitbang.master->setup = au1550_spi_setup;
820 "Cannot create dma 8 bit mem device\n"); 844 hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
821 err = -ENXIO;
822 goto err_dma_add_dev;
823 }
824 845
825 hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(memid, 846 if (hw->usedma) {
847 hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid,
826 hw->dma_tx_id, NULL, (void *)hw); 848 hw->dma_tx_id, NULL, (void *)hw);
827 if (hw->dma_tx_ch == 0) { 849 if (hw->dma_tx_ch == 0) {
828 dev_err(&pdev->dev, 850 dev_err(&pdev->dev,
@@ -841,7 +863,7 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
841 863
842 864
843 hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id, 865 hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id,
844 memid, NULL, (void *)hw); 866 ddma_memid, NULL, (void *)hw);
845 if (hw->dma_rx_ch == 0) { 867 if (hw->dma_rx_ch == 0) {
846 dev_err(&pdev->dev, 868 dev_err(&pdev->dev,
847 "Cannot allocate rx dma channel\n"); 869 "Cannot allocate rx dma channel\n");
@@ -874,7 +896,7 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
874 goto err_no_irq; 896 goto err_no_irq;
875 } 897 }
876 898
877 master->bus_num = hw->pdata->bus_num; 899 master->bus_num = pdev->id;
878 master->num_chipselect = hw->pdata->num_chipselect; 900 master->num_chipselect = hw->pdata->num_chipselect;
879 901
880 /* 902 /*
@@ -924,8 +946,11 @@ err_no_txdma_descr:
924 au1xxx_dbdma_chan_free(hw->dma_tx_ch); 946 au1xxx_dbdma_chan_free(hw->dma_tx_ch);
925 947
926err_no_txdma: 948err_no_txdma:
927err_dma_add_dev: 949 iounmap((void __iomem *)hw->regs);
928 release_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t)); 950
951err_ioremap:
952 release_resource(hw->ioarea);
953 kfree(hw->ioarea);
929 954
930err_no_iores: 955err_no_iores:
931err_no_pdata: 956err_no_pdata:
@@ -944,7 +969,9 @@ static int __exit au1550_spi_remove(struct platform_device *pdev)
944 969
945 spi_bitbang_stop(&hw->bitbang); 970 spi_bitbang_stop(&hw->bitbang);
946 free_irq(hw->irq, hw); 971 free_irq(hw->irq, hw);
947 release_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t)); 972 iounmap((void __iomem *)hw->regs);
973 release_resource(hw->ioarea);
974 kfree(hw->ioarea);
948 975
949 if (hw->usedma) { 976 if (hw->usedma) {
950 au1550_spi_dma_rxtmp_free(hw); 977 au1550_spi_dma_rxtmp_free(hw);
@@ -971,12 +998,24 @@ static struct platform_driver au1550_spi_drv = {
971 998
972static int __init au1550_spi_init(void) 999static int __init au1550_spi_init(void)
973{ 1000{
1001 /*
1002 * create memory device with 8 bits dev_devwidth
1003 * needed for proper byte ordering to spi fifo
1004 */
1005 if (usedma) {
1006 ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
1007 if (!ddma_memid)
1008			printk(KERN_ERR "au1550-spi: cannot add memory "
1009 "dbdma device\n");
1010 }
974 return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe); 1011 return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe);
975} 1012}
976module_init(au1550_spi_init); 1013module_init(au1550_spi_init);
977 1014
978static void __exit au1550_spi_exit(void) 1015static void __exit au1550_spi_exit(void)
979{ 1016{
1017 if (usedma && ddma_memid)
1018 au1xxx_ddma_del_device(ddma_memid);
980 platform_driver_unregister(&au1550_spi_drv); 1019 platform_driver_unregister(&au1550_spi_drv);
981} 1020}
982module_exit(au1550_spi_exit); 1021module_exit(au1550_spi_exit);
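
A minimal sketch of the pattern the au1550 hunks above introduce: a resource shared by every probed device (here the dbdma memory device) is created once in module init and torn down in module exit, instead of being added inside probe() and leaked on the error path. The example_* names and the two stub helpers are stand-ins for the real au1xxx_ddma_add_device()/del_device() calls, not real APIs.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int shared_memid;                        /* shared by every probed device */

/* stand-ins for the real au1xxx dbdma add/del calls used in the hunk */
static int example_add_shared(void) { return 1; }
static void example_del_shared(int id) { }

static int example_spi_probe(struct platform_device *pdev)
{
        /* per-device setup only; the shared resource already exists */
        return 0;
}

static struct platform_driver example_spi_drv = {
        .driver = { .name = "example-spi" },
};

static int __init example_spi_init(void)
{
        shared_memid = example_add_shared();
        if (!shared_memid)
                pr_err("example-spi: cannot add shared dbdma device\n");
        return platform_driver_probe(&example_spi_drv, example_spi_probe);
}
module_init(example_spi_init);

static void __exit example_spi_exit(void)
{
        if (shared_memid)
                example_del_shared(shared_memid);
        platform_driver_unregister(&example_spi_drv);
}
module_exit(example_spi_exit);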
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 1771b2456bfa..ecca4a6a6f94 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -218,6 +218,8 @@ struct spi_device *spi_new_device(struct spi_master *master,
218 if (!spi_master_get(master)) 218 if (!spi_master_get(master))
219 return NULL; 219 return NULL;
220 220
221 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
222
221 proxy = kzalloc(sizeof *proxy, GFP_KERNEL); 223 proxy = kzalloc(sizeof *proxy, GFP_KERNEL);
222 if (!proxy) { 224 if (!proxy) {
223 dev_err(dev, "can't alloc dev for cs%d\n", 225 dev_err(dev, "can't alloc dev for cs%d\n",
@@ -229,7 +231,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
229 proxy->max_speed_hz = chip->max_speed_hz; 231 proxy->max_speed_hz = chip->max_speed_hz;
230 proxy->mode = chip->mode; 232 proxy->mode = chip->mode;
231 proxy->irq = chip->irq; 233 proxy->irq = chip->irq;
232 proxy->modalias = chip->modalias; 234 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
233 235
234 snprintf(proxy->dev.bus_id, sizeof proxy->dev.bus_id, 236 snprintf(proxy->dev.bus_id, sizeof proxy->dev.bus_id,
235 "%s.%u", master->dev.bus_id, 237 "%s.%u", master->dev.bus_id,
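
Sketch of the copy idiom the spi.c hunk above switches to, assuming a destination that is now a fixed-size array rather than a borrowed pointer; the struct and its size are illustrative only.

#include <linux/kernel.h>
#include <linux/string.h>

struct example_dev {
        char modalias[32];              /* size chosen for illustration */
};

static void example_set_modalias(struct example_dev *d, const char *src)
{
        /* complain loudly if the name would be silently truncated... */
        WARN_ON(strlen(src) >= sizeof(d->modalias));
        /* ...but still copy safely, always NUL-terminating */
        strlcpy(d->modalias, src, sizeof(d->modalias));
}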
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index 6832da6f7109..070c6219e2d6 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -266,21 +266,24 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
266 266
267 cs->hw_mode |= SPMODE_LEN(bits_per_word); 267 cs->hw_mode |= SPMODE_LEN(bits_per_word);
268 268
269 if ((mpc83xx_spi->spibrg / hz) >= 64) { 269 if ((mpc83xx_spi->spibrg / hz) > 64) {
270 pm = mpc83xx_spi->spibrg / (hz * 64) - 1; 270 pm = mpc83xx_spi->spibrg / (hz * 64);
271 if (pm > 0x0f) { 271 if (pm > 16) {
272 dev_err(&spi->dev, "Requested speed is too " 272 cs->hw_mode |= SPMODE_DIV16;
273 "low: %d Hz. Will use %d Hz instead.\n", 273 pm /= 16;
274 hz, mpc83xx_spi->spibrg / 1024); 274 if (pm > 16) {
275 pm = 0x0f; 275 dev_err(&spi->dev, "Requested speed is too "
276 "low: %d Hz. Will use %d Hz instead.\n",
277 hz, mpc83xx_spi->spibrg / 1024);
278 pm = 16;
279 }
276 } 280 }
277 cs->hw_mode |= SPMODE_PM(pm) | SPMODE_DIV16; 281 } else
278 } else {
279 pm = mpc83xx_spi->spibrg / (hz * 4); 282 pm = mpc83xx_spi->spibrg / (hz * 4);
280 if (pm) 283 if (pm)
281 pm--; 284 pm--;
282 cs->hw_mode |= SPMODE_PM(pm); 285
283 } 286 cs->hw_mode |= SPMODE_PM(pm);
284 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); 287 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
285 if (cs->hw_mode != regval) { 288 if (cs->hw_mode != regval) {
286 unsigned long flags; 289 unsigned long flags;
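
A standalone model of the prescaler selection in the spi_mpc83xx hunk above, kept only to make the two-stage divide-by-16 fallback easier to follow; the exact register semantics are whatever SPMODE_PM and SPMODE_DIV16 encode in the driver.

#include <stdbool.h>

static unsigned int pick_prescaler(unsigned int spibrg, unsigned int hz,
                                   bool *div16)
{
        unsigned int pm;

        *div16 = false;
        if (spibrg / hz > 64) {
                pm = spibrg / (hz * 64);
                if (pm > 16) {
                        *div16 = true;          /* fall back to the /16 stage */
                        pm /= 16;
                        if (pm > 16)
                                pm = 16;        /* clamp: lowest reachable speed */
                }
        } else {
                pm = spibrg / (hz * 4);
        }
        if (pm)
                pm--;                           /* hardware encodes divider - 1 */
        return pm;
}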
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2833fd772a24..e5e0cfed5e3b 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -228,7 +228,6 @@ static int spidev_message(struct spidev_data *spidev,
228 * We walk the array of user-provided transfers, using each one 228 * We walk the array of user-provided transfers, using each one
229 * to initialize a kernel version of the same transfer. 229 * to initialize a kernel version of the same transfer.
230 */ 230 */
231 mutex_lock(&spidev->buf_lock);
232 buf = spidev->buffer; 231 buf = spidev->buffer;
233 total = 0; 232 total = 0;
234 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; 233 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
@@ -296,14 +295,12 @@ static int spidev_message(struct spidev_data *spidev,
296 status = total; 295 status = total;
297 296
298done: 297done:
299 mutex_unlock(&spidev->buf_lock);
300 kfree(k_xfers); 298 kfree(k_xfers);
301 return status; 299 return status;
302} 300}
303 301
304static int 302static long
305spidev_ioctl(struct inode *inode, struct file *filp, 303spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
306 unsigned int cmd, unsigned long arg)
307{ 304{
308 int err = 0; 305 int err = 0;
309 int retval = 0; 306 int retval = 0;
@@ -341,6 +338,14 @@ spidev_ioctl(struct inode *inode, struct file *filp,
341 if (spi == NULL) 338 if (spi == NULL)
342 return -ESHUTDOWN; 339 return -ESHUTDOWN;
343 340
341 /* use the buffer lock here for triple duty:
342 * - prevent I/O (from us) so calling spi_setup() is safe;
343 * - prevent concurrent SPI_IOC_WR_* from morphing
344 * data fields while SPI_IOC_RD_* reads them;
345 * - SPI_IOC_MESSAGE needs the buffer locked "normally".
346 */
347 mutex_lock(&spidev->buf_lock);
348
344 switch (cmd) { 349 switch (cmd) {
345 /* read requests */ 350 /* read requests */
346 case SPI_IOC_RD_MODE: 351 case SPI_IOC_RD_MODE:
@@ -456,6 +461,8 @@ spidev_ioctl(struct inode *inode, struct file *filp,
456 kfree(ioc); 461 kfree(ioc);
457 break; 462 break;
458 } 463 }
464
465 mutex_unlock(&spidev->buf_lock);
459 spi_dev_put(spi); 466 spi_dev_put(spi);
460 return retval; 467 return retval;
461} 468}
@@ -533,7 +540,7 @@ static struct file_operations spidev_fops = {
533 */ 540 */
534 .write = spidev_write, 541 .write = spidev_write,
535 .read = spidev_read, 542 .read = spidev_read,
536 .ioctl = spidev_ioctl, 543 .unlocked_ioctl = spidev_ioctl,
537 .open = spidev_open, 544 .open = spidev_open,
538 .release = spidev_release, 545 .release = spidev_release,
539}; 546};
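
Rough sketch of the spidev conversion above: the handler loses the inode argument, takes the per-device mutex for the whole ioctl body, and is wired up through .unlocked_ioctl instead of .ioctl. Struct and field names are simplified placeholders.

#include <linux/fs.h>
#include <linux/mutex.h>

struct example_data {
        struct mutex    buf_lock;
        /* ... buffers, spi device pointer, ... */
};

static long example_ioctl(struct file *filp, unsigned int cmd,
                          unsigned long arg)
{
        struct example_data *d = filp->private_data;
        long retval = 0;

        mutex_lock(&d->buf_lock);       /* serialize RD/WR requests and I/O */
        switch (cmd) {
        /* ... handle commands ... */
        default:
                retval = -ENOTTY;
                break;
        }
        mutex_unlock(&d->buf_lock);
        return retval;
}

static const struct file_operations example_fops = {
        .unlocked_ioctl = example_ioctl,
};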
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 113a0468ffcb..68d6f4988fb5 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -353,11 +353,12 @@ static int __init xilinx_spi_probe(struct platform_device *dev)
353 goto put_master; 353 goto put_master;
354 } 354 }
355 355
356 xspi->irq = platform_get_irq(dev, 0); 356 ret = platform_get_irq(dev, 0);
357 if (xspi->irq < 0) { 357 if (ret < 0) {
358 ret = -ENXIO; 358 ret = -ENXIO;
359 goto unmap_io; 359 goto unmap_io;
360 } 360 }
361 xspi->irq = ret;
361 362
362 master->bus_num = pdata->bus_num; 363 master->bus_num = pdata->bus_num;
363 master->num_chipselect = pdata->num_chipselect; 364 master->num_chipselect = pdata->num_chipselect;
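
Sketch of the error-check reordering in the xilinx_spi hunk above: platform_get_irq() returns a negative errno on failure, so the result is tested in a signed local before it is stored in the (possibly unsigned) irq field.

#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_grab_irq(struct platform_device *pdev, unsigned int *irq)
{
        int ret = platform_get_irq(pdev, 0);

        if (ret < 0)
                return -ENXIO;          /* never let the error reach *irq */
        *irq = ret;
        return 0;
}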
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index 49cd9793404f..ec7aeb502d15 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -6095,15 +6095,15 @@ static int capabilities_check(IXJ *j, struct phone_capability *pcreq)
6095 return retval; 6095 return retval;
6096} 6096}
6097 6097
6098static int ixj_ioctl(struct inode *inode, struct file *file_p, unsigned int cmd, unsigned long arg) 6098static long do_ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
6099{ 6099{
6100 IXJ_TONE ti; 6100 IXJ_TONE ti;
6101 IXJ_FILTER jf; 6101 IXJ_FILTER jf;
6102 IXJ_FILTER_RAW jfr; 6102 IXJ_FILTER_RAW jfr;
6103 void __user *argp = (void __user *)arg; 6103 void __user *argp = (void __user *)arg;
6104 6104 struct inode *inode = file_p->f_path.dentry->d_inode;
6105 unsigned int raise, mant;
6106 unsigned int minor = iminor(inode); 6105 unsigned int minor = iminor(inode);
6106 unsigned int raise, mant;
6107 int board = NUM(inode); 6107 int board = NUM(inode);
6108 6108
6109 IXJ *j = get_ixj(NUM(inode)); 6109 IXJ *j = get_ixj(NUM(inode));
@@ -6661,6 +6661,15 @@ static int ixj_ioctl(struct inode *inode, struct file *file_p, unsigned int cmd,
6661 return retval; 6661 return retval;
6662} 6662}
6663 6663
6664static long ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
6665{
6666 long ret;
6667 lock_kernel();
6668 ret = do_ixj_ioctl(file_p, cmd, arg);
6669 unlock_kernel();
6670 return ret;
6671}
6672
6664static int ixj_fasync(int fd, struct file *file_p, int mode) 6673static int ixj_fasync(int fd, struct file *file_p, int mode)
6665{ 6674{
6666 IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode)); 6675 IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
@@ -6674,7 +6683,7 @@ static const struct file_operations ixj_fops =
6674 .read = ixj_enhanced_read, 6683 .read = ixj_enhanced_read,
6675 .write = ixj_enhanced_write, 6684 .write = ixj_enhanced_write,
6676 .poll = ixj_poll, 6685 .poll = ixj_poll,
6677 .ioctl = ixj_ioctl, 6686 .unlocked_ioctl = ixj_ioctl,
6678 .release = ixj_release, 6687 .release = ixj_release,
6679 .fasync = ixj_fasync 6688 .fasync = ixj_fasync
6680}; 6689};
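
Sketch of the mechanical ixj conversion above: the old ioctl body is renamed, recovers the inode from the struct file, and a thin wrapper preserves the implicit Big Kernel Lock semantics of the old .ioctl entry point while the driver moves to .unlocked_ioctl.

#include <linux/fs.h>
#include <linux/smp_lock.h>

static long do_example_ioctl(struct file *file_p, unsigned int cmd,
                             unsigned long arg)
{
        struct inode *inode = file_p->f_path.dentry->d_inode;

        /* ... original ioctl body, now keyed off the recovered inode ... */
        (void)inode;
        return 0;
}

static long example_ioctl(struct file *file_p, unsigned int cmd,
                          unsigned long arg)
{
        long ret;

        lock_kernel();                  /* keep the old locking behaviour */
        ret = do_example_ioctl(file_p, cmd, arg);
        unlock_kernel();
        return ret;
}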
diff --git a/drivers/usb/gadget/at91_udc.h b/drivers/usb/gadget/at91_udc.h
index a973f2a50fb9..c65d62295890 100644
--- a/drivers/usb/gadget/at91_udc.h
+++ b/drivers/usb/gadget/at91_udc.h
@@ -171,7 +171,7 @@ struct at91_request {
171#endif 171#endif
172 172
173#define ERR(stuff...) pr_err("udc: " stuff) 173#define ERR(stuff...) pr_err("udc: " stuff)
174#define WARN(stuff...) pr_warning("udc: " stuff) 174#define WARNING(stuff...) pr_warning("udc: " stuff)
175#define INFO(stuff...) pr_info("udc: " stuff) 175#define INFO(stuff...) pr_info("udc: " stuff)
176#define DBG(stuff...) pr_debug("udc: " stuff) 176#define DBG(stuff...) pr_debug("udc: " stuff)
177 177
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index d490d0289507..a39a4b940c33 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -170,7 +170,7 @@ static int __init cdc_bind(struct usb_composite_dev *cdev)
170 * but if the controller isn't recognized at all then 170 * but if the controller isn't recognized at all then
171 * that assumption is a bit more likely to be wrong. 171 * that assumption is a bit more likely to be wrong.
172 */ 172 */
173 WARN(cdev, "controller '%s' not recognized; trying %s\n", 173 WARNING(cdev, "controller '%s' not recognized; trying %s\n",
174 gadget->name, 174 gadget->name,
175 cdc_config_driver.label); 175 cdc_config_driver.label);
176 device_desc.bcdDevice = 176 device_desc.bcdDevice =
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index d7aaaa29b1e1..bcac2e68660d 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -293,7 +293,7 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
293 * but if the controller isn't recognized at all then 293 * but if the controller isn't recognized at all then
294 * that assumption is a bit more likely to be wrong. 294 * that assumption is a bit more likely to be wrong.
295 */ 295 */
296 WARN(cdev, "controller '%s' not recognized; trying %s\n", 296 WARNING(cdev, "controller '%s' not recognized; trying %s\n",
297 gadget->name, 297 gadget->name,
298 eth_config_driver.label); 298 eth_config_driver.label);
299 device_desc.bcdDevice = 299 device_desc.bcdDevice =
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 15c24edbb61a..ea2c31d18080 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -308,7 +308,7 @@ MODULE_LICENSE("Dual BSD/GPL");
308 dev_vdbg(&(d)->gadget->dev , fmt , ## args) 308 dev_vdbg(&(d)->gadget->dev , fmt , ## args)
309#define ERROR(d, fmt, args...) \ 309#define ERROR(d, fmt, args...) \
310 dev_err(&(d)->gadget->dev , fmt , ## args) 310 dev_err(&(d)->gadget->dev , fmt , ## args)
311#define WARN(d, fmt, args...) \ 311#define WARNING(d, fmt, args...) \
312 dev_warn(&(d)->gadget->dev , fmt , ## args) 312 dev_warn(&(d)->gadget->dev , fmt , ## args)
313#define INFO(d, fmt, args...) \ 313#define INFO(d, fmt, args...) \
314 dev_info(&(d)->gadget->dev , fmt , ## args) 314 dev_info(&(d)->gadget->dev , fmt , ## args)
@@ -1091,7 +1091,7 @@ static int ep0_queue(struct fsg_dev *fsg)
1091 if (rc != 0 && rc != -ESHUTDOWN) { 1091 if (rc != 0 && rc != -ESHUTDOWN) {
1092 1092
1093 /* We can't do much more than wait for a reset */ 1093 /* We can't do much more than wait for a reset */
1094 WARN(fsg, "error in submission: %s --> %d\n", 1094 WARNING(fsg, "error in submission: %s --> %d\n",
1095 fsg->ep0->name, rc); 1095 fsg->ep0->name, rc);
1096 } 1096 }
1097 return rc; 1097 return rc;
@@ -1227,7 +1227,7 @@ static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1227 1227
1228 /* Save the command for later */ 1228 /* Save the command for later */
1229 if (fsg->cbbuf_cmnd_size) 1229 if (fsg->cbbuf_cmnd_size)
1230 WARN(fsg, "CB[I] overwriting previous command\n"); 1230 WARNING(fsg, "CB[I] overwriting previous command\n");
1231 fsg->cbbuf_cmnd_size = req->actual; 1231 fsg->cbbuf_cmnd_size = req->actual;
1232 memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size); 1232 memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
1233 1233
@@ -1506,7 +1506,7 @@ static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
1506 * submissions if DMA is enabled. */ 1506 * submissions if DMA is enabled. */
1507 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && 1507 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
1508 req->length == 0)) 1508 req->length == 0))
1509 WARN(fsg, "error in submission: %s --> %d\n", 1509 WARNING(fsg, "error in submission: %s --> %d\n",
1510 ep->name, rc); 1510 ep->name, rc);
1511 } 1511 }
1512} 1512}
@@ -2294,7 +2294,7 @@ static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
2294 VDBG(fsg, "delayed bulk-in endpoint halt\n"); 2294 VDBG(fsg, "delayed bulk-in endpoint halt\n");
2295 while (rc != 0) { 2295 while (rc != 0) {
2296 if (rc != -EAGAIN) { 2296 if (rc != -EAGAIN) {
2297 WARN(fsg, "usb_ep_set_halt -> %d\n", rc); 2297 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
2298 rc = 0; 2298 rc = 0;
2299 break; 2299 break;
2300 } 2300 }
@@ -2317,7 +2317,7 @@ static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
2317 VDBG(fsg, "delayed bulk-in endpoint wedge\n"); 2317 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
2318 while (rc != 0) { 2318 while (rc != 0) {
2319 if (rc != -EAGAIN) { 2319 if (rc != -EAGAIN) {
2320 WARN(fsg, "usb_ep_set_wedge -> %d\n", rc); 2320 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
2321 rc = 0; 2321 rc = 0;
2322 break; 2322 break;
2323 } 2323 }
@@ -3755,7 +3755,7 @@ static int __init check_parameters(struct fsg_dev *fsg)
3755 if (gcnum >= 0) 3755 if (gcnum >= 0)
3756 mod_data.release = 0x0300 + gcnum; 3756 mod_data.release = 0x0300 + gcnum;
3757 else { 3757 else {
3758 WARN(fsg, "controller '%s' not recognized\n", 3758 WARNING(fsg, "controller '%s' not recognized\n",
3759 fsg->gadget->name); 3759 fsg->gadget->name);
3760 mod_data.release = 0x0399; 3760 mod_data.release = 0x0399;
3761 } 3761 }
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index 1695382f30fe..1cfccf102a2d 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -1538,7 +1538,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
1538 1538
1539 /* If the ep is configured */ 1539 /* If the ep is configured */
1540 if (curr_ep->name == NULL) { 1540 if (curr_ep->name == NULL) {
1541 WARN("Invalid EP?"); 1541 WARNING("Invalid EP?");
1542 continue; 1542 continue;
1543 } 1543 }
1544 1544
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 98b1483ef6a5..6131752a38bc 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -552,7 +552,7 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
552#endif 552#endif
553 553
554#define ERR(stuff...) pr_err("udc: " stuff) 554#define ERR(stuff...) pr_err("udc: " stuff)
555#define WARN(stuff...) pr_warning("udc: " stuff) 555#define WARNING(stuff...) pr_warning("udc: " stuff)
556#define INFO(stuff...) pr_info("udc: " stuff) 556#define INFO(stuff...) pr_info("udc: " stuff)
557 557
558/*-------------------------------------------------------------------------*/ 558/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 7f4d4828e3aa..ea8651e3da1a 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -138,8 +138,6 @@ static void gmidi_transmit(struct gmidi_device* dev, struct usb_request* req);
138 dev_vdbg(&(d)->gadget->dev , fmt , ## args) 138 dev_vdbg(&(d)->gadget->dev , fmt , ## args)
139#define ERROR(d, fmt, args...) \ 139#define ERROR(d, fmt, args...) \
140 dev_err(&(d)->gadget->dev , fmt , ## args) 140 dev_err(&(d)->gadget->dev , fmt , ## args)
141#define WARN(d, fmt, args...) \
142 dev_warn(&(d)->gadget->dev , fmt , ## args)
143#define INFO(d, fmt, args...) \ 141#define INFO(d, fmt, args...) \
144 dev_info(&(d)->gadget->dev , fmt , ## args) 142 dev_info(&(d)->gadget->dev , fmt , ## args)
145 143
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 48f1c63b7013..60aa04847b18 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1768,7 +1768,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1768 * usb_gadget_driver_{register,unregister}() must change. 1768 * usb_gadget_driver_{register,unregister}() must change.
1769 */ 1769 */
1770 if (the_controller) { 1770 if (the_controller) {
1771 WARN(dev, "ignoring %s\n", pci_name(pdev)); 1771 WARNING(dev, "ignoring %s\n", pci_name(pdev));
1772 return -EBUSY; 1772 return -EBUSY;
1773 } 1773 }
1774 if (!pdev->irq) { 1774 if (!pdev->irq) {
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index bc4eb1e0b507..566cb2319056 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -285,7 +285,7 @@ struct goku_udc {
285 285
286#define ERROR(dev,fmt,args...) \ 286#define ERROR(dev,fmt,args...) \
287 xprintk(dev , KERN_ERR , fmt , ## args) 287 xprintk(dev , KERN_ERR , fmt , ## args)
288#define WARN(dev,fmt,args...) \ 288#define WARNING(dev,fmt,args...) \
289 xprintk(dev , KERN_WARNING , fmt , ## args) 289 xprintk(dev , KERN_WARNING , fmt , ## args)
290#define INFO(dev,fmt,args...) \ 290#define INFO(dev,fmt,args...) \
291 xprintk(dev , KERN_INFO , fmt , ## args) 291 xprintk(dev , KERN_INFO , fmt , ## args)
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 04692d59fc1c..f4585d3e90d7 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -262,8 +262,6 @@ static const char *CHIP;
262 262
263#define ERROR(dev,fmt,args...) \ 263#define ERROR(dev,fmt,args...) \
264 xprintk(dev , KERN_ERR , fmt , ## args) 264 xprintk(dev , KERN_ERR , fmt , ## args)
265#define WARN(dev,fmt,args...) \
266 xprintk(dev , KERN_WARNING , fmt , ## args)
267#define INFO(dev,fmt,args...) \ 265#define INFO(dev,fmt,args...) \
268 xprintk(dev , KERN_INFO , fmt , ## args) 266 xprintk(dev , KERN_INFO , fmt , ## args)
269 267
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index b67ab677af72..5cfb5ebf3881 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1007,7 +1007,7 @@ static void scan_dma_completions (struct net2280_ep *ep)
1007 * 0122, and 0124; not all cases trigger the warning. 1007 * 0122, and 0124; not all cases trigger the warning.
1008 */ 1008 */
1009 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) { 1009 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
1010 WARN (ep->dev, "%s lost packet sync!\n", 1010 WARNING (ep->dev, "%s lost packet sync!\n",
1011 ep->ep.name); 1011 ep->ep.name);
1012 req->req.status = -EOVERFLOW; 1012 req->req.status = -EOVERFLOW;
1013 } else if ((tmp = readl (&ep->regs->ep_avail)) != 0) { 1013 } else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h
index 1f2af398a9a4..81a71dbdc2c6 100644
--- a/drivers/usb/gadget/net2280.h
+++ b/drivers/usb/gadget/net2280.h
@@ -272,7 +272,7 @@ static inline void net2280_led_shutdown (struct net2280 *dev)
272 272
273#define ERROR(dev,fmt,args...) \ 273#define ERROR(dev,fmt,args...) \
274 xprintk(dev , KERN_ERR , fmt , ## args) 274 xprintk(dev , KERN_ERR , fmt , ## args)
275#define WARN(dev,fmt,args...) \ 275#define WARNING(dev,fmt,args...) \
276 xprintk(dev , KERN_WARNING , fmt , ## args) 276 xprintk(dev , KERN_WARNING , fmt , ## args)
277#define INFO(dev,fmt,args...) \ 277#define INFO(dev,fmt,args...) \
278 xprintk(dev , KERN_INFO , fmt , ## args) 278 xprintk(dev , KERN_INFO , fmt , ## args)
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 4b79a8509e84..395bd1844482 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -1120,7 +1120,7 @@ static int omap_ep_set_halt(struct usb_ep *_ep, int value)
1120 status = -EINVAL; 1120 status = -EINVAL;
1121 else if (value) { 1121 else if (value) {
1122 if (ep->udc->ep0_set_config) { 1122 if (ep->udc->ep0_set_config) {
1123 WARN("error changing config?\n"); 1123 WARNING("error changing config?\n");
1124 omap_writew(UDC_CLR_CFG, UDC_SYSCON2); 1124 omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
1125 } 1125 }
1126 omap_writew(UDC_STALL_CMD, UDC_SYSCON2); 1126 omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
@@ -1764,7 +1764,7 @@ do_stall:
1764 u.r.bRequestType, u.r.bRequest, status); 1764 u.r.bRequestType, u.r.bRequest, status);
1765 if (udc->ep0_set_config) { 1765 if (udc->ep0_set_config) {
1766 if (udc->ep0_reset_config) 1766 if (udc->ep0_reset_config)
1767 WARN("error resetting config?\n"); 1767 WARNING("error resetting config?\n");
1768 else 1768 else
1769 omap_writew(UDC_CLR_CFG, UDC_SYSCON2); 1769 omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
1770 } 1770 }
@@ -3076,7 +3076,7 @@ static int omap_udc_suspend(struct platform_device *dev, pm_message_t message)
3076 * which would prevent entry to deep sleep... 3076 * which would prevent entry to deep sleep...
3077 */ 3077 */
3078 if ((devstat & UDC_ATT) != 0 && (devstat & UDC_SUS) == 0) { 3078 if ((devstat & UDC_ATT) != 0 && (devstat & UDC_SUS) == 0) {
3079 WARN("session active; suspend requires disconnect\n"); 3079 WARNING("session active; suspend requires disconnect\n");
3080 omap_pullup(&udc->gadget, 0); 3080 omap_pullup(&udc->gadget, 0);
3081 } 3081 }
3082 3082
diff --git a/drivers/usb/gadget/omap_udc.h b/drivers/usb/gadget/omap_udc.h
index 8522bbb12278..29edc51b6b22 100644
--- a/drivers/usb/gadget/omap_udc.h
+++ b/drivers/usb/gadget/omap_udc.h
@@ -188,7 +188,7 @@ struct omap_udc {
188#endif 188#endif
189 189
190#define ERR(stuff...) pr_err("udc: " stuff) 190#define ERR(stuff...) pr_err("udc: " stuff)
191#define WARN(stuff...) pr_warning("udc: " stuff) 191#define WARNING(stuff...) pr_warning("udc: " stuff)
192#define INFO(stuff...) pr_info("udc: " stuff) 192#define INFO(stuff...) pr_info("udc: " stuff)
193#define DBG(stuff...) pr_debug("udc: " stuff) 193#define DBG(stuff...) pr_debug("udc: " stuff)
194 194
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 49cd9e145a9b..e0090085b78e 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -179,7 +179,7 @@ module_param(qlen, uint, S_IRUGO|S_IWUSR);
179 179
180#define ERROR(dev, fmt, args...) \ 180#define ERROR(dev, fmt, args...) \
181 xprintk(dev, KERN_ERR, fmt, ## args) 181 xprintk(dev, KERN_ERR, fmt, ## args)
182#define WARN(dev, fmt, args...) \ 182#define WARNING(dev, fmt, args...) \
183 xprintk(dev, KERN_WARNING, fmt, ## args) 183 xprintk(dev, KERN_WARNING, fmt, ## args)
184#define INFO(dev, fmt, args...) \ 184#define INFO(dev, fmt, args...) \
185 xprintk(dev, KERN_INFO, fmt, ## args) 185 xprintk(dev, KERN_INFO, fmt, ## args)
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index fbd6289977c8..7e6725d89976 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -152,9 +152,10 @@ static int is_vbus_present(void)
152static void pullup_off(void) 152static void pullup_off(void)
153{ 153{
154 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 154 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
155 int off_level = mach->gpio_pullup_inverted;
155 156
156 if (mach->gpio_pullup) 157 if (mach->gpio_pullup)
157 gpio_set_value(mach->gpio_pullup, 0); 158 gpio_set_value(mach->gpio_pullup, off_level);
158 else if (mach->udc_command) 159 else if (mach->udc_command)
159 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT); 160 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
160} 161}
@@ -162,9 +163,10 @@ static void pullup_off(void)
162static void pullup_on(void) 163static void pullup_on(void)
163{ 164{
164 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 165 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
166 int on_level = !mach->gpio_pullup_inverted;
165 167
166 if (mach->gpio_pullup) 168 if (mach->gpio_pullup)
167 gpio_set_value(mach->gpio_pullup, 1); 169 gpio_set_value(mach->gpio_pullup, on_level);
168 else if (mach->udc_command) 170 else if (mach->udc_command)
169 mach->udc_command(PXA2XX_UDC_CMD_CONNECT); 171 mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
170} 172}
@@ -340,7 +342,7 @@ pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
340 struct pxa25x_request *req; 342 struct pxa25x_request *req;
341 343
342 req = container_of (_req, struct pxa25x_request, req); 344 req = container_of (_req, struct pxa25x_request, req);
343 WARN_ON (!list_empty (&req->queue)); 345 WARN_ON(!list_empty (&req->queue));
344 kfree(req); 346 kfree(req);
345} 347}
346 348
@@ -1554,7 +1556,7 @@ config_change:
1554 * tell us about config change events, 1556 * tell us about config change events,
1555 * so later ones may fail... 1557 * so later ones may fail...
1556 */ 1558 */
1557 WARN("config change %02x fail %d?\n", 1559 WARNING("config change %02x fail %d?\n",
1558 u.r.bRequest, i); 1560 u.r.bRequest, i);
1559 return; 1561 return;
1560 /* TODO experiment: if has_cfr, 1562 /* TODO experiment: if has_cfr,
@@ -2328,7 +2330,7 @@ static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
2328 unsigned long flags; 2330 unsigned long flags;
2329 2331
2330 if (!udc->mach->gpio_pullup && !udc->mach->udc_command) 2332 if (!udc->mach->gpio_pullup && !udc->mach->udc_command)
2331 WARN("USB host won't detect disconnect!\n"); 2333 WARNING("USB host won't detect disconnect!\n");
2332 udc->suspended = 1; 2334 udc->suspended = 1;
2333 2335
2334 local_irq_save(flags); 2336 local_irq_save(flags);
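
Small sketch of the polarity handling added to pxa25x_udc above: the board tells the driver whether the D+ pull-up GPIO is active-low, and the driver derives the "on" and "off" levels instead of hard-coding 1 and 0. The struct here is trimmed to the two fields the hunk uses.

#include <linux/gpio.h>
#include <linux/types.h>

struct example_udc_mach_info {
        int     gpio_pullup;
        bool    gpio_pullup_inverted;
};

static void example_pullup(struct example_udc_mach_info *mach, bool connect)
{
        int level = connect ? !mach->gpio_pullup_inverted
                            : mach->gpio_pullup_inverted;

        if (mach->gpio_pullup)
                gpio_set_value(mach->gpio_pullup, level);
}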
diff --git a/drivers/usb/gadget/pxa25x_udc.h b/drivers/usb/gadget/pxa25x_udc.h
index 4d11ece7c95f..c8a13215e02c 100644
--- a/drivers/usb/gadget/pxa25x_udc.h
+++ b/drivers/usb/gadget/pxa25x_udc.h
@@ -259,7 +259,7 @@ dump_state(struct pxa25x_udc *dev)
259#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0) 259#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
260 260
261#define ERR(stuff...) pr_err("udc: " stuff) 261#define ERR(stuff...) pr_err("udc: " stuff)
262#define WARN(stuff...) pr_warning("udc: " stuff) 262#define WARNING(stuff...) pr_warning("udc: " stuff)
263#define INFO(stuff...) pr_info("udc: " stuff) 263#define INFO(stuff...) pr_info("udc: " stuff)
264 264
265 265
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 5458f43a8668..3791e6271903 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -116,7 +116,6 @@ static inline int qlen(struct usb_gadget *gadget)
116#undef DBG 116#undef DBG
117#undef VDBG 117#undef VDBG
118#undef ERROR 118#undef ERROR
119#undef WARN
120#undef INFO 119#undef INFO
121 120
122#define xprintk(d, level, fmt, args...) \ 121#define xprintk(d, level, fmt, args...) \
@@ -140,8 +139,6 @@ static inline int qlen(struct usb_gadget *gadget)
140 139
141#define ERROR(dev, fmt, args...) \ 140#define ERROR(dev, fmt, args...) \
142 xprintk(dev , KERN_ERR , fmt , ## args) 141 xprintk(dev , KERN_ERR , fmt , ## args)
143#define WARN(dev, fmt, args...) \
144 xprintk(dev , KERN_WARNING , fmt , ## args)
145#define INFO(dev, fmt, args...) \ 142#define INFO(dev, fmt, args...) \
146 xprintk(dev , KERN_INFO , fmt , ## args) 143 xprintk(dev , KERN_INFO , fmt , ## args)
147 144
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 31178e10cbbe..ce1ca0ba0515 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -882,7 +882,7 @@ static void isp116x_endpoint_disable(struct usb_hcd *hcd,
882 for (i = 0; i < 100 && !list_empty(&hep->urb_list); i++) 882 for (i = 0; i < 100 && !list_empty(&hep->urb_list); i++)
883 msleep(3); 883 msleep(3);
884 if (!list_empty(&hep->urb_list)) 884 if (!list_empty(&hep->urb_list))
885 WARN("ep %p not empty?\n", ep); 885 WARNING("ep %p not empty?\n", ep);
886 886
887 kfree(ep); 887 kfree(ep);
888 hep->hcpriv = NULL; 888 hep->hcpriv = NULL;
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h
index 595b90a99848..aa211bafcff9 100644
--- a/drivers/usb/host/isp116x.h
+++ b/drivers/usb/host/isp116x.h
@@ -338,7 +338,7 @@ struct isp116x_ep {
338#endif 338#endif
339 339
340#define ERR(stuff...) printk(KERN_ERR "116x: " stuff) 340#define ERR(stuff...) printk(KERN_ERR "116x: " stuff)
341#define WARN(stuff...) printk(KERN_WARNING "116x: " stuff) 341#define WARNING(stuff...) printk(KERN_WARNING "116x: " stuff)
342#define INFO(stuff...) printk(KERN_INFO "116x: " stuff) 342#define INFO(stuff...) printk(KERN_INFO "116x: " stuff)
343 343
344/* ------------------------------------------------- */ 344/* ------------------------------------------------- */
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 340d72da554a..8a74bbb57d08 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1026,7 +1026,7 @@ sl811h_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1026 if (!list_empty(&hep->urb_list)) 1026 if (!list_empty(&hep->urb_list))
1027 msleep(3); 1027 msleep(3);
1028 if (!list_empty(&hep->urb_list)) 1028 if (!list_empty(&hep->urb_list))
1029 WARN("ep %p not empty?\n", ep); 1029 WARNING("ep %p not empty?\n", ep);
1030 1030
1031 kfree(ep); 1031 kfree(ep);
1032 hep->hcpriv = NULL; 1032 hep->hcpriv = NULL;
diff --git a/drivers/usb/host/sl811.h b/drivers/usb/host/sl811.h
index 7690d98e42a7..b6b8c1f233dd 100644
--- a/drivers/usb/host/sl811.h
+++ b/drivers/usb/host/sl811.h
@@ -261,6 +261,6 @@ sl811_read_buf(struct sl811 *sl811, int addr, void *buf, size_t count)
261#endif 261#endif
262 262
263#define ERR(stuff...) printk(KERN_ERR "sl811: " stuff) 263#define ERR(stuff...) printk(KERN_ERR "sl811: " stuff)
264#define WARN(stuff...) printk(KERN_WARNING "sl811: " stuff) 264#define WARNING(stuff...) printk(KERN_WARNING "sl811: " stuff)
265#define INFO(stuff...) printk(KERN_INFO "sl811: " stuff) 265#define INFO(stuff...) printk(KERN_INFO "sl811: " stuff)
266 266
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 054dedd28127..b358c4e1cf21 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -81,7 +81,7 @@ static struct usb_device *testdev_to_usbdev (struct usbtest_dev *test)
81 81
82#define ERROR(tdev, fmt, args...) \ 82#define ERROR(tdev, fmt, args...) \
83 dev_err(&(tdev)->intf->dev , fmt , ## args) 83 dev_err(&(tdev)->intf->dev , fmt , ## args)
84#define WARN(tdev, fmt, args...) \ 84#define WARNING(tdev, fmt, args...) \
85 dev_warn(&(tdev)->intf->dev , fmt , ## args) 85 dev_warn(&(tdev)->intf->dev , fmt , ## args)
86 86
87/*-------------------------------------------------------------------------*/ 87/*-------------------------------------------------------------------------*/
@@ -1946,7 +1946,7 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1946 1946
1947 status = get_endpoints (dev, intf); 1947 status = get_endpoints (dev, intf);
1948 if (status < 0) { 1948 if (status < 0) {
1949 WARN(dev, "couldn't get endpoints, %d\n", 1949 WARNING(dev, "couldn't get endpoints, %d\n",
1950 status); 1950 status);
1951 return status; 1951 return status;
1952 } 1952 }
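
The long run of one-line USB gadget/host changes above is a single mechanical rename: these drivers defined their own WARN(...) printing macro, which would now collide with the generic WARN() macro provided by the core kernel headers, so the private macro becomes WARNING(...) and every call site follows. A representative before/after, with a made-up "example:" prefix:

#include <linux/kernel.h>

/* old, clashes with the core WARN() macro: */
/* #define WARN(stuff...)       printk(KERN_WARNING "example: " stuff) */

/* new, no clash, same behaviour: */
#define WARNING(stuff...)       printk(KERN_WARNING "example: " stuff)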
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 9b887ef64ff1..70d135e0cc47 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1658,6 +1658,32 @@ config FB_PM3
1658 similar boards, 3DLabs Permedia3 Create!, Appian Jeronimo 2000 1658 similar boards, 3DLabs Permedia3 Create!, Appian Jeronimo 2000
1659 and maybe other boards. 1659 and maybe other boards.
1660 1660
1661config FB_CARMINE
1662 tristate "Fujitsu carmine frame buffer support"
1663 depends on FB && PCI
1664 select FB_CFB_FILLRECT
1665 select FB_CFB_COPYAREA
1666 select FB_CFB_IMAGEBLIT
1667 help
1668 This is the frame buffer device driver for the Fujitsu Carmine chip.
1669 The driver provides two independent frame buffer devices.
1670
1671choice
1672 depends on FB_CARMINE
1673 prompt "DRAM timing"
1674 default FB_CARMINE_DRAM_EVAL
1675
1676config FB_CARMINE_DRAM_EVAL
1677 bool "Eval board timings"
1678 help
1679 Use timings which work on the eval card.
1680
1681config CARMINE_DRAM_CUSTOM
1682 bool "Custom board timings"
1683 help
1684 Use custom board timings.
1685endchoice
1686
1661config FB_AU1100 1687config FB_AU1100
1662 bool "Au1100 LCD Driver" 1688 bool "Au1100 LCD Driver"
1663 depends on (FB = y) && MIPS && SOC_AU1100 1689 depends on (FB = y) && MIPS && SOC_AU1100
@@ -1840,6 +1866,16 @@ config FB_W100
1840 1866
1841 If unsure, say N. 1867 If unsure, say N.
1842 1868
1869config FB_SH_MOBILE_LCDC
1870 tristate "SuperH Mobile LCDC framebuffer support"
1871 depends on FB && SUPERH
1872 select FB_CFB_FILLRECT
1873 select FB_CFB_COPYAREA
1874 select FB_CFB_IMAGEBLIT
1875 default m
1876 ---help---
1877 Frame buffer driver for the on-chip SH-Mobile LCD controller.
1878
1843config FB_S3C2410 1879config FB_S3C2410
1844 tristate "S3C2410 LCD framebuffer support" 1880 tristate "S3C2410 LCD framebuffer support"
1845 depends on FB && ARCH_S3C2410 1881 depends on FB && ARCH_S3C2410
@@ -1951,6 +1987,23 @@ config FB_AM200EPD
1951 This enables support for the Metronome display controller used on 1987 This enables support for the Metronome display controller used on
1952 the E-Ink AM-200 EPD devkit. 1988 the E-Ink AM-200 EPD devkit.
1953 1989
1990config FB_COBALT
1991 tristate "Cobalt server LCD frame buffer support"
1992 depends on FB && MIPS_COBALT
1993
1994config FB_SH7760
1995 bool "SH7760/SH7763 LCDC support"
1996 depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763)
1997 select FB_CFB_FILLRECT
1998 select FB_CFB_COPYAREA
1999 select FB_CFB_IMAGEBLIT
2000 help
2001 Support for the SH7760/SH7763 integrated (D)STN/TFT LCD Controller.
2002 Supports display resolutions up to 1024x1024 pixel, grayscale and
2003 color operation, with depths ranging from 1 bpp to 8 bpp monochrome
2004 and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for
2005 panels <= 320 pixel horizontal resolution.
2006
1954config FB_VIRTUAL 2007config FB_VIRTUAL
1955 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" 2008 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
1956 depends on FB 2009 depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 04bca35403ff..0ebc1bfd2514 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -106,17 +106,22 @@ obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
106obj-$(CONFIG_FB_MAXINE) += maxinefb.o 106obj-$(CONFIG_FB_MAXINE) += maxinefb.o
107obj-$(CONFIG_FB_METRONOME) += metronomefb.o 107obj-$(CONFIG_FB_METRONOME) += metronomefb.o
108obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o 108obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
109obj-$(CONFIG_FB_SH7760) += sh7760fb.o
109obj-$(CONFIG_FB_IMX) += imxfb.o 110obj-$(CONFIG_FB_IMX) += imxfb.o
110obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o 111obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
111obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o 112obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o
113obj-$(CONFIG_FB_COBALT) += cobalt_lcdfb.o
112obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/ 114obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/
113obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/ 115obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/
114obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o 116obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
115obj-$(CONFIG_FB_PS3) += ps3fb.o 117obj-$(CONFIG_FB_PS3) += ps3fb.o
116obj-$(CONFIG_FB_SM501) += sm501fb.o 118obj-$(CONFIG_FB_SM501) += sm501fb.o
117obj-$(CONFIG_FB_XILINX) += xilinxfb.o 119obj-$(CONFIG_FB_XILINX) += xilinxfb.o
120obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
121obj-$(CONFIG_FB_SH7343VOU) += sh7343_voufb.o
118obj-$(CONFIG_FB_OMAP) += omap/ 122obj-$(CONFIG_FB_OMAP) += omap/
119obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o 123obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
124obj-$(CONFIG_FB_CARMINE) += carminefb.o
120 125
121# Platform or fallback drivers go here 126# Platform or fallback drivers go here
122obj-$(CONFIG_FB_UVESA) += uvesafb.o 127obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index eedb8285e32f..017233d0c481 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -23,6 +23,7 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/ctype.h> 24#include <linux/ctype.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/mm.h>
26#include <linux/init.h> 27#include <linux/init.h>
27#include <linux/fb.h> 28#include <linux/fb.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 45c154ade9ca..b8e9a8682f2d 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -1136,7 +1136,6 @@ static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg
1136 * Interface to the low level console driver 1136 * Interface to the low level console driver
1137 */ 1137 */
1138 1138
1139int amifb_init(void);
1140static void amifb_deinit(void); 1139static void amifb_deinit(void);
1141 1140
1142 /* 1141 /*
@@ -2048,13 +2047,16 @@ static void amifb_copyarea(struct fb_info *info,
2048 width = x2 - dx; 2047 width = x2 - dx;
2049 height = y2 - dy; 2048 height = y2 - dy;
2050 2049
2050 if (area->sx + dx < area->dx || area->sy + dy < area->dy)
2051 return;
2052
2051 /* update sx,sy */ 2053 /* update sx,sy */
2052 sx = area->sx + (dx - area->dx); 2054 sx = area->sx + (dx - area->dx);
2053 sy = area->sy + (dy - area->dy); 2055 sy = area->sy + (dy - area->dy);
2054 2056
2055 /* the source must be completely inside the virtual screen */ 2057 /* the source must be completely inside the virtual screen */
2056 if (sx < 0 || sy < 0 || (sx + width) > info->var.xres_virtual || 2058 if (sx + width > info->var.xres_virtual ||
2057 (sy + height) > info->var.yres_virtual) 2059 sy + height > info->var.yres_virtual)
2058 return; 2060 return;
2059 2061
2060 if (dy > sy || (dy == sy && dx > sx)) { 2062 if (dy > sy || (dy == sy && dx > sx)) {
@@ -2245,7 +2247,7 @@ static inline void chipfree(void)
2245 * Initialisation 2247 * Initialisation
2246 */ 2248 */
2247 2249
2248int __init amifb_init(void) 2250static int __init amifb_init(void)
2249{ 2251{
2250 int tag, i, err = 0; 2252 int tag, i, err = 0;
2251 u_long chipptr; 2253 u_long chipptr;
@@ -3790,16 +3792,14 @@ static void ami_rebuild_copper(void)
3790 } 3792 }
3791} 3793}
3792 3794
3793 3795static void __exit amifb_exit(void)
3794module_init(amifb_init);
3795
3796#ifdef MODULE
3797MODULE_LICENSE("GPL");
3798
3799void cleanup_module(void)
3800{ 3796{
3801 unregister_framebuffer(&fb_info); 3797 unregister_framebuffer(&fb_info);
3802 amifb_deinit(); 3798 amifb_deinit();
3803 amifb_video_off(); 3799 amifb_video_off();
3804} 3800}
3805#endif /* MODULE */ 3801
3802module_init(amifb_init);
3803module_exit(amifb_exit);
3804
3805MODULE_LICENSE("GPL");
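
Sketch of the modernised entry points in the amifb hunk above: the init function becomes static, and a paired __exit function replaces the open-coded cleanup_module(), both registered through module_init()/module_exit() so the #ifdef MODULE block disappears.

#include <linux/init.h>
#include <linux/module.h>

static int __init examplefb_init(void)
{
        /* probe hardware, register_framebuffer(), etc. */
        return 0;
}

static void __exit examplefb_exit(void)
{
        /* unregister_framebuffer(), free resources, switch video off */
}

module_init(examplefb_init);
module_exit(examplefb_exit);

MODULE_LICENSE("GPL");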
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index fa55d356b535..77eb8b34fbfa 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2593,13 +2593,16 @@ static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
2593 width = x2 - dx; 2593 width = x2 - dx;
2594 height = y2 - dy; 2594 height = y2 - dy;
2595 2595
2596 if (area->sx + dx < area->dx || area->sy + dy < area->dy)
2597 return;
2598
2596 /* update sx,sy */ 2599 /* update sx,sy */
2597 sx = area->sx + (dx - area->dx); 2600 sx = area->sx + (dx - area->dx);
2598 sy = area->sy + (dy - area->dy); 2601 sy = area->sy + (dy - area->dy);
2599 2602
2600 /* the source must be completely inside the virtual screen */ 2603 /* the source must be completely inside the virtual screen */
2601 if (sx < 0 || sy < 0 || (sx + width) > info->var.xres_virtual || 2604 if (sx + width > info->var.xres_virtual ||
2602 (sy + height) > info->var.yres_virtual) 2605 sy + height > info->var.yres_virtual)
2603 return; 2606 return;
2604 2607
2605 if (dy > sy || (dy == sy && dx > sx)) { 2608 if (dy > sy || (dy == sy && dx > sx)) {
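
Sketch of the copyarea clipping fix applied to both amifb and atafb above: after the destination is clipped, the source offset is adjusted by the same amount, and the new early return rejects the case where that adjustment would push the source origin below zero — a case the old `sx < 0` test could never catch, presumably because the coordinates are unsigned. Types and names below are illustrative.

struct copyarea_req {
        unsigned int dx, dy, sx, sy;
};

static int clip_source(struct copyarea_req *a, unsigned int dx, unsigned int dy,
                       unsigned int width, unsigned int height,
                       unsigned int xres_virtual, unsigned int yres_virtual)
{
        unsigned int sx, sy;

        /* would the adjusted source start underflow? */
        if (a->sx + dx < a->dx || a->sy + dy < a->dy)
                return -1;

        sx = a->sx + (dx - a->dx);
        sy = a->sy + (dy - a->dy);

        /* the source must stay inside the virtual screen */
        if (sx + width > xres_virtual || sy + height > yres_virtual)
                return -1;

        a->sx = sx;
        a->sy = sy;
        return 0;
}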
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index b004036d4087..5b3a15dffb5f 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -256,6 +256,20 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
256 return 0; 256 return 0;
257} 257}
258 258
259static const struct fb_videomode *atmel_lcdfb_choose_mode(struct fb_var_screeninfo *var,
260 struct fb_info *info)
261{
262 struct fb_videomode varfbmode;
263 const struct fb_videomode *fbmode = NULL;
264
265 fb_var_to_videomode(&varfbmode, var);
266 fbmode = fb_find_nearest_mode(&varfbmode, &info->modelist);
267 if (fbmode)
268 fb_videomode_to_var(var, fbmode);
269 return fbmode;
270}
271
272
259/** 273/**
260 * atmel_lcdfb_check_var - Validates a var passed in. 274 * atmel_lcdfb_check_var - Validates a var passed in.
261 * @var: frame buffer variable screen structure 275 * @var: frame buffer variable screen structure
@@ -289,6 +303,15 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
289 clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000; 303 clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
290 304
291 dev_dbg(dev, "%s:\n", __func__); 305 dev_dbg(dev, "%s:\n", __func__);
306
307 if (!(var->pixclock && var->bits_per_pixel)) {
308 /* choose a suitable mode if possible */
309 if (!atmel_lcdfb_choose_mode(var, info)) {
310 dev_err(dev, "needed value not specified\n");
311 return -EINVAL;
312 }
313 }
314
292 dev_dbg(dev, " resolution: %ux%u\n", var->xres, var->yres); 315 dev_dbg(dev, " resolution: %ux%u\n", var->xres, var->yres);
293 dev_dbg(dev, " pixclk: %lu KHz\n", PICOS2KHZ(var->pixclock)); 316 dev_dbg(dev, " pixclk: %lu KHz\n", PICOS2KHZ(var->pixclock));
294 dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel); 317 dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel);
@@ -299,6 +322,13 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
299 return -EINVAL; 322 return -EINVAL;
300 } 323 }
301 324
 325	/* Do not allow the real resolution to be larger than the virtual one */
326 if (var->xres > var->xres_virtual)
327 var->xres_virtual = var->xres;
328
329 if (var->yres > var->yres_virtual)
330 var->yres_virtual = var->yres;
331
302 /* Force same alignment for each line */ 332 /* Force same alignment for each line */
303 var->xres = (var->xres + 3) & ~3UL; 333 var->xres = (var->xres + 3) & ~3UL;
304 var->xres_virtual = (var->xres_virtual + 3) & ~3UL; 334 var->xres_virtual = (var->xres_virtual + 3) & ~3UL;
@@ -379,6 +409,35 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
379 return 0; 409 return 0;
380} 410}
381 411
412/*
413 * LCD reset sequence
414 */
415static void atmel_lcdfb_reset(struct atmel_lcdfb_info *sinfo)
416{
417 might_sleep();
418
419 /* LCD power off */
420 lcdc_writel(sinfo, ATMEL_LCDC_PWRCON, sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
421
422 /* wait for the LCDC core to become idle */
423 while (lcdc_readl(sinfo, ATMEL_LCDC_PWRCON) & ATMEL_LCDC_BUSY)
424 msleep(10);
425
426 /* DMA disable */
427 lcdc_writel(sinfo, ATMEL_LCDC_DMACON, 0);
428
429 /* wait for DMA engine to become idle */
430 while (lcdc_readl(sinfo, ATMEL_LCDC_DMACON) & ATMEL_LCDC_DMABUSY)
431 msleep(10);
432
433 /* LCD power on */
434 lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
435 (sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET) | ATMEL_LCDC_PWR);
436
437 /* DMA enable */
438 lcdc_writel(sinfo, ATMEL_LCDC_DMACON, sinfo->default_dmacon);
439}
440
382/** 441/**
383 * atmel_lcdfb_set_par - Alters the hardware state. 442 * atmel_lcdfb_set_par - Alters the hardware state.
384 * @info: frame buffer structure that represents a single frame buffer 443 * @info: frame buffer structure that represents a single frame buffer
@@ -401,6 +460,8 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
401 unsigned long clk_value_khz; 460 unsigned long clk_value_khz;
402 unsigned long bits_per_line; 461 unsigned long bits_per_line;
403 462
463 might_sleep();
464
404 dev_dbg(info->device, "%s:\n", __func__); 465 dev_dbg(info->device, "%s:\n", __func__);
405 dev_dbg(info->device, " * resolution: %ux%u (%ux%u virtual)\n", 466 dev_dbg(info->device, " * resolution: %ux%u (%ux%u virtual)\n",
406 info->var.xres, info->var.yres, 467 info->var.xres, info->var.yres,
@@ -511,6 +572,8 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
511 572
512 /* Disable all interrupts */ 573 /* Disable all interrupts */
513 lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL); 574 lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
575 /* Enable FIFO & DMA errors */
576 lcdc_writel(sinfo, ATMEL_LCDC_IER, ATMEL_LCDC_UFLWI | ATMEL_LCDC_OWRI | ATMEL_LCDC_MERI);
514 577
515 /* ...wait for DMA engine to become idle... */ 578 /* ...wait for DMA engine to become idle... */
516 while (lcdc_readl(sinfo, ATMEL_LCDC_DMACON) & ATMEL_LCDC_DMABUSY) 579 while (lcdc_readl(sinfo, ATMEL_LCDC_DMACON) & ATMEL_LCDC_DMABUSY)
@@ -645,10 +708,26 @@ static irqreturn_t atmel_lcdfb_interrupt(int irq, void *dev_id)
645 u32 status; 708 u32 status;
646 709
647 status = lcdc_readl(sinfo, ATMEL_LCDC_ISR); 710 status = lcdc_readl(sinfo, ATMEL_LCDC_ISR);
648 lcdc_writel(sinfo, ATMEL_LCDC_IDR, status); 711 if (status & ATMEL_LCDC_UFLWI) {
712 dev_warn(info->device, "FIFO underflow %#x\n", status);
713 /* reset DMA and FIFO to avoid screen shifting */
714 schedule_work(&sinfo->task);
715 }
716 lcdc_writel(sinfo, ATMEL_LCDC_ICR, status);
649 return IRQ_HANDLED; 717 return IRQ_HANDLED;
650} 718}
651 719
720/*
721 * LCD controller task (to reset the LCD)
722 */
723static void atmel_lcdfb_task(struct work_struct *work)
724{
725 struct atmel_lcdfb_info *sinfo =
726 container_of(work, struct atmel_lcdfb_info, task);
727
728 atmel_lcdfb_reset(sinfo);
729}
730
652static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo) 731static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo)
653{ 732{
654 struct fb_info *info = sinfo->info; 733 struct fb_info *info = sinfo->info;
@@ -691,6 +770,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
691 struct fb_info *info; 770 struct fb_info *info;
692 struct atmel_lcdfb_info *sinfo; 771 struct atmel_lcdfb_info *sinfo;
693 struct atmel_lcdfb_info *pdata_sinfo; 772 struct atmel_lcdfb_info *pdata_sinfo;
773 struct fb_videomode fbmode;
694 struct resource *regs = NULL; 774 struct resource *regs = NULL;
695 struct resource *map = NULL; 775 struct resource *map = NULL;
696 int ret; 776 int ret;
@@ -824,6 +904,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
824 goto unmap_mmio; 904 goto unmap_mmio;
825 } 905 }
826 906
907 /* Some operations on the LCDC might sleep and
908 * require a preemptible task context */
909 INIT_WORK(&sinfo->task, atmel_lcdfb_task);
910
827 ret = atmel_lcdfb_init_fbinfo(sinfo); 911 ret = atmel_lcdfb_init_fbinfo(sinfo);
828 if (ret < 0) { 912 if (ret < 0) {
829 dev_err(dev, "init fbinfo failed: %d\n", ret); 913 dev_err(dev, "init fbinfo failed: %d\n", ret);
@@ -853,6 +937,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
853 goto free_cmap; 937 goto free_cmap;
854 } 938 }
855 939
940 /* add selected videomode to modelist */
941 fb_var_to_videomode(&fbmode, &info->var);
942 fb_add_videomode(&fbmode, &info->modelist);
943
856 /* Power up the LCDC screen */ 944 /* Power up the LCDC screen */
857 if (sinfo->atmel_lcdfb_power_control) 945 if (sinfo->atmel_lcdfb_power_control)
858 sinfo->atmel_lcdfb_power_control(1); 946 sinfo->atmel_lcdfb_power_control(1);
@@ -866,6 +954,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
866free_cmap: 954free_cmap:
867 fb_dealloc_cmap(&info->cmap); 955 fb_dealloc_cmap(&info->cmap);
868unregister_irqs: 956unregister_irqs:
957 cancel_work_sync(&sinfo->task);
869 free_irq(sinfo->irq_base, info); 958 free_irq(sinfo->irq_base, info);
870unmap_mmio: 959unmap_mmio:
871 exit_backlight(sinfo); 960 exit_backlight(sinfo);
@@ -903,6 +992,7 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
903 if (!sinfo) 992 if (!sinfo)
904 return 0; 993 return 0;
905 994
995 cancel_work_sync(&sinfo->task);
906 exit_backlight(sinfo); 996 exit_backlight(sinfo);
907 if (sinfo->atmel_lcdfb_power_control) 997 if (sinfo->atmel_lcdfb_power_control)
908 sinfo->atmel_lcdfb_power_control(0); 998 sinfo->atmel_lcdfb_power_control(0);
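
Sketch of the defer-to-workqueue pattern the atmel_lcdfb hunks above add: the reset sequence calls msleep() and therefore cannot run in interrupt context, so the IRQ handler only acknowledges the FIFO-underflow error and schedules a work item that performs the reset from process context. Names are trimmed placeholders.

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_lcd {
        struct work_struct task;
        /* ... register base, guard time, dma config ... */
};

static void example_lcd_reset(struct example_lcd *lcd)
{
        might_sleep();
        /* power off, wait for idle with msleep(), re-enable DMA and power */
}

static void example_lcd_task(struct work_struct *work)
{
        struct example_lcd *lcd = container_of(work, struct example_lcd, task);

        example_lcd_reset(lcd);
}

static irqreturn_t example_lcd_irq(int irq, void *dev_id)
{
        struct example_lcd *lcd = dev_id;

        /* on FIFO underflow: ack the interrupt, defer the sleeping reset */
        schedule_work(&lcd->task);
        return IRQ_HANDLED;
}

As in the hunks above, probe() would pair this with INIT_WORK(&lcd->task, example_lcd_task) and the remove/error paths with cancel_work_sync().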
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 07b6addbb3c1..243ea4ab20c8 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1339,10 +1339,8 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
1339 if (vclk * 12 < c.ppll_min) 1339 if (vclk * 12 < c.ppll_min)
1340 vclk = c.ppll_min/12; 1340 vclk = c.ppll_min/12;
1341 1341
1342 pll->post_divider = -1;
1343
1344 /* now, find an acceptable divider */ 1342 /* now, find an acceptable divider */
1345 for (i = 0; i < sizeof(post_dividers); i++) { 1343 for (i = 0; i < ARRAY_SIZE(post_dividers); i++) {
1346 output_freq = post_dividers[i] * vclk; 1344 output_freq = post_dividers[i] * vclk;
1347 if (output_freq >= c.ppll_min && output_freq <= c.ppll_max) { 1345 if (output_freq >= c.ppll_min && output_freq <= c.ppll_max) {
1348 pll->post_divider = post_dividers[i]; 1346 pll->post_divider = post_dividers[i];
@@ -1350,7 +1348,7 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
1350 } 1348 }
1351 } 1349 }
1352 1350
1353 if (pll->post_divider < 0) 1351 if (i == ARRAY_SIZE(post_dividers))
1354 return -EINVAL; 1352 return -EINVAL;
1355 1353
1356 /* calculate feedback divider */ 1354 /* calculate feedback divider */
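
Sketch of the loop-bound fix in the aty128fb hunk above: sizeof(array) counts bytes, so it only equals the element count for byte-sized elements, whereas ARRAY_SIZE() is correct for any element type; comparing the index against it afterwards also replaces the -1 sentinel in post_divider. The divider values here are for illustration.

#include <linux/errno.h>
#include <linux/kernel.h>

static const int post_dividers[] = { 1, 2, 4, 8, 3, 6, 12 };

static int pick_post_divider(int vclk, int ppll_min, int ppll_max)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(post_dividers); i++) {
                int out = post_dividers[i] * vclk;

                if (out >= ppll_min && out <= ppll_max)
                        return post_dividers[i];
        }
        return -EINVAL;         /* i == ARRAY_SIZE(): nothing fit */
}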
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index bd4ac0bafecb..620ba8120368 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -424,7 +424,6 @@ static struct {
424#endif /* CONFIG_FB_ATY_CT */ 424#endif /* CONFIG_FB_ATY_CT */
425}; 425};
426 426
427/* can not fail */
428static int __devinit correct_chipset(struct atyfb_par *par) 427static int __devinit correct_chipset(struct atyfb_par *par)
429{ 428{
430 u8 rev; 429 u8 rev;
@@ -437,6 +436,9 @@ static int __devinit correct_chipset(struct atyfb_par *par)
437 if (par->pci_id == aty_chips[i].pci_id) 436 if (par->pci_id == aty_chips[i].pci_id)
438 break; 437 break;
439 438
439 if (i < 0)
440 return -ENODEV;
441
440 name = aty_chips[i].name; 442 name = aty_chips[i].name;
441 par->pll_limits.pll_max = aty_chips[i].pll; 443 par->pll_limits.pll_max = aty_chips[i].pll;
442 par->pll_limits.mclk = aty_chips[i].mclk; 444 par->pll_limits.mclk = aty_chips[i].mclk;
@@ -2229,6 +2231,7 @@ static int __devinit aty_init(struct fb_info *info)
2229 const char *ramname = NULL, *xtal; 2231 const char *ramname = NULL, *xtal;
2230 int gtb_memsize, has_var = 0; 2232 int gtb_memsize, has_var = 0;
2231 struct fb_var_screeninfo var; 2233 struct fb_var_screeninfo var;
2234 int ret;
2232 2235
2233 init_waitqueue_head(&par->vblank.wait); 2236 init_waitqueue_head(&par->vblank.wait);
2234 spin_lock_init(&par->int_lock); 2237 spin_lock_init(&par->int_lock);
@@ -2610,7 +2613,8 @@ static int __devinit aty_init(struct fb_info *info)
2610 var.yres_virtual = var.yres; 2613 var.yres_virtual = var.yres;
2611 } 2614 }
2612 2615
2613 if (atyfb_check_var(&var, info)) { 2616 ret = atyfb_check_var(&var, info);
2617 if (ret) {
2614 PRINTKE("can't set default video mode\n"); 2618 PRINTKE("can't set default video mode\n");
2615 goto aty_init_exit; 2619 goto aty_init_exit;
2616 } 2620 }
@@ -2621,10 +2625,12 @@ static int __devinit aty_init(struct fb_info *info)
2621#endif /* CONFIG_FB_ATY_CT */ 2625#endif /* CONFIG_FB_ATY_CT */
2622 info->var = var; 2626 info->var = var;
2623 2627
2624 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 2628 ret = fb_alloc_cmap(&info->cmap, 256, 0);
2629 if (ret < 0)
2625 goto aty_init_exit; 2630 goto aty_init_exit;
2626 2631
2627 if (register_framebuffer(info) < 0) { 2632 ret = register_framebuffer(info);
2633 if (ret < 0) {
2628 fb_dealloc_cmap(&info->cmap); 2634 fb_dealloc_cmap(&info->cmap);
2629 goto aty_init_exit; 2635 goto aty_init_exit;
2630 } 2636 }
@@ -2650,7 +2656,7 @@ aty_init_exit:
2650 par->mtrr_aper = -1; 2656 par->mtrr_aper = -1;
2651 } 2657 }
2652#endif 2658#endif
2653 return -1; 2659 return ret;
2654} 2660}
2655 2661
2656static void aty_resume_chip(struct fb_info *info) 2662static void aty_resume_chip(struct fb_info *info)
@@ -2709,8 +2715,7 @@ static int atyfb_blank(int blank, struct fb_info *info)
2709 if (par->lock_blank || par->asleep) 2715 if (par->lock_blank || par->asleep)
2710 return 0; 2716 return 0;
2711 2717
2712#ifdef CONFIG_FB_ATY_BACKLIGHT 2718#ifdef CONFIG_FB_ATY_GENERIC_LCD
2713#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
2714 if (par->lcd_table && blank > FB_BLANK_NORMAL && 2719 if (par->lcd_table && blank > FB_BLANK_NORMAL &&
2715 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2720 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
2716 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par); 2721 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
@@ -2739,8 +2744,7 @@ static int atyfb_blank(int blank, struct fb_info *info)
2739 } 2744 }
2740 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par); 2745 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
2741 2746
2742#ifdef CONFIG_FB_ATY_BACKLIGHT 2747#ifdef CONFIG_FB_ATY_GENERIC_LCD
2743#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
2744 if (par->lcd_table && blank <= FB_BLANK_NORMAL && 2748 if (par->lcd_table && blank <= FB_BLANK_NORMAL &&
2745 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2749 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
2746 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par); 2750 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
@@ -3331,7 +3335,7 @@ static int __devinit init_from_bios(struct atyfb_par *par)
3331 PRINTKE("no BIOS frequency table found, use parameters\n"); 3335 PRINTKE("no BIOS frequency table found, use parameters\n");
3332 ret = -ENXIO; 3336 ret = -ENXIO;
3333 } 3337 }
3334 iounmap((void* __iomem )bios_base); 3338 iounmap((void __iomem *)bios_base);
3335 3339
3336 return ret; 3340 return ret;
3337} 3341}
@@ -3418,14 +3422,7 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3418 struct fb_info *info; 3422 struct fb_info *info;
3419 struct resource *rp; 3423 struct resource *rp;
3420 struct atyfb_par *par; 3424 struct atyfb_par *par;
3421 int i, rc = -ENOMEM; 3425 int rc = -ENOMEM;
3422
3423 for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
3424 if (pdev->device == aty_chips[i].pci_id)
3425 break;
3426
3427 if (i < 0)
3428 return -ENODEV;
3429 3426
3430 /* Enable device in PCI config */ 3427 /* Enable device in PCI config */
3431 if (pci_enable_device(pdev)) { 3428 if (pci_enable_device(pdev)) {
@@ -3456,7 +3453,7 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3456 par = info->par; 3453 par = info->par;
3457 info->fix = atyfb_fix; 3454 info->fix = atyfb_fix;
3458 info->device = &pdev->dev; 3455 info->device = &pdev->dev;
3459 par->pci_id = aty_chips[i].pci_id; 3456 par->pci_id = pdev->device;
3460 par->res_start = res_start; 3457 par->res_start = res_start;
3461 par->res_size = res_size; 3458 par->res_size = res_size;
3462 par->irq = pdev->irq; 3459 par->irq = pdev->irq;
@@ -3474,7 +3471,8 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3474 pci_set_drvdata(pdev, info); 3471 pci_set_drvdata(pdev, info);
3475 3472
3476 /* Init chip & register framebuffer */ 3473 /* Init chip & register framebuffer */
3477 if (aty_init(info)) 3474 rc = aty_init(info);
3475 if (rc)
3478 goto err_release_io; 3476 goto err_release_io;
3479 3477
3480#ifdef __sparc__ 3478#ifdef __sparc__
@@ -3655,18 +3653,62 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
3655 atyfb_remove(info); 3653 atyfb_remove(info);
3656} 3654}
3657 3655
3658/*
3659 * This driver uses its own matching table. That will be more difficult
3660 * to fix, so for now, we just match against any ATI ID and let the
3661 * probe() function find out what's up. That also mean we don't have
3662 * a module ID table though.
3663 */
3664static struct pci_device_id atyfb_pci_tbl[] = { 3656static struct pci_device_id atyfb_pci_tbl[] = {
3665 { PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 3657#ifdef CONFIG_FB_ATY_GX
3666 PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0 }, 3658 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GX) },
3667 { 0, } 3659 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CX) },
3660#endif /* CONFIG_FB_ATY_GX */
3661
3662#ifdef CONFIG_FB_ATY_CT
3663 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CT) },
3664 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64ET) },
3665
3666 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LT) },
3667
3668 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VT) },
3669 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GT) },
3670
3671 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VU) },
3672 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GU) },
3673
3674 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LG) },
3675
3676 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VV) },
3677
3678 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GV) },
3679 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GW) },
3680 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GY) },
3681 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GZ) },
3682
3683 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GB) },
3684 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GD) },
3685 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GI) },
3686 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GP) },
3687 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GQ) },
3688
3689 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LB) },
3690 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LD) },
3691 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LI) },
3692 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LP) },
3693 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LQ) },
3694
3695 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GM) },
3696 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GN) },
3697 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GO) },
3698 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GL) },
3699 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GR) },
3700 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GS) },
3701
3702 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LM) },
3703 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LN) },
3704 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LR) },
3705 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LS) },
3706#endif /* CONFIG_FB_ATY_CT */
3707 { }
3668}; 3708};
3669 3709
3710MODULE_DEVICE_TABLE(pci, atyfb_pci_tbl);
3711
3670static struct pci_driver atyfb_driver = { 3712static struct pci_driver atyfb_driver = {
3671 .name = "atyfb", 3713 .name = "atyfb",
3672 .id_table = atyfb_pci_tbl, 3714 .id_table = atyfb_pci_tbl,
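The atyfb change above drops the driver's private chip-lookup loop and its catch-all ATI class match in favour of an explicit PCI ID table, which also lets MODULE_DEVICE_TABLE() export the IDs so the module can be autoloaded. A minimal sketch of that pattern, using a hypothetical "example" driver and a made-up device ID rather than the real Mach64 list:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* pdev->device identifies the chip directly, so no private
	 * lookup table is needed in the probe path. */
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x1234) },	/* hypothetical ID */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_tbl,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

With the table exported, userspace can match a newly discovered device's modalias against the module alias list and load the driver automatically, which the earlier class-based table did not provide.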
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 400e9264e456..652273e9f5f9 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2098,15 +2098,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
2098 2098
2099static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u8 *edid) 2099static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u8 *edid)
2100{ 2100{
2101 if (off > EDID_LENGTH) 2101 return memory_read_from_buffer(buf, count, &off, edid, EDID_LENGTH);
2102 return 0;
2103
2104 if (off + count > EDID_LENGTH)
2105 count = EDID_LENGTH - off;
2106
2107 memcpy(buf, edid + off, count);
2108
2109 return count;
2110} 2102}
2111 2103
2112 2104
@@ -2161,6 +2153,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
2161 struct radeonfb_info *rinfo; 2153 struct radeonfb_info *rinfo;
2162 int ret; 2154 int ret;
2163 unsigned char c1, c2; 2155 unsigned char c1, c2;
2156 int err = 0;
2164 2157
2165 pr_debug("radeonfb_pci_register BEGIN\n"); 2158 pr_debug("radeonfb_pci_register BEGIN\n");
2166 2159
@@ -2340,9 +2333,14 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
2340 2333
2341 /* Register some sysfs stuff (should be done better) */ 2334 /* Register some sysfs stuff (should be done better) */
2342 if (rinfo->mon1_EDID) 2335 if (rinfo->mon1_EDID)
2343 sysfs_create_bin_file(&rinfo->pdev->dev.kobj, &edid1_attr); 2336 err |= sysfs_create_bin_file(&rinfo->pdev->dev.kobj,
2337 &edid1_attr);
2344 if (rinfo->mon2_EDID) 2338 if (rinfo->mon2_EDID)
2345 sysfs_create_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr); 2339 err |= sysfs_create_bin_file(&rinfo->pdev->dev.kobj,
2340 &edid2_attr);
2341 if (err)
2342 pr_warning("%s() Creating sysfs files failed, continuing\n",
2343 __func__);
2346 2344
2347 /* save current mode regs before we switch into the new one 2345 /* save current mode regs before we switch into the new one
2348 * so we can restore this upon __exit 2346 * so we can restore this upon __exit
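The radeon hunk above replaces the open-coded offset clamp and memcpy() with memory_read_from_buffer(), which performs exactly those bounds checks. A short sketch of the same helper used from a sysfs binary attribute, with a hypothetical blob name and size, assuming the struct bin_attribute read prototype of this kernel generation:

#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static const u8 example_blob[128];	/* hypothetical read-only blob */

static ssize_t example_read(struct kobject *kobj, struct bin_attribute *attr,
			    char *buf, loff_t off, size_t count)
{
	/* Clamps off/count against the blob size, copies the data and
	 * returns the number of bytes placed in buf (0 at end of blob). */
	return memory_read_from_buffer(buf, count, &off, example_blob,
				       sizeof(example_blob));
}

static struct bin_attribute example_attr = {
	.attr	= { .name = "example_blob", .mode = 0444 },
	.size	= sizeof(example_blob),
	.read	= example_read,
};

The attribute would then be published with sysfs_create_bin_file(), whose return value the same hunk now checks instead of ignoring.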
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 30bf7f2f1635..452b770d8cc9 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -36,6 +36,30 @@ config LCD_LTV350QV
36 36
37 The LTV350QV panel is present on all ATSTK1000 boards. 37 The LTV350QV panel is present on all ATSTK1000 boards.
38 38
39config LCD_ILI9320
40 tristate
41 depends on LCD_CLASS_DEVICE && BACKLIGHT_LCD_SUPPORT
42 default n
43 help
44 If you have a panel based on the ILI9320 controller chip
45 then say y to include a power driver for it.
46
47config LCD_VGG2432A4
48 tristate "VGG2432A4 LCM device support"
49 depends on BACKLIGHT_LCD_SUPPORT && LCD_CLASS_DEVICE && SPI_MASTER
50 select LCD_ILI9320
51 default n
52 help
53 If you have a VGG2432A4 panel based on the ILI9320 controller chip
54 then say y to include a power driver for it.
55
56config LCD_PLATFORM
57 tristate "Platform LCD controls"
58 depends on LCD_CLASS_DEVICE
59 help
60 This driver provides a platform-device registered LCD power
61 control interface.
62
39# 63#
40# Backlight 64# Backlight
41# 65#
@@ -63,6 +87,18 @@ config BACKLIGHT_ATMEL_LCDC
63 If in doubt, it's safe to enable this option; it doesn't kick 87 If in doubt, it's safe to enable this option; it doesn't kick
64 in unless the board's description says it's wired that way. 88 in unless the board's description says it's wired that way.
65 89
90config BACKLIGHT_ATMEL_PWM
91 tristate "Atmel PWM backlight control"
92 depends on BACKLIGHT_CLASS_DEVICE && ATMEL_PWM
93 default n
94 help
95 Say Y here if you want to use the PWM peripheral in Atmel AT91 and
96 AVR32 devices. This driver will need additional platform data to know
97 which PWM instance to use and how to configure it.
98
99 To compile this driver as a module, choose M here: the module will be
100 called atmel-pwm-bl.
101
66config BACKLIGHT_CORGI 102config BACKLIGHT_CORGI
67 tristate "Generic (aka Sharp Corgi) Backlight Driver" 103 tristate "Generic (aka Sharp Corgi) Backlight Driver"
68 depends on BACKLIGHT_CLASS_DEVICE 104 depends on BACKLIGHT_CLASS_DEVICE
@@ -119,3 +155,12 @@ config BACKLIGHT_PWM
119 help 155 help
120 If you have a LCD backlight adjustable by PWM, say Y to enable 156 If you have a LCD backlight adjustable by PWM, say Y to enable
121 this driver. 157 this driver.
158
159config BACKLIGHT_MBP_NVIDIA
160 tristate "MacBook Pro Nvidia Backlight Driver"
161 depends on BACKLIGHT_CLASS_DEVICE && X86
162 default n
163 help
164 If you have an Apple MacBook Pro with Nvidia graphics hardware, say Y
165 to enable a driver for its backlight.
166
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index b51a7cd12500..b405aace803f 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -1,9 +1,13 @@
1# Backlight & LCD drivers 1# Backlight & LCD drivers
2 2
3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o 3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o 4obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
5obj-$(CONFIG_LCD_ILI9320) += ili9320.o
6obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
7obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
5 8
6obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 9obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
10obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
7obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o 11obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o
8obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o 12obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
9obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o 13obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
@@ -11,3 +15,5 @@ obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
11obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o 15obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
12obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o 16obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
13obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o 17obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
18obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
19
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
new file mode 100644
index 000000000000..505c0823a105
--- /dev/null
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright (C) 2008 Atmel Corporation
3 *
4 * Backlight driver using Atmel PWM peripheral.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 */
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/fb.h>
15#include <linux/clk.h>
16#include <linux/gpio.h>
17#include <linux/backlight.h>
18#include <linux/atmel_pwm.h>
19#include <linux/atmel-pwm-bl.h>
20
21struct atmel_pwm_bl {
22 const struct atmel_pwm_bl_platform_data *pdata;
23 struct backlight_device *bldev;
24 struct platform_device *pdev;
25 struct pwm_channel pwmc;
26 int gpio_on;
27};
28
29static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
30{
31 struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
32 int intensity = bd->props.brightness;
33 int pwm_duty;
34
35 if (bd->props.power != FB_BLANK_UNBLANK)
36 intensity = 0;
37 if (bd->props.fb_blank != FB_BLANK_UNBLANK)
38 intensity = 0;
39
40 if (pwmbl->pdata->pwm_active_low)
41 pwm_duty = pwmbl->pdata->pwm_duty_min + intensity;
42 else
43 pwm_duty = pwmbl->pdata->pwm_duty_max - intensity;
44
45 if (pwm_duty > pwmbl->pdata->pwm_duty_max)
46 pwm_duty = pwmbl->pdata->pwm_duty_max;
47 if (pwm_duty < pwmbl->pdata->pwm_duty_min)
48 pwm_duty = pwmbl->pdata->pwm_duty_min;
49
50 if (!intensity) {
51 if (pwmbl->gpio_on != -1) {
52 gpio_set_value(pwmbl->gpio_on,
53 0 ^ pwmbl->pdata->on_active_low);
54 }
55 pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
56 pwm_channel_disable(&pwmbl->pwmc);
57 } else {
58 pwm_channel_enable(&pwmbl->pwmc);
59 pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
60 if (pwmbl->gpio_on != -1) {
61 gpio_set_value(pwmbl->gpio_on,
62 1 ^ pwmbl->pdata->on_active_low);
63 }
64 }
65
66 return 0;
67}
68
69static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
70{
71 struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
72 u8 intensity;
73
74 if (pwmbl->pdata->pwm_active_low) {
75 intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
76 pwmbl->pdata->pwm_duty_min;
77 } else {
78 intensity = pwmbl->pdata->pwm_duty_max -
79 pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
80 }
81
82 return intensity;
83}
84
85static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
86{
87 unsigned long pwm_rate = pwmbl->pwmc.mck;
88 unsigned long prescale = DIV_ROUND_UP(pwm_rate,
89 (pwmbl->pdata->pwm_frequency *
90 pwmbl->pdata->pwm_compare_max)) - 1;
91
92 /*
93 * Prescale must be power of two and maximum 0xf in size because of
94 * hardware limit. PWM speed will be:
95 * PWM module clock speed / (2 ^ prescale).
96 */
97 prescale = fls(prescale);
98 if (prescale > 0xf)
99 prescale = 0xf;
100
101 pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale);
102 pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY,
103 pwmbl->pdata->pwm_duty_min +
104 pwmbl->bldev->props.brightness);
105 pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
106 pwmbl->pdata->pwm_compare_max);
107
108 dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver "
109 "(%lu Hz)\n", pwmbl->pwmc.mck /
110 pwmbl->pdata->pwm_compare_max /
111 (1 << prescale));
112
113 return pwm_channel_enable(&pwmbl->pwmc);
114}
115
116static struct backlight_ops atmel_pwm_bl_ops = {
117 .get_brightness = atmel_pwm_bl_get_intensity,
118 .update_status = atmel_pwm_bl_set_intensity,
119};
120
121static int atmel_pwm_bl_probe(struct platform_device *pdev)
122{
123 const struct atmel_pwm_bl_platform_data *pdata;
124 struct backlight_device *bldev;
125 struct atmel_pwm_bl *pwmbl;
126 int retval;
127
128 pwmbl = kzalloc(sizeof(struct atmel_pwm_bl), GFP_KERNEL);
129 if (!pwmbl)
130 return -ENOMEM;
131
132 pwmbl->pdev = pdev;
133
134 pdata = pdev->dev.platform_data;
135 if (!pdata) {
136 retval = -ENODEV;
137 goto err_free_mem;
138 }
139
140 if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
141 pdata->pwm_duty_min > pdata->pwm_duty_max ||
142 pdata->pwm_frequency == 0) {
143 retval = -EINVAL;
144 goto err_free_mem;
145 }
146
147 pwmbl->pdata = pdata;
148 pwmbl->gpio_on = pdata->gpio_on;
149
150 retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
151 if (retval)
152 goto err_free_mem;
153
154 if (pwmbl->gpio_on != -1) {
155 retval = gpio_request(pwmbl->gpio_on, "gpio_atmel_pwm_bl");
156 if (retval) {
157 pwmbl->gpio_on = -1;
158 goto err_free_pwm;
159 }
160
161 /* Turn display off by default. */
162 retval = gpio_direction_output(pwmbl->gpio_on,
163 0 ^ pdata->on_active_low);
164 if (retval)
165 goto err_free_gpio;
166 }
167
168 bldev = backlight_device_register("atmel-pwm-bl",
169 &pdev->dev, pwmbl, &atmel_pwm_bl_ops);
170 if (IS_ERR(bldev)) {
171 retval = PTR_ERR(bldev);
172 goto err_free_gpio;
173 }
174
175 pwmbl->bldev = bldev;
176
177 platform_set_drvdata(pdev, pwmbl);
178
179 /* Power up the backlight by default at middle intensity. */
180 bldev->props.power = FB_BLANK_UNBLANK;
181 bldev->props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
182 bldev->props.brightness = bldev->props.max_brightness / 2;
183
184 retval = atmel_pwm_bl_init_pwm(pwmbl);
185 if (retval)
186 goto err_free_bl_dev;
187
188 atmel_pwm_bl_set_intensity(bldev);
189
190 return 0;
191
192err_free_bl_dev:
193 platform_set_drvdata(pdev, NULL);
194 backlight_device_unregister(bldev);
195err_free_gpio:
196 if (pwmbl->gpio_on != -1)
197 gpio_free(pwmbl->gpio_on);
198err_free_pwm:
199 pwm_channel_free(&pwmbl->pwmc);
200err_free_mem:
201 kfree(pwmbl);
202 return retval;
203}
204
205static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
206{
207 struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
208
209 if (pwmbl->gpio_on != -1) {
210 gpio_set_value(pwmbl->gpio_on, 0);
211 gpio_free(pwmbl->gpio_on);
212 }
213 pwm_channel_disable(&pwmbl->pwmc);
214 pwm_channel_free(&pwmbl->pwmc);
215 backlight_device_unregister(pwmbl->bldev);
216 platform_set_drvdata(pdev, NULL);
217 kfree(pwmbl);
218
219 return 0;
220}
221
222static struct platform_driver atmel_pwm_bl_driver = {
223 .driver = {
224 .name = "atmel-pwm-bl",
225 },
226 /* REVISIT add suspend() and resume() */
227 .remove = __exit_p(atmel_pwm_bl_remove),
228};
229
230static int __init atmel_pwm_bl_init(void)
231{
232 return platform_driver_probe(&atmel_pwm_bl_driver, atmel_pwm_bl_probe);
233}
234module_init(atmel_pwm_bl_init);
235
236static void __exit atmel_pwm_bl_exit(void)
237{
238 platform_driver_unregister(&atmel_pwm_bl_driver);
239}
240module_exit(atmel_pwm_bl_exit);
241
242MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
243MODULE_DESCRIPTION("Atmel PWM backlight driver");
244MODULE_LICENSE("GPL");
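atmel-pwm-bl is a pure platform driver: everything it knows about the board arrives through the atmel_pwm_bl_platform_data it reads from pdev->dev.platform_data. A board-file sketch with made-up duty and frequency numbers; the struct definition itself lives in <linux/atmel-pwm-bl.h>, outside this diff:

#include <linux/platform_device.h>
#include <linux/atmel-pwm-bl.h>

static struct atmel_pwm_bl_platform_data example_pwm_bl_data = {
	.pwm_channel		= 2,		/* hypothetical PWM channel */
	.pwm_frequency		= 200000,
	.pwm_compare_max	= 345,
	.pwm_duty_max		= 345,
	.pwm_duty_min		= 90,
	.pwm_active_low		= 1,
	.gpio_on		= -1,		/* no enable GPIO wired up */
	.on_active_low		= 0,
};

static struct platform_device example_pwm_bl_device = {
	.name	= "atmel-pwm-bl",
	.id	= 0,
	.dev	= {
		.platform_data	= &example_pwm_bl_data,
	},
};

/* Registered from the board init code, e.g.:
 *	platform_device_register(&example_pwm_bl_device);
 */

The probe above rejects data where pwm_duty_max exceeds pwm_compare_max or pwm_frequency is zero, so any real values must satisfy those constraints; gpio_on = -1 simply means there is no separate enable GPIO.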
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 39394757679c..fab0bc874b58 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -191,6 +191,7 @@ static struct device_attribute bl_device_attributes[] = {
191 * backlight_device class. 191 * backlight_device class.
192 * @name: the name of the new object(must be the same as the name of the 192 * @name: the name of the new object(must be the same as the name of the
193 * respective framebuffer device). 193 * respective framebuffer device).
194 * @parent: a pointer to the parent device
194 * @devdata: an optional pointer to be stored for private driver use. The 195 * @devdata: an optional pointer to be stored for private driver use. The
195 * methods may retrieve it by using bl_get_data(bd). 196 * methods may retrieve it by using bl_get_data(bd).
196 * @ops: the backlight operations structure. 197 * @ops: the backlight operations structure.
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
new file mode 100644
index 000000000000..ba89b41b639c
--- /dev/null
+++ b/drivers/video/backlight/ili9320.c
@@ -0,0 +1,330 @@
1/* drivers/video/backlight/ili9320.c
2 *
3 * ILI9320 LCD controller driver core.
4 *
5 * Copyright 2007 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 * Ben Dooks <ben@simtec.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/fb.h>
17#include <linux/init.h>
18#include <linux/lcd.h>
19#include <linux/module.h>
20
21#include <linux/spi/spi.h>
22
23#include <video/ili9320.h>
24
25#include "ili9320.h"
26
27
28static inline int ili9320_write_spi(struct ili9320 *ili,
29 unsigned int reg,
30 unsigned int value)
31{
32 struct ili9320_spi *spi = &ili->access.spi;
33 unsigned char *addr = spi->buffer_addr;
34 unsigned char *data = spi->buffer_data;
35
36 /* spi message consists of:
37 * first byte: ID and operation
38 */
39
40 addr[0] = spi->id | ILI9320_SPI_INDEX | ILI9320_SPI_WRITE;
41 addr[1] = reg >> 8;
42 addr[2] = reg;
43
44 /* second message is the data to transfer */
45
46 data[0] = spi->id | ILI9320_SPI_DATA | ILI9320_SPI_WRITE;
47 data[1] = value >> 8;
48 data[2] = value;
49
50 return spi_sync(spi->dev, &spi->message);
51}
52
53int ili9320_write(struct ili9320 *ili, unsigned int reg, unsigned int value)
54{
55 dev_dbg(ili->dev, "write: reg=%02x, val=%04x\n", reg, value);
56 return ili->write(ili, reg, value);
57}
58
59EXPORT_SYMBOL_GPL(ili9320_write);
60
61int ili9320_write_regs(struct ili9320 *ili,
62 struct ili9320_reg *values,
63 int nr_values)
64{
65 int index;
66 int ret;
67
68 for (index = 0; index < nr_values; index++, values++) {
69 ret = ili9320_write(ili, values->address, values->value);
70 if (ret != 0)
71 return ret;
72 }
73
74 return 0;
75}
76
77EXPORT_SYMBOL_GPL(ili9320_write_regs);
78
79static void ili9320_reset(struct ili9320 *lcd)
80{
81 struct ili9320_platdata *cfg = lcd->platdata;
82
83 cfg->reset(1);
84 mdelay(50);
85
86 cfg->reset(0);
87 mdelay(50);
88
89 cfg->reset(1);
90 mdelay(100);
91}
92
93static inline int ili9320_init_chip(struct ili9320 *lcd)
94{
95 int ret;
96
97 ili9320_reset(lcd);
98
99 ret = lcd->client->init(lcd, lcd->platdata);
100 if (ret != 0) {
101 dev_err(lcd->dev, "failed to initialise display\n");
102 return ret;
103 }
104
105 lcd->initialised = 1;
106 return 0;
107}
108
109static inline int ili9320_power_on(struct ili9320 *lcd)
110{
111 if (!lcd->initialised)
112 ili9320_init_chip(lcd);
113
114 lcd->display1 |= (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_BASEE);
115 ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);
116
117 return 0;
118}
119
120static inline int ili9320_power_off(struct ili9320 *lcd)
121{
122 lcd->display1 &= ~(ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_BASEE);
123 ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);
124
125 return 0;
126}
127
128#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
129
130static int ili9320_power(struct ili9320 *lcd, int power)
131{
132 int ret = 0;
133
134 dev_dbg(lcd->dev, "power %d => %d\n", lcd->power, power);
135
136 if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
137 ret = ili9320_power_on(lcd);
138 else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
139 ret = ili9320_power_off(lcd);
140
141 if (ret == 0)
142 lcd->power = power;
143 else
144 dev_warn(lcd->dev, "failed to set power mode %d\n", power);
145
146 return ret;
147}
148
149static inline struct ili9320 *to_our_lcd(struct lcd_device *lcd)
150{
151 return lcd_get_data(lcd);
152}
153
154static int ili9320_set_power(struct lcd_device *ld, int power)
155{
156 struct ili9320 *lcd = to_our_lcd(ld);
157
158 return ili9320_power(lcd, power);
159}
160
161static int ili9320_get_power(struct lcd_device *ld)
162{
163 struct ili9320 *lcd = to_our_lcd(ld);
164
165 return lcd->power;
166}
167
168static struct lcd_ops ili9320_ops = {
169 .get_power = ili9320_get_power,
170 .set_power = ili9320_set_power,
171};
172
173static void __devinit ili9320_setup_spi(struct ili9320 *ili,
174 struct spi_device *dev)
175{
176 struct ili9320_spi *spi = &ili->access.spi;
177
178 ili->write = ili9320_write_spi;
179 spi->dev = dev;
180
181 /* fill the two transfers we are going to use to send the data:
182 * first the address, followed by the data. The datasheet
183 * says they should be done as two distinct cycles of the SPI CS line.
184 */
185
186 spi->xfer[0].tx_buf = spi->buffer_addr;
187 spi->xfer[1].tx_buf = spi->buffer_data;
188 spi->xfer[0].len = 3;
189 spi->xfer[1].len = 3;
190 spi->xfer[0].bits_per_word = 8;
191 spi->xfer[1].bits_per_word = 8;
192 spi->xfer[0].cs_change = 1;
193
194 spi_message_init(&spi->message);
195 spi_message_add_tail(&spi->xfer[0], &spi->message);
196 spi_message_add_tail(&spi->xfer[1], &spi->message);
197}
198
199int __devinit ili9320_probe_spi(struct spi_device *spi,
200 struct ili9320_client *client)
201{
202 struct ili9320_platdata *cfg = spi->dev.platform_data;
203 struct device *dev = &spi->dev;
204 struct ili9320 *ili;
205 struct lcd_device *lcd;
206 int ret = 0;
207
208 /* verify we were given some information */
209
210 if (cfg == NULL) {
211 dev_err(dev, "no platform data supplied\n");
212 return -EINVAL;
213 }
214
215 if (cfg->hsize <= 0 || cfg->vsize <= 0 || cfg->reset == NULL) {
216 dev_err(dev, "invalid platform data supplied\n");
217 return -EINVAL;
218 }
219
220 /* allocate and initialise our state */
221
222 ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL);
223 if (ili == NULL) {
224 dev_err(dev, "no memory for device\n");
225 return -ENOMEM;
226 }
227
228 ili->access.spi.id = ILI9320_SPI_IDCODE | ILI9320_SPI_ID(1);
229
230 ili->dev = dev;
231 ili->client = client;
232 ili->power = FB_BLANK_POWERDOWN;
233 ili->platdata = cfg;
234
235 dev_set_drvdata(&spi->dev, ili);
236
237 ili9320_setup_spi(ili, spi);
238
239 lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
240 if (IS_ERR(lcd)) {
241 dev_err(dev, "failed to register lcd device\n");
242 ret = PTR_ERR(lcd);
243 goto err_free;
244 }
245
246 ili->lcd = lcd;
247
248 dev_info(dev, "initialising %s\n", client->name);
249
250 ret = ili9320_power(ili, FB_BLANK_UNBLANK);
251 if (ret != 0) {
252 dev_err(dev, "failed to set lcd power state\n");
253 goto err_unregister;
254 }
255
256 return 0;
257
258 err_unregister:
259 lcd_device_unregister(lcd);
260
261 err_free:
262 kfree(ili);
263
264 return ret;
265}
266
267EXPORT_SYMBOL_GPL(ili9320_probe_spi);
268
269int __devexit ili9320_remove(struct ili9320 *ili)
270{
271 ili9320_power(ili, FB_BLANK_POWERDOWN);
272
273 lcd_device_unregister(ili->lcd);
274 kfree(ili);
275
276 return 0;
277}
278
279EXPORT_SYMBOL_GPL(ili9320_remove);
280
281#ifdef CONFIG_PM
282int ili9320_suspend(struct ili9320 *lcd, pm_message_t state)
283{
284 int ret;
285
286 dev_dbg(lcd->dev, "%s: event %d\n", __func__, state.event);
287
288 if (state.event == PM_EVENT_SUSPEND) {
289 ret = ili9320_power(lcd, FB_BLANK_POWERDOWN);
290
291 if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) {
292 ili9320_write(lcd, ILI9320_POWER1, lcd->power1 |
293 ILI9320_POWER1_SLP |
294 ILI9320_POWER1_DSTB);
295 lcd->initialised = 0;
296 }
297
298 return ret;
299 }
300
301 return 0;
302}
303
304EXPORT_SYMBOL_GPL(ili9320_suspend);
305
306int ili9320_resume(struct ili9320 *lcd)
307{
308 dev_info(lcd->dev, "resuming from power state %d\n", lcd->power);
309
310 if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) {
311 ili9320_write(lcd, ILI9320_POWER1, 0x00);
312 }
313
314 return ili9320_power(lcd, FB_BLANK_UNBLANK);
315}
316
317EXPORT_SYMBOL_GPL(ili9320_resume);
318#endif
319
320/* Power down all displays on reboot, poweroff or halt */
321void ili9320_shutdown(struct ili9320 *lcd)
322{
323 ili9320_power(lcd, FB_BLANK_POWERDOWN);
324}
325
326EXPORT_SYMBOL_GPL(ili9320_shutdown);
327
328MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
329MODULE_DESCRIPTION("ILI9320 LCD Driver");
330MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/ili9320.h b/drivers/video/backlight/ili9320.h
new file mode 100644
index 000000000000..e388eca7cac5
--- /dev/null
+++ b/drivers/video/backlight/ili9320.h
@@ -0,0 +1,80 @@
1/* drivers/video/backlight/ili9320.h
2 *
3 * ILI9320 LCD controller driver core.
4 *
5 * Copyright 2007 Simtec Electronics
6 * Ben Dooks <ben@simtec.co.uk>
7 *
8 * http://armlinux.simtec.co.uk/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15/* Holder for register and value pairs. */
16struct ili9320_reg {
17 unsigned short address;
18 unsigned short value;
19};
20
21struct ili9320;
22
23struct ili9320_client {
24 const char *name;
25 int (*init)(struct ili9320 *ili, struct ili9320_platdata *cfg);
26
27};
28/* Device attached via an SPI bus. */
29struct ili9320_spi {
30 struct spi_device *dev;
31 struct spi_message message;
32 struct spi_transfer xfer[2];
33
34 unsigned char id;
35 unsigned char buffer_addr[4];
36 unsigned char buffer_data[4];
37};
38
39/* ILI9320 device state. */
40struct ili9320 {
41 union {
42 struct ili9320_spi spi; /* SPI attached device. */
43 } access; /* Register access method. */
44
45 struct device *dev;
46 struct lcd_device *lcd; /* LCD device we created. */
47 struct ili9320_client *client;
48 struct ili9320_platdata *platdata;
49
50 int power; /* current power state. */
51 int initialised;
52
53 unsigned short display1;
54 unsigned short power1;
55
56 int (*write)(struct ili9320 *ili, unsigned int reg, unsigned int val);
57};
58
59
60/* ILI9320 register access routines */
61
62extern int ili9320_write(struct ili9320 *ili,
63 unsigned int reg, unsigned int value);
64
65extern int ili9320_write_regs(struct ili9320 *ili,
66 struct ili9320_reg *values,
67 int nr_values);
68
69/* Device probe */
70
71extern int ili9320_probe_spi(struct spi_device *spi,
72 struct ili9320_client *cli);
73
74extern int ili9320_remove(struct ili9320 *lcd);
75extern void ili9320_shutdown(struct ili9320 *lcd);
76
77/* PM */
78
79extern int ili9320_suspend(struct ili9320 *lcd, pm_message_t state);
80extern int ili9320_resume(struct ili9320 *lcd);
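A chip-specific driver plugs into this core by filling in an ili9320_client with an init hook and handing its SPI device to ili9320_probe_spi(); vgg2432a4.c later in this patch is the real user. A stripped-down sketch of the shape of such a client, with a placeholder register sequence rather than a real panel init:

#include <linux/kernel.h>
#include <linux/spi/spi.h>

#include <video/ili9320.h>

#include "ili9320.h"

static struct ili9320_reg example_init_seq[] = {
	{
		.address = ILI9320_OSCILATION,
		.value	 = ILI9320_OSCILATION_OSC,
	},
};

static int example_panel_init(struct ili9320 *ili,
			      struct ili9320_platdata *cfg)
{
	/* A real panel would program its full power/gamma sequence here. */
	return ili9320_write_regs(ili, example_init_seq,
				  ARRAY_SIZE(example_init_seq));
}

static struct ili9320_client example_panel_client = {
	.name	= "example-panel",
	.init	= example_panel_init,
};

static int example_panel_probe(struct spi_device *spi)
{
	/* Hooked up through an spi_driver, exactly as vgg2432a4.c does. */
	return ili9320_probe_spi(spi, &example_panel_client);
}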
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 299fd318dd45..b15b2b84a6f7 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -33,7 +33,7 @@ static int fb_notifier_callback(struct notifier_block *self,
33 ld = container_of(self, struct lcd_device, fb_notif); 33 ld = container_of(self, struct lcd_device, fb_notif);
34 mutex_lock(&ld->ops_lock); 34 mutex_lock(&ld->ops_lock);
35 if (ld->ops) 35 if (ld->ops)
36 if (!ld->ops->check_fb || ld->ops->check_fb(evdata->info)) 36 if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info))
37 ld->ops->set_power(ld, *(int *)evdata->data); 37 ld->ops->set_power(ld, *(int *)evdata->data);
38 mutex_unlock(&ld->ops_lock); 38 mutex_unlock(&ld->ops_lock);
39 return 0; 39 return 0;
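With the lcd.c change above, check_fb now receives the lcd_device as well as the fb_info, so a driver can match against its own per-device state instead of a global (the bfin and platform_lcd drivers below are converted to the new prototype). A small sketch of an lcd_ops using it; the "example" names are hypothetical:

#include <linux/fb.h>
#include <linux/lcd.h>

struct example_lcd {
	struct fb_info	*our_fb;	/* framebuffer this panel sits behind */
	int		power;
};

static int example_get_power(struct lcd_device *ld)
{
	struct example_lcd *lcd = lcd_get_data(ld);

	return lcd->power;
}

static int example_set_power(struct lcd_device *ld, int power)
{
	struct example_lcd *lcd = lcd_get_data(ld);

	lcd->power = power;
	return 0;
}

static int example_check_fb(struct lcd_device *ld, struct fb_info *info)
{
	struct example_lcd *lcd = lcd_get_data(ld);

	/* Only react to blank events coming from our own framebuffer. */
	return info == lcd->our_fb;
}

static struct lcd_ops example_lcd_ops = {
	.get_power	= example_get_power,
	.set_power	= example_set_power,
	.check_fb	= example_check_fb,
};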
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
new file mode 100644
index 000000000000..385cba40ea87
--- /dev/null
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -0,0 +1,116 @@
1/*
2 * Backlight Driver for Nvidia 8600 in Macbook Pro
3 *
4 * Copyright (c) Red Hat <mjg@redhat.com>
5 * Based on code from Pommed:
6 * Copyright (C) 2006 Nicolas Boichat <nicolas @boichat.ch>
7 * Copyright (C) 2006 Felipe Alfaro Solana <felipe_alfaro @linuxmail.org>
8 * Copyright (C) 2007 Julien BLACHE <jb@jblache.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This driver triggers SMIs which cause the firmware to change the
15 * backlight brightness. This is icky in many ways, but it's impractical to
16 * get at the firmware code in order to figure out what it's actually doing.
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/platform_device.h>
23#include <linux/backlight.h>
24#include <linux/err.h>
25#include <linux/dmi.h>
26#include <linux/io.h>
27
28static struct backlight_device *mbp_backlight_device;
29
30static struct dmi_system_id __initdata mbp_device_table[] = {
31 {
32 .ident = "3,1",
33 .matches = {
34 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
35 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
36 },
37 },
38 {
39 .ident = "3,2",
40 .matches = {
41 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
42 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,2"),
43 },
44 },
45 {
46 .ident = "4,1",
47 .matches = {
48 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
49 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4,1"),
50 },
51 },
52 { }
53};
54
55static int mbp_send_intensity(struct backlight_device *bd)
56{
57 int intensity = bd->props.brightness;
58
59 outb(0x04 | (intensity << 4), 0xb3);
60 outb(0xbf, 0xb2);
61
62 return 0;
63}
64
65static int mbp_get_intensity(struct backlight_device *bd)
66{
67 outb(0x03, 0xb3);
68 outb(0xbf, 0xb2);
69 return inb(0xb3) >> 4;
70}
71
72static struct backlight_ops mbp_ops = {
73 .get_brightness = mbp_get_intensity,
74 .update_status = mbp_send_intensity,
75};
76
77static int __init mbp_init(void)
78{
79 if (!dmi_check_system(mbp_device_table))
80 return -ENODEV;
81
82 if (!request_region(0xb2, 2, "Macbook Pro backlight"))
83 return -ENXIO;
84
85 mbp_backlight_device = backlight_device_register("mbp_backlight",
86 NULL, NULL,
87 &mbp_ops);
88 if (IS_ERR(mbp_backlight_device)) {
89 release_region(0xb2, 2);
90 return PTR_ERR(mbp_backlight_device);
91 }
92
93 mbp_backlight_device->props.max_brightness = 15;
94 mbp_backlight_device->props.brightness =
95 mbp_get_intensity(mbp_backlight_device);
96 backlight_update_status(mbp_backlight_device);
97
98 return 0;
99}
100
101static void __exit mbp_exit(void)
102{
103 backlight_device_unregister(mbp_backlight_device);
104
105 release_region(0xb2, 2);
106}
107
108module_init(mbp_init);
109module_exit(mbp_exit);
110
111MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
112MODULE_DESCRIPTION("Nvidia-based Macbook Pro Backlight Driver");
113MODULE_LICENSE("GPL");
114MODULE_ALIAS("svnAppleInc.:pnMacBookPro3,1");
115MODULE_ALIAS("svnAppleInc.:pnMacBookPro3,2");
116MODULE_ALIAS("svnAppleInc.:pnMacBookPro4,1");
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
new file mode 100644
index 000000000000..72d44dbfce82
--- /dev/null
+++ b/drivers/video/backlight/platform_lcd.c
@@ -0,0 +1,172 @@
1/* drivers/video/backlight/platform_lcd.c
2 *
3 * Copyright 2008 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Generic platform-device LCD power control interface.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12*/
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/fb.h>
17#include <linux/backlight.h>
18#include <linux/lcd.h>
19
20#include <video/platform_lcd.h>
21
22struct platform_lcd {
23 struct device *us;
24 struct lcd_device *lcd;
25 struct plat_lcd_data *pdata;
26
27 unsigned int power;
28 unsigned int suspended : 1;
29};
30
31static inline struct platform_lcd *to_our_lcd(struct lcd_device *lcd)
32{
33 return lcd_get_data(lcd);
34}
35
36static int platform_lcd_get_power(struct lcd_device *lcd)
37{
38 struct platform_lcd *plcd = to_our_lcd(lcd);
39
40 return plcd->power;
41}
42
43static int platform_lcd_set_power(struct lcd_device *lcd, int power)
44{
45 struct platform_lcd *plcd = to_our_lcd(lcd);
46 int lcd_power = 1;
47
48 if (power == FB_BLANK_POWERDOWN || plcd->suspended)
49 lcd_power = 0;
50
51 plcd->pdata->set_power(plcd->pdata, lcd_power);
52 plcd->power = power;
53
54 return 0;
55}
56
57static int platform_lcd_match(struct lcd_device *lcd, struct fb_info *info)
58{
59 struct platform_lcd *plcd = to_our_lcd(lcd);
60 struct plat_lcd_data *pdata = plcd->pdata;
61
62 if (pdata->match_fb)
63 return pdata->match_fb(pdata, info);
64
65 return plcd->us->parent == info->device;
66}
67
68static struct lcd_ops platform_lcd_ops = {
69 .get_power = platform_lcd_get_power,
70 .set_power = platform_lcd_set_power,
71 .check_fb = platform_lcd_match,
72};
73
74static int __devinit platform_lcd_probe(struct platform_device *pdev)
75{
76 struct plat_lcd_data *pdata;
77 struct platform_lcd *plcd;
78 struct device *dev = &pdev->dev;
79 int err;
80
81 pdata = pdev->dev.platform_data;
82 if (!pdata) {
83 dev_err(dev, "no platform data supplied\n");
84 return -EINVAL;
85 }
86
87 plcd = kzalloc(sizeof(struct platform_lcd), GFP_KERNEL);
88 if (!plcd) {
89 dev_err(dev, "no memory for state\n");
90 return -ENOMEM;
91 }
92
93 plcd->us = dev;
94 plcd->pdata = pdata;
95 plcd->lcd = lcd_device_register("platform-lcd", dev,
96 plcd, &platform_lcd_ops);
97 if (IS_ERR(plcd->lcd)) {
98 dev_err(dev, "cannot register lcd device\n");
99 err = PTR_ERR(plcd->lcd);
100 goto err_mem;
101 }
102
103 platform_set_drvdata(pdev, plcd);
104 return 0;
105
106 err_mem:
107 kfree(plcd);
108 return err;
109}
110
111static int __devexit platform_lcd_remove(struct platform_device *pdev)
112{
113 struct platform_lcd *plcd = platform_get_drvdata(pdev);
114
115 lcd_device_unregister(plcd->lcd);
116 kfree(plcd);
117
118 return 0;
119}
120
121#ifdef CONFIG_PM
122static int platform_lcd_suspend(struct platform_device *pdev, pm_message_t st)
123{
124 struct platform_lcd *plcd = platform_get_drvdata(pdev);
125
126 plcd->suspended = 1;
127 platform_lcd_set_power(plcd->lcd, plcd->power);
128
129 return 0;
130}
131
132static int platform_lcd_resume(struct platform_device *pdev)
133{
134 struct platform_lcd *plcd = platform_get_drvdata(pdev);
135
136 plcd->suspended = 0;
137 platform_lcd_set_power(plcd->lcd, plcd->power);
138
139 return 0;
140}
141#else
142#define platform_lcd_suspend NULL
143#define platform_lcd_resume NULL
144#endif
145
146static struct platform_driver platform_lcd_driver = {
147 .driver = {
148 .name = "platform-lcd",
149 .owner = THIS_MODULE,
150 },
151 .probe = platform_lcd_probe,
152 .remove = __devexit_p(platform_lcd_remove),
153 .suspend = platform_lcd_suspend,
154 .resume = platform_lcd_resume,
155};
156
157static int __init platform_lcd_init(void)
158{
159 return platform_driver_register(&platform_lcd_driver);
160}
161
162static void __exit platform_lcd_cleanup(void)
163{
164 platform_driver_unregister(&platform_lcd_driver);
165}
166
167module_init(platform_lcd_init);
168module_exit(platform_lcd_cleanup);
169
170MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
171MODULE_LICENSE("GPL v2");
172MODULE_ALIAS("platform:platform-lcd");
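platform_lcd leaves the actual power switching to the board: the probe above only needs a plat_lcd_data whose set_power() callback does the work, with match_fb() optional. A board-file sketch with a hypothetical GPIO; the exact field prototypes live in <video/platform_lcd.h>, which is not part of this diff, so treat them as assumptions:

#include <linux/gpio.h>
#include <linux/platform_device.h>

#include <video/platform_lcd.h>

#define EXAMPLE_GPIO_LCD_POWER	42	/* hypothetical board GPIO */

static void example_lcd_set_power(struct plat_lcd_data *pd, unsigned int power)
{
	gpio_set_value(EXAMPLE_GPIO_LCD_POWER, power);
}

static struct plat_lcd_data example_lcd_data = {
	.set_power	= example_lcd_set_power,
};

static struct platform_device example_lcd_device = {
	.name	= "platform-lcd",
	.id	= -1,
	.dev	= {
		.platform_data	= &example_lcd_data,
		/* .parent should point at the framebuffer device so the
		 * default platform_lcd_match() comparison succeeds. */
	},
};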
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
new file mode 100644
index 000000000000..593c7687d54a
--- /dev/null
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -0,0 +1,284 @@
1/* drivers/video/backlight/vgg2432a4.c
2 *
3 * VGG2432A4 (ILI9320) LCD controller driver.
4 *
5 * Copyright 2007 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 * Ben Dooks <ben@simtec.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/fb.h>
17#include <linux/init.h>
18#include <linux/lcd.h>
19#include <linux/module.h>
20
21#include <linux/spi/spi.h>
22
23#include <video/ili9320.h>
24
25#include "ili9320.h"
26
27/* Device initialisation sequences */
28
29static struct ili9320_reg vgg_init1[] = {
30 {
31 .address = ILI9320_POWER1,
32 .value = ILI9320_POWER1_AP(0) | ILI9320_POWER1_BT(0),
33 }, {
34 .address = ILI9320_POWER2,
35 .value = (ILI9320_POWER2_VC(7) |
36 ILI9320_POWER2_DC0(0) | ILI9320_POWER2_DC1(0)),
37 }, {
38 .address = ILI9320_POWER3,
39 .value = ILI9320_POWER3_VRH(0),
40 }, {
41 .address = ILI9320_POWER4,
42 .value = ILI9320_POWER4_VREOUT(0),
43 },
44};
45
46static struct ili9320_reg vgg_init2[] = {
47 {
48 .address = ILI9320_POWER1,
49 .value = (ILI9320_POWER1_AP(3) | ILI9320_POWER1_APE |
50 ILI9320_POWER1_BT(7) | ILI9320_POWER1_SAP),
51 }, {
52 .address = ILI9320_POWER2,
53 .value = ILI9320_POWER2_VC(7) | ILI9320_POWER2_DC0(3),
54 }
55};
56
57static struct ili9320_reg vgg_gamma[] = {
58 {
59 .address = ILI9320_GAMMA1,
60 .value = 0x0000,
61 }, {
62 .address = ILI9320_GAMMA2,
63 .value = 0x0505,
64 }, {
65 .address = ILI9320_GAMMA3,
66 .value = 0x0004,
67 }, {
68 .address = ILI9320_GAMMA4,
69 .value = 0x0006,
70 }, {
71 .address = ILI9320_GAMMA5,
72 .value = 0x0707,
73 }, {
74 .address = ILI9320_GAMMA6,
75 .value = 0x0105,
76 }, {
77 .address = ILI9320_GAMMA7,
78 .value = 0x0002,
79 }, {
80 .address = ILI9320_GAMMA8,
81 .value = 0x0707,
82 }, {
83 .address = ILI9320_GAMMA9,
84 .value = 0x0704,
85 }, {
86 .address = ILI9320_GAMMA10,
87 .value = 0x807,
88 }
89
90};
91
92static struct ili9320_reg vgg_init0[] = {
93 [0] = {
94 /* set direction and scan mode gate */
95 .address = ILI9320_DRIVER,
96 .value = ILI9320_DRIVER_SS,
97 }, {
98 .address = ILI9320_DRIVEWAVE,
99 .value = (ILI9320_DRIVEWAVE_MUSTSET |
100 ILI9320_DRIVEWAVE_EOR | ILI9320_DRIVEWAVE_BC),
101 }, {
102 .address = ILI9320_ENTRYMODE,
103 .value = ILI9320_ENTRYMODE_ID(3) | ILI9320_ENTRYMODE_BGR,
104 }, {
105 .address = ILI9320_RESIZING,
106 .value = 0x0,
107 },
108};
109
110
111static int vgg2432a4_lcd_init(struct ili9320 *lcd,
112 struct ili9320_platdata *cfg)
113{
114 unsigned int addr;
115 int ret;
116
117 /* Set VCore before anything else (VGG243237-6UFLWA) */
118 ret = ili9320_write(lcd, 0x00e5, 0x8000);
119 if (ret)
120 goto err_initial;
121
122 /* Start the oscillator up before we can do anything else. */
123 ret = ili9320_write(lcd, ILI9320_OSCILATION, ILI9320_OSCILATION_OSC);
124 if (ret)
125 goto err_initial;
126
127 /* must wait at least 10ms after starting */
128 mdelay(15);
129
130 ret = ili9320_write_regs(lcd, vgg_init0, ARRAY_SIZE(vgg_init0));
131 if (ret != 0)
132 goto err_initial;
133
134 ili9320_write(lcd, ILI9320_DISPLAY2, cfg->display2);
135 ili9320_write(lcd, ILI9320_DISPLAY3, cfg->display3);
136 ili9320_write(lcd, ILI9320_DISPLAY4, cfg->display4);
137
138 ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
139 ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
140 ili9320_write(lcd, ILI9320_RGB_IF2, ILI9320_RGBIF2_DPL);
141
142 ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
143 if (ret != 0)
144 goto err_vgg;
145
146 mdelay(300);
147
148 ret = ili9320_write_regs(lcd, vgg_init2, ARRAY_SIZE(vgg_init2));
149 if (ret != 0)
150 goto err_vgg2;
151
152 mdelay(100);
153
154 ili9320_write(lcd, ILI9320_POWER3, 0x13c);
155
156 mdelay(100);
157
158 ili9320_write(lcd, ILI9320_POWER4, 0x1c00);
159 ili9320_write(lcd, ILI9320_POWER7, 0x000e);
160
161 mdelay(100);
162
163 ili9320_write(lcd, ILI9320_GRAM_HORIZ_ADDR, 0x00);
164 ili9320_write(lcd, ILI9320_GRAM_VERT_ADD, 0x00);
165
166 ret = ili9320_write_regs(lcd, vgg_gamma, ARRAY_SIZE(vgg_gamma));
167 if (ret != 0)
168 goto err_vgg3;
169
170 ili9320_write(lcd, ILI9320_HORIZ_START, 0x0);
171 ili9320_write(lcd, ILI9320_HORIZ_END, cfg->hsize - 1);
172 ili9320_write(lcd, ILI9320_VERT_START, 0x0);
173 ili9320_write(lcd, ILI9320_VERT_END, cfg->vsize - 1);
174
175 ili9320_write(lcd, ILI9320_DRIVER2,
176 ILI9320_DRIVER2_NL(((cfg->vsize - 240) / 8) + 0x1D));
177
178 ili9320_write(lcd, ILI9320_BASE_IMAGE, 0x1);
179 ili9320_write(lcd, ILI9320_VERT_SCROLL, 0x00);
180
181 for (addr = ILI9320_PARTIAL1_POSITION; addr <= ILI9320_PARTIAL2_END;
182 addr++) {
183 ili9320_write(lcd, addr, 0x0);
184 }
185
186 ili9320_write(lcd, ILI9320_INTERFACE1, 0x10);
187 ili9320_write(lcd, ILI9320_INTERFACE2, cfg->interface2);
188 ili9320_write(lcd, ILI9320_INTERFACE3, cfg->interface3);
189 ili9320_write(lcd, ILI9320_INTERFACE4, cfg->interface4);
190 ili9320_write(lcd, ILI9320_INTERFACE5, cfg->interface5);
191 ili9320_write(lcd, ILI9320_INTERFACE6, cfg->interface6);
192
193 lcd->display1 = (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_DTE |
194 ILI9320_DISPLAY1_GON | ILI9320_DISPLAY1_BASEE |
195 0x40);
196
197 ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);
198
199 return 0;
200
201 err_vgg3:
202 err_vgg2:
203 err_vgg:
204 err_initial:
205 return ret;
206}
207
208#ifdef CONFIG_PM
209static int vgg2432a4_suspend(struct spi_device *spi, pm_message_t state)
210{
211 return ili9320_suspend(dev_get_drvdata(&spi->dev), state);
212}
213
214static int vgg2432a4_resume(struct spi_device *spi)
215{
216 return ili9320_resume(dev_get_drvdata(&spi->dev));
217}
218#else
219#define vgg2432a4_suspend NULL
220#define vgg2432a4_resume NULL
221#endif
222
223static struct ili9320_client vgg2432a4_client = {
224 .name = "VGG2432A4",
225 .init = vgg2432a4_lcd_init,
226};
227
228/* Device probe */
229
230static int __devinit vgg2432a4_probe(struct spi_device *spi)
231{
232 int ret;
233
234 ret = ili9320_probe_spi(spi, &vgg2432a4_client);
235 if (ret != 0) {
236 dev_err(&spi->dev, "failed to initialise ili9320\n");
237 return ret;
238 }
239
240 return 0;
241}
242
243static int __devexit vgg2432a4_remove(struct spi_device *spi)
244{
245 return ili9320_remove(dev_get_drvdata(&spi->dev));
246}
247
248static void vgg2432a4_shutdown(struct spi_device *spi)
249{
250 ili9320_shutdown(dev_get_drvdata(&spi->dev));
251}
252
253static struct spi_driver vgg2432a4_driver = {
254 .driver = {
255 .name = "VGG2432A4",
256 .owner = THIS_MODULE,
257 },
258 .probe = vgg2432a4_probe,
259 .remove = __devexit_p(vgg2432a4_remove),
260 .shutdown = vgg2432a4_shutdown,
261 .suspend = vgg2432a4_suspend,
262 .resume = vgg2432a4_resume,
263};
264
265/* Device driver initialisation */
266
267static int __init vgg2432a4_init(void)
268{
269 return spi_register_driver(&vgg2432a4_driver);
270}
271
272static void __exit vgg2432a4_exit(void)
273{
274 spi_unregister_driver(&vgg2432a4_driver);
275}
276
277module_init(vgg2432a4_init);
278module_exit(vgg2432a4_exit);
279
280MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
281MODULE_DESCRIPTION("VGG2432A4 LCD Driver");
282MODULE_LICENSE("GPL v2");
283
284
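The VGG2432A4 driver is matched purely by SPI modalias, and all panel knowledge arrives through the ili9320_platdata attached to the board's spi_board_info. A board-file sketch with illustrative numbers; the reset() prototype and the remaining display/interface fields come from <video/ili9320.h>, which is outside this diff, so they are assumptions here:

#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/spi/spi.h>

#include <video/ili9320.h>

#define EXAMPLE_GPIO_LCD_RESET	10	/* hypothetical reset GPIO */

static void example_lcd_reset(unsigned int rst)
{
	gpio_set_value(EXAMPLE_GPIO_LCD_RESET, rst);
}

static struct ili9320_platdata example_vgg2432a4_data = {
	.hsize		= 240,	/* illustrative panel geometry */
	.vsize		= 320,
	.reset		= example_lcd_reset,
	.suspend	= ILI9320_SUSPEND_DEEP,
	/* display2..display4, rgb_if1 and interface2..interface6 are left
	 * at zero here; a real board fills them from the panel datasheet. */
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "VGG2432A4",	/* matches the spi_driver name */
		.bus_num	= 0,
		.chip_select	= 1,
		.max_speed_hz	= 100000,
		.platform_data	= &example_vgg2432a4_data,
	},
};

/* Registered from board init, e.g.:
 *	spi_register_board_info(example_spi_devices,
 *				ARRAY_SIZE(example_spi_devices));
 */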
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 49834a67a623..940467aed13f 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -478,7 +478,7 @@ static int bfin_lcd_set_contrast(struct lcd_device *dev, int contrast)
478 return 0; 478 return 0;
479} 479}
480 480
481static int bfin_lcd_check_fb(struct fb_info *fi) 481static int bfin_lcd_check_fb(struct lcd_device *dev, struct fb_info *fi)
482{ 482{
483 if (!fi || (fi == &bfin_bf54x_fb)) 483 if (!fi || (fi == &bfin_bf54x_fb))
484 return 1; 484 return 1;
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 135d6dd7e672..7d1b819e501c 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -396,7 +396,7 @@ static int bfin_lcd_set_contrast(struct lcd_device *dev, int contrast)
396 return 0; 396 return 0;
397} 397}
398 398
399static int bfin_lcd_check_fb(struct fb_info *fi) 399static int bfin_lcd_check_fb(struct lcd_device *dev, struct fb_info *fi)
400{ 400{
401 if (!fi || (fi == &bfin_t350mcqb_fb)) 401 if (!fi || (fi == &bfin_t350mcqb_fb))
402 return 1; 402 return 1;
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
new file mode 100644
index 000000000000..e15bb447440a
--- /dev/null
+++ b/drivers/video/carminefb.c
@@ -0,0 +1,790 @@
1/*
2 * Frame buffer driver for the Carmine GPU.
3 *
4 * The driver configures the GPU as follows
5 * - FB0 is display 0 with unique memory area
6 * - FB1 is display 1 with unique memory area
7 * - both displays use 32 bit colors
8 */
9#include <linux/delay.h>
10#include <linux/errno.h>
11#include <linux/fb.h>
12#include <linux/interrupt.h>
13#include <linux/pci.h>
14
15#include "carminefb.h"
16#include "carminefb_regs.h"
17
18#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
19#error "The endianness of the target host has not been defined."
20#endif
21
22/*
23 * The initial video mode can be supplied via two different ways:
24 * - as a string that is passed to fb_find_mode() (module option fb_mode_str)
25 * - as an integer that picks the video mode from carmine_modedb[] (module
26 * option fb_mode)
27 *
28 * If neither is used, the initial video mode will be the
29 * CARMINEFB_DEFAULT_VIDEO_MODE member of the carmine_modedb[].
30 */
31#define CARMINEFB_DEFAULT_VIDEO_MODE 1
32
33static unsigned int fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
34module_param(fb_mode, uint, 444);
35MODULE_PARM_DESC(fb_mode, "Initial video mode as integer.");
36
37static char *fb_mode_str;
38module_param(fb_mode_str, charp, 444);
39MODULE_PARM_DESC(fb_mode_str, "Initial video mode in characters.");
40
41/*
42 * Carminefb displays:
43 * 0b000 None
44 * 0b001 Display 0
45 * 0b010 Display 1
46 */
47static int fb_displays = CARMINE_USE_DISPLAY0 | CARMINE_USE_DISPLAY1;
48module_param(fb_displays, int, 444);
49MODULE_PARM_DESC(fb_displays, "Bit mode, which displays are used");
50
51struct carmine_hw {
52 void __iomem *v_regs;
53 void __iomem *screen_mem;
54 struct fb_info *fb[MAX_DISPLAY];
55};
56
57struct carmine_resolution {
58 u32 htp;
59 u32 hsp;
60 u32 hsw;
61 u32 hdp;
62 u32 vtr;
63 u32 vsp;
64 u32 vsw;
65 u32 vdp;
66 u32 disp_mode;
67};
68
69struct carmine_fb {
70 void __iomem *display_reg;
71 void __iomem *screen_base;
72 u32 smem_offset;
73 u32 cur_mode;
74 u32 new_mode;
75 struct carmine_resolution *res;
76 u32 pseudo_palette[16];
77};
78
79static struct fb_fix_screeninfo carminefb_fix __devinitdata = {
80 .id = "Carmine",
81 .type = FB_TYPE_PACKED_PIXELS,
82 .visual = FB_VISUAL_TRUECOLOR,
83 .accel = FB_ACCEL_NONE,
84};
85
86static const struct fb_videomode carmine_modedb[] = {
87 {
88 .name = "640x480",
89 .xres = 640,
90 .yres = 480,
91 }, {
92 .name = "800x600",
93 .xres = 800,
94 .yres = 600,
95 },
96};
97
98static struct carmine_resolution car_modes[] = {
99 {
100 /* 640x480 */
101 .htp = 800,
102 .hsp = 672,
103 .hsw = 96,
104 .hdp = 640,
105 .vtr = 525,
106 .vsp = 490,
107 .vsw = 2,
108 .vdp = 480,
109 .disp_mode = 0x1400,
110 },
111 {
112 /* 800x600 */
113 .htp = 1060,
114 .hsp = 864,
115 .hsw = 72,
116 .hdp = 800,
117 .vtr = 628,
118 .vsp = 601,
119 .vsw = 2,
120 .vdp = 600,
121 .disp_mode = 0x0d00,
122 }
123};
124
125static int carmine_find_mode(const struct fb_var_screeninfo *var)
126{
127 int i;
128
129 for (i = 0; i < ARRAY_SIZE(car_modes); i++)
130 if (car_modes[i].hdp == var->xres &&
131 car_modes[i].vdp == var->yres)
132 return i;
133 return -EINVAL;
134}
135
136static void c_set_disp_reg(const struct carmine_fb *par,
137 u32 offset, u32 val)
138{
139 writel(val, par->display_reg + offset);
140}
141
142static u32 c_get_disp_reg(const struct carmine_fb *par,
143 u32 offset)
144{
145 return readl(par->display_reg + offset);
146}
147
148static void c_set_hw_reg(const struct carmine_hw *hw,
149 u32 offset, u32 val)
150{
151 writel(val, hw->v_regs + offset);
152}
153
154static u32 c_get_hw_reg(const struct carmine_hw *hw,
155 u32 offset)
156{
157 return readl(hw->v_regs + offset);
158}
159
160static int carmine_setcolreg(unsigned regno, unsigned red, unsigned green,
161 unsigned blue, unsigned transp, struct fb_info *info)
162{
163 if (regno >= 16)
164 return 1;
165
166 red >>= 8;
167 green >>= 8;
168 blue >>= 8;
169 transp >>= 8;
170
171 ((u32 *)info->pseudo_palette)[regno] = be32_to_cpu(transp << 24 |
172 red << 0 | green << 8 | blue << 16);
173 return 0;
174}
175
176static int carmine_check_var(struct fb_var_screeninfo *var,
177 struct fb_info *info)
178{
179 int ret;
180
181 ret = carmine_find_mode(var);
182 if (ret < 0)
183 return ret;
184
185 if (var->grayscale || var->rotate || var->nonstd)
186 return -EINVAL;
187
188 var->xres_virtual = var->xres;
189 var->yres_virtual = var->yres;
190
191 var->bits_per_pixel = 32;
192
193#ifdef __BIG_ENDIAN
194 var->transp.offset = 24;
195 var->red.offset = 0;
196 var->green.offset = 8;
197 var->blue.offset = 16;
198#else
199 var->transp.offset = 24;
200 var->red.offset = 16;
201 var->green.offset = 8;
202 var->blue.offset = 0;
203#endif
204
205 var->red.length = 8;
206 var->green.length = 8;
207 var->blue.length = 8;
208 var->transp.length = 8;
209
210 var->red.msb_right = 0;
211 var->green.msb_right = 0;
212 var->blue.msb_right = 0;
213 var->transp.msb_right = 0;
214 return 0;
215}
216
217static void carmine_init_display_param(struct carmine_fb *par)
218{
219 u32 width;
220 u32 height;
221 u32 param;
222 u32 window_size;
223 u32 soffset = par->smem_offset;
224
225 c_set_disp_reg(par, CARMINE_DISP_REG_C_TRANS, 0);
226 c_set_disp_reg(par, CARMINE_DISP_REG_MLMR_TRANS, 0);
227 c_set_disp_reg(par, CARMINE_DISP_REG_CURSOR_MODE,
228 CARMINE_CURSOR0_PRIORITY_MASK |
229 CARMINE_CURSOR1_PRIORITY_MASK |
230 CARMINE_CURSOR_CUTZ_MASK);
231
232 /* Set default cursor position */
233 c_set_disp_reg(par, CARMINE_DISP_REG_CUR1_POS, 0 << 16 | 0);
234 c_set_disp_reg(par, CARMINE_DISP_REG_CUR2_POS, 0 << 16 | 0);
235
236 /* Set default display mode */
237 c_set_disp_reg(par, CARMINE_DISP_REG_L0_EXT_MODE, CARMINE_WINDOW_MODE |
238 CARMINE_EXT_CMODE_DIRECT24_RGBA);
239 c_set_disp_reg(par, CARMINE_DISP_REG_L1_EXT_MODE,
240 CARMINE_EXT_CMODE_DIRECT24_RGBA);
241 c_set_disp_reg(par, CARMINE_DISP_REG_L2_EXT_MODE, CARMINE_EXTEND_MODE |
242 CARMINE_EXT_CMODE_DIRECT24_RGBA);
243 c_set_disp_reg(par, CARMINE_DISP_REG_L3_EXT_MODE, CARMINE_EXTEND_MODE |
244 CARMINE_EXT_CMODE_DIRECT24_RGBA);
245 c_set_disp_reg(par, CARMINE_DISP_REG_L4_EXT_MODE, CARMINE_EXTEND_MODE |
246 CARMINE_EXT_CMODE_DIRECT24_RGBA);
247 c_set_disp_reg(par, CARMINE_DISP_REG_L5_EXT_MODE, CARMINE_EXTEND_MODE |
248 CARMINE_EXT_CMODE_DIRECT24_RGBA);
249 c_set_disp_reg(par, CARMINE_DISP_REG_L6_EXT_MODE, CARMINE_EXTEND_MODE |
250 CARMINE_EXT_CMODE_DIRECT24_RGBA);
251 c_set_disp_reg(par, CARMINE_DISP_REG_L7_EXT_MODE, CARMINE_EXTEND_MODE |
252 CARMINE_EXT_CMODE_DIRECT24_RGBA);
253
254 /* Set default frame size to layer mode register */
255 width = par->res->hdp * 4 / CARMINE_DISP_WIDTH_UNIT;
256 width = width << CARMINE_DISP_WIDTH_SHIFT;
257
258 height = par->res->vdp - 1;
259 param = width | height;
260
261 c_set_disp_reg(par, CARMINE_DISP_REG_L0_MODE_W_H, param);
262 c_set_disp_reg(par, CARMINE_DISP_REG_L1_WIDTH, width);
263 c_set_disp_reg(par, CARMINE_DISP_REG_L2_MODE_W_H, param);
264 c_set_disp_reg(par, CARMINE_DISP_REG_L3_MODE_W_H, param);
265 c_set_disp_reg(par, CARMINE_DISP_REG_L4_MODE_W_H, param);
266 c_set_disp_reg(par, CARMINE_DISP_REG_L5_MODE_W_H, param);
267 c_set_disp_reg(par, CARMINE_DISP_REG_L6_MODE_W_H, param);
268 c_set_disp_reg(par, CARMINE_DISP_REG_L7_MODE_W_H, param);
269
270 /* Set default pos and size */
271 window_size = (par->res->vdp - 1) << CARMINE_DISP_WIN_H_SHIFT;
272 window_size |= par->res->hdp;
273
274 c_set_disp_reg(par, CARMINE_DISP_REG_L0_WIN_POS, 0);
275 c_set_disp_reg(par, CARMINE_DISP_REG_L0_WIN_SIZE, window_size);
276 c_set_disp_reg(par, CARMINE_DISP_REG_L1_WIN_POS, 0);
277 c_set_disp_reg(par, CARMINE_DISP_REG_L1_WIN_SIZE, window_size);
278 c_set_disp_reg(par, CARMINE_DISP_REG_L2_WIN_POS, 0);
279 c_set_disp_reg(par, CARMINE_DISP_REG_L2_WIN_SIZE, window_size);
280 c_set_disp_reg(par, CARMINE_DISP_REG_L3_WIN_POS, 0);
281 c_set_disp_reg(par, CARMINE_DISP_REG_L3_WIN_SIZE, window_size);
282 c_set_disp_reg(par, CARMINE_DISP_REG_L4_WIN_POS, 0);
283 c_set_disp_reg(par, CARMINE_DISP_REG_L4_WIN_SIZE, window_size);
284 c_set_disp_reg(par, CARMINE_DISP_REG_L5_WIN_POS, 0);
285 c_set_disp_reg(par, CARMINE_DISP_REG_L5_WIN_SIZE, window_size);
286 c_set_disp_reg(par, CARMINE_DISP_REG_L6_WIN_POS, 0);
287 c_set_disp_reg(par, CARMINE_DISP_REG_L6_WIN_SIZE, window_size);
288 c_set_disp_reg(par, CARMINE_DISP_REG_L7_WIN_POS, 0);
289 c_set_disp_reg(par, CARMINE_DISP_REG_L7_WIN_SIZE, window_size);
290
291 /* Set default origin address */
292 c_set_disp_reg(par, CARMINE_DISP_REG_L0_ORG_ADR, soffset);
293 c_set_disp_reg(par, CARMINE_DISP_REG_L1_ORG_ADR, soffset);
294 c_set_disp_reg(par, CARMINE_DISP_REG_L2_ORG_ADR1, soffset);
295 c_set_disp_reg(par, CARMINE_DISP_REG_L3_ORG_ADR1, soffset);
296 c_set_disp_reg(par, CARMINE_DISP_REG_L4_ORG_ADR1, soffset);
297 c_set_disp_reg(par, CARMINE_DISP_REG_L5_ORG_ADR1, soffset);
298 c_set_disp_reg(par, CARMINE_DISP_REG_L6_ORG_ADR1, soffset);
299 c_set_disp_reg(par, CARMINE_DISP_REG_L7_ORG_ADR1, soffset);
300
301 /* Set default display address */
302 c_set_disp_reg(par, CARMINE_DISP_REG_L0_DISP_ADR, soffset);
303 c_set_disp_reg(par, CARMINE_DISP_REG_L2_DISP_ADR1, soffset);
304 c_set_disp_reg(par, CARMINE_DISP_REG_L3_DISP_ADR1, soffset);
305 c_set_disp_reg(par, CARMINE_DISP_REG_L4_DISP_ADR1, soffset);
306 c_set_disp_reg(par, CARMINE_DISP_REG_L5_DISP_ADR1, soffset);
307 c_set_disp_reg(par, CARMINE_DISP_REG_L6_DISP_ADR0, soffset);
308 c_set_disp_reg(par, CARMINE_DISP_REG_L7_DISP_ADR0, soffset);
309
310 /* Set default display position */
311 c_set_disp_reg(par, CARMINE_DISP_REG_L0_DISP_POS, 0);
312 c_set_disp_reg(par, CARMINE_DISP_REG_L2_DISP_POS, 0);
313 c_set_disp_reg(par, CARMINE_DISP_REG_L3_DISP_POS, 0);
314 c_set_disp_reg(par, CARMINE_DISP_REG_L4_DISP_POS, 0);
315 c_set_disp_reg(par, CARMINE_DISP_REG_L5_DISP_POS, 0);
316 c_set_disp_reg(par, CARMINE_DISP_REG_L6_DISP_POS, 0);
317 c_set_disp_reg(par, CARMINE_DISP_REG_L7_DISP_POS, 0);
318
319 /* Set default blend mode */
320 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L0, 0);
321 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L1, 0);
322 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L2, 0);
323 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L3, 0);
324 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L4, 0);
325 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L5, 0);
326 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L6, 0);
327 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L7, 0);
328
329 /* default transparency mode */
330 c_set_disp_reg(par, CARMINE_DISP_REG_L0_TRANS, 0);
331 c_set_disp_reg(par, CARMINE_DISP_REG_L1_TRANS, 0);
332 c_set_disp_reg(par, CARMINE_DISP_REG_L2_TRANS, 0);
333 c_set_disp_reg(par, CARMINE_DISP_REG_L3_TRANS, 0);
334 c_set_disp_reg(par, CARMINE_DISP_REG_L4_TRANS, 0);
335 c_set_disp_reg(par, CARMINE_DISP_REG_L5_TRANS, 0);
336 c_set_disp_reg(par, CARMINE_DISP_REG_L6_TRANS, 0);
337 c_set_disp_reg(par, CARMINE_DISP_REG_L7_TRANS, 0);
338
339 /* Set default read skip parameter */
340 c_set_disp_reg(par, CARMINE_DISP_REG_L0RM, 0);
341 c_set_disp_reg(par, CARMINE_DISP_REG_L2RM, 0);
342 c_set_disp_reg(par, CARMINE_DISP_REG_L3RM, 0);
343 c_set_disp_reg(par, CARMINE_DISP_REG_L4RM, 0);
344 c_set_disp_reg(par, CARMINE_DISP_REG_L5RM, 0);
345 c_set_disp_reg(par, CARMINE_DISP_REG_L6RM, 0);
346 c_set_disp_reg(par, CARMINE_DISP_REG_L7RM, 0);
347
348 c_set_disp_reg(par, CARMINE_DISP_REG_L0PX, 0);
349 c_set_disp_reg(par, CARMINE_DISP_REG_L2PX, 0);
350 c_set_disp_reg(par, CARMINE_DISP_REG_L3PX, 0);
351 c_set_disp_reg(par, CARMINE_DISP_REG_L4PX, 0);
352 c_set_disp_reg(par, CARMINE_DISP_REG_L5PX, 0);
353 c_set_disp_reg(par, CARMINE_DISP_REG_L6PX, 0);
354 c_set_disp_reg(par, CARMINE_DISP_REG_L7PX, 0);
355
356 c_set_disp_reg(par, CARMINE_DISP_REG_L0PY, 0);
357 c_set_disp_reg(par, CARMINE_DISP_REG_L2PY, 0);
358 c_set_disp_reg(par, CARMINE_DISP_REG_L3PY, 0);
359 c_set_disp_reg(par, CARMINE_DISP_REG_L4PY, 0);
360 c_set_disp_reg(par, CARMINE_DISP_REG_L5PY, 0);
361 c_set_disp_reg(par, CARMINE_DISP_REG_L6PY, 0);
362 c_set_disp_reg(par, CARMINE_DISP_REG_L7PY, 0);
363}
364
365static void set_display_parameters(struct carmine_fb *par)
366{
367 u32 mode;
368 u32 hdp, vdp, htp, hsp, hsw, vtr, vsp, vsw;
369
370 /*
371 * display timing. Parameters are decreased by one because hardware
372 * spec is 0 to (n - 1)
373 */
374 hdp = par->res->hdp - 1;
375 vdp = par->res->vdp - 1;
376 htp = par->res->htp - 1;
377 hsp = par->res->hsp - 1;
378 hsw = par->res->hsw - 1;
379 vtr = par->res->vtr - 1;
380 vsp = par->res->vsp - 1;
381 vsw = par->res->vsw - 1;
382
383 c_set_disp_reg(par, CARMINE_DISP_REG_H_TOTAL,
384 htp << CARMINE_DISP_HTP_SHIFT);
385 c_set_disp_reg(par, CARMINE_DISP_REG_H_PERIOD,
386 (hdp << CARMINE_DISP_HDB_SHIFT) | hdp);
387 c_set_disp_reg(par, CARMINE_DISP_REG_V_H_W_H_POS,
388 (vsw << CARMINE_DISP_VSW_SHIFT) |
389 (hsw << CARMINE_DISP_HSW_SHIFT) |
390 (hsp));
391 c_set_disp_reg(par, CARMINE_DISP_REG_V_TOTAL,
392 vtr << CARMINE_DISP_VTR_SHIFT);
393 c_set_disp_reg(par, CARMINE_DISP_REG_V_PERIOD_POS,
394 (vdp << CARMINE_DISP_VDP_SHIFT) | vsp);
395
396 /* clock */
397 mode = c_get_disp_reg(par, CARMINE_DISP_REG_DCM1);
398 mode = (mode & ~CARMINE_DISP_DCM_MASK) |
399 (par->res->disp_mode & CARMINE_DISP_DCM_MASK);
400 /* enable video output and layer 0 */
401 mode |= CARMINE_DEN | CARMINE_L0E;
402 c_set_disp_reg(par, CARMINE_DISP_REG_DCM1, mode);
403}
404
405static int carmine_set_par(struct fb_info *info)
406{
407 struct carmine_fb *par = info->par;
408 int ret;
409
410 ret = carmine_find_mode(&info->var);
411 if (ret < 0)
412 return ret;
413
414 par->new_mode = ret;
415 if (par->cur_mode != par->new_mode) {
416
417 par->cur_mode = par->new_mode;
418 par->res = &car_modes[par->new_mode];
419
420 carmine_init_display_param(par);
421 set_display_parameters(par);
422 }
423
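	/*
	 * Example: with an 800x600 mode at 32 bits per pixel the line below
	 * works out to line_length = 800 * 32 / 8 = 3200 bytes per scanline,
	 * i.e. 3200 * 600 = 1920000 bytes per screen, which is exactly
	 * CARMINE_DISPLAY_MEM (800 * 600 * 4) from carminefb.h.
	 */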
424 info->fix.line_length = info->var.xres * info->var.bits_per_pixel / 8;
425 return 0;
426}
427
428static int init_hardware(struct carmine_hw *hw)
429{
430 u32 flags;
431 u32 loops;
432 u32 ret;
433
 434	/* Initialize Carmine */
435 /* Sets internal clock */
436 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_CLOCK_ENABLE,
437 CARMINE_DFLT_IP_CLOCK_ENABLE);
438
439 /* Video signal output is turned off */
440 c_set_hw_reg(hw, CARMINE_DISP0_REG + CARMINE_DISP_REG_DCM1, 0);
441 c_set_hw_reg(hw, CARMINE_DISP1_REG + CARMINE_DISP_REG_DCM1, 0);
442
443 /* Software reset */
444 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_SOFTWARE_RESET, 1);
445 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_SOFTWARE_RESET, 0);
446
447 /* I/O mode settings */
448 flags = CARMINE_DFLT_IP_DCTL_IO_CONT1 << 16 |
449 CARMINE_DFLT_IP_DCTL_IO_CONT0;
450 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_IOCONT1_IOCONT0,
451 flags);
452
453 /* DRAM initial sequence */
454 flags = CARMINE_DFLT_IP_DCTL_MODE << 16 | CARMINE_DFLT_IP_DCTL_ADD;
455 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_MODE_ADD,
456 flags);
457
458 flags = CARMINE_DFLT_IP_DCTL_SET_TIME1 << 16 |
459 CARMINE_DFLT_IP_DCTL_EMODE;
460 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_SETTIME1_EMODE,
461 flags);
462
463 flags = CARMINE_DFLT_IP_DCTL_REFRESH << 16 |
464 CARMINE_DFLT_IP_DCTL_SET_TIME2;
465 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_REFRESH_SETTIME2,
466 flags);
467
468 flags = CARMINE_DFLT_IP_DCTL_RESERVE2 << 16 |
469 CARMINE_DFLT_IP_DCTL_FIFO_DEPTH;
470 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_RSV2_RSV1, flags);
471
472 flags = CARMINE_DFLT_IP_DCTL_DDRIF2 << 16 | CARMINE_DFLT_IP_DCTL_DDRIF1;
473 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_DDRIF2_DDRIF1,
474 flags);
475
476 flags = CARMINE_DFLT_IP_DCTL_RESERVE0 << 16 |
477 CARMINE_DFLT_IP_DCTL_STATES;
478 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_RSV0_STATES,
479 flags);
480
481 /* Executes DLL reset */
482 if (CARMINE_DCTL_DLL_RESET) {
483 for (loops = 0; loops < CARMINE_DCTL_INIT_WAIT_LIMIT; loops++) {
484
485 ret = c_get_hw_reg(hw, CARMINE_DCTL_REG +
486 CARMINE_DCTL_REG_RSV0_STATES);
487 ret &= CARMINE_DCTL_REG_STATES_MASK;
488 if (!ret)
489 break;
490
491 mdelay(CARMINE_DCTL_INIT_WAIT_INTERVAL);
492 }
493
494 if (loops >= CARMINE_DCTL_INIT_WAIT_LIMIT) {
495 printk(KERN_ERR "DRAM init failed\n");
496 return -EIO;
497 }
498 }
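	/*
	 * With the values from carminefb_regs.h (1 ms poll interval, 5000
	 * iterations) the loop above busy-waits for up to roughly five
	 * seconds for the DRAM controller state bits to clear before
	 * failing with -EIO.
	 */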
499
500 flags = CARMINE_DFLT_IP_DCTL_MODE_AFT_RST << 16 |
501 CARMINE_DFLT_IP_DCTL_ADD;
502 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_MODE_ADD, flags);
503
504 flags = CARMINE_DFLT_IP_DCTL_RESERVE0 << 16 |
505 CARMINE_DFLT_IP_DCTL_STATES_AFT_RST;
506 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_RSV0_STATES,
507 flags);
508
509 /* Initialize the write back register */
510 c_set_hw_reg(hw, CARMINE_WB_REG + CARMINE_WB_REG_WBM,
511 CARMINE_WB_REG_WBM_DEFAULT);
512
513 /* Initialize the Kottos registers */
514 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_VRINTM, 0);
515 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_VRERRM, 0);
516
517 /* Set DC offsets */
518 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_PX, 0);
519 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_PY, 0);
520 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_LX, 0);
521 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_LY, 0);
522 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_TX, 0);
523 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_TY, 0);
524 return 0;
525}
526
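/*
 * Drawing is handled by the generic cfb_* software helpers operating on
 * the remapped video memory; only mode setting and palette handling are
 * Carmine specific.
 */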
527static struct fb_ops carminefb_ops = {
528 .owner = THIS_MODULE,
529 .fb_fillrect = cfb_fillrect,
530 .fb_copyarea = cfb_copyarea,
531 .fb_imageblit = cfb_imageblit,
532
533 .fb_check_var = carmine_check_var,
534 .fb_set_par = carmine_set_par,
535 .fb_setcolreg = carmine_setcolreg,
536};
537
538static int alloc_carmine_fb(void __iomem *regs, void __iomem *smem_base,
539 int smem_offset, struct device *device, struct fb_info **rinfo)
540{
541 int ret;
542 struct fb_info *info;
543 struct carmine_fb *par;
544
545 info = framebuffer_alloc(sizeof *par, device);
546 if (!info)
547 return -ENOMEM;
548
549 par = info->par;
550 par->display_reg = regs;
551 par->smem_offset = smem_offset;
552
553 info->screen_base = smem_base + smem_offset;
554 info->screen_size = CARMINE_DISPLAY_MEM;
555 info->fbops = &carminefb_ops;
556
557 info->fix = carminefb_fix;
558 info->pseudo_palette = par->pseudo_palette;
559 info->flags = FBINFO_DEFAULT;
560
561 ret = fb_alloc_cmap(&info->cmap, 256, 1);
562 if (ret < 0)
563 goto err_free_fb;
564
 565	if (fb_mode >= ARRAY_SIZE(carmine_modedb))
566 fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
567
568 par->cur_mode = par->new_mode = ~0;
569
570 ret = fb_find_mode(&info->var, info, fb_mode_str, carmine_modedb,
571 ARRAY_SIZE(carmine_modedb),
572 &carmine_modedb[fb_mode], 32);
573 if (!ret || ret == 4) {
574 ret = -EINVAL;
575 goto err_dealloc_cmap;
576 }
577
578 fb_videomode_to_modelist(carmine_modedb, ARRAY_SIZE(carmine_modedb),
579 &info->modelist);
580
581 ret = register_framebuffer(info);
582 if (ret < 0)
583 goto err_dealloc_cmap;
584
585 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
586 info->fix.id);
587
588 *rinfo = info;
589 return 0;
590
591err_dealloc_cmap:
592 fb_dealloc_cmap(&info->cmap);
593err_free_fb:
594 framebuffer_release(info);
595 return ret;
596}
597
598static void cleanup_fb_device(struct fb_info *info)
599{
600 if (info) {
601 unregister_framebuffer(info);
602 fb_dealloc_cmap(&info->cmap);
603 framebuffer_release(info);
604 }
605}
606
607static int __devinit carminefb_probe(struct pci_dev *dev,
608 const struct pci_device_id *ent)
609{
610 struct carmine_hw *hw;
611 struct device *device = &dev->dev;
612 struct fb_info *info;
613 int ret;
614
615 ret = pci_enable_device(dev);
616 if (ret)
617 return ret;
618
619 ret = -ENOMEM;
620 hw = kzalloc(sizeof *hw, GFP_KERNEL);
621 if (!hw)
622 goto err_enable_pci;
623
624 carminefb_fix.mmio_start = pci_resource_start(dev, CARMINE_CONFIG_BAR);
625 carminefb_fix.mmio_len = pci_resource_len(dev, CARMINE_CONFIG_BAR);
626
627 if (!request_mem_region(carminefb_fix.mmio_start,
628 carminefb_fix.mmio_len,
629 "carminefb regbase")) {
630 printk(KERN_ERR "carminefb: Can't reserve regbase.\n");
631 ret = -EBUSY;
632 goto err_free_hw;
633 }
634 hw->v_regs = ioremap_nocache(carminefb_fix.mmio_start,
635 carminefb_fix.mmio_len);
636 if (!hw->v_regs) {
637 printk(KERN_ERR "carminefb: Can't remap %s register.\n",
638 carminefb_fix.id);
639 goto err_free_reg_mmio;
640 }
641
642 carminefb_fix.smem_start = pci_resource_start(dev, CARMINE_MEMORY_BAR);
643 carminefb_fix.smem_len = pci_resource_len(dev, CARMINE_MEMORY_BAR);
644
 645	/* The memory area tends to be very large (256 MiB). Remap only what
 646	 * is required for the largest supported resolution to avoid remaps
 647	 * at run time.
 648	 */
649 if (carminefb_fix.smem_len > CARMINE_TOTAL_DIPLAY_MEM)
650 carminefb_fix.smem_len = CARMINE_TOTAL_DIPLAY_MEM;
651
652 else if (carminefb_fix.smem_len < CARMINE_TOTAL_DIPLAY_MEM) {
 653		printk(KERN_ERR "carminefb: Memory bar is only %d bytes, %d "
 654				"are required.\n", carminefb_fix.smem_len,
 655				CARMINE_TOTAL_DIPLAY_MEM);
656 goto err_free_reg_mmio;
657 }
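	/*
	 * CARMINE_TOTAL_DIPLAY_MEM is MAX_DISPLAY * 800 * 600 * 4 =
	 * 3840000 bytes, so just under 4 MiB of the BAR ends up mapped
	 * regardless of how much memory the card actually exposes.
	 */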
658
659 if (!request_mem_region(carminefb_fix.smem_start,
660 carminefb_fix.smem_len, "carminefb smem")) {
661 printk(KERN_ERR "carminefb: Can't reserve smem.\n");
662 goto err_unmap_vregs;
663 }
664
665 hw->screen_mem = ioremap_nocache(carminefb_fix.smem_start,
666 carminefb_fix.smem_len);
667 if (!hw->screen_mem) {
668 printk(KERN_ERR "carmine: Can't ioremap smem area.\n");
 669		goto err_reg_smem;
672 }
673
674 ret = init_hardware(hw);
675 if (ret)
676 goto err_unmap_screen;
677
678 info = NULL;
679 if (fb_displays & CARMINE_USE_DISPLAY0) {
680 ret = alloc_carmine_fb(hw->v_regs + CARMINE_DISP0_REG,
681 hw->screen_mem, CARMINE_DISPLAY_MEM * 0,
682 device, &info);
683 if (ret)
684 goto err_deinit_hw;
685 }
686
687 hw->fb[0] = info;
688
689 info = NULL;
690 if (fb_displays & CARMINE_USE_DISPLAY1) {
691 ret = alloc_carmine_fb(hw->v_regs + CARMINE_DISP1_REG,
692 hw->screen_mem, CARMINE_DISPLAY_MEM * 1,
693 device, &info);
694 if (ret)
695 goto err_cleanup_fb0;
696 }
697
698 hw->fb[1] = info;
699 info = NULL;
700
701 pci_set_drvdata(dev, hw);
702 return 0;
703
704err_cleanup_fb0:
705 cleanup_fb_device(hw->fb[0]);
706err_deinit_hw:
707 /* disable clock, etc */
708 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_CLOCK_ENABLE, 0);
709err_unmap_screen:
710 iounmap(hw->screen_mem);
711err_reg_smem:
 712	release_mem_region(carminefb_fix.smem_start, carminefb_fix.smem_len);
713err_unmap_vregs:
714 iounmap(hw->v_regs);
715err_free_reg_mmio:
716 release_mem_region(carminefb_fix.mmio_start, carminefb_fix.mmio_len);
717err_free_hw:
718 kfree(hw);
719err_enable_pci:
720 pci_disable_device(dev);
721 return ret;
722}
723
724static void __devexit carminefb_remove(struct pci_dev *dev)
725{
726 struct carmine_hw *hw = pci_get_drvdata(dev);
727 struct fb_fix_screeninfo fix;
728 int i;
729
 730	/* in case we use only fb1 and not fb0 */
731 if (hw->fb[0])
732 fix = hw->fb[0]->fix;
733 else
734 fix = hw->fb[1]->fix;
735
736 /* deactivate display(s) and switch clocks */
737 c_set_hw_reg(hw, CARMINE_DISP0_REG + CARMINE_DISP_REG_DCM1, 0);
738 c_set_hw_reg(hw, CARMINE_DISP1_REG + CARMINE_DISP_REG_DCM1, 0);
739 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_CLOCK_ENABLE, 0);
740
741 for (i = 0; i < MAX_DISPLAY; i++)
742 cleanup_fb_device(hw->fb[i]);
743
744 iounmap(hw->screen_mem);
745 release_mem_region(fix.smem_start, fix.smem_len);
746 iounmap(hw->v_regs);
747 release_mem_region(fix.mmio_start, fix.mmio_len);
748
749 pci_set_drvdata(dev, NULL);
750 pci_disable_device(dev);
751 kfree(hw);
752}
753
754#define PCI_VENDOR_ID_FUJITU_LIMITED 0x10cf
755static struct pci_device_id carmine_devices[] __devinitdata = {
756{
757 PCI_DEVICE(PCI_VENDOR_ID_FUJITU_LIMITED, 0x202b)},
758 {0, 0, 0, 0, 0, 0, 0}
759};
760
761MODULE_DEVICE_TABLE(pci, carmine_devices);
762
763static struct pci_driver carmine_pci_driver = {
764 .name = "carminefb",
765 .id_table = carmine_devices,
766 .probe = carminefb_probe,
767 .remove = __devexit_p(carminefb_remove),
768};
769
770static int __init carminefb_init(void)
771{
772 if (!(fb_displays &
773 (CARMINE_USE_DISPLAY0 | CARMINE_USE_DISPLAY1))) {
 774		printk(KERN_ERR "If you disable both displays then you don't "
 775			"need the driver at all\n");
776 return -EINVAL;
777 }
778 return pci_register_driver(&carmine_pci_driver);
779}
780module_init(carminefb_init);
781
782static void __exit carminefb_cleanup(void)
783{
784 pci_unregister_driver(&carmine_pci_driver);
785}
786module_exit(carminefb_cleanup);
787
788MODULE_AUTHOR("Sebastian Siewior <bigeasy@linutronix.de>");
789MODULE_DESCRIPTION("Framebuffer driver for Fujitsu Carmine based devices");
790MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/carminefb.h b/drivers/video/carminefb.h
new file mode 100644
index 000000000000..05306de0c6b6
--- /dev/null
+++ b/drivers/video/carminefb.h
@@ -0,0 +1,64 @@
1#ifndef CARMINE_CARMINE_H
2#define CARMINE_CARMINE_H
3
4#define CARMINE_MEMORY_BAR 2
5#define CARMINE_CONFIG_BAR 3
6
7#define MAX_DISPLAY 2
8#define CARMINE_DISPLAY_MEM (800 * 600 * 4)
9#define CARMINE_TOTAL_DIPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY)
10
11#define CARMINE_USE_DISPLAY0 (1 << 0)
12#define CARMINE_USE_DISPLAY1 (1 << 1)
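/*
 * These bits form the fb_displays mask tested in carminefb_probe():
 * bit 0 enables the first display controller, bit 1 the second.
 * carminefb_init() refuses to load when neither bit is set.
 */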
13
14/*
 15 * These values work on the eval card. Custom boards may use different
 16 * timings; here is an example :)
17 */
18
19/* DRAM initialization values */
20#ifdef CONFIG_FB_CARMINE_DRAM_EVAL
21
22#define CARMINE_DFLT_IP_CLOCK_ENABLE (0x03ff)
23#define CARMINE_DFLT_IP_DCTL_ADD (0x05c3)
24#define CARMINE_DFLT_IP_DCTL_MODE (0x0121)
25#define CARMINE_DFLT_IP_DCTL_EMODE (0x8000)
26#define CARMINE_DFLT_IP_DCTL_SET_TIME1 (0x4749)
27#define CARMINE_DFLT_IP_DCTL_SET_TIME2 (0x2a22)
28#define CARMINE_DFLT_IP_DCTL_REFRESH (0x0042)
29#define CARMINE_DFLT_IP_DCTL_STATES (0x0003)
30#define CARMINE_DFLT_IP_DCTL_RESERVE0 (0x0020)
31#define CARMINE_DFLT_IP_DCTL_FIFO_DEPTH (0x000f)
32#define CARMINE_DFLT_IP_DCTL_RESERVE2 (0x0000)
33#define CARMINE_DFLT_IP_DCTL_DDRIF1 (0x6646)
34#define CARMINE_DFLT_IP_DCTL_DDRIF2 (0x0055)
35#define CARMINE_DFLT_IP_DCTL_MODE_AFT_RST (0x0021)
36#define CARMINE_DFLT_IP_DCTL_STATES_AFT_RST (0x0002)
37#define CARMINE_DFLT_IP_DCTL_IO_CONT0 (0x0555)
38#define CARMINE_DFLT_IP_DCTL_IO_CONT1 (0x0555)
39#define CARMINE_DCTL_DLL_RESET (1)
40#endif
41
42#ifdef CONFIG_CARMINE_DRAM_CUSTOM
43
44#define CARMINE_DFLT_IP_CLOCK_ENABLE (0x03ff)
45#define CARMINE_DFLT_IP_DCTL_ADD (0x03b2)
46#define CARMINE_DFLT_IP_DCTL_MODE (0x0161)
47#define CARMINE_DFLT_IP_DCTL_EMODE (0x8000)
48#define CARMINE_DFLT_IP_DCTL_SET_TIME1 (0x2628)
49#define CARMINE_DFLT_IP_DCTL_SET_TIME2 (0x1a09)
50#define CARMINE_DFLT_IP_DCTL_REFRESH (0x00fe)
51#define CARMINE_DFLT_IP_DCTL_STATES (0x0003)
52#define CARMINE_DFLT_IP_DCTL_RESERVE0 (0x0020)
53#define CARMINE_DFLT_IP_DCTL_FIFO_DEPTH (0x000f)
54#define CARMINE_DFLT_IP_DCTL_RESERVE2 (0x0000)
55#define CARMINE_DFLT_IP_DCTL_DDRIF1 (0x0646)
56#define CARMINE_DFLT_IP_DCTL_DDRIF2 (0x55aa)
57#define CARMINE_DFLT_IP_DCTL_MODE_AFT_RST (0x0061)
58#define CARMINE_DFLT_IP_DCTL_STATES_AFT_RST (0x0002)
59#define CARMINE_DFLT_IP_DCTL_IO_CONT0 (0x0555)
60#define CARMINE_DFLT_IP_DCTL_IO_CONT1 (0x0555)
61#define CARMINE_DCTL_DLL_RESET (1)
62#endif
63
64#endif
diff --git a/drivers/video/carminefb_regs.h b/drivers/video/carminefb_regs.h
new file mode 100644
index 000000000000..045215600b73
--- /dev/null
+++ b/drivers/video/carminefb_regs.h
@@ -0,0 +1,159 @@
1#ifndef _CARMINEFB_REGS_H
2#define _CARMINEFB_REGS_H
3
4#define CARMINE_OVERLAY_EXT_MODE (0x00000002)
5#define CARMINE_GRAPH_REG (0x00000000)
6#define CARMINE_DISP0_REG (0x00100000)
7#define CARMINE_DISP1_REG (0x00140000)
8#define CARMINE_WB_REG (0x00180000)
9#define CARMINE_DCTL_REG (0x00300000)
10#define CARMINE_CTL_REG (0x00400000)
11#define CARMINE_WINDOW_MODE (0x00000001)
12#define CARMINE_EXTEND_MODE (CARMINE_WINDOW_MODE | \
13 CARMINE_OVERLAY_EXT_MODE)
14#define CARMINE_L0E (1 << 16)
15#define CARMINE_L2E (1 << 18)
16#define CARMINE_DEN (1 << 31)
17
18#define CARMINE_EXT_CMODE_DIRECT24_RGBA (0xC0000000)
19#define CARMINE_DCTL_REG_MODE_ADD (0x00)
20#define CARMINE_DCTL_REG_SETTIME1_EMODE (0x04)
21#define CARMINE_DCTL_REG_REFRESH_SETTIME2 (0x08)
22#define CARMINE_DCTL_REG_RSV0_STATES (0x0C)
23#define CARMINE_DCTL_REG_RSV2_RSV1 (0x10)
24#define CARMINE_DCTL_REG_DDRIF2_DDRIF1 (0x14)
25#define CARMINE_DCTL_REG_IOCONT1_IOCONT0 (0x24)
26#define CARMINE_DCTL_REG_STATES_MASK (0x000F)
27#define CARMINE_DCTL_INIT_WAIT_INTERVAL (1)
28#define CARMINE_DCTL_INIT_WAIT_LIMIT (5000)
29#define CARMINE_WB_REG_WBM_DEFAULT (0x0001c020)
30#define CARMINE_DISP_REG_L0RM (0x1880)
31#define CARMINE_DISP_REG_L0PX (0x1884)
32#define CARMINE_DISP_REG_L0PY (0x1888)
33#define CARMINE_DISP_REG_L2RM (0x18A0)
34#define CARMINE_DISP_REG_L2PX (0x18A4)
35#define CARMINE_DISP_REG_L2PY (0x18A8)
36#define CARMINE_DISP_REG_L3RM (0x18B0)
37#define CARMINE_DISP_REG_L3PX (0x18B4)
38#define CARMINE_DISP_REG_L3PY (0x18B8)
39#define CARMINE_DISP_REG_L4RM (0x18C0)
40#define CARMINE_DISP_REG_L4PX (0x18C4)
41#define CARMINE_DISP_REG_L4PY (0x18C8)
42#define CARMINE_DISP_REG_L5RM (0x18D0)
43#define CARMINE_DISP_REG_L5PX (0x18D4)
44#define CARMINE_DISP_REG_L5PY (0x18D8)
45#define CARMINE_DISP_REG_L6RM (0x1924)
46#define CARMINE_DISP_REG_L6PX (0x1928)
47#define CARMINE_DISP_REG_L6PY (0x192C)
48#define CARMINE_DISP_REG_L7RM (0x1964)
49#define CARMINE_DISP_REG_L7PX (0x1968)
50#define CARMINE_DISP_REG_L7PY (0x196C)
51#define CARMINE_WB_REG_WBM (0x0004)
52#define CARMINE_DISP_HTP_SHIFT (16)
53#define CARMINE_DISP_HDB_SHIFT (16)
54#define CARMINE_DISP_HSW_SHIFT (16)
55#define CARMINE_DISP_VSW_SHIFT (24)
56#define CARMINE_DISP_VTR_SHIFT (16)
57#define CARMINE_DISP_VDP_SHIFT (16)
58#define CARMINE_CURSOR_CUTZ_MASK (0x00000100)
59#define CARMINE_CURSOR0_PRIORITY_MASK (0x00010000)
60#define CARMINE_CURSOR1_PRIORITY_MASK (0x00020000)
61#define CARMINE_DISP_WIDTH_SHIFT (16)
62#define CARMINE_DISP_WIN_H_SHIFT (16)
63#define CARMINE_DISP_REG_H_TOTAL (0x0004)
64#define CARMINE_DISP_REG_H_PERIOD (0x0008)
65#define CARMINE_DISP_REG_V_H_W_H_POS (0x000C)
66#define CARMINE_DISP_REG_V_TOTAL (0x0010)
67#define CARMINE_DISP_REG_V_PERIOD_POS (0x0014)
68#define CARMINE_DISP_REG_L0_MODE_W_H (0x0020)
69#define CARMINE_DISP_REG_L0_ORG_ADR (0x0024)
70#define CARMINE_DISP_REG_L0_DISP_ADR (0x0028)
71#define CARMINE_DISP_REG_L0_DISP_POS (0x002C)
72#define CARMINE_DISP_REG_L1_WIDTH (0x0030)
73#define CARMINE_DISP_REG_L1_ORG_ADR (0x0034)
74#define CARMINE_DISP_REG_L2_MODE_W_H (0x0040)
75#define CARMINE_DISP_REG_L2_ORG_ADR1 (0x0044)
76#define CARMINE_DISP_REG_L2_DISP_ADR1 (0x0048)
77#define CARMINE_DISP_REG_L2_DISP_POS (0x0054)
78#define CARMINE_DISP_REG_L3_MODE_W_H (0x0058)
79#define CARMINE_DISP_REG_L3_ORG_ADR1 (0x005C)
80#define CARMINE_DISP_REG_L3_DISP_ADR1 (0x0060)
81#define CARMINE_DISP_REG_L3_DISP_POS (0x006C)
82#define CARMINE_DISP_REG_L4_MODE_W_H (0x0070)
83#define CARMINE_DISP_REG_L4_ORG_ADR1 (0x0074)
84#define CARMINE_DISP_REG_L4_DISP_ADR1 (0x0078)
85#define CARMINE_DISP_REG_L4_DISP_POS (0x0084)
86#define CARMINE_DISP_REG_L5_MODE_W_H (0x0088)
87#define CARMINE_DISP_REG_L5_ORG_ADR1 (0x008C)
88#define CARMINE_DISP_REG_L5_DISP_ADR1 (0x0090)
89#define CARMINE_DISP_REG_L5_DISP_POS (0x009C)
90#define CARMINE_DISP_REG_CURSOR_MODE (0x00A0)
91#define CARMINE_DISP_REG_CUR1_POS (0x00A8)
92#define CARMINE_DISP_REG_CUR2_POS (0x00B0)
93#define CARMINE_DISP_REG_C_TRANS (0x00BC)
94#define CARMINE_DISP_REG_MLMR_TRANS (0x00C0)
95#define CARMINE_DISP_REG_L0_EXT_MODE (0x0110)
96#define CARMINE_DISP_REG_L0_WIN_POS (0x0114)
97#define CARMINE_DISP_REG_L0_WIN_SIZE (0x0118)
98#define CARMINE_DISP_REG_L1_EXT_MODE (0x0120)
99#define CARMINE_DISP_REG_L1_WIN_POS (0x0124)
100#define CARMINE_DISP_REG_L1_WIN_SIZE (0x0128)
101#define CARMINE_DISP_REG_L2_EXT_MODE (0x0130)
102#define CARMINE_DISP_REG_L2_WIN_POS (0x0134)
103#define CARMINE_DISP_REG_L2_WIN_SIZE (0x0138)
104#define CARMINE_DISP_REG_L3_EXT_MODE (0x0140)
105#define CARMINE_DISP_REG_L3_WIN_POS (0x0144)
106#define CARMINE_DISP_REG_L3_WIN_SIZE (0x0148)
107#define CARMINE_DISP_REG_L4_EXT_MODE (0x0150)
108#define CARMINE_DISP_REG_L4_WIN_POS (0x0154)
109#define CARMINE_DISP_REG_L4_WIN_SIZE (0x0158)
110#define CARMINE_DISP_REG_L5_EXT_MODE (0x0160)
111#define CARMINE_DISP_REG_L5_WIN_POS (0x0164)
112#define CARMINE_DISP_REG_L5_WIN_SIZE (0x0168)
113#define CARMINE_DISP_REG_L6_EXT_MODE (0x1918)
114#define CARMINE_DISP_REG_L6_WIN_POS (0x191c)
115#define CARMINE_DISP_REG_L6_WIN_SIZE (0x1920)
116#define CARMINE_DISP_REG_L7_EXT_MODE (0x1958)
117#define CARMINE_DISP_REG_L7_WIN_POS (0x195c)
118#define CARMINE_DISP_REG_L7_WIN_SIZE (0x1960)
119#define CARMINE_DISP_REG_BLEND_MODE_L0 (0x00B4)
120#define CARMINE_DISP_REG_BLEND_MODE_L1 (0x0188)
121#define CARMINE_DISP_REG_BLEND_MODE_L2 (0x018C)
122#define CARMINE_DISP_REG_BLEND_MODE_L3 (0x0190)
123#define CARMINE_DISP_REG_BLEND_MODE_L4 (0x0194)
124#define CARMINE_DISP_REG_BLEND_MODE_L5 (0x0198)
125#define CARMINE_DISP_REG_BLEND_MODE_L6 (0x1990)
126#define CARMINE_DISP_REG_BLEND_MODE_L7 (0x1994)
127#define CARMINE_DISP_REG_L0_TRANS (0x01A0)
128#define CARMINE_DISP_REG_L1_TRANS (0x01A4)
129#define CARMINE_DISP_REG_L2_TRANS (0x01A8)
130#define CARMINE_DISP_REG_L3_TRANS (0x01AC)
131#define CARMINE_DISP_REG_L4_TRANS (0x01B0)
132#define CARMINE_DISP_REG_L5_TRANS (0x01B4)
133#define CARMINE_DISP_REG_L6_TRANS (0x1998)
134#define CARMINE_DISP_REG_L7_TRANS (0x199c)
135#define CARMINE_EXTEND_MODE_MASK (0x00000003)
136#define CARMINE_DISP_DCM_MASK (0x0000FFFF)
137#define CARMINE_DISP_REG_DCM1 (0x0100)
138#define CARMINE_DISP_WIDTH_UNIT (64)
139#define CARMINE_DISP_REG_L6_MODE_W_H (0x1900)
140#define CARMINE_DISP_REG_L6_ORG_ADR1 (0x1904)
141#define CARMINE_DISP_REG_L6_DISP_ADR0 (0x1908)
142#define CARMINE_DISP_REG_L6_DISP_POS (0x1914)
143#define CARMINE_DISP_REG_L7_MODE_W_H (0x1940)
144#define CARMINE_DISP_REG_L7_ORG_ADR1 (0x1944)
145#define CARMINE_DISP_REG_L7_DISP_ADR0 (0x1948)
146#define CARMINE_DISP_REG_L7_DISP_POS (0x1954)
147#define CARMINE_CTL_REG_CLOCK_ENABLE (0x000C)
148#define CARMINE_CTL_REG_SOFTWARE_RESET (0x0010)
149#define CARMINE_CTL_REG_IST_MASK_ALL (0x07FFFFFF)
150#define CARMINE_GRAPH_REG_VRINTM (0x00028064)
151#define CARMINE_GRAPH_REG_VRERRM (0x0002806C)
152#define CARMINE_GRAPH_REG_DC_OFFSET_PX (0x0004005C)
153#define CARMINE_GRAPH_REG_DC_OFFSET_PY (0x00040060)
154#define CARMINE_GRAPH_REG_DC_OFFSET_LX (0x00040064)
155#define CARMINE_GRAPH_REG_DC_OFFSET_LY (0x00040068)
156#define CARMINE_GRAPH_REG_DC_OFFSET_TX (0x0004006C)
157#define CARMINE_GRAPH_REG_DC_OFFSET_TY (0x00040070)
158
159#endif
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
new file mode 100644
index 000000000000..7bad24ed04ef
--- /dev/null
+++ b/drivers/video/cobalt_lcdfb.c
@@ -0,0 +1,371 @@
1/*
2 * Cobalt server LCD frame buffer driver.
3 *
4 * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include <linux/delay.h>
21#include <linux/fb.h>
22#include <linux/init.h>
23#include <linux/io.h>
24#include <linux/ioport.h>
25#include <linux/uaccess.h>
26#include <linux/platform_device.h>
27
28/*
29 * Cursor position address
30 * \X 0 1 2 ... 14 15
31 * Y+----+----+----+---+----+----+
32 * 0|0x00|0x01|0x02|...|0x0e|0x0f|
33 * +----+----+----+---+----+----+
34 * 1|0x40|0x41|0x42|...|0x4e|0x4f|
35 * +----+----+----+---+----+----+
36 */
37#define LCD_DATA_REG_OFFSET 0x10
38#define LCD_XRES_MAX 16
39#define LCD_YRES_MAX 2
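/* 16 columns x 2 rows = 32 addressable character cells */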
40#define LCD_CHARS_MAX 32
41
42#define LCD_CLEAR 0x01
43#define LCD_CURSOR_MOVE_HOME 0x02
44#define LCD_RESET 0x06
45#define LCD_OFF 0x08
46#define LCD_CURSOR_OFF 0x0c
47#define LCD_CURSOR_BLINK_OFF 0x0e
48#define LCD_CURSOR_ON 0x0f
49#define LCD_ON LCD_CURSOR_ON
50#define LCD_CURSOR_MOVE_LEFT 0x10
51#define LCD_CURSOR_MOVE_RIGHT 0x14
52#define LCD_DISPLAY_LEFT 0x18
53#define LCD_DISPLAY_RIGHT 0x1c
 54#define LCD_PRERESET		0x3f	/* execute 4 times in a row */
55#define LCD_BUSY 0x80
56
57#define LCD_GRAPHIC_MODE 0x40
58#define LCD_TEXT_MODE 0x80
59#define LCD_CUR_POS_MASK 0x7f
60
61#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
62#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
63
64static inline void lcd_write_control(struct fb_info *info, u8 control)
65{
66 writel((u32)control << 24, info->screen_base);
67}
68
69static inline u8 lcd_read_control(struct fb_info *info)
70{
71 return readl(info->screen_base) >> 24;
72}
73
74static inline void lcd_write_data(struct fb_info *info, u8 data)
75{
76 writel((u32)data << 24, info->screen_base + LCD_DATA_REG_OFFSET);
77}
78
79static inline u8 lcd_read_data(struct fb_info *info)
80{
81 return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
82}
83
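/*
 * Poll the busy flag (bit 7 of the control register), sleeping 1 ms
 * between reads, for at most 10 attempts. Returns 0 once the controller
 * is ready, -EBUSY if it stays busy and -EINTR when interrupted by a
 * signal.
 */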
84static int lcd_busy_wait(struct fb_info *info)
85{
86 u8 val = 0;
87 int timeout = 10, retval = 0;
88
89 do {
90 val = lcd_read_control(info);
91 val &= LCD_BUSY;
92 if (val != LCD_BUSY)
93 break;
94
95 if (msleep_interruptible(1))
96 return -EINTR;
97
98 timeout--;
99 } while (timeout);
100
101 if (val == LCD_BUSY)
102 retval = -EBUSY;
103
104 return retval;
105}
106
107static void lcd_clear(struct fb_info *info)
108{
109 int i;
110
111 for (i = 0; i < 4; i++) {
112 udelay(150);
113
114 lcd_write_control(info, LCD_PRERESET);
115 }
116
117 udelay(150);
118
119 lcd_write_control(info, LCD_CLEAR);
120
121 udelay(150);
122
123 lcd_write_control(info, LCD_RESET);
124}
125
126static struct fb_fix_screeninfo cobalt_lcdfb_fix __initdata = {
127 .id = "cobalt-lcd",
128 .type = FB_TYPE_TEXT,
129 .type_aux = FB_AUX_TEXT_MDA,
130 .visual = FB_VISUAL_MONO01,
131 .line_length = LCD_XRES_MAX,
132 .accel = FB_ACCEL_NONE,
133};
134
135static ssize_t cobalt_lcdfb_read(struct fb_info *info, char __user *buf,
136 size_t count, loff_t *ppos)
137{
138 char src[LCD_CHARS_MAX];
139 unsigned long pos;
140 int len, retval = 0;
141
142 pos = *ppos;
143 if (pos >= LCD_CHARS_MAX || count == 0)
144 return 0;
145
146 if (count > LCD_CHARS_MAX)
147 count = LCD_CHARS_MAX;
148
149 if (pos + count > LCD_CHARS_MAX)
150 count = LCD_CHARS_MAX - pos;
151
152 for (len = 0; len < count; len++) {
153 retval = lcd_busy_wait(info);
154 if (retval < 0)
155 break;
156
157 lcd_write_control(info, LCD_TEXT_POS(pos));
158
159 retval = lcd_busy_wait(info);
160 if (retval < 0)
161 break;
162
163 src[len] = lcd_read_data(info);
164 if (pos == 0x0f)
165 pos = 0x40;
166 else
167 pos++;
168 }
169
170 if (retval < 0 && signal_pending(current))
171 return -ERESTARTSYS;
172
173 if (copy_to_user(buf, src, len))
174 return -EFAULT;
175
176 *ppos += len;
177
178 return len;
179}
180
181static ssize_t cobalt_lcdfb_write(struct fb_info *info, const char __user *buf,
182 size_t count, loff_t *ppos)
183{
184 char dst[LCD_CHARS_MAX];
185 unsigned long pos;
186 int len, retval = 0;
187
188 pos = *ppos;
189 if (pos >= LCD_CHARS_MAX || count == 0)
190 return 0;
191
192 if (count > LCD_CHARS_MAX)
193 count = LCD_CHARS_MAX;
194
195 if (pos + count > LCD_CHARS_MAX)
196 count = LCD_CHARS_MAX - pos;
197
198 if (copy_from_user(dst, buf, count))
199 return -EFAULT;
200
201 for (len = 0; len < count; len++) {
202 retval = lcd_busy_wait(info);
203 if (retval < 0)
204 break;
205
206 lcd_write_control(info, LCD_TEXT_POS(pos));
207
208 retval = lcd_busy_wait(info);
209 if (retval < 0)
210 break;
211
212 lcd_write_data(info, dst[len]);
213 if (pos == 0x0f)
214 pos = 0x40;
215 else
216 pos++;
217 }
218
219 if (retval < 0 && signal_pending(current))
220 return -ERESTARTSYS;
221
222 *ppos += len;
223
224 return len;
225}
226
227static int cobalt_lcdfb_blank(int blank_mode, struct fb_info *info)
228{
229 int retval;
230
231 retval = lcd_busy_wait(info);
232 if (retval < 0)
233 return retval;
234
235 switch (blank_mode) {
236 case FB_BLANK_UNBLANK:
237 lcd_write_control(info, LCD_ON);
238 break;
239 default:
240 lcd_write_control(info, LCD_OFF);
241 break;
242 }
243
244 return 0;
245}
246
247static int cobalt_lcdfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
248{
249 u32 x, y;
250 int retval;
251
252 switch (cursor->set) {
253 case FB_CUR_SETPOS:
254 x = cursor->image.dx;
255 y = cursor->image.dy;
256 if (x >= LCD_XRES_MAX || y >= LCD_YRES_MAX)
257 return -EINVAL;
258
259 retval = lcd_busy_wait(info);
260 if (retval < 0)
261 return retval;
262
263 lcd_write_control(info,
264 LCD_TEXT_POS(info->fix.line_length * y + x));
265 break;
266 default:
267 return -EINVAL;
268 }
269
270 retval = lcd_busy_wait(info);
271 if (retval < 0)
272 return retval;
273
274 if (cursor->enable)
275 lcd_write_control(info, LCD_CURSOR_ON);
276 else
277 lcd_write_control(info, LCD_CURSOR_OFF);
278
279 return 0;
280}
281
282static struct fb_ops cobalt_lcd_fbops = {
283 .owner = THIS_MODULE,
284 .fb_read = cobalt_lcdfb_read,
285 .fb_write = cobalt_lcdfb_write,
286 .fb_blank = cobalt_lcdfb_blank,
287 .fb_cursor = cobalt_lcdfb_cursor,
288};
289
290static int __init cobalt_lcdfb_probe(struct platform_device *dev)
291{
292 struct fb_info *info;
293 struct resource *res;
294 int retval;
295
296 info = framebuffer_alloc(0, &dev->dev);
297 if (!info)
298 return -ENOMEM;
299
300 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
301 if (!res) {
302 framebuffer_release(info);
303 return -EBUSY;
304 }
305
306 info->screen_size = res->end - res->start + 1;
307 info->screen_base = ioremap(res->start, info->screen_size);
308 info->fbops = &cobalt_lcd_fbops;
309 info->fix = cobalt_lcdfb_fix;
310 info->fix.smem_start = res->start;
311 info->fix.smem_len = info->screen_size;
312 info->pseudo_palette = NULL;
313 info->par = NULL;
314 info->flags = FBINFO_DEFAULT;
315
316 retval = register_framebuffer(info);
317 if (retval < 0) {
318 iounmap(info->screen_base);
319 framebuffer_release(info);
320 return retval;
321 }
322
323 platform_set_drvdata(dev, info);
324
325 lcd_clear(info);
326
327 printk(KERN_INFO "fb%d: Cobalt server LCD frame buffer device\n",
328 info->node);
329
330 return 0;
331}
332
333static int __devexit cobalt_lcdfb_remove(struct platform_device *dev)
334{
335 struct fb_info *info;
336
337 info = platform_get_drvdata(dev);
338 if (info) {
339 iounmap(info->screen_base);
340 unregister_framebuffer(info);
341 framebuffer_release(info);
342 }
343
344 return 0;
345}
346
347static struct platform_driver cobalt_lcdfb_driver = {
348 .probe = cobalt_lcdfb_probe,
349 .remove = __devexit_p(cobalt_lcdfb_remove),
350 .driver = {
351 .name = "cobalt-lcd",
352 .owner = THIS_MODULE,
353 },
354};
355
356static int __init cobalt_lcdfb_init(void)
357{
358 return platform_driver_register(&cobalt_lcdfb_driver);
359}
360
361static void __exit cobalt_lcdfb_exit(void)
362{
363 platform_driver_unregister(&cobalt_lcdfb_driver);
364}
365
366module_init(cobalt_lcdfb_init);
367module_exit(cobalt_lcdfb_exit);
368
369MODULE_LICENSE("GPL v2");
370MODULE_AUTHOR("Yoichi Yuasa");
371MODULE_DESCRIPTION("Cobalt server LCD frame buffer driver");
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 4be3b46c069b..3ccfa76d9b2a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -107,9 +107,7 @@ static struct display fb_display[MAX_NR_CONSOLES];
107 107
108static signed char con2fb_map[MAX_NR_CONSOLES]; 108static signed char con2fb_map[MAX_NR_CONSOLES];
109static signed char con2fb_map_boot[MAX_NR_CONSOLES]; 109static signed char con2fb_map_boot[MAX_NR_CONSOLES];
110#ifndef MODULE 110
111static int logo_height;
112#endif
113static int logo_lines; 111static int logo_lines;
114/* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO 112/* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
115 enums. */ 113 enums. */
@@ -607,6 +605,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
607 struct fbcon_ops *ops = info->fbcon_par; 605 struct fbcon_ops *ops = info->fbcon_par;
608 int cnt, erase = vc->vc_video_erase_char, step; 606 int cnt, erase = vc->vc_video_erase_char, step;
609 unsigned short *save = NULL, *r, *q; 607 unsigned short *save = NULL, *r, *q;
608 int logo_height;
610 609
611 if (info->flags & FBINFO_MODULE) { 610 if (info->flags & FBINFO_MODULE) {
612 logo_shown = FBCON_LOGO_DONTSHOW; 611 logo_shown = FBCON_LOGO_DONTSHOW;
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 0135e0395456..de1b1365279b 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -92,7 +92,7 @@ struct fbcon_ops {
92#define attr_fgcol(fgshift,s) \ 92#define attr_fgcol(fgshift,s) \
93 (((s) >> (fgshift)) & 0x0f) 93 (((s) >> (fgshift)) & 0x0f)
94#define attr_bgcol(bgshift,s) \ 94#define attr_bgcol(bgshift,s) \
95 (((s) >> (bgshift)) & 0x0f) 95 (((s) >> (bgshift)) & 0x07)
96 96
97/* Monochrome */ 97/* Monochrome */
98#define attr_bold(s) \ 98#define attr_bold(s) \
@@ -146,10 +146,8 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
146 return is_fg ? fg : bg; 146 return is_fg ? fg : bg;
147} 147}
148 148
149#define attr_bgcol_ec(bgshift,vc,info) \ 149#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
150 attr_col_ec(bgshift,vc,info,0); 150#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
151#define attr_fgcol_ec(fgshift,vc,info) \
152 attr_col_ec(fgshift,vc,info,1);
153 151
154/* Font */ 152/* Font */
155#define REFCOUNT(fd) (((int *)(fd))[-1]) 153#define REFCOUNT(fd) (((int *)(fd))[-1])
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index 38a296bbdfc9..9901064199bd 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -71,13 +71,15 @@ static char *mda_type_name;
71 71
72/* console information */ 72/* console information */
73 73
74static int mda_first_vc = 1; 74static int mda_first_vc = 13;
75static int mda_last_vc = 16; 75static int mda_last_vc = 16;
76 76
77static struct vc_data *mda_display_fg = NULL; 77static struct vc_data *mda_display_fg = NULL;
78 78
79module_param(mda_first_vc, int, 0); 79module_param(mda_first_vc, int, 0);
80MODULE_PARM_DESC(mda_first_vc, "First virtual console. Default: 13");
80module_param(mda_last_vc, int, 0); 81module_param(mda_last_vc, int, 0);
82MODULE_PARM_DESC(mda_last_vc, "Last virtual console. Default: 16");
81 83
82/* MDA register values 84/* MDA register values
83 */ 85 */
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 1cd5071e5362..5d84b3431098 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -35,6 +35,7 @@
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/efi.h> 36#include <linux/efi.h>
37#include <linux/fb.h> 37#include <linux/fb.h>
38#include <linux/major.h>
38 39
39#include <asm/fb.h> 40#include <asm/fb.h>
40 41
@@ -848,9 +849,8 @@ int
848fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var) 849fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
849{ 850{
850 struct fb_fix_screeninfo *fix = &info->fix; 851 struct fb_fix_screeninfo *fix = &info->fix;
851 int xoffset = var->xoffset; 852 unsigned int yres = info->var.yres;
852 int yoffset = var->yoffset; 853 int err = 0;
853 int err = 0, yres = info->var.yres;
854 854
855 if (var->yoffset > 0) { 855 if (var->yoffset > 0) {
856 if (var->vmode & FB_VMODE_YWRAP) { 856 if (var->vmode & FB_VMODE_YWRAP) {
@@ -866,8 +866,8 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
866 (var->xoffset % fix->xpanstep))) 866 (var->xoffset % fix->xpanstep)))
867 err = -EINVAL; 867 err = -EINVAL;
868 868
869 if (err || !info->fbops->fb_pan_display || xoffset < 0 || 869 if (err || !info->fbops->fb_pan_display ||
870 yoffset < 0 || var->yoffset + yres > info->var.yres_virtual || 870 var->yoffset + yres > info->var.yres_virtual ||
871 var->xoffset + info->var.xres > info->var.xres_virtual) 871 var->xoffset + info->var.xres > info->var.xres_virtual)
872 return -EINVAL; 872 return -EINVAL;
873 873
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 052e18058498..6a0aa180c266 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -879,7 +879,7 @@ int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
879 if (edid_is_timing_block(block)) { 879 if (edid_is_timing_block(block)) {
880 var->xres = var->xres_virtual = H_ACTIVE; 880 var->xres = var->xres_virtual = H_ACTIVE;
881 var->yres = var->yres_virtual = V_ACTIVE; 881 var->yres = var->yres_virtual = V_ACTIVE;
882 var->height = var->width = -1; 882 var->height = var->width = 0;
883 var->right_margin = H_SYNC_OFFSET; 883 var->right_margin = H_SYNC_OFFSET;
884 var->left_margin = (H_ACTIVE + H_BLANKING) - 884 var->left_margin = (H_ACTIVE + H_BLANKING) -
885 (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH); 885 (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 09d7e22c6fef..9cd36c223d33 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -279,58 +279,42 @@ static struct diu_hw dr = {
279 279
280static struct diu_pool pool; 280static struct diu_pool pool;
281 281
282/* To allocate memory for framebuffer. First try __get_free_pages(). If it 282/**
283 * fails, try rh_alloc. The reason is __get_free_pages() cannot allocate 283 * fsl_diu_alloc - allocate memory for the DIU
284 * very large memory (more than 4MB). We don't want to allocate all memory 284 * @size: number of bytes to allocate
285 * in rheap since small memory allocation/deallocation will fragment the 285 * @param: returned physical address of memory
286 * rheap and make the furture large allocation fail. 286 *
287 * This function allocates a physically-contiguous block of memory.
287 */ 288 */
288 289static void *fsl_diu_alloc(size_t size, phys_addr_t *phys)
289static void *fsl_diu_alloc(unsigned long size, phys_addr_t *phys)
290{ 290{
291 void *virt; 291 void *virt;
292 292
293 pr_debug("size=%lu\n", size); 293 pr_debug("size=%zu\n", size);
294 294
295 virt = (void *)__get_free_pages(GFP_DMA | __GFP_ZERO, get_order(size)); 295 virt = alloc_pages_exact(size, GFP_DMA | __GFP_ZERO);
296 if (virt) { 296 if (virt) {
297 *phys = virt_to_phys(virt); 297 *phys = virt_to_phys(virt);
298 pr_debug("virt %p, phys=%llx\n", virt, (uint64_t) *phys); 298 pr_debug("virt=%p phys=%llx\n", virt,
299 return virt; 299 (unsigned long long)*phys);
300 }
301 if (!diu_ops.diu_mem) {
302 printk(KERN_INFO "%s: no diu_mem."
303 " To reserve more memory, put 'diufb=15M' "
304 "in the command line\n", __func__);
305 return NULL;
306 }
307
308 virt = (void *)rh_alloc(&diu_ops.diu_rh_info, size, "DIU");
309 if (virt) {
310 *phys = virt_to_bus(virt);
311 memset(virt, 0, size);
312 } 300 }
313 301
314 pr_debug("rh virt=%p phys=%llx\n", virt, (unsigned long long)*phys);
315
316 return virt; 302 return virt;
317} 303}
318 304
319static void fsl_diu_free(void *p, unsigned long size) 305/**
306 * fsl_diu_free - release DIU memory
307 * @virt: pointer returned by fsl_diu_alloc()
308 * @size: number of bytes allocated by fsl_diu_alloc()
309 *
310 * This function releases memory allocated by fsl_diu_alloc().
311 */
312static void fsl_diu_free(void *virt, size_t size)
320{ 313{
321 pr_debug("p=%p size=%lu\n", p, size); 314 pr_debug("virt=%p size=%zu\n", virt, size);
322 315
323 if (!p) 316 if (virt && size)
324 return; 317 free_pages_exact(virt, size);
325
326 if ((p >= diu_ops.diu_mem) &&
327 (p < (diu_ops.diu_mem + diu_ops.diu_size))) {
328 pr_debug("rh\n");
329 rh_free(&diu_ops.diu_rh_info, (unsigned long) p);
330 } else {
331 pr_debug("dma\n");
332 free_pages((unsigned long)p, get_order(size));
333 }
334} 318}
335 319
336static int fsl_diu_enable_panel(struct fb_info *info) 320static int fsl_diu_enable_panel(struct fb_info *info)
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index 3b9416f4ee20..6a51448fd3f7 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -51,8 +51,6 @@ static inline unsigned int lx_get_pitch(unsigned int xres, int bpp)
51} 51}
52 52
53void lx_set_mode(struct fb_info *); 53void lx_set_mode(struct fb_info *);
54void lx_get_gamma(struct fb_info *, unsigned int *, int);
55void lx_set_gamma(struct fb_info *, unsigned int *, int);
56unsigned int lx_framebuffer_size(void); 54unsigned int lx_framebuffer_size(void);
57int lx_blank_display(struct fb_info *, int); 55int lx_blank_display(struct fb_info *, int);
58void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int, 56void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int,
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c
index aaef9165ec9b..b1cd49c99356 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/geode/lxfb_ops.c
@@ -517,25 +517,25 @@ void lx_set_palette_reg(struct fb_info *info, unsigned regno,
517int lx_blank_display(struct fb_info *info, int blank_mode) 517int lx_blank_display(struct fb_info *info, int blank_mode)
518{ 518{
519 struct lxfb_par *par = info->par; 519 struct lxfb_par *par = info->par;
520 u32 dcfg, fp_pm; 520 u32 dcfg, misc, fp_pm;
521 int blank, hsync, vsync, crt; 521 int blank, hsync, vsync;
522 522
523 /* CRT power saving modes. */ 523 /* CRT power saving modes. */
524 switch (blank_mode) { 524 switch (blank_mode) {
525 case FB_BLANK_UNBLANK: 525 case FB_BLANK_UNBLANK:
526 blank = 0; hsync = 1; vsync = 1; crt = 1; 526 blank = 0; hsync = 1; vsync = 1;
527 break; 527 break;
528 case FB_BLANK_NORMAL: 528 case FB_BLANK_NORMAL:
529 blank = 1; hsync = 1; vsync = 1; crt = 1; 529 blank = 1; hsync = 1; vsync = 1;
530 break; 530 break;
531 case FB_BLANK_VSYNC_SUSPEND: 531 case FB_BLANK_VSYNC_SUSPEND:
532 blank = 1; hsync = 1; vsync = 0; crt = 1; 532 blank = 1; hsync = 1; vsync = 0;
533 break; 533 break;
534 case FB_BLANK_HSYNC_SUSPEND: 534 case FB_BLANK_HSYNC_SUSPEND:
535 blank = 1; hsync = 0; vsync = 1; crt = 1; 535 blank = 1; hsync = 0; vsync = 1;
536 break; 536 break;
537 case FB_BLANK_POWERDOWN: 537 case FB_BLANK_POWERDOWN:
538 blank = 1; hsync = 0; vsync = 0; crt = 0; 538 blank = 1; hsync = 0; vsync = 0;
539 break; 539 break;
540 default: 540 default:
541 return -EINVAL; 541 return -EINVAL;
@@ -545,15 +545,23 @@ int lx_blank_display(struct fb_info *info, int blank_mode)
545 dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN | 545 dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
546 VP_DCFG_CRT_EN); 546 VP_DCFG_CRT_EN);
547 if (!blank) 547 if (!blank)
548 dcfg |= VP_DCFG_DAC_BL_EN; 548 dcfg |= VP_DCFG_DAC_BL_EN | VP_DCFG_CRT_EN;
549 if (hsync) 549 if (hsync)
550 dcfg |= VP_DCFG_HSYNC_EN; 550 dcfg |= VP_DCFG_HSYNC_EN;
551 if (vsync) 551 if (vsync)
552 dcfg |= VP_DCFG_VSYNC_EN; 552 dcfg |= VP_DCFG_VSYNC_EN;
553 if (crt) 553
554 dcfg |= VP_DCFG_CRT_EN;
555 write_vp(par, VP_DCFG, dcfg); 554 write_vp(par, VP_DCFG, dcfg);
556 555
556 misc = read_vp(par, VP_MISC);
557
558 if (vsync && hsync)
559 misc &= ~VP_MISC_DACPWRDN;
560 else
561 misc |= VP_MISC_DACPWRDN;
562
563 write_vp(par, VP_MISC, misc);
564
557 /* Power on/off flat panel */ 565 /* Power on/off flat panel */
558 566
559 if (par->output & OUTPUT_PANEL) { 567 if (par->output & OUTPUT_PANEL) {
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index c18880d9db1f..0129c044f6d6 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -551,7 +551,7 @@ static struct fb_ops hgafb_ops = {
551 * Initialization 551 * Initialization
552 */ 552 */
553 553
554static int __init hgafb_probe(struct device *device) 554static int __init hgafb_probe(struct platform_device *pdev)
555{ 555{
556 struct fb_info *info; 556 struct fb_info *info;
557 557
@@ -565,7 +565,7 @@ static int __init hgafb_probe(struct device *device)
565 printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n", 565 printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
566 hga_type_name, hga_vram_len/1024); 566 hga_type_name, hga_vram_len/1024);
567 567
568 info = framebuffer_alloc(0, NULL); 568 info = framebuffer_alloc(0, &pdev->dev);
569 if (!info) { 569 if (!info) {
570 iounmap(hga_vram); 570 iounmap(hga_vram);
571 return -ENOMEM; 571 return -ENOMEM;
@@ -593,13 +593,13 @@ static int __init hgafb_probe(struct device *device)
593 593
594 printk(KERN_INFO "fb%d: %s frame buffer device\n", 594 printk(KERN_INFO "fb%d: %s frame buffer device\n",
595 info->node, info->fix.id); 595 info->node, info->fix.id);
596 dev_set_drvdata(device, info); 596 platform_set_drvdata(pdev, info);
597 return 0; 597 return 0;
598} 598}
599 599
600static int hgafb_remove(struct device *device) 600static int hgafb_remove(struct platform_device *pdev)
601{ 601{
602 struct fb_info *info = dev_get_drvdata(device); 602 struct fb_info *info = platform_get_drvdata(pdev);
603 603
604 hga_txt_mode(); 604 hga_txt_mode();
605 hga_clear_screen(); 605 hga_clear_screen();
@@ -620,16 +620,15 @@ static int hgafb_remove(struct device *device)
620 return 0; 620 return 0;
621} 621}
622 622
623static struct device_driver hgafb_driver = { 623static struct platform_driver hgafb_driver = {
624 .name = "hgafb",
625 .bus = &platform_bus_type,
626 .probe = hgafb_probe, 624 .probe = hgafb_probe,
627 .remove = hgafb_remove, 625 .remove = hgafb_remove,
626 .driver = {
627 .name = "hgafb",
628 },
628}; 629};
629 630
630static struct platform_device hgafb_device = { 631static struct platform_device *hgafb_device;
631 .name = "hgafb",
632};
633 632
634static int __init hgafb_init(void) 633static int __init hgafb_init(void)
635{ 634{
@@ -638,12 +637,15 @@ static int __init hgafb_init(void)
638 if (fb_get_options("hgafb", NULL)) 637 if (fb_get_options("hgafb", NULL))
639 return -ENODEV; 638 return -ENODEV;
640 639
641 ret = driver_register(&hgafb_driver); 640 ret = platform_driver_register(&hgafb_driver);
642 641
643 if (!ret) { 642 if (!ret) {
644 ret = platform_device_register(&hgafb_device); 643 hgafb_device = platform_device_register_simple("hgafb", 0, NULL, 0);
645 if (ret) 644
646 driver_unregister(&hgafb_driver); 645 if (IS_ERR(hgafb_device)) {
646 platform_driver_unregister(&hgafb_driver);
647 ret = PTR_ERR(hgafb_device);
648 }
647 } 649 }
648 650
649 return ret; 651 return ret;
@@ -651,8 +653,8 @@ static int __init hgafb_init(void)
651 653
652static void __exit hgafb_exit(void) 654static void __exit hgafb_exit(void)
653{ 655{
654 platform_device_unregister(&hgafb_device); 656 platform_device_unregister(hgafb_device);
655 driver_unregister(&hgafb_driver); 657 platform_driver_unregister(&hgafb_driver);
656} 658}
657 659
658/* ------------------------------------------------------------------------- 660/* -------------------------------------------------------------------------
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 94e4d3ac1a05..0c5a475c1cae 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -24,6 +24,7 @@
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/mm.h>
27#include <linux/fb.h> 28#include <linux/fb.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
29#include <linux/init.h> 30#include <linux/init.h>
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 5246b0402d76..25172b2a2a94 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -201,7 +201,6 @@ static int neoFindMode(int xres, int yres, int depth)
201 * 201 *
202 * Determine the closest clock frequency to the one requested. 202 * Determine the closest clock frequency to the one requested.
203 */ 203 */
204#define REF_FREQ 0xe517 /* 14.31818 in 20.12 fixed point */
205#define MAX_N 127 204#define MAX_N 127
206#define MAX_D 31 205#define MAX_D 31
207#define MAX_F 1 206#define MAX_F 1
@@ -211,27 +210,24 @@ static void neoCalcVCLK(const struct fb_info *info,
211{ 210{
212 int n, d, f; 211 int n, d, f;
213 int n_best = 0, d_best = 0, f_best = 0; 212 int n_best = 0, d_best = 0, f_best = 0;
214 long f_best_diff = (0x7ffff << 12); /* 20.12 */ 213 long f_best_diff = 0x7ffff;
215 long f_target = (freq << 12) / 1000; /* 20.12 */
216 214
217 for (f = 0; f <= MAX_F; f++) 215 for (f = 0; f <= MAX_F; f++)
218 for (n = 0; n <= MAX_N; n++) 216 for (d = 0; d <= MAX_D; d++)
219 for (d = 0; d <= MAX_D; d++) { 217 for (n = 0; n <= MAX_N; n++) {
220 long f_out; /* 20.12 */ 218 long f_out;
221 long f_diff; /* 20.12 */ 219 long f_diff;
222 220
223 f_out = 221 f_out = ((14318 * (n + 1)) / (d + 1)) >> f;
224 ((((n + 1) << 12) / ((d + 222 f_diff = abs(f_out - freq);
225 1) * 223 if (f_diff <= f_best_diff) {
226 (1 << f))) >> 12)
227 * REF_FREQ;
228 f_diff = abs(f_out - f_target);
229 if (f_diff < f_best_diff) {
230 f_best_diff = f_diff; 224 f_best_diff = f_diff;
231 n_best = n; 225 n_best = n;
232 d_best = d; 226 d_best = d;
233 f_best = f; 227 f_best = f;
234 } 228 }
229 if (f_out > freq)
230 break;
235 } 231 }
236 232
237 if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 || 233 if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
@@ -248,11 +244,11 @@ static void neoCalcVCLK(const struct fb_info *info,
248 par->VCLK3Denominator = d_best; 244 par->VCLK3Denominator = d_best;
249 245
250#ifdef NEOFB_DEBUG 246#ifdef NEOFB_DEBUG
251 printk("neoVCLK: f:%d NumLow=%d NumHi=%d Den=%d Df=%d\n", 247 printk(KERN_DEBUG "neoVCLK: f:%ld NumLow=%d NumHi=%d Den=%d Df=%ld\n",
252 f_target >> 12, 248 freq,
253 par->VCLK3NumeratorLow, 249 par->VCLK3NumeratorLow,
254 par->VCLK3NumeratorHigh, 250 par->VCLK3NumeratorHigh,
255 par->VCLK3Denominator, f_best_diff >> 12); 251 par->VCLK3Denominator, f_best_diff);
256#endif 252#endif
257} 253}
258 254
@@ -263,15 +259,20 @@ static void neoCalcVCLK(const struct fb_info *info,
263 */ 259 */
264 260
265static int vgaHWInit(const struct fb_var_screeninfo *var, 261static int vgaHWInit(const struct fb_var_screeninfo *var,
266 const struct fb_info *info, 262 struct neofb_par *par)
267 struct neofb_par *par, struct xtimings *timings)
268{ 263{
264 int hsync_end = var->xres + var->right_margin + var->hsync_len;
265 int htotal = (hsync_end + var->left_margin) >> 3;
266 int vsync_start = var->yres + var->lower_margin;
267 int vsync_end = vsync_start + var->vsync_len;
268 int vtotal = vsync_end + var->upper_margin;
269
269 par->MiscOutReg = 0x23; 270 par->MiscOutReg = 0x23;
270 271
271 if (!(timings->sync & FB_SYNC_HOR_HIGH_ACT)) 272 if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
272 par->MiscOutReg |= 0x40; 273 par->MiscOutReg |= 0x40;
273 274
274 if (!(timings->sync & FB_SYNC_VERT_HIGH_ACT)) 275 if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
275 par->MiscOutReg |= 0x80; 276 par->MiscOutReg |= 0x80;
276 277
277 /* 278 /*
@@ -286,25 +287,25 @@ static int vgaHWInit(const struct fb_var_screeninfo *var,
286 /* 287 /*
287 * CRTC Controller 288 * CRTC Controller
288 */ 289 */
289 par->CRTC[0] = (timings->HTotal >> 3) - 5; 290 par->CRTC[0] = htotal - 5;
290 par->CRTC[1] = (timings->HDisplay >> 3) - 1; 291 par->CRTC[1] = (var->xres >> 3) - 1;
291 par->CRTC[2] = (timings->HDisplay >> 3) - 1; 292 par->CRTC[2] = (var->xres >> 3) - 1;
292 par->CRTC[3] = (((timings->HTotal >> 3) - 1) & 0x1F) | 0x80; 293 par->CRTC[3] = ((htotal - 1) & 0x1F) | 0x80;
293 par->CRTC[4] = (timings->HSyncStart >> 3); 294 par->CRTC[4] = ((var->xres + var->right_margin) >> 3);
294 par->CRTC[5] = ((((timings->HTotal >> 3) - 1) & 0x20) << 2) 295 par->CRTC[5] = (((htotal - 1) & 0x20) << 2)
295 | (((timings->HSyncEnd >> 3)) & 0x1F); 296 | (((hsync_end >> 3)) & 0x1F);
296 par->CRTC[6] = (timings->VTotal - 2) & 0xFF; 297 par->CRTC[6] = (vtotal - 2) & 0xFF;
297 par->CRTC[7] = (((timings->VTotal - 2) & 0x100) >> 8) 298 par->CRTC[7] = (((vtotal - 2) & 0x100) >> 8)
298 | (((timings->VDisplay - 1) & 0x100) >> 7) 299 | (((var->yres - 1) & 0x100) >> 7)
299 | ((timings->VSyncStart & 0x100) >> 6) 300 | ((vsync_start & 0x100) >> 6)
300 | (((timings->VDisplay - 1) & 0x100) >> 5) 301 | (((var->yres - 1) & 0x100) >> 5)
301 | 0x10 | (((timings->VTotal - 2) & 0x200) >> 4) 302 | 0x10 | (((vtotal - 2) & 0x200) >> 4)
302 | (((timings->VDisplay - 1) & 0x200) >> 3) 303 | (((var->yres - 1) & 0x200) >> 3)
303 | ((timings->VSyncStart & 0x200) >> 2); 304 | ((vsync_start & 0x200) >> 2);
304 par->CRTC[8] = 0x00; 305 par->CRTC[8] = 0x00;
305 par->CRTC[9] = (((timings->VDisplay - 1) & 0x200) >> 4) | 0x40; 306 par->CRTC[9] = (((var->yres - 1) & 0x200) >> 4) | 0x40;
306 307
307 if (timings->dblscan) 308 if (var->vmode & FB_VMODE_DOUBLE)
308 par->CRTC[9] |= 0x80; 309 par->CRTC[9] |= 0x80;
309 310
310 par->CRTC[10] = 0x00; 311 par->CRTC[10] = 0x00;
@@ -313,13 +314,13 @@ static int vgaHWInit(const struct fb_var_screeninfo *var,
313 par->CRTC[13] = 0x00; 314 par->CRTC[13] = 0x00;
314 par->CRTC[14] = 0x00; 315 par->CRTC[14] = 0x00;
315 par->CRTC[15] = 0x00; 316 par->CRTC[15] = 0x00;
316 par->CRTC[16] = timings->VSyncStart & 0xFF; 317 par->CRTC[16] = vsync_start & 0xFF;
317 par->CRTC[17] = (timings->VSyncEnd & 0x0F) | 0x20; 318 par->CRTC[17] = (vsync_end & 0x0F) | 0x20;
318 par->CRTC[18] = (timings->VDisplay - 1) & 0xFF; 319 par->CRTC[18] = (var->yres - 1) & 0xFF;
319 par->CRTC[19] = var->xres_virtual >> 4; 320 par->CRTC[19] = var->xres_virtual >> 4;
320 par->CRTC[20] = 0x00; 321 par->CRTC[20] = 0x00;
321 par->CRTC[21] = (timings->VDisplay - 1) & 0xFF; 322 par->CRTC[21] = (var->yres - 1) & 0xFF;
322 par->CRTC[22] = (timings->VTotal - 1) & 0xFF; 323 par->CRTC[22] = (vtotal - 1) & 0xFF;
323 par->CRTC[23] = 0xC3; 324 par->CRTC[23] = 0xC3;
324 par->CRTC[24] = 0xFF; 325 par->CRTC[24] = 0xFF;
325 326
@@ -483,7 +484,8 @@ static inline int neo2200_sync(struct fb_info *info)
483{ 484{
484 struct neofb_par *par = info->par; 485 struct neofb_par *par = info->par;
485 486
486 while (readl(&par->neo2200->bltStat) & 1); 487 while (readl(&par->neo2200->bltStat) & 1)
488 cpu_relax();
487 return 0; 489 return 0;
488} 490}
489 491
@@ -591,34 +593,14 @@ static int
591neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 593neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
592{ 594{
593 struct neofb_par *par = info->par; 595 struct neofb_par *par = info->par;
594 unsigned int pixclock = var->pixclock;
595 struct xtimings timings;
596 int memlen, vramlen; 596 int memlen, vramlen;
597 int mode_ok = 0; 597 int mode_ok = 0;
598 598
599 DBG("neofb_check_var"); 599 DBG("neofb_check_var");
600 600
601 if (!pixclock) 601 if (PICOS2KHZ(var->pixclock) > par->maxClock)
602 pixclock = 10000; /* 10ns = 100MHz */
603 timings.pixclock = 1000000000 / pixclock;
604 if (timings.pixclock < 1)
605 timings.pixclock = 1;
606
607 if (timings.pixclock > par->maxClock)
608 return -EINVAL; 602 return -EINVAL;
609 603
610 timings.dblscan = var->vmode & FB_VMODE_DOUBLE;
611 timings.interlaced = var->vmode & FB_VMODE_INTERLACED;
612 timings.HDisplay = var->xres;
613 timings.HSyncStart = timings.HDisplay + var->right_margin;
614 timings.HSyncEnd = timings.HSyncStart + var->hsync_len;
615 timings.HTotal = timings.HSyncEnd + var->left_margin;
616 timings.VDisplay = var->yres;
617 timings.VSyncStart = timings.VDisplay + var->lower_margin;
618 timings.VSyncEnd = timings.VSyncStart + var->vsync_len;
619 timings.VTotal = timings.VSyncEnd + var->upper_margin;
620 timings.sync = var->sync;
621
622 /* Is the mode larger than the LCD panel? */ 604 /* Is the mode larger than the LCD panel? */
623 if (par->internal_display && 605 if (par->internal_display &&
624 ((var->xres > par->NeoPanelWidth) || 606 ((var->xres > par->NeoPanelWidth) ||
@@ -759,11 +741,11 @@ neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
759static int neofb_set_par(struct fb_info *info) 741static int neofb_set_par(struct fb_info *info)
760{ 742{
761 struct neofb_par *par = info->par; 743 struct neofb_par *par = info->par;
762 struct xtimings timings;
763 unsigned char temp; 744 unsigned char temp;
764 int i, clock_hi = 0; 745 int i, clock_hi = 0;
765 int lcd_stretch; 746 int lcd_stretch;
766 int hoffset, voffset; 747 int hoffset, voffset;
748 int vsync_start, vtotal;
767 749
768 DBG("neofb_set_par"); 750 DBG("neofb_set_par");
769 751
@@ -771,28 +753,15 @@ static int neofb_set_par(struct fb_info *info)
771 753
772 vgaHWProtect(1); /* Blank the screen */ 754 vgaHWProtect(1); /* Blank the screen */
773 755
774 timings.dblscan = info->var.vmode & FB_VMODE_DOUBLE; 756 vsync_start = info->var.yres + info->var.lower_margin;
775 timings.interlaced = info->var.vmode & FB_VMODE_INTERLACED; 757 vtotal = vsync_start + info->var.vsync_len + info->var.upper_margin;
776 timings.HDisplay = info->var.xres;
777 timings.HSyncStart = timings.HDisplay + info->var.right_margin;
778 timings.HSyncEnd = timings.HSyncStart + info->var.hsync_len;
779 timings.HTotal = timings.HSyncEnd + info->var.left_margin;
780 timings.VDisplay = info->var.yres;
781 timings.VSyncStart = timings.VDisplay + info->var.lower_margin;
782 timings.VSyncEnd = timings.VSyncStart + info->var.vsync_len;
783 timings.VTotal = timings.VSyncEnd + info->var.upper_margin;
784 timings.sync = info->var.sync;
785 timings.pixclock = PICOS2KHZ(info->var.pixclock);
786
787 if (timings.pixclock < 1)
788 timings.pixclock = 1;
789 758
790 /* 759 /*
791 * This will allocate the datastructure and initialize all of the 760 * This will allocate the datastructure and initialize all of the
792 * generic VGA registers. 761 * generic VGA registers.
793 */ 762 */
794 763
795 if (vgaHWInit(&info->var, info, par, &timings)) 764 if (vgaHWInit(&info->var, par))
796 return -EINVAL; 765 return -EINVAL;
797 766
798 /* 767 /*
@@ -831,10 +800,10 @@ static int neofb_set_par(struct fb_info *info)
831 par->ExtCRTDispAddr = 0x10; 800 par->ExtCRTDispAddr = 0x10;
832 801
833 /* Vertical Extension */ 802 /* Vertical Extension */
834 par->VerticalExt = (((timings.VTotal - 2) & 0x400) >> 10) 803 par->VerticalExt = (((vtotal - 2) & 0x400) >> 10)
835 | (((timings.VDisplay - 1) & 0x400) >> 9) 804 | (((info->var.yres - 1) & 0x400) >> 9)
836 | (((timings.VSyncStart) & 0x400) >> 8) 805 | (((vsync_start) & 0x400) >> 8)
837 | (((timings.VSyncStart) & 0x400) >> 7); 806 | (((vsync_start) & 0x400) >> 7);
838 807
839 /* Fast write bursts on unless disabled. */ 808 /* Fast write bursts on unless disabled. */
840 if (par->pci_burst) 809 if (par->pci_burst)
@@ -995,7 +964,7 @@ static int neofb_set_par(struct fb_info *info)
995 * Calculate the VCLK that most closely matches the requested dot 964 * Calculate the VCLK that most closely matches the requested dot
996 * clock. 965 * clock.
997 */ 966 */
998 neoCalcVCLK(info, par, timings.pixclock); 967 neoCalcVCLK(info, par, PICOS2KHZ(info->var.pixclock));
999 968
1000 /* Since we program the clocks ourselves, always use VCLK3. */ 969 /* Since we program the clocks ourselves, always use VCLK3. */
1001 par->MiscOutReg |= 0x0C; 970 par->MiscOutReg |= 0x0C;
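Both neofb_check_var() and the VCLK calculation above now work directly from fb_var_screeninfo instead of the removed struct xtimings. fb_var_screeninfo.pixclock is the pixel period in picoseconds, and PICOS2KHZ() from <linux/fb.h> converts it to the kHz figure that par->maxClock and neoCalcVCLK() expect. A minimal sketch of the limit test (helper name hypothetical; the explicit zero check stands in for the old fallback to a 10000 ps clock):

	#include <linux/fb.h>	/* PICOS2KHZ(a): 1000000000UL / (a) */

	/* Sketch: true if the requested dot clock fits the hardware limit (kHz). */
	static int pixclock_ok(const struct fb_var_screeninfo *var, int max_khz)
	{
		return var->pixclock && PICOS2KHZ(var->pixclock) <= max_khz;
	}
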
@@ -1927,9 +1896,6 @@ static int __devinit neo_init_hw(struct fb_info *info)
1927 int maxClock = 65000; 1896 int maxClock = 65000;
1928 int CursorMem = 1024; 1897 int CursorMem = 1024;
1929 int CursorOff = 0x100; 1898 int CursorOff = 0x100;
1930 int linearSize = 1024;
1931 int maxWidth = 1024;
1932 int maxHeight = 1024;
1933 1899
1934 DBG("neo_init_hw"); 1900 DBG("neo_init_hw");
1935 1901
@@ -1948,81 +1914,52 @@ static int __devinit neo_init_hw(struct fb_info *info)
1948 case FB_ACCEL_NEOMAGIC_NM2070: 1914 case FB_ACCEL_NEOMAGIC_NM2070:
1949 videoRam = 896; 1915 videoRam = 896;
1950 maxClock = 65000; 1916 maxClock = 65000;
1951 CursorMem = 2048;
1952 CursorOff = 0x100;
1953 linearSize = 1024;
1954 maxWidth = 1024;
1955 maxHeight = 1024;
1956 break; 1917 break;
1957 case FB_ACCEL_NEOMAGIC_NM2090: 1918 case FB_ACCEL_NEOMAGIC_NM2090:
1958 case FB_ACCEL_NEOMAGIC_NM2093: 1919 case FB_ACCEL_NEOMAGIC_NM2093:
1959 videoRam = 1152;
1960 maxClock = 80000;
1961 CursorMem = 2048;
1962 CursorOff = 0x100;
1963 linearSize = 2048;
1964 maxWidth = 1024;
1965 maxHeight = 1024;
1966 break;
1967 case FB_ACCEL_NEOMAGIC_NM2097: 1920 case FB_ACCEL_NEOMAGIC_NM2097:
1968 videoRam = 1152; 1921 videoRam = 1152;
1969 maxClock = 80000; 1922 maxClock = 80000;
1970 CursorMem = 1024;
1971 CursorOff = 0x100;
1972 linearSize = 2048;
1973 maxWidth = 1024;
1974 maxHeight = 1024;
1975 break; 1923 break;
1976 case FB_ACCEL_NEOMAGIC_NM2160: 1924 case FB_ACCEL_NEOMAGIC_NM2160:
1977 videoRam = 2048; 1925 videoRam = 2048;
1978 maxClock = 90000; 1926 maxClock = 90000;
1979 CursorMem = 1024;
1980 CursorOff = 0x100;
1981 linearSize = 2048;
1982 maxWidth = 1024;
1983 maxHeight = 1024;
1984 break; 1927 break;
1985 case FB_ACCEL_NEOMAGIC_NM2200: 1928 case FB_ACCEL_NEOMAGIC_NM2200:
1986 videoRam = 2560; 1929 videoRam = 2560;
1987 maxClock = 110000; 1930 maxClock = 110000;
1988 CursorMem = 1024;
1989 CursorOff = 0x1000;
1990 linearSize = 4096;
1991 maxWidth = 1280;
1992 maxHeight = 1024; /* ???? */
1993
1994 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
1995 break; 1931 break;
1996 case FB_ACCEL_NEOMAGIC_NM2230: 1932 case FB_ACCEL_NEOMAGIC_NM2230:
1997 videoRam = 3008; 1933 videoRam = 3008;
1998 maxClock = 110000; 1934 maxClock = 110000;
1999 CursorMem = 1024;
2000 CursorOff = 0x1000;
2001 linearSize = 4096;
2002 maxWidth = 1280;
2003 maxHeight = 1024; /* ???? */
2004
2005 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
2006 break; 1935 break;
2007 case FB_ACCEL_NEOMAGIC_NM2360: 1936 case FB_ACCEL_NEOMAGIC_NM2360:
2008 videoRam = 4096; 1937 videoRam = 4096;
2009 maxClock = 110000; 1938 maxClock = 110000;
2010 CursorMem = 1024;
2011 CursorOff = 0x1000;
2012 linearSize = 4096;
2013 maxWidth = 1280;
2014 maxHeight = 1024; /* ???? */
2015
2016 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
2017 break; 1939 break;
2018 case FB_ACCEL_NEOMAGIC_NM2380: 1940 case FB_ACCEL_NEOMAGIC_NM2380:
2019 videoRam = 6144; 1941 videoRam = 6144;
2020 maxClock = 110000; 1942 maxClock = 110000;
1943 break;
1944 }
1945 switch (info->fix.accel) {
1946 case FB_ACCEL_NEOMAGIC_NM2070:
1947 case FB_ACCEL_NEOMAGIC_NM2090:
1948 case FB_ACCEL_NEOMAGIC_NM2093:
1949 CursorMem = 2048;
1950 CursorOff = 0x100;
1951 break;
1952 case FB_ACCEL_NEOMAGIC_NM2097:
1953 case FB_ACCEL_NEOMAGIC_NM2160:
1954 CursorMem = 1024;
1955 CursorOff = 0x100;
1956 break;
1957 case FB_ACCEL_NEOMAGIC_NM2200:
1958 case FB_ACCEL_NEOMAGIC_NM2230:
1959 case FB_ACCEL_NEOMAGIC_NM2360:
1960 case FB_ACCEL_NEOMAGIC_NM2380:
2021 CursorMem = 1024; 1961 CursorMem = 1024;
2022 CursorOff = 0x1000; 1962 CursorOff = 0x1000;
2023 linearSize = 8192;
2024 maxWidth = 1280;
2025 maxHeight = 1024; /* ???? */
2026 1963
2027 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase; 1964 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
2028 break; 1965 break;
@@ -2036,7 +1973,7 @@ static int __devinit neo_init_hw(struct fb_info *info)
2036*/ 1973*/
2037 par->maxClock = maxClock; 1974 par->maxClock = maxClock;
2038 par->cursorOff = CursorOff; 1975 par->cursorOff = CursorOff;
2039 return ((videoRam * 1024)); 1976 return videoRam * 1024;
2040} 1977}
2041 1978
2042 1979
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index ab32ceb06178..ab77c51fe9d6 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -20,6 +20,7 @@
20 */ 20 */
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/mm.h>
23#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
24#include <linux/clk.h> 25#include <linux/clk.h>
25#include <linux/io.h> 26#include <linux/io.h>
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 14d0f7a11145..f85af5c4fa68 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -25,6 +25,7 @@
25 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 25 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */ 26 */
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/mm.h>
28#include <linux/uaccess.h> 29#include <linux/uaccess.h>
29 30
30#include <asm/mach-types.h> 31#include <asm/mach-types.h>
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index d0746261c957..2b707a8ce5de 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -30,6 +30,7 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/mm.h>
33#include <linux/fb.h> 34#include <linux/fb.h>
34#include <linux/delay.h> 35#include <linux/delay.h>
35#include <linux/init.h> 36#include <linux/init.h>
@@ -40,6 +41,7 @@
40#include <linux/clk.h> 41#include <linux/clk.h>
41#include <linux/err.h> 42#include <linux/err.h>
42#include <linux/completion.h> 43#include <linux/completion.h>
44#include <linux/mutex.h>
43#include <linux/kthread.h> 45#include <linux/kthread.h>
44#include <linux/freezer.h> 46#include <linux/freezer.h>
45 47
@@ -227,6 +229,22 @@ static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
227 case 4: ret = LCCR3_4BPP; break; 229 case 4: ret = LCCR3_4BPP; break;
228 case 8: ret = LCCR3_8BPP; break; 230 case 8: ret = LCCR3_8BPP; break;
229 case 16: ret = LCCR3_16BPP; break; 231 case 16: ret = LCCR3_16BPP; break;
232 case 24:
233 switch (var->red.length + var->green.length +
234 var->blue.length + var->transp.length) {
235 case 18: ret = LCCR3_18BPP_P | LCCR3_PDFOR_3; break;
236 case 19: ret = LCCR3_19BPP_P; break;
237 }
238 break;
239 case 32:
240 switch (var->red.length + var->green.length +
241 var->blue.length + var->transp.length) {
242 case 18: ret = LCCR3_18BPP | LCCR3_PDFOR_3; break;
243 case 19: ret = LCCR3_19BPP; break;
244 case 24: ret = LCCR3_24BPP | LCCR3_PDFOR_3; break;
245 case 25: ret = LCCR3_25BPP; break;
246 }
247 break;
230 } 248 }
231 return ret; 249 return ret;
232} 250}
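For the new 24- and 32-bpp cases the LCCR3 depth field is chosen by the total number of colour and transparency bits rather than by bits_per_pixel; that is how RGB666 (18), RGBT666 (19), RGB888 (24) and RGBT888 (25) panels are told apart within the same word size. The sum the switch keys on, as a sketch (helper name hypothetical):

	/* Sketch: panel depth = sum of all component widths. */
	static int pxafb_panel_depth(const struct fb_var_screeninfo *var)
	{
		return var->red.length + var->green.length +
		       var->blue.length + var->transp.length;
	}
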
@@ -345,6 +363,41 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
345 var->green.offset = 5; var->green.length = 6; 363 var->green.offset = 5; var->green.length = 6;
346 var->blue.offset = 0; var->blue.length = 5; 364 var->blue.offset = 0; var->blue.length = 5;
347 var->transp.offset = var->transp.length = 0; 365 var->transp.offset = var->transp.length = 0;
366 } else if (var->bits_per_pixel > 16) {
367 struct pxafb_mode_info *mode;
368
369 mode = pxafb_getmode(inf, var);
370 if (!mode)
371 return -EINVAL;
372
373 switch (mode->depth) {
374 case 18: /* RGB666 */
375 var->transp.offset = var->transp.length = 0;
376 var->red.offset = 12; var->red.length = 6;
377 var->green.offset = 6; var->green.length = 6;
378 var->blue.offset = 0; var->blue.length = 6;
379 break;
380 case 19: /* RGBT666 */
381 var->transp.offset = 18; var->transp.length = 1;
382 var->red.offset = 12; var->red.length = 6;
383 var->green.offset = 6; var->green.length = 6;
384 var->blue.offset = 0; var->blue.length = 6;
385 break;
386 case 24: /* RGB888 */
387 var->transp.offset = var->transp.length = 0;
388 var->red.offset = 16; var->red.length = 8;
389 var->green.offset = 8; var->green.length = 8;
390 var->blue.offset = 0; var->blue.length = 8;
391 break;
392 case 25: /* RGBT888 */
393 var->transp.offset = 24; var->transp.length = 1;
394 var->red.offset = 16; var->red.length = 8;
395 var->green.offset = 8; var->green.length = 8;
396 var->blue.offset = 0; var->blue.length = 8;
397 break;
398 default:
399 return -EINVAL;
400 }
348 } else { 401 } else {
349 var->red.offset = var->green.offset = 0; 402 var->red.offset = var->green.offset = 0;
350 var->blue.offset = var->transp.offset = 0; 403 var->blue.offset = var->transp.offset = 0;
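The >16 bpp branch added to pxafb_check_var() looks up the platform mode and publishes the matching component layout. As an illustration of the 19-bit (RGBT666) layout set above, packing one pixel would look like this (function name hypothetical, assuming <linux/types.h>):

	/* Sketch: T at bit 18, R at 12, G at 6, B at 0, six bits per colour. */
	static u32 pack_rgbt666(u8 r, u8 g, u8 b, u8 t)
	{
		return ((u32)(t & 1) << 18) | ((u32)(r & 0x3f) << 12) |
		       ((u32)(g & 0x3f) << 6) | (b & 0x3f);
	}
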
@@ -376,7 +429,7 @@ static int pxafb_set_par(struct fb_info *info)
376 struct pxafb_info *fbi = (struct pxafb_info *)info; 429 struct pxafb_info *fbi = (struct pxafb_info *)info;
377 struct fb_var_screeninfo *var = &info->var; 430 struct fb_var_screeninfo *var = &info->var;
378 431
379 if (var->bits_per_pixel == 16) 432 if (var->bits_per_pixel >= 16)
380 fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; 433 fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
381 else if (!fbi->cmap_static) 434 else if (!fbi->cmap_static)
382 fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; 435 fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
@@ -391,7 +444,7 @@ static int pxafb_set_par(struct fb_info *info)
391 444
392 fbi->fb.fix.line_length = var->xres_virtual * 445 fbi->fb.fix.line_length = var->xres_virtual *
393 var->bits_per_pixel / 8; 446 var->bits_per_pixel / 8;
394 if (var->bits_per_pixel == 16) 447 if (var->bits_per_pixel >= 16)
395 fbi->palette_size = 0; 448 fbi->palette_size = 0;
396 else 449 else
397 fbi->palette_size = var->bits_per_pixel == 1 ? 450 fbi->palette_size = var->bits_per_pixel == 1 ?
@@ -404,7 +457,7 @@ static int pxafb_set_par(struct fb_info *info)
404 */ 457 */
405 pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR); 458 pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR);
406 459
407 if (fbi->fb.var.bits_per_pixel == 16) 460 if (fbi->fb.var.bits_per_pixel >= 16)
408 fb_dealloc_cmap(&fbi->fb.cmap); 461 fb_dealloc_cmap(&fbi->fb.cmap);
409 else 462 else
410 fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0); 463 fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0);
@@ -831,6 +884,8 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var,
831 case 4: 884 case 4:
832 case 8: 885 case 8:
833 case 16: 886 case 16:
887 case 24:
888 case 32:
834 break; 889 break;
835 default: 890 default:
836 printk(KERN_ERR "%s: invalid bit depth %d\n", 891 printk(KERN_ERR "%s: invalid bit depth %d\n",
@@ -968,6 +1023,11 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
968 1023
969 for (gpio = 58; ldd_bits; gpio++, ldd_bits--) 1024 for (gpio = 58; ldd_bits; gpio++, ldd_bits--)
970 pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT); 1025 pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT);
1026 /* 18 bit interface */
1027 if (fbi->fb.var.bits_per_pixel > 16) {
1028 pxa_gpio_mode(86 | GPIO_ALT_FN_2_OUT);
1029 pxa_gpio_mode(87 | GPIO_ALT_FN_2_OUT);
1030 }
971 pxa_gpio_mode(GPIO74_LCD_FCLK_MD); 1031 pxa_gpio_mode(GPIO74_LCD_FCLK_MD);
972 pxa_gpio_mode(GPIO75_LCD_LCLK_MD); 1032 pxa_gpio_mode(GPIO75_LCD_LCLK_MD);
973 pxa_gpio_mode(GPIO76_LCD_PCLK_MD); 1033 pxa_gpio_mode(GPIO76_LCD_PCLK_MD);
@@ -1058,7 +1118,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
1058{ 1118{
1059 u_int old_state; 1119 u_int old_state;
1060 1120
1061 down(&fbi->ctrlr_sem); 1121 mutex_lock(&fbi->ctrlr_lock);
1062 1122
1063 old_state = fbi->state; 1123 old_state = fbi->state;
1064 1124
@@ -1146,7 +1206,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
1146 } 1206 }
1147 break; 1207 break;
1148 } 1208 }
1149 up(&fbi->ctrlr_sem); 1209 mutex_unlock(&fbi->ctrlr_lock);
1150} 1210}
1151 1211
1152/* 1212/*
@@ -1399,7 +1459,7 @@ static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
1399 1459
1400 init_waitqueue_head(&fbi->ctrlr_wait); 1460 init_waitqueue_head(&fbi->ctrlr_wait);
1401 INIT_WORK(&fbi->task, pxafb_task); 1461 INIT_WORK(&fbi->task, pxafb_task);
1402 init_MUTEX(&fbi->ctrlr_sem); 1462 mutex_init(&fbi->ctrlr_lock);
1403 init_completion(&fbi->disable_done); 1463 init_completion(&fbi->disable_done);
1404#ifdef CONFIG_FB_PXA_SMARTPANEL 1464#ifdef CONFIG_FB_PXA_SMARTPANEL
1405 init_completion(&fbi->command_done); 1465 init_completion(&fbi->command_done);
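pxafb (and sa1100fb further down) also converts the controller-state semaphore, which was only ever used for mutual exclusion, to a real struct mutex. The generic shape of such a conversion, with hypothetical names:

	#include <linux/mutex.h>

	struct ctrlr {
		struct mutex lock;		/* was: struct semaphore sem; */
	};

	static void ctrlr_init(struct ctrlr *c)
	{
		mutex_init(&c->lock);		/* was: init_MUTEX(&c->sem); */
	}

	static void ctrlr_set_state(struct ctrlr *c)
	{
		mutex_lock(&c->lock);		/* was: down(&c->sem); */
		/* ... update controller state ... */
		mutex_unlock(&c->lock);		/* was: up(&c->sem); */
	}
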
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h
index 8238dc826429..31541b86f13d 100644
--- a/drivers/video/pxafb.h
+++ b/drivers/video/pxafb.h
@@ -106,7 +106,7 @@ struct pxafb_info {
106 106
107 volatile u_char state; 107 volatile u_char state;
108 volatile u_char task_state; 108 volatile u_char task_state;
109 struct semaphore ctrlr_sem; 109 struct mutex ctrlr_lock;
110 wait_queue_head_t ctrlr_wait; 110 wait_queue_head_t ctrlr_wait;
111 struct work_struct task; 111 struct work_struct task;
112 112
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index ab2b2110478b..78bcdbc3f484 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -167,6 +167,7 @@
167#include <linux/string.h> 167#include <linux/string.h>
168#include <linux/interrupt.h> 168#include <linux/interrupt.h>
169#include <linux/slab.h> 169#include <linux/slab.h>
170#include <linux/mm.h>
170#include <linux/fb.h> 171#include <linux/fb.h>
171#include <linux/delay.h> 172#include <linux/delay.h>
172#include <linux/init.h> 173#include <linux/init.h>
@@ -174,6 +175,7 @@
174#include <linux/cpufreq.h> 175#include <linux/cpufreq.h>
175#include <linux/platform_device.h> 176#include <linux/platform_device.h>
176#include <linux/dma-mapping.h> 177#include <linux/dma-mapping.h>
178#include <linux/mutex.h>
177 179
178#include <asm/hardware.h> 180#include <asm/hardware.h>
179#include <asm/io.h> 181#include <asm/io.h>
@@ -1107,7 +1109,7 @@ static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state)
1107{ 1109{
1108 u_int old_state; 1110 u_int old_state;
1109 1111
1110 down(&fbi->ctrlr_sem); 1112 mutex_lock(&fbi->ctrlr_lock);
1111 1113
1112 old_state = fbi->state; 1114 old_state = fbi->state;
1113 1115
@@ -1192,7 +1194,7 @@ static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state)
1192 } 1194 }
1193 break; 1195 break;
1194 } 1196 }
1195 up(&fbi->ctrlr_sem); 1197 mutex_unlock(&fbi->ctrlr_lock);
1196} 1198}
1197 1199
1198/* 1200/*
@@ -1444,7 +1446,7 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
1444 1446
1445 init_waitqueue_head(&fbi->ctrlr_wait); 1447 init_waitqueue_head(&fbi->ctrlr_wait);
1446 INIT_WORK(&fbi->task, sa1100fb_task); 1448 INIT_WORK(&fbi->task, sa1100fb_task);
1447 init_MUTEX(&fbi->ctrlr_sem); 1449 mutex_init(&fbi->ctrlr_lock);
1448 1450
1449 return fbi; 1451 return fbi;
1450} 1452}
diff --git a/drivers/video/sa1100fb.h b/drivers/video/sa1100fb.h
index f465b27ed860..86831db9a042 100644
--- a/drivers/video/sa1100fb.h
+++ b/drivers/video/sa1100fb.h
@@ -100,7 +100,7 @@ struct sa1100fb_info {
100 100
101 volatile u_char state; 101 volatile u_char state;
102 volatile u_char task_state; 102 volatile u_char task_state;
103 struct semaphore ctrlr_sem; 103 struct mutex ctrlr_lock;
104 wait_queue_head_t ctrlr_wait; 104 wait_queue_head_t ctrlr_wait;
105 struct work_struct task; 105 struct work_struct task;
106 106
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
new file mode 100644
index 000000000000..4d0e28c5790b
--- /dev/null
+++ b/drivers/video/sh7760fb.c
@@ -0,0 +1,658 @@
1/*
2 * SH7760/SH7763 LCDC Framebuffer driver.
3 *
4 * (c) 2006-2008 MSC Vertriebsges.m.b.H.,
5 * Manuel Lauss <mano@roarinelk.homelinux.net>
6 * (c) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * PLEASE HAVE A LOOK AT Documentation/fb/sh7760fb.txt!
13 *
14 * Thanks to Siegfried Schaefer <s.schaefer at schaefer-edv.de>
15 * for his original source and testing!
16 */
17
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/dma-mapping.h>
21#include <linux/fb.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27
28#include <asm/sh7760fb.h>
29
30struct sh7760fb_par {
31 void __iomem *base;
32 int irq;
33
34 struct sh7760fb_platdata *pd; /* display information */
35
36 dma_addr_t fbdma; /* physical address */
37
38 int rot; /* rotation enabled? */
39
40 u32 pseudo_palette[16];
41
42 struct platform_device *dev;
43 struct resource *ioarea;
44 struct completion vsync; /* vsync irq event */
45};
46
47static irqreturn_t sh7760fb_irq(int irq, void *data)
48{
49 struct completion *c = data;
50
51 complete(c);
52
53 return IRQ_HANDLED;
54}
55
56static void sh7760fb_wait_vsync(struct fb_info *info)
57{
58 struct sh7760fb_par *par = info->par;
59
60 if (par->pd->novsync)
61 return;
62
63 iowrite16(ioread16(par->base + LDINTR) & ~VINT_CHECK,
64 par->base + LDINTR);
65
66 if (par->irq < 0) {
67 /* poll for vert. retrace: status bit is sticky */
68 while (!(ioread16(par->base + LDINTR) & VINT_CHECK))
69 cpu_relax();
70 } else {
71 /* a "wait_for_irq_event(par->irq)" would be extremely nice */
72 init_completion(&par->vsync);
73 enable_irq(par->irq);
74 wait_for_completion(&par->vsync);
75 disable_irq_nosync(par->irq);
76 }
77}
78
79/* wait_for_lps - wait until power supply has reached a certain state. */
80static int wait_for_lps(struct sh7760fb_par *par, int val)
81{
82 int i = 100;
83 while (--i && ((ioread16(par->base + LDPMMR) & 3) != val))
84 msleep(1);
85
86 if (i <= 0)
87 return -ETIMEDOUT;
88
89 return 0;
90}
91
92/* en/disable the LCDC */
93static int sh7760fb_blank(int blank, struct fb_info *info)
94{
95 struct sh7760fb_par *par = info->par;
96 struct sh7760fb_platdata *pd = par->pd;
97 unsigned short cntr = ioread16(par->base + LDCNTR);
98 unsigned short intr = ioread16(par->base + LDINTR);
99 int lps;
100
101 if (blank == FB_BLANK_UNBLANK) {
102 intr |= VINT_START;
103 cntr = LDCNTR_DON2 | LDCNTR_DON;
104 lps = 3;
105 } else {
106 intr &= ~VINT_START;
107 cntr = LDCNTR_DON2;
108 lps = 0;
109 }
110
111 if (pd->blank)
112 pd->blank(blank);
113
114 iowrite16(intr, par->base + LDINTR);
115 iowrite16(cntr, par->base + LDCNTR);
116
117 return wait_for_lps(par, lps);
118}
119
120/* set color registers */
121static int sh7760fb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
122{
123 struct sh7760fb_par *par = info->par;
124 u32 s = cmap->start;
125 u32 l = cmap->len;
126 u16 *r = cmap->red;
127 u16 *g = cmap->green;
128 u16 *b = cmap->blue;
129 u32 col, tmo;
130 int ret;
131
132 ret = 0;
133
134 sh7760fb_wait_vsync(info);
135
136 /* request palette access */
137 iowrite16(LDPALCR_PALEN, par->base + LDPALCR);
138
139 /* poll for access grant */
140 tmo = 100;
141 while (!(ioread16(par->base + LDPALCR) & LDPALCR_PALS) && (--tmo))
142 cpu_relax();
143
144 if (!tmo) {
145 ret = 1;
146 dev_dbg(info->dev, "no palette access!\n");
147 goto out;
148 }
149
150 while (l && (s < 256)) {
151 col = ((*r) & 0xff) << 16;
152 col |= ((*g) & 0xff) << 8;
153 col |= ((*b) & 0xff);
154 col &= SH7760FB_PALETTE_MASK;
155
156 if (s < 16)
157 ((u32 *) (info->pseudo_palette))[s] = s;
158
159 s++;
160 l--;
161 r++;
162 g++;
163 b++;
164 }
165out:
166 iowrite16(0, par->base + LDPALCR);
167 return ret;
168}
169
170static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
171 unsigned long stride)
172{
173 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
174 strcpy(fix->id, "sh7760-lcdc");
175
176 fix->smem_start = (unsigned long)info->screen_base;
177 fix->smem_len = info->screen_size;
178
179 fix->line_length = stride;
180}
181
182static int sh7760fb_get_color_info(struct device *dev,
183 u16 lddfr, int *bpp, int *gray)
184{
185 int lbpp, lgray;
186
187 lgray = lbpp = 0;
188
189 switch (lddfr & LDDFR_COLOR_MASK) {
190 case LDDFR_1BPP_MONO:
191 lgray = 1;
192 lbpp = 1;
193 break;
194 case LDDFR_2BPP_MONO:
195 lgray = 1;
196 lbpp = 2;
197 break;
198 case LDDFR_4BPP_MONO:
199 lgray = 1;
200 case LDDFR_4BPP:
201 lbpp = 4;
202 break;
203 case LDDFR_6BPP_MONO:
204 lgray = 1;
205 case LDDFR_8BPP:
206 lbpp = 8;
207 break;
208 case LDDFR_16BPP_RGB555:
209 case LDDFR_16BPP_RGB565:
210 lbpp = 16;
211 lgray = 0;
212 break;
213 default:
214 dev_dbg(dev, "unsupported LDDFR bit depth.\n");
215 return -EINVAL;
216 }
217
218 if (bpp)
219 *bpp = lbpp;
220 if (gray)
221 *gray = lgray;
222
223 return 0;
224}
225
226static int sh7760fb_check_var(struct fb_var_screeninfo *var,
227 struct fb_info *info)
228{
229 struct fb_fix_screeninfo *fix = &info->fix;
230 struct sh7760fb_par *par = info->par;
231 int ret, bpp;
232
233 /* get color info from register value */
234 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, NULL);
235 if (ret)
236 return ret;
237
238 var->bits_per_pixel = bpp;
239
240 if ((var->grayscale) && (var->bits_per_pixel == 1))
241 fix->visual = FB_VISUAL_MONO10;
242 else if (var->bits_per_pixel >= 15)
243 fix->visual = FB_VISUAL_TRUECOLOR;
244 else
245 fix->visual = FB_VISUAL_PSEUDOCOLOR;
246
247 /* TODO: add some more validation here */
248 return 0;
249}
250
251/*
252 * sh7760fb_set_par - set videomode.
253 *
254 * NOTE: The rotation, grayscale and DSTN codepaths are
255 * totally untested!
256 */
257static int sh7760fb_set_par(struct fb_info *info)
258{
259 struct sh7760fb_par *par = info->par;
260 struct fb_videomode *vm = par->pd->def_mode;
261 unsigned long sbase, dstn_off, ldsarl, stride;
262 unsigned short hsynp, hsynw, htcn, hdcn;
263 unsigned short vsynp, vsynw, vtln, vdln;
264 unsigned short lddfr, ldmtr;
265 int ret, bpp, gray;
266
267 par->rot = par->pd->rotate;
268
269 /* rotate only works with xres <= 320 */
270 if (par->rot && (vm->xres > 320)) {
271 dev_dbg(info->dev, "rotation disabled due to display size\n");
272 par->rot = 0;
273 }
274
275 /* calculate LCDC reg vals from display parameters */
276 hsynp = vm->right_margin + vm->xres;
277 hsynw = vm->hsync_len;
278 htcn = vm->left_margin + hsynp + hsynw;
279 hdcn = vm->xres;
280 vsynp = vm->lower_margin + vm->yres;
281 vsynw = vm->vsync_len;
282 vtln = vm->upper_margin + vsynp + vsynw;
283 vdln = vm->yres;
284
285 /* get color info from register value */
286 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, &gray);
287 if (ret)
288 return ret;
289
290 dev_dbg(info->dev, "%dx%d %dbpp %s (orientation %s)\n", hdcn,
291 vdln, bpp, gray ? "grayscale" : "color",
292 par->rot ? "rotated" : "normal");
293
294#ifdef CONFIG_CPU_LITTLE_ENDIAN
295 lddfr = par->pd->lddfr | (1 << 8);
296#else
297 lddfr = par->pd->lddfr & ~(1 << 8);
298#endif
299
300 ldmtr = par->pd->ldmtr;
301
302 if (!(vm->sync & FB_SYNC_HOR_HIGH_ACT))
303 ldmtr |= LDMTR_CL1POL;
304 if (!(vm->sync & FB_SYNC_VERT_HIGH_ACT))
305 ldmtr |= LDMTR_FLMPOL;
306
307 /* shut down LCDC before changing display parameters */
308 sh7760fb_blank(FB_BLANK_POWERDOWN, info);
309
310 iowrite16(par->pd->ldickr, par->base + LDICKR); /* pixclock */
311 iowrite16(ldmtr, par->base + LDMTR); /* polarities */
312 iowrite16(lddfr, par->base + LDDFR); /* color/depth */
313 iowrite16((par->rot ? 1 << 13 : 0), par->base + LDSMR); /* rotate */
314 iowrite16(par->pd->ldpmmr, par->base + LDPMMR); /* Power Management */
315 iowrite16(par->pd->ldpspr, par->base + LDPSPR); /* Power Supply Ctrl */
316
317 /* display resolution */
318 iowrite16(((htcn >> 3) - 1) | (((hdcn >> 3) - 1) << 8),
319 par->base + LDHCNR);
320 iowrite16(vdln - 1, par->base + LDVDLNR);
321 iowrite16(vtln - 1, par->base + LDVTLNR);
322 /* h/v sync signals */
323 iowrite16((vsynp - 1) | ((vsynw - 1) << 12), par->base + LDVSYNR);
324 iowrite16(((hsynp >> 3) - 1) | (((hsynw >> 3) - 1) << 12),
325 par->base + LDHSYNR);
326 /* AC modulation sig */
327 iowrite16(par->pd->ldaclnr, par->base + LDACLNR);
328
329 stride = (par->rot) ? vtln : hdcn;
330 if (!gray)
331 stride *= (bpp + 7) >> 3;
332 else {
333 if (bpp == 1)
334 stride >>= 3;
335 else if (bpp == 2)
336 stride >>= 2;
337 else if (bpp == 4)
338 stride >>= 1;
339 /* 6 bpp == 8 bpp */
340 }
341
342 /* if rotated, stride must be power of 2 */
343 if (par->rot) {
344 unsigned long bit = 1 << 31;
345 while (bit) {
346 if (stride & bit)
347 break;
348 bit >>= 1;
349 }
350 if (stride & ~bit)
351 stride = bit << 1; /* not P-o-2, round up */
352 }
353 iowrite16(stride, par->base + LDLAOR);
354
355 /* set display mem start address */
356 sbase = (unsigned long)par->fbdma;
357 if (par->rot)
358 sbase += (hdcn - 1) * stride;
359
360 iowrite32(sbase, par->base + LDSARU);
361
362 /*
363 * for DSTN need to set address for lower half.
364 * I (mlau) don't know which address to set it to,
365 * so I guessed at (stride * yres/2).
366 */
367 if (((ldmtr & 0x003f) >= LDMTR_DSTN_MONO_8) &&
368 ((ldmtr & 0x003f) <= LDMTR_DSTN_COLOR_16)) {
369
370 dev_dbg(info->dev, " ***** DSTN untested! *****\n");
371
372 dstn_off = stride;
373 if (par->rot)
374 dstn_off *= hdcn >> 1;
375 else
376 dstn_off *= vdln >> 1;
377
378 ldsarl = sbase + dstn_off;
379 } else
380 ldsarl = 0;
381
382 iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
383
384 encode_fix(&info->fix, info, stride);
385 sh7760fb_check_var(&info->var, info);
386
387 sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
388
389 dev_dbg(info->dev, "hdcn : %6d htcn : %6d\n", hdcn, htcn);
390 dev_dbg(info->dev, "hsynw : %6d hsynp : %6d\n", hsynw, hsynp);
391 dev_dbg(info->dev, "vdln : %6d vtln : %6d\n", vdln, vtln);
392 dev_dbg(info->dev, "vsynw : %6d vsynp : %6d\n", vsynw, vsynp);
393 dev_dbg(info->dev, "clksrc: %6d clkdiv: %6d\n",
394 (par->pd->ldickr >> 12) & 3, par->pd->ldickr & 0x1f);
395 dev_dbg(info->dev, "ldpmmr: 0x%04x ldpspr: 0x%04x\n", par->pd->ldpmmr,
396 par->pd->ldpspr);
397 dev_dbg(info->dev, "ldmtr : 0x%04x lddfr : 0x%04x\n", ldmtr, lddfr);
398 dev_dbg(info->dev, "ldlaor: %ld\n", stride);
399 dev_dbg(info->dev, "ldsaru: 0x%08lx ldsarl: 0x%08lx\n", sbase, ldsarl);
400
401 return 0;
402}
403
404static struct fb_ops sh7760fb_ops = {
405 .owner = THIS_MODULE,
406 .fb_blank = sh7760fb_blank,
407 .fb_check_var = sh7760fb_check_var,
408 .fb_setcmap = sh7760fb_setcmap,
409 .fb_set_par = sh7760fb_set_par,
410 .fb_fillrect = cfb_fillrect,
411 .fb_copyarea = cfb_copyarea,
412 .fb_imageblit = cfb_imageblit,
413};
414
415static void sh7760fb_free_mem(struct fb_info *info)
416{
417 struct sh7760fb_par *par = info->par;
418
419 if (!info->screen_base)
420 return;
421
422 dma_free_coherent(info->dev, info->screen_size,
423 info->screen_base, par->fbdma);
424
425 par->fbdma = 0;
426 info->screen_base = NULL;
427 info->screen_size = 0;
428}
429
430/* allocate the framebuffer memory. This memory must be in Area3,
431 * (dictated by the DMA engine) and contiguous, at a 512 byte boundary.
432 */
433static int sh7760fb_alloc_mem(struct fb_info *info)
434{
435 struct sh7760fb_par *par = info->par;
436 void *fbmem;
437 unsigned long vram;
438 int ret, bpp;
439
440 if (info->screen_base)
441 return 0;
442
443 /* get color info from register value */
444 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, NULL);
445 if (ret) {
446 printk(KERN_ERR "colinfo\n");
447 return ret;
448 }
449
450 /* min VRAM: xres_min = 16, yres_min = 1, bpp = 1: 2byte -> 1 page
451 max VRAM: xres_max = 1024, yres_max = 1024, bpp = 16: 2MB */
452
453 vram = info->var.xres * info->var.yres;
454 if (info->var.grayscale) {
455 if (bpp == 1)
456 vram >>= 3;
457 else if (bpp == 2)
458 vram >>= 2;
459 else if (bpp == 4)
460 vram >>= 1;
461 } else if (bpp > 8)
462 vram *= 2;
463 if ((vram < 1) || (vram > 1024 * 2048)) {
464 dev_dbg(info->dev, "too much VRAM required. Check settings\n");
465 return -ENODEV;
466 }
467
468 if (vram < PAGE_SIZE)
469 vram = PAGE_SIZE;
470
471 fbmem = dma_alloc_coherent(info->dev, vram, &par->fbdma, GFP_KERNEL);
472
473 if (!fbmem)
474 return -ENOMEM;
475
476 if ((par->fbdma & SH7760FB_DMA_MASK) != SH7760FB_DMA_MASK) {
477 sh7760fb_free_mem(info);
478		dev_err(info->dev, "kernel gave me memory at 0x%08lx, which is "
479 "unusable for the LCDC\n", (unsigned long)par->fbdma);
480 return -ENOMEM;
481 }
482
483 info->screen_base = fbmem;
484 info->screen_size = vram;
485
486 return 0;
487}
488
489static int __devinit sh7760fb_probe(struct platform_device *pdev)
490{
491 struct fb_info *info;
492 struct resource *res;
493 struct sh7760fb_par *par;
494 int ret;
495
496 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
497 if (unlikely(res == NULL)) {
498 dev_err(&pdev->dev, "invalid resource\n");
499 return -EINVAL;
500 }
501
502 info = framebuffer_alloc(sizeof(struct sh7760fb_par), &pdev->dev);
503 if (!info)
504 return -ENOMEM;
505
506 par = info->par;
507 par->dev = pdev;
508
509 par->pd = pdev->dev.platform_data;
510 if (!par->pd) {
511 dev_dbg(info->dev, "no display setup data!\n");
512 ret = -ENODEV;
513 goto out_fb;
514 }
515
516 par->ioarea = request_mem_region(res->start,
517 (res->end - res->start), pdev->name);
518 if (!par->ioarea) {
519 dev_err(&pdev->dev, "mmio area busy\n");
520 ret = -EBUSY;
521 goto out_fb;
522 }
523
524 par->base = ioremap_nocache(res->start, res->end - res->start + 1);
525 if (!par->base) {
526 dev_err(&pdev->dev, "cannot remap\n");
527 ret = -ENODEV;
528 goto out_res;
529 }
530
531 iowrite16(0, par->base + LDINTR); /* disable vsync irq */
532 par->irq = platform_get_irq(pdev, 0);
533 if (par->irq >= 0) {
534 ret = request_irq(par->irq, sh7760fb_irq, 0,
535 "sh7760-lcdc", &par->vsync);
536 if (ret) {
537 dev_err(&pdev->dev, "cannot grab IRQ\n");
538 par->irq = -ENXIO;
539 } else
540 disable_irq_nosync(par->irq);
541 }
542
543 fb_videomode_to_var(&info->var, par->pd->def_mode);
544
545 ret = sh7760fb_alloc_mem(info);
546 if (ret) {
547 dev_dbg(info->dev, "framebuffer memory allocation failed!\n");
548 goto out_unmap;
549 }
550
551 info->pseudo_palette = par->pseudo_palette;
552
553 /* fixup color register bitpositions. These are fixed by hardware */
554 info->var.red.offset = 11;
555 info->var.red.length = 5;
556 info->var.red.msb_right = 0;
557
558 info->var.green.offset = 5;
559 info->var.green.length = 6;
560 info->var.green.msb_right = 0;
561
562 info->var.blue.offset = 0;
563 info->var.blue.length = 5;
564 info->var.blue.msb_right = 0;
565
566 info->var.transp.offset = 0;
567 info->var.transp.length = 0;
568 info->var.transp.msb_right = 0;
569
570 /* set the DON2 bit now, before cmap allocation, as it will randomize
571 * palette memory.
572 */
573 iowrite16(LDCNTR_DON2, par->base + LDCNTR);
574 info->fbops = &sh7760fb_ops;
575
576 ret = fb_alloc_cmap(&info->cmap, 256, 0);
577 if (ret) {
578 dev_dbg(info->dev, "Unable to allocate cmap memory\n");
579 goto out_mem;
580 }
581
582 ret = register_framebuffer(info);
583 if (ret < 0) {
584 dev_dbg(info->dev, "cannot register fb!\n");
585 goto out_cmap;
586 }
587 platform_set_drvdata(pdev, info);
588
589 printk(KERN_INFO "%s: memory at phys 0x%08lx-0x%08lx, size %ld KiB\n",
590 pdev->name,
591 (unsigned long)par->fbdma,
592 (unsigned long)(par->fbdma + info->screen_size - 1),
593 info->screen_size >> 10);
594
595 return 0;
596
597out_cmap:
598 sh7760fb_blank(FB_BLANK_POWERDOWN, info);
599 fb_dealloc_cmap(&info->cmap);
600out_mem:
601 sh7760fb_free_mem(info);
602out_unmap:
603 if (par->irq >= 0)
604 free_irq(par->irq, &par->vsync);
605 iounmap(par->base);
606out_res:
607 release_resource(par->ioarea);
608 kfree(par->ioarea);
609out_fb:
610 framebuffer_release(info);
611 return ret;
612}
613
614static int __devexit sh7760fb_remove(struct platform_device *dev)
615{
616 struct fb_info *info = platform_get_drvdata(dev);
617 struct sh7760fb_par *par = info->par;
618
619 sh7760fb_blank(FB_BLANK_POWERDOWN, info);
620 unregister_framebuffer(info);
621 fb_dealloc_cmap(&info->cmap);
622 sh7760fb_free_mem(info);
623 if (par->irq >= 0)
624		free_irq(par->irq, &par->vsync);
625 iounmap(par->base);
626 release_resource(par->ioarea);
627 kfree(par->ioarea);
628 framebuffer_release(info);
629 platform_set_drvdata(dev, NULL);
630
631 return 0;
632}
633
634static struct platform_driver sh7760_lcdc_driver = {
635 .driver = {
636 .name = "sh7760-lcdc",
637 .owner = THIS_MODULE,
638 },
639 .probe = sh7760fb_probe,
640 .remove = __devexit_p(sh7760fb_remove),
641};
642
643static int __init sh7760fb_init(void)
644{
645 return platform_driver_register(&sh7760_lcdc_driver);
646}
647
648static void __exit sh7760fb_exit(void)
649{
650 platform_driver_unregister(&sh7760_lcdc_driver);
651}
652
653module_init(sh7760fb_init);
654module_exit(sh7760fb_exit);
655
656MODULE_AUTHOR("Nobuhiro Iwamatsu, Manuel Lauss");
657MODULE_DESCRIPTION("FBdev for SH7760/63 integrated LCD Controller");
658MODULE_LICENSE("GPL");
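One detail of sh7760fb_set_par() worth spelling out: with rotation enabled the line stride must be a power of two, and the bit-scan loop rounds it up when it is not. A standalone sketch of that round-up (comparable in effect to roundup_pow_of_two() from <linux/log2.h>):

	/* Sketch: find the highest set bit; if any lower bit is also set,
	 * the value is not a power of two, so go one power of two higher. */
	static unsigned long stride_roundup_pow2(unsigned long stride)
	{
		unsigned long bit = 1UL << 31;

		while (bit && !(stride & bit))
			bit >>= 1;
		if (stride & ~bit)
			bit <<= 1;
		return bit;
	}
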
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
new file mode 100644
index 000000000000..f6ef6cca73cd
--- /dev/null
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -0,0 +1,725 @@
1/*
2 * SuperH Mobile LCDC Framebuffer
3 *
4 * Copyright (c) 2008 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/delay.h>
14#include <linux/mm.h>
15#include <linux/fb.h>
16#include <linux/clk.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <asm/sh_mobile_lcdc.h>
20
21#define PALETTE_NR 16
22
23struct sh_mobile_lcdc_priv;
24struct sh_mobile_lcdc_chan {
25 struct sh_mobile_lcdc_priv *lcdc;
26 unsigned long *reg_offs;
27 unsigned long ldmt1r_value;
28 unsigned long enabled; /* ME and SE in LDCNT2R */
29 struct sh_mobile_lcdc_chan_cfg cfg;
30 u32 pseudo_palette[PALETTE_NR];
31 struct fb_info info;
32 dma_addr_t dma_handle;
33};
34
35struct sh_mobile_lcdc_priv {
36 void __iomem *base;
37 struct clk *clk;
38 unsigned long lddckr;
39 struct sh_mobile_lcdc_chan ch[2];
40};
41
42/* shared registers */
43#define _LDDCKR 0x410
44#define _LDDCKSTPR 0x414
45#define _LDINTR 0x468
46#define _LDSR 0x46c
47#define _LDCNT1R 0x470
48#define _LDCNT2R 0x474
49#define _LDDDSR 0x47c
50#define _LDDWD0R 0x800
51#define _LDDRDR 0x840
52#define _LDDWAR 0x900
53#define _LDDRAR 0x904
54
55/* per-channel registers */
56enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R,
57 LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR };
58
59static unsigned long lcdc_offs_mainlcd[] = {
60 [LDDCKPAT1R] = 0x400,
61 [LDDCKPAT2R] = 0x404,
62 [LDMT1R] = 0x418,
63 [LDMT2R] = 0x41c,
64 [LDMT3R] = 0x420,
65 [LDDFR] = 0x424,
66 [LDSM1R] = 0x428,
67 [LDSA1R] = 0x430,
68 [LDMLSR] = 0x438,
69 [LDHCNR] = 0x448,
70 [LDHSYNR] = 0x44c,
71 [LDVLNR] = 0x450,
72 [LDVSYNR] = 0x454,
73 [LDPMR] = 0x460,
74};
75
76static unsigned long lcdc_offs_sublcd[] = {
77 [LDDCKPAT1R] = 0x408,
78 [LDDCKPAT2R] = 0x40c,
79 [LDMT1R] = 0x600,
80 [LDMT2R] = 0x604,
81 [LDMT3R] = 0x608,
82 [LDDFR] = 0x60c,
83 [LDSM1R] = 0x610,
84 [LDSA1R] = 0x618,
85 [LDMLSR] = 0x620,
86 [LDHCNR] = 0x624,
87 [LDHSYNR] = 0x628,
88 [LDVLNR] = 0x62c,
89 [LDVSYNR] = 0x630,
90 [LDPMR] = 0x63c,
91};
92
93#define START_LCDC 0x00000001
94#define LCDC_RESET 0x00000100
95#define DISPLAY_BEU 0x00000008
96#define LCDC_ENABLE 0x00000001
97
98static void lcdc_write_chan(struct sh_mobile_lcdc_chan *chan,
99 int reg_nr, unsigned long data)
100{
101 iowrite32(data, chan->lcdc->base + chan->reg_offs[reg_nr]);
102}
103
104static unsigned long lcdc_read_chan(struct sh_mobile_lcdc_chan *chan,
105 int reg_nr)
106{
107 return ioread32(chan->lcdc->base + chan->reg_offs[reg_nr]);
108}
109
110static void lcdc_write(struct sh_mobile_lcdc_priv *priv,
111 unsigned long reg_offs, unsigned long data)
112{
113 iowrite32(data, priv->base + reg_offs);
114}
115
116static unsigned long lcdc_read(struct sh_mobile_lcdc_priv *priv,
117 unsigned long reg_offs)
118{
119 return ioread32(priv->base + reg_offs);
120}
121
122static void lcdc_wait_bit(struct sh_mobile_lcdc_priv *priv,
123 unsigned long reg_offs,
124 unsigned long mask, unsigned long until)
125{
126 while ((lcdc_read(priv, reg_offs) & mask) != until)
127 cpu_relax();
128}
129
130static int lcdc_chan_is_sublcd(struct sh_mobile_lcdc_chan *chan)
131{
132 return chan->cfg.chan == LCDC_CHAN_SUBLCD;
133}
134
135static void lcdc_sys_write_index(void *handle, unsigned long data)
136{
137 struct sh_mobile_lcdc_chan *ch = handle;
138
139 lcdc_write(ch->lcdc, _LDDWD0R, data | 0x10000000);
140 lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
141 lcdc_write(ch->lcdc, _LDDWAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
142}
143
144static void lcdc_sys_write_data(void *handle, unsigned long data)
145{
146 struct sh_mobile_lcdc_chan *ch = handle;
147
148 lcdc_write(ch->lcdc, _LDDWD0R, data | 0x11000000);
149 lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
150 lcdc_write(ch->lcdc, _LDDWAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
151}
152
153static unsigned long lcdc_sys_read_data(void *handle)
154{
155 struct sh_mobile_lcdc_chan *ch = handle;
156
157 lcdc_write(ch->lcdc, _LDDRDR, 0x01000000);
158 lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
159 lcdc_write(ch->lcdc, _LDDRAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
160 udelay(1);
161
162 return lcdc_read(ch->lcdc, _LDDRDR) & 0xffff;
163}
164
165struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
166 lcdc_sys_write_index,
167 lcdc_sys_write_data,
168 lcdc_sys_read_data,
169};
170
171static void sh_mobile_lcdc_start_stop(struct sh_mobile_lcdc_priv *priv,
172 int start)
173{
174 unsigned long tmp = lcdc_read(priv, _LDCNT2R);
175 int k;
176
177 /* start or stop the lcdc */
178 if (start)
179 lcdc_write(priv, _LDCNT2R, tmp | START_LCDC);
180 else
181 lcdc_write(priv, _LDCNT2R, tmp & ~START_LCDC);
182
183 /* wait until power is applied/stopped on all channels */
184 for (k = 0; k < ARRAY_SIZE(priv->ch); k++)
185 if (lcdc_read(priv, _LDCNT2R) & priv->ch[k].enabled)
186 while (1) {
187 tmp = lcdc_read_chan(&priv->ch[k], LDPMR) & 3;
188 if (start && tmp == 3)
189 break;
190 if (!start && tmp == 0)
191 break;
192 cpu_relax();
193 }
194
195 if (!start)
196 lcdc_write(priv, _LDDCKSTPR, 1); /* stop dotclock */
197}
198
199static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
200{
201 struct sh_mobile_lcdc_chan *ch;
202 struct fb_videomode *lcd_cfg;
203 struct sh_mobile_lcdc_board_cfg *board_cfg;
204 unsigned long tmp;
205 int k, m;
206 int ret = 0;
207
208 /* reset */
209 lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) | LCDC_RESET);
210 lcdc_wait_bit(priv, _LDCNT2R, LCDC_RESET, 0);
211
212 /* enable LCDC channels */
213 tmp = lcdc_read(priv, _LDCNT2R);
214 tmp |= priv->ch[0].enabled;
215 tmp |= priv->ch[1].enabled;
216 lcdc_write(priv, _LDCNT2R, tmp);
217
218 /* read data from external memory, avoid using the BEU for now */
219 lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) & ~DISPLAY_BEU);
220
221 /* stop the lcdc first */
222 sh_mobile_lcdc_start_stop(priv, 0);
223
224 /* configure clocks */
225 tmp = priv->lddckr;
226 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
227 ch = &priv->ch[k];
228
229 if (!priv->ch[k].enabled)
230 continue;
231
232 m = ch->cfg.clock_divider;
233 if (!m)
234 continue;
235
236 if (m == 1)
237 m = 1 << 6;
238 tmp |= m << (lcdc_chan_is_sublcd(ch) ? 8 : 0);
239
240 lcdc_write_chan(ch, LDDCKPAT1R, 0x00000000);
241 lcdc_write_chan(ch, LDDCKPAT2R, (1 << (m/2)) - 1);
242 }
243
244 lcdc_write(priv, _LDDCKR, tmp);
245
246 /* start dotclock again */
247 lcdc_write(priv, _LDDCKSTPR, 0);
248 lcdc_wait_bit(priv, _LDDCKSTPR, ~0, 0);
249
250 /* interrupts are disabled */
251 lcdc_write(priv, _LDINTR, 0);
252
253 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
254 ch = &priv->ch[k];
255 lcd_cfg = &ch->cfg.lcd_cfg;
256
257 if (!ch->enabled)
258 continue;
259
260 tmp = ch->ldmt1r_value;
261 tmp |= (lcd_cfg->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1 << 28;
262 tmp |= (lcd_cfg->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1 << 27;
263 lcdc_write_chan(ch, LDMT1R, tmp);
264
265 /* setup SYS bus */
266 lcdc_write_chan(ch, LDMT2R, ch->cfg.sys_bus_cfg.ldmt2r);
267 lcdc_write_chan(ch, LDMT3R, ch->cfg.sys_bus_cfg.ldmt3r);
268
269 /* horizontal configuration */
270 tmp = lcd_cfg->xres + lcd_cfg->hsync_len;
271 tmp += lcd_cfg->left_margin;
272 tmp += lcd_cfg->right_margin;
273 tmp /= 8; /* HTCN */
274 tmp |= (lcd_cfg->xres / 8) << 16; /* HDCN */
275 lcdc_write_chan(ch, LDHCNR, tmp);
276
277 tmp = lcd_cfg->xres;
278 tmp += lcd_cfg->right_margin;
279 tmp /= 8; /* HSYNP */
280 tmp |= (lcd_cfg->hsync_len / 8) << 16; /* HSYNW */
281 lcdc_write_chan(ch, LDHSYNR, tmp);
282
283 /* power supply */
284 lcdc_write_chan(ch, LDPMR, 0);
285
286 /* vertical configuration */
287 tmp = lcd_cfg->yres + lcd_cfg->vsync_len;
288 tmp += lcd_cfg->upper_margin;
289 tmp += lcd_cfg->lower_margin; /* VTLN */
290 tmp |= lcd_cfg->yres << 16; /* VDLN */
291 lcdc_write_chan(ch, LDVLNR, tmp);
292
293 tmp = lcd_cfg->yres;
294 tmp += lcd_cfg->lower_margin; /* VSYNP */
295 tmp |= lcd_cfg->vsync_len << 16; /* VSYNW */
296 lcdc_write_chan(ch, LDVSYNR, tmp);
297
298 board_cfg = &ch->cfg.board_cfg;
299 if (board_cfg->setup_sys)
300 ret = board_cfg->setup_sys(board_cfg->board_data, ch,
301 &sh_mobile_lcdc_sys_bus_ops);
302 if (ret)
303 return ret;
304 }
305
306 /* --- display_lcdc_data() --- */
307 lcdc_write(priv, _LDINTR, 0x00000f00);
308
309 /* word and long word swap */
310 lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 6);
311
312 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
313 ch = &priv->ch[k];
314
315 if (!priv->ch[k].enabled)
316 continue;
317
318 /* set bpp format in PKF[4:0] */
319 tmp = lcdc_read_chan(ch, LDDFR);
320 tmp &= ~(0x0001001f);
321 tmp |= (priv->ch[k].info.var.bits_per_pixel == 16) ? 3 : 0;
322 lcdc_write_chan(ch, LDDFR, tmp);
323
324 /* point out our frame buffer */
325 lcdc_write_chan(ch, LDSA1R, ch->info.fix.smem_start);
326
327 /* set line size */
328 lcdc_write_chan(ch, LDMLSR, ch->info.fix.line_length);
329
330 /* continuous read mode */
331 lcdc_write_chan(ch, LDSM1R, 0);
332 }
333
334 /* display output */
335 lcdc_write(priv, _LDCNT1R, LCDC_ENABLE);
336
337 /* start the lcdc */
338 sh_mobile_lcdc_start_stop(priv, 1);
339
340 /* tell the board code to enable the panel */
341 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
342 ch = &priv->ch[k];
343 board_cfg = &ch->cfg.board_cfg;
344 if (board_cfg->display_on)
345 board_cfg->display_on(board_cfg->board_data);
346 }
347
348 return 0;
349}
350
351static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
352{
353 struct sh_mobile_lcdc_chan *ch;
354 struct sh_mobile_lcdc_board_cfg *board_cfg;
355 int k;
356
357 /* tell the board code to disable the panel */
358 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
359 ch = &priv->ch[k];
360 board_cfg = &ch->cfg.board_cfg;
361 if (board_cfg->display_off)
362 board_cfg->display_off(board_cfg->board_data);
363 }
364
365 /* stop the lcdc */
366 sh_mobile_lcdc_start_stop(priv, 0);
367}
368
369static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch)
370{
371 int ifm, miftyp;
372
373 switch (ch->cfg.interface_type) {
374 case RGB8: ifm = 0; miftyp = 0; break;
375 case RGB9: ifm = 0; miftyp = 4; break;
376 case RGB12A: ifm = 0; miftyp = 5; break;
377 case RGB12B: ifm = 0; miftyp = 6; break;
378 case RGB16: ifm = 0; miftyp = 7; break;
379 case RGB18: ifm = 0; miftyp = 10; break;
380 case RGB24: ifm = 0; miftyp = 11; break;
381 case SYS8A: ifm = 1; miftyp = 0; break;
382 case SYS8B: ifm = 1; miftyp = 1; break;
383 case SYS8C: ifm = 1; miftyp = 2; break;
384 case SYS8D: ifm = 1; miftyp = 3; break;
385 case SYS9: ifm = 1; miftyp = 4; break;
386 case SYS12: ifm = 1; miftyp = 5; break;
387 case SYS16A: ifm = 1; miftyp = 7; break;
388 case SYS16B: ifm = 1; miftyp = 8; break;
389 case SYS16C: ifm = 1; miftyp = 9; break;
390 case SYS18: ifm = 1; miftyp = 10; break;
391 case SYS24: ifm = 1; miftyp = 11; break;
392 default: goto bad;
393 }
394
395 /* SUBLCD only supports SYS interface */
396 if (lcdc_chan_is_sublcd(ch)) {
397 if (ifm == 0)
398 goto bad;
399 else
400 ifm = 0;
401 }
402
403 ch->ldmt1r_value = (ifm << 12) | miftyp;
404 return 0;
405 bad:
406 return -EINVAL;
407}
408
409static int sh_mobile_lcdc_setup_clocks(struct device *dev, int clock_source,
410 struct sh_mobile_lcdc_priv *priv)
411{
412 char *str;
413 int icksel;
414
415 switch (clock_source) {
416 case LCDC_CLK_BUS: str = "bus_clk"; icksel = 0; break;
417 case LCDC_CLK_PERIPHERAL: str = "peripheral_clk"; icksel = 1; break;
418 case LCDC_CLK_EXTERNAL: str = NULL; icksel = 2; break;
419 default:
420 return -EINVAL;
421 }
422
423 priv->lddckr = icksel << 16;
424
425 if (str) {
426 priv->clk = clk_get(dev, str);
427 if (IS_ERR(priv->clk)) {
428 dev_err(dev, "cannot get clock %s\n", str);
429 return PTR_ERR(priv->clk);
430 }
431
432 clk_enable(priv->clk);
433 }
434
435 return 0;
436}
437
438static int sh_mobile_lcdc_setcolreg(u_int regno,
439 u_int red, u_int green, u_int blue,
440 u_int transp, struct fb_info *info)
441{
442 u32 *palette = info->pseudo_palette;
443
444 if (regno >= PALETTE_NR)
445 return -EINVAL;
446
447 /* only FB_VISUAL_TRUECOLOR supported */
448
449 red >>= 16 - info->var.red.length;
450 green >>= 16 - info->var.green.length;
451 blue >>= 16 - info->var.blue.length;
452 transp >>= 16 - info->var.transp.length;
453
454 palette[regno] = (red << info->var.red.offset) |
455 (green << info->var.green.offset) |
456 (blue << info->var.blue.offset) |
457 (transp << info->var.transp.offset);
458
459 return 0;
460}
461
462static struct fb_fix_screeninfo sh_mobile_lcdc_fix = {
463 .id = "SH Mobile LCDC",
464 .type = FB_TYPE_PACKED_PIXELS,
465 .visual = FB_VISUAL_TRUECOLOR,
466 .accel = FB_ACCEL_NONE,
467};
468
469static struct fb_ops sh_mobile_lcdc_ops = {
470 .fb_setcolreg = sh_mobile_lcdc_setcolreg,
471 .fb_fillrect = cfb_fillrect,
472 .fb_copyarea = cfb_copyarea,
473 .fb_imageblit = cfb_imageblit,
474};
475
476static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp)
477{
478 switch (bpp) {
479 case 16: /* PKF[4:0] = 00011 - RGB 565 */
480 var->red.offset = 11;
481 var->red.length = 5;
482 var->green.offset = 5;
483 var->green.length = 6;
484 var->blue.offset = 0;
485 var->blue.length = 5;
486 var->transp.offset = 0;
487 var->transp.length = 0;
488 break;
489
490 case 32: /* PKF[4:0] = 00000 - RGB 888
491 * sh7722 pdf says 00RRGGBB but reality is GGBB00RR
492 * this may be because LDDDSR has word swap enabled..
493 */
494 var->red.offset = 0;
495 var->red.length = 8;
496 var->green.offset = 24;
497 var->green.length = 8;
498 var->blue.offset = 16;
499 var->blue.length = 8;
500 var->transp.offset = 0;
501 var->transp.length = 0;
502 break;
503 default:
504 return -EINVAL;
505 }
506 var->bits_per_pixel = bpp;
507 var->red.msb_right = 0;
508 var->green.msb_right = 0;
509 var->blue.msb_right = 0;
510 var->transp.msb_right = 0;
511 return 0;
512}
513
514static int sh_mobile_lcdc_remove(struct platform_device *pdev);
515
516static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
517{
518 struct fb_info *info;
519 struct sh_mobile_lcdc_priv *priv;
520 struct sh_mobile_lcdc_info *pdata;
521 struct sh_mobile_lcdc_chan_cfg *cfg;
522 struct resource *res;
523 int error;
524 void *buf;
525 int i, j;
526
527 if (!pdev->dev.platform_data) {
528 dev_err(&pdev->dev, "no platform data defined\n");
529 error = -EINVAL;
530 goto err0;
531 }
532
533 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
534 if (res == NULL) {
535 dev_err(&pdev->dev, "cannot find IO resource\n");
536 error = -ENOENT;
537 goto err0;
538 }
539
540 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
541 if (!priv) {
542 dev_err(&pdev->dev, "cannot allocate device data\n");
543 error = -ENOMEM;
544 goto err0;
545 }
546
547 platform_set_drvdata(pdev, priv);
548 pdata = pdev->dev.platform_data;
549
550 j = 0;
551 for (i = 0; i < ARRAY_SIZE(pdata->ch); i++) {
552 priv->ch[j].lcdc = priv;
553 memcpy(&priv->ch[j].cfg, &pdata->ch[i], sizeof(pdata->ch[i]));
554
555 error = sh_mobile_lcdc_check_interface(&priv->ch[i]);
556 if (error) {
557 dev_err(&pdev->dev, "unsupported interface type\n");
558 goto err1;
559 }
560
561 switch (pdata->ch[i].chan) {
562 case LCDC_CHAN_MAINLCD:
563 priv->ch[j].enabled = 1 << 1;
564 priv->ch[j].reg_offs = lcdc_offs_mainlcd;
565 j++;
566 break;
567 case LCDC_CHAN_SUBLCD:
568 priv->ch[j].enabled = 1 << 2;
569 priv->ch[j].reg_offs = lcdc_offs_sublcd;
570 j++;
571 break;
572 }
573 }
574
575 if (!j) {
576 dev_err(&pdev->dev, "no channels defined\n");
577 error = -EINVAL;
578 goto err1;
579 }
580
581 error = sh_mobile_lcdc_setup_clocks(&pdev->dev,
582 pdata->clock_source, priv);
583 if (error) {
584 dev_err(&pdev->dev, "unable to setup clocks\n");
585 goto err1;
586 }
587
588 priv->lddckr = pdata->lddckr;
589 priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1);
590
591 for (i = 0; i < j; i++) {
592 info = &priv->ch[i].info;
593 cfg = &priv->ch[i].cfg;
594
595 info->fbops = &sh_mobile_lcdc_ops;
596 info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres;
597 info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres;
598 info->var.activate = FB_ACTIVATE_NOW;
599 error = sh_mobile_lcdc_set_bpp(&info->var, cfg->bpp);
600 if (error)
601 break;
602
603 info->fix = sh_mobile_lcdc_fix;
604 info->fix.line_length = cfg->lcd_cfg.xres * (cfg->bpp / 8);
605 info->fix.smem_len = info->fix.line_length * cfg->lcd_cfg.yres;
606
607 buf = dma_alloc_coherent(&pdev->dev, info->fix.smem_len,
608 &priv->ch[i].dma_handle, GFP_KERNEL);
609 if (!buf) {
610 dev_err(&pdev->dev, "unable to allocate buffer\n");
611 error = -ENOMEM;
612 break;
613 }
614
615 info->pseudo_palette = &priv->ch[i].pseudo_palette;
616 info->flags = FBINFO_FLAG_DEFAULT;
617
618 error = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
619 if (error < 0) {
620 dev_err(&pdev->dev, "unable to allocate cmap\n");
621 dma_free_coherent(&pdev->dev, info->fix.smem_len,
622 buf, priv->ch[i].dma_handle);
623 break;
624 }
625
626 memset(buf, 0, info->fix.smem_len);
627 info->fix.smem_start = priv->ch[i].dma_handle;
628 info->screen_base = buf;
629 info->device = &pdev->dev;
630 }
631
632 if (error)
633 goto err1;
634
635 error = sh_mobile_lcdc_start(priv);
636 if (error) {
637 dev_err(&pdev->dev, "unable to start hardware\n");
638 goto err1;
639 }
640
641 for (i = 0; i < j; i++) {
642 error = register_framebuffer(&priv->ch[i].info);
643 if (error < 0)
644 goto err1;
645 }
646
647 for (i = 0; i < j; i++) {
648 info = &priv->ch[i].info;
649 dev_info(info->dev,
650 "registered %s/%s as %dx%d %dbpp.\n",
651 pdev->name,
652 (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ?
653 "mainlcd" : "sublcd",
654 (int) priv->ch[i].cfg.lcd_cfg.xres,
655 (int) priv->ch[i].cfg.lcd_cfg.yres,
656 priv->ch[i].cfg.bpp);
657 }
658
659 return 0;
660 err1:
661 sh_mobile_lcdc_remove(pdev);
662 err0:
663 return error;
664}
665
666static int sh_mobile_lcdc_remove(struct platform_device *pdev)
667{
668 struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
669 struct fb_info *info;
670 int i;
671
672 for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
673 if (priv->ch[i].info.dev)
674 unregister_framebuffer(&priv->ch[i].info);
675
676 sh_mobile_lcdc_stop(priv);
677
678 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
679 info = &priv->ch[i].info;
680
681 if (!info->device)
682 continue;
683
684 dma_free_coherent(&pdev->dev, info->fix.smem_len,
685 info->screen_base, priv->ch[i].dma_handle);
686 fb_dealloc_cmap(&info->cmap);
687 }
688
689 if (priv->clk) {
690 clk_disable(priv->clk);
691 clk_put(priv->clk);
692 }
693
694 if (priv->base)
695 iounmap(priv->base);
696
697 kfree(priv);
698 return 0;
699}
700
701static struct platform_driver sh_mobile_lcdc_driver = {
702 .driver = {
703 .name = "sh_mobile_lcdc_fb",
704 .owner = THIS_MODULE,
705 },
706 .probe = sh_mobile_lcdc_probe,
707 .remove = sh_mobile_lcdc_remove,
708};
709
710static int __init sh_mobile_lcdc_init(void)
711{
712 return platform_driver_register(&sh_mobile_lcdc_driver);
713}
714
715static void __exit sh_mobile_lcdc_exit(void)
716{
717 platform_driver_unregister(&sh_mobile_lcdc_driver);
718}
719
720module_init(sh_mobile_lcdc_init);
721module_exit(sh_mobile_lcdc_exit);
722
723MODULE_DESCRIPTION("SuperH Mobile LCDC Framebuffer driver");
724MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
725MODULE_LICENSE("GPL v2");
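A pattern worth noting in sh_mobile_lcdcfb: the main and sub LCD channels expose the same logical register set at different MMIO offsets, so each channel keeps a pointer to its offset table (lcdc_offs_mainlcd or lcdc_offs_sublcd) and every per-channel access goes through lcdc_write_chan()/lcdc_read_chan(). A stripped-down sketch of the idea, reduced to two registers with offsets taken from the tables above (struct and enum names hypothetical):

	#include <linux/io.h>

	enum { R_LDMT1R, R_LDDFR, NR_CH_REGS };

	static const unsigned long main_offs[NR_CH_REGS] = { 0x418, 0x424 };
	static const unsigned long sub_offs[NR_CH_REGS]  = { 0x600, 0x60c };

	struct lcdc_chan {
		void __iomem *base;		/* shared LCDC MMIO base */
		const unsigned long *offs;	/* main_offs or sub_offs */
	};

	static void chan_write(struct lcdc_chan *ch, int reg, unsigned long val)
	{
		iowrite32(val, ch->base + ch->offs[reg]);
	}

	static unsigned long chan_read(struct lcdc_chan *ch, int reg)
	{
		return ioread32(ch->base + ch->offs[reg]);
	}
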
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h
index f40a680df86f..b96005c39c67 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/sis/init.h
@@ -73,7 +73,6 @@
73#ifdef SIS_CP 73#ifdef SIS_CP
74#undef SIS_CP 74#undef SIS_CP
75#endif 75#endif
76#include <linux/version.h>
77#include <linux/types.h> 76#include <linux/types.h>
78#include <asm/io.h> 77#include <asm/io.h>
79#include <linux/fb.h> 78#include <linux/fb.h>
diff --git a/drivers/video/sis/init301.h b/drivers/video/sis/init301.h
index 7708e1e1d99e..51d99222375d 100644
--- a/drivers/video/sis/init301.h
+++ b/drivers/video/sis/init301.h
@@ -67,7 +67,6 @@
67#ifdef SIS_CP 67#ifdef SIS_CP
68#undef SIS_CP 68#undef SIS_CP
69#endif 69#endif
70#include <linux/version.h>
71#include <linux/types.h> 70#include <linux/types.h>
72#include <asm/io.h> 71#include <asm/io.h>
73#include <linux/fb.h> 72#include <linux/fb.h>
diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/sis/initextlfb.c
index 47a33501549d..99c04a4855d1 100644
--- a/drivers/video/sis/initextlfb.c
+++ b/drivers/video/sis/initextlfb.c
@@ -30,7 +30,6 @@
30#include "vgatypes.h" 30#include "vgatypes.h"
31#include "vstruct.h" 31#include "vstruct.h"
32 32
33#include <linux/version.h>
34#include <linux/types.h> 33#include <linux/types.h>
35#include <linux/fb.h> 34#include <linux/fb.h>
36 35
diff --git a/drivers/video/sis/osdef.h b/drivers/video/sis/osdef.h
index c1492782cb18..6ff8f988a1a7 100644
--- a/drivers/video/sis/osdef.h
+++ b/drivers/video/sis/osdef.h
@@ -87,7 +87,6 @@
87/**********************************************************************/ 87/**********************************************************************/
88 88
89#ifdef SIS_LINUX_KERNEL 89#ifdef SIS_LINUX_KERNEL
90#include <linux/version.h>
91 90
92#ifdef CONFIG_FB_SIS_300 91#ifdef CONFIG_FB_SIS_300
93#define SIS300 92#define SIS300
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index a14e82211037..7c5710e3fb56 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -24,8 +24,6 @@
24#ifndef _SIS_H_ 24#ifndef _SIS_H_
25#define _SIS_H_ 25#define _SIS_H_
26 26
27#include <linux/version.h>
28
29#include "osdef.h" 27#include "osdef.h"
30#include <video/sisfb.h> 28#include <video/sisfb.h>
31 29
@@ -42,16 +40,6 @@
42#define SIS_NEW_CONFIG_COMPAT 40#define SIS_NEW_CONFIG_COMPAT
43#endif /* CONFIG_COMPAT */ 41#endif /* CONFIG_COMPAT */
44 42
45#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
46#define SIS_IOTYPE1 void __iomem
47#define SIS_IOTYPE2 __iomem
48#define SISINITSTATIC static
49#else
50#define SIS_IOTYPE1 unsigned char
51#define SIS_IOTYPE2
52#define SISINITSTATIC
53#endif
54
55#undef SISFBDEBUG 43#undef SISFBDEBUG
56 44
57#ifdef SISFBDEBUG 45#ifdef SISFBDEBUG
@@ -505,8 +493,8 @@ struct sis_video_info {
505 493
506 unsigned long UMAsize, LFBsize; 494 unsigned long UMAsize, LFBsize;
507 495
508 SIS_IOTYPE1 *video_vbase; 496 void __iomem *video_vbase;
509 SIS_IOTYPE1 *mmio_vbase; 497 void __iomem *mmio_vbase;
510 498
511 unsigned char *bios_abase; 499 unsigned char *bios_abase;
512 500
@@ -533,8 +521,8 @@ struct sis_video_info {
533 int sisfb_nocrt2rate; 521 int sisfb_nocrt2rate;
534 522
535 u32 heapstart; /* offset */ 523 u32 heapstart; /* offset */
536 SIS_IOTYPE1 *sisfb_heap_start; /* address */ 524 void __iomem *sisfb_heap_start; /* address */
537 SIS_IOTYPE1 *sisfb_heap_end; /* address */ 525 void __iomem *sisfb_heap_end; /* address */
538 u32 sisfb_heap_size; 526 u32 sisfb_heap_size;
539 int havenoheap; 527 int havenoheap;
540 528
@@ -612,7 +600,7 @@ struct sis_video_info {
612 u8 detectedpdca; 600 u8 detectedpdca;
613 u8 detectedlcda; 601 u8 detectedlcda;
614 602
615 SIS_IOTYPE1 *hwcursor_vbase; 603 void __iomem *hwcursor_vbase;
616 604
617 int chronteltype; 605 int chronteltype;
618 int tvxpos, tvypos; 606 int tvxpos, tvypos;
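
The sis.h change above drops the SIS_IOTYPE1/SIS_IOTYPE2 compatibility macros and uses void __iomem pointers directly, so sparse can verify that MMIO is only touched through the accessor helpers. A small sketch of that usage pattern; the register offsets are made up for illustration:

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: map an MMIO window and touch it only through readl()/writel(). */
static int example_poke_regs(unsigned long phys_base, unsigned long len)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(phys_base, len);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);	/* hypothetical enable register */
	status = readl(regs + 0x08);	/* hypothetical status register */
	iounmap(regs);

	return status ? 0 : -EIO;
}
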
diff --git a/drivers/video/sis/sis_accel.c b/drivers/video/sis/sis_accel.c
index 7addf91d2fea..ceb434c95c0d 100644
--- a/drivers/video/sis/sis_accel.c
+++ b/drivers/video/sis/sis_accel.c
@@ -28,7 +28,6 @@
28 * for more information and updates) 28 * for more information and updates)
29 */ 29 */
30 30
31#include <linux/version.h>
32#include <linux/module.h> 31#include <linux/module.h>
33#include <linux/kernel.h> 32#include <linux/kernel.h>
34#include <linux/fb.h> 33#include <linux/fb.h>
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index b9343844cd1f..346d6458cf76 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -33,7 +33,6 @@
33 * 33 *
34 */ 34 */
35 35
36#include <linux/version.h>
37#include <linux/module.h> 36#include <linux/module.h>
38#include <linux/moduleparam.h> 37#include <linux/moduleparam.h>
39#include <linux/kernel.h> 38#include <linux/kernel.h>
@@ -41,13 +40,7 @@
41#include <linux/errno.h> 40#include <linux/errno.h>
42#include <linux/string.h> 41#include <linux/string.h>
43#include <linux/mm.h> 42#include <linux/mm.h>
44
45#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
46#include <linux/tty.h>
47#else
48#include <linux/screen_info.h> 43#include <linux/screen_info.h>
49#endif
50
51#include <linux/slab.h> 44#include <linux/slab.h>
52#include <linux/fb.h> 45#include <linux/fb.h>
53#include <linux/selection.h> 46#include <linux/selection.h>
@@ -1167,11 +1160,7 @@ sisfb_set_mode(struct sis_video_info *ivideo, int clrscrn)
1167 unsigned short modeno = ivideo->mode_no; 1160 unsigned short modeno = ivideo->mode_no;
1168 1161
1169 /* >=2.6.12's fbcon clears the screen anyway */ 1162 /* >=2.6.12's fbcon clears the screen anyway */
1170#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
1171 if(!clrscrn) modeno |= 0x80;
1172#else
1173 modeno |= 0x80; 1163 modeno |= 0x80;
1174#endif
1175 1164
1176 outSISIDXREG(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD); 1165 outSISIDXREG(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD);
1177 1166
@@ -1436,11 +1425,8 @@ sisfb_set_par(struct fb_info *info)
1436 if((err = sisfb_do_set_var(&info->var, 1, info))) 1425 if((err = sisfb_do_set_var(&info->var, 1, info)))
1437 return err; 1426 return err;
1438 1427
1439#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
1440 sisfb_get_fix(&info->fix, info->currcon, info);
1441#else
1442 sisfb_get_fix(&info->fix, -1, info); 1428 sisfb_get_fix(&info->fix, -1, info);
1443#endif 1429
1444 return 0; 1430 return 0;
1445} 1431}
1446 1432
@@ -1676,14 +1662,8 @@ sisfb_blank(int blank, struct fb_info *info)
1676 1662
1677/* ----------- FBDev related routines for all series ---------- */ 1663/* ----------- FBDev related routines for all series ---------- */
1678 1664
1679#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
1680static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, 1665static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
1681 unsigned long arg) 1666 unsigned long arg)
1682#else
1683static int sisfb_ioctl(struct inode *inode, struct file *file,
1684 unsigned int cmd, unsigned long arg,
1685 struct fb_info *info)
1686#endif
1687{ 1667{
1688 struct sis_video_info *ivideo = (struct sis_video_info *)info->par; 1668 struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
1689 struct sis_memreq sismemreq; 1669 struct sis_memreq sismemreq;
@@ -3986,8 +3966,7 @@ sisfb_handle_command(struct sis_video_info *ivideo, struct sisfb_cmd *sisfb_comm
3986} 3966}
3987 3967
3988#ifndef MODULE 3968#ifndef MODULE
3989SISINITSTATIC int __init 3969static int __init sisfb_setup(char *options)
3990sisfb_setup(char *options)
3991{ 3970{
3992 char *this_opt; 3971 char *this_opt;
3993 3972
@@ -4086,9 +4065,9 @@ sisfb_setup(char *options)
4086#endif 4065#endif
4087 4066
4088static int __devinit 4067static int __devinit
4089sisfb_check_rom(SIS_IOTYPE1 *rom_base, struct sis_video_info *ivideo) 4068sisfb_check_rom(void __iomem *rom_base, struct sis_video_info *ivideo)
4090{ 4069{
4091 SIS_IOTYPE1 *rom; 4070 void __iomem *rom;
4092 int romptr; 4071 int romptr;
4093 4072
4094 if((readb(rom_base) != 0x55) || (readb(rom_base + 1) != 0xaa)) 4073 if((readb(rom_base) != 0x55) || (readb(rom_base + 1) != 0xaa))
@@ -4117,10 +4096,9 @@ static unsigned char * __devinit
4117sisfb_find_rom(struct pci_dev *pdev) 4096sisfb_find_rom(struct pci_dev *pdev)
4118{ 4097{
4119 struct sis_video_info *ivideo = pci_get_drvdata(pdev); 4098 struct sis_video_info *ivideo = pci_get_drvdata(pdev);
4120 SIS_IOTYPE1 *rom_base; 4099 void __iomem *rom_base;
4121 unsigned char *myrombase = NULL; 4100 unsigned char *myrombase = NULL;
4122 u32 temp; 4101 u32 temp;
4123#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
4124 size_t romsize; 4102 size_t romsize;
4125 4103
4126 /* First, try the official pci ROM functions (except 4104 /* First, try the official pci ROM functions (except
@@ -4151,7 +4129,6 @@ sisfb_find_rom(struct pci_dev *pdev)
4151 } 4129 }
4152 4130
4153 if(myrombase) return myrombase; 4131 if(myrombase) return myrombase;
4154#endif
4155 4132
4156 /* Otherwise do it the conventional way. */ 4133 /* Otherwise do it the conventional way. */
4157 4134
@@ -4225,7 +4202,7 @@ sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize,
4225static int __devinit 4202static int __devinit
4226sisfb_post_300_buswidth(struct sis_video_info *ivideo) 4203sisfb_post_300_buswidth(struct sis_video_info *ivideo)
4227{ 4204{
4228 SIS_IOTYPE1 *FBAddress = ivideo->video_vbase; 4205 void __iomem *FBAddress = ivideo->video_vbase;
4229 unsigned short temp; 4206 unsigned short temp;
4230 unsigned char reg; 4207 unsigned char reg;
4231 int i, j; 4208 int i, j;
@@ -4273,7 +4250,7 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
4273 int PseudoRankCapacity, int PseudoAdrPinCount, 4250 int PseudoRankCapacity, int PseudoAdrPinCount,
4274 unsigned int mapsize) 4251 unsigned int mapsize)
4275{ 4252{
4276 SIS_IOTYPE1 *FBAddr = ivideo->video_vbase; 4253 void __iomem *FBAddr = ivideo->video_vbase;
4277 unsigned short sr14; 4254 unsigned short sr14;
4278 unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid; 4255 unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
4279 unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage; 4256 unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
@@ -5829,7 +5806,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5829 ivideo->engineok = 0; 5806 ivideo->engineok = 0;
5830 5807
5831 ivideo->sisfb_was_boot_device = 0; 5808 ivideo->sisfb_was_boot_device = 0;
5832#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)) 5809
5833 if(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) { 5810 if(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) {
5834 if(ivideo->sisvga_enabled) 5811 if(ivideo->sisvga_enabled)
5835 ivideo->sisfb_was_boot_device = 1; 5812 ivideo->sisfb_was_boot_device = 1;
@@ -5840,7 +5817,6 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5840 "as the primary VGA device\n"); 5817 "as the primary VGA device\n");
5841 } 5818 }
5842 } 5819 }
5843#endif
5844 5820
5845 ivideo->sisfb_parm_mem = sisfb_parm_mem; 5821 ivideo->sisfb_parm_mem = sisfb_parm_mem;
5846 ivideo->sisfb_accel = sisfb_accel; 5822 ivideo->sisfb_accel = sisfb_accel;
@@ -6010,7 +5986,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6010 ivideo->modeprechange = reg & 0x7f; 5986 ivideo->modeprechange = reg & 0x7f;
6011 } else if(ivideo->sisvga_enabled) { 5987 } else if(ivideo->sisvga_enabled) {
6012#if defined(__i386__) || defined(__x86_64__) 5988#if defined(__i386__) || defined(__x86_64__)
6013 unsigned char SIS_IOTYPE2 *tt = ioremap(0x400, 0x100); 5989 unsigned char __iomem *tt = ioremap(0x400, 0x100);
6014 if(tt) { 5990 if(tt) {
6015 ivideo->modeprechange = readb(tt + 0x49); 5991 ivideo->modeprechange = readb(tt + 0x49);
6016 iounmap(tt); 5992 iounmap(tt);
@@ -6503,7 +6479,7 @@ static struct pci_driver sisfb_driver = {
6503 .remove = __devexit_p(sisfb_remove) 6479 .remove = __devexit_p(sisfb_remove)
6504}; 6480};
6505 6481
6506SISINITSTATIC int __init sisfb_init(void) 6482static int __init sisfb_init(void)
6507{ 6483{
6508#ifndef MODULE 6484#ifndef MODULE
6509 char *options = NULL; 6485 char *options = NULL;
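
Among other cleanups, the sis_main.c hunks above drop the pre-2.6.11 fallback and rely on the official PCI ROM helpers unconditionally. A hedged sketch of that API in isolation (hypothetical wrapper, not the sisfb code):

#include <linux/pci.h>
#include <linux/io.h>

/* Sketch: check the 0x55AA expansion-ROM signature via pci_map_rom(). */
static int example_rom_has_signature(struct pci_dev *pdev)
{
	size_t romsize;
	void __iomem *rom = pci_map_rom(pdev, &romsize);
	int ok;

	if (!rom)
		return 0;

	ok = romsize >= 2 &&
	     readb(rom) == 0x55 && readb(rom + 1) == 0xaa;
	pci_unmap_rom(pdev, rom);

	return ok;
}
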
diff --git a/drivers/video/sis/sis_main.h b/drivers/video/sis/sis_main.h
index 3e3b7fa05d6c..9540e977270e 100644
--- a/drivers/video/sis/sis_main.h
+++ b/drivers/video/sis/sis_main.h
@@ -665,11 +665,11 @@ static struct _customttable {
665 665
666/* Interface used by the world */ 666/* Interface used by the world */
667#ifndef MODULE 667#ifndef MODULE
668SISINITSTATIC int sisfb_setup(char *options); 668static int sisfb_setup(char *options);
669#endif 669#endif
670 670
671/* Interface to the low level console driver */ 671/* Interface to the low level console driver */
672SISINITSTATIC int sisfb_init(void); 672static int sisfb_init(void);
673 673
674/* fbdev routines */ 674/* fbdev routines */
675static int sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, 675static int sisfb_get_fix(struct fb_fix_screeninfo *fix, int con,
diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/sis/vgatypes.h
index b532fbd2b04c..81a22eaabfde 100644
--- a/drivers/video/sis/vgatypes.h
+++ b/drivers/video/sis/vgatypes.h
@@ -53,10 +53,6 @@
53#ifndef _VGATYPES_H_ 53#ifndef _VGATYPES_H_
54#define _VGATYPES_H_ 54#define _VGATYPES_H_
55 55
56#ifdef SIS_LINUX_KERNEL
57#include <linux/version.h>
58#endif
59
60#define SISIOMEMTYPE 56#define SISIOMEMTYPE
61 57
62#ifdef SIS_LINUX_KERNEL 58#ifdef SIS_LINUX_KERNEL
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 62321458f71a..df5336561d13 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -675,13 +675,13 @@ static struct fb_ops xxxfb_ops = {
675 * Initialization 675 * Initialization
676 */ 676 */
677 677
 678/* static int __init xxfb_probe (struct device *device) -- for platform devs */ 678/* static int __init xxxfb_probe (struct platform_device *pdev) -- for platform devs */
679static int __devinit xxxfb_probe(struct pci_dev *dev, 679static int __devinit xxxfb_probe(struct pci_dev *dev,
680 const struct pci_device_id *ent) 680 const struct pci_device_id *ent)
681{ 681{
682 struct fb_info *info; 682 struct fb_info *info;
683 struct xxx_par *par; 683 struct xxx_par *par;
684 struct device* device = &dev->dev; /* for pci drivers */ 684 struct device *device = &dev->dev; /* or &pdev->dev */
685 int cmap_len, retval; 685 int cmap_len, retval;
686 686
687 /* 687 /*
@@ -824,18 +824,18 @@ static int __devinit xxxfb_probe(struct pci_dev *dev,
824 return -EINVAL; 824 return -EINVAL;
825 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, 825 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
826 info->fix.id); 826 info->fix.id);
827 pci_set_drvdata(dev, info); /* or dev_set_drvdata(device, info) */ 827 pci_set_drvdata(dev, info); /* or platform_set_drvdata(pdev, info) */
828 return 0; 828 return 0;
829} 829}
830 830
831 /* 831 /*
832 * Cleanup 832 * Cleanup
833 */ 833 */
834/* static void __devexit xxxfb_remove(struct device *device) */ 834/* static void __devexit xxxfb_remove(struct platform_device *pdev) */
835static void __devexit xxxfb_remove(struct pci_dev *dev) 835static void __devexit xxxfb_remove(struct pci_dev *dev)
836{ 836{
837 struct fb_info *info = pci_get_drvdata(dev); 837 struct fb_info *info = pci_get_drvdata(dev);
838 /* or dev_get_drvdata(device); */ 838 /* or platform_get_drvdata(pdev); */
839 839
840 if (info) { 840 if (info) {
841 unregister_framebuffer(info); 841 unregister_framebuffer(info);
@@ -961,18 +961,17 @@ static int xxxfb_resume(struct platform_dev *dev)
961#define xxxfb_resume NULL 961#define xxxfb_resume NULL
962#endif /* CONFIG_PM */ 962#endif /* CONFIG_PM */
963 963
 964static struct device_driver xxxfb_driver = { 964static struct platform_driver xxxfb_driver = {
965 .name = "xxxfb",
966 .bus = &platform_bus_type,
967 .probe = xxxfb_probe, 965 .probe = xxxfb_probe,
968 .remove = xxxfb_remove, 966 .remove = xxxfb_remove,
969 .suspend = xxxfb_suspend, /* optional but recommended */ 967 .suspend = xxxfb_suspend, /* optional but recommended */
970 .resume = xxxfb_resume, /* optional but recommended */ 968 .resume = xxxfb_resume, /* optional but recommended */
969 .driver = {
970 .name = "xxxfb",
971 },
971}; 972};
972 973
973static struct platform_device xxxfb_device = { 974static struct platform_device *xxxfb_device;
974 .name = "xxxfb",
975};
976 975
977#ifndef MODULE 976#ifndef MODULE
978 /* 977 /*
@@ -1002,12 +1001,16 @@ static int __init xxxfb_init(void)
1002 return -ENODEV; 1001 return -ENODEV;
1003 xxxfb_setup(option); 1002 xxxfb_setup(option);
1004#endif 1003#endif
1005 ret = driver_register(&xxxfb_driver); 1004 ret = platform_driver_register(&xxxfb_driver);
1006 1005
1007 if (!ret) { 1006 if (!ret) {
1008 ret = platform_device_register(&xxxfb_device); 1007 xxxfb_device = platform_device_register_simple("xxxfb", 0,
1009 if (ret) 1008 NULL, 0);
1010 driver_unregister(&xxxfb_driver); 1009
1010 if (IS_ERR(xxxfb_device)) {
1011 platform_driver_unregister(&xxxfb_driver);
1012 ret = PTR_ERR(xxxfb_device);
1013 }
1011 } 1014 }
1012 1015
1013 return ret; 1016 return ret;
@@ -1015,8 +1018,8 @@ static int __init xxxfb_init(void)
1015 1018
1016static void __exit xxxfb_exit(void) 1019static void __exit xxxfb_exit(void)
1017{ 1020{
1018 platform_device_unregister(&xxxfb_device); 1021 platform_device_unregister(xxxfb_device);
1019 driver_unregister(&xxxfb_driver); 1022 platform_driver_unregister(&xxxfb_driver);
1020} 1023}
1021#endif /* CONFIG_PCI */ 1024#endif /* CONFIG_PCI */
1022 1025
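
The skeletonfb init/exit rework above replaces the legacy device_driver plus static platform_device pair with platform_driver_register() and platform_device_register_simple(), checking the returned device with IS_ERR(). A condensed, stand-alone sketch of that registration pattern; the "examplefb" name and the empty probe/remove are placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>

static struct platform_device *example_device;

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* placeholder */
}

static int example_remove(struct platform_device *pdev)
{
	return 0;	/* placeholder */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name = "examplefb",
	},
};

static int __init example_init(void)
{
	int ret = platform_driver_register(&example_driver);

	if (ret)
		return ret;

	example_device = platform_device_register_simple("examplefb", 0,
							 NULL, 0);
	if (IS_ERR(example_device)) {
		platform_driver_unregister(&example_driver);
		ret = PTR_ERR(example_device);
	}
	return ret;
}

static void __exit example_exit(void)
{
	platform_device_unregister(example_device);
	platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
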
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 15d4a768b1f6..f94ae84a58cd 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -48,10 +48,15 @@ enum sm501_controller {
48 HEAD_PANEL = 1, 48 HEAD_PANEL = 1,
49}; 49};
50 50
51/* SM501 memory address */ 51/* SM501 memory address.
52 *
53 * This structure is used to track memory usage within the SM501 framebuffer
54 * allocation. The sm_addr field is stored as an offset as it is often used
55 * against both the physical and mapped addresses.
56 */
52struct sm501_mem { 57struct sm501_mem {
53 unsigned long size; 58 unsigned long size;
54 unsigned long sm_addr; 59 unsigned long sm_addr; /* offset from base of sm501 fb. */
55 void __iomem *k_addr; 60 void __iomem *k_addr;
56}; 61};
57 62
@@ -142,31 +147,68 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
142static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, 147static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
143 unsigned int why, size_t size) 148 unsigned int why, size_t size)
144{ 149{
145 unsigned int ptr = 0; 150 struct sm501fb_par *par;
151 struct fb_info *fbi;
152 unsigned int ptr;
153 unsigned int end;
146 154
147 switch (why) { 155 switch (why) {
148 case SM501_MEMF_CURSOR: 156 case SM501_MEMF_CURSOR:
149 ptr = inf->fbmem_len - size; 157 ptr = inf->fbmem_len - size;
150 inf->fbmem_len = ptr; 158 inf->fbmem_len = ptr; /* adjust available memory. */
151 break; 159 break;
152 160
153 case SM501_MEMF_PANEL: 161 case SM501_MEMF_PANEL:
154 ptr = inf->fbmem_len - size; 162 ptr = inf->fbmem_len - size;
155 if (ptr < inf->fb[0]->fix.smem_len) 163 fbi = inf->fb[HEAD_CRT];
164
 165 /* round down; some programs such as directfb do not draw
166 * 0,0 correctly unless the start is aligned to a page start.
167 */
168
169 if (ptr > 0)
170 ptr &= ~(PAGE_SIZE - 1);
171
172 if (fbi && ptr < fbi->fix.smem_len)
173 return -ENOMEM;
174
175 if (ptr < 0)
156 return -ENOMEM; 176 return -ENOMEM;
157 177
158 break; 178 break;
159 179
160 case SM501_MEMF_CRT: 180 case SM501_MEMF_CRT:
161 ptr = 0; 181 ptr = 0;
182
183 /* check to see if we have panel memory allocated
184 * which would put an limit on available memory. */
185
186 fbi = inf->fb[HEAD_PANEL];
187 if (fbi) {
188 par = fbi->par;
189 end = par->screen.k_addr ? par->screen.sm_addr : inf->fbmem_len;
190 } else
191 end = inf->fbmem_len;
192
193 if ((ptr + size) > end)
194 return -ENOMEM;
195
162 break; 196 break;
163 197
164 case SM501_MEMF_ACCEL: 198 case SM501_MEMF_ACCEL:
165 ptr = inf->fb[0]->fix.smem_len; 199 fbi = inf->fb[HEAD_CRT];
200 ptr = fbi ? fbi->fix.smem_len : 0;
201
202 fbi = inf->fb[HEAD_PANEL];
203 if (fbi) {
204 par = fbi->par;
205 end = par->screen.sm_addr;
206 } else
207 end = inf->fbmem_len;
166 208
167 if ((ptr + size) > 209 if ((ptr + size) > end)
168 (inf->fb[1]->fix.smem_start - inf->fbmem_res->start))
169 return -ENOMEM; 210 return -ENOMEM;
211
170 break; 212 break;
171 213
172 default: 214 default:
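
The SM501_MEMF_PANEL case above places the panel framebuffer as high as possible and rounds its start down to a page boundary before checking that it still clears the CRT framebuffer. A toy helper expressing just that placement arithmetic, assuming the limit is already known to exceed the size:

#include <linux/mm.h>	/* PAGE_SIZE */

/*
 * Toy example: place a 'size'-byte region as high as possible below
 * 'limit', rounded down to a page boundary.  PAGE_SIZE is assumed to
 * be a power of two and limit >= size must hold.
 */
static unsigned long place_high(unsigned long limit, unsigned long size)
{
	unsigned long ptr = limit - size;

	ptr &= ~(PAGE_SIZE - 1);	/* round down to page start */
	return ptr;
}
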
@@ -663,15 +705,25 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
663 sm501fb_sync_regs(fbi); 705 sm501fb_sync_regs(fbi);
664 mdelay(10); 706 mdelay(10);
665 707
708 /* VBIASEN */
709
666 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) { 710 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) {
667 control |= SM501_DC_PANEL_CONTROL_BIAS; /* VBIASEN */ 711 if (pd->flags & SM501FB_FLAG_PANEL_INV_VBIASEN)
712 control &= ~SM501_DC_PANEL_CONTROL_BIAS;
713 else
714 control |= SM501_DC_PANEL_CONTROL_BIAS;
715
668 writel(control, ctrl_reg); 716 writel(control, ctrl_reg);
669 sm501fb_sync_regs(fbi); 717 sm501fb_sync_regs(fbi);
670 mdelay(10); 718 mdelay(10);
671 } 719 }
672 720
673 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) { 721 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) {
674 control |= SM501_DC_PANEL_CONTROL_FPEN; 722 if (pd->flags & SM501FB_FLAG_PANEL_INV_FPEN)
723 control &= ~SM501_DC_PANEL_CONTROL_FPEN;
724 else
725 control |= SM501_DC_PANEL_CONTROL_FPEN;
726
675 writel(control, ctrl_reg); 727 writel(control, ctrl_reg);
676 sm501fb_sync_regs(fbi); 728 sm501fb_sync_regs(fbi);
677 mdelay(10); 729 mdelay(10);
@@ -679,14 +731,22 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
679 } else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) { 731 } else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) {
680 /* disable panel power */ 732 /* disable panel power */
681 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) { 733 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) {
682 control &= ~SM501_DC_PANEL_CONTROL_FPEN; 734 if (pd->flags & SM501FB_FLAG_PANEL_INV_FPEN)
735 control |= SM501_DC_PANEL_CONTROL_FPEN;
736 else
737 control &= ~SM501_DC_PANEL_CONTROL_FPEN;
738
683 writel(control, ctrl_reg); 739 writel(control, ctrl_reg);
684 sm501fb_sync_regs(fbi); 740 sm501fb_sync_regs(fbi);
685 mdelay(10); 741 mdelay(10);
686 } 742 }
687 743
688 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) { 744 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) {
689 control &= ~SM501_DC_PANEL_CONTROL_BIAS; 745 if (pd->flags & SM501FB_FLAG_PANEL_INV_VBIASEN)
746 control |= SM501_DC_PANEL_CONTROL_BIAS;
747 else
748 control &= ~SM501_DC_PANEL_CONTROL_BIAS;
749
690 writel(control, ctrl_reg); 750 writel(control, ctrl_reg);
691 sm501fb_sync_regs(fbi); 751 sm501fb_sync_regs(fbi);
692 mdelay(10); 752 mdelay(10);
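
The panel power sequencing above now honours the SM501FB_FLAG_PANEL_INV_* platform flags by inverting the sense of each control bit. A small helper capturing that pattern (hypothetical name, not part of the driver):

#include <linux/types.h>

/*
 * Return 'control' with 'bit' driven to the logical state 'assert',
 * taking an active-low ('inverted') line into account.
 */
static u32 example_drive_bit(u32 control, u32 bit, bool assert, bool inverted)
{
	if (assert != inverted)
		control |= bit;
	else
		control &= ~bit;
	return control;
}
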
@@ -1210,39 +1270,6 @@ static struct fb_ops sm501fb_ops_pnl = {
1210 .fb_imageblit = cfb_imageblit, 1270 .fb_imageblit = cfb_imageblit,
1211}; 1271};
1212 1272
1213/* sm501fb_info_alloc
1214 *
1215 * creates and initialises an sm501fb_info structure
1216*/
1217
1218static struct sm501fb_info *sm501fb_info_alloc(struct fb_info *fbinfo_crt,
1219 struct fb_info *fbinfo_pnl)
1220{
1221 struct sm501fb_info *info;
1222 struct sm501fb_par *par;
1223
1224 info = kzalloc(sizeof(struct sm501fb_info), GFP_KERNEL);
1225 if (info) {
1226 /* set the references back */
1227
1228 par = fbinfo_crt->par;
1229 par->info = info;
1230 par->head = HEAD_CRT;
1231 fbinfo_crt->pseudo_palette = &par->pseudo_palette;
1232
1233 par = fbinfo_pnl->par;
1234 par->info = info;
1235 par->head = HEAD_PANEL;
1236 fbinfo_pnl->pseudo_palette = &par->pseudo_palette;
1237
1238 /* store the two fbs into our info */
1239 info->fb[HEAD_CRT] = fbinfo_crt;
1240 info->fb[HEAD_PANEL] = fbinfo_pnl;
1241 }
1242
1243 return info;
1244}
1245
1246/* sm501_init_cursor 1273/* sm501_init_cursor
1247 * 1274 *
1248 * initialise hw cursor parameters 1275 * initialise hw cursor parameters
@@ -1250,10 +1277,16 @@ static struct sm501fb_info *sm501fb_info_alloc(struct fb_info *fbinfo_crt,
1250 1277
1251static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base) 1278static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
1252{ 1279{
1253 struct sm501fb_par *par = fbi->par; 1280 struct sm501fb_par *par;
1254 struct sm501fb_info *info = par->info; 1281 struct sm501fb_info *info;
1255 int ret; 1282 int ret;
1256 1283
1284 if (fbi == NULL)
1285 return 0;
1286
1287 par = fbi->par;
1288 info = par->info;
1289
1257 par->cursor_regs = info->regs + reg_base; 1290 par->cursor_regs = info->regs + reg_base;
1258 1291
1259 ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024); 1292 ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024);
@@ -1281,13 +1314,10 @@ static int sm501fb_start(struct sm501fb_info *info,
1281 struct platform_device *pdev) 1314 struct platform_device *pdev)
1282{ 1315{
1283 struct resource *res; 1316 struct resource *res;
1284 struct device *dev; 1317 struct device *dev = &pdev->dev;
1285 int k; 1318 int k;
1286 int ret; 1319 int ret;
1287 1320
1288 info->dev = dev = &pdev->dev;
1289 platform_set_drvdata(pdev, info);
1290
1291 info->irq = ret = platform_get_irq(pdev, 0); 1321 info->irq = ret = platform_get_irq(pdev, 0);
1292 if (ret < 0) { 1322 if (ret < 0) {
1293 /* we currently do not use the IRQ */ 1323 /* we currently do not use the IRQ */
@@ -1390,11 +1420,6 @@ static void sm501fb_stop(struct sm501fb_info *info)
1390 kfree(info->regs_res); 1420 kfree(info->regs_res);
1391} 1421}
1392 1422
1393static void sm501fb_info_release(struct sm501fb_info *info)
1394{
1395 kfree(info);
1396}
1397
1398static int sm501fb_init_fb(struct fb_info *fb, 1423static int sm501fb_init_fb(struct fb_info *fb,
1399 enum sm501_controller head, 1424 enum sm501_controller head,
1400 const char *fbname) 1425 const char *fbname)
@@ -1539,36 +1564,93 @@ static struct sm501_platdata_fb sm501fb_def_pdata = {
1539static char driver_name_crt[] = "sm501fb-crt"; 1564static char driver_name_crt[] = "sm501fb-crt";
1540static char driver_name_pnl[] = "sm501fb-panel"; 1565static char driver_name_pnl[] = "sm501fb-panel";
1541 1566
1542static int __init sm501fb_probe(struct platform_device *pdev) 1567static int __devinit sm501fb_probe_one(struct sm501fb_info *info,
1568 enum sm501_controller head)
1543{ 1569{
1544 struct sm501fb_info *info; 1570 unsigned char *name = (head == HEAD_CRT) ? "crt" : "panel";
1545 struct device *dev = &pdev->dev; 1571 struct sm501_platdata_fbsub *pd;
1546 struct fb_info *fbinfo_crt; 1572 struct sm501fb_par *par;
1547 struct fb_info *fbinfo_pnl; 1573 struct fb_info *fbi;
1548 int ret;
1549 1574
1550 /* allocate our framebuffers */ 1575 pd = (head == HEAD_CRT) ? info->pdata->fb_crt : info->pdata->fb_pnl;
1576
1577 /* Do not initialise if we've not been given any platform data */
1578 if (pd == NULL) {
1579 dev_info(info->dev, "no data for fb %s (disabled)\n", name);
1580 return 0;
1581 }
1551 1582
1552 fbinfo_crt = framebuffer_alloc(sizeof(struct sm501fb_par), dev); 1583 fbi = framebuffer_alloc(sizeof(struct sm501fb_par), info->dev);
1553 if (fbinfo_crt == NULL) { 1584 if (fbi == NULL) {
1554 dev_err(dev, "cannot allocate crt framebuffer\n"); 1585 dev_err(info->dev, "cannot allocate %s framebuffer\n", name);
1555 return -ENOMEM; 1586 return -ENOMEM;
1556 } 1587 }
1557 1588
1558 fbinfo_pnl = framebuffer_alloc(sizeof(struct sm501fb_par), dev); 1589 par = fbi->par;
1559 if (fbinfo_pnl == NULL) { 1590 par->info = info;
1560 dev_err(dev, "cannot allocate panel framebuffer\n"); 1591 par->head = head;
1561 ret = -ENOMEM; 1592 fbi->pseudo_palette = &par->pseudo_palette;
1562 goto fbinfo_crt_alloc_fail; 1593
1594 info->fb[head] = fbi;
1595
1596 return 0;
1597}
1598
1599/* Free up anything allocated by sm501fb_init_fb */
1600
1601static void sm501_free_init_fb(struct sm501fb_info *info,
1602 enum sm501_controller head)
1603{
1604 struct fb_info *fbi = info->fb[head];
1605
1606 fb_dealloc_cmap(&fbi->cmap);
1607}
1608
1609static int __devinit sm501fb_start_one(struct sm501fb_info *info,
1610 enum sm501_controller head,
1611 const char *drvname)
1612{
1613 struct fb_info *fbi = info->fb[head];
1614 int ret;
1615
1616 if (!fbi)
1617 return 0;
1618
1619 ret = sm501fb_init_fb(info->fb[head], head, drvname);
1620 if (ret) {
1621 dev_err(info->dev, "cannot initialise fb %s\n", drvname);
1622 return ret;
1623 }
1624
1625 ret = register_framebuffer(info->fb[head]);
1626 if (ret) {
1627 dev_err(info->dev, "failed to register fb %s\n", drvname);
1628 sm501_free_init_fb(info, head);
1629 return ret;
1563 } 1630 }
1564 1631
1565 info = sm501fb_info_alloc(fbinfo_crt, fbinfo_pnl); 1632 dev_info(info->dev, "fb%d: %s frame buffer\n", fbi->node, fbi->fix.id);
1566 if (info == NULL) { 1633
1567 dev_err(dev, "cannot allocate par\n"); 1634 return 0;
1568 ret = -ENOMEM; 1635}
1569 goto sm501fb_alloc_fail; 1636
1637static int __devinit sm501fb_probe(struct platform_device *pdev)
1638{
1639 struct sm501fb_info *info;
1640 struct device *dev = &pdev->dev;
1641 int ret;
1642
1643 /* allocate our framebuffers */
1644
1645 info = kzalloc(sizeof(struct sm501fb_info), GFP_KERNEL);
1646 if (!info) {
1647 dev_err(dev, "failed to allocate state\n");
1648 return -ENOMEM;
1570 } 1649 }
1571 1650
1651 info->dev = dev = &pdev->dev;
1652 platform_set_drvdata(pdev, info);
1653
1572 if (dev->parent->platform_data) { 1654 if (dev->parent->platform_data) {
1573 struct sm501_platdata *pd = dev->parent->platform_data; 1655 struct sm501_platdata *pd = dev->parent->platform_data;
1574 info->pdata = pd->fb; 1656 info->pdata = pd->fb;
@@ -1579,90 +1661,88 @@ static int __init sm501fb_probe(struct platform_device *pdev)
1579 info->pdata = &sm501fb_def_pdata; 1661 info->pdata = &sm501fb_def_pdata;
1580 } 1662 }
1581 1663
1582 /* start the framebuffers */ 1664 /* probe for the presence of each panel */
1583 1665
1584 ret = sm501fb_start(info, pdev); 1666 ret = sm501fb_probe_one(info, HEAD_CRT);
1585 if (ret) { 1667 if (ret < 0) {
1586 dev_err(dev, "cannot initialise SM501\n"); 1668 dev_err(dev, "failed to probe CRT\n");
1587 goto sm501fb_start_fail; 1669 goto err_alloc;
1588 } 1670 }
1589 1671
1590 /* CRT framebuffer setup */ 1672 ret = sm501fb_probe_one(info, HEAD_PANEL);
1673 if (ret < 0) {
1674 dev_err(dev, "failed to probe PANEL\n");
1675 goto err_probed_crt;
1676 }
1591 1677
1592 ret = sm501fb_init_fb(fbinfo_crt, HEAD_CRT, driver_name_crt); 1678 if (info->fb[HEAD_PANEL] == NULL &&
1593 if (ret) { 1679 info->fb[HEAD_CRT] == NULL) {
1594 dev_err(dev, "cannot initialise CRT fb\n"); 1680 dev_err(dev, "no framebuffers found\n");
1595 goto sm501fb_start_fail; 1681 goto err_alloc;
1596 } 1682 }
1597 1683
1598 /* Panel framebuffer setup */ 1684 /* get the resources for both of the framebuffers */
1599 1685
1600 ret = sm501fb_init_fb(fbinfo_pnl, HEAD_PANEL, driver_name_pnl); 1686 ret = sm501fb_start(info, pdev);
1601 if (ret) { 1687 if (ret) {
1602 dev_err(dev, "cannot initialise Panel fb\n"); 1688 dev_err(dev, "cannot initialise SM501\n");
1603 goto sm501fb_start_fail; 1689 goto err_probed_panel;
1604 } 1690 }
1605 1691
1606 /* register framebuffers */ 1692 ret = sm501fb_start_one(info, HEAD_CRT, driver_name_crt);
1607 1693 if (ret) {
1608 ret = register_framebuffer(fbinfo_crt); 1694 dev_err(dev, "failed to start CRT\n");
1609 if (ret < 0) { 1695 goto err_started;
1610 dev_err(dev, "failed to register CRT fb (%d)\n", ret);
1611 goto register_crt_fail;
1612 } 1696 }
1613 1697
1614 ret = register_framebuffer(fbinfo_pnl); 1698 ret = sm501fb_start_one(info, HEAD_PANEL, driver_name_pnl);
1615 if (ret < 0) { 1699 if (ret) {
1616 dev_err(dev, "failed to register panel fb (%d)\n", ret); 1700 dev_err(dev, "failed to start Panel\n");
1617 goto register_pnl_fail; 1701 goto err_started_crt;
1618 } 1702 }
1619 1703
1620 dev_info(dev, "fb%d: %s frame buffer device\n",
1621 fbinfo_crt->node, fbinfo_crt->fix.id);
1622
1623 dev_info(dev, "fb%d: %s frame buffer device\n",
1624 fbinfo_pnl->node, fbinfo_pnl->fix.id);
1625
1626 /* create device files */ 1704 /* create device files */
1627 1705
1628 ret = device_create_file(dev, &dev_attr_crt_src); 1706 ret = device_create_file(dev, &dev_attr_crt_src);
1629 if (ret) 1707 if (ret)
1630 goto crtsrc_fail; 1708 goto err_started_panel;
1631 1709
1632 ret = device_create_file(dev, &dev_attr_fbregs_pnl); 1710 ret = device_create_file(dev, &dev_attr_fbregs_pnl);
1633 if (ret) 1711 if (ret)
1634 goto fbregs_pnl_fail; 1712 goto err_attached_crtsrc_file;
1635 1713
1636 ret = device_create_file(dev, &dev_attr_fbregs_crt); 1714 ret = device_create_file(dev, &dev_attr_fbregs_crt);
1637 if (ret) 1715 if (ret)
1638 goto fbregs_crt_fail; 1716 goto err_attached_pnlregs_file;
1639 1717
1640 /* we registered, return ok */ 1718 /* we registered, return ok */
1641 return 0; 1719 return 0;
1642 1720
1643 fbregs_crt_fail: 1721err_attached_pnlregs_file:
1644 device_remove_file(dev, &dev_attr_fbregs_pnl); 1722 device_remove_file(dev, &dev_attr_fbregs_pnl);
1645 1723
1646 fbregs_pnl_fail: 1724err_attached_crtsrc_file:
1647 device_remove_file(dev, &dev_attr_crt_src); 1725 device_remove_file(dev, &dev_attr_crt_src);
1648 1726
1649 crtsrc_fail: 1727err_started_panel:
1650 unregister_framebuffer(fbinfo_pnl); 1728 unregister_framebuffer(info->fb[HEAD_PANEL]);
1729 sm501_free_init_fb(info, HEAD_PANEL);
1651 1730
1652 register_pnl_fail: 1731err_started_crt:
1653 unregister_framebuffer(fbinfo_crt); 1732 unregister_framebuffer(info->fb[HEAD_CRT]);
1733 sm501_free_init_fb(info, HEAD_CRT);
1654 1734
1655 register_crt_fail: 1735err_started:
1656 sm501fb_stop(info); 1736 sm501fb_stop(info);
1657 1737
1658 sm501fb_start_fail: 1738err_probed_panel:
1659 sm501fb_info_release(info); 1739 framebuffer_release(info->fb[HEAD_PANEL]);
1660 1740
1661 sm501fb_alloc_fail: 1741err_probed_crt:
1662 framebuffer_release(fbinfo_pnl); 1742 framebuffer_release(info->fb[HEAD_CRT]);
1663 1743
1664 fbinfo_crt_alloc_fail: 1744err_alloc:
1665 framebuffer_release(fbinfo_crt); 1745 kfree(info);
1666 1746
1667 return ret; 1747 return ret;
1668} 1748}
@@ -1681,11 +1761,14 @@ static int sm501fb_remove(struct platform_device *pdev)
1681 device_remove_file(&pdev->dev, &dev_attr_fbregs_pnl); 1761 device_remove_file(&pdev->dev, &dev_attr_fbregs_pnl);
1682 device_remove_file(&pdev->dev, &dev_attr_crt_src); 1762 device_remove_file(&pdev->dev, &dev_attr_crt_src);
1683 1763
1764 sm501_free_init_fb(info, HEAD_CRT);
1765 sm501_free_init_fb(info, HEAD_PANEL);
1766
1684 unregister_framebuffer(fbinfo_crt); 1767 unregister_framebuffer(fbinfo_crt);
1685 unregister_framebuffer(fbinfo_pnl); 1768 unregister_framebuffer(fbinfo_pnl);
1686 1769
1687 sm501fb_stop(info); 1770 sm501fb_stop(info);
1688 sm501fb_info_release(info); 1771 kfree(info);
1689 1772
1690 framebuffer_release(fbinfo_pnl); 1773 framebuffer_release(fbinfo_pnl);
1691 framebuffer_release(fbinfo_crt); 1774 framebuffer_release(fbinfo_crt);
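
The rewritten sm501fb probe above unwinds failures through a ladder of goto labels, each label undoing exactly the steps that had already succeeded. A compressed sketch of that idiom with hypothetical setup/teardown helpers:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_state { int dummy; };			/* hypothetical */

static int example_setup_a(struct example_state *s) { return 0; }
static void example_undo_a(struct example_state *s) { }
static int example_setup_b(struct example_state *s) { return 0; }

static int example_probe(struct platform_device *pdev)
{
	struct example_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = example_setup_a(state);
	if (ret)
		goto err_alloc;

	ret = example_setup_b(state);
	if (ret)
		goto err_a;

	platform_set_drvdata(pdev, state);
	return 0;

err_a:
	example_undo_a(state);
err_alloc:
	kfree(state);
	return ret;
}
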
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index ea9f19d25597..77aafcfae037 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -836,16 +836,12 @@ static int tdfxfb_pan_display(struct fb_var_screeninfo *var,
836 struct tdfx_par *par = info->par; 836 struct tdfx_par *par = info->par;
837 u32 addr = var->yoffset * info->fix.line_length; 837 u32 addr = var->yoffset * info->fix.line_length;
838 838
839 if (nopan || var->xoffset || (var->yoffset > var->yres_virtual)) 839 if (nopan || var->xoffset)
840 return -EINVAL;
841 if ((var->yoffset + var->yres > var->yres_virtual && nowrap))
842 return -EINVAL; 840 return -EINVAL;
843 841
844 banshee_make_room(par, 1); 842 banshee_make_room(par, 1);
845 tdfx_outl(par, VIDDESKSTART, addr); 843 tdfx_outl(par, VIDDESKSTART, addr);
846 844
847 info->var.xoffset = var->xoffset;
848 info->var.yoffset = var->yoffset;
849 return 0; 845 return 0;
850} 846}
851 847
@@ -1426,6 +1422,8 @@ MODULE_LICENSE("GPL");
1426module_param(hwcursor, int, 0644); 1422module_param(hwcursor, int, 0644);
1427MODULE_PARM_DESC(hwcursor, "Enable hardware cursor " 1423MODULE_PARM_DESC(hwcursor, "Enable hardware cursor "
1428 "(1=enable, 0=disable, default=1)"); 1424 "(1=enable, 0=disable, default=1)");
1425module_param(mode_option, charp, 0);
 1426MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '640x480-8@60'");
1429#ifdef CONFIG_MTRR 1427#ifdef CONFIG_MTRR
1430module_param(nomtrr, bool, 0); 1428module_param(nomtrr, bool, 0);
1431MODULE_PARM_DESC(nomtrr, "Disable MTRR support (default: enabled)"); 1429MODULE_PARM_DESC(nomtrr, "Disable MTRR support (default: enabled)");
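
The tdfxfb hunk adds a mode_option module parameter alongside the existing ones. The general pattern for declaring such a parameter looks like this (hypothetical parameter, shown only to spell the idiom out):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical parameter following the same pattern as mode_option. */
static char *example_mode = "640x480-8@60";
module_param(example_mode, charp, 0);
MODULE_PARM_DESC(example_mode, "Initial video mode, e.g. '640x480-8@60'");
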
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index beefab2992c0..479b2e79ad68 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Frame buffer driver for Trident Blade and Image series 2 * Frame buffer driver for Trident TGUI, Blade and Image series
3 * 3 *
4 * Copyright 2001, 2002 - Jani Monoses <jani@iv.ro> 4 * Copyright 2001, 2002 - Jani Monoses <jani@iv.ro>
5 * 5 *
@@ -13,7 +13,6 @@
13 * code, suggestions 13 * code, suggestions
14 * TODO: 14 * TODO:
15 * timing value tweaking so it looks good on every monitor in every mode 15 * timing value tweaking so it looks good on every monitor in every mode
16 * TGUI acceleration
17 */ 16 */
18 17
19#include <linux/module.h> 18#include <linux/module.h>
@@ -22,25 +21,26 @@
22#include <linux/pci.h> 21#include <linux/pci.h>
23 22
24#include <linux/delay.h> 23#include <linux/delay.h>
24#include <video/vga.h>
25#include <video/trident.h> 25#include <video/trident.h>
26 26
27#define VERSION "0.7.8-NEWAPI"
28
29struct tridentfb_par { 27struct tridentfb_par {
30 void __iomem *io_virt; /* iospace virtual memory address */ 28 void __iomem *io_virt; /* iospace virtual memory address */
29 u32 pseudo_pal[16];
30 int chip_id;
31 int flatpanel;
32 void (*init_accel) (struct tridentfb_par *, int, int);
33 void (*wait_engine) (struct tridentfb_par *);
34 void (*fill_rect)
35 (struct tridentfb_par *par, u32, u32, u32, u32, u32, u32);
36 void (*copy_rect)
37 (struct tridentfb_par *par, u32, u32, u32, u32, u32, u32);
38 void (*image_blit)
39 (struct tridentfb_par *par, const char*,
40 u32, u32, u32, u32, u32, u32);
41 unsigned char eng_oper; /* engine operation... */
31}; 42};
32 43
33static unsigned char eng_oper; /* engine operation... */
34static struct fb_ops tridentfb_ops;
35
36static struct tridentfb_par default_par;
37
38/* FIXME:kmalloc these 3 instead */
39static struct fb_info fb_info;
40static u32 pseudo_pal[16];
41
42static struct fb_var_screeninfo default_var;
43
44static struct fb_fix_screeninfo tridentfb_fix = { 44static struct fb_fix_screeninfo tridentfb_fix = {
45 .id = "Trident", 45 .id = "Trident",
46 .type = FB_TYPE_PACKED_PIXELS, 46 .type = FB_TYPE_PACKED_PIXELS,
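
The new struct tridentfb_par above carries the chip-specific acceleration entry points as per-instance function pointers instead of the old global accel_switch, so every helper receives the par it operates on. A minimal sketch of that dispatch style, with hypothetical structure and operation names:

#include <linux/types.h>
#include <linux/io.h>

/* Hypothetical per-device state with chip-specific operations. */
struct example_par {
	void __iomem *io_virt;
	void (*wait_engine)(struct example_par *par);
	void (*fill_rect)(struct example_par *par,
			  u32 x, u32 y, u32 w, u32 h, u32 color, u32 rop);
};

/* Upper-layer entry point: dispatch through the per-device pointers. */
static void example_fillrect(struct example_par *par,
			     u32 x, u32 y, u32 w, u32 h, u32 color)
{
	par->wait_engine(par);
	par->fill_rect(par, x, y, w, h, color, 0xCC /* ROP_S */);
}
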
@@ -49,27 +49,22 @@ static struct fb_fix_screeninfo tridentfb_fix = {
49 .accel = FB_ACCEL_NONE, 49 .accel = FB_ACCEL_NONE,
50}; 50};
51 51
52static int chip_id;
53
54static int defaultaccel;
55static int displaytype;
56
 57/* defaults which are normally overridden by user values */ 52/* defaults which are normally overridden by user values */
58 53
59/* video mode */ 54/* video mode */
60static char *mode_option __devinitdata = "640x480"; 55static char *mode_option __devinitdata = "640x480-8@60";
61static int bpp = 8; 56static int bpp __devinitdata = 8;
62 57
63static int noaccel; 58static int noaccel __devinitdata;
64 59
65static int center; 60static int center;
66static int stretch; 61static int stretch;
67 62
68static int fp; 63static int fp __devinitdata;
69static int crt; 64static int crt __devinitdata;
70 65
71static int memsize; 66static int memsize __devinitdata;
72static int memdiff; 67static int memdiff __devinitdata;
73static int nativex; 68static int nativex;
74 69
75module_param(mode_option, charp, 0); 70module_param(mode_option, charp, 0);
@@ -84,25 +79,53 @@ module_param(memsize, int, 0);
84module_param(memdiff, int, 0); 79module_param(memdiff, int, 0);
85module_param(nativex, int, 0); 80module_param(nativex, int, 0);
86module_param(fp, int, 0); 81module_param(fp, int, 0);
82MODULE_PARM_DESC(fp, "Define if flatpanel is connected");
87module_param(crt, int, 0); 83module_param(crt, int, 0);
84MODULE_PARM_DESC(crt, "Define if CRT is connected");
85
86static inline int is_oldclock(int id)
87{
88 return (id == TGUI9440) ||
89 (id == TGUI9660) ||
90 (id == CYBER9320);
91}
92
93static inline int is_oldprotect(int id)
94{
95 return is_oldclock(id) ||
96 (id == PROVIDIA9685) ||
97 (id == CYBER9382) ||
98 (id == CYBER9385);
99}
100
101static inline int is_blade(int id)
102{
103 return (id == BLADE3D) ||
104 (id == CYBERBLADEE4) ||
105 (id == CYBERBLADEi7) ||
106 (id == CYBERBLADEi7D) ||
107 (id == CYBERBLADEi1) ||
108 (id == CYBERBLADEi1D) ||
109 (id == CYBERBLADEAi1) ||
110 (id == CYBERBLADEAi1D);
111}
88 112
89static int chip3D; 113static inline int is_xp(int id)
90static int chipcyber; 114{
115 return (id == CYBERBLADEXPAi1) ||
116 (id == CYBERBLADEXPm8) ||
117 (id == CYBERBLADEXPm16);
118}
91 119
92static int is3Dchip(int id) 120static inline int is3Dchip(int id)
93{ 121{
94 return ((id == BLADE3D) || (id == CYBERBLADEE4) || 122 return is_blade(id) || is_xp(id) ||
95 (id == CYBERBLADEi7) || (id == CYBERBLADEi7D) ||
96 (id == CYBER9397) || (id == CYBER9397DVD) || 123 (id == CYBER9397) || (id == CYBER9397DVD) ||
97 (id == CYBER9520) || (id == CYBER9525DVD) || 124 (id == CYBER9520) || (id == CYBER9525DVD) ||
98 (id == IMAGE975) || (id == IMAGE985) || 125 (id == IMAGE975) || (id == IMAGE985);
99 (id == CYBERBLADEi1) || (id == CYBERBLADEi1D) ||
100 (id == CYBERBLADEAi1) || (id == CYBERBLADEAi1D) ||
101 (id == CYBERBLADEXPm8) || (id == CYBERBLADEXPm16) ||
102 (id == CYBERBLADEXPAi1));
103} 126}
104 127
105static int iscyber(int id) 128static inline int iscyber(int id)
106{ 129{
107 switch (id) { 130 switch (id) {
108 case CYBER9388: 131 case CYBER9388:
@@ -122,12 +145,7 @@ static int iscyber(int id)
122 return 1; 145 return 1;
123 146
124 case CYBER9320: 147 case CYBER9320:
125 case TGUI9660:
126 case IMAGE975:
127 case IMAGE985:
128 case BLADE3D:
129 case CYBERBLADEi7: /* VIA MPV4 integrated version */ 148 case CYBERBLADEi7: /* VIA MPV4 integrated version */
130
131 default: 149 default:
 132 /* case CYBERBLADEXPm8: Strange */ 150 /* case CYBERBLADEXPm8: Strange */
 133 /* case CYBERBLADEXPm16: Strange */ 151 /* case CYBERBLADEXPm16: Strange */
@@ -135,147 +153,110 @@ static int iscyber(int id)
135 } 153 }
136} 154}
137 155
138#define CRT 0x3D0 /* CRTC registers offset for color display */ 156static inline void t_outb(struct tridentfb_par *p, u8 val, u16 reg)
139 157{
140#ifndef TRIDENT_MMIO 158 fb_writeb(val, p->io_virt + reg);
141 #define TRIDENT_MMIO 1 159}
142#endif
143
144#if TRIDENT_MMIO
145 #define t_outb(val, reg) writeb(val,((struct tridentfb_par *)(fb_info.par))->io_virt + reg)
146 #define t_inb(reg) readb(((struct tridentfb_par*)(fb_info.par))->io_virt + reg)
147#else
148 #define t_outb(val, reg) outb(val, reg)
149 #define t_inb(reg) inb(reg)
150#endif
151 160
161static inline u8 t_inb(struct tridentfb_par *p, u16 reg)
162{
163 return fb_readb(p->io_virt + reg);
164}
152 165
153static struct accel_switch { 166static inline void writemmr(struct tridentfb_par *par, u16 r, u32 v)
154 void (*init_accel) (int, int); 167{
155 void (*wait_engine) (void); 168 fb_writel(v, par->io_virt + r);
156 void (*fill_rect) (u32, u32, u32, u32, u32, u32); 169}
157 void (*copy_rect) (u32, u32, u32, u32, u32, u32);
158} *acc;
159 170
160#define writemmr(r, v) writel(v, ((struct tridentfb_par *)fb_info.par)->io_virt + r) 171static inline u32 readmmr(struct tridentfb_par *par, u16 r)
161#define readmmr(r) readl(((struct tridentfb_par *)fb_info.par)->io_virt + r) 172{
173 return fb_readl(par->io_virt + r);
174}
162 175
163/* 176/*
164 * Blade specific acceleration. 177 * Blade specific acceleration.
165 */ 178 */
166 179
167#define point(x, y) ((y) << 16 | (x)) 180#define point(x, y) ((y) << 16 | (x))
168#define STA 0x2120 181
169#define CMD 0x2144 182static void blade_init_accel(struct tridentfb_par *par, int pitch, int bpp)
170#define ROP 0x2148
171#define CLR 0x2160
172#define SR1 0x2100
173#define SR2 0x2104
174#define DR1 0x2108
175#define DR2 0x210C
176
177#define ROP_S 0xCC
178
179static void blade_init_accel(int pitch, int bpp)
180{ 183{
181 int v1 = (pitch >> 3) << 20; 184 int v1 = (pitch >> 3) << 20;
182 int tmp = 0, v2; 185 int tmp = bpp == 24 ? 2 : (bpp >> 4);
183 switch (bpp) { 186 int v2 = v1 | (tmp << 29);
184 case 8: 187
185 tmp = 0; 188 writemmr(par, 0x21C0, v2);
186 break; 189 writemmr(par, 0x21C4, v2);
187 case 15: 190 writemmr(par, 0x21B8, v2);
188 tmp = 5; 191 writemmr(par, 0x21BC, v2);
189 break; 192 writemmr(par, 0x21D0, v1);
190 case 16: 193 writemmr(par, 0x21D4, v1);
191 tmp = 1; 194 writemmr(par, 0x21C8, v1);
192 break; 195 writemmr(par, 0x21CC, v1);
193 case 24: 196 writemmr(par, 0x216C, 0);
194 case 32:
195 tmp = 2;
196 break;
197 }
198 v2 = v1 | (tmp << 29);
199 writemmr(0x21C0, v2);
200 writemmr(0x21C4, v2);
201 writemmr(0x21B8, v2);
202 writemmr(0x21BC, v2);
203 writemmr(0x21D0, v1);
204 writemmr(0x21D4, v1);
205 writemmr(0x21C8, v1);
206 writemmr(0x21CC, v1);
207 writemmr(0x216C, 0);
208} 197}
209 198
210static void blade_wait_engine(void) 199static void blade_wait_engine(struct tridentfb_par *par)
211{ 200{
212 while (readmmr(STA) & 0xFA800000) ; 201 while (readmmr(par, STATUS) & 0xFA800000)
202 cpu_relax();
213} 203}
214 204
215static void blade_fill_rect(u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) 205static void blade_fill_rect(struct tridentfb_par *par,
206 u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop)
216{ 207{
217 writemmr(CLR, c); 208 writemmr(par, COLOR, c);
218 writemmr(ROP, rop ? 0x66 : ROP_S); 209 writemmr(par, ROP, rop ? ROP_X : ROP_S);
219 writemmr(CMD, 0x20000000 | 1 << 19 | 1 << 4 | 2 << 2); 210 writemmr(par, CMD, 0x20000000 | 1 << 19 | 1 << 4 | 2 << 2);
220 211
221 writemmr(DR1, point(x, y)); 212 writemmr(par, DST1, point(x, y));
222 writemmr(DR2, point(x + w - 1, y + h - 1)); 213 writemmr(par, DST2, point(x + w - 1, y + h - 1));
223} 214}
224 215
225static void blade_copy_rect(u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) 216static void blade_image_blit(struct tridentfb_par *par, const char *data,
217 u32 x, u32 y, u32 w, u32 h, u32 c, u32 b)
218{
219 unsigned size = ((w + 31) >> 5) * h;
220
221 writemmr(par, COLOR, c);
222 writemmr(par, BGCOLOR, b);
223 writemmr(par, CMD, 0xa0000000 | 3 << 19);
224
225 writemmr(par, DST1, point(x, y));
226 writemmr(par, DST2, point(x + w - 1, y + h - 1));
227
228 memcpy(par->io_virt + 0x10000, data, 4 * size);
229}
230
231static void blade_copy_rect(struct tridentfb_par *par,
232 u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
226{ 233{
227 u32 s1, s2, d1, d2;
228 int direction = 2; 234 int direction = 2;
229 s1 = point(x1, y1); 235 u32 s1 = point(x1, y1);
230 s2 = point(x1 + w - 1, y1 + h - 1); 236 u32 s2 = point(x1 + w - 1, y1 + h - 1);
231 d1 = point(x2, y2); 237 u32 d1 = point(x2, y2);
232 d2 = point(x2 + w - 1, y2 + h - 1); 238 u32 d2 = point(x2 + w - 1, y2 + h - 1);
233 239
234 if ((y1 > y2) || ((y1 == y2) && (x1 > x2))) 240 if ((y1 > y2) || ((y1 == y2) && (x1 > x2)))
235 direction = 0; 241 direction = 0;
236 242
237 writemmr(ROP, ROP_S); 243 writemmr(par, ROP, ROP_S);
238 writemmr(CMD, 0xE0000000 | 1 << 19 | 1 << 4 | 1 << 2 | direction); 244 writemmr(par, CMD, 0xE0000000 | 1 << 19 | 1 << 4 | 1 << 2 | direction);
239 245
240 writemmr(SR1, direction ? s2 : s1); 246 writemmr(par, SRC1, direction ? s2 : s1);
241 writemmr(SR2, direction ? s1 : s2); 247 writemmr(par, SRC2, direction ? s1 : s2);
242 writemmr(DR1, direction ? d2 : d1); 248 writemmr(par, DST1, direction ? d2 : d1);
243 writemmr(DR2, direction ? d1 : d2); 249 writemmr(par, DST2, direction ? d1 : d2);
244} 250}
245 251
246static struct accel_switch accel_blade = {
247 blade_init_accel,
248 blade_wait_engine,
249 blade_fill_rect,
250 blade_copy_rect,
251};
252
253/* 252/*
254 * BladeXP specific acceleration functions 253 * BladeXP specific acceleration functions
255 */ 254 */
256 255
257#define ROP_P 0xF0 256static void xp_init_accel(struct tridentfb_par *par, int pitch, int bpp)
258#define masked_point(x, y) ((y & 0xffff)<<16|(x & 0xffff))
259
260static void xp_init_accel(int pitch, int bpp)
261{ 257{
262 int tmp = 0, v1; 258 unsigned char x = bpp == 24 ? 3 : (bpp >> 4);
263 unsigned char x = 0; 259 int v1 = pitch << (bpp == 24 ? 20 : (18 + x));
264
265 switch (bpp) {
266 case 8:
267 x = 0;
268 break;
269 case 16:
270 x = 1;
271 break;
272 case 24:
273 x = 3;
274 break;
275 case 32:
276 x = 2;
277 break;
278 }
279 260
280 switch (pitch << (bpp >> 3)) { 261 switch (pitch << (bpp >> 3)) {
281 case 8192: 262 case 8192:
@@ -293,42 +274,21 @@ static void xp_init_accel(int pitch, int bpp)
293 break; 274 break;
294 } 275 }
295 276
296 t_outb(x, 0x2125); 277 t_outb(par, x, 0x2125);
297
298 eng_oper = x | 0x40;
299
300 switch (bpp) {
301 case 8:
302 tmp = 18;
303 break;
304 case 15:
305 case 16:
306 tmp = 19;
307 break;
308 case 24:
309 case 32:
310 tmp = 20;
311 break;
312 }
313 278
314 v1 = pitch << tmp; 279 par->eng_oper = x | 0x40;
315 280
316 writemmr(0x2154, v1); 281 writemmr(par, 0x2154, v1);
317 writemmr(0x2150, v1); 282 writemmr(par, 0x2150, v1);
318 t_outb(3, 0x2126); 283 t_outb(par, 3, 0x2126);
319} 284}
320 285
321static void xp_wait_engine(void) 286static void xp_wait_engine(struct tridentfb_par *par)
322{ 287{
323 int busy; 288 int count = 0;
324 int count, timeout; 289 int timeout = 0;
325 290
326 count = 0; 291 while (t_inb(par, STATUS) & 0x80) {
327 timeout = 0;
328 for (;;) {
329 busy = t_inb(STA) & 0x80;
330 if (busy != 0x80)
331 return;
332 count++; 292 count++;
333 if (count == 10000000) { 293 if (count == 10000000) {
334 /* Timeout */ 294 /* Timeout */
@@ -336,30 +296,31 @@ static void xp_wait_engine(void)
336 timeout++; 296 timeout++;
337 if (timeout == 8) { 297 if (timeout == 8) {
338 /* Reset engine */ 298 /* Reset engine */
339 t_outb(0x00, 0x2120); 299 t_outb(par, 0x00, STATUS);
340 return; 300 return;
341 } 301 }
342 } 302 }
303 cpu_relax();
343 } 304 }
344} 305}
345 306
346static void xp_fill_rect(u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) 307static void xp_fill_rect(struct tridentfb_par *par,
308 u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop)
347{ 309{
348 writemmr(0x2127, ROP_P); 310 writemmr(par, 0x2127, ROP_P);
349 writemmr(0x2158, c); 311 writemmr(par, 0x2158, c);
350 writemmr(0x2128, 0x4000); 312 writemmr(par, DRAWFL, 0x4000);
351 writemmr(0x2140, masked_point(h, w)); 313 writemmr(par, OLDDIM, point(h, w));
352 writemmr(0x2138, masked_point(y, x)); 314 writemmr(par, OLDDST, point(y, x));
353 t_outb(0x01, 0x2124); 315 t_outb(par, 0x01, OLDCMD);
354 t_outb(eng_oper, 0x2125); 316 t_outb(par, par->eng_oper, 0x2125);
355} 317}
356 318
357static void xp_copy_rect(u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) 319static void xp_copy_rect(struct tridentfb_par *par,
320 u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
358{ 321{
359 int direction;
360 u32 x1_tmp, x2_tmp, y1_tmp, y2_tmp; 322 u32 x1_tmp, x2_tmp, y1_tmp, y2_tmp;
361 323 int direction = 0x0004;
362 direction = 0x0004;
363 324
364 if ((x1 < x2) && (y1 == y2)) { 325 if ((x1 < x2) && (y1 == y2)) {
365 direction |= 0x0200; 326 direction |= 0x0200;
@@ -379,103 +340,152 @@ static void xp_copy_rect(u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
379 y2_tmp = y2; 340 y2_tmp = y2;
380 } 341 }
381 342
382 writemmr(0x2128, direction); 343 writemmr(par, DRAWFL, direction);
383 t_outb(ROP_S, 0x2127); 344 t_outb(par, ROP_S, 0x2127);
384 writemmr(0x213C, masked_point(y1_tmp, x1_tmp)); 345 writemmr(par, OLDSRC, point(y1_tmp, x1_tmp));
385 writemmr(0x2138, masked_point(y2_tmp, x2_tmp)); 346 writemmr(par, OLDDST, point(y2_tmp, x2_tmp));
386 writemmr(0x2140, masked_point(h, w)); 347 writemmr(par, OLDDIM, point(h, w));
387 t_outb(0x01, 0x2124); 348 t_outb(par, 0x01, OLDCMD);
388} 349}
389 350
390static struct accel_switch accel_xp = {
391 xp_init_accel,
392 xp_wait_engine,
393 xp_fill_rect,
394 xp_copy_rect,
395};
396
397/* 351/*
398 * Image specific acceleration functions 352 * Image specific acceleration functions
399 */ 353 */
400static void image_init_accel(int pitch, int bpp) 354static void image_init_accel(struct tridentfb_par *par, int pitch, int bpp)
401{ 355{
 402 int tmp = 0; 356 int tmp = bpp == 24 ? 2 : (bpp >> 4);
403 switch (bpp) { 357
404 case 8: 358 writemmr(par, 0x2120, 0xF0000000);
405 tmp = 0; 359 writemmr(par, 0x2120, 0x40000000 | tmp);
406 break; 360 writemmr(par, 0x2120, 0x80000000);
407 case 15: 361 writemmr(par, 0x2144, 0x00000000);
408 tmp = 5; 362 writemmr(par, 0x2148, 0x00000000);
409 break; 363 writemmr(par, 0x2150, 0x00000000);
410 case 16: 364 writemmr(par, 0x2154, 0x00000000);
411 tmp = 1; 365 writemmr(par, 0x2120, 0x60000000 | (pitch << 16) | pitch);
412 break; 366 writemmr(par, 0x216C, 0x00000000);
413 case 24: 367 writemmr(par, 0x2170, 0x00000000);
414 case 32: 368 writemmr(par, 0x217C, 0x00000000);
415 tmp = 2; 369 writemmr(par, 0x2120, 0x10000000);
416 break; 370 writemmr(par, 0x2130, (2047 << 16) | 2047);
417 }
418 writemmr(0x2120, 0xF0000000);
419 writemmr(0x2120, 0x40000000 | tmp);
420 writemmr(0x2120, 0x80000000);
421 writemmr(0x2144, 0x00000000);
422 writemmr(0x2148, 0x00000000);
423 writemmr(0x2150, 0x00000000);
424 writemmr(0x2154, 0x00000000);
425 writemmr(0x2120, 0x60000000 | (pitch << 16) | pitch);
426 writemmr(0x216C, 0x00000000);
427 writemmr(0x2170, 0x00000000);
428 writemmr(0x217C, 0x00000000);
429 writemmr(0x2120, 0x10000000);
430 writemmr(0x2130, (2047 << 16) | 2047);
431} 371}
432 372
433static void image_wait_engine(void) 373static void image_wait_engine(struct tridentfb_par *par)
434{ 374{
435 while (readmmr(0x2164) & 0xF0000000) ; 375 while (readmmr(par, 0x2164) & 0xF0000000)
376 cpu_relax();
436} 377}
437 378
438static void image_fill_rect(u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) 379static void image_fill_rect(struct tridentfb_par *par,
380 u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop)
439{ 381{
440 writemmr(0x2120, 0x80000000); 382 writemmr(par, 0x2120, 0x80000000);
441 writemmr(0x2120, 0x90000000 | ROP_S); 383 writemmr(par, 0x2120, 0x90000000 | ROP_S);
442 384
443 writemmr(0x2144, c); 385 writemmr(par, 0x2144, c);
444 386
445 writemmr(DR1, point(x, y)); 387 writemmr(par, DST1, point(x, y));
446 writemmr(DR2, point(x + w - 1, y + h - 1)); 388 writemmr(par, DST2, point(x + w - 1, y + h - 1));
447 389
448 writemmr(0x2124, 0x80000000 | 3 << 22 | 1 << 10 | 1 << 9); 390 writemmr(par, 0x2124, 0x80000000 | 3 << 22 | 1 << 10 | 1 << 9);
449} 391}
450 392
451static void image_copy_rect(u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) 393static void image_copy_rect(struct tridentfb_par *par,
394 u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
452{ 395{
453 u32 s1, s2, d1, d2; 396 int direction = 0x4;
454 int direction = 2; 397 u32 s1 = point(x1, y1);
455 s1 = point(x1, y1); 398 u32 s2 = point(x1 + w - 1, y1 + h - 1);
456 s2 = point(x1 + w - 1, y1 + h - 1); 399 u32 d1 = point(x2, y2);
457 d1 = point(x2, y2); 400 u32 d2 = point(x2 + w - 1, y2 + h - 1);
458 d2 = point(x2 + w - 1, y2 + h - 1);
459 401
460 if ((y1 > y2) || ((y1 == y2) && (x1 > x2))) 402 if ((y1 > y2) || ((y1 == y2) && (x1 > x2)))
461 direction = 0; 403 direction = 0;
462 404
463 writemmr(0x2120, 0x80000000); 405 writemmr(par, 0x2120, 0x80000000);
464 writemmr(0x2120, 0x90000000 | ROP_S); 406 writemmr(par, 0x2120, 0x90000000 | ROP_S);
465 407
466 writemmr(SR1, direction ? s2 : s1); 408 writemmr(par, SRC1, direction ? s2 : s1);
467 writemmr(SR2, direction ? s1 : s2); 409 writemmr(par, SRC2, direction ? s1 : s2);
468 writemmr(DR1, direction ? d2 : d1); 410 writemmr(par, DST1, direction ? d2 : d1);
469 writemmr(DR2, direction ? d1 : d2); 411 writemmr(par, DST2, direction ? d1 : d2);
470 writemmr(0x2124, 0x80000000 | 1 << 22 | 1 << 10 | 1 << 7 | direction); 412 writemmr(par, 0x2124,
413 0x80000000 | 1 << 22 | 1 << 10 | 1 << 7 | direction);
471} 414}
472 415
473static struct accel_switch accel_image = { 416/*
474 image_init_accel, 417 * TGUI 9440/96XX acceleration
475 image_wait_engine, 418 */
476 image_fill_rect, 419
477 image_copy_rect, 420static void tgui_init_accel(struct tridentfb_par *par, int pitch, int bpp)
478}; 421{
422 unsigned char x = bpp == 24 ? 3 : (bpp >> 4);
423
424 /* disable clipping */
425 writemmr(par, 0x2148, 0);
426 writemmr(par, 0x214C, point(4095, 2047));
427
428 switch ((pitch * bpp) / 8) {
429 case 8192:
430 case 512:
431 x |= 0x00;
432 break;
433 case 1024:
434 x |= 0x04;
435 break;
436 case 2048:
437 x |= 0x08;
438 break;
439 case 4096:
440 x |= 0x0C;
441 break;
442 }
443
444 fb_writew(x, par->io_virt + 0x2122);
445}
446
447static void tgui_fill_rect(struct tridentfb_par *par,
448 u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop)
449{
450 t_outb(par, ROP_P, 0x2127);
451 writemmr(par, OLDCLR, c);
452 writemmr(par, DRAWFL, 0x4020);
453 writemmr(par, OLDDIM, point(w - 1, h - 1));
454 writemmr(par, OLDDST, point(x, y));
455 t_outb(par, 1, OLDCMD);
456}
457
458static void tgui_copy_rect(struct tridentfb_par *par,
459 u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
460{
461 int flags = 0;
462 u16 x1_tmp, x2_tmp, y1_tmp, y2_tmp;
463
464 if ((x1 < x2) && (y1 == y2)) {
465 flags |= 0x0200;
466 x1_tmp = x1 + w - 1;
467 x2_tmp = x2 + w - 1;
468 } else {
469 x1_tmp = x1;
470 x2_tmp = x2;
471 }
472
473 if (y1 < y2) {
474 flags |= 0x0100;
475 y1_tmp = y1 + h - 1;
476 y2_tmp = y2 + h - 1;
477 } else {
478 y1_tmp = y1;
479 y2_tmp = y2;
480 }
481
482 writemmr(par, DRAWFL, 0x4 | flags);
483 t_outb(par, ROP_S, 0x2127);
484 writemmr(par, OLDSRC, point(x1_tmp, y1_tmp));
485 writemmr(par, OLDDST, point(x2_tmp, y2_tmp));
486 writemmr(par, OLDDIM, point(w - 1, h - 1));
487 t_outb(par, 1, OLDCMD);
488}
479 489
480/* 490/*
481 * Accel functions called by the upper layers 491 * Accel functions called by the upper layers
@@ -484,129 +494,162 @@ static struct accel_switch accel_image = {
484static void tridentfb_fillrect(struct fb_info *info, 494static void tridentfb_fillrect(struct fb_info *info,
485 const struct fb_fillrect *fr) 495 const struct fb_fillrect *fr)
486{ 496{
487 int bpp = info->var.bits_per_pixel; 497 struct tridentfb_par *par = info->par;
488 int col = 0; 498 int col;
489 499
490 switch (bpp) { 500 if (info->flags & FBINFO_HWACCEL_DISABLED) {
491 default: 501 cfb_fillrect(info, fr);
492 case 8: 502 return;
493 col |= fr->color; 503 }
504 if (info->var.bits_per_pixel == 8) {
505 col = fr->color;
494 col |= col << 8; 506 col |= col << 8;
495 col |= col << 16; 507 col |= col << 16;
496 break; 508 } else
497 case 16:
498 col = ((u32 *)(info->pseudo_palette))[fr->color]; 509 col = ((u32 *)(info->pseudo_palette))[fr->color];
499 break; 510
500 case 32: 511 par->wait_engine(par);
501 col = ((u32 *)(info->pseudo_palette))[fr->color]; 512 par->fill_rect(par, fr->dx, fr->dy, fr->width,
502 break; 513 fr->height, col, fr->rop);
514}
515
516static void tridentfb_imageblit(struct fb_info *info,
517 const struct fb_image *img)
518{
519 struct tridentfb_par *par = info->par;
520 int col, bgcol;
521
522 if ((info->flags & FBINFO_HWACCEL_DISABLED) || img->depth != 1) {
523 cfb_imageblit(info, img);
524 return;
525 }
526 if (info->var.bits_per_pixel == 8) {
527 col = img->fg_color;
528 col |= col << 8;
529 col |= col << 16;
530 bgcol = img->bg_color;
531 bgcol |= bgcol << 8;
532 bgcol |= bgcol << 16;
533 } else {
534 col = ((u32 *)(info->pseudo_palette))[img->fg_color];
535 bgcol = ((u32 *)(info->pseudo_palette))[img->bg_color];
503 } 536 }
504 537
505 acc->fill_rect(fr->dx, fr->dy, fr->width, fr->height, col, fr->rop); 538 par->wait_engine(par);
506 acc->wait_engine(); 539 if (par->image_blit)
540 par->image_blit(par, img->data, img->dx, img->dy,
541 img->width, img->height, col, bgcol);
542 else
543 cfb_imageblit(info, img);
507} 544}
545
508static void tridentfb_copyarea(struct fb_info *info, 546static void tridentfb_copyarea(struct fb_info *info,
509 const struct fb_copyarea *ca) 547 const struct fb_copyarea *ca)
510{ 548{
511 acc->copy_rect(ca->sx, ca->sy, ca->dx, ca->dy, ca->width, ca->height); 549 struct tridentfb_par *par = info->par;
512 acc->wait_engine(); 550
551 if (info->flags & FBINFO_HWACCEL_DISABLED) {
552 cfb_copyarea(info, ca);
553 return;
554 }
555 par->wait_engine(par);
556 par->copy_rect(par, ca->sx, ca->sy, ca->dx, ca->dy,
557 ca->width, ca->height);
558}
559
560static int tridentfb_sync(struct fb_info *info)
561{
562 struct tridentfb_par *par = info->par;
563
564 if (!(info->flags & FBINFO_HWACCEL_DISABLED))
565 par->wait_engine(par);
566 return 0;
513} 567}
514#else /* !CONFIG_FB_TRIDENT_ACCEL */ 568#else
515#define tridentfb_fillrect cfb_fillrect 569#define tridentfb_fillrect cfb_fillrect
516#define tridentfb_copyarea cfb_copyarea 570#define tridentfb_copyarea cfb_copyarea
571#define tridentfb_imageblit cfb_imageblit
517#endif /* CONFIG_FB_TRIDENT_ACCEL */ 572#endif /* CONFIG_FB_TRIDENT_ACCEL */
518 573
519
520/* 574/*
521 * Hardware access functions 575 * Hardware access functions
522 */ 576 */
523 577
524static inline unsigned char read3X4(int reg)
578static inline unsigned char read3X4(struct tridentfb_par *par, int reg)
525{ 579{
526 struct tridentfb_par *par = (struct tridentfb_par *)fb_info.par;
527 writeb(reg, par->io_virt + CRT + 4);
528 return readb(par->io_virt + CRT + 5);
580 return vga_mm_rcrt(par->io_virt, reg);
529} 581}
530 582
531static inline void write3X4(int reg, unsigned char val)
583static inline void write3X4(struct tridentfb_par *par, int reg,
584 unsigned char val)
532{ 585{
533 struct tridentfb_par *par = (struct tridentfb_par *)fb_info.par;
534 writeb(reg, par->io_virt + CRT + 4);
535 writeb(val, par->io_virt + CRT + 5);
586 vga_mm_wcrt(par->io_virt, reg, val);
536} 587}
537 588
538static inline unsigned char read3C4(int reg) 589static inline unsigned char read3CE(struct tridentfb_par *par,
590 unsigned char reg)
539{ 591{
540 t_outb(reg, 0x3C4); 592 return vga_mm_rgfx(par->io_virt, reg);
541 return t_inb(0x3C5);
542} 593}
543 594
544static inline void write3C4(int reg, unsigned char val) 595static inline void writeAttr(struct tridentfb_par *par, int reg,
596 unsigned char val)
545{ 597{
546 t_outb(reg, 0x3C4); 598 fb_readb(par->io_virt + VGA_IS1_RC); /* flip-flop to index */
547 t_outb(val, 0x3C5); 599 vga_mm_wattr(par->io_virt, reg, val);
548} 600}
549 601
550static inline unsigned char read3CE(int reg) 602static inline void write3CE(struct tridentfb_par *par, int reg,
603 unsigned char val)
551{ 604{
552 t_outb(reg, 0x3CE); 605 vga_mm_wgfx(par->io_virt, reg, val);
553 return t_inb(0x3CF);
554} 606}
555 607
556static inline void writeAttr(int reg, unsigned char val) 608static void enable_mmio(struct tridentfb_par *par)
557{
558 readb(((struct tridentfb_par *)fb_info.par)->io_virt + CRT + 0x0A); /* flip-flop to index */
559 t_outb(reg, 0x3C0);
560 t_outb(val, 0x3C0);
561}
562
563static inline void write3CE(int reg, unsigned char val)
564{
565 t_outb(reg, 0x3CE);
566 t_outb(val, 0x3CF);
567}
568
569static void enable_mmio(void)
570{ 609{
571 /* Goto New Mode */ 610 /* Goto New Mode */
572 outb(0x0B, 0x3C4); 611 vga_io_rseq(0x0B);
573 inb(0x3C5);
574 612
575 /* Unprotect registers */ 613 /* Unprotect registers */
576 outb(NewMode1, 0x3C4); 614 vga_io_wseq(NewMode1, 0x80);
577 outb(0x80, 0x3C5); 615 if (!is_oldprotect(par->chip_id))
616 vga_io_wseq(Protection, 0x92);
578 617
579 /* Enable MMIO */ 618 /* Enable MMIO */
580 outb(PCIReg, 0x3D4); 619 outb(PCIReg, 0x3D4);
581 outb(inb(0x3D5) | 0x01, 0x3D5); 620 outb(inb(0x3D5) | 0x01, 0x3D5);
582} 621}
583 622
584static void disable_mmio(void) 623static void disable_mmio(struct tridentfb_par *par)
585{ 624{
586 /* Goto New Mode */ 625 /* Goto New Mode */
587 t_outb(0x0B, 0x3C4); 626 vga_mm_rseq(par->io_virt, 0x0B);
588 t_inb(0x3C5);
589 627
590 /* Unprotect registers */ 628 /* Unprotect registers */
591 t_outb(NewMode1, 0x3C4); 629 vga_mm_wseq(par->io_virt, NewMode1, 0x80);
592 t_outb(0x80, 0x3C5); 630 if (!is_oldprotect(par->chip_id))
631 vga_mm_wseq(par->io_virt, Protection, 0x92);
593 632
594 /* Disable MMIO */ 633 /* Disable MMIO */
595 t_outb(PCIReg, 0x3D4); 634 t_outb(par, PCIReg, 0x3D4);
596 t_outb(t_inb(0x3D5) & ~0x01, 0x3D5); 635 t_outb(par, t_inb(par, 0x3D5) & ~0x01, 0x3D5);
597} 636}
598 637
599#define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F)
638static inline void crtc_unlock(struct tridentfb_par *par)
639{
640 write3X4(par, VGA_CRTC_V_SYNC_END,
641 read3X4(par, VGA_CRTC_V_SYNC_END) & 0x7F);
642}
600 643
601/* Return flat panel's maximum x resolution */ 644/* Return flat panel's maximum x resolution */
602static int __devinit get_nativex(void) 645static int __devinit get_nativex(struct tridentfb_par *par)
603{ 646{
604 int x, y, tmp; 647 int x, y, tmp;
605 648
606 if (nativex) 649 if (nativex)
607 return nativex; 650 return nativex;
608 651
609 tmp = (read3CE(VertStretch) >> 4) & 3; 652 tmp = (read3CE(par, VertStretch) >> 4) & 3;
610 653
611 switch (tmp) { 654 switch (tmp) {
612 case 0: 655 case 0:
@@ -632,77 +675,92 @@ static int __devinit get_nativex(void)
632} 675}
633 676
634/* Set pitch */ 677/* Set pitch */
635static void set_lwidth(int width) 678static inline void set_lwidth(struct tridentfb_par *par, int width)
636{ 679{
637 write3X4(Offset, width & 0xFF); 680 write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
638 write3X4(AddColReg, 681 write3X4(par, AddColReg,
639 (read3X4(AddColReg) & 0xCF) | ((width & 0x300) >> 4)); 682 (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
640} 683}
641 684
642/* For resolutions smaller than FP resolution stretch */ 685/* For resolutions smaller than FP resolution stretch */
643static void screen_stretch(void) 686static void screen_stretch(struct tridentfb_par *par)
644{ 687{
645 if (chip_id != CYBERBLADEXPAi1) 688 if (par->chip_id != CYBERBLADEXPAi1)
646 write3CE(BiosReg, 0); 689 write3CE(par, BiosReg, 0);
647 else 690 else
648 write3CE(BiosReg, 8); 691 write3CE(par, BiosReg, 8);
649 write3CE(VertStretch, (read3CE(VertStretch) & 0x7C) | 1); 692 write3CE(par, VertStretch, (read3CE(par, VertStretch) & 0x7C) | 1);
650 write3CE(HorStretch, (read3CE(HorStretch) & 0x7C) | 1); 693 write3CE(par, HorStretch, (read3CE(par, HorStretch) & 0x7C) | 1);
651} 694}
652 695
653/* For resolutions smaller than FP resolution center */ 696/* For resolutions smaller than FP resolution center */
654static void screen_center(void) 697static inline void screen_center(struct tridentfb_par *par)
655{ 698{
656 write3CE(VertStretch, (read3CE(VertStretch) & 0x7C) | 0x80); 699 write3CE(par, VertStretch, (read3CE(par, VertStretch) & 0x7C) | 0x80);
657 write3CE(HorStretch, (read3CE(HorStretch) & 0x7C) | 0x80); 700 write3CE(par, HorStretch, (read3CE(par, HorStretch) & 0x7C) | 0x80);
658} 701}
659 702
660/* Address of first shown pixel in display memory */ 703/* Address of first shown pixel in display memory */
661static void set_screen_start(int base) 704static void set_screen_start(struct tridentfb_par *par, int base)
662{ 705{
663 write3X4(StartAddrLow, base & 0xFF); 706 u8 tmp;
664 write3X4(StartAddrHigh, (base & 0xFF00) >> 8); 707 write3X4(par, VGA_CRTC_START_LO, base & 0xFF);
665 write3X4(CRTCModuleTest, 708 write3X4(par, VGA_CRTC_START_HI, (base & 0xFF00) >> 8);
666 (read3X4(CRTCModuleTest) & 0xDF) | ((base & 0x10000) >> 11)); 709 tmp = read3X4(par, CRTCModuleTest) & 0xDF;
667 write3X4(CRTHiOrd, 710 write3X4(par, CRTCModuleTest, tmp | ((base & 0x10000) >> 11));
668 (read3X4(CRTHiOrd) & 0xF8) | ((base & 0xE0000) >> 17)); 711 tmp = read3X4(par, CRTHiOrd) & 0xF8;
712 write3X4(par, CRTHiOrd, tmp | ((base & 0xE0000) >> 17));
669} 713}
670 714
671/* Set dotclock frequency */ 715/* Set dotclock frequency */
672static void set_vclk(unsigned long freq) 716static void set_vclk(struct tridentfb_par *par, unsigned long freq)
673{ 717{
674 int m, n, k; 718 int m, n, k;
675 unsigned long f, fi, d, di; 719 unsigned long fi, d, di;
676 unsigned char lo = 0, hi = 0; 720 unsigned char best_m = 0, best_n = 0, best_k = 0;
721 unsigned char hi, lo;
722 unsigned char shift = !is_oldclock(par->chip_id) ? 2 : 1;
677 723
678 d = 20000; 724 d = 20000;
679 for (k = 2; k >= 0; k--) 725 for (k = shift; k >= 0; k--)
680 for (m = 0; m < 63; m++) 726 for (m = 1; m < 32; m++) {
681 for (n = 0; n < 128; n++) { 727 n = ((m + 2) << shift) - 8;
728 for (n = (n < 0 ? 0 : n); n < 122; n++) {
682 fi = ((14318l * (n + 8)) / (m + 2)) >> k; 729 fi = ((14318l * (n + 8)) / (m + 2)) >> k;
683 if ((di = abs(fi - freq)) < d) { 730 di = abs(fi - freq);
731 if (di < d || (di == d && k == best_k)) {
684 d = di; 732 d = di;
685 f = fi; 733 best_n = n;
686 lo = n; 734 best_m = m;
687 hi = (k << 6) | m; 735 best_k = k;
688 } 736 }
689 if (fi > freq) 737 if (fi > freq)
690 break; 738 break;
691 } 739 }
692 if (chip3D) { 740 }
693 write3C4(ClockHigh, hi); 741
694 write3C4(ClockLow, lo); 742 if (is_oldclock(par->chip_id)) {
743 lo = best_n | (best_m << 7);
744 hi = (best_m >> 1) | (best_k << 4);
695 } else { 745 } else {
696 outb(lo, 0x43C8); 746 lo = best_n;
697 outb(hi, 0x43C9); 747 hi = best_m | (best_k << 6);
748 }
749
750 if (is3Dchip(par->chip_id)) {
751 vga_mm_wseq(par->io_virt, ClockHigh, hi);
752 vga_mm_wseq(par->io_virt, ClockLow, lo);
753 } else {
754 t_outb(par, lo, 0x43C8);
755 t_outb(par, hi, 0x43C9);
698 } 756 }
699 debug("VCLK = %X %X\n", hi, lo); 757 debug("VCLK = %X %X\n", hi, lo);
700} 758}
701 759
702/* Set number of lines for flat panels*/ 760/* Set number of lines for flat panels*/
703static void set_number_of_lines(int lines) 761static void set_number_of_lines(struct tridentfb_par *par, int lines)
704{ 762{
705 int tmp = read3CE(CyberEnhance) & 0x8F; 763 int tmp = read3CE(par, CyberEnhance) & 0x8F;
706 if (lines > 1024) 764 if (lines > 1024)
707 tmp |= 0x50; 765 tmp |= 0x50;
708 else if (lines > 768) 766 else if (lines > 768)
@@ -711,24 +769,24 @@ static void set_number_of_lines(int lines)
711 tmp |= 0x20; 769 tmp |= 0x20;
712 else if (lines > 480) 770 else if (lines > 480)
713 tmp |= 0x10; 771 tmp |= 0x10;
714 write3CE(CyberEnhance, tmp); 772 write3CE(par, CyberEnhance, tmp);
715} 773}
716 774
717/* 775/*
718 * If we see that FP is active we assume we have one. 776 * If we see that FP is active we assume we have one.
719 * Otherwise we have a CRT display.User can override. 777 * Otherwise we have a CRT display. User can override.
720 */ 778 */
721static unsigned int __devinit get_displaytype(void)
779static int __devinit is_flatpanel(struct tridentfb_par *par)
722{ 780{
723 if (fp) 781 if (fp)
724 return DISPLAY_FP;
725 if (crt || !chipcyber)
726 return DISPLAY_CRT;
727 return (read3CE(FPConfig) & 0x10) ? DISPLAY_FP : DISPLAY_CRT;
782 return 1;
783 if (crt || !iscyber(par->chip_id))
784 return 0;
785 return (read3CE(par, FPConfig) & 0x10) ? 1 : 0;
728} 786}
729 787
730/* Try detecting the video memory size */ 788/* Try detecting the video memory size */
731static unsigned int __devinit get_memsize(void) 789static unsigned int __devinit get_memsize(struct tridentfb_par *par)
732{ 790{
733 unsigned char tmp, tmp2; 791 unsigned char tmp, tmp2;
734 unsigned int k; 792 unsigned int k;
@@ -737,12 +795,12 @@ static unsigned int __devinit get_memsize(void)
737 if (memsize) 795 if (memsize)
738 k = memsize * Kb; 796 k = memsize * Kb;
739 else 797 else
740 switch (chip_id) { 798 switch (par->chip_id) {
741 case CYBER9525DVD: 799 case CYBER9525DVD:
742 k = 2560 * Kb; 800 k = 2560 * Kb;
743 break; 801 break;
744 default: 802 default:
745 tmp = read3X4(SPR) & 0x0F; 803 tmp = read3X4(par, SPR) & 0x0F;
746 switch (tmp) { 804 switch (tmp) {
747 805
748 case 0x01: 806 case 0x01:
@@ -774,7 +832,7 @@ static unsigned int __devinit get_memsize(void)
774 break; 832 break;
775 case 0x0E: /* XP */ 833 case 0x0E: /* XP */
776 834
777 tmp2 = read3C4(0xC1); 835 tmp2 = vga_mm_rseq(par->io_virt, 0xC1);
778 switch (tmp2) { 836 switch (tmp2) {
779 case 0x00: 837 case 0x00:
780 k = 20 * Mb; 838 k = 20 * Mb;
@@ -812,26 +870,67 @@ static unsigned int __devinit get_memsize(void)
812static int tridentfb_check_var(struct fb_var_screeninfo *var, 870static int tridentfb_check_var(struct fb_var_screeninfo *var,
813 struct fb_info *info) 871 struct fb_info *info)
814{ 872{
873 struct tridentfb_par *par = info->par;
815 int bpp = var->bits_per_pixel; 874 int bpp = var->bits_per_pixel;
875 int line_length;
876 int ramdac = 230000; /* 230MHz for most 3D chips */
816 debug("enter\n"); 877 debug("enter\n");
817 878
818 /* check color depth */ 879 /* check color depth */
819 if (bpp == 24) 880 if (bpp == 24)
820 bpp = var->bits_per_pixel = 32; 881 bpp = var->bits_per_pixel = 32;
882 if (bpp != 8 && bpp != 16 && bpp != 32)
883 return -EINVAL;
884 if (par->chip_id == TGUI9440 && bpp == 32)
885 return -EINVAL;
821 /* check whether resolution fits on panel and in memory */ 886 /* check whether resolution fits on panel and in memory */
822 if (flatpanel && nativex && var->xres > nativex) 887 if (par->flatpanel && nativex && var->xres > nativex)
888 return -EINVAL;
889 /* various resolution checks */
890 var->xres = (var->xres + 7) & ~0x7;
891 if (var->xres > var->xres_virtual)
892 var->xres_virtual = var->xres;
893 if (var->yres > var->yres_virtual)
894 var->yres_virtual = var->yres;
895 if (var->xres_virtual > 4095 || var->yres > 2048)
823 return -EINVAL; 896 return -EINVAL;
824 if (var->xres * var->yres_virtual * bpp / 8 > info->fix.smem_len) 897 /* prevent from position overflow for acceleration */
898 if (var->yres_virtual > 0xffff)
899 return -EINVAL;
900 line_length = var->xres_virtual * bpp / 8;
901
902 if (!is3Dchip(par->chip_id) &&
903 !(info->flags & FBINFO_HWACCEL_DISABLED)) {
904 /* acceleration requires line length to be power of 2 */
905 if (line_length <= 512)
906 var->xres_virtual = 512 * 8 / bpp;
907 else if (line_length <= 1024)
908 var->xres_virtual = 1024 * 8 / bpp;
909 else if (line_length <= 2048)
910 var->xres_virtual = 2048 * 8 / bpp;
911 else if (line_length <= 4096)
912 var->xres_virtual = 4096 * 8 / bpp;
913 else if (line_length <= 8192)
914 var->xres_virtual = 8192 * 8 / bpp;
915 else
916 return -EINVAL;
917
918 line_length = var->xres_virtual * bpp / 8;
919 }
920
921 /* datasheet specifies how to set panning only up to 4 MB */
922 if (line_length * (var->yres_virtual - var->yres) > (4 << 20))
923 var->yres_virtual = ((4 << 20) / line_length) + var->yres;
924
925 if (line_length * var->yres_virtual > info->fix.smem_len)
825 return -EINVAL; 926 return -EINVAL;
826 927
827 switch (bpp) { 928 switch (bpp) {
828 case 8: 929 case 8:
829 var->red.offset = 0; 930 var->red.offset = 0;
830 var->green.offset = 0; 931 var->red.length = 8;
831 var->blue.offset = 0; 932 var->green = var->red;
832 var->red.length = 6; 933 var->blue = var->red;
833 var->green.length = 6;
834 var->blue.length = 6;
835 break; 934 break;
836 case 16: 935 case 16:
837 var->red.offset = 11; 936 var->red.offset = 11;
@@ -852,6 +951,33 @@ static int tridentfb_check_var(struct fb_var_screeninfo *var,
852 default: 951 default:
853 return -EINVAL; 952 return -EINVAL;
854 } 953 }
954
955 if (is_xp(par->chip_id))
956 ramdac = 350000;
957
958 switch (par->chip_id) {
959 case TGUI9440:
960 ramdac = (bpp >= 16) ? 45000 : 90000;
961 break;
962 case CYBER9320:
963 case TGUI9660:
964 ramdac = 135000;
965 break;
966 case PROVIDIA9685:
967 case CYBER9388:
968 case CYBER9382:
969 case CYBER9385:
970 ramdac = 170000;
971 break;
972 }
973
974 /* The clock is doubled for 32 bpp */
975 if (bpp == 32)
976 ramdac /= 2;
977
978 if (PICOS2KHZ(var->pixclock) > ramdac)
979 return -EINVAL;
980
855 debug("exit\n"); 981 debug("exit\n");
856 982
857 return 0; 983 return 0;
@@ -862,25 +988,31 @@ static int tridentfb_check_var(struct fb_var_screeninfo *var,
862static int tridentfb_pan_display(struct fb_var_screeninfo *var, 988static int tridentfb_pan_display(struct fb_var_screeninfo *var,
863 struct fb_info *info) 989 struct fb_info *info)
864{ 990{
991 struct tridentfb_par *par = info->par;
865 unsigned int offset; 992 unsigned int offset;
866 993
867 debug("enter\n"); 994 debug("enter\n");
868 offset = (var->xoffset + (var->yoffset * var->xres)) 995 offset = (var->xoffset + (var->yoffset * var->xres_virtual))
869 * var->bits_per_pixel / 32; 996 * var->bits_per_pixel / 32;
870 info->var.xoffset = var->xoffset; 997 set_screen_start(par, offset);
871 info->var.yoffset = var->yoffset;
872 set_screen_start(offset);
873 debug("exit\n"); 998 debug("exit\n");
874 return 0; 999 return 0;
875} 1000}
876 1001
877#define shadowmode_on() write3CE(CyberControl, read3CE(CyberControl) | 0x81) 1002static inline void shadowmode_on(struct tridentfb_par *par)
878#define shadowmode_off() write3CE(CyberControl, read3CE(CyberControl) & 0x7E) 1003{
1004 write3CE(par, CyberControl, read3CE(par, CyberControl) | 0x81);
1005}
1006
1007static inline void shadowmode_off(struct tridentfb_par *par)
1008{
1009 write3CE(par, CyberControl, read3CE(par, CyberControl) & 0x7E);
1010}
879 1011
880/* Set the hardware to the requested video mode */ 1012/* Set the hardware to the requested video mode */
881static int tridentfb_set_par(struct fb_info *info) 1013static int tridentfb_set_par(struct fb_info *info)
882{ 1014{
883 struct tridentfb_par *par = (struct tridentfb_par *)(info->par); 1015 struct tridentfb_par *par = info->par;
884 u32 htotal, hdispend, hsyncstart, hsyncend, hblankstart, hblankend; 1016 u32 htotal, hdispend, hsyncstart, hsyncend, hblankstart, hblankend;
885 u32 vtotal, vdispend, vsyncstart, vsyncend, vblankstart, vblankend; 1017 u32 vtotal, vdispend, vsyncstart, vsyncend, vblankstart, vblankend;
886 struct fb_var_screeninfo *var = &info->var; 1018 struct fb_var_screeninfo *var = &info->var;
@@ -891,58 +1023,73 @@ static int tridentfb_set_par(struct fb_info *info)
891 debug("enter\n"); 1023 debug("enter\n");
892 hdispend = var->xres / 8 - 1; 1024 hdispend = var->xres / 8 - 1;
893 hsyncstart = (var->xres + var->right_margin) / 8; 1025 hsyncstart = (var->xres + var->right_margin) / 8;
894 hsyncend = var->hsync_len / 8; 1026 hsyncend = (var->xres + var->right_margin + var->hsync_len) / 8;
895 htotal = 1027 htotal = (var->xres + var->left_margin + var->right_margin +
896 (var->xres + var->left_margin + var->right_margin + 1028 var->hsync_len) / 8 - 5;
897 var->hsync_len) / 8 - 10;
898 hblankstart = hdispend + 1; 1029 hblankstart = hdispend + 1;
899 hblankend = htotal + 5; 1030 hblankend = htotal + 3;
900 1031
901 vdispend = var->yres - 1; 1032 vdispend = var->yres - 1;
902 vsyncstart = var->yres + var->lower_margin; 1033 vsyncstart = var->yres + var->lower_margin;
903 vsyncend = var->vsync_len; 1034 vsyncend = vsyncstart + var->vsync_len;
904 vtotal = var->upper_margin + vsyncstart + vsyncend - 2; 1035 vtotal = var->upper_margin + vsyncend - 2;
905 vblankstart = var->yres; 1036 vblankstart = vdispend + 1;
906 vblankend = vtotal + 2; 1037 vblankend = vtotal;
1038
1039 if (info->var.vmode & FB_VMODE_INTERLACED) {
1040 vtotal /= 2;
1041 vdispend /= 2;
1042 vsyncstart /= 2;
1043 vsyncend /= 2;
1044 vblankstart /= 2;
1045 vblankend /= 2;
1046 }
907 1047
908 crtc_unlock(); 1048 enable_mmio(par);
909 write3CE(CyberControl, 8); 1049 crtc_unlock(par);
1050 write3CE(par, CyberControl, 8);
1051 tmp = 0xEB;
1052 if (var->sync & FB_SYNC_HOR_HIGH_ACT)
1053 tmp &= ~0x40;
1054 if (var->sync & FB_SYNC_VERT_HIGH_ACT)
1055 tmp &= ~0x80;
910 1056
911 if (flatpanel && var->xres < nativex) { 1057 if (par->flatpanel && var->xres < nativex) {
912 /* 1058 /*
913 * on flat panels with native size larger 1059 * on flat panels with native size larger
914 * than requested resolution decide whether 1060 * than requested resolution decide whether
915 * we stretch or center 1061 * we stretch or center
916 */ 1062 */
917 t_outb(0xEB, 0x3C2); 1063 t_outb(par, tmp | 0xC0, VGA_MIS_W);
918 1064
919 shadowmode_on(); 1065 shadowmode_on(par);
920 1066
921 if (center) 1067 if (center)
922 screen_center(); 1068 screen_center(par);
923 else if (stretch) 1069 else if (stretch)
924 screen_stretch(); 1070 screen_stretch(par);
925 1071
926 } else { 1072 } else {
927 t_outb(0x2B, 0x3C2); 1073 t_outb(par, tmp, VGA_MIS_W);
928 write3CE(CyberControl, 8); 1074 write3CE(par, CyberControl, 8);
929 } 1075 }
930 1076
931 /* vertical timing values */ 1077 /* vertical timing values */
932 write3X4(CRTVTotal, vtotal & 0xFF); 1078 write3X4(par, VGA_CRTC_V_TOTAL, vtotal & 0xFF);
933 write3X4(CRTVDispEnd, vdispend & 0xFF); 1079 write3X4(par, VGA_CRTC_V_DISP_END, vdispend & 0xFF);
934 write3X4(CRTVSyncStart, vsyncstart & 0xFF); 1080 write3X4(par, VGA_CRTC_V_SYNC_START, vsyncstart & 0xFF);
935 write3X4(CRTVSyncEnd, (vsyncend & 0x0F)); 1081 write3X4(par, VGA_CRTC_V_SYNC_END, (vsyncend & 0x0F));
936 write3X4(CRTVBlankStart, vblankstart & 0xFF); 1082 write3X4(par, VGA_CRTC_V_BLANK_START, vblankstart & 0xFF);
937 write3X4(CRTVBlankEnd, 0 /* p->vblankend & 0xFF */ ); 1083 write3X4(par, VGA_CRTC_V_BLANK_END, vblankend & 0xFF);
938 1084
939 /* horizontal timing values */ 1085 /* horizontal timing values */
940 write3X4(CRTHTotal, htotal & 0xFF); 1086 write3X4(par, VGA_CRTC_H_TOTAL, htotal & 0xFF);
941 write3X4(CRTHDispEnd, hdispend & 0xFF); 1087 write3X4(par, VGA_CRTC_H_DISP, hdispend & 0xFF);
942 write3X4(CRTHSyncStart, hsyncstart & 0xFF); 1088 write3X4(par, VGA_CRTC_H_SYNC_START, hsyncstart & 0xFF);
943 write3X4(CRTHSyncEnd, (hsyncend & 0x1F) | ((hblankend & 0x20) << 2)); 1089 write3X4(par, VGA_CRTC_H_SYNC_END,
944 write3X4(CRTHBlankStart, hblankstart & 0xFF); 1090 (hsyncend & 0x1F) | ((hblankend & 0x20) << 2));
945 write3X4(CRTHBlankEnd, 0 /* (p->hblankend & 0x1F) */ ); 1091 write3X4(par, VGA_CRTC_H_BLANK_START, hblankstart & 0xFF);
1092 write3X4(par, VGA_CRTC_H_BLANK_END, hblankend & 0x1F);
946 1093
947 /* higher bits of vertical timing values */ 1094 /* higher bits of vertical timing values */
948 tmp = 0x10; 1095 tmp = 0x10;
@@ -954,39 +1101,43 @@ static int tridentfb_set_par(struct fb_info *info)
954 if (vtotal & 0x200) tmp |= 0x20; 1101 if (vtotal & 0x200) tmp |= 0x20;
955 if (vdispend & 0x200) tmp |= 0x40; 1102 if (vdispend & 0x200) tmp |= 0x40;
956 if (vsyncstart & 0x200) tmp |= 0x80; 1103 if (vsyncstart & 0x200) tmp |= 0x80;
957 write3X4(CRTOverflow, tmp); 1104 write3X4(par, VGA_CRTC_OVERFLOW, tmp);
958 1105
959 tmp = read3X4(CRTHiOrd) | 0x08; /* line compare bit 10 */ 1106 tmp = read3X4(par, CRTHiOrd) & 0x07;
1107 tmp |= 0x08; /* line compare bit 10 */
960 if (vtotal & 0x400) tmp |= 0x80; 1108 if (vtotal & 0x400) tmp |= 0x80;
961 if (vblankstart & 0x400) tmp |= 0x40; 1109 if (vblankstart & 0x400) tmp |= 0x40;
962 if (vsyncstart & 0x400) tmp |= 0x20; 1110 if (vsyncstart & 0x400) tmp |= 0x20;
963 if (vdispend & 0x400) tmp |= 0x10; 1111 if (vdispend & 0x400) tmp |= 0x10;
964 write3X4(CRTHiOrd, tmp); 1112 write3X4(par, CRTHiOrd, tmp);
965 1113
966 tmp = 0; 1114 tmp = (htotal >> 8) & 0x01;
967 if (htotal & 0x800) tmp |= 0x800 >> 11; 1115 tmp |= (hdispend >> 7) & 0x02;
968 if (hblankstart & 0x800) tmp |= 0x800 >> 7; 1116 tmp |= (hsyncstart >> 5) & 0x08;
969 write3X4(HorizOverflow, tmp); 1117 tmp |= (hblankstart >> 4) & 0x10;
1118 write3X4(par, HorizOverflow, tmp);
970 1119
971 tmp = 0x40; 1120 tmp = 0x40;
972 if (vblankstart & 0x200) tmp |= 0x20; 1121 if (vblankstart & 0x200) tmp |= 0x20;
973//FIXME if (info->var.vmode & FB_VMODE_DOUBLE) tmp |= 0x80; /* double scan for 200 line modes */ 1122//FIXME if (info->var.vmode & FB_VMODE_DOUBLE) tmp |= 0x80; /* double scan for 200 line modes */
974 write3X4(CRTMaxScanLine, tmp); 1123 write3X4(par, VGA_CRTC_MAX_SCAN, tmp);
975 1124
976 write3X4(CRTLineCompare, 0xFF); 1125 write3X4(par, VGA_CRTC_LINE_COMPARE, 0xFF);
977 write3X4(CRTPRowScan, 0); 1126 write3X4(par, VGA_CRTC_PRESET_ROW, 0);
978 write3X4(CRTModeControl, 0xC3); 1127 write3X4(par, VGA_CRTC_MODE, 0xC3);
979 1128
980 write3X4(LinearAddReg, 0x20); /* enable linear addressing */ 1129 write3X4(par, LinearAddReg, 0x20); /* enable linear addressing */
981 1130
982 tmp = (info->var.vmode & FB_VMODE_INTERLACED) ? 0x84 : 0x80; 1131 tmp = (info->var.vmode & FB_VMODE_INTERLACED) ? 0x84 : 0x80;
983 write3X4(CRTCModuleTest, tmp); /* enable access extended memory */ 1132 /* enable access extended memory */
984 1133 write3X4(par, CRTCModuleTest, tmp);
985 write3X4(GraphEngReg, 0x80); /* enable GE for text acceleration */ 1134 tmp = read3CE(par, MiscIntContReg) & ~0x4;
1135 if (info->var.vmode & FB_VMODE_INTERLACED)
1136 tmp |= 0x4;
1137 write3CE(par, MiscIntContReg, tmp);
986 1138
987#ifdef CONFIG_FB_TRIDENT_ACCEL 1139 /* enable GE for text acceleration */
988 acc->init_accel(info->var.xres, bpp); 1140 write3X4(par, GraphEngReg, 0x80);
989#endif
990 1141
991 switch (bpp) { 1142 switch (bpp) {
992 case 8: 1143 case 8:
@@ -1003,57 +1154,59 @@ static int tridentfb_set_par(struct fb_info *info)
1003 break; 1154 break;
1004 } 1155 }
1005 1156
1006 write3X4(PixelBusReg, tmp); 1157 write3X4(par, PixelBusReg, tmp);
1007 1158
1008 tmp = 0x10; 1159 tmp = read3X4(par, DRAMControl);
1009 if (chipcyber) 1160 if (!is_oldprotect(par->chip_id))
1161 tmp |= 0x10;
1162 if (iscyber(par->chip_id))
1010 tmp |= 0x20; 1163 tmp |= 0x20;
1011 write3X4(DRAMControl, tmp); /* both IO, linear enable */ 1164 write3X4(par, DRAMControl, tmp); /* both IO, linear enable */
1012 1165
1013 write3X4(InterfaceSel, read3X4(InterfaceSel) | 0x40); 1166 write3X4(par, InterfaceSel, read3X4(par, InterfaceSel) | 0x40);
1014 write3X4(Performance, 0x92); 1167 if (!is_xp(par->chip_id))
1015 write3X4(PCIReg, 0x07); /* MMIO & PCI read and write burst enable */ 1168 write3X4(par, Performance, read3X4(par, Performance) | 0x10);
1169 /* MMIO & PCI read and write burst enable */
1170 if (par->chip_id != TGUI9440 && par->chip_id != IMAGE975)
1171 write3X4(par, PCIReg, read3X4(par, PCIReg) | 0x06);
1172
1173 vga_mm_wseq(par->io_virt, 0, 3);
1174 vga_mm_wseq(par->io_virt, 1, 1); /* set char clock 8 dots wide */
1175 /* enable 4 maps because needed in chain4 mode */
1176 vga_mm_wseq(par->io_virt, 2, 0x0F);
1177 vga_mm_wseq(par->io_virt, 3, 0);
1178 vga_mm_wseq(par->io_virt, 4, 0x0E); /* memory mode enable bitmaps ?? */
1016 1179
1017 /* convert from picoseconds to kHz */ 1180 /* convert from picoseconds to kHz */
1018 vclk = PICOS2KHZ(info->var.pixclock); 1181 vclk = PICOS2KHZ(info->var.pixclock);
1019 if (bpp == 32) 1182
1183 /* divide clock by 2 if 32bpp chain4 mode display and CPU path */
1184 tmp = read3CE(par, MiscExtFunc) & 0xF0;
1185 if (bpp == 32 || (par->chip_id == TGUI9440 && bpp == 16)) {
1186 tmp |= 8;
1020 vclk *= 2; 1187 vclk *= 2;
1021 set_vclk(vclk);
1022
1023 write3C4(0, 3);
1024 write3C4(1, 1); /* set char clock 8 dots wide */
1025 write3C4(2, 0x0F); /* enable 4 maps because needed in chain4 mode */
1026 write3C4(3, 0);
1027 write3C4(4, 0x0E); /* memory mode enable bitmaps ?? */
1028
1029 write3CE(MiscExtFunc, (bpp == 32) ? 0x1A : 0x12); /* divide clock by 2 if 32bpp */
1030 /* chain4 mode display and CPU path */
1031 write3CE(0x5, 0x40); /* no CGA compat, allow 256 col */
1032 write3CE(0x6, 0x05); /* graphics mode */
1033 write3CE(0x7, 0x0F); /* planes? */
1034
1035 if (chip_id == CYBERBLADEXPAi1) {
1036 /* This fixes snow-effect in 32 bpp */
1037 write3X4(CRTHSyncStart, 0x84);
1038 } 1188 }
1189 set_vclk(par, vclk);
1190 write3CE(par, MiscExtFunc, tmp | 0x12);
1191 write3CE(par, 0x5, 0x40); /* no CGA compat, allow 256 col */
1192 write3CE(par, 0x6, 0x05); /* graphics mode */
1193 write3CE(par, 0x7, 0x0F); /* planes? */
1039 1194
1040 writeAttr(0x10, 0x41); /* graphics mode and support 256 color modes */ 1195 /* graphics mode and support 256 color modes */
1041 writeAttr(0x12, 0x0F); /* planes */ 1196 writeAttr(par, 0x10, 0x41);
1042 writeAttr(0x13, 0); /* horizontal pel panning */ 1197 writeAttr(par, 0x12, 0x0F); /* planes */
1198 writeAttr(par, 0x13, 0); /* horizontal pel panning */
1043 1199
1044 /* colors */ 1200 /* colors */
1045 for (tmp = 0; tmp < 0x10; tmp++) 1201 for (tmp = 0; tmp < 0x10; tmp++)
1046 writeAttr(tmp, tmp); 1202 writeAttr(par, tmp, tmp);
1047 readb(par->io_virt + CRT + 0x0A); /* flip-flop to index */ 1203 fb_readb(par->io_virt + VGA_IS1_RC); /* flip-flop to index */
1048 t_outb(0x20, 0x3C0); /* enable attr */ 1204 t_outb(par, 0x20, VGA_ATT_W); /* enable attr */
1049 1205
1050 switch (bpp) { 1206 switch (bpp) {
1051 case 8: 1207 case 8:
1052 tmp = 0; 1208 tmp = 0;
1053 break; 1209 break;
1054 case 15:
1055 tmp = 0x10;
1056 break;
1057 case 16: 1210 case 16:
1058 tmp = 0x30; 1211 tmp = 0x30;
1059 break; 1212 break;
@@ -1063,19 +1216,23 @@ static int tridentfb_set_par(struct fb_info *info)
1063 break; 1216 break;
1064 } 1217 }
1065 1218
1066 t_inb(0x3C8); 1219 t_inb(par, VGA_PEL_IW);
1067 t_inb(0x3C6); 1220 t_inb(par, VGA_PEL_MSK);
1068 t_inb(0x3C6); 1221 t_inb(par, VGA_PEL_MSK);
1069 t_inb(0x3C6); 1222 t_inb(par, VGA_PEL_MSK);
1070 t_inb(0x3C6); 1223 t_inb(par, VGA_PEL_MSK);
1071 t_outb(tmp, 0x3C6); 1224 t_outb(par, tmp, VGA_PEL_MSK);
1072 t_inb(0x3C8); 1225 t_inb(par, VGA_PEL_IW);
1073 1226
1074 if (flatpanel) 1227 if (par->flatpanel)
1075 set_number_of_lines(info->var.yres); 1228 set_number_of_lines(par, info->var.yres);
1076 set_lwidth(info->var.xres * bpp / (4 * 16)); 1229 info->fix.line_length = info->var.xres_virtual * bpp / 8;
1230 set_lwidth(par, info->fix.line_length / 8);
1231
1232 if (!(info->flags & FBINFO_HWACCEL_DISABLED))
1233 par->init_accel(par, info->var.xres_virtual, bpp);
1234
1077 info->fix.visual = (bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; 1235 info->fix.visual = (bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
1078 info->fix.line_length = info->var.xres * (bpp >> 3);
1079 info->cmap.len = (bpp == 8) ? 256 : 16; 1236 info->cmap.len = (bpp == 8) ? 256 : 16;
1080 debug("exit\n"); 1237 debug("exit\n");
1081 return 0; 1238 return 0;
@@ -1087,17 +1244,18 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
1087 struct fb_info *info) 1244 struct fb_info *info)
1088{ 1245{
1089 int bpp = info->var.bits_per_pixel; 1246 int bpp = info->var.bits_per_pixel;
1247 struct tridentfb_par *par = info->par;
1090 1248
1091 if (regno >= info->cmap.len) 1249 if (regno >= info->cmap.len)
1092 return 1; 1250 return 1;
1093 1251
1094 if (bpp == 8) { 1252 if (bpp == 8) {
1095 t_outb(0xFF, 0x3C6); 1253 t_outb(par, 0xFF, VGA_PEL_MSK);
1096 t_outb(regno, 0x3C8); 1254 t_outb(par, regno, VGA_PEL_IW);
1097 1255
1098 t_outb(red >> 10, 0x3C9); 1256 t_outb(par, red >> 10, VGA_PEL_D);
1099 t_outb(green >> 10, 0x3C9); 1257 t_outb(par, green >> 10, VGA_PEL_D);
1100 t_outb(blue >> 10, 0x3C9); 1258 t_outb(par, blue >> 10, VGA_PEL_D);
1101 1259
1102 } else if (regno < 16) { 1260 } else if (regno < 16) {
1103 if (bpp == 16) { /* RGB 565 */ 1261 if (bpp == 16) { /* RGB 565 */
@@ -1108,28 +1266,28 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
1108 col |= col << 16; 1266 col |= col << 16;
1109 ((u32 *)(info->pseudo_palette))[regno] = col; 1267 ((u32 *)(info->pseudo_palette))[regno] = col;
1110 } else if (bpp == 32) /* ARGB 8888 */ 1268 } else if (bpp == 32) /* ARGB 8888 */
1111 ((u32*)info->pseudo_palette)[regno] = 1269 ((u32 *)info->pseudo_palette)[regno] =
1112 ((transp & 0xFF00) << 16) | 1270 ((transp & 0xFF00) << 16) |
1113 ((red & 0xFF00) << 8) | 1271 ((red & 0xFF00) << 8) |
1114 ((green & 0xFF00)) | 1272 ((green & 0xFF00)) |
1115 ((blue & 0xFF00) >> 8); 1273 ((blue & 0xFF00) >> 8);
1116 } 1274 }
1117 1275
1118/* debug("exit\n"); */
1119 return 0; 1276 return 0;
1120} 1277}
1121 1278
1122/* Try blanking the screen.For flat panels it does nothing */ 1279/* Try blanking the screen. For flat panels it does nothing */
1123static int tridentfb_blank(int blank_mode, struct fb_info *info) 1280static int tridentfb_blank(int blank_mode, struct fb_info *info)
1124{ 1281{
1125 unsigned char PMCont, DPMSCont; 1282 unsigned char PMCont, DPMSCont;
1283 struct tridentfb_par *par = info->par;
1126 1284
1127 debug("enter\n"); 1285 debug("enter\n");
1128 if (flatpanel) 1286 if (par->flatpanel)
1129 return 0; 1287 return 0;
1130 t_outb(0x04, 0x83C8); /* Read DPMS Control */ 1288 t_outb(par, 0x04, 0x83C8); /* Read DPMS Control */
1131 PMCont = t_inb(0x83C6) & 0xFC; 1289 PMCont = t_inb(par, 0x83C6) & 0xFC;
1132 DPMSCont = read3CE(PowerStatus) & 0xFC; 1290 DPMSCont = read3CE(par, PowerStatus) & 0xFC;
1133 switch (blank_mode) { 1291 switch (blank_mode) {
1134 case FB_BLANK_UNBLANK: 1292 case FB_BLANK_UNBLANK:
1135 /* Screen: On, HSync: On, VSync: On */ 1293 /* Screen: On, HSync: On, VSync: On */
@@ -1155,9 +1313,9 @@ static int tridentfb_blank(int blank_mode, struct fb_info *info)
1155 break; 1313 break;
1156 } 1314 }
1157 1315
1158 write3CE(PowerStatus, DPMSCont); 1316 write3CE(par, PowerStatus, DPMSCont);
1159 t_outb(4, 0x83C8); 1317 t_outb(par, 4, 0x83C8);
1160 t_outb(PMCont, 0x83C6); 1318 t_outb(par, PMCont, 0x83C6);
1161 1319
1162 debug("exit\n"); 1320 debug("exit\n");
1163 1321
@@ -1174,33 +1332,46 @@ static struct fb_ops tridentfb_ops = {
1174 .fb_set_par = tridentfb_set_par, 1332 .fb_set_par = tridentfb_set_par,
1175 .fb_fillrect = tridentfb_fillrect, 1333 .fb_fillrect = tridentfb_fillrect,
1176 .fb_copyarea = tridentfb_copyarea, 1334 .fb_copyarea = tridentfb_copyarea,
1177 .fb_imageblit = cfb_imageblit, 1335 .fb_imageblit = tridentfb_imageblit,
1336#ifdef CONFIG_FB_TRIDENT_ACCEL
1337 .fb_sync = tridentfb_sync,
1338#endif
1178}; 1339};
1179 1340
1180static int __devinit trident_pci_probe(struct pci_dev * dev, 1341static int __devinit trident_pci_probe(struct pci_dev *dev,
1181 const struct pci_device_id * id) 1342 const struct pci_device_id *id)
1182{ 1343{
1183 int err; 1344 int err;
1184 unsigned char revision; 1345 unsigned char revision;
1346 struct fb_info *info;
1347 struct tridentfb_par *default_par;
1348 int chip3D;
1349 int chip_id;
1185 1350
1186 err = pci_enable_device(dev); 1351 err = pci_enable_device(dev);
1187 if (err) 1352 if (err)
1188 return err; 1353 return err;
1189 1354
1190 chip_id = id->device; 1355 info = framebuffer_alloc(sizeof(struct tridentfb_par), &dev->dev);
1356 if (!info)
1357 return -ENOMEM;
1358 default_par = info->par;
1191 1359
1192 if (chip_id == CYBERBLADEi1) 1360 chip_id = id->device;
1193 output("*** Please do use cyblafb, Cyberblade/i1 support "
1194 "will soon be removed from tridentfb!\n");
1195 1361
1362#ifndef CONFIG_FB_TRIDENT_ACCEL
1363 noaccel = 1;
1364#endif
1196 1365
1197 /* If PCI id is 0x9660 then further detect chip type */ 1366 /* If PCI id is 0x9660 then further detect chip type */
1198 1367
1199 if (chip_id == TGUI9660) { 1368 if (chip_id == TGUI9660) {
1200 outb(RevisionID, 0x3C4); 1369 revision = vga_io_rseq(RevisionID);
1201 revision = inb(0x3C5);
1202 1370
1203 switch (revision) { 1371 switch (revision) {
1372 case 0x21:
1373 chip_id = PROVIDIA9685;
1374 break;
1204 case 0x22: 1375 case 0x22:
1205 case 0x23: 1376 case 0x23:
1206 chip_id = CYBER9397; 1377 chip_id = CYBER9397;
@@ -1229,123 +1400,170 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
1229 } 1400 }
1230 1401
1231 chip3D = is3Dchip(chip_id); 1402 chip3D = is3Dchip(chip_id);
1232 chipcyber = iscyber(chip_id);
1233 1403
1234 if (is_xp(chip_id)) { 1404 if (is_xp(chip_id)) {
1235 acc = &accel_xp; 1405 default_par->init_accel = xp_init_accel;
1406 default_par->wait_engine = xp_wait_engine;
1407 default_par->fill_rect = xp_fill_rect;
1408 default_par->copy_rect = xp_copy_rect;
1409 tridentfb_fix.accel = FB_ACCEL_TRIDENT_BLADEXP;
1236 } else if (is_blade(chip_id)) { 1410 } else if (is_blade(chip_id)) {
1237 acc = &accel_blade; 1411 default_par->init_accel = blade_init_accel;
1238 } else { 1412 default_par->wait_engine = blade_wait_engine;
1239 acc = &accel_image; 1413 default_par->fill_rect = blade_fill_rect;
1414 default_par->copy_rect = blade_copy_rect;
1415 default_par->image_blit = blade_image_blit;
1416 tridentfb_fix.accel = FB_ACCEL_TRIDENT_BLADE3D;
1417 } else if (chip3D) { /* 3DImage family left */
1418 default_par->init_accel = image_init_accel;
1419 default_par->wait_engine = image_wait_engine;
1420 default_par->fill_rect = image_fill_rect;
1421 default_par->copy_rect = image_copy_rect;
1422 tridentfb_fix.accel = FB_ACCEL_TRIDENT_3DIMAGE;
1423 } else { /* TGUI 9440/96XX family */
1424 default_par->init_accel = tgui_init_accel;
1425 default_par->wait_engine = xp_wait_engine;
1426 default_par->fill_rect = tgui_fill_rect;
1427 default_par->copy_rect = tgui_copy_rect;
1428 tridentfb_fix.accel = FB_ACCEL_TRIDENT_TGUI;
1240 } 1429 }
1241 1430
1242 /* acceleration is on by default for 3D chips */ 1431 default_par->chip_id = chip_id;
1243 defaultaccel = chip3D && !noaccel;
1244
1245 fb_info.par = &default_par;
1246 1432
1247 /* setup MMIO region */ 1433 /* setup MMIO region */
1248 tridentfb_fix.mmio_start = pci_resource_start(dev, 1); 1434 tridentfb_fix.mmio_start = pci_resource_start(dev, 1);
1249 tridentfb_fix.mmio_len = chip3D ? 0x20000 : 0x10000; 1435 tridentfb_fix.mmio_len = pci_resource_len(dev, 1);
1250 1436
1251 if (!request_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len, "tridentfb")) { 1437 if (!request_mem_region(tridentfb_fix.mmio_start,
1438 tridentfb_fix.mmio_len, "tridentfb")) {
1252 debug("request_region failed!\n"); 1439 debug("request_region failed!\n");
1440 framebuffer_release(info);
1253 return -1; 1441 return -1;
1254 } 1442 }
1255 1443
1256 default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); 1444 default_par->io_virt = ioremap_nocache(tridentfb_fix.mmio_start,
1445 tridentfb_fix.mmio_len);
1257 1446
1258 if (!default_par.io_virt) { 1447 if (!default_par->io_virt) {
1259 debug("ioremap failed\n"); 1448 debug("ioremap failed\n");
1260 err = -1; 1449 err = -1;
1261 goto out_unmap1; 1450 goto out_unmap1;
1262 } 1451 }
1263 1452
1264 enable_mmio(); 1453 enable_mmio(default_par);
1265 1454
1266 /* setup framebuffer memory */ 1455 /* setup framebuffer memory */
1267 tridentfb_fix.smem_start = pci_resource_start(dev, 0); 1456 tridentfb_fix.smem_start = pci_resource_start(dev, 0);
1268 tridentfb_fix.smem_len = get_memsize(); 1457 tridentfb_fix.smem_len = get_memsize(default_par);
1269 1458
1270 if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { 1459 if (!request_mem_region(tridentfb_fix.smem_start,
1460 tridentfb_fix.smem_len, "tridentfb")) {
1271 debug("request_mem_region failed!\n"); 1461 debug("request_mem_region failed!\n");
1272 disable_mmio(); 1462 disable_mmio(info->par);
1273 err = -1; 1463 err = -1;
1274 goto out_unmap1; 1464 goto out_unmap1;
1275 } 1465 }
1276 1466
1277 fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, 1467 info->screen_base = ioremap_nocache(tridentfb_fix.smem_start,
1278 tridentfb_fix.smem_len); 1468 tridentfb_fix.smem_len);
1279 1469
1280 if (!fb_info.screen_base) { 1470 if (!info->screen_base) {
1281 debug("ioremap failed\n"); 1471 debug("ioremap failed\n");
1282 err = -1; 1472 err = -1;
1283 goto out_unmap2; 1473 goto out_unmap2;
1284 } 1474 }
1285 1475
1286 output("%s board found\n", pci_name(dev)); 1476 default_par->flatpanel = is_flatpanel(default_par);
1287 displaytype = get_displaytype();
1288 1477
1289 if (flatpanel) 1478 if (default_par->flatpanel)
1290 nativex = get_nativex(); 1479 nativex = get_nativex(default_par);
1291 1480
1292 fb_info.fix = tridentfb_fix; 1481 info->fix = tridentfb_fix;
1293 fb_info.fbops = &tridentfb_ops; 1482 info->fbops = &tridentfb_ops;
1483 info->pseudo_palette = default_par->pseudo_pal;
1294 1484
1485 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
1486 if (!noaccel && default_par->init_accel) {
1487 info->flags &= ~FBINFO_HWACCEL_DISABLED;
1488 info->flags |= FBINFO_HWACCEL_COPYAREA;
1489 info->flags |= FBINFO_HWACCEL_FILLRECT;
1490 } else
1491 info->flags |= FBINFO_HWACCEL_DISABLED;
1295 1492
1296 fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; 1493 info->pixmap.addr = kmalloc(4096, GFP_KERNEL);
1297#ifdef CONFIG_FB_TRIDENT_ACCEL 1494 if (!info->pixmap.addr) {
1298 fb_info.flags |= FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; 1495 err = -ENOMEM;
1299#endif 1496 goto out_unmap2;
1300 fb_info.pseudo_palette = pseudo_pal; 1497 }
1498
1499 info->pixmap.size = 4096;
1500 info->pixmap.buf_align = 4;
1501 info->pixmap.scan_align = 1;
1502 info->pixmap.access_align = 32;
1503 info->pixmap.flags = FB_PIXMAP_SYSTEM;
1301 1504
1302 if (!fb_find_mode(&default_var, &fb_info, 1505 if (default_par->image_blit) {
1506 info->flags |= FBINFO_HWACCEL_IMAGEBLIT;
1507 info->pixmap.scan_align = 4;
1508 }
1509
1510 if (noaccel) {
1511 printk(KERN_DEBUG "disabling acceleration\n");
1512 info->flags |= FBINFO_HWACCEL_DISABLED;
1513 info->pixmap.scan_align = 1;
1514 }
1515
1516 if (!fb_find_mode(&info->var, info,
1303 mode_option, NULL, 0, NULL, bpp)) { 1517 mode_option, NULL, 0, NULL, bpp)) {
1304 err = -EINVAL; 1518 err = -EINVAL;
1305 goto out_unmap2; 1519 goto out_unmap2;
1306 } 1520 }
1307 err = fb_alloc_cmap(&fb_info.cmap, 256, 0); 1521 err = fb_alloc_cmap(&info->cmap, 256, 0);
1308 if (err < 0) 1522 if (err < 0)
1309 goto out_unmap2; 1523 goto out_unmap2;
1310 1524
1311 if (defaultaccel && acc) 1525 info->var.activate |= FB_ACTIVATE_NOW;
1312 default_var.accel_flags |= FB_ACCELF_TEXT; 1526 info->device = &dev->dev;
1313 else 1527 if (register_framebuffer(info) < 0) {
1314 default_var.accel_flags &= ~FB_ACCELF_TEXT; 1528 printk(KERN_ERR "tridentfb: could not register framebuffer\n");
1315 default_var.activate |= FB_ACTIVATE_NOW; 1529 fb_dealloc_cmap(&info->cmap);
1316 fb_info.var = default_var;
1317 fb_info.device = &dev->dev;
1318 if (register_framebuffer(&fb_info) < 0) {
1319 printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n");
1320 fb_dealloc_cmap(&fb_info.cmap);
1321 err = -EINVAL; 1530 err = -EINVAL;
1322 goto out_unmap2; 1531 goto out_unmap2;
1323 } 1532 }
1324 output("fb%d: %s frame buffer device %dx%d-%dbpp\n", 1533 output("fb%d: %s frame buffer device %dx%d-%dbpp\n",
1325 fb_info.node, fb_info.fix.id, default_var.xres, 1534 info->node, info->fix.id, info->var.xres,
1326 default_var.yres, default_var.bits_per_pixel); 1535 info->var.yres, info->var.bits_per_pixel);
1536
1537 pci_set_drvdata(dev, info);
1327 return 0; 1538 return 0;
1328 1539
1329out_unmap2: 1540out_unmap2:
1330 if (fb_info.screen_base) 1541 kfree(info->pixmap.addr);
1331 iounmap(fb_info.screen_base); 1542 if (info->screen_base)
1543 iounmap(info->screen_base);
1332 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); 1544 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
1333 disable_mmio(); 1545 disable_mmio(info->par);
1334out_unmap1: 1546out_unmap1:
1335 if (default_par.io_virt) 1547 if (default_par->io_virt)
1336 iounmap(default_par.io_virt); 1548 iounmap(default_par->io_virt);
1337 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); 1549 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
1550 framebuffer_release(info);
1338 return err; 1551 return err;
1339} 1552}
1340 1553
1341static void __devexit trident_pci_remove(struct pci_dev *dev) 1554static void __devexit trident_pci_remove(struct pci_dev *dev)
1342{ 1555{
1343 struct tridentfb_par *par = (struct tridentfb_par*)fb_info.par; 1556 struct fb_info *info = pci_get_drvdata(dev);
1344 unregister_framebuffer(&fb_info); 1557 struct tridentfb_par *par = info->par;
1558
1559 unregister_framebuffer(info);
1345 iounmap(par->io_virt); 1560 iounmap(par->io_virt);
1346 iounmap(fb_info.screen_base); 1561 iounmap(info->screen_base);
1347 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); 1562 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
1348 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); 1563 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
1564 pci_set_drvdata(dev, NULL);
1565 kfree(info->pixmap.addr);
1566 framebuffer_release(info);
1349} 1567}
1350 1568
1351/* List of boards that we are trying to support */ 1569/* List of boards that we are trying to support */
@@ -1358,6 +1576,7 @@ static struct pci_device_id trident_devices[] = {
1358 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEAi1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1576 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEAi1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1359 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEAi1D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1577 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEAi1D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1360 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEE4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1578 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEE4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1579 {PCI_VENDOR_ID_TRIDENT, TGUI9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1361 {PCI_VENDOR_ID_TRIDENT, TGUI9660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1580 {PCI_VENDOR_ID_TRIDENT, TGUI9660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1362 {PCI_VENDOR_ID_TRIDENT, IMAGE975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1581 {PCI_VENDOR_ID_TRIDENT, IMAGE975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1363 {PCI_VENDOR_ID_TRIDENT, IMAGE985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1582 {PCI_VENDOR_ID_TRIDENT, IMAGE985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -1399,9 +1618,9 @@ static int __init tridentfb_setup(char *options)
1399 if (!strncmp(opt, "noaccel", 7)) 1618 if (!strncmp(opt, "noaccel", 7))
1400 noaccel = 1; 1619 noaccel = 1;
1401 else if (!strncmp(opt, "fp", 2)) 1620 else if (!strncmp(opt, "fp", 2))
1402 displaytype = DISPLAY_FP; 1621 fp = 1;
1403 else if (!strncmp(opt, "crt", 3)) 1622 else if (!strncmp(opt, "crt", 3))
1404 displaytype = DISPLAY_CRT; 1623 fp = 0;
1405 else if (!strncmp(opt, "bpp=", 4)) 1624 else if (!strncmp(opt, "bpp=", 4))
1406 bpp = simple_strtoul(opt + 4, NULL, 0); 1625 bpp = simple_strtoul(opt + 4, NULL, 0);
1407 else if (!strncmp(opt, "center", 6)) 1626 else if (!strncmp(opt, "center", 6))
@@ -1430,7 +1649,6 @@ static int __init tridentfb_init(void)
1430 return -ENODEV; 1649 return -ENODEV;
1431 tridentfb_setup(option); 1650 tridentfb_setup(option);
1432#endif 1651#endif
1433 output("Trident framebuffer %s initializing\n", VERSION);
1434 return pci_register_driver(&tridentfb_pci_driver); 1652 return pci_register_driver(&tridentfb_pci_driver);
1435} 1653}
1436 1654
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index cdbb56edb6cb..50744229c7a9 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -2054,8 +2054,8 @@ MODULE_PARM_DESC(maxhf,
2054module_param(maxvf, ushort, 0); 2054module_param(maxvf, ushort, 0);
2055MODULE_PARM_DESC(maxvf, 2055MODULE_PARM_DESC(maxvf,
2056 "Maximum vertical frequency [Hz], overrides EDID data"); 2056 "Maximum vertical frequency [Hz], overrides EDID data");
2057module_param_named(mode, mode_option, charp, 0); 2057module_param(mode_option, charp, 0);
2058MODULE_PARM_DESC(mode, 2058MODULE_PARM_DESC(mode_option,
2059 "Specify initial video mode as \"<xres>x<yres>[-<bpp>][@<refresh>]\""); 2059 "Specify initial video mode as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
2060module_param(vbemode, ushort, 0); 2060module_param(vbemode, ushort, 0);
2061MODULE_PARM_DESC(vbemode, 2061MODULE_PARM_DESC(vbemode,
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 072638a9528a..93fe08d6c78f 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -443,19 +443,29 @@ static int vfb_mmap(struct fb_info *info,
443} 443}
444 444
445#ifndef MODULE 445#ifndef MODULE
446/*
447 * The virtual framebuffer driver is only enabled if explicitly
448 * requested by passing 'video=vfb:' (or any actual options).
449 */
446static int __init vfb_setup(char *options) 450static int __init vfb_setup(char *options)
447{ 451{
448 char *this_opt; 452 char *this_opt;
449 453
454 vfb_enable = 0;
455
456 if (!options)
457 return 1;
458
450 vfb_enable = 1; 459 vfb_enable = 1;
451 460
452 if (!options || !*options) 461 if (!*options)
453 return 1; 462 return 1;
454 463
455 while ((this_opt = strsep(&options, ",")) != NULL) { 464 while ((this_opt = strsep(&options, ",")) != NULL) {
456 if (!*this_opt) 465 if (!*this_opt)
457 continue; 466 continue;
458 if (!strncmp(this_opt, "disable", 7)) 467 /* Test disable for backwards compatibility */
468 if (!strcmp(this_opt, "disable"))
459 vfb_enable = 0; 469 vfb_enable = 0;
460 } 470 }
461 return 1; 471 return 1;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 9b3c5923365e..e31bca8a0cb2 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -26,18 +26,6 @@
26#include <asm/io.h> 26#include <asm/io.h>
27#include <video/vga.h> 27#include <video/vga.h>
28 28
29#define GRAPHICS_ADDR_REG VGA_GFX_I /* Graphics address register. */
30#define GRAPHICS_DATA_REG VGA_GFX_D /* Graphics data register. */
31
32#define SET_RESET_INDEX VGA_GFX_SR_VALUE /* Set/Reset Register index. */
33#define ENABLE_SET_RESET_INDEX VGA_GFX_SR_ENABLE /* Enable Set/Reset Register index. */
34#define DATA_ROTATE_INDEX VGA_GFX_DATA_ROTATE /* Data Rotate Register index. */
35#define GRAPHICS_MODE_INDEX VGA_GFX_MODE /* Graphics Mode Register index. */
36#define BIT_MASK_INDEX VGA_GFX_BIT_MASK /* Bit Mask Register index. */
37
38#define dac_reg (VGA_PEL_IW)
39#define dac_val (VGA_PEL_D)
40
41#define VGA_FB_PHYS 0xA0000 29#define VGA_FB_PHYS 0xA0000
42#define VGA_FB_PHYS_LEN 65536 30#define VGA_FB_PHYS_LEN 65536
43 31
@@ -108,7 +96,7 @@ static struct fb_fix_screeninfo vga16fb_fix __initdata = {
108 .visual = FB_VISUAL_PSEUDOCOLOR, 96 .visual = FB_VISUAL_PSEUDOCOLOR,
109 .xpanstep = 8, 97 .xpanstep = 8,
110 .ypanstep = 1, 98 .ypanstep = 1,
111 .line_length = 640/8, 99 .line_length = 640 / 8,
112 .accel = FB_ACCEL_NONE 100 .accel = FB_ACCEL_NONE
113}; 101};
114 102
@@ -135,23 +123,22 @@ static inline int setmode(int mode)
135{ 123{
136 int oldmode; 124 int oldmode;
137 125
138 vga_io_w(GRAPHICS_ADDR_REG, GRAPHICS_MODE_INDEX);
139 oldmode = vga_io_r(GRAPHICS_DATA_REG);
140 vga_io_w(GRAPHICS_DATA_REG, mode);
126 oldmode = vga_io_rgfx(VGA_GFX_MODE);
127 vga_io_w(VGA_GFX_D, mode);
141 return oldmode; 128 return oldmode;
142} 129}
143 130
144/* Select the Bit Mask Register and return its value. */ 131/* Select the Bit Mask Register and return its value. */
145static inline int selectmask(void) 132static inline int selectmask(void)
146{ 133{
147 return vga_io_rgfx(BIT_MASK_INDEX); 134 return vga_io_rgfx(VGA_GFX_BIT_MASK);
148} 135}
149 136
150/* Set the value of the Bit Mask Register. It must already have been 137/* Set the value of the Bit Mask Register. It must already have been
151 selected with selectmask(). */ 138 selected with selectmask(). */
152static inline void setmask(int mask) 139static inline void setmask(int mask)
153{ 140{
154 vga_io_w(GRAPHICS_DATA_REG, mask); 141 vga_io_w(VGA_GFX_D, mask);
155} 142}
156 143
157/* Set the Data Rotate Register and return its old value. 144/* Set the Data Rotate Register and return its old value.
@@ -161,9 +148,8 @@ static inline int setop(int op)
161{ 148{
162 int oldop; 149 int oldop;
163 150
164 vga_io_w(GRAPHICS_ADDR_REG, DATA_ROTATE_INDEX); 151 oldop = vga_io_rgfx(VGA_GFX_DATA_ROTATE);
165 oldop = vga_io_r(GRAPHICS_DATA_REG); 152 vga_io_w(VGA_GFX_D, op);
166 vga_io_w(GRAPHICS_DATA_REG, op);
167 return oldop; 153 return oldop;
168} 154}
169 155
@@ -173,9 +159,8 @@ static inline int setsr(int sr)
173{ 159{
174 int oldsr; 160 int oldsr;
175 161
176 vga_io_w(GRAPHICS_ADDR_REG, ENABLE_SET_RESET_INDEX); 162 oldsr = vga_io_rgfx(VGA_GFX_SR_ENABLE);
177 oldsr = vga_io_r(GRAPHICS_DATA_REG); 163 vga_io_w(VGA_GFX_D, sr);
178 vga_io_w(GRAPHICS_DATA_REG, sr);
179 return oldsr; 164 return oldsr;
180} 165}
181 166
@@ -184,22 +169,21 @@ static inline int setcolor(int color)
184{ 169{
185 int oldcolor; 170 int oldcolor;
186 171
187 vga_io_w(GRAPHICS_ADDR_REG, SET_RESET_INDEX); 172 oldcolor = vga_io_rgfx(VGA_GFX_SR_VALUE);
188 oldcolor = vga_io_r(GRAPHICS_DATA_REG); 173 vga_io_w(VGA_GFX_D, color);
189 vga_io_w(GRAPHICS_DATA_REG, color);
190 return oldcolor; 174 return oldcolor;
191} 175}
192 176
193/* Return the value in the Graphics Address Register. */ 177/* Return the value in the Graphics Address Register. */
194static inline int getindex(void) 178static inline int getindex(void)
195{ 179{
196 return vga_io_r(GRAPHICS_ADDR_REG); 180 return vga_io_r(VGA_GFX_I);
197} 181}
198 182
199/* Set the value in the Graphics Address Register. */ 183/* Set the value in the Graphics Address Register. */
200static inline void setindex(int index) 184static inline void setindex(int index)
201{ 185{
202 vga_io_w(GRAPHICS_ADDR_REG, index); 186 vga_io_w(VGA_GFX_I, index);
203} 187}
204 188
205static void vga16fb_pan_var(struct fb_info *info, 189static void vga16fb_pan_var(struct fb_info *info,
@@ -672,10 +656,10 @@ static void ega16_setpalette(int regno, unsigned red, unsigned green, unsigned b
672 656
673static void vga16_setpalette(int regno, unsigned red, unsigned green, unsigned blue) 657static void vga16_setpalette(int regno, unsigned red, unsigned green, unsigned blue)
674{ 658{
675 outb(regno, dac_reg); 659 outb(regno, VGA_PEL_IW);
676 outb(red >> 10, dac_val); 660 outb(red >> 10, VGA_PEL_D);
677 outb(green >> 10, dac_val); 661 outb(green >> 10, VGA_PEL_D);
678 outb(blue >> 10, dac_val); 662 outb(blue >> 10, VGA_PEL_D);
679} 663}
680 664
681static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green, 665static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -719,28 +703,15 @@ static int vga16fb_pan_display(struct fb_var_screeninfo *var,
719 blanking code was originally by Huang shi chao, and modified by 703 blanking code was originally by Huang shi chao, and modified by
720 Christoph Rimek (chrimek@toppoint.de) and todd j. derr 704 Christoph Rimek (chrimek@toppoint.de) and todd j. derr
721 (tjd@barefoot.org) for Linux. */ 705 (tjd@barefoot.org) for Linux. */
722#define attrib_port VGA_ATC_IW
723#define seq_port_reg VGA_SEQ_I
724#define seq_port_val VGA_SEQ_D
725#define gr_port_reg VGA_GFX_I
726#define gr_port_val VGA_GFX_D
727#define video_misc_rd VGA_MIS_R
728#define video_misc_wr VGA_MIS_W
729#define vga_video_port_reg VGA_CRT_IC
730#define vga_video_port_val VGA_CRT_DC
731 706
732static void vga_vesa_blank(struct vga16fb_par *par, int mode) 707static void vga_vesa_blank(struct vga16fb_par *par, int mode)
733{ 708{
734 unsigned char SeqCtrlIndex; 709 unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
735 unsigned char CrtCtrlIndex; 710 unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);
736 711
737 //cli();
738 SeqCtrlIndex = vga_io_r(seq_port_reg);
739 CrtCtrlIndex = vga_io_r(vga_video_port_reg);
740
741 /* save original values of VGA controller registers */ 712 /* save original values of VGA controller registers */
742 if(!par->vesa_blanked) { 713 if(!par->vesa_blanked) {
743 par->vga_state.CrtMiscIO = vga_io_r(video_misc_rd); 714 par->vga_state.CrtMiscIO = vga_io_r(VGA_MIS_R);
744 //sti(); 715 //sti();
745 716
746 par->vga_state.HorizontalTotal = vga_io_rcrt(0x00); /* HorizontalTotal */ 717 par->vga_state.HorizontalTotal = vga_io_rcrt(0x00); /* HorizontalTotal */
@@ -756,12 +727,11 @@ static void vga_vesa_blank(struct vga16fb_par *par, int mode)
756 727
757 /* assure that video is enabled */ 728 /* assure that video is enabled */
758 /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ 729 /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
759 //cli();
760 vga_io_wseq(0x01, par->vga_state.ClockingMode | 0x20); 730 vga_io_wseq(0x01, par->vga_state.ClockingMode | 0x20);
761 731
762 /* test for vertical retrace in process.... */ 732 /* test for vertical retrace in process.... */
763 if ((par->vga_state.CrtMiscIO & 0x80) == 0x80) 733 if ((par->vga_state.CrtMiscIO & 0x80) == 0x80)
764 vga_io_w(video_misc_wr, par->vga_state.CrtMiscIO & 0xef); 734 vga_io_w(VGA_MIS_W, par->vga_state.CrtMiscIO & 0xef);
765 735
766 /* 736 /*
767 * Set <End of vertical retrace> to minimum (0) and 737 * Set <End of vertical retrace> to minimum (0) and
@@ -769,12 +739,10 @@ static void vga_vesa_blank(struct vga16fb_par *par, int mode)
769 * Result: turn off vertical sync (VSync) pulse. 739 * Result: turn off vertical sync (VSync) pulse.
770 */ 740 */
771 if (mode & FB_BLANK_VSYNC_SUSPEND) { 741 if (mode & FB_BLANK_VSYNC_SUSPEND) {
772 outb_p(0x10,vga_video_port_reg); /* StartVertRetrace */ 742 vga_io_wcrt(VGA_CRTC_V_SYNC_START, 0xff);
773 outb_p(0xff,vga_video_port_val); /* maximum value */ 743 vga_io_wcrt(VGA_CRTC_V_SYNC_END, 0x40);
774 outb_p(0x11,vga_video_port_reg); /* EndVertRetrace */ 744 /* bits 9,10 of vert. retrace */
775 outb_p(0x40,vga_video_port_val); /* minimum (bits 0..3) */ 745 vga_io_wcrt(VGA_CRTC_OVERFLOW, par->vga_state.Overflow | 0x84);
776 outb_p(0x07,vga_video_port_reg); /* Overflow */
777 outb_p(par->vga_state.Overflow | 0x84,vga_video_port_val); /* bits 9,10 of vert. retrace */
778 } 746 }
779 747
780 if (mode & FB_BLANK_HSYNC_SUSPEND) { 748 if (mode & FB_BLANK_HSYNC_SUSPEND) {
@@ -783,29 +751,22 @@ static void vga_vesa_blank(struct vga16fb_par *par, int mode)
783 * <Start of horizontal Retrace> to maximum 751 * <Start of horizontal Retrace> to maximum
784 * Result: turn off horizontal sync (HSync) pulse. 752 * Result: turn off horizontal sync (HSync) pulse.
785 */ 753 */
786 outb_p(0x04,vga_video_port_reg); /* StartHorizRetrace */ 754 vga_io_wcrt(VGA_CRTC_H_SYNC_START, 0xff);
787 outb_p(0xff,vga_video_port_val); /* maximum */ 755 vga_io_wcrt(VGA_CRTC_H_SYNC_END, 0x00);
788 outb_p(0x05,vga_video_port_reg); /* EndHorizRetrace */
789 outb_p(0x00,vga_video_port_val); /* minimum (0) */
790 } 756 }
791 757
792 /* restore both index registers */ 758 /* restore both index registers */
793 outb_p(SeqCtrlIndex,seq_port_reg); 759 outb_p(SeqCtrlIndex, VGA_SEQ_I);
794 outb_p(CrtCtrlIndex,vga_video_port_reg); 760 outb_p(CrtCtrlIndex, VGA_CRT_IC);
795 //sti();
796} 761}
797 762
798static void vga_vesa_unblank(struct vga16fb_par *par) 763static void vga_vesa_unblank(struct vga16fb_par *par)
799{ 764{
800 unsigned char SeqCtrlIndex; 765 unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
801 unsigned char CrtCtrlIndex; 766 unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);
802 767
803 //cli();
804 SeqCtrlIndex = vga_io_r(seq_port_reg);
805 CrtCtrlIndex = vga_io_r(vga_video_port_reg);
806
807 /* restore original values of VGA controller registers */ 768 /* restore original values of VGA controller registers */
808 vga_io_w(video_misc_wr, par->vga_state.CrtMiscIO); 769 vga_io_w(VGA_MIS_W, par->vga_state.CrtMiscIO);
809 770
810 /* HorizontalTotal */ 771 /* HorizontalTotal */
811 vga_io_wcrt(0x00, par->vga_state.HorizontalTotal); 772 vga_io_wcrt(0x00, par->vga_state.HorizontalTotal);
@@ -827,9 +788,8 @@ static void vga_vesa_unblank(struct vga16fb_par *par)
827 vga_io_wseq(0x01, par->vga_state.ClockingMode); 788 vga_io_wseq(0x01, par->vga_state.ClockingMode);
828 789
829 /* restore index/control registers */ 790 /* restore index/control registers */
830 vga_io_w(seq_port_reg, SeqCtrlIndex); 791 vga_io_w(VGA_SEQ_I, SeqCtrlIndex);
831 vga_io_w(vga_video_port_reg, CrtCtrlIndex); 792 vga_io_w(VGA_CRT_IC, CrtCtrlIndex);
832 //sti();
833} 793}
834 794
835static void vga_pal_blank(void) 795static void vga_pal_blank(void)
@@ -837,10 +797,10 @@ static void vga_pal_blank(void)
837 int i; 797 int i;
838 798
839 for (i=0; i<16; i++) { 799 for (i=0; i<16; i++) {
840 outb_p (i, dac_reg) ; 800 outb_p(i, VGA_PEL_IW);
841 outb_p (0, dac_val) ; 801 outb_p(0, VGA_PEL_D);
842 outb_p (0, dac_val) ; 802 outb_p(0, VGA_PEL_D);
843 outb_p (0, dac_val) ; 803 outb_p(0, VGA_PEL_D);
844 } 804 }
845} 805}
846 806
@@ -1087,12 +1047,15 @@ static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *are
1087 width = x2 - dx; 1047 width = x2 - dx;
1088 height = y2 - dy; 1048 height = y2 - dy;
1089 1049
1050 if (sx + dx < old_dx || sy + dy < old_dy)
1051 return;
1052
1090 /* update sx1,sy1 */ 1053 /* update sx1,sy1 */
1091 sx += (dx - old_dx); 1054 sx += (dx - old_dx);
1092 sy += (dy - old_dy); 1055 sy += (dy - old_dy);
1093 1056
1094 /* the source must be completely inside the virtual screen */ 1057 /* the source must be completely inside the virtual screen */
1095 if (sx < 0 || sy < 0 || (sx + width) > vxres || (sy + height) > vyres) 1058 if (sx + width > vxres || sy + height > vyres)
1096 return; 1059 return;
1097 1060
1098 switch (info->fix.type) { 1061 switch (info->fix.type) {
@@ -1482,6 +1445,7 @@ static void __exit vga16fb_exit(void)
1482 platform_driver_unregister(&vga16fb_driver); 1445 platform_driver_unregister(&vga16fb_driver);
1483} 1446}
1484 1447
1448MODULE_DESCRIPTION("Legacy VGA framebuffer device driver");
1485MODULE_LICENSE("GPL"); 1449MODULE_LICENSE("GPL");
1486module_init(vga16fb_init); 1450module_init(vga16fb_init);
1487module_exit(vga16fb_exit); 1451module_exit(vga16fb_exit);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 7084e7e146c0..5b78fd0aff0a 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -71,13 +71,6 @@ static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
71 dev->id.device, dev->id.vendor); 71 dev->id.device, dev->id.vendor);
72} 72}
73 73
74static struct bus_type virtio_bus = {
75 .name = "virtio",
76 .match = virtio_dev_match,
77 .dev_attrs = virtio_dev_attrs,
78 .uevent = virtio_uevent,
79};
80
81static void add_status(struct virtio_device *dev, unsigned status) 74static void add_status(struct virtio_device *dev, unsigned status)
82{ 75{
83 dev->config->set_status(dev, dev->config->get_status(dev) | status); 76 dev->config->set_status(dev, dev->config->get_status(dev) | status);
@@ -120,12 +113,16 @@ static int virtio_dev_probe(struct device *_d)
120 set_bit(f, dev->features); 113 set_bit(f, dev->features);
121 } 114 }
122 115
116 /* Transport features always preserved to pass to finalize_features. */
117 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
118 if (device_features & (1 << i))
119 set_bit(i, dev->features);
120
123 err = drv->probe(dev); 121 err = drv->probe(dev);
124 if (err) 122 if (err)
125 add_status(dev, VIRTIO_CONFIG_S_FAILED); 123 add_status(dev, VIRTIO_CONFIG_S_FAILED);
126 else { 124 else {
127 /* They should never have set feature bits beyond 32 */ 125 dev->config->finalize_features(dev);
128 dev->config->set_features(dev, dev->features[0]);
129 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); 126 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
130 } 127 }
131 return err; 128 return err;
@@ -147,13 +144,20 @@ static int virtio_dev_remove(struct device *_d)
147 return 0; 144 return 0;
148} 145}
149 146
147static struct bus_type virtio_bus = {
148 .name = "virtio",
149 .match = virtio_dev_match,
150 .dev_attrs = virtio_dev_attrs,
151 .uevent = virtio_uevent,
152 .probe = virtio_dev_probe,
153 .remove = virtio_dev_remove,
154};
155
150int register_virtio_driver(struct virtio_driver *driver) 156int register_virtio_driver(struct virtio_driver *driver)
151{ 157{
152 /* Catch this early. */ 158 /* Catch this early. */
153 BUG_ON(driver->feature_table_size && !driver->feature_table); 159 BUG_ON(driver->feature_table_size && !driver->feature_table);
154 driver->driver.bus = &virtio_bus; 160 driver->driver.bus = &virtio_bus;
155 driver->driver.probe = virtio_dev_probe;
156 driver->driver.remove = virtio_dev_remove;
157 return driver_register(&driver->driver); 161 return driver_register(&driver->driver);
158} 162}
159EXPORT_SYMBOL_GPL(register_virtio_driver); 163EXPORT_SYMBOL_GPL(register_virtio_driver);
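In the virtio.c hunks above the bus now owns probe/remove, and virtio_dev_probe() acknowledges only the feature bits that the device advertises and the driver lists in its feature table, while always preserving the transport-feature window for finalize_features(). As a rough illustration of the driver side of that contract, here is a hedged sketch of a module declaring a feature table; feature_table, feature_table_size, register_virtio_driver() and virtio_has_feature() are the interfaces referenced by this patch, but the device ID, the feature bit and every "foo" name are invented for the example.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#define VIRTIO_FOO_F_FANCY 0            /* made-up device feature bit */

static struct virtio_device_id foo_id_table[] = {
        { 42, VIRTIO_DEV_ANY_ID },      /* "42" is a made-up device ID */
        { 0 },
};

static unsigned int foo_features[] = {
        VIRTIO_FOO_F_FANCY,
};

static int foo_probe(struct virtio_device *vdev)
{
        /* By the time probe runs the core has already intersected the
         * device and driver feature sets, so this reflects what both
         * ends agreed on. */
        printk(KERN_INFO "virtio-foo: fancy mode %savailable\n",
               virtio_has_feature(vdev, VIRTIO_FOO_F_FANCY) ? "" : "not ");
        return 0;
}

static void foo_remove(struct virtio_device *vdev)
{
}

static struct virtio_driver foo_driver = {
        .feature_table          = foo_features,
        .feature_table_size     = ARRAY_SIZE(foo_features),
        .driver.name            = "virtio-foo",
        .driver.owner           = THIS_MODULE,
        .id_table               = foo_id_table,
        .probe                  = foo_probe,
        .remove                 = foo_remove,
};

static int __init foo_init(void)
{
        return register_virtio_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
        unregister_virtio_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");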
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index eae7236310e4..c7dc37c7cce9 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -94,12 +94,17 @@ static u32 vp_get_features(struct virtio_device *vdev)
94 return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); 94 return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
95} 95}
96 96
97/* virtio config->set_features() implementation */ 97/* virtio config->finalize_features() implementation */
98static void vp_set_features(struct virtio_device *vdev, u32 features) 98static void vp_finalize_features(struct virtio_device *vdev)
99{ 99{
100 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 100 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
101 101
102 iowrite32(features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES); 102 /* Give virtio_ring a chance to accept features. */
103 vring_transport_features(vdev);
104
105 /* We only support 32 feature bits. */
106 BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1);
107 iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES);
103} 108}
104 109
105/* virtio config->get() implementation */ 110/* virtio config->get() implementation */
@@ -297,7 +302,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
297 .find_vq = vp_find_vq, 302 .find_vq = vp_find_vq,
298 .del_vq = vp_del_vq, 303 .del_vq = vp_del_vq,
299 .get_features = vp_get_features, 304 .get_features = vp_get_features,
300 .set_features = vp_set_features, 305 .finalize_features = vp_finalize_features,
301}; 306};
302 307
303/* the PCI probing function */ 308/* the PCI probing function */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 72bf8bc09014..6eb5303fed11 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -18,6 +18,7 @@
18 */ 18 */
19#include <linux/virtio.h> 19#include <linux/virtio.h>
20#include <linux/virtio_ring.h> 20#include <linux/virtio_ring.h>
21#include <linux/virtio_config.h>
21#include <linux/device.h> 22#include <linux/device.h>
22 23
23#ifdef DEBUG 24#ifdef DEBUG
@@ -87,8 +88,11 @@ static int vring_add_buf(struct virtqueue *_vq,
87 if (vq->num_free < out + in) { 88 if (vq->num_free < out + in) {
88 pr_debug("Can't add buf len %i - avail = %i\n", 89 pr_debug("Can't add buf len %i - avail = %i\n",
89 out + in, vq->num_free); 90 out + in, vq->num_free);
90 /* We notify *even if* VRING_USED_F_NO_NOTIFY is set here. */ 91 /* FIXME: for historical reasons, we force a notify here if
91 vq->notify(&vq->vq); 92 * there are outgoing parts to the buffer. Presumably the
93 * host should service the ring ASAP. */
94 if (out)
95 vq->notify(&vq->vq);
92 END_USE(vq); 96 END_USE(vq);
93 return -ENOSPC; 97 return -ENOSPC;
94 } 98 }
@@ -320,4 +324,19 @@ void vring_del_virtqueue(struct virtqueue *vq)
320} 324}
321EXPORT_SYMBOL_GPL(vring_del_virtqueue); 325EXPORT_SYMBOL_GPL(vring_del_virtqueue);
322 326
327/* Manipulates transport-specific feature bits. */
328void vring_transport_features(struct virtio_device *vdev)
329{
330 unsigned int i;
331
332 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
333 switch (i) {
334 default:
335 /* We don't understand this bit. */
336 clear_bit(i, vdev->features);
337 }
338 }
339}
340EXPORT_SYMBOL_GPL(vring_transport_features);
341
323MODULE_LICENSE("GPL"); 342MODULE_LICENSE("GPL");
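The new vring_transport_features() above is deliberately a whitelist: every bit in the transport window that the ring code does not explicitly recognise is cleared before the guest acknowledges its features. A hedged sketch of how the switch would grow once the ring implementation understands a transport bit (VIRTIO_RING_F_EXAMPLE is a placeholder constant invented here, not part of this patch):

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/bitops.h>

#define VIRTIO_RING_F_EXAMPLE 28        /* invented bit inside the transport window */

static void vring_transport_features_sketch(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_EXAMPLE:
                        /* Understood: leave the bit alone so it survives
                         * into finalize_features(). */
                        break;
                default:
                        /* Unknown transport feature: refuse it. */
                        clear_bit(i, vdev->features);
                }
        }
}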
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ccb78f66c2b6..48399e134c0d 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -788,8 +788,6 @@ config WATCHDOG_RIO
788 machines. The watchdog timeout period is normally one minute but 788 machines. The watchdog timeout period is normally one minute but
789 can be changed with a boot-time parameter. 789 can be changed with a boot-time parameter.
790 790
791# V850 Architecture
792
793# XTENSA Architecture 791# XTENSA Architecture
794 792
795# 793#
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 25b352b664d9..edd305a64e63 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -119,8 +119,6 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
119 119
120# SPARC64 Architecture 120# SPARC64 Architecture
121 121
122# V850 Architecture
123
124# XTENSA Architecture 122# XTENSA Architecture
125 123
126# Architecture Independent 124# Architecture Independent
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 3da712cc7708..5290552d2ef7 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -15,7 +15,6 @@
15#include <linux/zorro.h> 15#include <linux/zorro.h>
16#include <linux/stat.h> 16#include <linux/stat.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/fs.h>
19 18
20#include "zorro.h" 19#include "zorro.h"
21 20
diff --git a/fs/Kconfig b/fs/Kconfig
index 37db79a2ff95..97e3bdedb1e6 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -902,65 +902,7 @@ endif # BLOCK
902 902
903menu "Pseudo filesystems" 903menu "Pseudo filesystems"
904 904
905config PROC_FS 905source "fs/proc/Kconfig"
906 bool "/proc file system support" if EMBEDDED
907 default y
908 help
909 This is a virtual file system providing information about the status
910 of the system. "Virtual" means that it doesn't take up any space on
911 your hard disk: the files are created on the fly by the kernel when
912 you try to access them. Also, you cannot read the files with older
913 version of the program less: you need to use more or cat.
914
915 It's totally cool; for example, "cat /proc/interrupts" gives
916 information about what the different IRQs are used for at the moment
917 (there is a small number of Interrupt ReQuest lines in your computer
918 that are used by the attached devices to gain the CPU's attention --
919 often a source of trouble if two devices are mistakenly configured
920 to use the same IRQ). The program procinfo to display some
921 information about your system gathered from the /proc file system.
922
923 Before you can use the /proc file system, it has to be mounted,
924 meaning it has to be given a location in the directory hierarchy.
925 That location should be /proc. A command such as "mount -t proc proc
926 /proc" or the equivalent line in /etc/fstab does the job.
927
928 The /proc file system is explained in the file
929 <file:Documentation/filesystems/proc.txt> and on the proc(5) manpage
930 ("man 5 proc").
931
932 This option will enlarge your kernel by about 67 KB. Several
933 programs depend on this, so everyone should say Y here.
934
935config PROC_KCORE
936 bool "/proc/kcore support" if !ARM
937 depends on PROC_FS && MMU
938
939config PROC_VMCORE
940 bool "/proc/vmcore support (EXPERIMENTAL)"
941 depends on PROC_FS && CRASH_DUMP
942 default y
943 help
944 Exports the dump image of crashed kernel in ELF format.
945
946config PROC_SYSCTL
947 bool "Sysctl support (/proc/sys)" if EMBEDDED
948 depends on PROC_FS
949 select SYSCTL
950 default y
951 ---help---
952 The sysctl interface provides a means of dynamically changing
953 certain kernel parameters and variables on the fly without requiring
954 a recompile of the kernel or reboot of the system. The primary
955 interface is through /proc/sys. If you say Y here a tree of
956 modifiable sysctl entries will be generated beneath the
957 /proc/sys directory. They are explained in the files
958 in <file:Documentation/sysctl/>. Note that enabling this
959 option will enlarge the kernel by at least 8 KB.
960
961 As it is generally a good thing, you should say Y here unless
962 building a kernel for install/rescue disks or your system is very
963 limited in memory.
964 906
965config SYSFS 907config SYSFS
966 bool "sysfs file system support" if EMBEDDED 908 bool "sysfs file system support" if EMBEDDED
@@ -2093,20 +2035,6 @@ config CODA_FS
2093 To compile the coda client support as a module, choose M here: the 2035 To compile the coda client support as a module, choose M here: the
2094 module will be called coda. 2036 module will be called coda.
2095 2037
2096config CODA_FS_OLD_API
2097 bool "Use 96-bit Coda file identifiers"
2098 depends on CODA_FS
2099 help
2100 A new kernel-userspace API had to be introduced for Coda v6.0
2101 to support larger 128-bit file identifiers as needed by the
2102 new realms implementation.
2103
2104 However this new API is not backward compatible with older
2105 clients. If you really need to run the old Coda userspace
2106 cache manager then say Y.
2107
2108 For most cases you probably want to say N.
2109
2110config AFS_FS 2038config AFS_FS
2111 tristate "Andrew File System support (AFS) (EXPERIMENTAL)" 2039 tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
2112 depends on INET && EXPERIMENTAL 2040 depends on INET && EXPERIMENTAL
diff --git a/fs/aio.c b/fs/aio.c
index 0fb3117ddd93..0051fd94b44e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -586,7 +586,6 @@ static void use_mm(struct mm_struct *mm)
586 struct task_struct *tsk = current; 586 struct task_struct *tsk = current;
587 587
588 task_lock(tsk); 588 task_lock(tsk);
589 tsk->flags |= PF_BORROWED_MM;
590 active_mm = tsk->active_mm; 589 active_mm = tsk->active_mm;
591 atomic_inc(&mm->mm_count); 590 atomic_inc(&mm->mm_count);
592 tsk->mm = mm; 591 tsk->mm = mm;
@@ -610,7 +609,6 @@ static void unuse_mm(struct mm_struct *mm)
610 struct task_struct *tsk = current; 609 struct task_struct *tsk = current;
611 610
612 task_lock(tsk); 611 task_lock(tsk);
613 tsk->flags &= ~PF_BORROWED_MM;
614 tsk->mm = NULL; 612 tsk->mm = NULL;
615 /* active_mm is still 'mm' */ 613 /* active_mm is still 'mm' */
616 enter_lazy_tlb(mm, tsk); 614 enter_lazy_tlb(mm, tsk);
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 977ef208c051..3662dd44896b 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -58,8 +58,9 @@ static struct dentry_operations anon_inodefs_dentry_operations = {
58 * of the file 58 * of the file
59 * 59 *
60 * @name: [in] name of the "class" of the new file 60 * @name: [in] name of the "class" of the new file
61 * @fops [in] file operations for the new file 61 * @fops: [in] file operations for the new file
62 * @priv [in] private data for the new file (will be file's private_data) 62 * @priv: [in] private data for the new file (will be file's private_data)
63 * @flags: [in] flags
63 * 64 *
64 * Creates a new file by hooking it on a single inode. This is useful for files 65 * Creates a new file by hooking it on a single inode. This is useful for files
65 * that do not need to have a full-fledged inode in order to operate correctly. 66 * that do not need to have a full-fledged inode in order to operate correctly.
@@ -68,7 +69,7 @@ static struct dentry_operations anon_inodefs_dentry_operations = {
68 * setup. Returns new descriptor or -error. 69 * setup. Returns new descriptor or -error.
69 */ 70 */
70int anon_inode_getfd(const char *name, const struct file_operations *fops, 71int anon_inode_getfd(const char *name, const struct file_operations *fops,
71 void *priv) 72 void *priv, int flags)
72{ 73{
73 struct qstr this; 74 struct qstr this;
74 struct dentry *dentry; 75 struct dentry *dentry;
@@ -78,7 +79,7 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops,
78 if (IS_ERR(anon_inode_inode)) 79 if (IS_ERR(anon_inode_inode))
79 return -ENODEV; 80 return -ENODEV;
80 81
81 error = get_unused_fd(); 82 error = get_unused_fd_flags(flags);
82 if (error < 0) 83 if (error < 0)
83 return error; 84 return error;
84 fd = error; 85 fd = error;
@@ -115,7 +116,7 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops,
115 file->f_mapping = anon_inode_inode->i_mapping; 116 file->f_mapping = anon_inode_inode->i_mapping;
116 117
117 file->f_pos = 0; 118 file->f_pos = 0;
118 file->f_flags = O_RDWR; 119 file->f_flags = O_RDWR | (flags & O_NONBLOCK);
119 file->f_version = 0; 120 file->f_version = 0;
120 file->private_data = priv; 121 file->private_data = priv;
121 122
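The anon_inodes change above threads a flags argument through anon_inode_getfd(): the descriptor is now allocated with get_unused_fd_flags(flags) (which takes care of O_CLOEXEC) and O_NONBLOCK is folded into the new file's f_flags, so callers no longer have to patch the struct file up after the fact. A hedged sketch of a caller -- the file_operations and the "[example]" class name are invented; only the anon_inode_getfd() signature comes from this patch:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/anon_inodes.h>

/* Illustrative file_operations; a real user would wire up read/poll/etc. */
static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
};

/* Create an anonymous fd carrying 'priv', honouring the caller's
 * O_CLOEXEC/O_NONBLOCK request.  Returns the new fd or a negative errno. */
static int example_create_fd(void *priv, int flags)
{
        return anon_inode_getfd("[example]", &example_fops, priv,
                                flags & (O_CLOEXEC | O_NONBLOCK));
}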
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index c3d352d7fa93..69a2f5c92319 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -52,7 +52,10 @@ struct autofs_info {
52 52
53 int flags; 53 int flags;
54 54
55 struct list_head rehash; 55 struct completion expire_complete;
56
57 struct list_head active;
58 struct list_head expiring;
56 59
57 struct autofs_sb_info *sbi; 60 struct autofs_sb_info *sbi;
58 unsigned long last_used; 61 unsigned long last_used;
@@ -68,15 +71,14 @@ struct autofs_info {
68}; 71};
69 72
70#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ 73#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
74#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */
71 75
72struct autofs_wait_queue { 76struct autofs_wait_queue {
73 wait_queue_head_t queue; 77 wait_queue_head_t queue;
74 struct autofs_wait_queue *next; 78 struct autofs_wait_queue *next;
75 autofs_wqt_t wait_queue_token; 79 autofs_wqt_t wait_queue_token;
76 /* We use the following to see what we are waiting for */ 80 /* We use the following to see what we are waiting for */
77 unsigned int hash; 81 struct qstr name;
78 unsigned int len;
79 char *name;
80 u32 dev; 82 u32 dev;
81 u64 ino; 83 u64 ino;
82 uid_t uid; 84 uid_t uid;
@@ -85,7 +87,7 @@ struct autofs_wait_queue {
85 pid_t tgid; 87 pid_t tgid;
86 /* This is for status reporting upon return */ 88 /* This is for status reporting upon return */
87 int status; 89 int status;
88 atomic_t wait_ctr; 90 unsigned int wait_ctr;
89}; 91};
90 92
91#define AUTOFS_SBI_MAGIC 0x6d4a556d 93#define AUTOFS_SBI_MAGIC 0x6d4a556d
@@ -112,8 +114,9 @@ struct autofs_sb_info {
112 struct mutex wq_mutex; 114 struct mutex wq_mutex;
113 spinlock_t fs_lock; 115 spinlock_t fs_lock;
114 struct autofs_wait_queue *queues; /* Wait queue pointer */ 116 struct autofs_wait_queue *queues; /* Wait queue pointer */
115 spinlock_t rehash_lock; 117 spinlock_t lookup_lock;
116 struct list_head rehash_list; 118 struct list_head active_list;
119 struct list_head expiring_list;
117}; 120};
118 121
119static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb) 122static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
@@ -138,18 +141,14 @@ static inline int autofs4_oz_mode(struct autofs_sb_info *sbi) {
138static inline int autofs4_ispending(struct dentry *dentry) 141static inline int autofs4_ispending(struct dentry *dentry)
139{ 142{
140 struct autofs_info *inf = autofs4_dentry_ino(dentry); 143 struct autofs_info *inf = autofs4_dentry_ino(dentry);
141 int pending = 0;
142 144
143 if (dentry->d_flags & DCACHE_AUTOFS_PENDING) 145 if (dentry->d_flags & DCACHE_AUTOFS_PENDING)
144 return 1; 146 return 1;
145 147
146 if (inf) { 148 if (inf->flags & AUTOFS_INF_EXPIRING)
147 spin_lock(&inf->sbi->fs_lock); 149 return 1;
148 pending = inf->flags & AUTOFS_INF_EXPIRING;
149 spin_unlock(&inf->sbi->fs_lock);
150 }
151 150
152 return pending; 151 return 0;
153} 152}
154 153
155static inline void autofs4_copy_atime(struct file *src, struct file *dst) 154static inline void autofs4_copy_atime(struct file *src, struct file *dst)
@@ -164,6 +163,7 @@ void autofs4_free_ino(struct autofs_info *);
164 163
165/* Expiration */ 164/* Expiration */
166int is_autofs4_dentry(struct dentry *); 165int is_autofs4_dentry(struct dentry *);
166int autofs4_expire_wait(struct dentry *dentry);
167int autofs4_expire_run(struct super_block *, struct vfsmount *, 167int autofs4_expire_run(struct super_block *, struct vfsmount *,
168 struct autofs_sb_info *, 168 struct autofs_sb_info *,
169 struct autofs_packet_expire __user *); 169 struct autofs_packet_expire __user *);
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 894fee54d4d8..cdabb796ff01 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -259,13 +259,15 @@ static struct dentry *autofs4_expire_direct(struct super_block *sb,
259 now = jiffies; 259 now = jiffies;
260 timeout = sbi->exp_timeout; 260 timeout = sbi->exp_timeout;
261 261
262 /* Lock the tree as we must expire as a whole */
263 spin_lock(&sbi->fs_lock); 262 spin_lock(&sbi->fs_lock);
264 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { 263 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
265 struct autofs_info *ino = autofs4_dentry_ino(root); 264 struct autofs_info *ino = autofs4_dentry_ino(root);
266 265 if (d_mountpoint(root)) {
267 /* Set this flag early to catch sys_chdir and the like */ 266 ino->flags |= AUTOFS_INF_MOUNTPOINT;
267 root->d_mounted--;
268 }
268 ino->flags |= AUTOFS_INF_EXPIRING; 269 ino->flags |= AUTOFS_INF_EXPIRING;
270 init_completion(&ino->expire_complete);
269 spin_unlock(&sbi->fs_lock); 271 spin_unlock(&sbi->fs_lock);
270 return root; 272 return root;
271 } 273 }
@@ -292,6 +294,8 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
292 struct list_head *next; 294 struct list_head *next;
293 int do_now = how & AUTOFS_EXP_IMMEDIATE; 295 int do_now = how & AUTOFS_EXP_IMMEDIATE;
294 int exp_leaves = how & AUTOFS_EXP_LEAVES; 296 int exp_leaves = how & AUTOFS_EXP_LEAVES;
297 struct autofs_info *ino;
298 unsigned int ino_count;
295 299
296 if (!root) 300 if (!root)
297 return NULL; 301 return NULL;
@@ -316,6 +320,9 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
316 dentry = dget(dentry); 320 dentry = dget(dentry);
317 spin_unlock(&dcache_lock); 321 spin_unlock(&dcache_lock);
318 322
323 spin_lock(&sbi->fs_lock);
324 ino = autofs4_dentry_ino(dentry);
325
319 /* 326 /*
320 * Case 1: (i) indirect mount or top level pseudo direct mount 327 * Case 1: (i) indirect mount or top level pseudo direct mount
321 * (autofs-4.1). 328 * (autofs-4.1).
@@ -326,6 +333,11 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
326 DPRINTK("checking mountpoint %p %.*s", 333 DPRINTK("checking mountpoint %p %.*s",
327 dentry, (int)dentry->d_name.len, dentry->d_name.name); 334 dentry, (int)dentry->d_name.len, dentry->d_name.name);
328 335
336 /* Path walk currently on this dentry? */
337 ino_count = atomic_read(&ino->count) + 2;
338 if (atomic_read(&dentry->d_count) > ino_count)
339 goto next;
340
329 /* Can we umount this guy */ 341 /* Can we umount this guy */
330 if (autofs4_mount_busy(mnt, dentry)) 342 if (autofs4_mount_busy(mnt, dentry))
331 goto next; 343 goto next;
@@ -343,23 +355,25 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
343 355
344 /* Case 2: tree mount, expire iff entire tree is not busy */ 356 /* Case 2: tree mount, expire iff entire tree is not busy */
345 if (!exp_leaves) { 357 if (!exp_leaves) {
346 /* Lock the tree as we must expire as a whole */ 358 /* Path walk currently on this dentry? */
347 spin_lock(&sbi->fs_lock); 359 ino_count = atomic_read(&ino->count) + 1;
348 if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) { 360 if (atomic_read(&dentry->d_count) > ino_count)
349 struct autofs_info *inf = autofs4_dentry_ino(dentry); 361 goto next;
350 362
351 /* Set this flag early to catch sys_chdir and the like */ 363 if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {
352 inf->flags |= AUTOFS_INF_EXPIRING;
353 spin_unlock(&sbi->fs_lock);
354 expired = dentry; 364 expired = dentry;
355 goto found; 365 goto found;
356 } 366 }
357 spin_unlock(&sbi->fs_lock);
358 /* 367 /*
359 * Case 3: pseudo direct mount, expire individual leaves 368 * Case 3: pseudo direct mount, expire individual leaves
360 * (autofs-4.1). 369 * (autofs-4.1).
361 */ 370 */
362 } else { 371 } else {
372 /* Path walk currently on this dentry? */
373 ino_count = atomic_read(&ino->count) + 1;
374 if (atomic_read(&dentry->d_count) > ino_count)
375 goto next;
376
363 expired = autofs4_check_leaves(mnt, dentry, timeout, do_now); 377 expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
364 if (expired) { 378 if (expired) {
365 dput(dentry); 379 dput(dentry);
@@ -367,6 +381,7 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
367 } 381 }
368 } 382 }
369next: 383next:
384 spin_unlock(&sbi->fs_lock);
370 dput(dentry); 385 dput(dentry);
371 spin_lock(&dcache_lock); 386 spin_lock(&dcache_lock);
372 next = next->next; 387 next = next->next;
@@ -377,12 +392,45 @@ next:
377found: 392found:
378 DPRINTK("returning %p %.*s", 393 DPRINTK("returning %p %.*s",
379 expired, (int)expired->d_name.len, expired->d_name.name); 394 expired, (int)expired->d_name.len, expired->d_name.name);
395 ino = autofs4_dentry_ino(expired);
396 ino->flags |= AUTOFS_INF_EXPIRING;
397 init_completion(&ino->expire_complete);
398 spin_unlock(&sbi->fs_lock);
380 spin_lock(&dcache_lock); 399 spin_lock(&dcache_lock);
381 list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child); 400 list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
382 spin_unlock(&dcache_lock); 401 spin_unlock(&dcache_lock);
383 return expired; 402 return expired;
384} 403}
385 404
405int autofs4_expire_wait(struct dentry *dentry)
406{
407 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
408 struct autofs_info *ino = autofs4_dentry_ino(dentry);
409 int status;
410
411 /* Block on any pending expire */
412 spin_lock(&sbi->fs_lock);
413 if (ino->flags & AUTOFS_INF_EXPIRING) {
414 spin_unlock(&sbi->fs_lock);
415
416 DPRINTK("waiting for expire %p name=%.*s",
417 dentry, dentry->d_name.len, dentry->d_name.name);
418
419 status = autofs4_wait(sbi, dentry, NFY_NONE);
420 wait_for_completion(&ino->expire_complete);
421
422 DPRINTK("expire done status=%d", status);
423
424 if (d_unhashed(dentry))
425 return -EAGAIN;
426
427 return status;
428 }
429 spin_unlock(&sbi->fs_lock);
430
431 return 0;
432}
433
386/* Perform an expiry operation */ 434/* Perform an expiry operation */
387int autofs4_expire_run(struct super_block *sb, 435int autofs4_expire_run(struct super_block *sb,
388 struct vfsmount *mnt, 436 struct vfsmount *mnt,
@@ -390,7 +438,9 @@ int autofs4_expire_run(struct super_block *sb,
390 struct autofs_packet_expire __user *pkt_p) 438 struct autofs_packet_expire __user *pkt_p)
391{ 439{
392 struct autofs_packet_expire pkt; 440 struct autofs_packet_expire pkt;
441 struct autofs_info *ino;
393 struct dentry *dentry; 442 struct dentry *dentry;
443 int ret = 0;
394 444
395 memset(&pkt,0,sizeof pkt); 445 memset(&pkt,0,sizeof pkt);
396 446
@@ -406,9 +456,15 @@ int autofs4_expire_run(struct super_block *sb,
406 dput(dentry); 456 dput(dentry);
407 457
408 if ( copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)) ) 458 if ( copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)) )
409 return -EFAULT; 459 ret = -EFAULT;
410 460
411 return 0; 461 spin_lock(&sbi->fs_lock);
462 ino = autofs4_dentry_ino(dentry);
463 ino->flags &= ~AUTOFS_INF_EXPIRING;
464 complete_all(&ino->expire_complete);
465 spin_unlock(&sbi->fs_lock);
466
467 return ret;
412} 468}
413 469
414/* Call repeatedly until it returns -EAGAIN, meaning there's nothing 470/* Call repeatedly until it returns -EAGAIN, meaning there's nothing
@@ -433,9 +489,16 @@ int autofs4_expire_multi(struct super_block *sb, struct vfsmount *mnt,
433 489
434 /* This is synchronous because it makes the daemon a 490 /* This is synchronous because it makes the daemon a
435 little easier */ 491 little easier */
436 ino->flags |= AUTOFS_INF_EXPIRING;
437 ret = autofs4_wait(sbi, dentry, NFY_EXPIRE); 492 ret = autofs4_wait(sbi, dentry, NFY_EXPIRE);
493
494 spin_lock(&sbi->fs_lock);
495 if (ino->flags & AUTOFS_INF_MOUNTPOINT) {
496 sb->s_root->d_mounted++;
497 ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
498 }
438 ino->flags &= ~AUTOFS_INF_EXPIRING; 499 ino->flags &= ~AUTOFS_INF_EXPIRING;
500 complete_all(&ino->expire_complete);
501 spin_unlock(&sbi->fs_lock);
439 dput(dentry); 502 dput(dentry);
440 } 503 }
441 504
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 2fdcf5e1d236..7bb3e5ba0537 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -24,8 +24,10 @@
24 24
25static void ino_lnkfree(struct autofs_info *ino) 25static void ino_lnkfree(struct autofs_info *ino)
26{ 26{
27 kfree(ino->u.symlink); 27 if (ino->u.symlink) {
28 ino->u.symlink = NULL; 28 kfree(ino->u.symlink);
29 ino->u.symlink = NULL;
30 }
29} 31}
30 32
31struct autofs_info *autofs4_init_ino(struct autofs_info *ino, 33struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
@@ -41,16 +43,18 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
41 if (ino == NULL) 43 if (ino == NULL)
42 return NULL; 44 return NULL;
43 45
44 ino->flags = 0; 46 if (!reinit) {
45 ino->mode = mode; 47 ino->flags = 0;
46 ino->inode = NULL; 48 ino->inode = NULL;
47 ino->dentry = NULL; 49 ino->dentry = NULL;
48 ino->size = 0; 50 ino->size = 0;
49 51 INIT_LIST_HEAD(&ino->active);
50 INIT_LIST_HEAD(&ino->rehash); 52 INIT_LIST_HEAD(&ino->expiring);
53 atomic_set(&ino->count, 0);
54 }
51 55
56 ino->mode = mode;
52 ino->last_used = jiffies; 57 ino->last_used = jiffies;
53 atomic_set(&ino->count, 0);
54 58
55 ino->sbi = sbi; 59 ino->sbi = sbi;
56 60
@@ -159,8 +163,8 @@ void autofs4_kill_sb(struct super_block *sb)
159 if (!sbi) 163 if (!sbi)
160 goto out_kill_sb; 164 goto out_kill_sb;
161 165
162 if (!sbi->catatonic) 166 /* Free wait queues, close pipe */
163 autofs4_catatonic_mode(sbi); /* Free wait queues, close pipe */ 167 autofs4_catatonic_mode(sbi);
164 168
165 /* Clean up and release dangling references */ 169 /* Clean up and release dangling references */
166 autofs4_force_release(sbi); 170 autofs4_force_release(sbi);
@@ -338,8 +342,9 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
338 mutex_init(&sbi->wq_mutex); 342 mutex_init(&sbi->wq_mutex);
339 spin_lock_init(&sbi->fs_lock); 343 spin_lock_init(&sbi->fs_lock);
340 sbi->queues = NULL; 344 sbi->queues = NULL;
341 spin_lock_init(&sbi->rehash_lock); 345 spin_lock_init(&sbi->lookup_lock);
342 INIT_LIST_HEAD(&sbi->rehash_list); 346 INIT_LIST_HEAD(&sbi->active_list);
347 INIT_LIST_HEAD(&sbi->expiring_list);
343 s->s_blocksize = 1024; 348 s->s_blocksize = 1024;
344 s->s_blocksize_bits = 10; 349 s->s_blocksize_bits = 10;
345 s->s_magic = AUTOFS_SUPER_MAGIC; 350 s->s_magic = AUTOFS_SUPER_MAGIC;
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index edf5b6bddb52..bcfb2dc0a61b 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -25,25 +25,25 @@ static int autofs4_dir_rmdir(struct inode *,struct dentry *);
25static int autofs4_dir_mkdir(struct inode *,struct dentry *,int); 25static int autofs4_dir_mkdir(struct inode *,struct dentry *,int);
26static int autofs4_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long); 26static int autofs4_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long);
27static int autofs4_dir_open(struct inode *inode, struct file *file); 27static int autofs4_dir_open(struct inode *inode, struct file *file);
28static int autofs4_dir_close(struct inode *inode, struct file *file);
29static int autofs4_dir_readdir(struct file * filp, void * dirent, filldir_t filldir);
30static int autofs4_root_readdir(struct file * filp, void * dirent, filldir_t filldir);
31static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); 28static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
32static void *autofs4_follow_link(struct dentry *, struct nameidata *); 29static void *autofs4_follow_link(struct dentry *, struct nameidata *);
33 30
31#define TRIGGER_FLAGS (LOOKUP_CONTINUE | LOOKUP_DIRECTORY)
32#define TRIGGER_INTENTS (LOOKUP_OPEN | LOOKUP_CREATE)
33
34const struct file_operations autofs4_root_operations = { 34const struct file_operations autofs4_root_operations = {
35 .open = dcache_dir_open, 35 .open = dcache_dir_open,
36 .release = dcache_dir_close, 36 .release = dcache_dir_close,
37 .read = generic_read_dir, 37 .read = generic_read_dir,
38 .readdir = autofs4_root_readdir, 38 .readdir = dcache_readdir,
39 .ioctl = autofs4_root_ioctl, 39 .ioctl = autofs4_root_ioctl,
40}; 40};
41 41
42const struct file_operations autofs4_dir_operations = { 42const struct file_operations autofs4_dir_operations = {
43 .open = autofs4_dir_open, 43 .open = autofs4_dir_open,
44 .release = autofs4_dir_close, 44 .release = dcache_dir_close,
45 .read = generic_read_dir, 45 .read = generic_read_dir,
46 .readdir = autofs4_dir_readdir, 46 .readdir = dcache_readdir,
47}; 47};
48 48
49const struct inode_operations autofs4_indirect_root_inode_operations = { 49const struct inode_operations autofs4_indirect_root_inode_operations = {
@@ -70,42 +70,10 @@ const struct inode_operations autofs4_dir_inode_operations = {
70 .rmdir = autofs4_dir_rmdir, 70 .rmdir = autofs4_dir_rmdir,
71}; 71};
72 72
73static int autofs4_root_readdir(struct file *file, void *dirent,
74 filldir_t filldir)
75{
76 struct autofs_sb_info *sbi = autofs4_sbi(file->f_path.dentry->d_sb);
77 int oz_mode = autofs4_oz_mode(sbi);
78
79 DPRINTK("called, filp->f_pos = %lld", file->f_pos);
80
81 /*
82 * Don't set reghost flag if:
83 * 1) f_pos is larger than zero -- we've already been here.
84 * 2) we haven't even enabled reghosting in the 1st place.
85 * 3) this is the daemon doing a readdir
86 */
87 if (oz_mode && file->f_pos == 0 && sbi->reghost_enabled)
88 sbi->needs_reghost = 1;
89
90 DPRINTK("needs_reghost = %d", sbi->needs_reghost);
91
92 return dcache_readdir(file, dirent, filldir);
93}
94
95static int autofs4_dir_open(struct inode *inode, struct file *file) 73static int autofs4_dir_open(struct inode *inode, struct file *file)
96{ 74{
97 struct dentry *dentry = file->f_path.dentry; 75 struct dentry *dentry = file->f_path.dentry;
98 struct vfsmount *mnt = file->f_path.mnt;
99 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 76 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
100 struct dentry *cursor;
101 int status;
102
103 status = dcache_dir_open(inode, file);
104 if (status)
105 goto out;
106
107 cursor = file->private_data;
108 cursor->d_fsdata = NULL;
109 77
110 DPRINTK("file=%p dentry=%p %.*s", 78 DPRINTK("file=%p dentry=%p %.*s",
111 file, dentry, dentry->d_name.len, dentry->d_name.name); 79 file, dentry, dentry->d_name.len, dentry->d_name.name);
@@ -113,159 +81,32 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
113 if (autofs4_oz_mode(sbi)) 81 if (autofs4_oz_mode(sbi))
114 goto out; 82 goto out;
115 83
116 if (autofs4_ispending(dentry)) { 84 /*
117 DPRINTK("dentry busy"); 85 * An empty directory in an autofs file system is always a
118 dcache_dir_close(inode, file); 86 * mount point. The daemon must have failed to mount this
119 status = -EBUSY; 87 * during lookup so it doesn't exist. This can happen, for
120 goto out; 88 * example, if user space returns an incorrect status for a
121 } 89 * mount request. Otherwise we're doing a readdir on the
122 90 * autofs file system so just let the libfs routines handle
123 status = -ENOENT; 91 * it.
124 if (!d_mountpoint(dentry) && dentry->d_op && dentry->d_op->d_revalidate) { 92 */
125 struct nameidata nd; 93 spin_lock(&dcache_lock);
126 int empty, ret; 94 if (!d_mountpoint(dentry) && __simple_empty(dentry)) {
127
128 /* In case there are stale directory dentrys from a failed mount */
129 spin_lock(&dcache_lock);
130 empty = list_empty(&dentry->d_subdirs);
131 spin_unlock(&dcache_lock); 95 spin_unlock(&dcache_lock);
132 96 return -ENOENT;
133 if (!empty)
134 d_invalidate(dentry);
135
136 nd.flags = LOOKUP_DIRECTORY;
137 ret = (dentry->d_op->d_revalidate)(dentry, &nd);
138
139 if (ret <= 0) {
140 if (ret < 0)
141 status = ret;
142 dcache_dir_close(inode, file);
143 goto out;
144 }
145 } 97 }
98 spin_unlock(&dcache_lock);
146 99
147 if (d_mountpoint(dentry)) {
148 struct file *fp = NULL;
149 struct path fp_path = { .dentry = dentry, .mnt = mnt };
150
151 path_get(&fp_path);
152
153 if (!autofs4_follow_mount(&fp_path.mnt, &fp_path.dentry)) {
154 path_put(&fp_path);
155 dcache_dir_close(inode, file);
156 goto out;
157 }
158
159 fp = dentry_open(fp_path.dentry, fp_path.mnt, file->f_flags);
160 status = PTR_ERR(fp);
161 if (IS_ERR(fp)) {
162 dcache_dir_close(inode, file);
163 goto out;
164 }
165 cursor->d_fsdata = fp;
166 }
167 return 0;
168out:
169 return status;
170}
171
172static int autofs4_dir_close(struct inode *inode, struct file *file)
173{
174 struct dentry *dentry = file->f_path.dentry;
175 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
176 struct dentry *cursor = file->private_data;
177 int status = 0;
178
179 DPRINTK("file=%p dentry=%p %.*s",
180 file, dentry, dentry->d_name.len, dentry->d_name.name);
181
182 if (autofs4_oz_mode(sbi))
183 goto out;
184
185 if (autofs4_ispending(dentry)) {
186 DPRINTK("dentry busy");
187 status = -EBUSY;
188 goto out;
189 }
190
191 if (d_mountpoint(dentry)) {
192 struct file *fp = cursor->d_fsdata;
193 if (!fp) {
194 status = -ENOENT;
195 goto out;
196 }
197 filp_close(fp, current->files);
198 }
199out:
200 dcache_dir_close(inode, file);
201 return status;
202}
203
204static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldir)
205{
206 struct dentry *dentry = file->f_path.dentry;
207 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
208 struct dentry *cursor = file->private_data;
209 int status;
210
211 DPRINTK("file=%p dentry=%p %.*s",
212 file, dentry, dentry->d_name.len, dentry->d_name.name);
213
214 if (autofs4_oz_mode(sbi))
215 goto out;
216
217 if (autofs4_ispending(dentry)) {
218 DPRINTK("dentry busy");
219 return -EBUSY;
220 }
221
222 if (d_mountpoint(dentry)) {
223 struct file *fp = cursor->d_fsdata;
224
225 if (!fp)
226 return -ENOENT;
227
228 if (!fp->f_op || !fp->f_op->readdir)
229 goto out;
230
231 status = vfs_readdir(fp, filldir, dirent);
232 file->f_pos = fp->f_pos;
233 if (status)
234 autofs4_copy_atime(file, fp);
235 return status;
236 }
237out: 100out:
238 return dcache_readdir(file, dirent, filldir); 101 return dcache_dir_open(inode, file);
239} 102}
240 103
241static int try_to_fill_dentry(struct dentry *dentry, int flags) 104static int try_to_fill_dentry(struct dentry *dentry, int flags)
242{ 105{
243 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 106 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
244 struct autofs_info *ino = autofs4_dentry_ino(dentry); 107 struct autofs_info *ino = autofs4_dentry_ino(dentry);
245 struct dentry *new;
246 int status; 108 int status;
247 109
248 /* Block on any pending expiry here; invalidate the dentry
249 when expiration is done to trigger mount request with a new
250 dentry */
251 if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
252 DPRINTK("waiting for expire %p name=%.*s",
253 dentry, dentry->d_name.len, dentry->d_name.name);
254
255 status = autofs4_wait(sbi, dentry, NFY_NONE);
256
257 DPRINTK("expire done status=%d", status);
258
259 /*
260 * If the directory still exists the mount request must
261 * continue otherwise it can't be followed at the right
262 * time during the walk.
263 */
264 status = d_invalidate(dentry);
265 if (status != -EBUSY)
266 return -EAGAIN;
267 }
268
269 DPRINTK("dentry=%p %.*s ino=%p", 110 DPRINTK("dentry=%p %.*s ino=%p",
270 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 111 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
271 112
@@ -292,7 +133,8 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
292 return status; 133 return status;
293 } 134 }
294 /* Trigger mount for path component or follow link */ 135 /* Trigger mount for path component or follow link */
295 } else if (flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY) || 136 } else if (dentry->d_flags & DCACHE_AUTOFS_PENDING ||
137 flags & (TRIGGER_FLAGS | TRIGGER_INTENTS) ||
296 current->link_count) { 138 current->link_count) {
297 DPRINTK("waiting for mount name=%.*s", 139 DPRINTK("waiting for mount name=%.*s",
298 dentry->d_name.len, dentry->d_name.name); 140 dentry->d_name.len, dentry->d_name.name);
@@ -320,26 +162,6 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
320 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 162 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
321 spin_unlock(&dentry->d_lock); 163 spin_unlock(&dentry->d_lock);
322 164
323 /*
324 * The dentry that is passed in from lookup may not be the one
325 * we end up using, as mkdir can create a new one. If this
326 * happens, and another process tries the lookup at the same time,
327 * it will set the PENDING flag on this new dentry, but add itself
328 * to our waitq. Then, if after the lookup succeeds, the first
329 * process that requested the mount performs another lookup of the
330 * same directory, it will show up as still pending! So, we need
331 * to redo the lookup here and clear pending on that dentry.
332 */
333 if (d_unhashed(dentry)) {
334 new = d_lookup(dentry->d_parent, &dentry->d_name);
335 if (new) {
336 spin_lock(&new->d_lock);
337 new->d_flags &= ~DCACHE_AUTOFS_PENDING;
338 spin_unlock(&new->d_lock);
339 dput(new);
340 }
341 }
342
343 return 0; 165 return 0;
344} 166}
345 167
@@ -355,51 +177,63 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
355 DPRINTK("dentry=%p %.*s oz_mode=%d nd->flags=%d", 177 DPRINTK("dentry=%p %.*s oz_mode=%d nd->flags=%d",
356 dentry, dentry->d_name.len, dentry->d_name.name, oz_mode, 178 dentry, dentry->d_name.len, dentry->d_name.name, oz_mode,
357 nd->flags); 179 nd->flags);
358 180 /*
359 /* If it's our master or we shouldn't trigger a mount we're done */ 181 * For an expire of a covered direct or offset mount we need
360 lookup_type = nd->flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY); 182 * to break out of follow_down() at the autofs mount trigger
361 if (oz_mode || !lookup_type) 183 * (d_mounted--), so we can see the expiring flag, and manage
184 * the blocking and following here until the expire is completed.
185 */
186 if (oz_mode) {
187 spin_lock(&sbi->fs_lock);
188 if (ino->flags & AUTOFS_INF_EXPIRING) {
189 spin_unlock(&sbi->fs_lock);
190 /* Follow down to our covering mount. */
191 if (!follow_down(&nd->path.mnt, &nd->path.dentry))
192 goto done;
193 goto follow;
194 }
195 spin_unlock(&sbi->fs_lock);
362 goto done; 196 goto done;
197 }
363 198
364 /* If an expire request is pending wait for it. */ 199 /* If an expire request is pending everyone must wait. */
365 if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) { 200 autofs4_expire_wait(dentry);
366 DPRINTK("waiting for active request %p name=%.*s",
367 dentry, dentry->d_name.len, dentry->d_name.name);
368
369 status = autofs4_wait(sbi, dentry, NFY_NONE);
370 201
371 DPRINTK("request done status=%d", status); 202 /* We trigger a mount for almost all flags */
372 } 203 lookup_type = nd->flags & (TRIGGER_FLAGS | TRIGGER_INTENTS);
204 if (!(lookup_type || dentry->d_flags & DCACHE_AUTOFS_PENDING))
205 goto follow;
373 206
374 /* 207 /*
375 * If the dentry contains directories then it is an 208 * If the dentry contains directories then it is an autofs
376 * autofs multi-mount with no root mount offset. So 209 * multi-mount with no root mount offset. So don't try to
377 * don't try to mount it again. 210 * mount it again.
378 */ 211 */
379 spin_lock(&dcache_lock); 212 spin_lock(&dcache_lock);
380 if (!d_mountpoint(dentry) && __simple_empty(dentry)) { 213 if (dentry->d_flags & DCACHE_AUTOFS_PENDING ||
214 (!d_mountpoint(dentry) && __simple_empty(dentry))) {
381 spin_unlock(&dcache_lock); 215 spin_unlock(&dcache_lock);
382 216
383 status = try_to_fill_dentry(dentry, 0); 217 status = try_to_fill_dentry(dentry, 0);
384 if (status) 218 if (status)
385 goto out_error; 219 goto out_error;
386 220
387 /* 221 goto follow;
388 * The mount succeeded but if there is no root mount
389 * it must be an autofs multi-mount with no root offset
390 * so we don't need to follow the mount.
391 */
392 if (d_mountpoint(dentry)) {
393 if (!autofs4_follow_mount(&nd->path.mnt,
394 &nd->path.dentry)) {
395 status = -ENOENT;
396 goto out_error;
397 }
398 }
399
400 goto done;
401 } 222 }
402 spin_unlock(&dcache_lock); 223 spin_unlock(&dcache_lock);
224follow:
225 /*
226 * If there is no root mount it must be an autofs
227 * multi-mount with no root offset so we don't need
228 * to follow it.
229 */
230 if (d_mountpoint(dentry)) {
231 if (!autofs4_follow_mount(&nd->path.mnt,
232 &nd->path.dentry)) {
233 status = -ENOENT;
234 goto out_error;
235 }
236 }
403 237
404done: 238done:
405 return NULL; 239 return NULL;
@@ -424,12 +258,23 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
424 int status = 1; 258 int status = 1;
425 259
426 /* Pending dentry */ 260 /* Pending dentry */
261 spin_lock(&sbi->fs_lock);
427 if (autofs4_ispending(dentry)) { 262 if (autofs4_ispending(dentry)) {
428 /* The daemon never causes a mount to trigger */ 263 /* The daemon never causes a mount to trigger */
264 spin_unlock(&sbi->fs_lock);
265
429 if (oz_mode) 266 if (oz_mode)
430 return 1; 267 return 1;
431 268
432 /* 269 /*
270 * If the directory has gone away due to an expire
271 * we have been called as ->d_revalidate() and so
272 * we need to return false and proceed to ->lookup().
273 */
274 if (autofs4_expire_wait(dentry) == -EAGAIN)
275 return 0;
276
277 /*
433 * A zero status is success otherwise we have a 278 * A zero status is success otherwise we have a
434 * negative error code. 279 * negative error code.
435 */ 280 */
@@ -437,17 +282,9 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
437 if (status == 0) 282 if (status == 0)
438 return 1; 283 return 1;
439 284
440 /*
441 * A status of EAGAIN here means that the dentry has gone
442 * away while waiting for an expire to complete. If we are
443 * racing with expire lookup will wait for it so this must
444 * be a revalidate and we need to send it to lookup.
445 */
446 if (status == -EAGAIN)
447 return 0;
448
449 return status; 285 return status;
450 } 286 }
287 spin_unlock(&sbi->fs_lock);
451 288
452 /* Negative dentry.. invalidate if "old" */ 289 /* Negative dentry.. invalidate if "old" */
453 if (dentry->d_inode == NULL) 290 if (dentry->d_inode == NULL)
@@ -461,6 +298,7 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
461 DPRINTK("dentry=%p %.*s, emptydir", 298 DPRINTK("dentry=%p %.*s, emptydir",
462 dentry, dentry->d_name.len, dentry->d_name.name); 299 dentry, dentry->d_name.len, dentry->d_name.name);
463 spin_unlock(&dcache_lock); 300 spin_unlock(&dcache_lock);
301
464 /* The daemon never causes a mount to trigger */ 302 /* The daemon never causes a mount to trigger */
465 if (oz_mode) 303 if (oz_mode)
466 return 1; 304 return 1;
@@ -493,10 +331,12 @@ void autofs4_dentry_release(struct dentry *de)
493 struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb); 331 struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb);
494 332
495 if (sbi) { 333 if (sbi) {
496 spin_lock(&sbi->rehash_lock); 334 spin_lock(&sbi->lookup_lock);
497 if (!list_empty(&inf->rehash)) 335 if (!list_empty(&inf->active))
498 list_del(&inf->rehash); 336 list_del(&inf->active);
499 spin_unlock(&sbi->rehash_lock); 337 if (!list_empty(&inf->expiring))
338 list_del(&inf->expiring);
339 spin_unlock(&sbi->lookup_lock);
500 } 340 }
501 341
502 inf->dentry = NULL; 342 inf->dentry = NULL;
@@ -518,7 +358,7 @@ static struct dentry_operations autofs4_dentry_operations = {
518 .d_release = autofs4_dentry_release, 358 .d_release = autofs4_dentry_release,
519}; 359};
520 360
521static struct dentry *autofs4_lookup_unhashed(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name) 361static struct dentry *autofs4_lookup_active(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name)
522{ 362{
523 unsigned int len = name->len; 363 unsigned int len = name->len;
524 unsigned int hash = name->hash; 364 unsigned int hash = name->hash;
@@ -526,14 +366,66 @@ static struct dentry *autofs4_lookup_unhashed(struct autofs_sb_info *sbi, struct
526 struct list_head *p, *head; 366 struct list_head *p, *head;
527 367
528 spin_lock(&dcache_lock); 368 spin_lock(&dcache_lock);
529 spin_lock(&sbi->rehash_lock); 369 spin_lock(&sbi->lookup_lock);
530 head = &sbi->rehash_list; 370 head = &sbi->active_list;
531 list_for_each(p, head) { 371 list_for_each(p, head) {
532 struct autofs_info *ino; 372 struct autofs_info *ino;
533 struct dentry *dentry; 373 struct dentry *dentry;
534 struct qstr *qstr; 374 struct qstr *qstr;
535 375
536 ino = list_entry(p, struct autofs_info, rehash); 376 ino = list_entry(p, struct autofs_info, active);
377 dentry = ino->dentry;
378
379 spin_lock(&dentry->d_lock);
380
381 /* Already gone? */
382 if (atomic_read(&dentry->d_count) == 0)
383 goto next;
384
385 qstr = &dentry->d_name;
386
387 if (dentry->d_name.hash != hash)
388 goto next;
389 if (dentry->d_parent != parent)
390 goto next;
391
392 if (qstr->len != len)
393 goto next;
394 if (memcmp(qstr->name, str, len))
395 goto next;
396
397 if (d_unhashed(dentry)) {
398 dget(dentry);
399 spin_unlock(&dentry->d_lock);
400 spin_unlock(&sbi->lookup_lock);
401 spin_unlock(&dcache_lock);
402 return dentry;
403 }
404next:
405 spin_unlock(&dentry->d_lock);
406 }
407 spin_unlock(&sbi->lookup_lock);
408 spin_unlock(&dcache_lock);
409
410 return NULL;
411}
412
413static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name)
414{
415 unsigned int len = name->len;
416 unsigned int hash = name->hash;
417 const unsigned char *str = name->name;
418 struct list_head *p, *head;
419
420 spin_lock(&dcache_lock);
421 spin_lock(&sbi->lookup_lock);
422 head = &sbi->expiring_list;
423 list_for_each(p, head) {
424 struct autofs_info *ino;
425 struct dentry *dentry;
426 struct qstr *qstr;
427
428 ino = list_entry(p, struct autofs_info, expiring);
537 dentry = ino->dentry; 429 dentry = ino->dentry;
538 430
539 spin_lock(&dentry->d_lock); 431 spin_lock(&dentry->d_lock);
@@ -555,33 +447,16 @@ static struct dentry *autofs4_lookup_unhashed(struct autofs_sb_info *sbi, struct
555 goto next; 447 goto next;
556 448
557 if (d_unhashed(dentry)) { 449 if (d_unhashed(dentry)) {
558 struct inode *inode = dentry->d_inode;
559
560 ino = autofs4_dentry_ino(dentry);
561 list_del_init(&ino->rehash);
562 dget(dentry); 450 dget(dentry);
563 /*
564 * Make the rehashed dentry negative so the VFS
565 * behaves as it should.
566 */
567 if (inode) {
568 dentry->d_inode = NULL;
569 list_del_init(&dentry->d_alias);
570 spin_unlock(&dentry->d_lock);
571 spin_unlock(&sbi->rehash_lock);
572 spin_unlock(&dcache_lock);
573 iput(inode);
574 return dentry;
575 }
576 spin_unlock(&dentry->d_lock); 451 spin_unlock(&dentry->d_lock);
577 spin_unlock(&sbi->rehash_lock); 452 spin_unlock(&sbi->lookup_lock);
578 spin_unlock(&dcache_lock); 453 spin_unlock(&dcache_lock);
579 return dentry; 454 return dentry;
580 } 455 }
581next: 456next:
582 spin_unlock(&dentry->d_lock); 457 spin_unlock(&dentry->d_lock);
583 } 458 }
584 spin_unlock(&sbi->rehash_lock); 459 spin_unlock(&sbi->lookup_lock);
585 spin_unlock(&dcache_lock); 460 spin_unlock(&dcache_lock);
586 461
587 return NULL; 462 return NULL;
@@ -591,7 +466,8 @@ next:
591static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 466static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
592{ 467{
593 struct autofs_sb_info *sbi; 468 struct autofs_sb_info *sbi;
594 struct dentry *unhashed; 469 struct autofs_info *ino;
470 struct dentry *expiring, *unhashed;
595 int oz_mode; 471 int oz_mode;
596 472
597 DPRINTK("name = %.*s", 473 DPRINTK("name = %.*s",
@@ -607,8 +483,26 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
607 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", 483 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
608 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); 484 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
609 485
610 unhashed = autofs4_lookup_unhashed(sbi, dentry->d_parent, &dentry->d_name); 486 expiring = autofs4_lookup_expiring(sbi, dentry->d_parent, &dentry->d_name);
611 if (!unhashed) { 487 if (expiring) {
488 /*
489 * If we are racing with expire the request might not
490 * be quite complete but the directory has been removed
491 * so it must have been successful, so just wait for it.
492 */
493 ino = autofs4_dentry_ino(expiring);
494 autofs4_expire_wait(expiring);
495 spin_lock(&sbi->lookup_lock);
496 if (!list_empty(&ino->expiring))
497 list_del_init(&ino->expiring);
498 spin_unlock(&sbi->lookup_lock);
499 dput(expiring);
500 }
501
502 unhashed = autofs4_lookup_active(sbi, dentry->d_parent, &dentry->d_name);
503 if (unhashed)
504 dentry = unhashed;
505 else {
612 /* 506 /*
613 * Mark the dentry incomplete but don't hash it. We do this 507 * Mark the dentry incomplete but don't hash it. We do this
614 * to serialize our inode creation operations (symlink and 508 * to serialize our inode creation operations (symlink and
@@ -622,39 +516,34 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
622 */ 516 */
623 dentry->d_op = &autofs4_root_dentry_operations; 517 dentry->d_op = &autofs4_root_dentry_operations;
624 518
625 dentry->d_fsdata = NULL;
626 d_instantiate(dentry, NULL);
627 } else {
628 struct autofs_info *ino = autofs4_dentry_ino(unhashed);
629 DPRINTK("rehash %p with %p", dentry, unhashed);
630 /* 519 /*
631 * If we are racing with expire the request might not 520 * And we need to ensure that the same dentry is used for
632 * be quite complete but the directory has been removed 521 * all following lookup calls until it is hashed so that
633 * so it must have been successful, so just wait for it. 522 * the dentry flags are persistent throughout the request.
634 * We need to ensure the AUTOFS_INF_EXPIRING flag is clear
635 * before continuing as revalidate may fail when calling
636 * try_to_fill_dentry (returning EAGAIN) if we don't.
637 */ 523 */
638 while (ino && (ino->flags & AUTOFS_INF_EXPIRING)) { 524 ino = autofs4_init_ino(NULL, sbi, 0555);
639 DPRINTK("wait for incomplete expire %p name=%.*s", 525 if (!ino)
640 unhashed, unhashed->d_name.len, 526 return ERR_PTR(-ENOMEM);
641 unhashed->d_name.name); 527
642 autofs4_wait(sbi, unhashed, NFY_NONE); 528 dentry->d_fsdata = ino;
643 DPRINTK("request completed"); 529 ino->dentry = dentry;
644 } 530
645 dentry = unhashed; 531 spin_lock(&sbi->lookup_lock);
532 list_add(&ino->active, &sbi->active_list);
533 spin_unlock(&sbi->lookup_lock);
534
535 d_instantiate(dentry, NULL);
646 } 536 }
647 537
648 if (!oz_mode) { 538 if (!oz_mode) {
649 spin_lock(&dentry->d_lock); 539 spin_lock(&dentry->d_lock);
650 dentry->d_flags |= DCACHE_AUTOFS_PENDING; 540 dentry->d_flags |= DCACHE_AUTOFS_PENDING;
651 spin_unlock(&dentry->d_lock); 541 spin_unlock(&dentry->d_lock);
652 } 542 if (dentry->d_op && dentry->d_op->d_revalidate) {
653 543 mutex_unlock(&dir->i_mutex);
654 if (dentry->d_op && dentry->d_op->d_revalidate) { 544 (dentry->d_op->d_revalidate)(dentry, nd);
655 mutex_unlock(&dir->i_mutex); 545 mutex_lock(&dir->i_mutex);
656 (dentry->d_op->d_revalidate)(dentry, nd); 546 }
657 mutex_lock(&dir->i_mutex);
658 } 547 }
659 548
660 /* 549 /*
@@ -673,9 +562,11 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
673 return ERR_PTR(-ERESTARTNOINTR); 562 return ERR_PTR(-ERESTARTNOINTR);
674 } 563 }
675 } 564 }
676 spin_lock(&dentry->d_lock); 565 if (!oz_mode) {
677 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 566 spin_lock(&dentry->d_lock);
678 spin_unlock(&dentry->d_lock); 567 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
568 spin_unlock(&dentry->d_lock);
569 }
679 } 570 }
680 571
681 /* 572 /*
@@ -706,7 +597,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
706 } 597 }
707 598
708 if (unhashed) 599 if (unhashed)
709 return dentry; 600 return unhashed;
710 601
711 return NULL; 602 return NULL;
712} 603}
@@ -728,20 +619,31 @@ static int autofs4_dir_symlink(struct inode *dir,
728 return -EACCES; 619 return -EACCES;
729 620
730 ino = autofs4_init_ino(ino, sbi, S_IFLNK | 0555); 621 ino = autofs4_init_ino(ino, sbi, S_IFLNK | 0555);
731 if (ino == NULL) 622 if (!ino)
732 return -ENOSPC; 623 return -ENOMEM;
733 624
734 ino->size = strlen(symname); 625 spin_lock(&sbi->lookup_lock);
735 ino->u.symlink = cp = kmalloc(ino->size + 1, GFP_KERNEL); 626 if (!list_empty(&ino->active))
627 list_del_init(&ino->active);
628 spin_unlock(&sbi->lookup_lock);
736 629
737 if (cp == NULL) { 630 ino->size = strlen(symname);
738 kfree(ino); 631 cp = kmalloc(ino->size + 1, GFP_KERNEL);
739 return -ENOSPC; 632 if (!cp) {
633 if (!dentry->d_fsdata)
634 kfree(ino);
635 return -ENOMEM;
740 } 636 }
741 637
742 strcpy(cp, symname); 638 strcpy(cp, symname);
743 639
744 inode = autofs4_get_inode(dir->i_sb, ino); 640 inode = autofs4_get_inode(dir->i_sb, ino);
641 if (!inode) {
642 kfree(cp);
643 if (!dentry->d_fsdata)
644 kfree(ino);
645 return -ENOMEM;
646 }
745 d_add(dentry, inode); 647 d_add(dentry, inode);
746 648
747 if (dir == dir->i_sb->s_root->d_inode) 649 if (dir == dir->i_sb->s_root->d_inode)
@@ -757,6 +659,7 @@ static int autofs4_dir_symlink(struct inode *dir,
757 atomic_inc(&p_ino->count); 659 atomic_inc(&p_ino->count);
758 ino->inode = inode; 660 ino->inode = inode;
759 661
662 ino->u.symlink = cp;
760 dir->i_mtime = CURRENT_TIME; 663 dir->i_mtime = CURRENT_TIME;
761 664
762 return 0; 665 return 0;
@@ -769,9 +672,8 @@ static int autofs4_dir_symlink(struct inode *dir,
769 * that the file no longer exists. However, doing that means that the 672 * that the file no longer exists. However, doing that means that the
770 * VFS layer can turn the dentry into a negative dentry. We don't want 673 * VFS layer can turn the dentry into a negative dentry. We don't want
771 * this, because the unlink is probably the result of an expire. 674 * this, because the unlink is probably the result of an expire.
772 * We simply d_drop it and add it to a rehash candidates list in the 675 * We simply d_drop it and add it to an expiring list in the super block,
773 * super block, which allows the dentry lookup to reuse it retaining 676 * which allows the dentry lookup to check for an incomplete expire.
774 * the flags, such as expire in progress, in case we're racing with expire.
775 * 677 *
776 * If a process is blocked on the dentry waiting for the expire to finish, 678 * If a process is blocked on the dentry waiting for the expire to finish,
777 * it will invalidate the dentry and try to mount with a new one. 679 * it will invalidate the dentry and try to mount with a new one.
@@ -801,9 +703,10 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
801 dir->i_mtime = CURRENT_TIME; 703 dir->i_mtime = CURRENT_TIME;
802 704
803 spin_lock(&dcache_lock); 705 spin_lock(&dcache_lock);
804 spin_lock(&sbi->rehash_lock); 706 spin_lock(&sbi->lookup_lock);
805 list_add(&ino->rehash, &sbi->rehash_list); 707 if (list_empty(&ino->expiring))
806 spin_unlock(&sbi->rehash_lock); 708 list_add(&ino->expiring, &sbi->expiring_list);
709 spin_unlock(&sbi->lookup_lock);
807 spin_lock(&dentry->d_lock); 710 spin_lock(&dentry->d_lock);
808 __d_drop(dentry); 711 __d_drop(dentry);
809 spin_unlock(&dentry->d_lock); 712 spin_unlock(&dentry->d_lock);
@@ -829,9 +732,10 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
829 spin_unlock(&dcache_lock); 732 spin_unlock(&dcache_lock);
830 return -ENOTEMPTY; 733 return -ENOTEMPTY;
831 } 734 }
832 spin_lock(&sbi->rehash_lock); 735 spin_lock(&sbi->lookup_lock);
833 list_add(&ino->rehash, &sbi->rehash_list); 736 if (list_empty(&ino->expiring))
834 spin_unlock(&sbi->rehash_lock); 737 list_add(&ino->expiring, &sbi->expiring_list);
738 spin_unlock(&sbi->lookup_lock);
835 spin_lock(&dentry->d_lock); 739 spin_lock(&dentry->d_lock);
836 __d_drop(dentry); 740 __d_drop(dentry);
837 spin_unlock(&dentry->d_lock); 741 spin_unlock(&dentry->d_lock);
@@ -866,10 +770,20 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
866 dentry, dentry->d_name.len, dentry->d_name.name); 770 dentry, dentry->d_name.len, dentry->d_name.name);
867 771
868 ino = autofs4_init_ino(ino, sbi, S_IFDIR | 0555); 772 ino = autofs4_init_ino(ino, sbi, S_IFDIR | 0555);
869 if (ino == NULL) 773 if (!ino)
870 return -ENOSPC; 774 return -ENOMEM;
775
776 spin_lock(&sbi->lookup_lock);
777 if (!list_empty(&ino->active))
778 list_del_init(&ino->active);
779 spin_unlock(&sbi->lookup_lock);
871 780
872 inode = autofs4_get_inode(dir->i_sb, ino); 781 inode = autofs4_get_inode(dir->i_sb, ino);
782 if (!inode) {
783 if (!dentry->d_fsdata)
784 kfree(ino);
785 return -ENOMEM;
786 }
873 d_add(dentry, inode); 787 d_add(dentry, inode);
874 788
875 if (dir == dir->i_sb->s_root->d_inode) 789 if (dir == dir->i_sb->s_root->d_inode)
@@ -922,44 +836,6 @@ static inline int autofs4_get_protosubver(struct autofs_sb_info *sbi, int __user
922} 836}
923 837
924/* 838/*
925 * Tells the daemon whether we need to reghost or not. Also, clears
926 * the reghost_needed flag.
927 */
928static inline int autofs4_ask_reghost(struct autofs_sb_info *sbi, int __user *p)
929{
930 int status;
931
932 DPRINTK("returning %d", sbi->needs_reghost);
933
934 status = put_user(sbi->needs_reghost, p);
935 if (status)
936 return status;
937
938 sbi->needs_reghost = 0;
939 return 0;
940}
941
942/*
943 * Enable / Disable reghosting ioctl() operation
944 */
945static inline int autofs4_toggle_reghost(struct autofs_sb_info *sbi, int __user *p)
946{
947 int status;
948 int val;
949
950 status = get_user(val, p);
951
952 DPRINTK("reghost = %d", val);
953
954 if (status)
955 return status;
956
957 /* turn on/off reghosting, with the val */
958 sbi->reghost_enabled = val;
959 return 0;
960}
961
962/*
963* Tells the daemon whether it can umount the autofs mount. 839* Tells the daemon whether it can umount the autofs mount.
964*/ 840*/
965static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p) 841static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
@@ -1023,11 +899,6 @@ static int autofs4_root_ioctl(struct inode *inode, struct file *filp,
1023 case AUTOFS_IOC_SETTIMEOUT: 899 case AUTOFS_IOC_SETTIMEOUT:
1024 return autofs4_get_set_timeout(sbi, p); 900 return autofs4_get_set_timeout(sbi, p);
1025 901
1026 case AUTOFS_IOC_TOGGLEREGHOST:
1027 return autofs4_toggle_reghost(sbi, p);
1028 case AUTOFS_IOC_ASKREGHOST:
1029 return autofs4_ask_reghost(sbi, p);
1030
1031 case AUTOFS_IOC_ASKUMOUNT: 902 case AUTOFS_IOC_ASKUMOUNT:
1032 return autofs4_ask_umount(filp->f_path.mnt, p); 903 return autofs4_ask_umount(filp->f_path.mnt, p);
1033 904
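
A note on the root.c rework above: the single rehash list is replaced by two per-superblock lists, active and expiring, both protected by sbi->lookup_lock, and autofs4_lookup() now consults them through autofs4_lookup_active()/autofs4_lookup_expiring(). Pulled out of the interleaved hunks, the bookkeeping pattern is simply this (names as in the patch; shown only as a condensed sketch):

        /* While a mount request is in flight, keep the autofs_info on the
         * active list so a racing lookup can reuse the same dentry. */
        spin_lock(&sbi->lookup_lock);
        list_add(&ino->active, &sbi->active_list);
        spin_unlock(&sbi->lookup_lock);

        /* When unlink/rmdir drops the dentry during an expire, park it on
         * the expiring list so lookup can detect the incomplete expire
         * and wait for it to finish before mounting again. */
        spin_lock(&sbi->lookup_lock);
        if (list_empty(&ino->expiring))
                list_add(&ino->expiring, &sbi->expiring_list);
        spin_unlock(&sbi->lookup_lock);
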
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 75e5955c3f6d..35216d18d8b5 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -28,6 +28,12 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
28{ 28{
29 struct autofs_wait_queue *wq, *nwq; 29 struct autofs_wait_queue *wq, *nwq;
30 30
31 mutex_lock(&sbi->wq_mutex);
32 if (sbi->catatonic) {
33 mutex_unlock(&sbi->wq_mutex);
34 return;
35 }
36
31 DPRINTK("entering catatonic mode"); 37 DPRINTK("entering catatonic mode");
32 38
33 sbi->catatonic = 1; 39 sbi->catatonic = 1;
@@ -36,13 +42,18 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
36 while (wq) { 42 while (wq) {
37 nwq = wq->next; 43 nwq = wq->next;
38 wq->status = -ENOENT; /* Magic is gone - report failure */ 44 wq->status = -ENOENT; /* Magic is gone - report failure */
39 kfree(wq->name); 45 if (wq->name.name) {
40 wq->name = NULL; 46 kfree(wq->name.name);
47 wq->name.name = NULL;
48 }
49 wq->wait_ctr--;
41 wake_up_interruptible(&wq->queue); 50 wake_up_interruptible(&wq->queue);
42 wq = nwq; 51 wq = nwq;
43 } 52 }
44 fput(sbi->pipe); /* Close the pipe */ 53 fput(sbi->pipe); /* Close the pipe */
45 sbi->pipe = NULL; 54 sbi->pipe = NULL;
55 sbi->pipefd = -1;
56 mutex_unlock(&sbi->wq_mutex);
46} 57}
47 58
48static int autofs4_write(struct file *file, const void *addr, int bytes) 59static int autofs4_write(struct file *file, const void *addr, int bytes)
@@ -89,10 +100,11 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
89 union autofs_packet_union v4_pkt; 100 union autofs_packet_union v4_pkt;
90 union autofs_v5_packet_union v5_pkt; 101 union autofs_v5_packet_union v5_pkt;
91 } pkt; 102 } pkt;
103 struct file *pipe = NULL;
92 size_t pktsz; 104 size_t pktsz;
93 105
94 DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d", 106 DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d",
95 wq->wait_queue_token, wq->len, wq->name, type); 107 wq->wait_queue_token, wq->name.len, wq->name.name, type);
96 108
97 memset(&pkt,0,sizeof pkt); /* For security reasons */ 109 memset(&pkt,0,sizeof pkt); /* For security reasons */
98 110
@@ -107,9 +119,9 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
107 pktsz = sizeof(*mp); 119 pktsz = sizeof(*mp);
108 120
109 mp->wait_queue_token = wq->wait_queue_token; 121 mp->wait_queue_token = wq->wait_queue_token;
110 mp->len = wq->len; 122 mp->len = wq->name.len;
111 memcpy(mp->name, wq->name, wq->len); 123 memcpy(mp->name, wq->name.name, wq->name.len);
112 mp->name[wq->len] = '\0'; 124 mp->name[wq->name.len] = '\0';
113 break; 125 break;
114 } 126 }
115 case autofs_ptype_expire_multi: 127 case autofs_ptype_expire_multi:
@@ -119,9 +131,9 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
119 pktsz = sizeof(*ep); 131 pktsz = sizeof(*ep);
120 132
121 ep->wait_queue_token = wq->wait_queue_token; 133 ep->wait_queue_token = wq->wait_queue_token;
122 ep->len = wq->len; 134 ep->len = wq->name.len;
123 memcpy(ep->name, wq->name, wq->len); 135 memcpy(ep->name, wq->name.name, wq->name.len);
124 ep->name[wq->len] = '\0'; 136 ep->name[wq->name.len] = '\0';
125 break; 137 break;
126 } 138 }
127 /* 139 /*
@@ -138,9 +150,9 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
138 pktsz = sizeof(*packet); 150 pktsz = sizeof(*packet);
139 151
140 packet->wait_queue_token = wq->wait_queue_token; 152 packet->wait_queue_token = wq->wait_queue_token;
141 packet->len = wq->len; 153 packet->len = wq->name.len;
142 memcpy(packet->name, wq->name, wq->len); 154 memcpy(packet->name, wq->name.name, wq->name.len);
143 packet->name[wq->len] = '\0'; 155 packet->name[wq->name.len] = '\0';
144 packet->dev = wq->dev; 156 packet->dev = wq->dev;
145 packet->ino = wq->ino; 157 packet->ino = wq->ino;
146 packet->uid = wq->uid; 158 packet->uid = wq->uid;
@@ -154,8 +166,19 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
154 return; 166 return;
155 } 167 }
156 168
157 if (autofs4_write(sbi->pipe, &pkt, pktsz)) 169 /* Check if we have become catatonic */
158 autofs4_catatonic_mode(sbi); 170 mutex_lock(&sbi->wq_mutex);
171 if (!sbi->catatonic) {
172 pipe = sbi->pipe;
173 get_file(pipe);
174 }
175 mutex_unlock(&sbi->wq_mutex);
176
177 if (pipe) {
178 if (autofs4_write(pipe, &pkt, pktsz))
179 autofs4_catatonic_mode(sbi);
180 fput(pipe);
181 }
159} 182}
160 183
161static int autofs4_getpath(struct autofs_sb_info *sbi, 184static int autofs4_getpath(struct autofs_sb_info *sbi,
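
The autofs4_notify_daemon() hunk above closes a race with catatonic mode: the daemon pipe is sampled and pinned with get_file() under wq_mutex, so a concurrent autofs4_catatonic_mode() (which fput()s sbi->pipe and clears it) cannot free the file mid-write. Collected from the interleaved lines above into a plain sketch:

        struct file *pipe = NULL;

        mutex_lock(&sbi->wq_mutex);
        if (!sbi->catatonic) {
                pipe = sbi->pipe;
                get_file(pipe);         /* extra reference held across the write */
        }
        mutex_unlock(&sbi->wq_mutex);

        if (pipe) {
                if (autofs4_write(pipe, &pkt, pktsz))
                        autofs4_catatonic_mode(sbi);
                fput(pipe);
        }
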
@@ -191,58 +214,55 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
191} 214}
192 215
193static struct autofs_wait_queue * 216static struct autofs_wait_queue *
194autofs4_find_wait(struct autofs_sb_info *sbi, 217autofs4_find_wait(struct autofs_sb_info *sbi, struct qstr *qstr)
195 char *name, unsigned int hash, unsigned int len)
196{ 218{
197 struct autofs_wait_queue *wq; 219 struct autofs_wait_queue *wq;
198 220
199 for (wq = sbi->queues; wq; wq = wq->next) { 221 for (wq = sbi->queues; wq; wq = wq->next) {
200 if (wq->hash == hash && 222 if (wq->name.hash == qstr->hash &&
201 wq->len == len && 223 wq->name.len == qstr->len &&
202 wq->name && !memcmp(wq->name, name, len)) 224 wq->name.name &&
225 !memcmp(wq->name.name, qstr->name, qstr->len))
203 break; 226 break;
204 } 227 }
205 return wq; 228 return wq;
206} 229}
207 230
208int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, 231/*
209 enum autofs_notify notify) 232 * Check if we have a valid request.
233 * Returns
234 * 1 if the request should continue.
235 * In this case we can return an autofs_wait_queue entry if one is
236 * found or NULL to indicate a new wait needs to be created.
237 * 0 or a negative errno if the request shouldn't continue.
238 */
239static int validate_request(struct autofs_wait_queue **wait,
240 struct autofs_sb_info *sbi,
241 struct qstr *qstr,
242 struct dentry*dentry, enum autofs_notify notify)
210{ 243{
211 struct autofs_info *ino;
212 struct autofs_wait_queue *wq; 244 struct autofs_wait_queue *wq;
213 char *name; 245 struct autofs_info *ino;
214 unsigned int len = 0;
215 unsigned int hash = 0;
216 int status, type;
217
218 /* In catatonic mode, we don't wait for nobody */
219 if (sbi->catatonic)
220 return -ENOENT;
221
222 name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
223 if (!name)
224 return -ENOMEM;
225 246
226 /* If this is a direct mount request create a dummy name */ 247 /* Wait in progress, continue; */
227 if (IS_ROOT(dentry) && (sbi->type & AUTOFS_TYPE_DIRECT)) 248 wq = autofs4_find_wait(sbi, qstr);
228 len = sprintf(name, "%p", dentry); 249 if (wq) {
229 else { 250 *wait = wq;
230 len = autofs4_getpath(sbi, dentry, &name); 251 return 1;
231 if (!len) {
232 kfree(name);
233 return -ENOENT;
234 }
235 } 252 }
236 hash = full_name_hash(name, len);
237 253
238 if (mutex_lock_interruptible(&sbi->wq_mutex)) { 254 *wait = NULL;
239 kfree(name);
240 return -EINTR;
241 }
242 255
243 wq = autofs4_find_wait(sbi, name, hash, len); 256 /* If we don't yet have any info this is a new request */
244 ino = autofs4_dentry_ino(dentry); 257 ino = autofs4_dentry_ino(dentry);
245 if (!wq && ino && notify == NFY_NONE) { 258 if (!ino)
259 return 1;
260
261 /*
262 * If we've been asked to wait on an existing expire (NFY_NONE)
263 * but there is no wait in the queue ...
264 */
265 if (notify == NFY_NONE) {
246 /* 266 /*
247 * Either we've betean the pending expire to post it's 267 * Either we've betean the pending expire to post it's
248 * wait or it finished while we waited on the mutex. 268 * wait or it finished while we waited on the mutex.
@@ -253,13 +273,14 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
253 while (ino->flags & AUTOFS_INF_EXPIRING) { 273 while (ino->flags & AUTOFS_INF_EXPIRING) {
254 mutex_unlock(&sbi->wq_mutex); 274 mutex_unlock(&sbi->wq_mutex);
255 schedule_timeout_interruptible(HZ/10); 275 schedule_timeout_interruptible(HZ/10);
256 if (mutex_lock_interruptible(&sbi->wq_mutex)) { 276 if (mutex_lock_interruptible(&sbi->wq_mutex))
257 kfree(name);
258 return -EINTR; 277 return -EINTR;
278
279 wq = autofs4_find_wait(sbi, qstr);
280 if (wq) {
281 *wait = wq;
282 return 1;
259 } 283 }
260 wq = autofs4_find_wait(sbi, name, hash, len);
261 if (wq)
262 break;
263 } 284 }
264 285
265 /* 286 /*
@@ -267,18 +288,96 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
267 * cases where we wait on NFY_NONE neither depend on the 288 * cases where we wait on NFY_NONE neither depend on the
268 * return status of the wait. 289 * return status of the wait.
269 */ 290 */
270 if (!wq) { 291 return 0;
292 }
293
294 /*
295 * If we've been asked to trigger a mount and the request
296 * completed while we waited on the mutex ...
297 */
298 if (notify == NFY_MOUNT) {
299 /*
300 * If the dentry isn't hashed just go ahead and try the
301 * mount again with a new wait (not much else we can do).
302 */
303 if (!d_unhashed(dentry)) {
304 /*
305 * But if the dentry is hashed, that means that we
306 * got here through the revalidate path. Thus, we
307 * need to check if the dentry has been mounted
308 * while we waited on the wq_mutex. If it has,
309 * simply return success.
310 */
311 if (d_mountpoint(dentry))
312 return 0;
313 }
314 }
315
316 return 1;
317}
318
319int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
320 enum autofs_notify notify)
321{
322 struct autofs_wait_queue *wq;
323 struct qstr qstr;
324 char *name;
325 int status, ret, type;
326
327 /* In catatonic mode, we don't wait for nobody */
328 if (sbi->catatonic)
329 return -ENOENT;
330
331 if (!dentry->d_inode) {
332 /*
333 * A wait for a negative dentry is invalid for certain
334 * cases. A direct or offset mount "always" has its mount
335 * point directory created and so the request dentry must
336 * be positive or the map key doesn't exist. The situation
337 * is very similar for indirect mounts except only dentries
338 * in the root of the autofs file system may be negative.
339 */
340 if (sbi->type & (AUTOFS_TYPE_DIRECT|AUTOFS_TYPE_OFFSET))
341 return -ENOENT;
342 else if (!IS_ROOT(dentry->d_parent))
343 return -ENOENT;
344 }
345
346 name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
347 if (!name)
348 return -ENOMEM;
349
350 /* If this is a direct mount request create a dummy name */
351 if (IS_ROOT(dentry) && (sbi->type & AUTOFS_TYPE_DIRECT))
352 qstr.len = sprintf(name, "%p", dentry);
353 else {
354 qstr.len = autofs4_getpath(sbi, dentry, &name);
355 if (!qstr.len) {
271 kfree(name); 356 kfree(name);
272 mutex_unlock(&sbi->wq_mutex); 357 return -ENOENT;
273 return 0;
274 } 358 }
275 } 359 }
360 qstr.name = name;
361 qstr.hash = full_name_hash(name, qstr.len);
362
363 if (mutex_lock_interruptible(&sbi->wq_mutex)) {
364 kfree(qstr.name);
365 return -EINTR;
366 }
367
368 ret = validate_request(&wq, sbi, &qstr, dentry, notify);
369 if (ret <= 0) {
370 if (ret == 0)
371 mutex_unlock(&sbi->wq_mutex);
372 kfree(qstr.name);
373 return ret;
374 }
276 375
277 if (!wq) { 376 if (!wq) {
278 /* Create a new wait queue */ 377 /* Create a new wait queue */
279 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); 378 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
280 if (!wq) { 379 if (!wq) {
281 kfree(name); 380 kfree(qstr.name);
282 mutex_unlock(&sbi->wq_mutex); 381 mutex_unlock(&sbi->wq_mutex);
283 return -ENOMEM; 382 return -ENOMEM;
284 } 383 }
@@ -289,9 +388,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
289 wq->next = sbi->queues; 388 wq->next = sbi->queues;
290 sbi->queues = wq; 389 sbi->queues = wq;
291 init_waitqueue_head(&wq->queue); 390 init_waitqueue_head(&wq->queue);
292 wq->hash = hash; 391 memcpy(&wq->name, &qstr, sizeof(struct qstr));
293 wq->name = name;
294 wq->len = len;
295 wq->dev = autofs4_get_dev(sbi); 392 wq->dev = autofs4_get_dev(sbi);
296 wq->ino = autofs4_get_ino(sbi); 393 wq->ino = autofs4_get_ino(sbi);
297 wq->uid = current->uid; 394 wq->uid = current->uid;
@@ -299,7 +396,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
299 wq->pid = current->pid; 396 wq->pid = current->pid;
300 wq->tgid = current->tgid; 397 wq->tgid = current->tgid;
301 wq->status = -EINTR; /* Status return if interrupted */ 398 wq->status = -EINTR; /* Status return if interrupted */
302 atomic_set(&wq->wait_ctr, 2); 399 wq->wait_ctr = 2;
303 mutex_unlock(&sbi->wq_mutex); 400 mutex_unlock(&sbi->wq_mutex);
304 401
305 if (sbi->version < 5) { 402 if (sbi->version < 5) {
@@ -319,28 +416,25 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
319 } 416 }
320 417
321 DPRINTK("new wait id = 0x%08lx, name = %.*s, nfy=%d\n", 418 DPRINTK("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
322 (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); 419 (unsigned long) wq->wait_queue_token, wq->name.len,
420 wq->name.name, notify);
323 421
324 /* autofs4_notify_daemon() may block */ 422 /* autofs4_notify_daemon() may block */
325 autofs4_notify_daemon(sbi, wq, type); 423 autofs4_notify_daemon(sbi, wq, type);
326 } else { 424 } else {
327 atomic_inc(&wq->wait_ctr); 425 wq->wait_ctr++;
328 mutex_unlock(&sbi->wq_mutex); 426 mutex_unlock(&sbi->wq_mutex);
329 kfree(name); 427 kfree(qstr.name);
330 DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", 428 DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
331 (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); 429 (unsigned long) wq->wait_queue_token, wq->name.len,
332 } 430 wq->name.name, notify);
333
334 /* wq->name is NULL if and only if the lock is already released */
335
336 if (sbi->catatonic) {
337 /* We might have slept, so check again for catatonic mode */
338 wq->status = -ENOENT;
339 kfree(wq->name);
340 wq->name = NULL;
341 } 431 }
342 432
343 if (wq->name) { 433 /*
434 * wq->name.name is NULL iff the lock is already released
435 * or the mount has been made catatonic.
436 */
437 if (wq->name.name) {
344 /* Block all but "shutdown" signals while waiting */ 438 /* Block all but "shutdown" signals while waiting */
345 sigset_t oldset; 439 sigset_t oldset;
346 unsigned long irqflags; 440 unsigned long irqflags;
@@ -351,7 +445,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
351 recalc_sigpending(); 445 recalc_sigpending();
352 spin_unlock_irqrestore(&current->sighand->siglock, irqflags); 446 spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
353 447
354 wait_event_interruptible(wq->queue, wq->name == NULL); 448 wait_event_interruptible(wq->queue, wq->name.name == NULL);
355 449
356 spin_lock_irqsave(&current->sighand->siglock, irqflags); 450 spin_lock_irqsave(&current->sighand->siglock, irqflags);
357 current->blocked = oldset; 451 current->blocked = oldset;
@@ -364,8 +458,10 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
364 status = wq->status; 458 status = wq->status;
365 459
366 /* Are we the last process to need status? */ 460 /* Are we the last process to need status? */
367 if (atomic_dec_and_test(&wq->wait_ctr)) 461 mutex_lock(&sbi->wq_mutex);
462 if (!--wq->wait_ctr)
368 kfree(wq); 463 kfree(wq);
464 mutex_unlock(&sbi->wq_mutex);
369 465
370 return status; 466 return status;
371} 467}
@@ -387,16 +483,13 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok
387 } 483 }
388 484
389 *wql = wq->next; /* Unlink from chain */ 485 *wql = wq->next; /* Unlink from chain */
390 mutex_unlock(&sbi->wq_mutex); 486 kfree(wq->name.name);
391 kfree(wq->name); 487 wq->name.name = NULL; /* Do not wait on this queue */
392 wq->name = NULL; /* Do not wait on this queue */
393
394 wq->status = status; 488 wq->status = status;
395 489 wake_up_interruptible(&wq->queue);
396 if (atomic_dec_and_test(&wq->wait_ctr)) /* Is anyone still waiting for this guy? */ 490 if (!--wq->wait_ctr)
397 kfree(wq); 491 kfree(wq);
398 else 492 mutex_unlock(&sbi->wq_mutex);
399 wake_up_interruptible(&wq->queue);
400 493
401 return 0; 494 return 0;
402} 495}
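
One detail worth noting in the waitq.c changes: wq->wait_ctr is demoted from an atomic_t to a plain int, so every reference drop now happens under wq_mutex and whoever takes the count to zero frees the entry. Both autofs4_wait() and autofs4_wait_release() above follow the same pattern, sketched here:

        /* Drop our reference to the wait entry; wait_ctr is protected by
         * wq_mutex, and the last holder frees the structure. */
        mutex_lock(&sbi->wq_mutex);
        if (!--wq->wait_ctr)
                kfree(wq);
        mutex_unlock(&sbi->wq_mutex);
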
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 639d2d8b5710..3b6ff854d983 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -131,6 +131,15 @@ static int padzero(unsigned long elf_bss)
131#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; }) 131#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
132#endif 132#endif
133 133
134#ifndef ELF_BASE_PLATFORM
135/*
136 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
137 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
138 * will be copied to the user stack in the same manner as AT_PLATFORM.
139 */
140#define ELF_BASE_PLATFORM NULL
141#endif
142
134static int 143static int
135create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, 144create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
136 unsigned long load_addr, unsigned long interp_load_addr) 145 unsigned long load_addr, unsigned long interp_load_addr)
@@ -142,7 +151,9 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
142 elf_addr_t __user *envp; 151 elf_addr_t __user *envp;
143 elf_addr_t __user *sp; 152 elf_addr_t __user *sp;
144 elf_addr_t __user *u_platform; 153 elf_addr_t __user *u_platform;
154 elf_addr_t __user *u_base_platform;
145 const char *k_platform = ELF_PLATFORM; 155 const char *k_platform = ELF_PLATFORM;
156 const char *k_base_platform = ELF_BASE_PLATFORM;
146 int items; 157 int items;
147 elf_addr_t *elf_info; 158 elf_addr_t *elf_info;
148 int ei_index = 0; 159 int ei_index = 0;
@@ -172,6 +183,19 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
172 return -EFAULT; 183 return -EFAULT;
173 } 184 }
174 185
186 /*
187 * If this architecture has a "base" platform capability
188 * string, copy it to userspace.
189 */
190 u_base_platform = NULL;
191 if (k_base_platform) {
192 size_t len = strlen(k_base_platform) + 1;
193
194 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
195 if (__copy_to_user(u_base_platform, k_base_platform, len))
196 return -EFAULT;
197 }
198
175 /* Create the ELF interpreter info */ 199 /* Create the ELF interpreter info */
176 elf_info = (elf_addr_t *)current->mm->saved_auxv; 200 elf_info = (elf_addr_t *)current->mm->saved_auxv;
177 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */ 201 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
@@ -209,6 +233,10 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
209 NEW_AUX_ENT(AT_PLATFORM, 233 NEW_AUX_ENT(AT_PLATFORM,
210 (elf_addr_t)(unsigned long)u_platform); 234 (elf_addr_t)(unsigned long)u_platform);
211 } 235 }
236 if (k_base_platform) {
237 NEW_AUX_ENT(AT_BASE_PLATFORM,
238 (elf_addr_t)(unsigned long)u_base_platform);
239 }
212 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) { 240 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
213 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data); 241 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
214 } 242 }
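
For readers wondering how the new aux vector entry is consumed: userspace reads AT_BASE_PLATFORM from the auxiliary vector it receives at exec time. A minimal illustration using glibc's getauxval() (note that getauxval() is a later glibc addition; contemporary userspace would walk the auxv that follows envp by hand):

        #include <stdio.h>
        #include <sys/auxv.h>   /* getauxval(), AT_PLATFORM, AT_BASE_PLATFORM */

        int main(void)
        {
                /* AT_BASE_PLATFORM names the real hardware platform, while
                 * AT_PLATFORM may name a compatibility platform. */
                const char *plat = (const char *)getauxval(AT_PLATFORM);
                const char *base = (const char *)getauxval(AT_BASE_PLATFORM);

                printf("AT_PLATFORM      = %s\n", plat ? plat : "(unset)");
                printf("AT_BASE_PLATFORM = %s\n", base ? base : "(unset)");
                return 0;
        }
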
@@ -1478,7 +1506,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1478 const struct user_regset_view *view = task_user_regset_view(dump_task); 1506 const struct user_regset_view *view = task_user_regset_view(dump_task);
1479 struct elf_thread_core_info *t; 1507 struct elf_thread_core_info *t;
1480 struct elf_prpsinfo *psinfo; 1508 struct elf_prpsinfo *psinfo;
1481 struct task_struct *g, *p; 1509 struct core_thread *ct;
1482 unsigned int i; 1510 unsigned int i;
1483 1511
1484 info->size = 0; 1512 info->size = 0;
@@ -1517,31 +1545,26 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1517 /* 1545 /*
1518 * Allocate a structure for each thread. 1546 * Allocate a structure for each thread.
1519 */ 1547 */
1520 rcu_read_lock(); 1548 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1521 do_each_thread(g, p) 1549 t = kzalloc(offsetof(struct elf_thread_core_info,
1522 if (p->mm == dump_task->mm) { 1550 notes[info->thread_notes]),
1523 t = kzalloc(offsetof(struct elf_thread_core_info, 1551 GFP_KERNEL);
1524 notes[info->thread_notes]), 1552 if (unlikely(!t))
1525 GFP_ATOMIC); 1553 return 0;
1526 if (unlikely(!t)) { 1554
1527 rcu_read_unlock(); 1555 t->task = ct->task;
1528 return 0; 1556 if (ct->task == dump_task || !info->thread) {
1529 } 1557 t->next = info->thread;
1530 t->task = p; 1558 info->thread = t;
1531 if (p == dump_task || !info->thread) { 1559 } else {
1532 t->next = info->thread; 1560 /*
1533 info->thread = t; 1561 * Make sure to keep the original task at
1534 } else { 1562 * the head of the list.
1535 /* 1563 */
1536 * Make sure to keep the original task at 1564 t->next = info->thread->next;
1537 * the head of the list. 1565 info->thread->next = t;
1538 */
1539 t->next = info->thread->next;
1540 info->thread->next = t;
1541 }
1542 } 1566 }
1543 while_each_thread(g, p); 1567 }
1544 rcu_read_unlock();
1545 1568
1546 /* 1569 /*
1547 * Now fill in each thread's information. 1570 * Now fill in each thread's information.
@@ -1688,7 +1711,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1688{ 1711{
1689#define NUM_NOTES 6 1712#define NUM_NOTES 6
1690 struct list_head *t; 1713 struct list_head *t;
1691 struct task_struct *g, *p;
1692 1714
1693 info->notes = NULL; 1715 info->notes = NULL;
1694 info->prstatus = NULL; 1716 info->prstatus = NULL;
@@ -1720,20 +1742,19 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1720 1742
1721 info->thread_status_size = 0; 1743 info->thread_status_size = 0;
1722 if (signr) { 1744 if (signr) {
1745 struct core_thread *ct;
1723 struct elf_thread_status *ets; 1746 struct elf_thread_status *ets;
1724 rcu_read_lock(); 1747
1725 do_each_thread(g, p) 1748 for (ct = current->mm->core_state->dumper.next;
1726 if (current->mm == p->mm && current != p) { 1749 ct; ct = ct->next) {
1727 ets = kzalloc(sizeof(*ets), GFP_ATOMIC); 1750 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
1728 if (!ets) { 1751 if (!ets)
1729 rcu_read_unlock(); 1752 return 0;
1730 return 0; 1753
1731 } 1754 ets->thread = ct->task;
1732 ets->thread = p; 1755 list_add(&ets->list, &info->thread_list);
1733 list_add(&ets->list, &info->thread_list); 1756 }
1734 } 1757
1735 while_each_thread(g, p);
1736 rcu_read_unlock();
1737 list_for_each(t, &info->thread_list) { 1758 list_for_each(t, &info->thread_list) {
1738 int sz; 1759 int sz;
1739 1760
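
Both fill_note_info() variants above stop walking the global task list under RCU and instead walk the dump's own thread list, mm->core_state->dumper, which the coredump path has already populated with the (now quiesced) threads being dumped; that is also why the allocations can move from GFP_ATOMIC to GFP_KERNEL. Reduced to a sketch (collect_thread() is a made-up placeholder for the per-thread note gathering):

        struct core_thread *ct;

        /* Every thread taking part in the dump is linked on
         * mm->core_state->dumper, so no tasklist/RCU traversal is needed
         * and sleeping allocations are safe inside the loop. */
        for (ct = current->mm->core_state->dumper.next; ct; ct = ct->next)
                collect_thread(ct->task);       /* hypothetical per-thread hook */
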
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index d051a32e6270..1b59b1edf26d 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1573,7 +1573,6 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
1573 struct memelfnote *notes = NULL; 1573 struct memelfnote *notes = NULL;
1574 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */ 1574 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1575 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */ 1575 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1576 struct task_struct *g, *p;
1577 LIST_HEAD(thread_list); 1576 LIST_HEAD(thread_list);
1578 struct list_head *t; 1577 struct list_head *t;
1579 elf_fpregset_t *fpu = NULL; 1578 elf_fpregset_t *fpu = NULL;
@@ -1622,20 +1621,19 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
1622#endif 1621#endif
1623 1622
1624 if (signr) { 1623 if (signr) {
1624 struct core_thread *ct;
1625 struct elf_thread_status *tmp; 1625 struct elf_thread_status *tmp;
1626 rcu_read_lock(); 1626
1627 do_each_thread(g,p) 1627 for (ct = current->mm->core_state->dumper.next;
1628 if (current->mm == p->mm && current != p) { 1628 ct; ct = ct->next) {
1629 tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); 1629 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
1630 if (!tmp) { 1630 if (!tmp)
1631 rcu_read_unlock(); 1631 goto cleanup;
1632 goto cleanup; 1632
1633 } 1633 tmp->thread = ct->task;
1634 tmp->thread = p; 1634 list_add(&tmp->list, &thread_list);
1635 list_add(&tmp->list, &thread_list); 1635 }
1636 } 1636
1637 while_each_thread(g,p);
1638 rcu_read_unlock();
1639 list_for_each(t, &thread_list) { 1637 list_for_each(t, &thread_list) {
1640 struct elf_thread_status *tmp; 1638 struct elf_thread_status *tmp;
1641 int sz; 1639 int sz;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 7191306367c5..756205314c24 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -27,6 +27,7 @@
27#include <linux/namei.h> 27#include <linux/namei.h>
28#include <linux/mount.h> 28#include <linux/mount.h>
29#include <linux/syscalls.h> 29#include <linux/syscalls.h>
30#include <linux/fs.h>
30 31
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32 33
@@ -535,31 +536,16 @@ static ssize_t
535bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos) 536bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
536{ 537{
537 Node *e = file->f_path.dentry->d_inode->i_private; 538 Node *e = file->f_path.dentry->d_inode->i_private;
538 loff_t pos = *ppos;
539 ssize_t res; 539 ssize_t res;
540 char *page; 540 char *page;
541 int len;
542 541
543 if (!(page = (char*) __get_free_page(GFP_KERNEL))) 542 if (!(page = (char*) __get_free_page(GFP_KERNEL)))
544 return -ENOMEM; 543 return -ENOMEM;
545 544
546 entry_status(e, page); 545 entry_status(e, page);
547 len = strlen(page);
548 546
549 res = -EINVAL; 547 res = simple_read_from_buffer(buf, nbytes, ppos, page, strlen(page));
550 if (pos < 0) 548
551 goto out;
552 res = 0;
553 if (pos >= len)
554 goto out;
555 if (len < pos + nbytes)
556 nbytes = len - pos;
557 res = -EFAULT;
558 if (copy_to_user(buf, page + pos, nbytes))
559 goto out;
560 *ppos = pos + nbytes;
561 res = nbytes;
562out:
563 free_page((unsigned long) page); 549 free_page((unsigned long) page);
564 return res; 550 return res;
565} 551}
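
The binfmt_misc change is a straight conversion to the generic helper simple_read_from_buffer(), which performs the *ppos bounds checks, short-read clamping and copy_to_user() that the removed lines open-coded. Its kernel prototype and typical use, as a sketch:

        ssize_t simple_read_from_buffer(void __user *to, size_t count,
                                        loff_t *ppos, const void *from,
                                        size_t available);

        /* In a read() method: 'page' holds strlen(page) bytes of generated
         * text; the helper copies at most nbytes starting at *ppos, advances
         * *ppos, and returns the number of bytes copied (0 at end of file)
         * or -EFAULT if the copy to userspace fails. */
        res = simple_read_from_buffer(buf, nbytes, ppos, page, strlen(page));
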
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index e1c854890f94..bf4a3fd3c8e3 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -28,11 +28,9 @@ int coda_fake_statfs;
28char * coda_f2s(struct CodaFid *f) 28char * coda_f2s(struct CodaFid *f)
29{ 29{
30 static char s[60]; 30 static char s[60];
31#ifdef CONFIG_CODA_FS_OLD_API 31
32 sprintf(s, "(%08x.%08x.%08x)", f->opaque[0], f->opaque[1], f->opaque[2]);
33#else
34 sprintf(s, "(%08x.%08x.%08x.%08x)", f->opaque[0], f->opaque[1], f->opaque[2], f->opaque[3]); 32 sprintf(s, "(%08x.%08x.%08x.%08x)", f->opaque[0], f->opaque[1], f->opaque[2], f->opaque[3]);
35#endif 33
36 return s; 34 return s;
37} 35}
38 36
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 40c36f7352a6..0d9b80ec689c 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -378,11 +378,7 @@ MODULE_AUTHOR("Jan Harkes, Peter J. Braam");
378MODULE_DESCRIPTION("Coda Distributed File System VFS interface"); 378MODULE_DESCRIPTION("Coda Distributed File System VFS interface");
379MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR); 379MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR);
380MODULE_LICENSE("GPL"); 380MODULE_LICENSE("GPL");
381#ifdef CONFIG_CODA_FS_OLD_API
382MODULE_VERSION("5.3.21");
383#else
384MODULE_VERSION("6.6"); 381MODULE_VERSION("6.6");
385#endif
386 382
387static int __init init_coda(void) 383static int __init init_coda(void)
388{ 384{
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 359e531094dd..ce432bca95d1 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -52,12 +52,8 @@ static void *alloc_upcall(int opcode, int size)
52 inp->ih.opcode = opcode; 52 inp->ih.opcode = opcode;
53 inp->ih.pid = current->pid; 53 inp->ih.pid = current->pid;
54 inp->ih.pgid = task_pgrp_nr(current); 54 inp->ih.pgid = task_pgrp_nr(current);
55#ifdef CONFIG_CODA_FS_OLD_API
56 memset(&inp->ih.cred, 0, sizeof(struct coda_cred));
57 inp->ih.cred.cr_fsuid = current->fsuid;
58#else
59 inp->ih.uid = current->fsuid; 55 inp->ih.uid = current->fsuid;
60#endif 56
61 return (void*)inp; 57 return (void*)inp;
62} 58}
63 59
@@ -166,20 +162,11 @@ int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
166 union inputArgs *inp; 162 union inputArgs *inp;
167 union outputArgs *outp; 163 union outputArgs *outp;
168 int insize, outsize, error; 164 int insize, outsize, error;
169#ifdef CONFIG_CODA_FS_OLD_API
170 struct coda_cred cred = { 0, };
171 cred.cr_fsuid = uid;
172#endif
173 165
174 insize = SIZE(release); 166 insize = SIZE(release);
175 UPARG(CODA_CLOSE); 167 UPARG(CODA_CLOSE);
176 168
177#ifdef CONFIG_CODA_FS_OLD_API
178 memcpy(&(inp->ih.cred), &cred, sizeof(cred));
179#else
180 inp->ih.uid = uid; 169 inp->ih.uid = uid;
181#endif
182
183 inp->coda_close.VFid = *fid; 170 inp->coda_close.VFid = *fid;
184 inp->coda_close.flags = flags; 171 inp->coda_close.flags = flags;
185 172
diff --git a/fs/compat.c b/fs/compat.c
index ed43e17a5dc6..106eba28ec5a 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -197,8 +197,8 @@ static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *
197{ 197{
198 198
199 if (sizeof ubuf->f_blocks == 4) { 199 if (sizeof ubuf->f_blocks == 4) {
200 if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail) & 200 if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail |
201 0xffffffff00000000ULL) 201 kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
202 return -EOVERFLOW; 202 return -EOVERFLOW;
203 /* f_files and f_ffree may be -1; it's okay 203 /* f_files and f_ffree may be -1; it's okay
204 * to stuff that into 32 bits */ 204 * to stuff that into 32 bits */
@@ -271,8 +271,8 @@ out:
271static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf) 271static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
272{ 272{
273 if (sizeof ubuf->f_blocks == 4) { 273 if (sizeof ubuf->f_blocks == 4) {
274 if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail) & 274 if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail |
275 0xffffffff00000000ULL) 275 kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
276 return -EOVERFLOW; 276 return -EOVERFLOW;
277 /* f_files and f_ffree may be -1; it's okay 277 /* f_files and f_ffree may be -1; it's okay
278 * to stuff that into 32 bits */ 278 * to stuff that into 32 bits */
@@ -2131,9 +2131,9 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
2131 2131
2132#ifdef CONFIG_SIGNALFD 2132#ifdef CONFIG_SIGNALFD
2133 2133
2134asmlinkage long compat_sys_signalfd(int ufd, 2134asmlinkage long compat_sys_signalfd4(int ufd,
2135 const compat_sigset_t __user *sigmask, 2135 const compat_sigset_t __user *sigmask,
2136 compat_size_t sigsetsize) 2136 compat_size_t sigsetsize, int flags)
2137{ 2137{
2138 compat_sigset_t ss32; 2138 compat_sigset_t ss32;
2139 sigset_t tmp; 2139 sigset_t tmp;
@@ -2148,9 +2148,15 @@ asmlinkage long compat_sys_signalfd(int ufd,
2148 if (copy_to_user(ksigmask, &tmp, sizeof(sigset_t))) 2148 if (copy_to_user(ksigmask, &tmp, sizeof(sigset_t)))
2149 return -EFAULT; 2149 return -EFAULT;
2150 2150
2151 return sys_signalfd(ufd, ksigmask, sizeof(sigset_t)); 2151 return sys_signalfd4(ufd, ksigmask, sizeof(sigset_t), flags);
2152} 2152}
2153 2153
2154asmlinkage long compat_sys_signalfd(int ufd,
2155 const compat_sigset_t __user *sigmask,
2156 compat_size_t sigsetsize)
2157{
2158 return compat_sys_signalfd4(ufd, sigmask, sigsetsize, 0);
2159}
2154#endif /* CONFIG_SIGNALFD */ 2160#endif /* CONFIG_SIGNALFD */
2155 2161
2156#ifdef CONFIG_TIMERFD 2162#ifdef CONFIG_TIMERFD
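
The put_compat_statfs()/put_compat_statfs64() hunks above widen the EOVERFLOW test so that oversized block sizes are rejected as well, not only oversized block counts. The check OR-s every field that must fit a 32-bit slot and masks the upper 32 bits, flagging any value with bits set above bit 31 in a single comparison; as a standalone sketch:

        /* Returns non-zero when all values fit the 32-bit compat layout. */
        static int fits_compat_statfs(const struct kstatfs *k)
        {
                u64 high = (k->f_blocks | k->f_bfree | k->f_bavail |
                            k->f_bsize | k->f_frsize) & 0xffffffff00000000ULL;

                return high == 0;
        }
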
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 7b3a03c7c6a9..5235c67e7594 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -25,7 +25,6 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/raid/md.h> 26#include <linux/raid/md.h>
27#include <linux/kd.h> 27#include <linux/kd.h>
28#include <linux/dirent.h>
29#include <linux/route.h> 28#include <linux/route.h>
30#include <linux/in6.h> 29#include <linux/in6.h>
31#include <linux/ipv6_route.h> 30#include <linux/ipv6_route.h>
@@ -2297,8 +2296,6 @@ COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER)
2297COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE) 2296COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE)
2298COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI) 2297COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
2299COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOSUBVER) 2298COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOSUBVER)
2300COMPATIBLE_IOCTL(AUTOFS_IOC_ASKREGHOST)
2301COMPATIBLE_IOCTL(AUTOFS_IOC_TOGGLEREGHOST)
2302COMPATIBLE_IOCTL(AUTOFS_IOC_ASKUMOUNT) 2299COMPATIBLE_IOCTL(AUTOFS_IOC_ASKUMOUNT)
2303/* Raw devices */ 2300/* Raw devices */
2304COMPATIBLE_IOCTL(RAW_SETBIND) 2301COMPATIBLE_IOCTL(RAW_SETBIND)
diff --git a/fs/dcache.c b/fs/dcache.c
index 6068c25b393c..3818d6ab76ca 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -61,7 +61,6 @@ static struct kmem_cache *dentry_cache __read_mostly;
61static unsigned int d_hash_mask __read_mostly; 61static unsigned int d_hash_mask __read_mostly;
62static unsigned int d_hash_shift __read_mostly; 62static unsigned int d_hash_shift __read_mostly;
63static struct hlist_head *dentry_hashtable __read_mostly; 63static struct hlist_head *dentry_hashtable __read_mostly;
64static LIST_HEAD(dentry_unused);
65 64
66/* Statistics gathering. */ 65/* Statistics gathering. */
67struct dentry_stat_t dentry_stat = { 66struct dentry_stat_t dentry_stat = {
@@ -96,14 +95,6 @@ static void d_free(struct dentry *dentry)
96 call_rcu(&dentry->d_u.d_rcu, d_callback); 95 call_rcu(&dentry->d_u.d_rcu, d_callback);
97} 96}
98 97
99static void dentry_lru_remove(struct dentry *dentry)
100{
101 if (!list_empty(&dentry->d_lru)) {
102 list_del_init(&dentry->d_lru);
103 dentry_stat.nr_unused--;
104 }
105}
106
107/* 98/*
108 * Release the dentry's inode, using the filesystem 99 * Release the dentry's inode, using the filesystem
109 * d_iput() operation if defined. 100 * d_iput() operation if defined.
@@ -130,6 +121,41 @@ static void dentry_iput(struct dentry * dentry)
130 } 121 }
131} 122}
132 123
124/*
125 * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
126 */
127static void dentry_lru_add(struct dentry *dentry)
128{
129 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
130 dentry->d_sb->s_nr_dentry_unused++;
131 dentry_stat.nr_unused++;
132}
133
134static void dentry_lru_add_tail(struct dentry *dentry)
135{
136 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
137 dentry->d_sb->s_nr_dentry_unused++;
138 dentry_stat.nr_unused++;
139}
140
141static void dentry_lru_del(struct dentry *dentry)
142{
143 if (!list_empty(&dentry->d_lru)) {
144 list_del(&dentry->d_lru);
145 dentry->d_sb->s_nr_dentry_unused--;
146 dentry_stat.nr_unused--;
147 }
148}
149
150static void dentry_lru_del_init(struct dentry *dentry)
151{
152 if (likely(!list_empty(&dentry->d_lru))) {
153 list_del_init(&dentry->d_lru);
154 dentry->d_sb->s_nr_dentry_unused--;
155 dentry_stat.nr_unused--;
156 }
157}
158
133/** 159/**
134 * d_kill - kill dentry and return parent 160 * d_kill - kill dentry and return parent
135 * @dentry: dentry to kill 161 * @dentry: dentry to kill
@@ -212,8 +238,7 @@ repeat:
212 goto kill_it; 238 goto kill_it;
213 if (list_empty(&dentry->d_lru)) { 239 if (list_empty(&dentry->d_lru)) {
214 dentry->d_flags |= DCACHE_REFERENCED; 240 dentry->d_flags |= DCACHE_REFERENCED;
215 list_add(&dentry->d_lru, &dentry_unused); 241 dentry_lru_add(dentry);
216 dentry_stat.nr_unused++;
217 } 242 }
218 spin_unlock(&dentry->d_lock); 243 spin_unlock(&dentry->d_lock);
219 spin_unlock(&dcache_lock); 244 spin_unlock(&dcache_lock);
@@ -222,7 +247,8 @@ repeat:
222unhash_it: 247unhash_it:
223 __d_drop(dentry); 248 __d_drop(dentry);
224kill_it: 249kill_it:
225 dentry_lru_remove(dentry); 250 /* if dentry was on the d_lru list delete it from there */
251 dentry_lru_del(dentry);
226 dentry = d_kill(dentry); 252 dentry = d_kill(dentry);
227 if (dentry) 253 if (dentry)
228 goto repeat; 254 goto repeat;
@@ -290,7 +316,7 @@ int d_invalidate(struct dentry * dentry)
290static inline struct dentry * __dget_locked(struct dentry *dentry) 316static inline struct dentry * __dget_locked(struct dentry *dentry)
291{ 317{
292 atomic_inc(&dentry->d_count); 318 atomic_inc(&dentry->d_count);
293 dentry_lru_remove(dentry); 319 dentry_lru_del_init(dentry);
294 return dentry; 320 return dentry;
295} 321}
296 322
@@ -406,133 +432,167 @@ static void prune_one_dentry(struct dentry * dentry)
406 432
407 if (dentry->d_op && dentry->d_op->d_delete) 433 if (dentry->d_op && dentry->d_op->d_delete)
408 dentry->d_op->d_delete(dentry); 434 dentry->d_op->d_delete(dentry);
409 dentry_lru_remove(dentry); 435 dentry_lru_del_init(dentry);
410 __d_drop(dentry); 436 __d_drop(dentry);
411 dentry = d_kill(dentry); 437 dentry = d_kill(dentry);
412 spin_lock(&dcache_lock); 438 spin_lock(&dcache_lock);
413 } 439 }
414} 440}
415 441
416/** 442/*
417 * prune_dcache - shrink the dcache 443 * Shrink the dentry LRU on a given superblock.
418 * @count: number of entries to try and free 444 * @sb : superblock to shrink dentry LRU.
419 * @sb: if given, ignore dentries for other superblocks 445 * @count: If count is NULL, we prune all dentries on superblock.
420 * which are being unmounted. 446 * @flags: If flags is non-zero, we need to do special processing based on
421 * 447 * which flags are set. This means we don't need to maintain multiple
422 * Shrink the dcache. This is done when we need 448 * similar copies of this loop.
423 * more memory, or simply when we need to unmount
424 * something (at which point we need to unuse
425 * all dentries).
426 *
427 * This function may fail to free any resources if
428 * all the dentries are in use.
429 */ 449 */
430 450static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
431static void prune_dcache(int count, struct super_block *sb)
432{ 451{
433 spin_lock(&dcache_lock); 452 LIST_HEAD(referenced);
434 for (; count ; count--) { 453 LIST_HEAD(tmp);
435 struct dentry *dentry; 454 struct dentry *dentry;
436 struct list_head *tmp; 455 int cnt = 0;
437 struct rw_semaphore *s_umount;
438
439 cond_resched_lock(&dcache_lock);
440 456
441 tmp = dentry_unused.prev; 457 BUG_ON(!sb);
442 if (sb) { 458 BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
443 /* Try to find a dentry for this sb, but don't try 459 spin_lock(&dcache_lock);
444 * too hard, if they aren't near the tail they will 460 if (count != NULL)
445 * be moved down again soon 461 /* called from prune_dcache() and shrink_dcache_parent() */
462 cnt = *count;
463restart:
464 if (count == NULL)
465 list_splice_init(&sb->s_dentry_lru, &tmp);
466 else {
467 while (!list_empty(&sb->s_dentry_lru)) {
468 dentry = list_entry(sb->s_dentry_lru.prev,
469 struct dentry, d_lru);
470 BUG_ON(dentry->d_sb != sb);
471
472 spin_lock(&dentry->d_lock);
473 /*
474 * If we are honouring the DCACHE_REFERENCED flag and
475 * the dentry has this flag set, don't free it. Clear
476 * the flag and put it back on the LRU.
446 */ 477 */
447 int skip = count; 478 if ((flags & DCACHE_REFERENCED)
448 while (skip && tmp != &dentry_unused && 479 && (dentry->d_flags & DCACHE_REFERENCED)) {
449 list_entry(tmp, struct dentry, d_lru)->d_sb != sb) { 480 dentry->d_flags &= ~DCACHE_REFERENCED;
450 skip--; 481 list_move_tail(&dentry->d_lru, &referenced);
451 tmp = tmp->prev; 482 spin_unlock(&dentry->d_lock);
483 } else {
484 list_move_tail(&dentry->d_lru, &tmp);
485 spin_unlock(&dentry->d_lock);
486 cnt--;
487 if (!cnt)
488 break;
452 } 489 }
453 } 490 }
454 if (tmp == &dentry_unused) 491 }
455 break; 492 while (!list_empty(&tmp)) {
456 list_del_init(tmp); 493 dentry = list_entry(tmp.prev, struct dentry, d_lru);
457 prefetch(dentry_unused.prev); 494 dentry_lru_del_init(dentry);
458 dentry_stat.nr_unused--; 495 spin_lock(&dentry->d_lock);
459 dentry = list_entry(tmp, struct dentry, d_lru);
460
461 spin_lock(&dentry->d_lock);
462 /* 496 /*
463 * We found an inuse dentry which was not removed from 497 * We found an inuse dentry which was not removed from
464 * dentry_unused because of laziness during lookup. Do not free 498 * the LRU because of laziness during lookup. Do not free
465 * it - just keep it off the dentry_unused list. 499 * it - just keep it off the LRU list.
466 */ 500 */
467 if (atomic_read(&dentry->d_count)) { 501 if (atomic_read(&dentry->d_count)) {
468 spin_unlock(&dentry->d_lock); 502 spin_unlock(&dentry->d_lock);
469 continue; 503 continue;
470 } 504 }
471 /* If the dentry was recently referenced, don't free it. */ 505 prune_one_dentry(dentry);
472 if (dentry->d_flags & DCACHE_REFERENCED) { 506 /* dentry->d_lock was dropped in prune_one_dentry() */
473 dentry->d_flags &= ~DCACHE_REFERENCED; 507 cond_resched_lock(&dcache_lock);
474 list_add(&dentry->d_lru, &dentry_unused); 508 }
475 dentry_stat.nr_unused++; 509 if (count == NULL && !list_empty(&sb->s_dentry_lru))
476 spin_unlock(&dentry->d_lock); 510 goto restart;
511 if (count != NULL)
512 *count = cnt;
513 if (!list_empty(&referenced))
514 list_splice(&referenced, &sb->s_dentry_lru);
515 spin_unlock(&dcache_lock);
516}
517
518/**
519 * prune_dcache - shrink the dcache
520 * @count: number of entries to try to free
521 *
522 * Shrink the dcache. This is done when we need more memory, or simply when we
523 * need to unmount something (at which point we need to unuse all dentries).
524 *
525 * This function may fail to free any resources if all the dentries are in use.
526 */
527static void prune_dcache(int count)
528{
529 struct super_block *sb;
530 int w_count;
531 int unused = dentry_stat.nr_unused;
532 int prune_ratio;
533 int pruned;
534
535 if (unused == 0 || count == 0)
536 return;
537 spin_lock(&dcache_lock);
538restart:
539 if (count >= unused)
540 prune_ratio = 1;
541 else
542 prune_ratio = unused / count;
543 spin_lock(&sb_lock);
544 list_for_each_entry(sb, &super_blocks, s_list) {
545 if (sb->s_nr_dentry_unused == 0)
477 continue; 546 continue;
478 } 547 sb->s_count++;
479 /* 548 /* Now, we reclaim unused dentrins with fairness.
480 * If the dentry is not DCACHED_REFERENCED, it is time 549 * We reclaim them same percentage from each superblock.
481 * to remove it from the dcache, provided the super block is 550 * We calculate number of dentries to scan on this sb
482 * NULL (which means we are trying to reclaim memory) 551 * as follows, but the implementation is arranged to avoid
483 * or this dentry belongs to the same super block that 552 * overflows:
484 * we want to shrink. 553 * number of dentries to scan on this sb =
485 */ 554 * count * (number of dentries on this sb /
486 /* 555 * number of dentries in the machine)
487 * If this dentry is for "my" filesystem, then I can prune it
488 * without taking the s_umount lock (I already hold it).
489 */ 556 */
490 if (sb && dentry->d_sb == sb) { 557 spin_unlock(&sb_lock);
491 prune_one_dentry(dentry); 558 if (prune_ratio != 1)
492 continue; 559 w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
493 } 560 else
561 w_count = sb->s_nr_dentry_unused;
562 pruned = w_count;
494 /* 563 /*
495 * ...otherwise we need to be sure this filesystem isn't being 564 * We need to be sure this filesystem isn't being unmounted,
496 * unmounted, otherwise we could race with 565 * otherwise we could race with generic_shutdown_super(), and
497 * generic_shutdown_super(), and end up holding a reference to 566 * end up holding a reference to an inode while the filesystem
498 * an inode while the filesystem is unmounted. 567 * is unmounted. So we try to get s_umount, and make sure
499 * So we try to get s_umount, and make sure s_root isn't NULL. 568 * s_root isn't NULL.
500 * (Take a local copy of s_umount to avoid a use-after-free of
501 * `dentry').
502 */ 569 */
503 s_umount = &dentry->d_sb->s_umount; 570 if (down_read_trylock(&sb->s_umount)) {
504 if (down_read_trylock(s_umount)) { 571 if ((sb->s_root != NULL) &&
505 if (dentry->d_sb->s_root != NULL) { 572 (!list_empty(&sb->s_dentry_lru))) {
506 prune_one_dentry(dentry); 573 spin_unlock(&dcache_lock);
507 up_read(s_umount); 574 __shrink_dcache_sb(sb, &w_count,
508 continue; 575 DCACHE_REFERENCED);
576 pruned -= w_count;
577 spin_lock(&dcache_lock);
509 } 578 }
510 up_read(s_umount); 579 up_read(&sb->s_umount);
511 } 580 }
512 spin_unlock(&dentry->d_lock); 581 spin_lock(&sb_lock);
582 count -= pruned;
513 /* 583 /*
514 * Insert dentry at the head of the list as inserting at the 584 * restart only when sb is no longer on the list and
515 * tail leads to a cycle. 585 * we have more work to do.
516 */ 586 */
517 list_add(&dentry->d_lru, &dentry_unused); 587 if (__put_super_and_need_restart(sb) && count > 0) {
518 dentry_stat.nr_unused++; 588 spin_unlock(&sb_lock);
589 goto restart;
590 }
519 } 591 }
592 spin_unlock(&sb_lock);
520 spin_unlock(&dcache_lock); 593 spin_unlock(&dcache_lock);
521} 594}
522 595
523/*
524 * Shrink the dcache for the specified super block.
525 * This allows us to unmount a device without disturbing
526 * the dcache for the other devices.
527 *
528 * This implementation makes just two traversals of the
529 * unused list. On the first pass we move the selected
530 * dentries to the most recent end, and on the second
531 * pass we free them. The second pass must restart after
532 * each dput(), but since the target dentries are all at
533 * the end, it's really just a single traversal.
534 */
535
536/** 596/**
537 * shrink_dcache_sb - shrink dcache for a superblock 597 * shrink_dcache_sb - shrink dcache for a superblock
538 * @sb: superblock 598 * @sb: superblock
@@ -541,44 +601,9 @@ static void prune_dcache(int count, struct super_block *sb)
541 * is used to free the dcache before unmounting a file 601 * is used to free the dcache before unmounting a file
542 * system 602 * system
543 */ 603 */
544
545void shrink_dcache_sb(struct super_block * sb) 604void shrink_dcache_sb(struct super_block * sb)
546{ 605{
547 struct list_head *tmp, *next; 606 __shrink_dcache_sb(sb, NULL, 0);
548 struct dentry *dentry;
549
550 /*
551 * Pass one ... move the dentries for the specified
552 * superblock to the most recent end of the unused list.
553 */
554 spin_lock(&dcache_lock);
555 list_for_each_prev_safe(tmp, next, &dentry_unused) {
556 dentry = list_entry(tmp, struct dentry, d_lru);
557 if (dentry->d_sb != sb)
558 continue;
559 list_move_tail(tmp, &dentry_unused);
560 }
561
562 /*
563 * Pass two ... free the dentries for this superblock.
564 */
565repeat:
566 list_for_each_prev_safe(tmp, next, &dentry_unused) {
567 dentry = list_entry(tmp, struct dentry, d_lru);
568 if (dentry->d_sb != sb)
569 continue;
570 dentry_stat.nr_unused--;
571 list_del_init(tmp);
572 spin_lock(&dentry->d_lock);
573 if (atomic_read(&dentry->d_count)) {
574 spin_unlock(&dentry->d_lock);
575 continue;
576 }
577 prune_one_dentry(dentry);
578 cond_resched_lock(&dcache_lock);
579 goto repeat;
580 }
581 spin_unlock(&dcache_lock);
582} 607}
583 608
584/* 609/*
@@ -595,7 +620,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
595 620
596 /* detach this root from the system */ 621 /* detach this root from the system */
597 spin_lock(&dcache_lock); 622 spin_lock(&dcache_lock);
598 dentry_lru_remove(dentry); 623 dentry_lru_del_init(dentry);
599 __d_drop(dentry); 624 __d_drop(dentry);
600 spin_unlock(&dcache_lock); 625 spin_unlock(&dcache_lock);
601 626
@@ -609,7 +634,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
609 spin_lock(&dcache_lock); 634 spin_lock(&dcache_lock);
610 list_for_each_entry(loop, &dentry->d_subdirs, 635 list_for_each_entry(loop, &dentry->d_subdirs,
611 d_u.d_child) { 636 d_u.d_child) {
612 dentry_lru_remove(loop); 637 dentry_lru_del_init(loop);
613 __d_drop(loop); 638 __d_drop(loop);
614 cond_resched_lock(&dcache_lock); 639 cond_resched_lock(&dcache_lock);
615 } 640 }
@@ -791,14 +816,13 @@ resume:
791 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 816 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
792 next = tmp->next; 817 next = tmp->next;
793 818
794 dentry_lru_remove(dentry); 819 dentry_lru_del_init(dentry);
795 /* 820 /*
796 * move only zero ref count dentries to the end 821 * move only zero ref count dentries to the end
797 * of the unused list for prune_dcache 822 * of the unused list for prune_dcache
798 */ 823 */
799 if (!atomic_read(&dentry->d_count)) { 824 if (!atomic_read(&dentry->d_count)) {
800 list_add_tail(&dentry->d_lru, &dentry_unused); 825 dentry_lru_add_tail(dentry);
801 dentry_stat.nr_unused++;
802 found++; 826 found++;
803 } 827 }
804 828
@@ -840,10 +864,11 @@ out:
840 864
841void shrink_dcache_parent(struct dentry * parent) 865void shrink_dcache_parent(struct dentry * parent)
842{ 866{
867 struct super_block *sb = parent->d_sb;
843 int found; 868 int found;
844 869
845 while ((found = select_parent(parent)) != 0) 870 while ((found = select_parent(parent)) != 0)
846 prune_dcache(found, parent->d_sb); 871 __shrink_dcache_sb(sb, &found, 0);
847} 872}
848 873
849/* 874/*
@@ -863,7 +888,7 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
863 if (nr) { 888 if (nr) {
864 if (!(gfp_mask & __GFP_FS)) 889 if (!(gfp_mask & __GFP_FS))
865 return -1; 890 return -1;
866 prune_dcache(nr, NULL); 891 prune_dcache(nr);
867 } 892 }
868 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; 893 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
869} 894}
@@ -1215,7 +1240,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1215 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while 1240 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
1216 * lookup is going on. 1241 * lookup is going on.
1217 * 1242 *
1218 * dentry_unused list is not updated even if lookup finds the required dentry 1243 * The dentry unused LRU is not updated even if lookup finds the required dentry
1219 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb, 1244 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
1220 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock 1245 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
1221 * acquisition. 1246 * acquisition.
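
The dcache hunks above fold the old open-coded scan in shrink_dcache_sb() into a shared __shrink_dcache_sb() helper plus small LRU helpers (dentry_lru_del_init(), dentry_lru_add_tail()), so prune_dcache(), shrink_dcache_sb() and shrink_dcache_parent() all funnel through one walk. The removed code shows the underlying two-pass idea: walk the unused list backwards, park the target superblock's dentries at the tail, then free the unreferenced ones from the tail. A minimal user-space sketch of that pattern, with a toy entry type, no locking and illustrative names (this is not the kernel's list API):

#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *prev, *next;      /* intrusive LRU links */
        int sb_id;                      /* which "superblock" owns the entry */
        int refcount;                   /* busy entries are skipped, like d_count */
};

static struct entry lru = { &lru, &lru, 0, 0 };  /* circular list head */

static void lru_add_tail(struct entry *e)
{
        e->prev = lru.prev;
        e->next = &lru;
        lru.prev->next = e;
        lru.prev = e;
}

static void lru_del(struct entry *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

/* Pass 1: collect @sb_id entries at the tail; pass 2: free the unreferenced ones. */
static void shrink_sb(int sb_id)
{
        struct entry *e, *n;

        for (e = lru.prev; e != &lru; e = n) {   /* pass 1, tail towards head */
                n = e->prev;
                if (e->sb_id == sb_id) {
                        lru_del(e);
                        lru_add_tail(e);
                }
        }
        for (e = lru.prev; e != &lru; e = n) {   /* pass 2, free from the tail */
                n = e->prev;
                if (e->sb_id != sb_id || e->refcount)
                        continue;
                lru_del(e);
                free(e);
        }
}

int main(void)
{
        for (int i = 0; i < 6; i++) {
                struct entry *e = calloc(1, sizeof(*e));
                e->sb_id = i % 2;                /* entries from two "superblocks" */
                lru_add_tail(e);
        }
        shrink_sb(0);
        for (struct entry *e = lru.next; e != &lru; e = e->next)
                printf("left: sb=%d\n", e->sb_id);  /* only sb 1 entries remain */
        return 0;
}

Walking backwards in pass one matters: re-queued entries land at the tail, behind the cursor, so each node is visited exactly once.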
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index 78878c5781ca..eba87ff3177b 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -116,7 +116,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
116 if (xop->callback == NULL) 116 if (xop->callback == NULL)
117 wait_event(recv_wq, (op->done != 0)); 117 wait_event(recv_wq, (op->done != 0));
118 else { 118 else {
119 rv = -EINPROGRESS; 119 rv = FILE_LOCK_DEFERRED;
120 goto out; 120 goto out;
121 } 121 }
122 122
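
The dlm/plock.c change returns FILE_LOCK_DEFERRED instead of -EINPROGRESS when a callback will complete the POSIX lock later, so callers can tell a failure apart from an asynchronous completion. A tiny sketch of that return-value convention; request_lock() is invented for illustration and the constant's value is only a stand-in for the kernel's definition:

#include <stdio.h>

#define FILE_LOCK_DEFERRED 1    /* illustrative value; the kernel defines the real one */

/* Invented helper: <0 error, 0 granted now, FILE_LOCK_DEFERRED completes later. */
static int request_lock(int async)
{
        return async ? FILE_LOCK_DEFERRED : 0;
}

int main(void)
{
        int rv = request_lock(1);

        if (rv < 0)
                printf("lock failed: %d\n", rv);
        else if (rv == FILE_LOCK_DEFERRED)
                printf("lock queued; the callback will deliver the result\n");
        else
                printf("lock granted immediately\n");
        return 0;
}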
diff --git a/fs/dquot.c b/fs/dquot.c
index 5ac77da19959..1346eebe74ce 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -562,6 +562,8 @@ static struct shrinker dqcache_shrinker = {
562 */ 562 */
563static void dqput(struct dquot *dquot) 563static void dqput(struct dquot *dquot)
564{ 564{
565 int ret;
566
565 if (!dquot) 567 if (!dquot)
566 return; 568 return;
567#ifdef __DQUOT_PARANOIA 569#ifdef __DQUOT_PARANOIA
@@ -594,7 +596,19 @@ we_slept:
594 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { 596 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
595 spin_unlock(&dq_list_lock); 597 spin_unlock(&dq_list_lock);
596 /* Commit dquot before releasing */ 598 /* Commit dquot before releasing */
597 dquot->dq_sb->dq_op->write_dquot(dquot); 599 ret = dquot->dq_sb->dq_op->write_dquot(dquot);
600 if (ret < 0) {
601 printk(KERN_ERR "VFS: cannot write quota structure on "
602 "device %s (error %d). Quota may get out of "
603 "sync!\n", dquot->dq_sb->s_id, ret);
604 /*
605 * We clear dirty bit anyway, so that we avoid
606 * infinite loop here
607 */
608 spin_lock(&dq_list_lock);
609 clear_dquot_dirty(dquot);
610 spin_unlock(&dq_list_lock);
611 }
598 goto we_slept; 612 goto we_slept;
599 } 613 }
600 /* Clear flag in case dquot was inactive (something bad happened) */ 614 /* Clear flag in case dquot was inactive (something bad happened) */
@@ -875,7 +889,10 @@ static void print_warning(struct dquot *dquot, const int warntype)
875 char *msg = NULL; 889 char *msg = NULL;
876 struct tty_struct *tty; 890 struct tty_struct *tty;
877 891
878 if (!need_print_warning(dquot)) 892 if (warntype == QUOTA_NL_IHARDBELOW ||
893 warntype == QUOTA_NL_ISOFTBELOW ||
894 warntype == QUOTA_NL_BHARDBELOW ||
895 warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
879 return; 896 return;
880 897
881 mutex_lock(&tty_mutex); 898 mutex_lock(&tty_mutex);
@@ -1083,6 +1100,35 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1083 return QUOTA_OK; 1100 return QUOTA_OK;
1084} 1101}
1085 1102
1103static int info_idq_free(struct dquot *dquot, ulong inodes)
1104{
1105 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1106 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1107 return QUOTA_NL_NOWARN;
1108
1109 if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit)
1110 return QUOTA_NL_ISOFTBELOW;
1111 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1112 dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit)
1113 return QUOTA_NL_IHARDBELOW;
1114 return QUOTA_NL_NOWARN;
1115}
1116
1117static int info_bdq_free(struct dquot *dquot, qsize_t space)
1118{
1119 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1120 toqb(dquot->dq_dqb.dqb_curspace) <= dquot->dq_dqb.dqb_bsoftlimit)
1121 return QUOTA_NL_NOWARN;
1122
1123 if (toqb(dquot->dq_dqb.dqb_curspace - space) <=
1124 dquot->dq_dqb.dqb_bsoftlimit)
1125 return QUOTA_NL_BSOFTBELOW;
1126 if (toqb(dquot->dq_dqb.dqb_curspace) >= dquot->dq_dqb.dqb_bhardlimit &&
1127 toqb(dquot->dq_dqb.dqb_curspace - space) <
1128 dquot->dq_dqb.dqb_bhardlimit)
1129 return QUOTA_NL_BHARDBELOW;
1130 return QUOTA_NL_NOWARN;
1131}
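
info_idq_free() and info_bdq_free() above decide whether freeing inodes or blocks brings usage back below the soft or hard limit, so the corresponding *BELOW quota netlink events can be issued (the earlier print_warning() hunk keeps those events off the user's console). The same threshold-crossing test reduced to plain integers in a standalone sketch; the struct is a toy, not the kernel's dquot layout, and the DQ_FAKE check is left out:

#include <stdio.h>

enum { NOWARN, SOFTBELOW, HARDBELOW };

struct usage {
        unsigned long cur, soft, hard;
};

/* Which "back below a limit" event would freeing @n units trigger? */
static int info_free(const struct usage *u, unsigned long n)
{
        if (u->cur <= u->soft)
                return NOWARN;          /* was not above the soft limit */
        if (u->cur - n <= u->soft)
                return SOFTBELOW;       /* drops back under the soft limit */
        if (u->cur >= u->hard && u->cur - n < u->hard)
                return HARDBELOW;       /* drops back under the hard limit */
        return NOWARN;
}

int main(void)
{
        struct usage u = { .cur = 120, .soft = 100, .hard = 110 };

        printf("%d %d %d\n",
               info_free(&u, 5),        /* 0: still above both limits */
               info_free(&u, 15),       /* 2: crosses back under the hard limit */
               info_free(&u, 25));      /* 1: crosses back under the soft limit */
        return 0;
}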
1086/* 1132/*
1087 * Initialize quota pointers in inode 1133 * Initialize quota pointers in inode
1088 * Transaction must be started at entry 1134 * Transaction must be started at entry
@@ -1139,6 +1185,28 @@ int dquot_drop(struct inode *inode)
1139 return 0; 1185 return 0;
1140} 1186}
1141 1187
1188/* Wrapper to remove references to quota structures from inode */
1189void vfs_dq_drop(struct inode *inode)
1190{
1191 /* Here we can get arbitrary inode from clear_inode() so we have
1192 * to be careful. OTOH we don't need locking as quota operations
1193 * are allowed to change only at mount time */
1194 if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
1195 && inode->i_sb->dq_op->drop) {
1196 int cnt;
1197 /* Test before calling to rule out calls from proc and such
1198 * where we are not allowed to block. Note that this is
1199 * actually reliable test even without the lock - the caller
1200 * must assure that nobody can come after the DQUOT_DROP and
1201 * add quota pointers back anyway */
1202 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1203 if (inode->i_dquot[cnt] != NODQUOT)
1204 break;
1205 if (cnt < MAXQUOTAS)
1206 inode->i_sb->dq_op->drop(inode);
1207 }
1208}
1209
1142/* 1210/*
1143 * Following four functions update i_blocks+i_bytes fields and 1211 * Following four functions update i_blocks+i_bytes fields and
1144 * quota information (together with appropriate checks) 1212 * quota information (together with appropriate checks)
@@ -1248,6 +1316,7 @@ warn_put_all:
1248int dquot_free_space(struct inode *inode, qsize_t number) 1316int dquot_free_space(struct inode *inode, qsize_t number)
1249{ 1317{
1250 unsigned int cnt; 1318 unsigned int cnt;
1319 char warntype[MAXQUOTAS];
1251 1320
1252 /* First test before acquiring mutex - solves deadlocks when we 1321 /* First test before acquiring mutex - solves deadlocks when we
1253 * re-enter the quota code and are already holding the mutex */ 1322 * re-enter the quota code and are already holding the mutex */
@@ -1256,6 +1325,7 @@ out_sub:
1256 inode_sub_bytes(inode, number); 1325 inode_sub_bytes(inode, number);
1257 return QUOTA_OK; 1326 return QUOTA_OK;
1258 } 1327 }
1328
1259 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1329 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1260 /* Now recheck reliably when holding dqptr_sem */ 1330 /* Now recheck reliably when holding dqptr_sem */
1261 if (IS_NOQUOTA(inode)) { 1331 if (IS_NOQUOTA(inode)) {
@@ -1266,6 +1336,7 @@ out_sub:
1266 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1336 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1267 if (inode->i_dquot[cnt] == NODQUOT) 1337 if (inode->i_dquot[cnt] == NODQUOT)
1268 continue; 1338 continue;
1339 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
1269 dquot_decr_space(inode->i_dquot[cnt], number); 1340 dquot_decr_space(inode->i_dquot[cnt], number);
1270 } 1341 }
1271 inode_sub_bytes(inode, number); 1342 inode_sub_bytes(inode, number);
@@ -1274,6 +1345,7 @@ out_sub:
1274 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1345 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1275 if (inode->i_dquot[cnt]) 1346 if (inode->i_dquot[cnt])
1276 mark_dquot_dirty(inode->i_dquot[cnt]); 1347 mark_dquot_dirty(inode->i_dquot[cnt]);
1348 flush_warnings(inode->i_dquot, warntype);
1277 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1349 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1278 return QUOTA_OK; 1350 return QUOTA_OK;
1279} 1351}
@@ -1284,11 +1356,13 @@ out_sub:
1284int dquot_free_inode(const struct inode *inode, unsigned long number) 1356int dquot_free_inode(const struct inode *inode, unsigned long number)
1285{ 1357{
1286 unsigned int cnt; 1358 unsigned int cnt;
1359 char warntype[MAXQUOTAS];
1287 1360
1288 /* First test before acquiring mutex - solves deadlocks when we 1361 /* First test before acquiring mutex - solves deadlocks when we
1289 * re-enter the quota code and are already holding the mutex */ 1362 * re-enter the quota code and are already holding the mutex */
1290 if (IS_NOQUOTA(inode)) 1363 if (IS_NOQUOTA(inode))
1291 return QUOTA_OK; 1364 return QUOTA_OK;
1365
1292 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1366 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1293 /* Now recheck reliably when holding dqptr_sem */ 1367 /* Now recheck reliably when holding dqptr_sem */
1294 if (IS_NOQUOTA(inode)) { 1368 if (IS_NOQUOTA(inode)) {
@@ -1299,6 +1373,7 @@ int dquot_free_inode(const struct inode *inode, unsigned long number)
1299 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1373 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1300 if (inode->i_dquot[cnt] == NODQUOT) 1374 if (inode->i_dquot[cnt] == NODQUOT)
1301 continue; 1375 continue;
1376 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
1302 dquot_decr_inodes(inode->i_dquot[cnt], number); 1377 dquot_decr_inodes(inode->i_dquot[cnt], number);
1303 } 1378 }
1304 spin_unlock(&dq_data_lock); 1379 spin_unlock(&dq_data_lock);
@@ -1306,6 +1381,7 @@ int dquot_free_inode(const struct inode *inode, unsigned long number)
1306 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1381 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1307 if (inode->i_dquot[cnt]) 1382 if (inode->i_dquot[cnt])
1308 mark_dquot_dirty(inode->i_dquot[cnt]); 1383 mark_dquot_dirty(inode->i_dquot[cnt]);
1384 flush_warnings(inode->i_dquot, warntype);
1309 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1385 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1310 return QUOTA_OK; 1386 return QUOTA_OK;
1311} 1387}
@@ -1323,7 +1399,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1323 struct dquot *transfer_to[MAXQUOTAS]; 1399 struct dquot *transfer_to[MAXQUOTAS];
1324 int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid, 1400 int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
1325 chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; 1401 chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
1326 char warntype[MAXQUOTAS]; 1402 char warntype_to[MAXQUOTAS];
1403 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
1327 1404
1328 /* First test before acquiring mutex - solves deadlocks when we 1405 /* First test before acquiring mutex - solves deadlocks when we
1329 * re-enter the quota code and are already holding the mutex */ 1406 * re-enter the quota code and are already holding the mutex */
@@ -1332,7 +1409,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1332 /* Clear the arrays */ 1409 /* Clear the arrays */
1333 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1410 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1334 transfer_to[cnt] = transfer_from[cnt] = NODQUOT; 1411 transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
1335 warntype[cnt] = QUOTA_NL_NOWARN; 1412 warntype_to[cnt] = QUOTA_NL_NOWARN;
1336 } 1413 }
1337 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1414 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1338 /* Now recheck reliably when holding dqptr_sem */ 1415 /* Now recheck reliably when holding dqptr_sem */
@@ -1364,8 +1441,9 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1364 if (transfer_to[cnt] == NODQUOT) 1441 if (transfer_to[cnt] == NODQUOT)
1365 continue; 1442 continue;
1366 transfer_from[cnt] = inode->i_dquot[cnt]; 1443 transfer_from[cnt] = inode->i_dquot[cnt];
1367 if (check_idq(transfer_to[cnt], 1, warntype+cnt) == NO_QUOTA || 1444 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
1368 check_bdq(transfer_to[cnt], space, 0, warntype+cnt) == NO_QUOTA) 1445 NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
1446 warntype_to + cnt) == NO_QUOTA)
1369 goto warn_put_all; 1447 goto warn_put_all;
1370 } 1448 }
1371 1449
@@ -1381,6 +1459,10 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1381 1459
1382 /* Due to IO error we might not have transfer_from[] structure */ 1460 /* Due to IO error we might not have transfer_from[] structure */
1383 if (transfer_from[cnt]) { 1461 if (transfer_from[cnt]) {
1462 warntype_from_inodes[cnt] =
1463 info_idq_free(transfer_from[cnt], 1);
1464 warntype_from_space[cnt] =
1465 info_bdq_free(transfer_from[cnt], space);
1384 dquot_decr_inodes(transfer_from[cnt], 1); 1466 dquot_decr_inodes(transfer_from[cnt], 1);
1385 dquot_decr_space(transfer_from[cnt], space); 1467 dquot_decr_space(transfer_from[cnt], space);
1386 } 1468 }
@@ -1400,7 +1482,9 @@ warn_put_all:
1400 if (transfer_to[cnt]) 1482 if (transfer_to[cnt])
1401 mark_dquot_dirty(transfer_to[cnt]); 1483 mark_dquot_dirty(transfer_to[cnt]);
1402 } 1484 }
1403 flush_warnings(transfer_to, warntype); 1485 flush_warnings(transfer_to, warntype_to);
1486 flush_warnings(transfer_from, warntype_from_inodes);
1487 flush_warnings(transfer_from, warntype_from_space);
1404 1488
1405 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1489 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1406 if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT) 1490 if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
@@ -1412,6 +1496,18 @@ warn_put_all:
1412 return ret; 1496 return ret;
1413} 1497}
1414 1498
1499/* Wrapper for transferring ownership of an inode */
1500int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
1501{
1502 if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) {
1503 vfs_dq_init(inode);
1504 if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
1505 return 1;
1506 }
1507 return 0;
1508}
1509
1510
1415/* 1511/*
1416 * Write info of quota file to disk 1512 * Write info of quota file to disk
1417 */ 1513 */
@@ -1752,6 +1848,22 @@ out:
1752 return error; 1848 return error;
1753} 1849}
1754 1850
1851/* Wrapper to turn on quotas when remounting rw */
1852int vfs_dq_quota_on_remount(struct super_block *sb)
1853{
1854 int cnt;
1855 int ret = 0, err;
1856
1857 if (!sb->s_qcop || !sb->s_qcop->quota_on)
1858 return -ENOSYS;
1859 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1860 err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
1861 if (err < 0 && !ret)
1862 ret = err;
1863 }
1864 return ret;
1865}
1866
1755/* Generic routine for getting common part of quota structure */ 1867/* Generic routine for getting common part of quota structure */
1756static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) 1868static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
1757{ 1869{
@@ -2087,8 +2199,11 @@ EXPORT_SYMBOL(dquot_release);
2087EXPORT_SYMBOL(dquot_mark_dquot_dirty); 2199EXPORT_SYMBOL(dquot_mark_dquot_dirty);
2088EXPORT_SYMBOL(dquot_initialize); 2200EXPORT_SYMBOL(dquot_initialize);
2089EXPORT_SYMBOL(dquot_drop); 2201EXPORT_SYMBOL(dquot_drop);
2202EXPORT_SYMBOL(vfs_dq_drop);
2090EXPORT_SYMBOL(dquot_alloc_space); 2203EXPORT_SYMBOL(dquot_alloc_space);
2091EXPORT_SYMBOL(dquot_alloc_inode); 2204EXPORT_SYMBOL(dquot_alloc_inode);
2092EXPORT_SYMBOL(dquot_free_space); 2205EXPORT_SYMBOL(dquot_free_space);
2093EXPORT_SYMBOL(dquot_free_inode); 2206EXPORT_SYMBOL(dquot_free_inode);
2094EXPORT_SYMBOL(dquot_transfer); 2207EXPORT_SYMBOL(dquot_transfer);
2208EXPORT_SYMBOL(vfs_dq_transfer);
2209EXPORT_SYMBOL(vfs_dq_quota_on_remount);
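
The new vfs_dq_drop(), vfs_dq_transfer() and vfs_dq_quota_on_remount() wrappers centralize the "does this superblock use quotas at all?" checks that filesystems previously open-coded. The remount helper is notable for trying every quota type and reporting only the first error, so one failing type does not prevent the others from being re-enabled. A standalone sketch of that aggregation pattern; quota_on() here is a stand-in, not the kernel's s_qcop callback:

#include <stdio.h>

#define NTYPES 2        /* stands in for MAXQUOTAS */

/* Stand-in for sb->s_qcop->quota_on(); type 1 fails to show the aggregation. */
static int quota_on(int type)
{
        return type == 1 ? -5 : 0;      /* -5 plays the role of an I/O style error */
}

/* Mirrors the loop in vfs_dq_quota_on_remount(): try every type, keep first error. */
static int quota_on_remount(void)
{
        int ret = 0, err;

        for (int type = 0; type < NTYPES; type++) {
                err = quota_on(type);
                if (err < 0 && !ret)
                        ret = err;
        }
        return ret;
}

int main(void)
{
        printf("remount result: %d\n", quota_on_remount());     /* prints -5 */
        return 0;
}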
diff --git a/fs/ecryptfs/Makefile b/fs/ecryptfs/Makefile
index 1e34a7fd4884..b4755a85996e 100644
--- a/fs/ecryptfs/Makefile
+++ b/fs/ecryptfs/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o 5obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o
6 6
7ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o netlink.o miscdev.o debug.o 7ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o netlink.o miscdev.o kthread.o debug.o
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index e2832bc7869a..7b99917ffadc 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -33,6 +33,7 @@
33#include <linux/crypto.h> 33#include <linux/crypto.h>
34#include <linux/file.h> 34#include <linux/file.h>
35#include <linux/scatterlist.h> 35#include <linux/scatterlist.h>
36#include <asm/unaligned.h>
36#include "ecryptfs_kernel.h" 37#include "ecryptfs_kernel.h"
37 38
38static int 39static int
@@ -1032,10 +1033,8 @@ static int contains_ecryptfs_marker(char *data)
1032{ 1033{
1033 u32 m_1, m_2; 1034 u32 m_1, m_2;
1034 1035
1035 memcpy(&m_1, data, 4); 1036 m_1 = get_unaligned_be32(data);
1036 m_1 = be32_to_cpu(m_1); 1037 m_2 = get_unaligned_be32(data + 4);
1037 memcpy(&m_2, (data + 4), 4);
1038 m_2 = be32_to_cpu(m_2);
1039 if ((m_1 ^ MAGIC_ECRYPTFS_MARKER) == m_2) 1038 if ((m_1 ^ MAGIC_ECRYPTFS_MARKER) == m_2)
1040 return 1; 1039 return 1;
1041 ecryptfs_printk(KERN_DEBUG, "m_1 = [0x%.8x]; m_2 = [0x%.8x]; " 1040 ecryptfs_printk(KERN_DEBUG, "m_1 = [0x%.8x]; m_2 = [0x%.8x]; "
@@ -1073,8 +1072,7 @@ static int ecryptfs_process_flags(struct ecryptfs_crypt_stat *crypt_stat,
1073 int i; 1072 int i;
1074 u32 flags; 1073 u32 flags;
1075 1074
1076 memcpy(&flags, page_virt, 4); 1075 flags = get_unaligned_be32(page_virt);
1077 flags = be32_to_cpu(flags);
1078 for (i = 0; i < ((sizeof(ecryptfs_flag_map) 1076 for (i = 0; i < ((sizeof(ecryptfs_flag_map)
1079 / sizeof(struct ecryptfs_flag_map_elem))); i++) 1077 / sizeof(struct ecryptfs_flag_map_elem))); i++)
1080 if (flags & ecryptfs_flag_map[i].file_flag) { 1078 if (flags & ecryptfs_flag_map[i].file_flag) {
@@ -1100,11 +1098,9 @@ static void write_ecryptfs_marker(char *page_virt, size_t *written)
1100 1098
1101 get_random_bytes(&m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2)); 1099 get_random_bytes(&m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2));
1102 m_2 = (m_1 ^ MAGIC_ECRYPTFS_MARKER); 1100 m_2 = (m_1 ^ MAGIC_ECRYPTFS_MARKER);
1103 m_1 = cpu_to_be32(m_1); 1101 put_unaligned_be32(m_1, page_virt);
1104 memcpy(page_virt, &m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2)); 1102 page_virt += (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2);
1105 m_2 = cpu_to_be32(m_2); 1103 put_unaligned_be32(m_2, page_virt);
1106 memcpy(page_virt + (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2), &m_2,
1107 (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2));
1108 (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; 1104 (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
1109} 1105}
1110 1106
@@ -1121,8 +1117,7 @@ write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat,
1121 flags |= ecryptfs_flag_map[i].file_flag; 1117 flags |= ecryptfs_flag_map[i].file_flag;
1122 /* Version is in top 8 bits of the 32-bit flag vector */ 1118 /* Version is in top 8 bits of the 32-bit flag vector */
1123 flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000); 1119 flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000);
1124 flags = cpu_to_be32(flags); 1120 put_unaligned_be32(flags, page_virt);
1125 memcpy(page_virt, &flags, 4);
1126 (*written) = 4; 1121 (*written) = 4;
1127} 1122}
1128 1123
@@ -1238,11 +1233,9 @@ ecryptfs_write_header_metadata(char *virt,
1238 num_header_extents_at_front = 1233 num_header_extents_at_front =
1239 (u16)(crypt_stat->num_header_bytes_at_front 1234 (u16)(crypt_stat->num_header_bytes_at_front
1240 / crypt_stat->extent_size); 1235 / crypt_stat->extent_size);
1241 header_extent_size = cpu_to_be32(header_extent_size); 1236 put_unaligned_be32(header_extent_size, virt);
1242 memcpy(virt, &header_extent_size, 4);
1243 virt += 4; 1237 virt += 4;
1244 num_header_extents_at_front = cpu_to_be16(num_header_extents_at_front); 1238 put_unaligned_be16(num_header_extents_at_front, virt);
1245 memcpy(virt, &num_header_extents_at_front, 2);
1246 (*written) = 6; 1239 (*written) = 6;
1247} 1240}
1248 1241
@@ -1410,15 +1403,13 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
1410 u32 header_extent_size; 1403 u32 header_extent_size;
1411 u16 num_header_extents_at_front; 1404 u16 num_header_extents_at_front;
1412 1405
1413 memcpy(&header_extent_size, virt, sizeof(u32)); 1406 header_extent_size = get_unaligned_be32(virt);
1414 header_extent_size = be32_to_cpu(header_extent_size); 1407 virt += sizeof(__be32);
1415 virt += sizeof(u32); 1408 num_header_extents_at_front = get_unaligned_be16(virt);
1416 memcpy(&num_header_extents_at_front, virt, sizeof(u16));
1417 num_header_extents_at_front = be16_to_cpu(num_header_extents_at_front);
1418 crypt_stat->num_header_bytes_at_front = 1409 crypt_stat->num_header_bytes_at_front =
1419 (((size_t)num_header_extents_at_front 1410 (((size_t)num_header_extents_at_front
1420 * (size_t)header_extent_size)); 1411 * (size_t)header_extent_size));
1421 (*bytes_read) = (sizeof(u32) + sizeof(u16)); 1412 (*bytes_read) = (sizeof(__be32) + sizeof(__be16));
1422 if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) 1413 if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
1423 && (crypt_stat->num_header_bytes_at_front 1414 && (crypt_stat->num_header_bytes_at_front
1424 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { 1415 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
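
The crypto.c hunks replace memcpy() plus be32_to_cpu()/cpu_to_be32() pairs with get_unaligned_be32()/put_unaligned_be32(), single calls that read or write a big-endian value at a possibly unaligned address. A standalone illustration of the same semantics using explicit byte arithmetic; get_be32()/put_be32() are portable stand-ins, not the kernel helpers:

#include <stdio.h>
#include <stdint.h>

/* Portable equivalents of the {get,put}_unaligned_be32() semantics. */
static uint32_t get_be32(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static void put_be32(uint32_t v, unsigned char *p)
{
        p[0] = v >> 24;
        p[1] = v >> 16;
        p[2] = v >> 8;
        p[3] = v;
}

int main(void)
{
        unsigned char buf[16];

        put_be32(0x12345678u, buf + 1);          /* deliberately unaligned offset */
        printf("0x%08x\n", get_be32(buf + 1));   /* prints 0x12345678 */
        return 0;
}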
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index c15c25745e05..b73fb752c5f8 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -559,10 +559,25 @@ extern struct kmem_cache *ecryptfs_key_record_cache;
559extern struct kmem_cache *ecryptfs_key_sig_cache; 559extern struct kmem_cache *ecryptfs_key_sig_cache;
560extern struct kmem_cache *ecryptfs_global_auth_tok_cache; 560extern struct kmem_cache *ecryptfs_global_auth_tok_cache;
561extern struct kmem_cache *ecryptfs_key_tfm_cache; 561extern struct kmem_cache *ecryptfs_key_tfm_cache;
562extern struct kmem_cache *ecryptfs_open_req_cache;
562 563
564struct ecryptfs_open_req {
565#define ECRYPTFS_REQ_PROCESSED 0x00000001
566#define ECRYPTFS_REQ_DROPPED 0x00000002
567#define ECRYPTFS_REQ_ZOMBIE 0x00000004
568 u32 flags;
569 struct file **lower_file;
570 struct dentry *lower_dentry;
571 struct vfsmount *lower_mnt;
572 wait_queue_head_t wait;
573 struct mutex mux;
574 struct list_head kthread_ctl_list;
575};
576
577#define ECRYPTFS_INTERPOSE_FLAG_D_ADD 0x00000001
563int ecryptfs_interpose(struct dentry *hidden_dentry, 578int ecryptfs_interpose(struct dentry *hidden_dentry,
564 struct dentry *this_dentry, struct super_block *sb, 579 struct dentry *this_dentry, struct super_block *sb,
565 int flag); 580 u32 flags);
566int ecryptfs_fill_zeros(struct file *file, loff_t new_length); 581int ecryptfs_fill_zeros(struct file *file, loff_t new_length);
567int ecryptfs_decode_filename(struct ecryptfs_crypt_stat *crypt_stat, 582int ecryptfs_decode_filename(struct ecryptfs_crypt_stat *crypt_stat,
568 const char *name, int length, 583 const char *name, int length,
@@ -690,5 +705,11 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx);
690int 705int
691ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, 706ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid,
692 struct user_namespace *user_ns, struct pid *pid); 707 struct user_namespace *user_ns, struct pid *pid);
708int ecryptfs_init_kthread(void);
709void ecryptfs_destroy_kthread(void);
710int ecryptfs_privileged_open(struct file **lower_file,
711 struct dentry *lower_dentry,
712 struct vfsmount *lower_mnt);
713int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry);
693 714
694#endif /* #ifndef ECRYPTFS_KERNEL_H */ 715#endif /* #ifndef ECRYPTFS_KERNEL_H */

diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 24749bf0668f..9244d653743e 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -192,6 +192,23 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
192 | ECRYPTFS_ENCRYPTED); 192 | ECRYPTFS_ENCRYPTED);
193 } 193 }
194 mutex_unlock(&crypt_stat->cs_mutex); 194 mutex_unlock(&crypt_stat->cs_mutex);
195 if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
196 && !(file->f_flags & O_RDONLY)) {
197 rc = -EPERM;
198 printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
199 "file must hence be opened RO\n", __func__);
200 goto out;
201 }
202 if (!ecryptfs_inode_to_private(inode)->lower_file) {
203 rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
204 if (rc) {
205 printk(KERN_ERR "%s: Error attempting to initialize "
206 "the persistent file for the dentry with name "
207 "[%s]; rc = [%d]\n", __func__,
208 ecryptfs_dentry->d_name.name, rc);
209 goto out;
210 }
211 }
195 ecryptfs_set_file_lower( 212 ecryptfs_set_file_lower(
196 file, ecryptfs_inode_to_private(inode)->lower_file); 213 file, ecryptfs_inode_to_private(inode)->lower_file);
197 if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { 214 if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index c92cc1c00aae..d755455e3bff 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -31,6 +31,7 @@
31#include <linux/mount.h> 31#include <linux/mount.h>
32#include <linux/crypto.h> 32#include <linux/crypto.h>
33#include <linux/fs_stack.h> 33#include <linux/fs_stack.h>
34#include <asm/unaligned.h>
34#include "ecryptfs_kernel.h" 35#include "ecryptfs_kernel.h"
35 36
36static struct dentry *lock_parent(struct dentry *dentry) 37static struct dentry *lock_parent(struct dentry *dentry)
@@ -188,6 +189,16 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
188 "context; rc = [%d]\n", rc); 189 "context; rc = [%d]\n", rc);
189 goto out; 190 goto out;
190 } 191 }
192 if (!ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->lower_file) {
193 rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
194 if (rc) {
195 printk(KERN_ERR "%s: Error attempting to initialize "
196 "the persistent file for the dentry with name "
197 "[%s]; rc = [%d]\n", __func__,
198 ecryptfs_dentry->d_name.name, rc);
199 goto out;
200 }
201 }
191 rc = ecryptfs_write_metadata(ecryptfs_dentry); 202 rc = ecryptfs_write_metadata(ecryptfs_dentry);
192 if (rc) { 203 if (rc) {
193 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); 204 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
@@ -307,10 +318,11 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
307 d_add(dentry, NULL); 318 d_add(dentry, NULL);
308 goto out; 319 goto out;
309 } 320 }
310 rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 1); 321 rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb,
322 ECRYPTFS_INTERPOSE_FLAG_D_ADD);
311 if (rc) { 323 if (rc) {
312 ecryptfs_printk(KERN_ERR, "Error interposing\n"); 324 ecryptfs_printk(KERN_ERR, "Error interposing\n");
313 goto out_dput; 325 goto out;
314 } 326 }
315 if (S_ISDIR(lower_inode->i_mode)) { 327 if (S_ISDIR(lower_inode->i_mode)) {
316 ecryptfs_printk(KERN_DEBUG, "Is a directory; returning\n"); 328 ecryptfs_printk(KERN_DEBUG, "Is a directory; returning\n");
@@ -336,11 +348,21 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
336 rc = -ENOMEM; 348 rc = -ENOMEM;
337 ecryptfs_printk(KERN_ERR, 349 ecryptfs_printk(KERN_ERR,
338 "Cannot ecryptfs_kmalloc a page\n"); 350 "Cannot ecryptfs_kmalloc a page\n");
339 goto out_dput; 351 goto out;
340 } 352 }
341 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; 353 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
342 if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)) 354 if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED))
343 ecryptfs_set_default_sizes(crypt_stat); 355 ecryptfs_set_default_sizes(crypt_stat);
356 if (!ecryptfs_inode_to_private(dentry->d_inode)->lower_file) {
357 rc = ecryptfs_init_persistent_file(dentry);
358 if (rc) {
359 printk(KERN_ERR "%s: Error attempting to initialize "
360 "the persistent file for the dentry with name "
361 "[%s]; rc = [%d]\n", __func__,
362 dentry->d_name.name, rc);
363 goto out;
364 }
365 }
344 rc = ecryptfs_read_and_validate_header_region(page_virt, 366 rc = ecryptfs_read_and_validate_header_region(page_virt,
345 dentry->d_inode); 367 dentry->d_inode);
346 if (rc) { 368 if (rc) {
@@ -364,8 +386,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
364 else 386 else
365 file_size = i_size_read(lower_dentry->d_inode); 387 file_size = i_size_read(lower_dentry->d_inode);
366 } else { 388 } else {
367 memcpy(&file_size, page_virt, sizeof(file_size)); 389 file_size = get_unaligned_be64(page_virt);
368 file_size = be64_to_cpu(file_size);
369 } 390 }
370 i_size_write(dentry->d_inode, (loff_t)file_size); 391 i_size_write(dentry->d_inode, (loff_t)file_size);
371 kmem_cache_free(ecryptfs_header_cache_2, page_virt); 392 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index e82b457180be..f5b76a331b9c 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -44,15 +44,15 @@ static int process_request_key_err(long err_code)
44 int rc = 0; 44 int rc = 0;
45 45
46 switch (err_code) { 46 switch (err_code) {
47 case ENOKEY: 47 case -ENOKEY:
48 ecryptfs_printk(KERN_WARNING, "No key\n"); 48 ecryptfs_printk(KERN_WARNING, "No key\n");
49 rc = -ENOENT; 49 rc = -ENOENT;
50 break; 50 break;
51 case EKEYEXPIRED: 51 case -EKEYEXPIRED:
52 ecryptfs_printk(KERN_WARNING, "Key expired\n"); 52 ecryptfs_printk(KERN_WARNING, "Key expired\n");
53 rc = -ETIME; 53 rc = -ETIME;
54 break; 54 break;
55 case EKEYREVOKED: 55 case -EKEYREVOKED:
56 ecryptfs_printk(KERN_WARNING, "Key revoked\n"); 56 ecryptfs_printk(KERN_WARNING, "Key revoked\n");
57 rc = -EINVAL; 57 rc = -EINVAL;
58 break; 58 break;
@@ -963,8 +963,7 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
963 if (!(*auth_tok_key) || IS_ERR(*auth_tok_key)) { 963 if (!(*auth_tok_key) || IS_ERR(*auth_tok_key)) {
964 printk(KERN_ERR "Could not find key with description: [%s]\n", 964 printk(KERN_ERR "Could not find key with description: [%s]\n",
965 sig); 965 sig);
966 process_request_key_err(PTR_ERR(*auth_tok_key)); 966 rc = process_request_key_err(PTR_ERR(*auth_tok_key));
967 rc = -EINVAL;
968 goto out; 967 goto out;
969 } 968 }
970 (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key); 969 (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key);
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
new file mode 100644
index 000000000000..c440c6b58b2d
--- /dev/null
+++ b/fs/ecryptfs/kthread.c
@@ -0,0 +1,203 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 *
4 * Copyright (C) 2008 International Business Machines Corp.
5 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
20 * 02111-1307, USA.
21 */
22
23#include <linux/kthread.h>
24#include <linux/freezer.h>
25#include <linux/wait.h>
26#include <linux/mount.h>
27#include "ecryptfs_kernel.h"
28
29struct kmem_cache *ecryptfs_open_req_cache;
30
31static struct ecryptfs_kthread_ctl {
32#define ECRYPTFS_KTHREAD_ZOMBIE 0x00000001
33 u32 flags;
34 struct mutex mux;
35 struct list_head req_list;
36 wait_queue_head_t wait;
37} ecryptfs_kthread_ctl;
38
39static struct task_struct *ecryptfs_kthread;
40
41/**
42 * ecryptfs_threadfn
43 * @ignored: ignored
44 *
45 * The eCryptfs kernel thread that has the responsibility of getting
46 * the lower persistent file with RW permissions.
47 *
48 * Returns zero on success; non-zero otherwise
49 */
50static int ecryptfs_threadfn(void *ignored)
51{
52 set_freezable();
53 while (1) {
54 struct ecryptfs_open_req *req;
55
56 wait_event_freezable(
57 ecryptfs_kthread_ctl.wait,
58 (!list_empty(&ecryptfs_kthread_ctl.req_list)
59 || kthread_should_stop()));
60 mutex_lock(&ecryptfs_kthread_ctl.mux);
61 if (ecryptfs_kthread_ctl.flags & ECRYPTFS_KTHREAD_ZOMBIE) {
62 mutex_unlock(&ecryptfs_kthread_ctl.mux);
63 goto out;
64 }
65 while (!list_empty(&ecryptfs_kthread_ctl.req_list)) {
66 req = list_first_entry(&ecryptfs_kthread_ctl.req_list,
67 struct ecryptfs_open_req,
68 kthread_ctl_list);
69 mutex_lock(&req->mux);
70 list_del(&req->kthread_ctl_list);
71 if (!(req->flags & ECRYPTFS_REQ_ZOMBIE)) {
72 dget(req->lower_dentry);
73 mntget(req->lower_mnt);
74 (*req->lower_file) = dentry_open(
75 req->lower_dentry, req->lower_mnt,
76 (O_RDWR | O_LARGEFILE));
77 req->flags |= ECRYPTFS_REQ_PROCESSED;
78 }
79 wake_up(&req->wait);
80 mutex_unlock(&req->mux);
81 }
82 mutex_unlock(&ecryptfs_kthread_ctl.mux);
83 }
84out:
85 return 0;
86}
87
88int ecryptfs_init_kthread(void)
89{
90 int rc = 0;
91
92 mutex_init(&ecryptfs_kthread_ctl.mux);
93 init_waitqueue_head(&ecryptfs_kthread_ctl.wait);
94 INIT_LIST_HEAD(&ecryptfs_kthread_ctl.req_list);
95 ecryptfs_kthread = kthread_run(&ecryptfs_threadfn, NULL,
96 "ecryptfs-kthread");
97 if (IS_ERR(ecryptfs_kthread)) {
98 rc = PTR_ERR(ecryptfs_kthread);
99 printk(KERN_ERR "%s: Failed to create kernel thread; rc = [%d]"
100 "\n", __func__, rc);
101 }
102 return rc;
103}
104
105void ecryptfs_destroy_kthread(void)
106{
107 struct ecryptfs_open_req *req;
108
109 mutex_lock(&ecryptfs_kthread_ctl.mux);
110 ecryptfs_kthread_ctl.flags |= ECRYPTFS_KTHREAD_ZOMBIE;
111 list_for_each_entry(req, &ecryptfs_kthread_ctl.req_list,
112 kthread_ctl_list) {
113 mutex_lock(&req->mux);
114 req->flags |= ECRYPTFS_REQ_ZOMBIE;
115 wake_up(&req->wait);
116 mutex_unlock(&req->mux);
117 }
118 mutex_unlock(&ecryptfs_kthread_ctl.mux);
119 kthread_stop(ecryptfs_kthread);
120 wake_up(&ecryptfs_kthread_ctl.wait);
121}
122
123/**
124 * ecryptfs_privileged_open
125 * @lower_file: Result of dentry_open by root on lower dentry
126 * @lower_dentry: Lower dentry for file to open
127 * @lower_mnt: Lower vfsmount for file to open
128 *
129 * This function gets a r/w file opened against the lower dentry.
130 *
131 * Returns zero on success; non-zero otherwise
132 */
133int ecryptfs_privileged_open(struct file **lower_file,
134 struct dentry *lower_dentry,
135 struct vfsmount *lower_mnt)
136{
137 struct ecryptfs_open_req *req;
138 int rc = 0;
139
140 /* Corresponding dput() and mntput() are done when the
141 * persistent file is fput() when the eCryptfs inode is
142 * destroyed. */
143 dget(lower_dentry);
144 mntget(lower_mnt);
145 (*lower_file) = dentry_open(lower_dentry, lower_mnt,
146 (O_RDWR | O_LARGEFILE));
147 if (!IS_ERR(*lower_file))
148 goto out;
149 req = kmem_cache_alloc(ecryptfs_open_req_cache, GFP_KERNEL);
150 if (!req) {
151 rc = -ENOMEM;
152 goto out;
153 }
154 mutex_init(&req->mux);
155 req->lower_file = lower_file;
156 req->lower_dentry = lower_dentry;
157 req->lower_mnt = lower_mnt;
158 init_waitqueue_head(&req->wait);
159 req->flags = 0;
160 mutex_lock(&ecryptfs_kthread_ctl.mux);
161 if (ecryptfs_kthread_ctl.flags & ECRYPTFS_KTHREAD_ZOMBIE) {
162 rc = -EIO;
163 mutex_unlock(&ecryptfs_kthread_ctl.mux);
164 printk(KERN_ERR "%s: We are in the middle of shutting down; "
165 "aborting privileged request to open lower file\n",
166 __func__);
167 goto out_free;
168 }
169 list_add_tail(&req->kthread_ctl_list, &ecryptfs_kthread_ctl.req_list);
170 mutex_unlock(&ecryptfs_kthread_ctl.mux);
171 wake_up(&ecryptfs_kthread_ctl.wait);
172 wait_event(req->wait, (req->flags != 0));
173 mutex_lock(&req->mux);
174 BUG_ON(req->flags == 0);
175 if (req->flags & ECRYPTFS_REQ_DROPPED
176 || req->flags & ECRYPTFS_REQ_ZOMBIE) {
177 rc = -EIO;
178 printk(KERN_WARNING "%s: Privileged open request dropped\n",
179 __func__);
180 goto out_unlock;
181 }
182 if (IS_ERR(*req->lower_file)) {
183 rc = PTR_ERR(*req->lower_file);
184 dget(lower_dentry);
185 mntget(lower_mnt);
186 (*lower_file) = dentry_open(lower_dentry, lower_mnt,
187 (O_RDONLY | O_LARGEFILE));
188 if (IS_ERR(*lower_file)) {
189 rc = PTR_ERR(*req->lower_file);
190 (*lower_file) = NULL;
191 printk(KERN_WARNING "%s: Error attempting privileged "
192 "open of lower file with either RW or RO "
193 "perms; rc = [%d]. Giving up.\n",
194 __func__, rc);
195 }
196 }
197out_unlock:
198 mutex_unlock(&req->mux);
199out_free:
200 kmem_cache_free(ecryptfs_open_req_cache, req);
201out:
202 return rc;
203}
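
kthread.c adds a small privileged-open service: ecryptfs_privileged_open() queues an ecryptfs_open_req on the control list, wakes the kernel thread, and sleeps on req->wait until the thread, which runs with full kernel credentials, has attempted the read-write dentry_open(); on failure the caller retries read-only by itself. The same queue-request, wake-worker, wait-for-completion handoff sketched with POSIX threads in user space (single-slot queue for brevity; names are illustrative and this is not the kernel code):

/* Build with: cc -pthread example.c */
#include <pthread.h>
#include <stdio.h>

struct request {
        int done;                       /* set by the worker, like ECRYPTFS_REQ_PROCESSED */
        int result;
        pthread_mutex_t mux;
        pthread_cond_t wait;
};

static struct request *pending;         /* one-slot "queue" for brevity */
static pthread_mutex_t ctl_mux = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ctl_wait = PTHREAD_COND_INITIALIZER;

static void *worker(void *ignored)
{
        struct request *req;

        pthread_mutex_lock(&ctl_mux);   /* wait for a request to arrive */
        while (!pending)
                pthread_cond_wait(&ctl_wait, &ctl_mux);
        req = pending;
        pending = NULL;
        pthread_mutex_unlock(&ctl_mux);

        pthread_mutex_lock(&req->mux);  /* complete it and wake the submitter */
        req->result = 42;               /* stand-in for the privileged open */
        req->done = 1;
        pthread_cond_signal(&req->wait);
        pthread_mutex_unlock(&req->mux);
        return NULL;
}

int main(void)
{
        pthread_t tid;
        struct request req = { 0 };

        pthread_mutex_init(&req.mux, NULL);
        pthread_cond_init(&req.wait, NULL);
        pthread_create(&tid, NULL, worker, NULL);

        pthread_mutex_lock(&ctl_mux);   /* submit the request and wake the worker */
        pending = &req;
        pthread_cond_signal(&ctl_wait);
        pthread_mutex_unlock(&ctl_mux);

        pthread_mutex_lock(&req.mux);   /* sleep until the worker marks it done */
        while (!req.done)
                pthread_cond_wait(&req.wait, &req.mux);
        pthread_mutex_unlock(&req.mux);

        printf("worker answered %d\n", req.result);
        pthread_join(tid, NULL);
        return 0;
}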
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index d603631601eb..6f403cfba14f 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -117,7 +117,7 @@ void __ecryptfs_printk(const char *fmt, ...)
117 * 117 *
118 * Returns zero on success; non-zero otherwise 118 * Returns zero on success; non-zero otherwise
119 */ 119 */
120static int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) 120int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
121{ 121{
122 struct ecryptfs_inode_info *inode_info = 122 struct ecryptfs_inode_info *inode_info =
123 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); 123 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
@@ -130,26 +130,12 @@ static int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
130 ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry); 130 ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry);
131 131
132 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); 132 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
133 /* Corresponding dput() and mntput() are done when the 133 rc = ecryptfs_privileged_open(&inode_info->lower_file,
134 * persistent file is fput() when the eCryptfs inode 134 lower_dentry, lower_mnt);
135 * is destroyed. */ 135 if (rc || IS_ERR(inode_info->lower_file)) {
136 dget(lower_dentry);
137 mntget(lower_mnt);
138 inode_info->lower_file = dentry_open(lower_dentry,
139 lower_mnt,
140 (O_RDWR | O_LARGEFILE));
141 if (IS_ERR(inode_info->lower_file)) {
142 dget(lower_dentry);
143 mntget(lower_mnt);
144 inode_info->lower_file = dentry_open(lower_dentry,
145 lower_mnt,
146 (O_RDONLY
147 | O_LARGEFILE));
148 }
149 if (IS_ERR(inode_info->lower_file)) {
150 printk(KERN_ERR "Error opening lower persistent file " 136 printk(KERN_ERR "Error opening lower persistent file "
151 "for lower_dentry [0x%p] and lower_mnt [0x%p]\n", 137 "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
152 lower_dentry, lower_mnt); 138 "rc = [%d]\n", lower_dentry, lower_mnt, rc);
153 rc = PTR_ERR(inode_info->lower_file); 139 rc = PTR_ERR(inode_info->lower_file);
154 inode_info->lower_file = NULL; 140 inode_info->lower_file = NULL;
155 } 141 }
@@ -163,14 +149,14 @@ static int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
163 * @lower_dentry: Existing dentry in the lower filesystem 149 * @lower_dentry: Existing dentry in the lower filesystem
164 * @dentry: ecryptfs' dentry 150 * @dentry: ecryptfs' dentry
165 * @sb: ecryptfs's super_block 151 * @sb: ecryptfs's super_block
166 * @flag: If set to true, then d_add is called, else d_instantiate is called 152 * @flags: flags to govern behavior of interpose procedure
167 * 153 *
168 * Interposes upper and lower dentries. 154 * Interposes upper and lower dentries.
169 * 155 *
170 * Returns zero on success; non-zero otherwise 156 * Returns zero on success; non-zero otherwise
171 */ 157 */
172int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, 158int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
173 struct super_block *sb, int flag) 159 struct super_block *sb, u32 flags)
174{ 160{
175 struct inode *lower_inode; 161 struct inode *lower_inode;
176 struct inode *inode; 162 struct inode *inode;
@@ -207,7 +193,7 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
207 init_special_inode(inode, lower_inode->i_mode, 193 init_special_inode(inode, lower_inode->i_mode,
208 lower_inode->i_rdev); 194 lower_inode->i_rdev);
209 dentry->d_op = &ecryptfs_dops; 195 dentry->d_op = &ecryptfs_dops;
210 if (flag) 196 if (flags & ECRYPTFS_INTERPOSE_FLAG_D_ADD)
211 d_add(dentry, inode); 197 d_add(dentry, inode);
212 else 198 else
213 d_instantiate(dentry, inode); 199 d_instantiate(dentry, inode);
@@ -215,13 +201,6 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
215 /* This size will be overwritten for real files w/ headers and 201 /* This size will be overwritten for real files w/ headers and
216 * other metadata */ 202 * other metadata */
217 fsstack_copy_inode_size(inode, lower_inode); 203 fsstack_copy_inode_size(inode, lower_inode);
218 rc = ecryptfs_init_persistent_file(dentry);
219 if (rc) {
220 printk(KERN_ERR "%s: Error attempting to initialize the "
221 "persistent file for the dentry with name [%s]; "
222 "rc = [%d]\n", __func__, dentry->d_name.name, rc);
223 goto out;
224 }
225out: 204out:
226 return rc; 205 return rc;
227} 206}
@@ -262,10 +241,11 @@ static int ecryptfs_init_global_auth_toks(
262 "session keyring for sig specified in mount " 241 "session keyring for sig specified in mount "
263 "option: [%s]\n", global_auth_tok->sig); 242 "option: [%s]\n", global_auth_tok->sig);
264 global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID; 243 global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID;
265 rc = 0; 244 goto out;
266 } else 245 } else
267 global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID; 246 global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID;
268 } 247 }
248out:
269 return rc; 249 return rc;
270} 250}
271 251
@@ -314,7 +294,6 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
314 char *cipher_name_dst; 294 char *cipher_name_dst;
315 char *cipher_name_src; 295 char *cipher_name_src;
316 char *cipher_key_bytes_src; 296 char *cipher_key_bytes_src;
317 int cipher_name_len;
318 297
319 if (!options) { 298 if (!options) {
320 rc = -EINVAL; 299 rc = -EINVAL;
@@ -395,17 +374,12 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
395 goto out; 374 goto out;
396 } 375 }
397 if (!cipher_name_set) { 376 if (!cipher_name_set) {
398 cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); 377 int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);
399 if (unlikely(cipher_name_len 378
400 >= ECRYPTFS_MAX_CIPHER_NAME_SIZE)) { 379 BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE);
401 rc = -EINVAL; 380
402 BUG(); 381 strcpy(mount_crypt_stat->global_default_cipher_name,
403 goto out; 382 ECRYPTFS_DEFAULT_CIPHER);
404 }
405 memcpy(mount_crypt_stat->global_default_cipher_name,
406 ECRYPTFS_DEFAULT_CIPHER, cipher_name_len);
407 mount_crypt_stat->global_default_cipher_name[cipher_name_len]
408 = '\0';
409 } 383 }
410 if (!cipher_key_bytes_set) { 384 if (!cipher_key_bytes_set) {
411 mount_crypt_stat->global_default_cipher_key_size = 0; 385 mount_crypt_stat->global_default_cipher_key_size = 0;
@@ -430,7 +404,6 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
430 printk(KERN_WARNING "One or more global auth toks could not " 404 printk(KERN_WARNING "One or more global auth toks could not "
431 "properly register; rc = [%d]\n", rc); 405 "properly register; rc = [%d]\n", rc);
432 } 406 }
433 rc = 0;
434out: 407out:
435 return rc; 408 return rc;
436} 409}
@@ -679,6 +652,11 @@ static struct ecryptfs_cache_info {
679 .name = "ecryptfs_key_tfm_cache", 652 .name = "ecryptfs_key_tfm_cache",
680 .size = sizeof(struct ecryptfs_key_tfm), 653 .size = sizeof(struct ecryptfs_key_tfm),
681 }, 654 },
655 {
656 .cache = &ecryptfs_open_req_cache,
657 .name = "ecryptfs_open_req_cache",
658 .size = sizeof(struct ecryptfs_open_req),
659 },
682}; 660};
683 661
684static void ecryptfs_free_kmem_caches(void) 662static void ecryptfs_free_kmem_caches(void)
@@ -795,11 +773,17 @@ static int __init ecryptfs_init(void)
795 printk(KERN_ERR "sysfs registration failed\n"); 773 printk(KERN_ERR "sysfs registration failed\n");
796 goto out_unregister_filesystem; 774 goto out_unregister_filesystem;
797 } 775 }
776 rc = ecryptfs_init_kthread();
777 if (rc) {
778 printk(KERN_ERR "%s: kthread initialization failed; "
779 "rc = [%d]\n", __func__, rc);
780 goto out_do_sysfs_unregistration;
781 }
798 rc = ecryptfs_init_messaging(ecryptfs_transport); 782 rc = ecryptfs_init_messaging(ecryptfs_transport);
799 if (rc) { 783 if (rc) {
800 ecryptfs_printk(KERN_ERR, "Failure occured while attempting to " 784 printk(KERN_ERR "Failure occured while attempting to "
801 "initialize the eCryptfs netlink socket\n"); 785 "initialize the eCryptfs netlink socket\n");
802 goto out_do_sysfs_unregistration; 786 goto out_destroy_kthread;
803 } 787 }
804 rc = ecryptfs_init_crypto(); 788 rc = ecryptfs_init_crypto();
805 if (rc) { 789 if (rc) {
@@ -814,6 +798,8 @@ static int __init ecryptfs_init(void)
814 goto out; 798 goto out;
815out_release_messaging: 799out_release_messaging:
816 ecryptfs_release_messaging(ecryptfs_transport); 800 ecryptfs_release_messaging(ecryptfs_transport);
801out_destroy_kthread:
802 ecryptfs_destroy_kthread();
817out_do_sysfs_unregistration: 803out_do_sysfs_unregistration:
818 do_sysfs_unregistration(); 804 do_sysfs_unregistration();
819out_unregister_filesystem: 805out_unregister_filesystem:
@@ -833,6 +819,7 @@ static void __exit ecryptfs_exit(void)
833 printk(KERN_ERR "Failure whilst attempting to destroy crypto; " 819 printk(KERN_ERR "Failure whilst attempting to destroy crypto; "
834 "rc = [%d]\n", rc); 820 "rc = [%d]\n", rc);
835 ecryptfs_release_messaging(ecryptfs_transport); 821 ecryptfs_release_messaging(ecryptfs_transport);
822 ecryptfs_destroy_kthread();
836 do_sysfs_unregistration(); 823 do_sysfs_unregistration();
837 unregister_filesystem(&ecryptfs_fs_type); 824 unregister_filesystem(&ecryptfs_fs_type);
838 ecryptfs_free_kmem_caches(); 825 ecryptfs_free_kmem_caches();
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 09a4522f65e6..b484792a0996 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -358,46 +358,6 @@ out_unlock_daemon:
358} 358}
359 359
360/** 360/**
361 * ecryptfs_miscdev_helo
362 * @euid: effective user id of miscdevess sending helo packet
363 * @user_ns: The namespace in which @euid applies
364 * @pid: miscdevess id of miscdevess sending helo packet
365 *
366 * Returns zero on success; non-zero otherwise
367 */
368static int ecryptfs_miscdev_helo(uid_t euid, struct user_namespace *user_ns,
369 struct pid *pid)
370{
371 int rc;
372
373 rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_MISCDEV, euid, user_ns,
374 pid);
375 if (rc)
376 printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc);
377 return rc;
378}
379
380/**
381 * ecryptfs_miscdev_quit
382 * @euid: effective user id of miscdevess sending quit packet
383 * @user_ns: The namespace in which @euid applies
384 * @pid: miscdevess id of miscdevess sending quit packet
385 *
386 * Returns zero on success; non-zero otherwise
387 */
388static int ecryptfs_miscdev_quit(uid_t euid, struct user_namespace *user_ns,
389 struct pid *pid)
390{
391 int rc;
392
393 rc = ecryptfs_process_quit(euid, user_ns, pid);
394 if (rc)
395 printk(KERN_WARNING
396 "Error processing QUIT message; rc = [%d]\n", rc);
397 return rc;
398}
399
400/**
401 * ecryptfs_miscdev_response - miscdevess response to message previously sent to daemon 361 * ecryptfs_miscdev_response - miscdevess response to message previously sent to daemon
402 * @data: Bytes comprising struct ecryptfs_message 362 * @data: Bytes comprising struct ecryptfs_message
403 * @data_size: sizeof(struct ecryptfs_message) + data len 363 * @data_size: sizeof(struct ecryptfs_message) + data len
@@ -512,26 +472,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
512 __func__, rc); 472 __func__, rc);
513 break; 473 break;
514 case ECRYPTFS_MSG_HELO: 474 case ECRYPTFS_MSG_HELO:
515 rc = ecryptfs_miscdev_helo(current->euid,
516 current->nsproxy->user_ns,
517 task_pid(current));
518 if (rc) {
519 printk(KERN_ERR "%s: Error attempting to process "
520 "helo from pid [0x%p]; rc = [%d]\n", __func__,
521 task_pid(current), rc);
522 goto out_free;
523 }
524 break;
525 case ECRYPTFS_MSG_QUIT: 475 case ECRYPTFS_MSG_QUIT:
526 rc = ecryptfs_miscdev_quit(current->euid,
527 current->nsproxy->user_ns,
528 task_pid(current));
529 if (rc) {
530 printk(KERN_ERR "%s: Error attempting to process "
531 "quit from pid [0x%p]; rc = [%d]\n", __func__,
532 task_pid(current), rc);
533 goto out_free;
534 }
535 break; 476 break;
536 default: 477 default:
537 ecryptfs_printk(KERN_WARNING, "Dropping miscdev " 478 ecryptfs_printk(KERN_WARNING, "Dropping miscdev "
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 2b6fe1e6e8ba..245c2dc02d5c 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -32,6 +32,7 @@
32#include <linux/file.h> 32#include <linux/file.h>
33#include <linux/crypto.h> 33#include <linux/crypto.h>
34#include <linux/scatterlist.h> 34#include <linux/scatterlist.h>
35#include <asm/unaligned.h>
35#include "ecryptfs_kernel.h" 36#include "ecryptfs_kernel.h"
36 37
37/** 38/**
@@ -372,7 +373,6 @@ out:
372 */ 373 */
373static int ecryptfs_write_inode_size_to_header(struct inode *ecryptfs_inode) 374static int ecryptfs_write_inode_size_to_header(struct inode *ecryptfs_inode)
374{ 375{
375 u64 file_size;
376 char *file_size_virt; 376 char *file_size_virt;
377 int rc; 377 int rc;
378 378
@@ -381,9 +381,7 @@ static int ecryptfs_write_inode_size_to_header(struct inode *ecryptfs_inode)
381 rc = -ENOMEM; 381 rc = -ENOMEM;
382 goto out; 382 goto out;
383 } 383 }
384 file_size = (u64)i_size_read(ecryptfs_inode); 384 put_unaligned_be64(i_size_read(ecryptfs_inode), file_size_virt);
385 file_size = cpu_to_be64(file_size);
386 memcpy(file_size_virt, &file_size, sizeof(u64));
387 rc = ecryptfs_write_lower(ecryptfs_inode, file_size_virt, 0, 385 rc = ecryptfs_write_lower(ecryptfs_inode, file_size_virt, 0,
388 sizeof(u64)); 386 sizeof(u64));
389 kfree(file_size_virt); 387 kfree(file_size_virt);
@@ -403,7 +401,6 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
403 struct dentry *lower_dentry = 401 struct dentry *lower_dentry =
404 ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_dentry; 402 ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_dentry;
405 struct inode *lower_inode = lower_dentry->d_inode; 403 struct inode *lower_inode = lower_dentry->d_inode;
406 u64 file_size;
407 int rc; 404 int rc;
408 405
409 if (!lower_inode->i_op->getxattr || !lower_inode->i_op->setxattr) { 406 if (!lower_inode->i_op->getxattr || !lower_inode->i_op->setxattr) {
@@ -424,9 +421,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
424 xattr_virt, PAGE_CACHE_SIZE); 421 xattr_virt, PAGE_CACHE_SIZE);
425 if (size < 0) 422 if (size < 0)
426 size = 8; 423 size = 8;
427 file_size = (u64)i_size_read(ecryptfs_inode); 424 put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
428 file_size = cpu_to_be64(file_size);
429 memcpy(xattr_virt, &file_size, sizeof(u64));
430 rc = lower_inode->i_op->setxattr(lower_dentry, ECRYPTFS_XATTR_NAME, 425 rc = lower_inode->i_op->setxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
431 xattr_virt, size, 0); 426 xattr_virt, size, 0);
432 mutex_unlock(&lower_inode->i_mutex); 427 mutex_unlock(&lower_inode->i_mutex);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 343942deeec1..08bf558d0408 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -198,11 +198,18 @@ struct file *eventfd_fget(int fd)
198 return file; 198 return file;
199} 199}
200 200
201asmlinkage long sys_eventfd(unsigned int count) 201asmlinkage long sys_eventfd2(unsigned int count, int flags)
202{ 202{
203 int fd; 203 int fd;
204 struct eventfd_ctx *ctx; 204 struct eventfd_ctx *ctx;
205 205
206 /* Check the EFD_* constants for consistency. */
207 BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
208 BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
209
210 if (flags & ~(EFD_CLOEXEC | EFD_NONBLOCK))
211 return -EINVAL;
212
206 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 213 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
207 if (!ctx) 214 if (!ctx)
208 return -ENOMEM; 215 return -ENOMEM;
@@ -214,9 +221,15 @@ asmlinkage long sys_eventfd(unsigned int count)
214 * When we call this, the initialization must be complete, since 221 * When we call this, the initialization must be complete, since
215 * anon_inode_getfd() will install the fd. 222 * anon_inode_getfd() will install the fd.
216 */ 223 */
217 fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx); 224 fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
225 flags & (O_CLOEXEC | O_NONBLOCK));
218 if (fd < 0) 226 if (fd < 0)
219 kfree(ctx); 227 kfree(ctx);
220 return fd; 228 return fd;
221} 229}
222 230
231asmlinkage long sys_eventfd(unsigned int count)
232{
233 return sys_eventfd2(count, 0);
234}
235
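
sys_eventfd2() takes a flags argument so close-on-exec and non-blocking behaviour can be set atomically at creation, and the old sys_eventfd() becomes a wrapper that passes flags=0. A minimal user-space sketch; it assumes a libc that exposes eventfd() with the EFD_* flags (otherwise the raw syscall would be needed):

#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Counter starts at 3; both flags are applied at creation, no extra fcntl(). */
        int fd = eventfd(3, EFD_CLOEXEC | EFD_NONBLOCK);
        uint64_t val;

        if (fd < 0) {
                perror("eventfd");
                return 1;
        }
        if (read(fd, &val, sizeof(val)) == sizeof(val))
                printf("counter was %llu\n", (unsigned long long)val);
        close(fd);
        return 0;
}

Setting the flags at creation time closes the window in which a concurrent fork()/exec() could inherit the descriptor before a separate fcntl() call.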
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 990c01d2d66b..0c87474f7917 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1046,20 +1046,25 @@ retry:
1046 * RB tree. With the current implementation, the "size" parameter is ignored 1046 * RB tree. With the current implementation, the "size" parameter is ignored
1047 * (besides sanity checks). 1047 * (besides sanity checks).
1048 */ 1048 */
1049asmlinkage long sys_epoll_create(int size) 1049asmlinkage long sys_epoll_create1(int flags)
1050{ 1050{
1051 int error, fd = -1; 1051 int error, fd = -1;
1052 struct eventpoll *ep; 1052 struct eventpoll *ep;
1053 1053
1054 /* Check the EPOLL_* constant for consistency. */
1055 BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
1056
1057 if (flags & ~EPOLL_CLOEXEC)
1058 return -EINVAL;
1059
1054 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n", 1060 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
1055 current, size)); 1061 current, flags));
1056 1062
1057 /* 1063 /*
1058 * Sanity check on the size parameter, and create the internal data 1064 * Create the internal data structure ( "struct eventpoll" ).
1059 * structure ( "struct eventpoll" ).
1060 */ 1065 */
1061 error = -EINVAL; 1066 error = ep_alloc(&ep);
1062 if (size <= 0 || (error = ep_alloc(&ep)) < 0) { 1067 if (error < 0) {
1063 fd = error; 1068 fd = error;
1064 goto error_return; 1069 goto error_return;
1065 } 1070 }
@@ -1068,17 +1073,26 @@ asmlinkage long sys_epoll_create(int size)
1068 * Creates all the items needed to setup an eventpoll file. That is, 1073 * Creates all the items needed to setup an eventpoll file. That is,
1069 * a file structure and a free file descriptor. 1074 * a file structure and a free file descriptor.
1070 */ 1075 */
1071 fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep); 1076 fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
1077 flags & O_CLOEXEC);
1072 if (fd < 0) 1078 if (fd < 0)
1073 ep_free(ep); 1079 ep_free(ep);
1074 1080
1075error_return: 1081error_return:
1076 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", 1082 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
1077 current, size, fd)); 1083 current, flags, fd));
1078 1084
1079 return fd; 1085 return fd;
1080} 1086}
1081 1087
1088asmlinkage long sys_epoll_create(int size)
1089{
1090 if (size < 0)
1091 return -EINVAL;
1092
1093 return sys_epoll_create1(0);
1094}
1095
1082/* 1096/*
1083 * The following function implements the controller interface for 1097 * The following function implements the controller interface for
1084 * the eventpoll file that enables the insertion/removal/change of 1098 * the eventpoll file that enables the insertion/removal/change of
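
sys_epoll_create1() drops the long-ignored size argument in favour of a flags word, so EPOLL_CLOEXEC can be applied at creation time, while sys_epoll_create() remains as a compatibility wrapper that only sanity-checks size. A minimal user-space sketch; it assumes a libc that exposes epoll_create1() (otherwise the raw syscall would be needed):

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Close-on-exec is set atomically; no separate fcntl(F_SETFD) race. */
        int epfd = epoll_create1(EPOLL_CLOEXEC);

        if (epfd < 0) {
                perror("epoll_create1");
                return 1;
        }
        printf("epoll fd %d created with CLOEXEC\n", epfd);
        close(epfd);
        return 0;
}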
diff --git a/fs/exec.c b/fs/exec.c
index fd9234379e8d..5e559013e303 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -25,19 +25,18 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/file.h> 26#include <linux/file.h>
27#include <linux/fdtable.h> 27#include <linux/fdtable.h>
28#include <linux/mman.h> 28#include <linux/mm.h>
29#include <linux/stat.h> 29#include <linux/stat.h>
30#include <linux/fcntl.h> 30#include <linux/fcntl.h>
31#include <linux/smp_lock.h> 31#include <linux/smp_lock.h>
32#include <linux/swap.h>
32#include <linux/string.h> 33#include <linux/string.h>
33#include <linux/init.h> 34#include <linux/init.h>
34#include <linux/pagemap.h>
35#include <linux/highmem.h> 35#include <linux/highmem.h>
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/key.h> 37#include <linux/key.h>
38#include <linux/personality.h> 38#include <linux/personality.h>
39#include <linux/binfmts.h> 39#include <linux/binfmts.h>
40#include <linux/swap.h>
41#include <linux/utsname.h> 40#include <linux/utsname.h>
42#include <linux/pid_namespace.h> 41#include <linux/pid_namespace.h>
43#include <linux/module.h> 42#include <linux/module.h>
@@ -47,7 +46,6 @@
47#include <linux/mount.h> 46#include <linux/mount.h>
48#include <linux/security.h> 47#include <linux/security.h>
49#include <linux/syscalls.h> 48#include <linux/syscalls.h>
50#include <linux/rmap.h>
51#include <linux/tsacct_kern.h> 49#include <linux/tsacct_kern.h>
52#include <linux/cn_proc.h> 50#include <linux/cn_proc.h>
53#include <linux/audit.h> 51#include <linux/audit.h>
@@ -541,7 +539,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
541 /* 539 /*
542 * when the old and new regions overlap clear from new_end. 540 * when the old and new regions overlap clear from new_end.
543 */ 541 */
544 free_pgd_range(&tlb, new_end, old_end, new_end, 542 free_pgd_range(tlb, new_end, old_end, new_end,
545 vma->vm_next ? vma->vm_next->vm_start : 0); 543 vma->vm_next ? vma->vm_next->vm_start : 0);
546 } else { 544 } else {
547 /* 545 /*
@@ -550,7 +548,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
550 * have constraints on va-space that make this illegal (IA64) - 548 * have constraints on va-space that make this illegal (IA64) -
551 * for the others its just a little faster. 549 * for the others its just a little faster.
552 */ 550 */
553 free_pgd_range(&tlb, old_start, old_end, new_end, 551 free_pgd_range(tlb, old_start, old_end, new_end,
554 vma->vm_next ? vma->vm_next->vm_start : 0); 552 vma->vm_next ? vma->vm_next->vm_start : 0);
555 } 553 }
556 tlb_finish_mmu(tlb, new_end, old_end); 554 tlb_finish_mmu(tlb, new_end, old_end);
@@ -724,12 +722,10 @@ static int exec_mmap(struct mm_struct *mm)
724 * Make sure that if there is a core dump in progress 722 * Make sure that if there is a core dump in progress
725 * for the old mm, we get out and die instead of going 723 * for the old mm, we get out and die instead of going
726 * through with the exec. We must hold mmap_sem around 724 * through with the exec. We must hold mmap_sem around
727 * checking core_waiters and changing tsk->mm. The 725 * checking core_state and changing tsk->mm.
728 * core-inducing thread will increment core_waiters for
729 * each thread whose ->mm == old_mm.
730 */ 726 */
731 down_read(&old_mm->mmap_sem); 727 down_read(&old_mm->mmap_sem);
732 if (unlikely(old_mm->core_waiters)) { 728 if (unlikely(old_mm->core_state)) {
733 up_read(&old_mm->mmap_sem); 729 up_read(&old_mm->mmap_sem);
734 return -EINTR; 730 return -EINTR;
735 } 731 }
@@ -1328,6 +1324,7 @@ int do_execve(char * filename,
1328 if (retval < 0) 1324 if (retval < 0)
1329 goto out; 1325 goto out;
1330 1326
1327 current->flags &= ~PF_KTHREAD;
1331 retval = search_binary_handler(bprm,regs); 1328 retval = search_binary_handler(bprm,regs);
1332 if (retval >= 0) { 1329 if (retval >= 0) {
1333 /* execve success */ 1330 /* execve success */
@@ -1382,17 +1379,14 @@ EXPORT_SYMBOL(set_binfmt);
1382 * name into corename, which must have space for at least 1379 * name into corename, which must have space for at least
1383 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. 1380 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
1384 */ 1381 */
1385static int format_corename(char *corename, const char *pattern, long signr) 1382static int format_corename(char *corename, int nr_threads, long signr)
1386{ 1383{
1387 const char *pat_ptr = pattern; 1384 const char *pat_ptr = core_pattern;
1385 int ispipe = (*pat_ptr == '|');
1388 char *out_ptr = corename; 1386 char *out_ptr = corename;
1389 char *const out_end = corename + CORENAME_MAX_SIZE; 1387 char *const out_end = corename + CORENAME_MAX_SIZE;
1390 int rc; 1388 int rc;
1391 int pid_in_pattern = 0; 1389 int pid_in_pattern = 0;
1392 int ispipe = 0;
1393
1394 if (*pattern == '|')
1395 ispipe = 1;
1396 1390
1397 /* Repeat as long as we have more pattern to process and more output 1391 /* Repeat as long as we have more pattern to process and more output
1398 space */ 1392 space */
@@ -1493,7 +1487,7 @@ static int format_corename(char *corename, const char *pattern, long signr)
1493 * and core_uses_pid is set, then .%pid will be appended to 1487 * and core_uses_pid is set, then .%pid will be appended to
1494 * the filename. Do not do this for piped commands. */ 1488 * the filename. Do not do this for piped commands. */
1495 if (!ispipe && !pid_in_pattern 1489 if (!ispipe && !pid_in_pattern
1496 && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) { 1490 && (core_uses_pid || nr_threads)) {
1497 rc = snprintf(out_ptr, out_end - out_ptr, 1491 rc = snprintf(out_ptr, out_end - out_ptr,
1498 ".%d", task_tgid_vnr(current)); 1492 ".%d", task_tgid_vnr(current));
1499 if (rc > out_end - out_ptr) 1493 if (rc > out_end - out_ptr)
@@ -1505,9 +1499,10 @@ out:
1505 return ispipe; 1499 return ispipe;
1506} 1500}
1507 1501
1508static void zap_process(struct task_struct *start) 1502static int zap_process(struct task_struct *start)
1509{ 1503{
1510 struct task_struct *t; 1504 struct task_struct *t;
1505 int nr = 0;
1511 1506
1512 start->signal->flags = SIGNAL_GROUP_EXIT; 1507 start->signal->flags = SIGNAL_GROUP_EXIT;
1513 start->signal->group_stop_count = 0; 1508 start->signal->group_stop_count = 0;
@@ -1515,72 +1510,99 @@ static void zap_process(struct task_struct *start)
1515 t = start; 1510 t = start;
1516 do { 1511 do {
1517 if (t != current && t->mm) { 1512 if (t != current && t->mm) {
1518 t->mm->core_waiters++;
1519 sigaddset(&t->pending.signal, SIGKILL); 1513 sigaddset(&t->pending.signal, SIGKILL);
1520 signal_wake_up(t, 1); 1514 signal_wake_up(t, 1);
1515 nr++;
1521 } 1516 }
1522 } while ((t = next_thread(t)) != start); 1517 } while_each_thread(start, t);
1518
1519 return nr;
1523} 1520}
1524 1521
1525static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm, 1522static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
1526 int exit_code) 1523 struct core_state *core_state, int exit_code)
1527{ 1524{
1528 struct task_struct *g, *p; 1525 struct task_struct *g, *p;
1529 unsigned long flags; 1526 unsigned long flags;
1530 int err = -EAGAIN; 1527 int nr = -EAGAIN;
1531 1528
1532 spin_lock_irq(&tsk->sighand->siglock); 1529 spin_lock_irq(&tsk->sighand->siglock);
1533 if (!signal_group_exit(tsk->signal)) { 1530 if (!signal_group_exit(tsk->signal)) {
1531 mm->core_state = core_state;
1534 tsk->signal->group_exit_code = exit_code; 1532 tsk->signal->group_exit_code = exit_code;
1535 zap_process(tsk); 1533 nr = zap_process(tsk);
1536 err = 0;
1537 } 1534 }
1538 spin_unlock_irq(&tsk->sighand->siglock); 1535 spin_unlock_irq(&tsk->sighand->siglock);
1539 if (err) 1536 if (unlikely(nr < 0))
1540 return err; 1537 return nr;
1541 1538
1542 if (atomic_read(&mm->mm_users) == mm->core_waiters + 1) 1539 if (atomic_read(&mm->mm_users) == nr + 1)
1543 goto done; 1540 goto done;
1544 1541 /*
1542 * We should find and kill all tasks which use this mm, and we should
1543 * count them correctly into ->nr_threads. We don't take tasklist
1544 * lock, but this is safe wrt:
1545 *
1546 * fork:
1547 * None of sub-threads can fork after zap_process(leader). All
1548 * processes which were created before this point should be
1549 * visible to zap_threads() because copy_process() adds the new
1550 * process to the tail of init_task.tasks list, and lock/unlock
1551 * of ->siglock provides a memory barrier.
1552 *
1553 * do_exit:
1554 * The caller holds mm->mmap_sem. This means that the task which
1555 * uses this mm can't pass exit_mm(), so it can't exit or clear
1556 * its ->mm.
1557 *
1558 * de_thread:
1559 * It does list_replace_rcu(&leader->tasks, &current->tasks),
1560 * we must see either old or new leader, this does not matter.
1561 * However, it can change p->sighand, so lock_task_sighand(p)
1562 * must be used. Since p->mm != NULL and we hold ->mmap_sem
1563 * it can't fail.
1564 *
1565 * Note also that "g" can be the old leader with ->mm == NULL
1566 * and already unhashed and thus removed from ->thread_group.
1567 * This is OK, __unhash_process()->list_del_rcu() does not
1568 * clear the ->next pointer, we will find the new leader via
1569 * next_thread().
1570 */
1545 rcu_read_lock(); 1571 rcu_read_lock();
1546 for_each_process(g) { 1572 for_each_process(g) {
1547 if (g == tsk->group_leader) 1573 if (g == tsk->group_leader)
1548 continue; 1574 continue;
1549 1575 if (g->flags & PF_KTHREAD)
1576 continue;
1550 p = g; 1577 p = g;
1551 do { 1578 do {
1552 if (p->mm) { 1579 if (p->mm) {
1553 if (p->mm == mm) { 1580 if (unlikely(p->mm == mm)) {
1554 /*
1555 * p->sighand can't disappear, but
1556 * may be changed by de_thread()
1557 */
1558 lock_task_sighand(p, &flags); 1581 lock_task_sighand(p, &flags);
1559 zap_process(p); 1582 nr += zap_process(p);
1560 unlock_task_sighand(p, &flags); 1583 unlock_task_sighand(p, &flags);
1561 } 1584 }
1562 break; 1585 break;
1563 } 1586 }
1564 } while ((p = next_thread(p)) != g); 1587 } while_each_thread(g, p);
1565 } 1588 }
1566 rcu_read_unlock(); 1589 rcu_read_unlock();
1567done: 1590done:
1568 return mm->core_waiters; 1591 atomic_set(&core_state->nr_threads, nr);
1592 return nr;
1569} 1593}
1570 1594
1571static int coredump_wait(int exit_code) 1595static int coredump_wait(int exit_code, struct core_state *core_state)
1572{ 1596{
1573 struct task_struct *tsk = current; 1597 struct task_struct *tsk = current;
1574 struct mm_struct *mm = tsk->mm; 1598 struct mm_struct *mm = tsk->mm;
1575 struct completion startup_done;
1576 struct completion *vfork_done; 1599 struct completion *vfork_done;
1577 int core_waiters; 1600 int core_waiters;
1578 1601
1579 init_completion(&mm->core_done); 1602 init_completion(&core_state->startup);
1580 init_completion(&startup_done); 1603 core_state->dumper.task = tsk;
1581 mm->core_startup_done = &startup_done; 1604 core_state->dumper.next = NULL;
1582 1605 core_waiters = zap_threads(tsk, mm, core_state, exit_code);
1583 core_waiters = zap_threads(tsk, mm, exit_code);
1584 up_write(&mm->mmap_sem); 1606 up_write(&mm->mmap_sem);
1585 1607
1586 if (unlikely(core_waiters < 0)) 1608 if (unlikely(core_waiters < 0))
@@ -1597,12 +1619,32 @@ static int coredump_wait(int exit_code)
1597 } 1619 }
1598 1620
1599 if (core_waiters) 1621 if (core_waiters)
1600 wait_for_completion(&startup_done); 1622 wait_for_completion(&core_state->startup);
1601fail: 1623fail:
1602 BUG_ON(mm->core_waiters);
1603 return core_waiters; 1624 return core_waiters;
1604} 1625}
1605 1626
1627static void coredump_finish(struct mm_struct *mm)
1628{
1629 struct core_thread *curr, *next;
1630 struct task_struct *task;
1631
1632 next = mm->core_state->dumper.next;
1633 while ((curr = next) != NULL) {
1634 next = curr->next;
1635 task = curr->task;
1636 /*
1637 * see exit_mm(), curr->task must not see
1638 * ->task == NULL before we read ->next.
1639 */
1640 smp_mb();
1641 curr->task = NULL;
1642 wake_up_process(task);
1643 }
1644
1645 mm->core_state = NULL;
1646}
1647
1606/* 1648/*
1607 * set_dumpable converts traditional three-value dumpable to two flags and 1649 * set_dumpable converts traditional three-value dumpable to two flags and
1608 * stores them into mm->flags. It modifies lower two bits of mm->flags, but 1650 * stores them into mm->flags. It modifies lower two bits of mm->flags, but
@@ -1654,6 +1696,7 @@ int get_dumpable(struct mm_struct *mm)
1654 1696
1655int do_coredump(long signr, int exit_code, struct pt_regs * regs) 1697int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1656{ 1698{
1699 struct core_state core_state;
1657 char corename[CORENAME_MAX_SIZE + 1]; 1700 char corename[CORENAME_MAX_SIZE + 1];
1658 struct mm_struct *mm = current->mm; 1701 struct mm_struct *mm = current->mm;
1659 struct linux_binfmt * binfmt; 1702 struct linux_binfmt * binfmt;
@@ -1677,7 +1720,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1677 /* 1720 /*
1678 * If another thread got here first, or we are not dumpable, bail out. 1721 * If another thread got here first, or we are not dumpable, bail out.
1679 */ 1722 */
1680 if (mm->core_waiters || !get_dumpable(mm)) { 1723 if (mm->core_state || !get_dumpable(mm)) {
1681 up_write(&mm->mmap_sem); 1724 up_write(&mm->mmap_sem);
1682 goto fail; 1725 goto fail;
1683 } 1726 }
@@ -1692,7 +1735,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1692 current->fsuid = 0; /* Dump root private */ 1735 current->fsuid = 0; /* Dump root private */
1693 } 1736 }
1694 1737
1695 retval = coredump_wait(exit_code); 1738 retval = coredump_wait(exit_code, &core_state);
1696 if (retval < 0) 1739 if (retval < 0)
1697 goto fail; 1740 goto fail;
1698 1741
@@ -1707,7 +1750,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1707 * uses lock_kernel() 1750 * uses lock_kernel()
1708 */ 1751 */
1709 lock_kernel(); 1752 lock_kernel();
1710 ispipe = format_corename(corename, core_pattern, signr); 1753 ispipe = format_corename(corename, retval, signr);
1711 unlock_kernel(); 1754 unlock_kernel();
1712 /* 1755 /*
1713 * Don't bother to check the RLIMIT_CORE value if core_pattern points 1756 * Don't bother to check the RLIMIT_CORE value if core_pattern points
@@ -1786,7 +1829,7 @@ fail_unlock:
1786 argv_free(helper_argv); 1829 argv_free(helper_argv);
1787 1830
1788 current->fsuid = fsuid; 1831 current->fsuid = fsuid;
1789 complete_all(&mm->core_done); 1832 coredump_finish(mm);
1790fail: 1833fail:
1791 return retval; 1834 return retval;
1792} 1835}
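coredump_wait() and coredump_finish() above are only half of the new protocol: every other thread sharing the mm is expected to link a struct core_thread onto core_state->dumper.next from its exit path, drop nr_threads, and then sleep until coredump_finish() clears its ->task pointer and wakes it. That exit-side code lives in kernel/exit.c and is not part of this hunk; the following is only a rough sketch of what it looks like given the structures used here, with an invented function name:

static void sketch_exit_mm_wait(struct task_struct *tsk, struct mm_struct *mm)
{
	struct core_state *core_state = mm->core_state;
	struct core_thread self;

	up_read(&mm->mmap_sem);

	self.task = tsk;
	/* xchg() implies a full barrier, so the dumper never observes a
	 * half-initialised entry on the list */
	self.next = xchg(&core_state->dumper.next, &self);

	/* the last thread to arrive releases the dumper blocked in
	 * coredump_wait() on core_state->startup */
	if (atomic_dec_and_test(&core_state->nr_threads))
		complete(&core_state->startup);

	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!self.task)	/* cleared by coredump_finish() */
			break;
		schedule();
	}
	__set_task_state(tsk, TASK_RUNNING);
	down_read(&mm->mmap_sem);
}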
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index ef50cbc792db..31308a3b0b8b 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -31,6 +31,7 @@
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/mount.h> 32#include <linux/mount.h>
33#include <linux/log2.h> 33#include <linux/log2.h>
34#include <linux/quotaops.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include "ext2.h" 36#include "ext2.h"
36#include "xattr.h" 37#include "xattr.h"
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index eaa23d2d5213..70c0dbdcdcb7 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -14,7 +14,7 @@ static size_t
14ext2_xattr_security_list(struct inode *inode, char *list, size_t list_size, 14ext2_xattr_security_list(struct inode *inode, char *list, size_t list_size,
15 const char *name, size_t name_len) 15 const char *name, size_t name_len)
16{ 16{
17 const int prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1; 17 const int prefix_len = XATTR_SECURITY_PREFIX_LEN;
18 const size_t total_len = prefix_len + name_len + 1; 18 const size_t total_len = prefix_len + name_len + 1;
19 19
20 if (list && total_len <= list_size) { 20 if (list && total_len <= list_size) {
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 83ee149f353d..e8219f8eae9f 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -12,13 +12,11 @@
12#include <linux/ext2_fs.h> 12#include <linux/ext2_fs.h>
13#include "xattr.h" 13#include "xattr.h"
14 14
15#define XATTR_TRUSTED_PREFIX "trusted."
16
17static size_t 15static size_t
18ext2_xattr_trusted_list(struct inode *inode, char *list, size_t list_size, 16ext2_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
19 const char *name, size_t name_len) 17 const char *name, size_t name_len)
20{ 18{
21 const int prefix_len = sizeof(XATTR_TRUSTED_PREFIX)-1; 19 const int prefix_len = XATTR_TRUSTED_PREFIX_LEN;
22 const size_t total_len = prefix_len + name_len + 1; 20 const size_t total_len = prefix_len + name_len + 1;
23 21
24 if (!capable(CAP_SYS_ADMIN)) 22 if (!capable(CAP_SYS_ADMIN))
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index f383e7c3a7b5..92495d28c62f 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -11,13 +11,11 @@
11#include "ext2.h" 11#include "ext2.h"
12#include "xattr.h" 12#include "xattr.h"
13 13
14#define XATTR_USER_PREFIX "user."
15
16static size_t 14static size_t
17ext2_xattr_user_list(struct inode *inode, char *list, size_t list_size, 15ext2_xattr_user_list(struct inode *inode, char *list, size_t list_size,
18 const char *name, size_t name_len) 16 const char *name, size_t name_len)
19{ 17{
20 const size_t prefix_len = sizeof(XATTR_USER_PREFIX)-1; 18 const size_t prefix_len = XATTR_USER_PREFIX_LEN;
21 const size_t total_len = prefix_len + name_len + 1; 19 const size_t total_len = prefix_len + name_len + 1;
22 20
23 if (!test_opt(inode->i_sb, XATTR_USER)) 21 if (!test_opt(inode->i_sb, XATTR_USER))
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 8ca3bfd72427..2eea96ec78ed 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -272,7 +272,7 @@ static void free_rb_tree_fname(struct rb_root *root)
272 272
273 while (n) { 273 while (n) {
274 /* Do the node's children first */ 274 /* Do the node's children first */
275 if ((n)->rb_left) { 275 if (n->rb_left) {
276 n = n->rb_left; 276 n = n->rb_left;
277 continue; 277 continue;
278 } 278 }
@@ -301,24 +301,18 @@ static void free_rb_tree_fname(struct rb_root *root)
301 parent->rb_right = NULL; 301 parent->rb_right = NULL;
302 n = parent; 302 n = parent;
303 } 303 }
304 root->rb_node = NULL;
305} 304}
306 305
307 306
308static struct dir_private_info *create_dir_info(loff_t pos) 307static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
309{ 308{
310 struct dir_private_info *p; 309 struct dir_private_info *p;
311 310
312 p = kmalloc(sizeof(struct dir_private_info), GFP_KERNEL); 311 p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
313 if (!p) 312 if (!p)
314 return NULL; 313 return NULL;
315 p->root.rb_node = NULL;
316 p->curr_node = NULL;
317 p->extra_fname = NULL;
318 p->last_pos = 0;
319 p->curr_hash = pos2maj_hash(pos); 314 p->curr_hash = pos2maj_hash(pos);
320 p->curr_minor_hash = pos2min_hash(pos); 315 p->curr_minor_hash = pos2min_hash(pos);
321 p->next_hash = 0;
322 return p; 316 return p;
323} 317}
324 318
@@ -433,7 +427,7 @@ static int ext3_dx_readdir(struct file * filp,
433 int ret; 427 int ret;
434 428
435 if (!info) { 429 if (!info) {
436 info = create_dir_info(filp->f_pos); 430 info = ext3_htree_create_dir_info(filp->f_pos);
437 if (!info) 431 if (!info)
438 return -ENOMEM; 432 return -ENOMEM;
439 filp->private_data = info; 433 filp->private_data = info;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 77126821b2e9..47b678d73e7a 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -669,6 +669,14 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
669 if (IS_ERR(inode)) 669 if (IS_ERR(inode))
670 goto iget_failed; 670 goto iget_failed;
671 671
672 /*
673 * If the orphan has i_nlink > 0 then it should be able to be
674 * truncated, otherwise it won't be removed from the orphan list
675 * during processing and an infinite loop will result.
676 */
677 if (inode->i_nlink && !ext3_can_truncate(inode))
678 goto bad_orphan;
679
672 if (NEXT_ORPHAN(inode) > max_ino) 680 if (NEXT_ORPHAN(inode) > max_ino)
673 goto bad_orphan; 681 goto bad_orphan;
674 brelse(bitmap_bh); 682 brelse(bitmap_bh);
@@ -690,6 +698,7 @@ bad_orphan:
690 printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", 698 printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
691 NEXT_ORPHAN(inode)); 699 NEXT_ORPHAN(inode));
692 printk(KERN_NOTICE "max_ino=%lu\n", max_ino); 700 printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
701 printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
693 /* Avoid freeing blocks if we got a bad deleted inode */ 702 /* Avoid freeing blocks if we got a bad deleted inode */
694 if (inode->i_nlink == 0) 703 if (inode->i_nlink == 0)
695 inode->i_blocks = 0; 704 inode->i_blocks = 0;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 6ae4ecf3ce40..3bf07d70b914 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2127,7 +2127,21 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
2127 2127
2128 if (this_bh) { 2128 if (this_bh) {
2129 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata"); 2129 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2130 ext3_journal_dirty_metadata(handle, this_bh); 2130
2131 /*
2132 * The buffer head should have an attached journal head at this
2133 * point. However, if the data is corrupted and an indirect
2134 * block pointed to itself, it would have been detached when
2135 * the block was cleared. Check for this instead of OOPSing.
2136 */
2137 if (bh2jh(this_bh))
2138 ext3_journal_dirty_metadata(handle, this_bh);
2139 else
2140 ext3_error(inode->i_sb, "ext3_free_data",
2141 "circular indirect block detected, "
2142 "inode=%lu, block=%llu",
2143 inode->i_ino,
2144 (unsigned long long)this_bh->b_blocknr);
2131 } 2145 }
2132} 2146}
2133 2147
@@ -2253,6 +2267,19 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
2253 } 2267 }
2254} 2268}
2255 2269
2270int ext3_can_truncate(struct inode *inode)
2271{
2272 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2273 return 0;
2274 if (S_ISREG(inode->i_mode))
2275 return 1;
2276 if (S_ISDIR(inode->i_mode))
2277 return 1;
2278 if (S_ISLNK(inode->i_mode))
2279 return !ext3_inode_is_fast_symlink(inode);
2280 return 0;
2281}
2282
2256/* 2283/*
2257 * ext3_truncate() 2284 * ext3_truncate()
2258 * 2285 *
@@ -2297,12 +2324,7 @@ void ext3_truncate(struct inode *inode)
2297 unsigned blocksize = inode->i_sb->s_blocksize; 2324 unsigned blocksize = inode->i_sb->s_blocksize;
2298 struct page *page; 2325 struct page *page;
2299 2326
2300 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 2327 if (!ext3_can_truncate(inode))
2301 S_ISLNK(inode->i_mode)))
2302 return;
2303 if (ext3_inode_is_fast_symlink(inode))
2304 return;
2305 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2306 return; 2328 return;
2307 2329
2308 /* 2330 /*
@@ -2513,6 +2535,16 @@ static int __ext3_get_inode_loc(struct inode *inode,
2513 } 2535 }
2514 if (!buffer_uptodate(bh)) { 2536 if (!buffer_uptodate(bh)) {
2515 lock_buffer(bh); 2537 lock_buffer(bh);
2538
2539 /*
2540 * If the buffer has the write error flag, we have failed
2541 * to write out another inode in the same block. In this
2542 * case, we don't have to read the block because we may
2543 * read the old inode data successfully.
2544 */
2545 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
2546 set_buffer_uptodate(bh);
2547
2516 if (buffer_uptodate(bh)) { 2548 if (buffer_uptodate(bh)) {
2517 /* someone brought it uptodate while we waited */ 2549 /* someone brought it uptodate while we waited */
2518 unlock_buffer(bh); 2550 unlock_buffer(bh);
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 0b8cf80154f1..de13e919cd81 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -240,13 +240,13 @@ static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
240{ 240{
241 unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) - 241 unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
242 EXT3_DIR_REC_LEN(2) - infosize; 242 EXT3_DIR_REC_LEN(2) - infosize;
243 return 0? 20: entry_space / sizeof(struct dx_entry); 243 return entry_space / sizeof(struct dx_entry);
244} 244}
245 245
246static inline unsigned dx_node_limit (struct inode *dir) 246static inline unsigned dx_node_limit (struct inode *dir)
247{ 247{
248 unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0); 248 unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
249 return 0? 22: entry_space / sizeof(struct dx_entry); 249 return entry_space / sizeof(struct dx_entry);
250} 250}
251 251
252/* 252/*
@@ -991,19 +991,21 @@ static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
991 de = (struct ext3_dir_entry_2 *) bh->b_data; 991 de = (struct ext3_dir_entry_2 *) bh->b_data;
992 top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize - 992 top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
993 EXT3_DIR_REC_LEN(0)); 993 EXT3_DIR_REC_LEN(0));
994 for (; de < top; de = ext3_next_entry(de)) 994 for (; de < top; de = ext3_next_entry(de)) {
995 if (ext3_match (namelen, name, de)) { 995 int off = (block << EXT3_BLOCK_SIZE_BITS(sb))
996 if (!ext3_check_dir_entry("ext3_find_entry", 996 + ((char *) de - bh->b_data);
997 dir, de, bh, 997
998 (block<<EXT3_BLOCK_SIZE_BITS(sb)) 998 if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) {
999 +((char *)de - bh->b_data))) { 999 brelse(bh);
1000 brelse (bh);
1001 *err = ERR_BAD_DX_DIR; 1000 *err = ERR_BAD_DX_DIR;
1002 goto errout; 1001 goto errout;
1003 } 1002 }
1004 *res_dir = de; 1003
1005 dx_release (frames); 1004 if (ext3_match(namelen, name, de)) {
1006 return bh; 1005 *res_dir = de;
1006 dx_release(frames);
1007 return bh;
1008 }
1007 } 1009 }
1008 brelse (bh); 1010 brelse (bh);
1009 /* Check to see if we should continue to search */ 1011 /* Check to see if we should continue to search */
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 2845425077e8..615788c6843a 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -842,7 +842,7 @@ static int parse_options (char *options, struct super_block *sb,
842 int data_opt = 0; 842 int data_opt = 0;
843 int option; 843 int option;
844#ifdef CONFIG_QUOTA 844#ifdef CONFIG_QUOTA
845 int qtype; 845 int qtype, qfmt;
846 char *qname; 846 char *qname;
847#endif 847#endif
848 848
@@ -1018,9 +1018,11 @@ static int parse_options (char *options, struct super_block *sb,
1018 case Opt_grpjquota: 1018 case Opt_grpjquota:
1019 qtype = GRPQUOTA; 1019 qtype = GRPQUOTA;
1020set_qf_name: 1020set_qf_name:
1021 if (sb_any_quota_enabled(sb)) { 1021 if ((sb_any_quota_enabled(sb) ||
1022 sb_any_quota_suspended(sb)) &&
1023 !sbi->s_qf_names[qtype]) {
1022 printk(KERN_ERR 1024 printk(KERN_ERR
1023 "EXT3-fs: Cannot change journalled " 1025 "EXT3-fs: Cannot change journaled "
1024 "quota options when quota turned on.\n"); 1026 "quota options when quota turned on.\n");
1025 return 0; 1027 return 0;
1026 } 1028 }
@@ -1056,9 +1058,11 @@ set_qf_name:
1056 case Opt_offgrpjquota: 1058 case Opt_offgrpjquota:
1057 qtype = GRPQUOTA; 1059 qtype = GRPQUOTA;
1058clear_qf_name: 1060clear_qf_name:
1059 if (sb_any_quota_enabled(sb)) { 1061 if ((sb_any_quota_enabled(sb) ||
1062 sb_any_quota_suspended(sb)) &&
1063 sbi->s_qf_names[qtype]) {
1060 printk(KERN_ERR "EXT3-fs: Cannot change " 1064 printk(KERN_ERR "EXT3-fs: Cannot change "
1061 "journalled quota options when " 1065 "journaled quota options when "
1062 "quota turned on.\n"); 1066 "quota turned on.\n");
1063 return 0; 1067 return 0;
1064 } 1068 }
@@ -1069,10 +1073,20 @@ clear_qf_name:
1069 sbi->s_qf_names[qtype] = NULL; 1073 sbi->s_qf_names[qtype] = NULL;
1070 break; 1074 break;
1071 case Opt_jqfmt_vfsold: 1075 case Opt_jqfmt_vfsold:
1072 sbi->s_jquota_fmt = QFMT_VFS_OLD; 1076 qfmt = QFMT_VFS_OLD;
1073 break; 1077 goto set_qf_format;
1074 case Opt_jqfmt_vfsv0: 1078 case Opt_jqfmt_vfsv0:
1075 sbi->s_jquota_fmt = QFMT_VFS_V0; 1079 qfmt = QFMT_VFS_V0;
1080set_qf_format:
1081 if ((sb_any_quota_enabled(sb) ||
1082 sb_any_quota_suspended(sb)) &&
1083 sbi->s_jquota_fmt != qfmt) {
1084 printk(KERN_ERR "EXT3-fs: Cannot change "
1085 "journaled quota options when "
1086 "quota turned on.\n");
1087 return 0;
1088 }
1089 sbi->s_jquota_fmt = qfmt;
1076 break; 1090 break;
1077 case Opt_quota: 1091 case Opt_quota:
1078 case Opt_usrquota: 1092 case Opt_usrquota:
@@ -1084,7 +1098,8 @@ clear_qf_name:
1084 set_opt(sbi->s_mount_opt, GRPQUOTA); 1098 set_opt(sbi->s_mount_opt, GRPQUOTA);
1085 break; 1099 break;
1086 case Opt_noquota: 1100 case Opt_noquota:
1087 if (sb_any_quota_enabled(sb)) { 1101 if (sb_any_quota_enabled(sb) ||
1102 sb_any_quota_suspended(sb)) {
1088 printk(KERN_ERR "EXT3-fs: Cannot change quota " 1103 printk(KERN_ERR "EXT3-fs: Cannot change quota "
1089 "options when quota turned on.\n"); 1104 "options when quota turned on.\n");
1090 return 0; 1105 return 0;
@@ -1169,14 +1184,14 @@ clear_qf_name:
1169 } 1184 }
1170 1185
1171 if (!sbi->s_jquota_fmt) { 1186 if (!sbi->s_jquota_fmt) {
1172 printk(KERN_ERR "EXT3-fs: journalled quota format " 1187 printk(KERN_ERR "EXT3-fs: journaled quota format "
1173 "not specified.\n"); 1188 "not specified.\n");
1174 return 0; 1189 return 0;
1175 } 1190 }
1176 } else { 1191 } else {
1177 if (sbi->s_jquota_fmt) { 1192 if (sbi->s_jquota_fmt) {
1178 printk(KERN_ERR "EXT3-fs: journalled quota format " 1193 printk(KERN_ERR "EXT3-fs: journaled quota format "
1179 "specified with no journalling " 1194 "specified with no journaling "
1180 "enabled.\n"); 1195 "enabled.\n");
1181 return 0; 1196 return 0;
1182 } 1197 }
@@ -1370,7 +1385,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
1370 int ret = ext3_quota_on_mount(sb, i); 1385 int ret = ext3_quota_on_mount(sb, i);
1371 if (ret < 0) 1386 if (ret < 0)
1372 printk(KERN_ERR 1387 printk(KERN_ERR
1373 "EXT3-fs: Cannot turn on journalled " 1388 "EXT3-fs: Cannot turn on journaled "
1374 "quota: error %d\n", ret); 1389 "quota: error %d\n", ret);
1375 } 1390 }
1376 } 1391 }
@@ -2712,7 +2727,7 @@ static int ext3_release_dquot(struct dquot *dquot)
2712 2727
2713static int ext3_mark_dquot_dirty(struct dquot *dquot) 2728static int ext3_mark_dquot_dirty(struct dquot *dquot)
2714{ 2729{
2715 /* Are we journalling quotas? */ 2730 /* Are we journaling quotas? */
2716 if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] || 2731 if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
2717 EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) { 2732 EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
2718 dquot_mark_dquot_dirty(dquot); 2733 dquot_mark_dquot_dirty(dquot);
@@ -2759,23 +2774,42 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
2759 2774
2760 if (!test_opt(sb, QUOTA)) 2775 if (!test_opt(sb, QUOTA))
2761 return -EINVAL; 2776 return -EINVAL;
2762 /* Not journalling quota or remount? */ 2777 /* When remounting, no checks are needed and in fact, path is NULL */
2763 if ((!EXT3_SB(sb)->s_qf_names[USRQUOTA] && 2778 if (remount)
2764 !EXT3_SB(sb)->s_qf_names[GRPQUOTA]) || remount)
2765 return vfs_quota_on(sb, type, format_id, path, remount); 2779 return vfs_quota_on(sb, type, format_id, path, remount);
2780
2766 err = path_lookup(path, LOOKUP_FOLLOW, &nd); 2781 err = path_lookup(path, LOOKUP_FOLLOW, &nd);
2767 if (err) 2782 if (err)
2768 return err; 2783 return err;
2784
2769 /* Quotafile not on the same filesystem? */ 2785 /* Quotafile not on the same filesystem? */
2770 if (nd.path.mnt->mnt_sb != sb) { 2786 if (nd.path.mnt->mnt_sb != sb) {
2771 path_put(&nd.path); 2787 path_put(&nd.path);
2772 return -EXDEV; 2788 return -EXDEV;
2773 } 2789 }
2774 /* Quotafile not in fs root? */ 2790 /* Journaling quota? */
2775 if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode) 2791 if (EXT3_SB(sb)->s_qf_names[type]) {
2776 printk(KERN_WARNING 2792 /* Quotafile not of fs root? */
2777 "EXT3-fs: Quota file not on filesystem root. " 2793 if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
2778 "Journalled quota will not work.\n"); 2794 printk(KERN_WARNING
2795 "EXT3-fs: Quota file not on filesystem root. "
2796 "Journaled quota will not work.\n");
2797 }
2798
2799 /*
2800 * When we journal data on quota file, we have to flush journal to see
2801 * all updates to the file when we bypass pagecache...
2802 */
2803 if (ext3_should_journal_data(nd.path.dentry->d_inode)) {
2804 /*
2805 * We don't need to lock updates but journal_flush() could
2806 * otherwise be livelocked...
2807 */
2808 journal_lock_updates(EXT3_SB(sb)->s_journal);
2809 journal_flush(EXT3_SB(sb)->s_journal);
2810 journal_unlock_updates(EXT3_SB(sb)->s_journal);
2811 }
2812
2779 path_put(&nd.path); 2813 path_put(&nd.path);
2780 return vfs_quota_on(sb, type, format_id, path, remount); 2814 return vfs_quota_on(sb, type, format_id, path, remount);
2781} 2815}
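The quota-related parse_options() changes above mean that usrjquota=, grpjquota= and jqfmt= can no longer be changed (only repeated with the same values) while quota is enabled or merely suspended, and ext3_quota_on() now flushes the journal when the quota file itself is journaled. A hypothetical mount call that sets these options up front (device, mount point and quota file name are invented for the example):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* journaled user quota stored in "aquota.user", vfsv0 format */
	const char *opts = "usrjquota=aquota.user,jqfmt=vfsv0";

	if (mount("/dev/sdb1", "/mnt/data", "ext3", 0, opts) != 0) {
		perror("mount");
		return 1;
	}

	/* a later remount that tries to switch jqfmt while quota is turned
	 * on is now rejected with "Cannot change journaled quota options
	 * when quota turned on." */
	return 0;
}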
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index 821efaf2b94e..37b81097bdf2 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -15,7 +15,7 @@ static size_t
15ext3_xattr_security_list(struct inode *inode, char *list, size_t list_size, 15ext3_xattr_security_list(struct inode *inode, char *list, size_t list_size,
16 const char *name, size_t name_len) 16 const char *name, size_t name_len)
17{ 17{
18 const size_t prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1; 18 const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
19 const size_t total_len = prefix_len + name_len + 1; 19 const size_t total_len = prefix_len + name_len + 1;
20 20
21 21
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index 0327497a55ce..c7c41a410c4b 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -13,13 +13,11 @@
13#include <linux/ext3_fs.h> 13#include <linux/ext3_fs.h>
14#include "xattr.h" 14#include "xattr.h"
15 15
16#define XATTR_TRUSTED_PREFIX "trusted."
17
18static size_t 16static size_t
19ext3_xattr_trusted_list(struct inode *inode, char *list, size_t list_size, 17ext3_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
20 const char *name, size_t name_len) 18 const char *name, size_t name_len)
21{ 19{
22 const size_t prefix_len = sizeof(XATTR_TRUSTED_PREFIX)-1; 20 const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
23 const size_t total_len = prefix_len + name_len + 1; 21 const size_t total_len = prefix_len + name_len + 1;
24 22
25 if (!capable(CAP_SYS_ADMIN)) 23 if (!capable(CAP_SYS_ADMIN))
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index 1abd8f92c440..430fe63b31b3 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -12,13 +12,11 @@
12#include <linux/ext3_fs.h> 12#include <linux/ext3_fs.h>
13#include "xattr.h" 13#include "xattr.h"
14 14
15#define XATTR_USER_PREFIX "user."
16
17static size_t 15static size_t
18ext3_xattr_user_list(struct inode *inode, char *list, size_t list_size, 16ext3_xattr_user_list(struct inode *inode, char *list, size_t list_size,
19 const char *name, size_t name_len) 17 const char *name, size_t name_len)
20{ 18{
21 const size_t prefix_len = sizeof(XATTR_USER_PREFIX)-1; 19 const size_t prefix_len = XATTR_USER_PREFIX_LEN;
22 const size_t total_len = prefix_len + name_len + 1; 20 const size_t total_len = prefix_len + name_len + 1;
23 21
24 if (!test_opt(inode->i_sb, XATTR_USER)) 22 if (!test_opt(inode->i_sb, XATTR_USER))
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 34541d06e626..cd4a0162e10d 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -17,7 +17,6 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/time.h> 18#include <linux/time.h>
19#include <linux/msdos_fs.h> 19#include <linux/msdos_fs.h>
20#include <linux/dirent.h>
21#include <linux/smp_lock.h> 20#include <linux/smp_lock.h>
22#include <linux/buffer_head.h> 21#include <linux/buffer_head.h>
23#include <linux/compat.h> 22#include <linux/compat.h>
@@ -124,10 +123,11 @@ static inline int fat_get_entry(struct inode *dir, loff_t *pos,
124 * but ignore that right now. 123 * but ignore that right now.
125 * Ahem... Stack smashing in ring 0 isn't fun. Fixed. 124 * Ahem... Stack smashing in ring 0 isn't fun. Fixed.
126 */ 125 */
127static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int len, 126static int uni16_to_x8(unsigned char *ascii, const wchar_t *uni, int len,
128 int uni_xlate, struct nls_table *nls) 127 int uni_xlate, struct nls_table *nls)
129{ 128{
130 wchar_t *ip, ec; 129 const wchar_t *ip;
130 wchar_t ec;
131 unsigned char *op, nc; 131 unsigned char *op, nc;
132 int charlen; 132 int charlen;
133 int k; 133 int k;
@@ -167,6 +167,16 @@ static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int len,
167 return (op - ascii); 167 return (op - ascii);
168} 168}
169 169
170static inline int fat_uni_to_x8(struct msdos_sb_info *sbi, const wchar_t *uni,
171 unsigned char *buf, int size)
172{
173 if (sbi->options.utf8)
174 return utf8_wcstombs(buf, uni, size);
175 else
176 return uni16_to_x8(buf, uni, size, sbi->options.unicode_xlate,
177 sbi->nls_io);
178}
179
170static inline int 180static inline int
171fat_short2uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni) 181fat_short2uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni)
172{ 182{
@@ -227,6 +237,19 @@ fat_shortname2uni(struct nls_table *nls, unsigned char *buf, int buf_size,
227 return len; 237 return len;
228} 238}
229 239
240static inline int fat_name_match(struct msdos_sb_info *sbi,
241 const unsigned char *a, int a_len,
242 const unsigned char *b, int b_len)
243{
244 if (a_len != b_len)
245 return 0;
246
247 if (sbi->options.name_check != 's')
248 return !nls_strnicmp(sbi->nls_io, a, b, a_len);
249 else
250 return !memcmp(a, b, a_len);
251}
252
230enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME, PARSE_EOF, }; 253enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME, PARSE_EOF, };
231 254
232/** 255/**
@@ -302,6 +325,19 @@ parse_long:
302} 325}
303 326
304/* 327/*
328 * Maximum buffer size of short name.
329 * [(MSDOS_NAME + '.') * max one char + nul]
330 * For msdos style, ['.' (hidden) + MSDOS_NAME + '.' + nul]
331 */
332#define FAT_MAX_SHORT_SIZE ((MSDOS_NAME + 1) * NLS_MAX_CHARSET_SIZE + 1)
333/*
334 * Maximum buffer size of unicode chars from slots.
335 * [(max longname slots * 13 (size in a slot) + nul) * sizeof(wchar_t)]
336 */
337#define FAT_MAX_UNI_CHARS ((MSDOS_SLOTS - 1) * 13 + 1)
338#define FAT_MAX_UNI_SIZE (FAT_MAX_UNI_CHARS * sizeof(wchar_t))
339
340/*
305 * Return values: negative -> error, 0 -> not found, positive -> found, 341 * Return values: negative -> error, 0 -> not found, positive -> found,
306 * value is the total amount of slots, including the shortname entry. 342 * value is the total amount of slots, including the shortname entry.
307 */ 343 */
@@ -312,29 +348,20 @@ int fat_search_long(struct inode *inode, const unsigned char *name,
312 struct msdos_sb_info *sbi = MSDOS_SB(sb); 348 struct msdos_sb_info *sbi = MSDOS_SB(sb);
313 struct buffer_head *bh = NULL; 349 struct buffer_head *bh = NULL;
314 struct msdos_dir_entry *de; 350 struct msdos_dir_entry *de;
315 struct nls_table *nls_io = sbi->nls_io;
316 struct nls_table *nls_disk = sbi->nls_disk; 351 struct nls_table *nls_disk = sbi->nls_disk;
317 wchar_t bufuname[14];
318 unsigned char nr_slots; 352 unsigned char nr_slots;
319 int xlate_len; 353 wchar_t bufuname[14];
320 wchar_t *unicode = NULL; 354 wchar_t *unicode = NULL;
321 unsigned char work[MSDOS_NAME]; 355 unsigned char work[MSDOS_NAME];
322 unsigned char *bufname = NULL; 356 unsigned char bufname[FAT_MAX_SHORT_SIZE];
323 int uni_xlate = sbi->options.unicode_xlate;
324 int utf8 = sbi->options.utf8;
325 int anycase = (sbi->options.name_check != 's');
326 unsigned short opt_shortname = sbi->options.shortname; 357 unsigned short opt_shortname = sbi->options.shortname;
327 loff_t cpos = 0; 358 loff_t cpos = 0;
328 int chl, i, j, last_u, err; 359 int chl, i, j, last_u, err, len;
329
330 bufname = __getname();
331 if (!bufname)
332 return -ENOMEM;
333 360
334 err = -ENOENT; 361 err = -ENOENT;
335 while(1) { 362 while (1) {
336 if (fat_get_entry(inode, &cpos, &bh, &de) == -1) 363 if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
337 goto EODir; 364 goto end_of_dir;
338parse_record: 365parse_record:
339 nr_slots = 0; 366 nr_slots = 0;
340 if (de->name[0] == DELETED_FLAG) 367 if (de->name[0] == DELETED_FLAG)
@@ -353,7 +380,7 @@ parse_record:
353 else if (status == PARSE_NOT_LONGNAME) 380 else if (status == PARSE_NOT_LONGNAME)
354 goto parse_record; 381 goto parse_record;
355 else if (status == PARSE_EOF) 382 else if (status == PARSE_EOF)
356 goto EODir; 383 goto end_of_dir;
357 } 384 }
358 385
359 memcpy(work, de->name, sizeof(de->name)); 386 memcpy(work, de->name, sizeof(de->name));
@@ -394,30 +421,24 @@ parse_record:
394 if (!last_u) 421 if (!last_u)
395 continue; 422 continue;
396 423
424 /* Compare shortname */
397 bufuname[last_u] = 0x0000; 425 bufuname[last_u] = 0x0000;
398 xlate_len = utf8 426 len = fat_uni_to_x8(sbi, bufuname, bufname, sizeof(bufname));
399 ?utf8_wcstombs(bufname, bufuname, PATH_MAX) 427 if (fat_name_match(sbi, name, name_len, bufname, len))
400 :uni16_to_x8(bufname, bufuname, PATH_MAX, uni_xlate, nls_io); 428 goto found;
401 if (xlate_len == name_len)
402 if ((!anycase && !memcmp(name, bufname, xlate_len)) ||
403 (anycase && !nls_strnicmp(nls_io, name, bufname,
404 xlate_len)))
405 goto Found;
406 429
407 if (nr_slots) { 430 if (nr_slots) {
408 xlate_len = utf8 431 void *longname = unicode + FAT_MAX_UNI_CHARS;
409 ?utf8_wcstombs(bufname, unicode, PATH_MAX) 432 int size = PATH_MAX - FAT_MAX_UNI_SIZE;
410 :uni16_to_x8(bufname, unicode, PATH_MAX, uni_xlate, nls_io); 433
411 if (xlate_len != name_len) 434 /* Compare longname */
412 continue; 435 len = fat_uni_to_x8(sbi, unicode, longname, size);
413 if ((!anycase && !memcmp(name, bufname, xlate_len)) || 436 if (fat_name_match(sbi, name, name_len, longname, len))
414 (anycase && !nls_strnicmp(nls_io, name, bufname, 437 goto found;
415 xlate_len)))
416 goto Found;
417 } 438 }
418 } 439 }
419 440
420Found: 441found:
421 nr_slots++; /* include the de */ 442 nr_slots++; /* include the de */
422 sinfo->slot_off = cpos - nr_slots * sizeof(*de); 443 sinfo->slot_off = cpos - nr_slots * sizeof(*de);
423 sinfo->nr_slots = nr_slots; 444 sinfo->nr_slots = nr_slots;
@@ -425,9 +446,7 @@ Found:
425 sinfo->bh = bh; 446 sinfo->bh = bh;
426 sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de); 447 sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
427 err = 0; 448 err = 0;
428EODir: 449end_of_dir:
429 if (bufname)
430 __putname(bufname);
431 if (unicode) 450 if (unicode)
432 __putname(unicode); 451 __putname(unicode);
433 452
@@ -453,23 +472,20 @@ static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent,
453 struct msdos_sb_info *sbi = MSDOS_SB(sb); 472 struct msdos_sb_info *sbi = MSDOS_SB(sb);
454 struct buffer_head *bh; 473 struct buffer_head *bh;
455 struct msdos_dir_entry *de; 474 struct msdos_dir_entry *de;
456 struct nls_table *nls_io = sbi->nls_io;
457 struct nls_table *nls_disk = sbi->nls_disk; 475 struct nls_table *nls_disk = sbi->nls_disk;
458 unsigned char long_slots; 476 unsigned char nr_slots;
459 const char *fill_name;
460 int fill_len;
461 wchar_t bufuname[14]; 477 wchar_t bufuname[14];
462 wchar_t *unicode = NULL; 478 wchar_t *unicode = NULL;
463 unsigned char c, work[MSDOS_NAME], bufname[56], *ptname = bufname; 479 unsigned char c, work[MSDOS_NAME];
464 unsigned long lpos, dummy, *furrfu = &lpos; 480 unsigned char bufname[FAT_MAX_SHORT_SIZE], *ptname = bufname;
465 int uni_xlate = sbi->options.unicode_xlate; 481 unsigned short opt_shortname = sbi->options.shortname;
466 int isvfat = sbi->options.isvfat; 482 int isvfat = sbi->options.isvfat;
467 int utf8 = sbi->options.utf8;
468 int nocase = sbi->options.nocase; 483 int nocase = sbi->options.nocase;
469 unsigned short opt_shortname = sbi->options.shortname; 484 const char *fill_name = NULL;
470 unsigned long inum; 485 unsigned long inum;
471 int chi, chl, i, i2, j, last, last_u, dotoffset = 0; 486 unsigned long lpos, dummy, *furrfu = &lpos;
472 loff_t cpos; 487 loff_t cpos;
488 int chi, chl, i, i2, j, last, last_u, dotoffset = 0, fill_len = 0;
473 int ret = 0; 489 int ret = 0;
474 490
475 lock_super(sb); 491 lock_super(sb);
@@ -489,43 +505,58 @@ static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent,
489 cpos = 0; 505 cpos = 0;
490 } 506 }
491 } 507 }
492 if (cpos & (sizeof(struct msdos_dir_entry)-1)) { 508 if (cpos & (sizeof(struct msdos_dir_entry) - 1)) {
493 ret = -ENOENT; 509 ret = -ENOENT;
494 goto out; 510 goto out;
495 } 511 }
496 512
497 bh = NULL; 513 bh = NULL;
498GetNew: 514get_new:
499 if (fat_get_entry(inode, &cpos, &bh, &de) == -1) 515 if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
500 goto EODir; 516 goto end_of_dir;
501parse_record: 517parse_record:
502 long_slots = 0; 518 nr_slots = 0;
503 /* Check for long filename entry */ 519 /*
504 if (isvfat) { 520 * Check for long filename entry, but if short_only, we don't
521 * need to parse long filename.
522 */
523 if (isvfat && !short_only) {
505 if (de->name[0] == DELETED_FLAG) 524 if (de->name[0] == DELETED_FLAG)
506 goto RecEnd; 525 goto record_end;
507 if (de->attr != ATTR_EXT && (de->attr & ATTR_VOLUME)) 526 if (de->attr != ATTR_EXT && (de->attr & ATTR_VOLUME))
508 goto RecEnd; 527 goto record_end;
509 if (de->attr != ATTR_EXT && IS_FREE(de->name)) 528 if (de->attr != ATTR_EXT && IS_FREE(de->name))
510 goto RecEnd; 529 goto record_end;
511 } else { 530 } else {
512 if ((de->attr & ATTR_VOLUME) || IS_FREE(de->name)) 531 if ((de->attr & ATTR_VOLUME) || IS_FREE(de->name))
513 goto RecEnd; 532 goto record_end;
514 } 533 }
515 534
516 if (isvfat && de->attr == ATTR_EXT) { 535 if (isvfat && de->attr == ATTR_EXT) {
517 int status = fat_parse_long(inode, &cpos, &bh, &de, 536 int status = fat_parse_long(inode, &cpos, &bh, &de,
518 &unicode, &long_slots); 537 &unicode, &nr_slots);
519 if (status < 0) { 538 if (status < 0) {
520 filp->f_pos = cpos; 539 filp->f_pos = cpos;
521 ret = status; 540 ret = status;
522 goto out; 541 goto out;
523 } else if (status == PARSE_INVALID) 542 } else if (status == PARSE_INVALID)
524 goto RecEnd; 543 goto record_end;
525 else if (status == PARSE_NOT_LONGNAME) 544 else if (status == PARSE_NOT_LONGNAME)
526 goto parse_record; 545 goto parse_record;
527 else if (status == PARSE_EOF) 546 else if (status == PARSE_EOF)
528 goto EODir; 547 goto end_of_dir;
548
549 if (nr_slots) {
550 void *longname = unicode + FAT_MAX_UNI_CHARS;
551 int size = PATH_MAX - FAT_MAX_UNI_SIZE;
552 int len = fat_uni_to_x8(sbi, unicode, longname, size);
553
554 fill_name = longname;
555 fill_len = len;
556 /* !both && !short_only, so we don't need shortname. */
557 if (!both)
558 goto start_filldir;
559 }
529 } 560 }
530 561
531 if (sbi->options.dotsOK) { 562 if (sbi->options.dotsOK) {
@@ -587,12 +618,32 @@ parse_record:
587 } 618 }
588 } 619 }
589 if (!last) 620 if (!last)
590 goto RecEnd; 621 goto record_end;
591 622
592 i = last + dotoffset; 623 i = last + dotoffset;
593 j = last_u; 624 j = last_u;
594 625
595 lpos = cpos - (long_slots+1)*sizeof(struct msdos_dir_entry); 626 if (isvfat) {
627 bufuname[j] = 0x0000;
628 i = fat_uni_to_x8(sbi, bufuname, bufname, sizeof(bufname));
629 }
630 if (nr_slots) {
631 /* hack for fat_ioctl_filldir() */
632 struct fat_ioctl_filldir_callback *p = dirent;
633
634 p->longname = fill_name;
635 p->long_len = fill_len;
636 p->shortname = bufname;
637 p->short_len = i;
638 fill_name = NULL;
639 fill_len = 0;
640 } else {
641 fill_name = bufname;
642 fill_len = i;
643 }
644
645start_filldir:
646 lpos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
596 if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) 647 if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME))
597 inum = inode->i_ino; 648 inum = inode->i_ino;
598 else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) { 649 else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
@@ -607,49 +658,17 @@ parse_record:
607 inum = iunique(sb, MSDOS_ROOT_INO); 658 inum = iunique(sb, MSDOS_ROOT_INO);
608 } 659 }
609 660
610 if (isvfat) {
611 bufuname[j] = 0x0000;
612 i = utf8 ? utf8_wcstombs(bufname, bufuname, sizeof(bufname))
613 : uni16_to_x8(bufname, bufuname, sizeof(bufname), uni_xlate, nls_io);
614 }
615
616 fill_name = bufname;
617 fill_len = i;
618 if (!short_only && long_slots) {
619 /* convert the unicode long name. 261 is maximum size
620 * of unicode buffer. (13 * slots + nul) */
621 void *longname = unicode + 261;
622 int buf_size = PATH_MAX - (261 * sizeof(unicode[0]));
623 int long_len = utf8
624 ? utf8_wcstombs(longname, unicode, buf_size)
625 : uni16_to_x8(longname, unicode, buf_size, uni_xlate, nls_io);
626
627 if (!both) {
628 fill_name = longname;
629 fill_len = long_len;
630 } else {
631 /* hack for fat_ioctl_filldir() */
632 struct fat_ioctl_filldir_callback *p = dirent;
633
634 p->longname = longname;
635 p->long_len = long_len;
636 p->shortname = bufname;
637 p->short_len = i;
638 fill_name = NULL;
639 fill_len = 0;
640 }
641 }
642 if (filldir(dirent, fill_name, fill_len, *furrfu, inum, 661 if (filldir(dirent, fill_name, fill_len, *furrfu, inum,
643 (de->attr & ATTR_DIR) ? DT_DIR : DT_REG) < 0) 662 (de->attr & ATTR_DIR) ? DT_DIR : DT_REG) < 0)
644 goto FillFailed; 663 goto fill_failed;
645 664
646RecEnd: 665record_end:
647 furrfu = &lpos; 666 furrfu = &lpos;
648 filp->f_pos = cpos; 667 filp->f_pos = cpos;
649 goto GetNew; 668 goto get_new;
650EODir: 669end_of_dir:
651 filp->f_pos = cpos; 670 filp->f_pos = cpos;
652FillFailed: 671fill_failed:
653 brelse(bh); 672 brelse(bh);
654 if (unicode) 673 if (unicode)
655 __putname(unicode); 674 __putname(unicode);
@@ -715,7 +734,7 @@ efault: \
715 return -EFAULT; \ 734 return -EFAULT; \
716} 735}
717 736
718FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, dirent) 737FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, __fat_dirent)
719 738
720static int fat_ioctl_readdir(struct inode *inode, struct file *filp, 739static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
721 void __user *dirent, filldir_t filldir, 740 void __user *dirent, filldir_t filldir,
@@ -741,7 +760,7 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
741static int fat_dir_ioctl(struct inode *inode, struct file *filp, 760static int fat_dir_ioctl(struct inode *inode, struct file *filp,
742 unsigned int cmd, unsigned long arg) 761 unsigned int cmd, unsigned long arg)
743{ 762{
744 struct dirent __user *d1 = (struct dirent __user *)arg; 763 struct __fat_dirent __user *d1 = (struct __fat_dirent __user *)arg;
745 int short_only, both; 764 int short_only, both;
746 765
747 switch (cmd) { 766 switch (cmd) {
@@ -757,7 +776,7 @@ static int fat_dir_ioctl(struct inode *inode, struct file *filp,
757 return fat_generic_ioctl(inode, filp, cmd, arg); 776 return fat_generic_ioctl(inode, filp, cmd, arg);
758 } 777 }
759 778
760 if (!access_ok(VERIFY_WRITE, d1, sizeof(struct dirent[2]))) 779 if (!access_ok(VERIFY_WRITE, d1, sizeof(struct __fat_dirent[2])))
761 return -EFAULT; 780 return -EFAULT;
762 /* 781 /*
763 * Yes, we don't need this put_user() absolutely. However old 782 * Yes, we don't need this put_user() absolutely. However old
@@ -1082,7 +1101,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
1082 goto error_free; 1101 goto error_free;
1083 } 1102 }
1084 1103
1085 fat_date_unix2dos(ts->tv_sec, &time, &date); 1104 fat_date_unix2dos(ts->tv_sec, &time, &date, sbi->options.tz_utc);
1086 1105
1087 de = (struct msdos_dir_entry *)bhs[0]->b_data; 1106 de = (struct msdos_dir_entry *)bhs[0]->b_data;
1088 /* filling the new directory slots ("." and ".." entries) */ 1107 /* filling the new directory slots ("." and ".." entries) */
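fat_dir_ioctl() above now operates on struct __fat_dirent rather than the generic struct dirent it used to pull in from <linux/dirent.h>. A sketch of the userspace side of VFAT_IOCTL_READDIR_BOTH under the new type, assuming the two-element layout this ioctl has always used (short name in element 0, long name in element 1) and that __fat_dirent keeps the d_ino/d_off/d_reclen/d_name fields of the old dirent; check the installed msdos_fs.h before relying on this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>	/* VFAT_IOCTL_READDIR_BOTH, struct __fat_dirent */

int main(void)
{
	struct __fat_dirent de[2];	/* [0] = short name, [1] = long name */
	int fd = open("/mnt/usb", O_RDONLY);

	if (fd < 0)
		return 1;

	/* the ioctl returns one entry per call; 0 means end of directory */
	while (ioctl(fd, VFAT_IOCTL_READDIR_BOTH, de) > 0) {
		/* a zero d_reclen in de[1] means the entry has no long name */
		printf("%-12s %s\n", de[0].d_name,
		       de[1].d_reclen ? de[1].d_name : "");
	}

	close(fd);
	return 0;
}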
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 46a4508ffd2e..23676f9d79ce 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -382,17 +382,20 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
382 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) 382 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
383 & ~((loff_t)sbi->cluster_size - 1)) >> 9; 383 & ~((loff_t)sbi->cluster_size - 1)) >> 9;
384 inode->i_mtime.tv_sec = 384 inode->i_mtime.tv_sec =
385 date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date)); 385 date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date),
386 sbi->options.tz_utc);
386 inode->i_mtime.tv_nsec = 0; 387 inode->i_mtime.tv_nsec = 0;
387 if (sbi->options.isvfat) { 388 if (sbi->options.isvfat) {
388 int secs = de->ctime_cs / 100; 389 int secs = de->ctime_cs / 100;
389 int csecs = de->ctime_cs % 100; 390 int csecs = de->ctime_cs % 100;
390 inode->i_ctime.tv_sec = 391 inode->i_ctime.tv_sec =
391 date_dos2unix(le16_to_cpu(de->ctime), 392 date_dos2unix(le16_to_cpu(de->ctime),
392 le16_to_cpu(de->cdate)) + secs; 393 le16_to_cpu(de->cdate),
394 sbi->options.tz_utc) + secs;
393 inode->i_ctime.tv_nsec = csecs * 10000000; 395 inode->i_ctime.tv_nsec = csecs * 10000000;
394 inode->i_atime.tv_sec = 396 inode->i_atime.tv_sec =
395 date_dos2unix(0, le16_to_cpu(de->adate)); 397 date_dos2unix(0, le16_to_cpu(de->adate),
398 sbi->options.tz_utc);
396 inode->i_atime.tv_nsec = 0; 399 inode->i_atime.tv_nsec = 0;
397 } else 400 } else
398 inode->i_ctime = inode->i_atime = inode->i_mtime; 401 inode->i_ctime = inode->i_atime = inode->i_mtime;
@@ -591,11 +594,14 @@ retry:
591 raw_entry->attr = fat_attr(inode); 594 raw_entry->attr = fat_attr(inode);
592 raw_entry->start = cpu_to_le16(MSDOS_I(inode)->i_logstart); 595 raw_entry->start = cpu_to_le16(MSDOS_I(inode)->i_logstart);
593 raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16); 596 raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16);
594 fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time, &raw_entry->date); 597 fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time,
598 &raw_entry->date, sbi->options.tz_utc);
595 if (sbi->options.isvfat) { 599 if (sbi->options.isvfat) {
596 __le16 atime; 600 __le16 atime;
597 fat_date_unix2dos(inode->i_ctime.tv_sec,&raw_entry->ctime,&raw_entry->cdate); 601 fat_date_unix2dos(inode->i_ctime.tv_sec, &raw_entry->ctime,
598 fat_date_unix2dos(inode->i_atime.tv_sec,&atime,&raw_entry->adate); 602 &raw_entry->cdate, sbi->options.tz_utc);
603 fat_date_unix2dos(inode->i_atime.tv_sec, &atime,
604 &raw_entry->adate, sbi->options.tz_utc);
599 raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 + 605 raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 +
600 inode->i_ctime.tv_nsec / 10000000; 606 inode->i_ctime.tv_nsec / 10000000;
601 } 607 }
@@ -836,6 +842,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
836 } 842 }
837 if (sbi->options.flush) 843 if (sbi->options.flush)
838 seq_puts(m, ",flush"); 844 seq_puts(m, ",flush");
845 if (opts->tz_utc)
846 seq_puts(m, ",tz=UTC");
839 847
840 return 0; 848 return 0;
841} 849}
@@ -848,7 +856,7 @@ enum {
848 Opt_charset, Opt_shortname_lower, Opt_shortname_win95, 856 Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
849 Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes, 857 Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
850 Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes, 858 Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
851 Opt_obsolate, Opt_flush, Opt_err, 859 Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_err,
852}; 860};
853 861
854static match_table_t fat_tokens = { 862static match_table_t fat_tokens = {
@@ -883,6 +891,7 @@ static match_table_t fat_tokens = {
883 {Opt_obsolate, "cvf_options=%100s"}, 891 {Opt_obsolate, "cvf_options=%100s"},
884 {Opt_obsolate, "posix"}, 892 {Opt_obsolate, "posix"},
885 {Opt_flush, "flush"}, 893 {Opt_flush, "flush"},
894 {Opt_tz_utc, "tz=UTC"},
886 {Opt_err, NULL}, 895 {Opt_err, NULL},
887}; 896};
888static match_table_t msdos_tokens = { 897static match_table_t msdos_tokens = {
@@ -947,10 +956,11 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
947 opts->utf8 = opts->unicode_xlate = 0; 956 opts->utf8 = opts->unicode_xlate = 0;
948 opts->numtail = 1; 957 opts->numtail = 1;
949 opts->usefree = opts->nocase = 0; 958 opts->usefree = opts->nocase = 0;
959 opts->tz_utc = 0;
950 *debug = 0; 960 *debug = 0;
951 961
952 if (!options) 962 if (!options)
953 return 0; 963 goto out;
954 964
955 while ((p = strsep(&options, ",")) != NULL) { 965 while ((p = strsep(&options, ",")) != NULL) {
956 int token; 966 int token;
@@ -1036,6 +1046,9 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
1036 case Opt_flush: 1046 case Opt_flush:
1037 opts->flush = 1; 1047 opts->flush = 1;
1038 break; 1048 break;
1049 case Opt_tz_utc:
1050 opts->tz_utc = 1;
1051 break;
1039 1052
1040 /* msdos specific */ 1053 /* msdos specific */
1041 case Opt_dots: 1054 case Opt_dots:
@@ -1104,10 +1117,13 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
1104 return -EINVAL; 1117 return -EINVAL;
1105 } 1118 }
1106 } 1119 }
1120
1121out:
1107 /* UTF-8 doesn't provide FAT semantics */ 1122 /* UTF-8 doesn't provide FAT semantics */
1108 if (!strcmp(opts->iocharset, "utf8")) { 1123 if (!strcmp(opts->iocharset, "utf8")) {
1109 printk(KERN_ERR "FAT: utf8 is not a recommended IO charset" 1124 printk(KERN_ERR "FAT: utf8 is not a recommended IO charset"
1110 " for FAT filesystems, filesystem will be case sensitive!\n"); 1125 " for FAT filesystems, filesystem will be "
1126 "case sensitive!\n");
1111 } 1127 }
1112 1128
1113 /* If user doesn't specify allow_utime, it's initialized from dmask. */ 1129 /* If user doesn't specify allow_utime, it's initialized from dmask. */
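The new tz=UTC option parsed above makes the timestamp conversion helpers (changed in fs/fat/misc.c below) skip the sys_tz adjustment, which is useful for media written by devices that keep their clock in UTC. A hypothetical mount call using it (device and mount point invented for the example):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* interpret on-disk FAT timestamps as UTC instead of local time */
	if (mount("/dev/sdc1", "/mnt/camera", "vfat", 0, "tz=UTC") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}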
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 61f23511eacf..79fb98ad36d4 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -142,7 +142,7 @@ static int day_n[] = {
142}; 142};
143 143
144/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */ 144/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */
145int date_dos2unix(unsigned short time, unsigned short date) 145int date_dos2unix(unsigned short time, unsigned short date, int tz_utc)
146{ 146{
147 int month, year, secs; 147 int month, year, secs;
148 148
@@ -156,16 +156,18 @@ int date_dos2unix(unsigned short time, unsigned short date)
156 ((date & 31)-1+day_n[month]+(year/4)+year*365-((year & 3) == 0 && 156 ((date & 31)-1+day_n[month]+(year/4)+year*365-((year & 3) == 0 &&
157 month < 2 ? 1 : 0)+3653); 157 month < 2 ? 1 : 0)+3653);
158 /* days since 1.1.70 plus 80's leap day */ 158 /* days since 1.1.70 plus 80's leap day */
159 secs += sys_tz.tz_minuteswest*60; 159 if (!tz_utc)
160 secs += sys_tz.tz_minuteswest*60;
160 return secs; 161 return secs;
161} 162}
162 163
163/* Convert linear UNIX date to a MS-DOS time/date pair. */ 164/* Convert linear UNIX date to a MS-DOS time/date pair. */
164void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date) 165void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date, int tz_utc)
165{ 166{
166 int day, year, nl_day, month; 167 int day, year, nl_day, month;
167 168
168 unix_date -= sys_tz.tz_minuteswest*60; 169 if (!tz_utc)
170 unix_date -= sys_tz.tz_minuteswest*60;
169 171
170 /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */ 172 /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
171 if (unix_date < 315532800) 173 if (unix_date < 315532800)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 330a7d782591..9679fcbdeaa0 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -125,13 +125,16 @@ static int dupfd(struct file *file, unsigned int start, int cloexec)
125 return fd; 125 return fd;
126} 126}
127 127
128asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd) 128asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
129{ 129{
130 int err = -EBADF; 130 int err = -EBADF;
131 struct file * file, *tofree; 131 struct file * file, *tofree;
132 struct files_struct * files = current->files; 132 struct files_struct * files = current->files;
133 struct fdtable *fdt; 133 struct fdtable *fdt;
134 134
135 if ((flags & ~O_CLOEXEC) != 0)
136 return -EINVAL;
137
135 spin_lock(&files->file_lock); 138 spin_lock(&files->file_lock);
136 if (!(file = fcheck(oldfd))) 139 if (!(file = fcheck(oldfd)))
137 goto out_unlock; 140 goto out_unlock;
@@ -163,7 +166,10 @@ asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
163 166
164 rcu_assign_pointer(fdt->fd[newfd], file); 167 rcu_assign_pointer(fdt->fd[newfd], file);
165 FD_SET(newfd, fdt->open_fds); 168 FD_SET(newfd, fdt->open_fds);
166 FD_CLR(newfd, fdt->close_on_exec); 169 if (flags & O_CLOEXEC)
170 FD_SET(newfd, fdt->close_on_exec);
171 else
172 FD_CLR(newfd, fdt->close_on_exec);
167 spin_unlock(&files->file_lock); 173 spin_unlock(&files->file_lock);
168 174
169 if (tofree) 175 if (tofree)
@@ -181,6 +187,11 @@ out_fput:
181 goto out; 187 goto out;
182} 188}
183 189
190asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
191{
192 return sys_dup3(oldfd, newfd, 0);
193}
194
184asmlinkage long sys_dup(unsigned int fildes) 195asmlinkage long sys_dup(unsigned int fildes)
185{ 196{
186 int ret = -EBADF; 197 int ret = -EBADF;
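With the fs/fcntl.c hunks above, dup2() becomes a thin wrapper around the new dup3() syscall; dup3() accepts only O_CLOEXEC in its flags argument and sets or clears the close-on-exec bit under files->file_lock together with installing the descriptor, so there is no window in which the duplicate can leak across an exec(). A userspace sketch, assuming headers that already define SYS_dup3 for the new syscall number:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Duplicate stdout onto fd 100 with close-on-exec set atomically. */
	int fd = syscall(SYS_dup3, STDOUT_FILENO, 100, O_CLOEXEC);

	if (fd < 0) {
		perror("dup3");
		return 1;
	}
	printf("fd %d FD_CLOEXEC=%d\n", fd,
	       (fcntl(fd, F_GETFD) & FD_CLOEXEC) != 0);
	return 0;
}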
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 2060bf06b906..51d0035ff07e 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -97,7 +97,7 @@ void fuse_invalidate_attr(struct inode *inode)
97 * timeout is unknown (unlink, rmdir, rename and in some cases 97 * timeout is unknown (unlink, rmdir, rename and in some cases
98 * lookup) 98 * lookup)
99 */ 99 */
100static void fuse_invalidate_entry_cache(struct dentry *entry) 100void fuse_invalidate_entry_cache(struct dentry *entry)
101{ 101{
102 fuse_dentry_settime(entry, 0); 102 fuse_dentry_settime(entry, 0);
103} 103}
@@ -112,18 +112,16 @@ static void fuse_invalidate_entry(struct dentry *entry)
112 fuse_invalidate_entry_cache(entry); 112 fuse_invalidate_entry_cache(entry);
113} 113}
114 114
115static void fuse_lookup_init(struct fuse_req *req, struct inode *dir, 115static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,
116 struct dentry *entry, 116 u64 nodeid, struct qstr *name,
117 struct fuse_entry_out *outarg) 117 struct fuse_entry_out *outarg)
118{ 118{
119 struct fuse_conn *fc = get_fuse_conn(dir);
120
121 memset(outarg, 0, sizeof(struct fuse_entry_out)); 119 memset(outarg, 0, sizeof(struct fuse_entry_out));
122 req->in.h.opcode = FUSE_LOOKUP; 120 req->in.h.opcode = FUSE_LOOKUP;
123 req->in.h.nodeid = get_node_id(dir); 121 req->in.h.nodeid = nodeid;
124 req->in.numargs = 1; 122 req->in.numargs = 1;
125 req->in.args[0].size = entry->d_name.len + 1; 123 req->in.args[0].size = name->len + 1;
126 req->in.args[0].value = entry->d_name.name; 124 req->in.args[0].value = name->name;
127 req->out.numargs = 1; 125 req->out.numargs = 1;
128 if (fc->minor < 9) 126 if (fc->minor < 9)
129 req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 127 req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
@@ -189,7 +187,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
189 attr_version = fuse_get_attr_version(fc); 187 attr_version = fuse_get_attr_version(fc);
190 188
191 parent = dget_parent(entry); 189 parent = dget_parent(entry);
192 fuse_lookup_init(req, parent->d_inode, entry, &outarg); 190 fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
191 &entry->d_name, &outarg);
193 request_send(fc, req); 192 request_send(fc, req);
194 dput(parent); 193 dput(parent);
195 err = req->out.h.error; 194 err = req->out.h.error;
@@ -225,7 +224,7 @@ static int invalid_nodeid(u64 nodeid)
225 return !nodeid || nodeid == FUSE_ROOT_ID; 224 return !nodeid || nodeid == FUSE_ROOT_ID;
226} 225}
227 226
228static struct dentry_operations fuse_dentry_operations = { 227struct dentry_operations fuse_dentry_operations = {
229 .d_revalidate = fuse_dentry_revalidate, 228 .d_revalidate = fuse_dentry_revalidate,
230}; 229};
231 230
@@ -239,85 +238,127 @@ int fuse_valid_type(int m)
239 * Add a directory inode to a dentry, ensuring that no other dentry 238 * Add a directory inode to a dentry, ensuring that no other dentry
240 * refers to this inode. Called with fc->inst_mutex. 239 * refers to this inode. Called with fc->inst_mutex.
241 */ 240 */
242static int fuse_d_add_directory(struct dentry *entry, struct inode *inode) 241static struct dentry *fuse_d_add_directory(struct dentry *entry,
242 struct inode *inode)
243{ 243{
244 struct dentry *alias = d_find_alias(inode); 244 struct dentry *alias = d_find_alias(inode);
245 if (alias) { 245 if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
246 /* This tries to shrink the subtree below alias */ 246 /* This tries to shrink the subtree below alias */
247 fuse_invalidate_entry(alias); 247 fuse_invalidate_entry(alias);
248 dput(alias); 248 dput(alias);
249 if (!list_empty(&inode->i_dentry)) 249 if (!list_empty(&inode->i_dentry))
250 return -EBUSY; 250 return ERR_PTR(-EBUSY);
251 } else {
252 dput(alias);
251 } 253 }
252 d_add(entry, inode); 254 return d_splice_alias(inode, entry);
253 return 0;
254} 255}
255 256
256static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, 257int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
257 struct nameidata *nd) 258 struct fuse_entry_out *outarg, struct inode **inode)
258{ 259{
259 int err; 260 struct fuse_conn *fc = get_fuse_conn_super(sb);
260 struct fuse_entry_out outarg;
261 struct inode *inode = NULL;
262 struct fuse_conn *fc = get_fuse_conn(dir);
263 struct fuse_req *req; 261 struct fuse_req *req;
264 struct fuse_req *forget_req; 262 struct fuse_req *forget_req;
265 u64 attr_version; 263 u64 attr_version;
264 int err;
266 265
267 if (entry->d_name.len > FUSE_NAME_MAX) 266 *inode = NULL;
268 return ERR_PTR(-ENAMETOOLONG); 267 err = -ENAMETOOLONG;
268 if (name->len > FUSE_NAME_MAX)
269 goto out;
269 270
270 req = fuse_get_req(fc); 271 req = fuse_get_req(fc);
272 err = PTR_ERR(req);
271 if (IS_ERR(req)) 273 if (IS_ERR(req))
272 return ERR_CAST(req); 274 goto out;
273 275
274 forget_req = fuse_get_req(fc); 276 forget_req = fuse_get_req(fc);
277 err = PTR_ERR(forget_req);
275 if (IS_ERR(forget_req)) { 278 if (IS_ERR(forget_req)) {
276 fuse_put_request(fc, req); 279 fuse_put_request(fc, req);
277 return ERR_CAST(forget_req); 280 goto out;
278 } 281 }
279 282
280 attr_version = fuse_get_attr_version(fc); 283 attr_version = fuse_get_attr_version(fc);
281 284
282 fuse_lookup_init(req, dir, entry, &outarg); 285 fuse_lookup_init(fc, req, nodeid, name, outarg);
283 request_send(fc, req); 286 request_send(fc, req);
284 err = req->out.h.error; 287 err = req->out.h.error;
285 fuse_put_request(fc, req); 288 fuse_put_request(fc, req);
286 /* Zero nodeid is same as -ENOENT, but with valid timeout */ 289 /* Zero nodeid is same as -ENOENT, but with valid timeout */
287 if (!err && outarg.nodeid && 290 if (err || !outarg->nodeid)
288 (invalid_nodeid(outarg.nodeid) || 291 goto out_put_forget;
289 !fuse_valid_type(outarg.attr.mode))) 292
290 err = -EIO; 293 err = -EIO;
291 if (!err && outarg.nodeid) { 294 if (!outarg->nodeid)
292 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, 295 goto out_put_forget;
293 &outarg.attr, entry_attr_timeout(&outarg), 296 if (!fuse_valid_type(outarg->attr.mode))
294 attr_version); 297 goto out_put_forget;
295 if (!inode) { 298
296 fuse_send_forget(fc, forget_req, outarg.nodeid, 1); 299 *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
297 return ERR_PTR(-ENOMEM); 300 &outarg->attr, entry_attr_timeout(outarg),
298 } 301 attr_version);
302 err = -ENOMEM;
303 if (!*inode) {
304 fuse_send_forget(fc, forget_req, outarg->nodeid, 1);
305 goto out;
299 } 306 }
307 err = 0;
308
309 out_put_forget:
300 fuse_put_request(fc, forget_req); 310 fuse_put_request(fc, forget_req);
301 if (err && err != -ENOENT) 311 out:
302 return ERR_PTR(err); 312 return err;
313}
314
315static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
316 struct nameidata *nd)
317{
318 int err;
319 struct fuse_entry_out outarg;
320 struct inode *inode;
321 struct dentry *newent;
322 struct fuse_conn *fc = get_fuse_conn(dir);
323 bool outarg_valid = true;
324
325 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
326 &outarg, &inode);
327 if (err == -ENOENT) {
328 outarg_valid = false;
329 err = 0;
330 }
331 if (err)
332 goto out_err;
333
334 err = -EIO;
335 if (inode && get_node_id(inode) == FUSE_ROOT_ID)
336 goto out_iput;
303 337
304 if (inode && S_ISDIR(inode->i_mode)) { 338 if (inode && S_ISDIR(inode->i_mode)) {
305 mutex_lock(&fc->inst_mutex); 339 mutex_lock(&fc->inst_mutex);
306 err = fuse_d_add_directory(entry, inode); 340 newent = fuse_d_add_directory(entry, inode);
307 mutex_unlock(&fc->inst_mutex); 341 mutex_unlock(&fc->inst_mutex);
308 if (err) { 342 err = PTR_ERR(newent);
309 iput(inode); 343 if (IS_ERR(newent))
310 return ERR_PTR(err); 344 goto out_iput;
311 } 345 } else {
312 } else 346 newent = d_splice_alias(inode, entry);
313 d_add(entry, inode); 347 }
314 348
349 entry = newent ? newent : entry;
315 entry->d_op = &fuse_dentry_operations; 350 entry->d_op = &fuse_dentry_operations;
316 if (!err) 351 if (outarg_valid)
317 fuse_change_entry_timeout(entry, &outarg); 352 fuse_change_entry_timeout(entry, &outarg);
318 else 353 else
319 fuse_invalidate_entry_cache(entry); 354 fuse_invalidate_entry_cache(entry);
320 return NULL; 355
356 return newent;
357
358 out_iput:
359 iput(inode);
360 out_err:
361 return ERR_PTR(err);
321} 362}
322 363
323/* 364/*
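The fuse_lookup() rewrite above splits the wire protocol out into fuse_lookup_name(), a by-name lookup relative to an arbitrary nodeid that other callers (notably the NFS export code added in fs/fuse/inode.c below) can reuse; -ENOENT from it means a valid negative entry that may be cached with a timeout, while any other error aborts the lookup. Dentry instantiation also moves from d_add() to d_splice_alias(), and fuse_d_add_directory() now leaves DCACHE_DISCONNECTED aliases alone, so dentries created by file-handle decoding can be reconnected to the tree. A toy sketch of the return-value convention only; the table and names here are invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct entry { const char *name; unsigned long nodeid; };

static const struct entry dir_table[] = { { "README", 2 }, { "src", 3 } };

/* 0: found, *nodeid filled in; -ENOENT: definitely absent, a cacheable
 * negative result; any other negative value: hard error, cache nothing. */
static int lookup_name(const char *name, unsigned long *nodeid)
{
	size_t i;

	if (strlen(name) > 1024)
		return -ENAMETOOLONG;
	for (i = 0; i < sizeof(dir_table) / sizeof(dir_table[0]); i++)
		if (!strcmp(dir_table[i].name, name)) {
			*nodeid = dir_table[i].nodeid;
			return 0;
		}
	return -ENOENT;
}

int main(void)
{
	unsigned long id;
	int err = lookup_name("src", &id);

	if (!err)
		printf("positive entry, nodeid %lu\n", id);

	err = lookup_name("missing", &id);
	if (err == -ENOENT)
		printf("negative entry, cache it with a timeout\n");
	else if (err)
		printf("hard error: %s\n", strerror(-err));
	return 0;
}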
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 8092f0d9fd1f..67ff2c6a8f63 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1341,6 +1341,11 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
1341 pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0; 1341 pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
1342 int err; 1342 int err;
1343 1343
1344 if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
1345 /* NLM needs asynchronous locks, which we don't support yet */
1346 return -ENOLCK;
1347 }
1348
1344 /* Unlock on close is handled by the flush method */ 1349 /* Unlock on close is handled by the flush method */
1345 if (fl->fl_flags & FL_CLOSE) 1350 if (fl->fl_flags & FL_CLOSE)
1346 return 0; 1351 return 0;
@@ -1365,7 +1370,9 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
1365 struct fuse_conn *fc = get_fuse_conn(inode); 1370 struct fuse_conn *fc = get_fuse_conn(inode);
1366 int err; 1371 int err;
1367 1372
1368 if (cmd == F_GETLK) { 1373 if (cmd == F_CANCELLK) {
1374 err = 0;
1375 } else if (cmd == F_GETLK) {
1369 if (fc->no_lock) { 1376 if (fc->no_lock) {
1370 posix_test_lock(file, fl); 1377 posix_test_lock(file, fl);
1371 err = 0; 1378 err = 0;
@@ -1373,7 +1380,7 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
1373 err = fuse_getlk(file, fl); 1380 err = fuse_getlk(file, fl);
1374 } else { 1381 } else {
1375 if (fc->no_lock) 1382 if (fc->no_lock)
1376 err = posix_lock_file_wait(file, fl); 1383 err = posix_lock_file(file, fl, NULL);
1377 else 1384 else
1378 err = fuse_setlk(file, fl, 0); 1385 err = fuse_setlk(file, fl, 0);
1379 } 1386 }
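Three lock-path changes above: requests arriving from lockd with an fl_grant callback (asynchronous NLM locks) are refused with -ENOLCK since FUSE cannot grant them later, F_CANCELLK is accepted and answered with success, and the no_lock fallback calls posix_lock_file() directly rather than posix_lock_file_wait(). From an application's point of view this is still ordinary POSIX record locking; a minimal client-side example for reference, with /tmp/locktest as a placeholder path (on a FUSE mount the F_SETLK below ends up in fuse_setlk()):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/locktest", O_RDWR | O_CREAT, 0600);
	struct flock fl;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;		/* write-lock the whole file */
	fl.l_whence = SEEK_SET;		/* l_start = l_len = 0 */

	if (fcntl(fd, F_SETLK, &fl) < 0)	/* non-blocking acquire */
		perror("F_SETLK");
	else
		puts("lock acquired");

	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);
	close(fd);
	return 0;
}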
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index bae948657c4f..3a876076bdd1 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -363,6 +363,9 @@ struct fuse_conn {
363 /** Do not send separate SETATTR request before open(O_TRUNC) */ 363 /** Do not send separate SETATTR request before open(O_TRUNC) */
364 unsigned atomic_o_trunc : 1; 364 unsigned atomic_o_trunc : 1;
365 365
366 /** Filesystem supports NFS exporting. Only set in INIT */
367 unsigned export_support : 1;
368
366 /* 369 /*
367 * The following bitfields are only for optimization purposes 370 * The following bitfields are only for optimization purposes
368 * and hence races in setting them will not cause malfunction 371 * and hence races in setting them will not cause malfunction
@@ -464,6 +467,8 @@ static inline u64 get_node_id(struct inode *inode)
464/** Device operations */ 467/** Device operations */
465extern const struct file_operations fuse_dev_operations; 468extern const struct file_operations fuse_dev_operations;
466 469
470extern struct dentry_operations fuse_dentry_operations;
471
467/** 472/**
468 * Get a filled in inode 473 * Get a filled in inode
469 */ 474 */
@@ -471,6 +476,9 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
471 int generation, struct fuse_attr *attr, 476 int generation, struct fuse_attr *attr,
472 u64 attr_valid, u64 attr_version); 477 u64 attr_valid, u64 attr_version);
473 478
479int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
480 struct fuse_entry_out *outarg, struct inode **inode);
481
474/** 482/**
475 * Send FORGET command 483 * Send FORGET command
476 */ 484 */
@@ -604,6 +612,8 @@ void fuse_abort_conn(struct fuse_conn *fc);
604 */ 612 */
605void fuse_invalidate_attr(struct inode *inode); 613void fuse_invalidate_attr(struct inode *inode);
606 614
615void fuse_invalidate_entry_cache(struct dentry *entry);
616
607/** 617/**
608 * Acquire reference to fuse_conn 618 * Acquire reference to fuse_conn
609 */ 619 */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3141690558c8..7d2f7d6e22e2 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -18,6 +18,7 @@
18#include <linux/statfs.h> 18#include <linux/statfs.h>
19#include <linux/random.h> 19#include <linux/random.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/exportfs.h>
21 22
22MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); 23MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
23MODULE_DESCRIPTION("Filesystem in Userspace"); 24MODULE_DESCRIPTION("Filesystem in Userspace");
@@ -552,6 +553,174 @@ static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
552 return fuse_iget(sb, 1, 0, &attr, 0, 0); 553 return fuse_iget(sb, 1, 0, &attr, 0, 0);
553} 554}
554 555
556struct fuse_inode_handle
557{
558 u64 nodeid;
559 u32 generation;
560};
561
562static struct dentry *fuse_get_dentry(struct super_block *sb,
563 struct fuse_inode_handle *handle)
564{
565 struct fuse_conn *fc = get_fuse_conn_super(sb);
566 struct inode *inode;
567 struct dentry *entry;
568 int err = -ESTALE;
569
570 if (handle->nodeid == 0)
571 goto out_err;
572
573 inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
574 if (!inode) {
575 struct fuse_entry_out outarg;
576 struct qstr name;
577
578 if (!fc->export_support)
579 goto out_err;
580
581 name.len = 1;
582 name.name = ".";
583 err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
584 &inode);
585 if (err && err != -ENOENT)
586 goto out_err;
587 if (err || !inode) {
588 err = -ESTALE;
589 goto out_err;
590 }
591 err = -EIO;
592 if (get_node_id(inode) != handle->nodeid)
593 goto out_iput;
594 }
595 err = -ESTALE;
596 if (inode->i_generation != handle->generation)
597 goto out_iput;
598
599 entry = d_alloc_anon(inode);
600 err = -ENOMEM;
601 if (!entry)
602 goto out_iput;
603
604 if (get_node_id(inode) != FUSE_ROOT_ID) {
605 entry->d_op = &fuse_dentry_operations;
606 fuse_invalidate_entry_cache(entry);
607 }
608
609 return entry;
610
611 out_iput:
612 iput(inode);
613 out_err:
614 return ERR_PTR(err);
615}
616
617static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
618 int connectable)
619{
620 struct inode *inode = dentry->d_inode;
621 bool encode_parent = connectable && !S_ISDIR(inode->i_mode);
622 int len = encode_parent ? 6 : 3;
623 u64 nodeid;
624 u32 generation;
625
626 if (*max_len < len)
627 return 255;
628
629 nodeid = get_fuse_inode(inode)->nodeid;
630 generation = inode->i_generation;
631
632 fh[0] = (u32)(nodeid >> 32);
633 fh[1] = (u32)(nodeid & 0xffffffff);
634 fh[2] = generation;
635
636 if (encode_parent) {
637 struct inode *parent;
638
639 spin_lock(&dentry->d_lock);
640 parent = dentry->d_parent->d_inode;
641 nodeid = get_fuse_inode(parent)->nodeid;
642 generation = parent->i_generation;
643 spin_unlock(&dentry->d_lock);
644
645 fh[3] = (u32)(nodeid >> 32);
646 fh[4] = (u32)(nodeid & 0xffffffff);
647 fh[5] = generation;
648 }
649
650 *max_len = len;
651 return encode_parent ? 0x82 : 0x81;
652}
653
654static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
655 struct fid *fid, int fh_len, int fh_type)
656{
657 struct fuse_inode_handle handle;
658
659 if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
660 return NULL;
661
662 handle.nodeid = (u64) fid->raw[0] << 32;
663 handle.nodeid |= (u64) fid->raw[1];
664 handle.generation = fid->raw[2];
665 return fuse_get_dentry(sb, &handle);
666}
667
668static struct dentry *fuse_fh_to_parent(struct super_block *sb,
669 struct fid *fid, int fh_len, int fh_type)
670{
671 struct fuse_inode_handle parent;
672
673 if (fh_type != 0x82 || fh_len < 6)
674 return NULL;
675
676 parent.nodeid = (u64) fid->raw[3] << 32;
677 parent.nodeid |= (u64) fid->raw[4];
678 parent.generation = fid->raw[5];
679 return fuse_get_dentry(sb, &parent);
680}
681
682static struct dentry *fuse_get_parent(struct dentry *child)
683{
684 struct inode *child_inode = child->d_inode;
685 struct fuse_conn *fc = get_fuse_conn(child_inode);
686 struct inode *inode;
687 struct dentry *parent;
688 struct fuse_entry_out outarg;
689 struct qstr name;
690 int err;
691
692 if (!fc->export_support)
693 return ERR_PTR(-ESTALE);
694
695 name.len = 2;
696 name.name = "..";
697 err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
698 &name, &outarg, &inode);
699 if (err && err != -ENOENT)
700 return ERR_PTR(err);
701 if (err || !inode)
702 return ERR_PTR(-ESTALE);
703
704 parent = d_alloc_anon(inode);
705 if (!parent) {
706 iput(inode);
707 return ERR_PTR(-ENOMEM);
708 }
709 if (get_node_id(inode) != FUSE_ROOT_ID) {
710 parent->d_op = &fuse_dentry_operations;
711 fuse_invalidate_entry_cache(parent);
712 }
713
714 return parent;
715}
716
717static const struct export_operations fuse_export_operations = {
718 .fh_to_dentry = fuse_fh_to_dentry,
719 .fh_to_parent = fuse_fh_to_parent,
720 .encode_fh = fuse_encode_fh,
721 .get_parent = fuse_get_parent,
722};
723
555static const struct super_operations fuse_super_operations = { 724static const struct super_operations fuse_super_operations = {
556 .alloc_inode = fuse_alloc_inode, 725 .alloc_inode = fuse_alloc_inode,
557 .destroy_inode = fuse_destroy_inode, 726 .destroy_inode = fuse_destroy_inode,
@@ -581,6 +750,11 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
581 fc->no_lock = 1; 750 fc->no_lock = 1;
582 if (arg->flags & FUSE_ATOMIC_O_TRUNC) 751 if (arg->flags & FUSE_ATOMIC_O_TRUNC)
583 fc->atomic_o_trunc = 1; 752 fc->atomic_o_trunc = 1;
753 if (arg->minor >= 9) {
754 /* LOOKUP has dependency on proto version */
755 if (arg->flags & FUSE_EXPORT_SUPPORT)
756 fc->export_support = 1;
757 }
584 if (arg->flags & FUSE_BIG_WRITES) 758 if (arg->flags & FUSE_BIG_WRITES)
585 fc->big_writes = 1; 759 fc->big_writes = 1;
586 } else { 760 } else {
@@ -607,7 +781,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
607 arg->minor = FUSE_KERNEL_MINOR_VERSION; 781 arg->minor = FUSE_KERNEL_MINOR_VERSION;
608 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; 782 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
609 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | 783 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
610 FUSE_BIG_WRITES; 784 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES;
611 req->in.h.opcode = FUSE_INIT; 785 req->in.h.opcode = FUSE_INIT;
612 req->in.numargs = 1; 786 req->in.numargs = 1;
613 req->in.args[0].size = sizeof(*arg); 787 req->in.args[0].size = sizeof(*arg);
@@ -652,6 +826,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
652 sb->s_magic = FUSE_SUPER_MAGIC; 826 sb->s_magic = FUSE_SUPER_MAGIC;
653 sb->s_op = &fuse_super_operations; 827 sb->s_op = &fuse_super_operations;
654 sb->s_maxbytes = MAX_LFS_FILESIZE; 828 sb->s_maxbytes = MAX_LFS_FILESIZE;
829 sb->s_export_op = &fuse_export_operations;
655 830
656 file = fget(d.fd); 831 file = fget(d.fd);
657 if (!file) 832 if (!file)
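The block above is the new FUSE NFS export support: a file handle is three or six 32-bit words, the nodeid split into high and low halves plus the inode generation, with handle types 0x81 (plain) and 0x82 (connectable, parent appended). fuse_get_dentry() falls back to a lookup of "." when the inode is not in the inode cache, and fuse_get_parent() looks up "..", both only if the userspace filesystem advertised FUSE_EXPORT_SUPPORT in its INIT reply (protocol minor 9 or later). A standalone sketch of the handle layout, mirroring fuse_encode_fh():

#include <stdint.h>
#include <stdio.h>

/* Same word layout as fuse_encode_fh(): fh[0]=nodeid>>32, fh[1]=low half,
 * fh[2]=generation; fh[3..5] repeat that for the parent in the 0x82 case. */
struct handle { uint64_t nodeid; uint32_t generation; };

static int encode(uint32_t *fh, const struct handle *self,
		  const struct handle *parent)
{
	fh[0] = (uint32_t)(self->nodeid >> 32);
	fh[1] = (uint32_t)(self->nodeid & 0xffffffff);
	fh[2] = self->generation;
	if (!parent)
		return 0x81;		/* 3 words */
	fh[3] = (uint32_t)(parent->nodeid >> 32);
	fh[4] = (uint32_t)(parent->nodeid & 0xffffffff);
	fh[5] = parent->generation;
	return 0x82;			/* 6 words */
}

static void decode(const uint32_t *fh, struct handle *out)
{
	out->nodeid = ((uint64_t)fh[0] << 32) | fh[1];
	out->generation = fh[2];
}

int main(void)
{
	struct handle node = { 0x100000005ULL, 7 }, back;
	uint32_t fh[6];
	int type = encode(fh, &node, NULL);

	decode(fh, &back);
	printf("type 0x%x nodeid %llu gen %u\n", type,
	       (unsigned long long)back.nodeid, (unsigned)back.generation);
	return 0;
}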
diff --git a/fs/hfs/bitmap.c b/fs/hfs/bitmap.c
index 24e75798ddf0..c6e97366e8ac 100644
--- a/fs/hfs/bitmap.c
+++ b/fs/hfs/bitmap.c
@@ -145,7 +145,7 @@ u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits)
145 if (!*num_bits) 145 if (!*num_bits)
146 return 0; 146 return 0;
147 147
148 down(&HFS_SB(sb)->bitmap_lock); 148 mutex_lock(&HFS_SB(sb)->bitmap_lock);
149 bitmap = HFS_SB(sb)->bitmap; 149 bitmap = HFS_SB(sb)->bitmap;
150 150
151 pos = hfs_find_set_zero_bits(bitmap, HFS_SB(sb)->fs_ablocks, goal, num_bits); 151 pos = hfs_find_set_zero_bits(bitmap, HFS_SB(sb)->fs_ablocks, goal, num_bits);
@@ -162,7 +162,7 @@ u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits)
162 HFS_SB(sb)->free_ablocks -= *num_bits; 162 HFS_SB(sb)->free_ablocks -= *num_bits;
163 hfs_bitmap_dirty(sb); 163 hfs_bitmap_dirty(sb);
164out: 164out:
165 up(&HFS_SB(sb)->bitmap_lock); 165 mutex_unlock(&HFS_SB(sb)->bitmap_lock);
166 return pos; 166 return pos;
167} 167}
168 168
@@ -205,7 +205,7 @@ int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count)
205 if ((start + count) > HFS_SB(sb)->fs_ablocks) 205 if ((start + count) > HFS_SB(sb)->fs_ablocks)
206 return -2; 206 return -2;
207 207
208 down(&HFS_SB(sb)->bitmap_lock); 208 mutex_lock(&HFS_SB(sb)->bitmap_lock);
209 /* bitmap is always on a 32-bit boundary */ 209 /* bitmap is always on a 32-bit boundary */
210 curr = HFS_SB(sb)->bitmap + (start / 32); 210 curr = HFS_SB(sb)->bitmap + (start / 32);
211 len = count; 211 len = count;
@@ -236,7 +236,7 @@ int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count)
236 } 236 }
237out: 237out:
238 HFS_SB(sb)->free_ablocks += len; 238 HFS_SB(sb)->free_ablocks += len;
239 up(&HFS_SB(sb)->bitmap_lock); 239 mutex_unlock(&HFS_SB(sb)->bitmap_lock);
240 hfs_bitmap_dirty(sb); 240 hfs_bitmap_dirty(sb);
241 241
242 return 0; 242 return 0;
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index f6621a785202..9b9d6395bad3 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -40,7 +40,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
40 { 40 {
41 struct hfs_mdb *mdb = HFS_SB(sb)->mdb; 41 struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
42 HFS_I(tree->inode)->flags = 0; 42 HFS_I(tree->inode)->flags = 0;
43 init_MUTEX(&HFS_I(tree->inode)->extents_lock); 43 mutex_init(&HFS_I(tree->inode)->extents_lock);
44 switch (id) { 44 switch (id) {
45 case HFS_EXT_CNID: 45 case HFS_EXT_CNID:
46 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize, 46 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index c176f67ba0a5..2c16316d2917 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -343,16 +343,16 @@ int hfs_get_block(struct inode *inode, sector_t block,
343 goto done; 343 goto done;
344 } 344 }
345 345
346 down(&HFS_I(inode)->extents_lock); 346 mutex_lock(&HFS_I(inode)->extents_lock);
347 res = hfs_ext_read_extent(inode, ablock); 347 res = hfs_ext_read_extent(inode, ablock);
348 if (!res) 348 if (!res)
349 dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents, 349 dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
350 ablock - HFS_I(inode)->cached_start); 350 ablock - HFS_I(inode)->cached_start);
351 else { 351 else {
352 up(&HFS_I(inode)->extents_lock); 352 mutex_unlock(&HFS_I(inode)->extents_lock);
353 return -EIO; 353 return -EIO;
354 } 354 }
355 up(&HFS_I(inode)->extents_lock); 355 mutex_unlock(&HFS_I(inode)->extents_lock);
356 356
357done: 357done:
358 map_bh(bh_result, sb, HFS_SB(sb)->fs_start + 358 map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
@@ -375,7 +375,7 @@ int hfs_extend_file(struct inode *inode)
375 u32 start, len, goal; 375 u32 start, len, goal;
376 int res; 376 int res;
377 377
378 down(&HFS_I(inode)->extents_lock); 378 mutex_lock(&HFS_I(inode)->extents_lock);
379 if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) 379 if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
380 goal = hfs_ext_lastblock(HFS_I(inode)->first_extents); 380 goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
381 else { 381 else {
@@ -425,7 +425,7 @@ int hfs_extend_file(struct inode *inode)
425 goto insert_extent; 425 goto insert_extent;
426 } 426 }
427out: 427out:
428 up(&HFS_I(inode)->extents_lock); 428 mutex_unlock(&HFS_I(inode)->extents_lock);
429 if (!res) { 429 if (!res) {
430 HFS_I(inode)->alloc_blocks += len; 430 HFS_I(inode)->alloc_blocks += len;
431 mark_inode_dirty(inode); 431 mark_inode_dirty(inode);
@@ -487,7 +487,7 @@ void hfs_file_truncate(struct inode *inode)
487 if (blk_cnt == alloc_cnt) 487 if (blk_cnt == alloc_cnt)
488 goto out; 488 goto out;
489 489
490 down(&HFS_I(inode)->extents_lock); 490 mutex_lock(&HFS_I(inode)->extents_lock);
491 hfs_find_init(HFS_SB(sb)->ext_tree, &fd); 491 hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
492 while (1) { 492 while (1) {
493 if (alloc_cnt == HFS_I(inode)->first_blocks) { 493 if (alloc_cnt == HFS_I(inode)->first_blocks) {
@@ -514,7 +514,7 @@ void hfs_file_truncate(struct inode *inode)
514 hfs_brec_remove(&fd); 514 hfs_brec_remove(&fd);
515 } 515 }
516 hfs_find_exit(&fd); 516 hfs_find_exit(&fd);
517 up(&HFS_I(inode)->extents_lock); 517 mutex_unlock(&HFS_I(inode)->extents_lock);
518 518
519 HFS_I(inode)->alloc_blocks = blk_cnt; 519 HFS_I(inode)->alloc_blocks = blk_cnt;
520out: 520out:
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 147374b6f675..9955232fdf8c 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/mutex.h>
14#include <linux/buffer_head.h> 15#include <linux/buffer_head.h>
15#include <linux/fs.h> 16#include <linux/fs.h>
16 17
@@ -53,7 +54,7 @@ struct hfs_inode_info {
53 struct list_head open_dir_list; 54 struct list_head open_dir_list;
54 struct inode *rsrc_inode; 55 struct inode *rsrc_inode;
55 56
56 struct semaphore extents_lock; 57 struct mutex extents_lock;
57 58
58 u16 alloc_blocks, clump_blocks; 59 u16 alloc_blocks, clump_blocks;
59 sector_t fs_blocks; 60 sector_t fs_blocks;
@@ -139,7 +140,7 @@ struct hfs_sb_info {
139 140
140 struct nls_table *nls_io, *nls_disk; 141 struct nls_table *nls_io, *nls_disk;
141 142
142 struct semaphore bitmap_lock; 143 struct mutex bitmap_lock;
143 144
144 unsigned long flags; 145 unsigned long flags;
145 146
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 97f8446c4ff4..dc4ec640e875 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -150,7 +150,7 @@ struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
150 if (!inode) 150 if (!inode)
151 return NULL; 151 return NULL;
152 152
153 init_MUTEX(&HFS_I(inode)->extents_lock); 153 mutex_init(&HFS_I(inode)->extents_lock);
154 INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list); 154 INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
155 hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name); 155 hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
156 inode->i_ino = HFS_SB(sb)->next_id++; 156 inode->i_ino = HFS_SB(sb)->next_id++;
@@ -281,7 +281,7 @@ static int hfs_read_inode(struct inode *inode, void *data)
281 281
282 HFS_I(inode)->flags = 0; 282 HFS_I(inode)->flags = 0;
283 HFS_I(inode)->rsrc_inode = NULL; 283 HFS_I(inode)->rsrc_inode = NULL;
284 init_MUTEX(&HFS_I(inode)->extents_lock); 284 mutex_init(&HFS_I(inode)->extents_lock);
285 INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list); 285 INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
286 286
287 /* Initialize the inode */ 287 /* Initialize the inode */
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 8cf67974adf6..ac2ec5ef66e4 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -372,7 +372,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
372 372
373 sb->s_op = &hfs_super_operations; 373 sb->s_op = &hfs_super_operations;
374 sb->s_flags |= MS_NODIRATIME; 374 sb->s_flags |= MS_NODIRATIME;
375 init_MUTEX(&sbi->bitmap_lock); 375 mutex_init(&sbi->bitmap_lock);
376 376
377 res = hfs_mdb_get(sb); 377 res = hfs_mdb_get(sb);
378 if (res) { 378 if (res) {
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 12e899cd7886..fec8f61227ff 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -199,16 +199,16 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
199 goto done; 199 goto done;
200 } 200 }
201 201
202 down(&HFSPLUS_I(inode).extents_lock); 202 mutex_lock(&HFSPLUS_I(inode).extents_lock);
203 res = hfsplus_ext_read_extent(inode, ablock); 203 res = hfsplus_ext_read_extent(inode, ablock);
204 if (!res) { 204 if (!res) {
205 dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock - 205 dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock -
206 HFSPLUS_I(inode).cached_start); 206 HFSPLUS_I(inode).cached_start);
207 } else { 207 } else {
208 up(&HFSPLUS_I(inode).extents_lock); 208 mutex_unlock(&HFSPLUS_I(inode).extents_lock);
209 return -EIO; 209 return -EIO;
210 } 210 }
211 up(&HFSPLUS_I(inode).extents_lock); 211 mutex_unlock(&HFSPLUS_I(inode).extents_lock);
212 212
213done: 213done:
214 dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock); 214 dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
@@ -355,7 +355,7 @@ int hfsplus_file_extend(struct inode *inode)
355 return -ENOSPC; 355 return -ENOSPC;
356 } 356 }
357 357
358 down(&HFSPLUS_I(inode).extents_lock); 358 mutex_lock(&HFSPLUS_I(inode).extents_lock);
359 if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks) 359 if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks)
360 goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents); 360 goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents);
361 else { 361 else {
@@ -408,7 +408,7 @@ int hfsplus_file_extend(struct inode *inode)
408 goto insert_extent; 408 goto insert_extent;
409 } 409 }
410out: 410out:
411 up(&HFSPLUS_I(inode).extents_lock); 411 mutex_unlock(&HFSPLUS_I(inode).extents_lock);
412 if (!res) { 412 if (!res) {
413 HFSPLUS_I(inode).alloc_blocks += len; 413 HFSPLUS_I(inode).alloc_blocks += len;
414 mark_inode_dirty(inode); 414 mark_inode_dirty(inode);
@@ -465,7 +465,7 @@ void hfsplus_file_truncate(struct inode *inode)
465 if (blk_cnt == alloc_cnt) 465 if (blk_cnt == alloc_cnt)
466 goto out; 466 goto out;
467 467
468 down(&HFSPLUS_I(inode).extents_lock); 468 mutex_lock(&HFSPLUS_I(inode).extents_lock);
469 hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd); 469 hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
470 while (1) { 470 while (1) {
471 if (alloc_cnt == HFSPLUS_I(inode).first_blocks) { 471 if (alloc_cnt == HFSPLUS_I(inode).first_blocks) {
@@ -492,7 +492,7 @@ void hfsplus_file_truncate(struct inode *inode)
492 hfs_brec_remove(&fd); 492 hfs_brec_remove(&fd);
493 } 493 }
494 hfs_find_exit(&fd); 494 hfs_find_exit(&fd);
495 up(&HFSPLUS_I(inode).extents_lock); 495 mutex_unlock(&HFSPLUS_I(inode).extents_lock);
496 496
497 HFSPLUS_I(inode).alloc_blocks = blk_cnt; 497 HFSPLUS_I(inode).alloc_blocks = blk_cnt;
498out: 498out:
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 9e59537b43d5..f027a905225f 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -11,6 +11,7 @@
11#define _LINUX_HFSPLUS_FS_H 11#define _LINUX_HFSPLUS_FS_H
12 12
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/mutex.h>
14#include <linux/buffer_head.h> 15#include <linux/buffer_head.h>
15#include "hfsplus_raw.h" 16#include "hfsplus_raw.h"
16 17
@@ -154,7 +155,7 @@ struct hfsplus_sb_info {
154 155
155 156
156struct hfsplus_inode_info { 157struct hfsplus_inode_info {
157 struct semaphore extents_lock; 158 struct mutex extents_lock;
158 u32 clump_blocks, alloc_blocks; 159 u32 clump_blocks, alloc_blocks;
159 sector_t fs_blocks; 160 sector_t fs_blocks;
160 /* Allocation extents from catalog record or volume header */ 161 /* Allocation extents from catalog record or volume header */
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 67e1c8b467c4..cc3b5e24339b 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -163,7 +163,7 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
163 163
164 inode->i_ino = dir->i_ino; 164 inode->i_ino = dir->i_ino;
165 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); 165 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
166 init_MUTEX(&HFSPLUS_I(inode).extents_lock); 166 mutex_init(&HFSPLUS_I(inode).extents_lock);
167 HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC; 167 HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;
168 168
169 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd); 169 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
@@ -316,7 +316,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
316 inode->i_nlink = 1; 316 inode->i_nlink = 1;
317 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 317 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
318 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); 318 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
319 init_MUTEX(&HFSPLUS_I(inode).extents_lock); 319 mutex_init(&HFSPLUS_I(inode).extents_lock);
320 atomic_set(&HFSPLUS_I(inode).opencnt, 0); 320 atomic_set(&HFSPLUS_I(inode).opencnt, 0);
321 HFSPLUS_I(inode).flags = 0; 321 HFSPLUS_I(inode).flags = 0;
322 memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec)); 322 memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index ce97a54518d8..3859118531c7 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -34,7 +34,7 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
34 return inode; 34 return inode;
35 35
36 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); 36 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
37 init_MUTEX(&HFSPLUS_I(inode).extents_lock); 37 mutex_init(&HFSPLUS_I(inode).extents_lock);
38 HFSPLUS_I(inode).flags = 0; 38 HFSPLUS_I(inode).flags = 0;
39 HFSPLUS_I(inode).rsrc_inode = NULL; 39 HFSPLUS_I(inode).rsrc_inode = NULL;
40 atomic_set(&HFSPLUS_I(inode).opencnt, 0); 40 atomic_set(&HFSPLUS_I(inode).opencnt, 0);
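The hfs and hfsplus hunks above are a mechanical conversion of two binary semaphores, extents_lock and bitmap_lock, to mutexes: init_MUTEX()/down()/up() become mutex_init()/mutex_lock()/mutex_unlock(), the preferred primitive for plain sleeping mutual exclusion, which also gets the kernel's mutex debugging for free. The same serialize-a-shared-cache shape in a userspace analogy using POSIX threads (build with -lpthread; the one-entry cache is only a stand-in for the extent cache):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t extents_lock = PTHREAD_MUTEX_INITIALIZER;
static int cached_start = -1;		/* toy stand-in for the extent cache */

static int read_extent(int block)
{
	int hit;

	pthread_mutex_lock(&extents_lock);	/* was: down(&...extents_lock) */
	hit = (block == cached_start);
	if (!hit)
		cached_start = block;		/* refill the cache */
	pthread_mutex_unlock(&extents_lock);	/* was: up(&...extents_lock) */
	return hit;
}

static void *worker(void *arg)
{
	int i;

	for (i = 0; i < 1000; i++)
		read_extent(i & 7);
	return arg;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("last cached_start=%d\n", cached_start);
	return 0;
}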
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index aeabf80f81a5..dbd01d262ca4 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -53,6 +53,7 @@ int sysctl_hugetlb_shm_group;
53enum { 53enum {
54 Opt_size, Opt_nr_inodes, 54 Opt_size, Opt_nr_inodes,
55 Opt_mode, Opt_uid, Opt_gid, 55 Opt_mode, Opt_uid, Opt_gid,
56 Opt_pagesize,
56 Opt_err, 57 Opt_err,
57}; 58};
58 59
@@ -62,6 +63,7 @@ static match_table_t tokens = {
62 {Opt_mode, "mode=%o"}, 63 {Opt_mode, "mode=%o"},
63 {Opt_uid, "uid=%u"}, 64 {Opt_uid, "uid=%u"},
64 {Opt_gid, "gid=%u"}, 65 {Opt_gid, "gid=%u"},
66 {Opt_pagesize, "pagesize=%s"},
65 {Opt_err, NULL}, 67 {Opt_err, NULL},
66}; 68};
67 69
@@ -80,6 +82,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
80 struct inode *inode = file->f_path.dentry->d_inode; 82 struct inode *inode = file->f_path.dentry->d_inode;
81 loff_t len, vma_len; 83 loff_t len, vma_len;
82 int ret; 84 int ret;
85 struct hstate *h = hstate_file(file);
83 86
84 /* 87 /*
85 * vma address alignment (but not the pgoff alignment) has 88 * vma address alignment (but not the pgoff alignment) has
@@ -92,7 +95,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
92 vma->vm_flags |= VM_HUGETLB | VM_RESERVED; 95 vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
93 vma->vm_ops = &hugetlb_vm_ops; 96 vma->vm_ops = &hugetlb_vm_ops;
94 97
95 if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT)) 98 if (vma->vm_pgoff & ~(huge_page_mask(h) >> PAGE_SHIFT))
96 return -EINVAL; 99 return -EINVAL;
97 100
98 vma_len = (loff_t)(vma->vm_end - vma->vm_start); 101 vma_len = (loff_t)(vma->vm_end - vma->vm_start);
@@ -103,9 +106,9 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
103 ret = -ENOMEM; 106 ret = -ENOMEM;
104 len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 107 len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
105 108
106 if (vma->vm_flags & VM_MAYSHARE && 109 if (hugetlb_reserve_pages(inode,
107 hugetlb_reserve_pages(inode, vma->vm_pgoff >> (HPAGE_SHIFT-PAGE_SHIFT), 110 vma->vm_pgoff >> huge_page_order(h),
108 len >> HPAGE_SHIFT)) 111 len >> huge_page_shift(h), vma))
109 goto out; 112 goto out;
110 113
111 ret = 0; 114 ret = 0;
@@ -130,20 +133,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
130 struct mm_struct *mm = current->mm; 133 struct mm_struct *mm = current->mm;
131 struct vm_area_struct *vma; 134 struct vm_area_struct *vma;
132 unsigned long start_addr; 135 unsigned long start_addr;
136 struct hstate *h = hstate_file(file);
133 137
134 if (len & ~HPAGE_MASK) 138 if (len & ~huge_page_mask(h))
135 return -EINVAL; 139 return -EINVAL;
136 if (len > TASK_SIZE) 140 if (len > TASK_SIZE)
137 return -ENOMEM; 141 return -ENOMEM;
138 142
139 if (flags & MAP_FIXED) { 143 if (flags & MAP_FIXED) {
140 if (prepare_hugepage_range(addr, len)) 144 if (prepare_hugepage_range(file, addr, len))
141 return -EINVAL; 145 return -EINVAL;
142 return addr; 146 return addr;
143 } 147 }
144 148
145 if (addr) { 149 if (addr) {
146 addr = ALIGN(addr, HPAGE_SIZE); 150 addr = ALIGN(addr, huge_page_size(h));
147 vma = find_vma(mm, addr); 151 vma = find_vma(mm, addr);
148 if (TASK_SIZE - len >= addr && 152 if (TASK_SIZE - len >= addr &&
149 (!vma || addr + len <= vma->vm_start)) 153 (!vma || addr + len <= vma->vm_start))
@@ -156,7 +160,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
156 start_addr = TASK_UNMAPPED_BASE; 160 start_addr = TASK_UNMAPPED_BASE;
157 161
158full_search: 162full_search:
159 addr = ALIGN(start_addr, HPAGE_SIZE); 163 addr = ALIGN(start_addr, huge_page_size(h));
160 164
161 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { 165 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
162 /* At this point: (!vma || addr < vma->vm_end). */ 166 /* At this point: (!vma || addr < vma->vm_end). */
@@ -174,7 +178,7 @@ full_search:
174 178
175 if (!vma || addr + len <= vma->vm_start) 179 if (!vma || addr + len <= vma->vm_start)
176 return addr; 180 return addr;
177 addr = ALIGN(vma->vm_end, HPAGE_SIZE); 181 addr = ALIGN(vma->vm_end, huge_page_size(h));
178 } 182 }
179} 183}
180#endif 184#endif
@@ -225,10 +229,11 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
225static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, 229static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
226 size_t len, loff_t *ppos) 230 size_t len, loff_t *ppos)
227{ 231{
232 struct hstate *h = hstate_file(filp);
228 struct address_space *mapping = filp->f_mapping; 233 struct address_space *mapping = filp->f_mapping;
229 struct inode *inode = mapping->host; 234 struct inode *inode = mapping->host;
230 unsigned long index = *ppos >> HPAGE_SHIFT; 235 unsigned long index = *ppos >> huge_page_shift(h);
231 unsigned long offset = *ppos & ~HPAGE_MASK; 236 unsigned long offset = *ppos & ~huge_page_mask(h);
232 unsigned long end_index; 237 unsigned long end_index;
233 loff_t isize; 238 loff_t isize;
234 ssize_t retval = 0; 239 ssize_t retval = 0;
@@ -243,17 +248,17 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
243 if (!isize) 248 if (!isize)
244 goto out; 249 goto out;
245 250
246 end_index = (isize - 1) >> HPAGE_SHIFT; 251 end_index = (isize - 1) >> huge_page_shift(h);
247 for (;;) { 252 for (;;) {
248 struct page *page; 253 struct page *page;
249 int nr, ret; 254 unsigned long nr, ret;
250 255
251 /* nr is the maximum number of bytes to copy from this page */ 256 /* nr is the maximum number of bytes to copy from this page */
252 nr = HPAGE_SIZE; 257 nr = huge_page_size(h);
253 if (index >= end_index) { 258 if (index >= end_index) {
254 if (index > end_index) 259 if (index > end_index)
255 goto out; 260 goto out;
256 nr = ((isize - 1) & ~HPAGE_MASK) + 1; 261 nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
257 if (nr <= offset) { 262 if (nr <= offset) {
258 goto out; 263 goto out;
259 } 264 }
@@ -287,8 +292,8 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
287 offset += ret; 292 offset += ret;
288 retval += ret; 293 retval += ret;
289 len -= ret; 294 len -= ret;
290 index += offset >> HPAGE_SHIFT; 295 index += offset >> huge_page_shift(h);
291 offset &= ~HPAGE_MASK; 296 offset &= ~huge_page_mask(h);
292 297
293 if (page) 298 if (page)
294 page_cache_release(page); 299 page_cache_release(page);
@@ -298,7 +303,7 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
298 break; 303 break;
299 } 304 }
300out: 305out:
301 *ppos = ((loff_t)index << HPAGE_SHIFT) + offset; 306 *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
302 mutex_unlock(&inode->i_mutex); 307 mutex_unlock(&inode->i_mutex);
303 return retval; 308 return retval;
304} 309}
@@ -339,8 +344,9 @@ static void truncate_huge_page(struct page *page)
339 344
340static void truncate_hugepages(struct inode *inode, loff_t lstart) 345static void truncate_hugepages(struct inode *inode, loff_t lstart)
341{ 346{
347 struct hstate *h = hstate_inode(inode);
342 struct address_space *mapping = &inode->i_data; 348 struct address_space *mapping = &inode->i_data;
343 const pgoff_t start = lstart >> HPAGE_SHIFT; 349 const pgoff_t start = lstart >> huge_page_shift(h);
344 struct pagevec pvec; 350 struct pagevec pvec;
345 pgoff_t next; 351 pgoff_t next;
346 int i, freed = 0; 352 int i, freed = 0;
@@ -441,7 +447,7 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
441 v_offset = 0; 447 v_offset = 0;
442 448
443 __unmap_hugepage_range(vma, 449 __unmap_hugepage_range(vma,
444 vma->vm_start + v_offset, vma->vm_end); 450 vma->vm_start + v_offset, vma->vm_end, NULL);
445 } 451 }
446} 452}
447 453
@@ -449,8 +455,9 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
449{ 455{
450 pgoff_t pgoff; 456 pgoff_t pgoff;
451 struct address_space *mapping = inode->i_mapping; 457 struct address_space *mapping = inode->i_mapping;
458 struct hstate *h = hstate_inode(inode);
452 459
453 BUG_ON(offset & ~HPAGE_MASK); 460 BUG_ON(offset & ~huge_page_mask(h));
454 pgoff = offset >> PAGE_SHIFT; 461 pgoff = offset >> PAGE_SHIFT;
455 462
456 i_size_write(inode, offset); 463 i_size_write(inode, offset);
@@ -465,6 +472,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
465static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) 472static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
466{ 473{
467 struct inode *inode = dentry->d_inode; 474 struct inode *inode = dentry->d_inode;
475 struct hstate *h = hstate_inode(inode);
468 int error; 476 int error;
469 unsigned int ia_valid = attr->ia_valid; 477 unsigned int ia_valid = attr->ia_valid;
470 478
@@ -476,7 +484,7 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
476 484
477 if (ia_valid & ATTR_SIZE) { 485 if (ia_valid & ATTR_SIZE) {
478 error = -EINVAL; 486 error = -EINVAL;
479 if (!(attr->ia_size & ~HPAGE_MASK)) 487 if (!(attr->ia_size & ~huge_page_mask(h)))
480 error = hugetlb_vmtruncate(inode, attr->ia_size); 488 error = hugetlb_vmtruncate(inode, attr->ia_size);
481 if (error) 489 if (error)
482 goto out; 490 goto out;
@@ -610,9 +618,10 @@ static int hugetlbfs_set_page_dirty(struct page *page)
610static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) 618static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
611{ 619{
612 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); 620 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
621 struct hstate *h = hstate_inode(dentry->d_inode);
613 622
614 buf->f_type = HUGETLBFS_MAGIC; 623 buf->f_type = HUGETLBFS_MAGIC;
615 buf->f_bsize = HPAGE_SIZE; 624 buf->f_bsize = huge_page_size(h);
616 if (sbinfo) { 625 if (sbinfo) {
617 spin_lock(&sbinfo->stat_lock); 626 spin_lock(&sbinfo->stat_lock);
618 /* If no limits set, just report 0 for max/free/used 627 /* If no limits set, just report 0 for max/free/used
@@ -743,6 +752,8 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
743 char *p, *rest; 752 char *p, *rest;
744 substring_t args[MAX_OPT_ARGS]; 753 substring_t args[MAX_OPT_ARGS];
745 int option; 754 int option;
755 unsigned long long size = 0;
756 enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;
746 757
747 if (!options) 758 if (!options)
748 return 0; 759 return 0;
@@ -773,17 +784,13 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
773 break; 784 break;
774 785
775 case Opt_size: { 786 case Opt_size: {
776 unsigned long long size;
777 /* memparse() will accept a K/M/G without a digit */ 787 /* memparse() will accept a K/M/G without a digit */
778 if (!isdigit(*args[0].from)) 788 if (!isdigit(*args[0].from))
779 goto bad_val; 789 goto bad_val;
780 size = memparse(args[0].from, &rest); 790 size = memparse(args[0].from, &rest);
781 if (*rest == '%') { 791 setsize = SIZE_STD;
782 size <<= HPAGE_SHIFT; 792 if (*rest == '%')
783 size *= max_huge_pages; 793 setsize = SIZE_PERCENT;
784 do_div(size, 100);
785 }
786 pconfig->nr_blocks = (size >> HPAGE_SHIFT);
787 break; 794 break;
788 } 795 }
789 796
@@ -794,6 +801,19 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
794 pconfig->nr_inodes = memparse(args[0].from, &rest); 801 pconfig->nr_inodes = memparse(args[0].from, &rest);
795 break; 802 break;
796 803
804 case Opt_pagesize: {
805 unsigned long ps;
806 ps = memparse(args[0].from, &rest);
807 pconfig->hstate = size_to_hstate(ps);
808 if (!pconfig->hstate) {
809 printk(KERN_ERR
810 "hugetlbfs: Unsupported page size %lu MB\n",
811 ps >> 20);
812 return -EINVAL;
813 }
814 break;
815 }
816
797 default: 817 default:
798 printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n", 818 printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
799 p); 819 p);
@@ -801,6 +821,18 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
801 break; 821 break;
802 } 822 }
803 } 823 }
824
825 /* Do size after hstate is set up */
826 if (setsize > NO_SIZE) {
827 struct hstate *h = pconfig->hstate;
828 if (setsize == SIZE_PERCENT) {
829 size <<= huge_page_shift(h);
830 size *= h->max_huge_pages;
831 do_div(size, 100);
832 }
833 pconfig->nr_blocks = (size >> huge_page_shift(h));
834 }
835
804 return 0; 836 return 0;
805 837
806bad_val: 838bad_val:
@@ -825,6 +857,7 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
825 config.uid = current->fsuid; 857 config.uid = current->fsuid;
826 config.gid = current->fsgid; 858 config.gid = current->fsgid;
827 config.mode = 0755; 859 config.mode = 0755;
860 config.hstate = &default_hstate;
828 ret = hugetlbfs_parse_options(data, &config); 861 ret = hugetlbfs_parse_options(data, &config);
829 if (ret) 862 if (ret)
830 return ret; 863 return ret;
@@ -833,14 +866,15 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
833 if (!sbinfo) 866 if (!sbinfo)
834 return -ENOMEM; 867 return -ENOMEM;
835 sb->s_fs_info = sbinfo; 868 sb->s_fs_info = sbinfo;
869 sbinfo->hstate = config.hstate;
836 spin_lock_init(&sbinfo->stat_lock); 870 spin_lock_init(&sbinfo->stat_lock);
837 sbinfo->max_blocks = config.nr_blocks; 871 sbinfo->max_blocks = config.nr_blocks;
838 sbinfo->free_blocks = config.nr_blocks; 872 sbinfo->free_blocks = config.nr_blocks;
839 sbinfo->max_inodes = config.nr_inodes; 873 sbinfo->max_inodes = config.nr_inodes;
840 sbinfo->free_inodes = config.nr_inodes; 874 sbinfo->free_inodes = config.nr_inodes;
841 sb->s_maxbytes = MAX_LFS_FILESIZE; 875 sb->s_maxbytes = MAX_LFS_FILESIZE;
842 sb->s_blocksize = HPAGE_SIZE; 876 sb->s_blocksize = huge_page_size(config.hstate);
843 sb->s_blocksize_bits = HPAGE_SHIFT; 877 sb->s_blocksize_bits = huge_page_shift(config.hstate);
844 sb->s_magic = HUGETLBFS_MAGIC; 878 sb->s_magic = HUGETLBFS_MAGIC;
845 sb->s_op = &hugetlbfs_ops; 879 sb->s_op = &hugetlbfs_ops;
846 sb->s_time_gran = 1; 880 sb->s_time_gran = 1;
@@ -942,7 +976,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size)
942 goto out_dentry; 976 goto out_dentry;
943 977
944 error = -ENOMEM; 978 error = -ENOMEM;
945 if (hugetlb_reserve_pages(inode, 0, size >> HPAGE_SHIFT)) 979 if (hugetlb_reserve_pages(inode, 0,
980 size >> huge_page_shift(hstate_inode(inode)), NULL))
946 goto out_inode; 981 goto out_inode;
947 982
948 d_instantiate(dentry, inode); 983 d_instantiate(dentry, inode);
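The hugetlbfs changes above replace the compile-time HPAGE_SIZE/HPAGE_SHIFT/HPAGE_MASK constants with per-mount hstate queries (huge_page_size(), huge_page_shift(), huge_page_mask()), add a pagesize= mount option that selects the hstate, and defer the size= calculation until after option parsing so that a percentage is computed against the chosen page pool. Something like 'mount -t hugetlbfs -o pagesize=2M,size=50% none /mnt/huge' is the intended usage; that exact command line is only an illustration. A standalone sketch of the deferred size calculation:

#include <stdio.h>

/* Mirror of the end of hugetlbfs_parse_options(): once the hstate is
 * known, a "size=N%" request becomes a block count in units of that
 * hstate's page size.  All values below are made up for the example. */
static unsigned long long nr_blocks(unsigned long long size, int percent,
				    unsigned int page_shift,
				    unsigned long max_huge_pages)
{
	if (percent) {
		size <<= page_shift;		/* percent of the whole pool */
		size *= max_huge_pages;
		size /= 100;
	}
	return size >> page_shift;
}

int main(void)
{
	/* size=50% of a pool of 1024 huge pages of 2 MB each */
	printf("nr_blocks = %llu\n", nr_blocks(50, 1, 21, 1024));
	/* size=1G with 2 MB pages */
	printf("nr_blocks = %llu\n", nr_blocks(1ULL << 30, 0, 21, 1024));
	return 0;
}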
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 6676c06bb7c1..fe79c25d95dc 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -566,7 +566,7 @@ static const struct inotify_operations inotify_user_ops = {
566 .destroy_watch = free_inotify_user_watch, 566 .destroy_watch = free_inotify_user_watch,
567}; 567};
568 568
569asmlinkage long sys_inotify_init(void) 569asmlinkage long sys_inotify_init1(int flags)
570{ 570{
571 struct inotify_device *dev; 571 struct inotify_device *dev;
572 struct inotify_handle *ih; 572 struct inotify_handle *ih;
@@ -574,7 +574,14 @@ asmlinkage long sys_inotify_init(void)
574 struct file *filp; 574 struct file *filp;
575 int fd, ret; 575 int fd, ret;
576 576
577 fd = get_unused_fd(); 577 /* Check the IN_* constants for consistency. */
578 BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
579 BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
580
581 if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
582 return -EINVAL;
583
584 fd = get_unused_fd_flags(flags & O_CLOEXEC);
578 if (fd < 0) 585 if (fd < 0)
579 return fd; 586 return fd;
580 587
@@ -610,7 +617,7 @@ asmlinkage long sys_inotify_init(void)
610 filp->f_path.dentry = dget(inotify_mnt->mnt_root); 617 filp->f_path.dentry = dget(inotify_mnt->mnt_root);
611 filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; 618 filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
612 filp->f_mode = FMODE_READ; 619 filp->f_mode = FMODE_READ;
613 filp->f_flags = O_RDONLY; 620 filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
614 filp->private_data = dev; 621 filp->private_data = dev;
615 622
616 INIT_LIST_HEAD(&dev->events); 623 INIT_LIST_HEAD(&dev->events);
@@ -638,6 +645,11 @@ out_put_fd:
638 return ret; 645 return ret;
639} 646}
640 647
648asmlinkage long sys_inotify_init(void)
649{
650 return sys_inotify_init1(0);
651}
652
641asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) 653asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
642{ 654{
643 struct inode *inode; 655 struct inode *inode;
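sys_inotify_init() above becomes a wrapper around the new sys_inotify_init1(); its flags map directly onto open() flags (the BUILD_BUG_ONs pin IN_CLOEXEC to O_CLOEXEC and IN_NONBLOCK to O_NONBLOCK), so the inotify descriptor can be created close-on-exec and non-blocking without a separate, racy fcntl() step. Userspace usage, assuming a libc that already exposes the inotify_init1() wrapper and the two IN_* constants:

#include <errno.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init1(IN_CLOEXEC | IN_NONBLOCK);

	if (fd < 0) {
		perror("inotify_init1");
		return 1;
	}
	if (inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0)
		perror("inotify_add_watch");

	/* Non-blocking: a read with no pending events returns EAGAIN
	 * instead of sleeping. */
	if (read(fd, buf, sizeof(buf)) < 0 && errno == EAGAIN)
		puts("no events pending");

	close(fd);
	return 0;
}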
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 6bd48f0a7047..c2fb2dd0131f 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -209,6 +209,11 @@ repeat:
209 209
210 while (rs.len > 2) { /* There may be one byte for padding somewhere */ 210 while (rs.len > 2) { /* There may be one byte for padding somewhere */
211 rr = (struct rock_ridge *)rs.chr; 211 rr = (struct rock_ridge *)rs.chr;
212 /*
213 * Ignore rock ridge info if rr->len is out of range, but
214 * don't return -EIO because that would make the file
215 * invisible.
216 */
212 if (rr->len < 3) 217 if (rr->len < 3)
213 goto out; /* Something got screwed up here */ 218 goto out; /* Something got screwed up here */
214 sig = isonum_721(rs.chr); 219 sig = isonum_721(rs.chr);
@@ -216,8 +221,12 @@ repeat:
216 goto eio; 221 goto eio;
217 rs.chr += rr->len; 222 rs.chr += rr->len;
218 rs.len -= rr->len; 223 rs.len -= rr->len;
224 /*
225 * As above, just ignore the rock ridge info if rr->len
226 * is bogus.
227 */
219 if (rs.len < 0) 228 if (rs.len < 0)
220 goto eio; /* corrupted isofs */ 229 goto out; /* Something got screwed up here */
221 230
222 switch (sig) { 231 switch (sig) {
223 case SIG('R', 'R'): 232 case SIG('R', 'R'):
@@ -307,6 +316,11 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
307repeat: 316repeat:
308 while (rs.len > 2) { /* There may be one byte for padding somewhere */ 317 while (rs.len > 2) { /* There may be one byte for padding somewhere */
309 rr = (struct rock_ridge *)rs.chr; 318 rr = (struct rock_ridge *)rs.chr;
319 /*
320 * Ignore rock ridge info if rr->len is out of range, but
321 * don't return -EIO because that would make the file
322 * invisible.
323 */
310 if (rr->len < 3) 324 if (rr->len < 3)
311 goto out; /* Something got screwed up here */ 325 goto out; /* Something got screwed up here */
312 sig = isonum_721(rs.chr); 326 sig = isonum_721(rs.chr);
@@ -314,8 +328,12 @@ repeat:
314 goto eio; 328 goto eio;
315 rs.chr += rr->len; 329 rs.chr += rr->len;
316 rs.len -= rr->len; 330 rs.len -= rr->len;
331 /*
332 * As above, just ignore the rock ridge info if rr->len
333 * is bogus.
334 */
317 if (rs.len < 0) 335 if (rs.len < 0)
318 goto eio; /* corrupted isofs */ 336 goto out; /* Something got screwed up here */
319 337
320 switch (sig) { 338 switch (sig) {
321#ifndef CONFIG_ZISOFS /* No flag for SF or ZF */ 339#ifndef CONFIG_ZISOFS /* No flag for SF or ZF */
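Both Rock Ridge parsers above now treat an out-of-range record length as "stop parsing extensions for this entry" rather than a fatal -EIO, so a corrupted continuation area merely loses its Rock Ridge attributes instead of making the file invisible. A generic sketch of that defensive signature/length walk over a byte buffer; the record layout below is simplified for the example:

#include <stdio.h>

struct record { unsigned char sig[2]; unsigned char len; /* payload follows */ };

/* Walk sig/len records; on a bogus length just stop and keep whatever was
 * parsed so far, instead of failing the whole directory entry. */
static int parse(const unsigned char *buf, size_t remaining)
{
	int parsed = 0;

	while (remaining > 2) {
		const struct record *r = (const struct record *)buf;

		if (r->len < 3 || r->len > remaining)
			break;			/* ignore the rest, no error */
		printf("record %c%c, len %u\n", r->sig[0], r->sig[1], r->len);
		buf += r->len;
		remaining -= r->len;
		parsed++;
	}
	return parsed;
}

int main(void)
{
	unsigned char buf[] = {
		'P', 'X', 8, 0, 0, 0, 0, 0,	/* well-formed record */
		'N', 'M', 200			/* bogus length: just stop */
	};

	printf("parsed %d record(s)\n", parse(buf, sizeof(buf)));
	return 0;
}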
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 5a8ca61498ca..2eccbfaa1d48 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -36,7 +36,7 @@ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
36 36
37/* 37/*
38 * When an ext3-ordered file is truncated, it is possible that many pages are 38 * When an ext3-ordered file is truncated, it is possible that many pages are
39 * not sucessfully freed, because they are attached to a committing transaction. 39 * not successfully freed, because they are attached to a committing transaction.
40 * After the transaction commits, these pages are left on the LRU, with no 40 * After the transaction commits, these pages are left on the LRU, with no
41 * ->mapping, and with attached buffers. These pages are trivially reclaimable 41 * ->mapping, and with attached buffers. These pages are trivially reclaimable
42 * by the VM, but their apparent absence upsets the VM accounting, and it makes 42 * by the VM, but their apparent absence upsets the VM accounting, and it makes
@@ -45,8 +45,8 @@ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
45 * So here, we have a buffer which has just come off the forget list. Look to 45 * So here, we have a buffer which has just come off the forget list. Look to
46 * see if we can strip all buffers from the backing page. 46 * see if we can strip all buffers from the backing page.
47 * 47 *
48 * Called under lock_journal(), and possibly under journal_datalist_lock. The 48 * Called under journal->j_list_lock. The caller provided us with a ref
49 * caller provided us with a ref against the buffer, and we drop that here. 49 * against the buffer, and we drop that here.
50 */ 50 */
51static void release_buffer_page(struct buffer_head *bh) 51static void release_buffer_page(struct buffer_head *bh)
52{ 52{
@@ -78,6 +78,19 @@ nope:
78} 78}
79 79
80/* 80/*
81 * Decrement reference counter for data buffer. If it has been marked
82 * 'BH_Freed', release it and the page to which it belongs if possible.
83 */
84static void release_data_buffer(struct buffer_head *bh)
85{
86 if (buffer_freed(bh)) {
87 clear_buffer_freed(bh);
88 release_buffer_page(bh);
89 } else
90 put_bh(bh);
91}
92
93/*
81 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is 94 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
82 * held. For ranking reasons we must trylock. If we lose, schedule away and 95 * held. For ranking reasons we must trylock. If we lose, schedule away and
83 * return 0. j_list_lock is dropped in this case. 96 * return 0. j_list_lock is dropped in this case.
@@ -172,7 +185,7 @@ static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
172/* 185/*
173 * Submit all the data buffers to disk 186 * Submit all the data buffers to disk
174 */ 187 */
175static void journal_submit_data_buffers(journal_t *journal, 188static int journal_submit_data_buffers(journal_t *journal,
176 transaction_t *commit_transaction) 189 transaction_t *commit_transaction)
177{ 190{
178 struct journal_head *jh; 191 struct journal_head *jh;
@@ -180,6 +193,7 @@ static void journal_submit_data_buffers(journal_t *journal,
180 int locked; 193 int locked;
181 int bufs = 0; 194 int bufs = 0;
182 struct buffer_head **wbuf = journal->j_wbuf; 195 struct buffer_head **wbuf = journal->j_wbuf;
196 int err = 0;
183 197
184 /* 198 /*
185 * Whenever we unlock the journal and sleep, things can get added 199 * Whenever we unlock the journal and sleep, things can get added
@@ -231,7 +245,7 @@ write_out_data:
231 if (locked) 245 if (locked)
232 unlock_buffer(bh); 246 unlock_buffer(bh);
233 BUFFER_TRACE(bh, "already cleaned up"); 247 BUFFER_TRACE(bh, "already cleaned up");
234 put_bh(bh); 248 release_data_buffer(bh);
235 continue; 249 continue;
236 } 250 }
237 if (locked && test_clear_buffer_dirty(bh)) { 251 if (locked && test_clear_buffer_dirty(bh)) {
@@ -253,15 +267,17 @@ write_out_data:
253 put_bh(bh); 267 put_bh(bh);
254 } else { 268 } else {
255 BUFFER_TRACE(bh, "writeout complete: unfile"); 269 BUFFER_TRACE(bh, "writeout complete: unfile");
270 if (unlikely(!buffer_uptodate(bh)))
271 err = -EIO;
256 __journal_unfile_buffer(jh); 272 __journal_unfile_buffer(jh);
257 jbd_unlock_bh_state(bh); 273 jbd_unlock_bh_state(bh);
258 if (locked) 274 if (locked)
259 unlock_buffer(bh); 275 unlock_buffer(bh);
260 journal_remove_journal_head(bh); 276 journal_remove_journal_head(bh);
261 /* Once for our safety reference, once for 277 /* One for our safety reference, other for
262 * journal_remove_journal_head() */ 278 * journal_remove_journal_head() */
263 put_bh(bh); 279 put_bh(bh);
264 put_bh(bh); 280 release_data_buffer(bh);
265 } 281 }
266 282
267 if (need_resched() || spin_needbreak(&journal->j_list_lock)) { 283 if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
@@ -271,6 +287,8 @@ write_out_data:
271 } 287 }
272 spin_unlock(&journal->j_list_lock); 288 spin_unlock(&journal->j_list_lock);
273 journal_do_submit_data(wbuf, bufs); 289 journal_do_submit_data(wbuf, bufs);
290
291 return err;
274} 292}
275 293
276/* 294/*
@@ -410,8 +428,7 @@ void journal_commit_transaction(journal_t *journal)
410 * Now start flushing things to disk, in the order they appear 428 * Now start flushing things to disk, in the order they appear
411 * on the transaction lists. Data blocks go first. 429 * on the transaction lists. Data blocks go first.
412 */ 430 */
413 err = 0; 431 err = journal_submit_data_buffers(journal, commit_transaction);
414 journal_submit_data_buffers(journal, commit_transaction);
415 432
416 /* 433 /*
417 * Wait for all previously submitted IO to complete. 434 * Wait for all previously submitted IO to complete.
@@ -426,10 +443,21 @@ void journal_commit_transaction(journal_t *journal)
426 if (buffer_locked(bh)) { 443 if (buffer_locked(bh)) {
427 spin_unlock(&journal->j_list_lock); 444 spin_unlock(&journal->j_list_lock);
428 wait_on_buffer(bh); 445 wait_on_buffer(bh);
429 if (unlikely(!buffer_uptodate(bh)))
430 err = -EIO;
431 spin_lock(&journal->j_list_lock); 446 spin_lock(&journal->j_list_lock);
432 } 447 }
448 if (unlikely(!buffer_uptodate(bh))) {
449 if (TestSetPageLocked(bh->b_page)) {
450 spin_unlock(&journal->j_list_lock);
451 lock_page(bh->b_page);
452 spin_lock(&journal->j_list_lock);
453 }
454 if (bh->b_page->mapping)
455 set_bit(AS_EIO, &bh->b_page->mapping->flags);
456
457 unlock_page(bh->b_page);
458 SetPageError(bh->b_page);
459 err = -EIO;
460 }
433 if (!inverted_lock(journal, bh)) { 461 if (!inverted_lock(journal, bh)) {
434 put_bh(bh); 462 put_bh(bh);
435 spin_lock(&journal->j_list_lock); 463 spin_lock(&journal->j_list_lock);
@@ -443,17 +471,21 @@ void journal_commit_transaction(journal_t *journal)
443 } else { 471 } else {
444 jbd_unlock_bh_state(bh); 472 jbd_unlock_bh_state(bh);
445 } 473 }
446 put_bh(bh); 474 release_data_buffer(bh);
447 cond_resched_lock(&journal->j_list_lock); 475 cond_resched_lock(&journal->j_list_lock);
448 } 476 }
449 spin_unlock(&journal->j_list_lock); 477 spin_unlock(&journal->j_list_lock);
450 478
451 if (err) 479 if (err) {
452 journal_abort(journal, err); 480 char b[BDEVNAME_SIZE];
453 481
454 journal_write_revoke_records(journal, commit_transaction); 482 printk(KERN_WARNING
483 "JBD: Detected IO errors while flushing file data "
484 "on %s\n", bdevname(journal->j_fs_dev, b));
485 err = 0;
486 }
455 487
456 jbd_debug(3, "JBD: commit phase 2\n"); 488 journal_write_revoke_records(journal, commit_transaction);
457 489
458 /* 490 /*
459 * If we found any dirty or locked buffers, then we should have 491 * If we found any dirty or locked buffers, then we should have
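
With this change an I/O error hit while flushing ordered data no longer aborts the whole journal; it is logged once per commit and the affected page's mapping is flagged with AS_EIO, which is what allows a later fsync() on that file to return -EIO to the application. A small userspace sketch of that application-side contract (nothing here is jbd-specific, and /tmp/datafile is just a placeholder path):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Placeholder path; on a healthy disk this simply succeeds. */
        int fd = open("/tmp/datafile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        if (write(fd, "payload\n", 8) != 8)
                perror("write");

        /*
         * If the kernel hit a write error while flushing this file's dirty
         * pages (the case the patch marks with AS_EIO), the error surfaces
         * here instead of being silently dropped.
         */
        if (fsync(fd) < 0)
                fprintf(stderr, "fsync failed: %s\n", strerror(errno));
        else
                printf("data flushed cleanly\n");

        close(fd);
        return 0;
}
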
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index b99c3b3654c4..aa7143a8349b 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -68,7 +68,6 @@ EXPORT_SYMBOL(journal_set_features);
68EXPORT_SYMBOL(journal_create); 68EXPORT_SYMBOL(journal_create);
69EXPORT_SYMBOL(journal_load); 69EXPORT_SYMBOL(journal_load);
70EXPORT_SYMBOL(journal_destroy); 70EXPORT_SYMBOL(journal_destroy);
71EXPORT_SYMBOL(journal_update_superblock);
72EXPORT_SYMBOL(journal_abort); 71EXPORT_SYMBOL(journal_abort);
73EXPORT_SYMBOL(journal_errno); 72EXPORT_SYMBOL(journal_errno);
74EXPORT_SYMBOL(journal_ack_err); 73EXPORT_SYMBOL(journal_ack_err);
@@ -1636,9 +1635,10 @@ static int journal_init_journal_head_cache(void)
1636 1635
1637static void journal_destroy_journal_head_cache(void) 1636static void journal_destroy_journal_head_cache(void)
1638{ 1637{
1639 J_ASSERT(journal_head_cache != NULL); 1638 if (journal_head_cache) {
1640 kmem_cache_destroy(journal_head_cache); 1639 kmem_cache_destroy(journal_head_cache);
1641 journal_head_cache = NULL; 1640 journal_head_cache = NULL;
1641 }
1642} 1642}
1643 1643
1644/* 1644/*
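
The journal_destroy_journal_head_cache() change replaces an assertion with a NULL check, so the destructor is safe to run even when the matching constructor never succeeded or the cache was already torn down. The same guard-and-clear shape in plain userspace C, with free() standing in for kmem_cache_destroy():

#include <stdlib.h>

struct cache { int dummy; };            /* stand-in for a kmem_cache */

static struct cache *head_cache;

/*
 * Safe to call whether or not the cache was ever created, and safe to
 * call twice: the pointer is checked and then cleared.
 */
static void destroy_head_cache(void)
{
        if (head_cache) {
                free(head_cache);
                head_cache = NULL;
        }
}

int main(void)
{
        head_cache = malloc(sizeof(*head_cache));
        destroy_head_cache();
        destroy_head_cache();           /* second call is a harmless no-op */
        return 0;
}
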
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index 1bb43e987f4b..c7bd649bbbdc 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -166,138 +166,123 @@ static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
166 return NULL; 166 return NULL;
167} 167}
168 168
169void journal_destroy_revoke_caches(void)
170{
171 if (revoke_record_cache) {
172 kmem_cache_destroy(revoke_record_cache);
173 revoke_record_cache = NULL;
174 }
175 if (revoke_table_cache) {
176 kmem_cache_destroy(revoke_table_cache);
177 revoke_table_cache = NULL;
178 }
179}
180
169int __init journal_init_revoke_caches(void) 181int __init journal_init_revoke_caches(void)
170{ 182{
183 J_ASSERT(!revoke_record_cache);
184 J_ASSERT(!revoke_table_cache);
185
171 revoke_record_cache = kmem_cache_create("revoke_record", 186 revoke_record_cache = kmem_cache_create("revoke_record",
172 sizeof(struct jbd_revoke_record_s), 187 sizeof(struct jbd_revoke_record_s),
173 0, 188 0,
174 SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY, 189 SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
175 NULL); 190 NULL);
176 if (!revoke_record_cache) 191 if (!revoke_record_cache)
177 return -ENOMEM; 192 goto record_cache_failure;
178 193
179 revoke_table_cache = kmem_cache_create("revoke_table", 194 revoke_table_cache = kmem_cache_create("revoke_table",
180 sizeof(struct jbd_revoke_table_s), 195 sizeof(struct jbd_revoke_table_s),
181 0, SLAB_TEMPORARY, NULL); 196 0, SLAB_TEMPORARY, NULL);
182 if (!revoke_table_cache) { 197 if (!revoke_table_cache)
183 kmem_cache_destroy(revoke_record_cache); 198 goto table_cache_failure;
184 revoke_record_cache = NULL; 199
185 return -ENOMEM;
186 }
187 return 0; 200 return 0;
188}
189 201
190void journal_destroy_revoke_caches(void) 202table_cache_failure:
191{ 203 journal_destroy_revoke_caches();
192 kmem_cache_destroy(revoke_record_cache); 204record_cache_failure:
193 revoke_record_cache = NULL; 205 return -ENOMEM;
194 kmem_cache_destroy(revoke_table_cache);
195 revoke_table_cache = NULL;
196} 206}
197 207
198/* Initialise the revoke table for a given journal to a given size. */ 208static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
199
200int journal_init_revoke(journal_t *journal, int hash_size)
201{ 209{
202 int shift, tmp; 210 int shift = 0;
211 int tmp = hash_size;
212 struct jbd_revoke_table_s *table;
203 213
204 J_ASSERT (journal->j_revoke_table[0] == NULL); 214 table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
215 if (!table)
216 goto out;
205 217
206 shift = 0;
207 tmp = hash_size;
208 while((tmp >>= 1UL) != 0UL) 218 while((tmp >>= 1UL) != 0UL)
209 shift++; 219 shift++;
210 220
211 journal->j_revoke_table[0] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL); 221 table->hash_size = hash_size;
212 if (!journal->j_revoke_table[0]) 222 table->hash_shift = shift;
213 return -ENOMEM; 223 table->hash_table =
214 journal->j_revoke = journal->j_revoke_table[0];
215
216 /* Check that the hash_size is a power of two */
217 J_ASSERT(is_power_of_2(hash_size));
218
219 journal->j_revoke->hash_size = hash_size;
220
221 journal->j_revoke->hash_shift = shift;
222
223 journal->j_revoke->hash_table =
224 kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL); 224 kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
225 if (!journal->j_revoke->hash_table) { 225 if (!table->hash_table) {
226 kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]); 226 kmem_cache_free(revoke_table_cache, table);
227 journal->j_revoke = NULL; 227 table = NULL;
228 return -ENOMEM; 228 goto out;
229 } 229 }
230 230
231 for (tmp = 0; tmp < hash_size; tmp++) 231 for (tmp = 0; tmp < hash_size; tmp++)
232 INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]); 232 INIT_LIST_HEAD(&table->hash_table[tmp]);
233 233
234 journal->j_revoke_table[1] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL); 234out:
235 if (!journal->j_revoke_table[1]) { 235 return table;
236 kfree(journal->j_revoke_table[0]->hash_table); 236}
237 kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]); 237
238 return -ENOMEM; 238static void journal_destroy_revoke_table(struct jbd_revoke_table_s *table)
239{
240 int i;
241 struct list_head *hash_list;
242
243 for (i = 0; i < table->hash_size; i++) {
244 hash_list = &table->hash_table[i];
245 J_ASSERT(list_empty(hash_list));
239 } 246 }
240 247
241 journal->j_revoke = journal->j_revoke_table[1]; 248 kfree(table->hash_table);
249 kmem_cache_free(revoke_table_cache, table);
250}
242 251
243 /* Check that the hash_size is a power of two */ 252/* Initialise the revoke table for a given journal to a given size. */
253int journal_init_revoke(journal_t *journal, int hash_size)
254{
255 J_ASSERT(journal->j_revoke_table[0] == NULL);
244 J_ASSERT(is_power_of_2(hash_size)); 256 J_ASSERT(is_power_of_2(hash_size));
245 257
246 journal->j_revoke->hash_size = hash_size; 258 journal->j_revoke_table[0] = journal_init_revoke_table(hash_size);
259 if (!journal->j_revoke_table[0])
260 goto fail0;
247 261
248 journal->j_revoke->hash_shift = shift; 262 journal->j_revoke_table[1] = journal_init_revoke_table(hash_size);
263 if (!journal->j_revoke_table[1])
264 goto fail1;
249 265
250 journal->j_revoke->hash_table = 266 journal->j_revoke = journal->j_revoke_table[1];
251 kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
252 if (!journal->j_revoke->hash_table) {
253 kfree(journal->j_revoke_table[0]->hash_table);
254 kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
255 kmem_cache_free(revoke_table_cache, journal->j_revoke_table[1]);
256 journal->j_revoke = NULL;
257 return -ENOMEM;
258 }
259
260 for (tmp = 0; tmp < hash_size; tmp++)
261 INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
262 267
263 spin_lock_init(&journal->j_revoke_lock); 268 spin_lock_init(&journal->j_revoke_lock);
264 269
265 return 0; 270 return 0;
266}
267 271
268/* Destoy a journal's revoke table. The table must already be empty! */ 272fail1:
273 journal_destroy_revoke_table(journal->j_revoke_table[0]);
274fail0:
275 return -ENOMEM;
276}
269 277
278/* Destroy a journal's revoke table. The table must already be empty! */
270void journal_destroy_revoke(journal_t *journal) 279void journal_destroy_revoke(journal_t *journal)
271{ 280{
272 struct jbd_revoke_table_s *table;
273 struct list_head *hash_list;
274 int i;
275
276 table = journal->j_revoke_table[0];
277 if (!table)
278 return;
279
280 for (i=0; i<table->hash_size; i++) {
281 hash_list = &table->hash_table[i];
282 J_ASSERT (list_empty(hash_list));
283 }
284
285 kfree(table->hash_table);
286 kmem_cache_free(revoke_table_cache, table);
287 journal->j_revoke = NULL;
288
289 table = journal->j_revoke_table[1];
290 if (!table)
291 return;
292
293 for (i=0; i<table->hash_size; i++) {
294 hash_list = &table->hash_table[i];
295 J_ASSERT (list_empty(hash_list));
296 }
297
298 kfree(table->hash_table);
299 kmem_cache_free(revoke_table_cache, table);
300 journal->j_revoke = NULL; 281 journal->j_revoke = NULL;
282 if (journal->j_revoke_table[0])
283 journal_destroy_revoke_table(journal->j_revoke_table[0]);
284 if (journal->j_revoke_table[1])
285 journal_destroy_revoke_table(journal->j_revoke_table[1]);
301} 286}
302 287
303 288
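
journal_init_revoke_table() factors out one pattern: the hash size must be a power of two, the shift is derived by counting how many times it can be halved, and every bucket starts as an empty list head. A compressed userspace rendering of that pattern (plain malloc instead of a slab cache, and a trivial list head instead of the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

struct revoke_table {
        int hash_size;                  /* number of buckets, power of two */
        int hash_shift;                 /* log2(hash_size), used by the hash fn */
        struct list_head *hash_table;
};

static struct revoke_table *init_revoke_table(int hash_size)
{
        struct revoke_table *table;
        int shift = 0, tmp = hash_size;

        /* Caller guarantees hash_size is a power of two (J_ASSERT upstream). */
        table = malloc(sizeof(*table));
        if (!table)
                return NULL;

        while ((tmp >>= 1) != 0)
                shift++;

        table->hash_size = hash_size;
        table->hash_shift = shift;
        table->hash_table = malloc(hash_size * sizeof(struct list_head));
        if (!table->hash_table) {
                free(table);
                return NULL;
        }

        for (tmp = 0; tmp < hash_size; tmp++)
                INIT_LIST_HEAD(&table->hash_table[tmp]);

        return table;
}

int main(void)
{
        struct revoke_table *t = init_revoke_table(256);

        if (t) {
                printf("hash_size=%d hash_shift=%d\n", t->hash_size, t->hash_shift);
                free(t->hash_table);
                free(t);
        }
        return 0;
}
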
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 67ff2024c23c..8dee32007500 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1648,12 +1648,42 @@ out:
1648 return; 1648 return;
1649} 1649}
1650 1650
1651/*
1652 * journal_try_to_free_buffers() could race with journal_commit_transaction()
 1653 * The latter might still hold a count on buffers when inspecting
1654 * them on t_syncdata_list or t_locked_list.
1655 *
1656 * journal_try_to_free_buffers() will call this function to
1657 * wait for the current transaction to finish syncing data buffers, before
 1658 * trying to free that buffer.
1659 *
1660 * Called with journal->j_state_lock held.
1661 */
1662static void journal_wait_for_transaction_sync_data(journal_t *journal)
1663{
1664 transaction_t *transaction = NULL;
1665 tid_t tid;
1666
1667 spin_lock(&journal->j_state_lock);
1668 transaction = journal->j_committing_transaction;
1669
1670 if (!transaction) {
1671 spin_unlock(&journal->j_state_lock);
1672 return;
1673 }
1674
1675 tid = transaction->t_tid;
1676 spin_unlock(&journal->j_state_lock);
1677 log_wait_commit(journal, tid);
1678}
1651 1679
1652/** 1680/**
1653 * int journal_try_to_free_buffers() - try to free page buffers. 1681 * int journal_try_to_free_buffers() - try to free page buffers.
1654 * @journal: journal for operation 1682 * @journal: journal for operation
1655 * @page: to try and free 1683 * @page: to try and free
 1656 * @unused_gfp_mask: unused 1684 * @gfp_mask: we use the mask to detect how hard we should try to release
 1685 * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for commit code to
1686 * release the buffers.
1657 * 1687 *
1658 * 1688 *
1659 * For all the buffers on this page, 1689 * For all the buffers on this page,
@@ -1682,9 +1712,11 @@ out:
1682 * journal_try_to_free_buffer() is changing its state. But that 1712 * journal_try_to_free_buffer() is changing its state. But that
1683 * cannot happen because we never reallocate freed data as metadata 1713 * cannot happen because we never reallocate freed data as metadata
1684 * while the data is part of a transaction. Yes? 1714 * while the data is part of a transaction. Yes?
1715 *
1716 * Return 0 on failure, 1 on success
1685 */ 1717 */
1686int journal_try_to_free_buffers(journal_t *journal, 1718int journal_try_to_free_buffers(journal_t *journal,
1687 struct page *page, gfp_t unused_gfp_mask) 1719 struct page *page, gfp_t gfp_mask)
1688{ 1720{
1689 struct buffer_head *head; 1721 struct buffer_head *head;
1690 struct buffer_head *bh; 1722 struct buffer_head *bh;
@@ -1713,7 +1745,28 @@ int journal_try_to_free_buffers(journal_t *journal,
1713 if (buffer_jbd(bh)) 1745 if (buffer_jbd(bh))
1714 goto busy; 1746 goto busy;
1715 } while ((bh = bh->b_this_page) != head); 1747 } while ((bh = bh->b_this_page) != head);
1748
1716 ret = try_to_free_buffers(page); 1749 ret = try_to_free_buffers(page);
1750
1751 /*
1752 * There are a number of places where journal_try_to_free_buffers()
 1753 * could race with journal_commit_transaction(); the latter still
 1754 * holds a reference to the buffers it is processing, so
 1755 * try_to_free_buffers() fails to free them. Some callers of
 1756 * releasepage() merely request that page buffers be dropped, while
 1757 * others treat a failure to free as an error (such as generic_file_direct_IO()).
1758 *
1759 * So, if the caller of try_to_release_page() wants the synchronous
 1760 * behaviour (i.e. make sure buffers are dropped upon return),
 1761 * wait for the current transaction to finish flushing its
 1762 * dirty data buffers, then try to free those buffers again,
1763 * with the journal locked.
1764 */
1765 if (ret == 0 && (gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)) {
1766 journal_wait_for_transaction_sync_data(journal);
1767 ret = try_to_free_buffers(page);
1768 }
1769
1717busy: 1770busy:
1718 return ret; 1771 return ret;
1719} 1772}
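
The new release path in journal_try_to_free_buffers() is a try / wait-for-the-committing-transaction / retry sequence, taken only when the gfp mask says the caller may sleep. The pthread sketch below mirrors just that control flow in userspace; the commit thread, the mutex, and the flag variables are all stand-ins, not jbd structures:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static bool commit_in_progress = true;  /* stands in for j_committing_transaction */
static bool buffer_busy = true;         /* stands in for the ref held by commit */

static void *commit_thread(void *arg)
{
        (void)arg;
        sleep(1);                       /* pretend to flush ordered data */
        pthread_mutex_lock(&lock);
        buffer_busy = false;            /* commit drops its reference */
        commit_in_progress = false;
        pthread_cond_broadcast(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
}

static bool try_to_free(void)           /* analogue of try_to_free_buffers() */
{
        bool freed;

        pthread_mutex_lock(&lock);
        freed = !buffer_busy;
        pthread_mutex_unlock(&lock);
        return freed;
}

static void wait_for_commit(void)       /* analogue of journal_wait_for_transaction_sync_data() */
{
        pthread_mutex_lock(&lock);
        while (commit_in_progress)
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;
        int may_wait = 1;               /* stands in for __GFP_WAIT | __GFP_FS */
        bool freed;

        pthread_create(&t, NULL, commit_thread, NULL);

        freed = try_to_free();
        if (!freed && may_wait) {       /* same shape as the new releasepage path */
                wait_for_commit();
                freed = try_to_free();
        }
        printf("buffers freed: %s\n", freed ? "yes" : "no");
        pthread_join(t, NULL);
        return 0;
}
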
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 0288e6d7936a..359c091d8965 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -22,6 +22,7 @@
22#include <linux/parser.h> 22#include <linux/parser.h>
23#include <linux/completion.h> 23#include <linux/completion.h>
24#include <linux/vfs.h> 24#include <linux/vfs.h>
25#include <linux/quotaops.h>
25#include <linux/mount.h> 26#include <linux/mount.h>
26#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
27#include <linux/kthread.h> 28#include <linux/kthread.h>
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 1f6dc518505c..31668b690e03 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -582,7 +582,15 @@ again:
582 } 582 }
583 if (status < 0) 583 if (status < 0)
584 goto out_unlock; 584 goto out_unlock;
585 status = nlm_stat_to_errno(resp->status); 585 /*
586 * EAGAIN doesn't make sense for sleeping locks, and in some
587 * cases NLM_LCK_DENIED is returned for a permanent error. So
588 * turn it into an ENOLCK.
589 */
590 if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
591 status = -ENOLCK;
592 else
593 status = nlm_stat_to_errno(resp->status);
586out_unblock: 594out_unblock:
587 nlmclnt_finish_block(block); 595 nlmclnt_finish_block(block);
588out: 596out:
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 821b9acdfb66..cf0d5c2c318d 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -418,8 +418,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
418 goto out; 418 goto out;
419 case -EAGAIN: 419 case -EAGAIN:
420 ret = nlm_lck_denied; 420 ret = nlm_lck_denied;
421 break; 421 goto out;
422 case -EINPROGRESS: 422 case FILE_LOCK_DEFERRED:
423 if (wait) 423 if (wait)
424 break; 424 break;
425 /* Filesystem lock operation is in progress 425 /* Filesystem lock operation is in progress
@@ -434,10 +434,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
434 goto out; 434 goto out;
435 } 435 }
436 436
437 ret = nlm_lck_denied;
438 if (!wait)
439 goto out;
440
441 ret = nlm_lck_blocked; 437 ret = nlm_lck_blocked;
442 438
443 /* Append to list of blocked */ 439 /* Append to list of blocked */
@@ -507,7 +503,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
507 } 503 }
508 504
509 error = vfs_test_lock(file->f_file, &lock->fl); 505 error = vfs_test_lock(file->f_file, &lock->fl);
510 if (error == -EINPROGRESS) { 506 if (error == FILE_LOCK_DEFERRED) {
511 ret = nlmsvc_defer_lock_rqst(rqstp, block); 507 ret = nlmsvc_defer_lock_rqst(rqstp, block);
512 goto out; 508 goto out;
513 } 509 }
@@ -731,8 +727,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
731 switch (error) { 727 switch (error) {
732 case 0: 728 case 0:
733 break; 729 break;
734 case -EAGAIN: 730 case FILE_LOCK_DEFERRED:
735 case -EINPROGRESS:
736 dprintk("lockd: lock still blocked error %d\n", error); 731 dprintk("lockd: lock still blocked error %d\n", error);
737 nlmsvc_insert_block(block, NLM_NEVER); 732 nlmsvc_insert_block(block, NLM_NEVER);
738 nlmsvc_release_block(block); 733 nlmsvc_release_block(block);
diff --git a/fs/locks.c b/fs/locks.c
index dce8c747371c..01490300f7cb 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -779,8 +779,10 @@ find_conflict:
779 if (!flock_locks_conflict(request, fl)) 779 if (!flock_locks_conflict(request, fl))
780 continue; 780 continue;
781 error = -EAGAIN; 781 error = -EAGAIN;
782 if (request->fl_flags & FL_SLEEP) 782 if (!(request->fl_flags & FL_SLEEP))
783 locks_insert_block(fl, request); 783 goto out;
784 error = FILE_LOCK_DEFERRED;
785 locks_insert_block(fl, request);
784 goto out; 786 goto out;
785 } 787 }
786 if (request->fl_flags & FL_ACCESS) 788 if (request->fl_flags & FL_ACCESS)
@@ -836,7 +838,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
836 error = -EDEADLK; 838 error = -EDEADLK;
837 if (posix_locks_deadlock(request, fl)) 839 if (posix_locks_deadlock(request, fl))
838 goto out; 840 goto out;
839 error = -EAGAIN; 841 error = FILE_LOCK_DEFERRED;
840 locks_insert_block(fl, request); 842 locks_insert_block(fl, request);
841 goto out; 843 goto out;
842 } 844 }
@@ -1035,7 +1037,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1035 might_sleep (); 1037 might_sleep ();
1036 for (;;) { 1038 for (;;) {
1037 error = posix_lock_file(filp, fl, NULL); 1039 error = posix_lock_file(filp, fl, NULL);
1038 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1040 if (error != FILE_LOCK_DEFERRED)
1039 break; 1041 break;
1040 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1042 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1041 if (!error) 1043 if (!error)
@@ -1107,9 +1109,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
1107 1109
1108 for (;;) { 1110 for (;;) {
1109 error = __posix_lock_file(inode, &fl, NULL); 1111 error = __posix_lock_file(inode, &fl, NULL);
1110 if (error != -EAGAIN) 1112 if (error != FILE_LOCK_DEFERRED)
1111 break;
1112 if (!(fl.fl_flags & FL_SLEEP))
1113 break; 1113 break;
1114 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); 1114 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1115 if (!error) { 1115 if (!error) {
@@ -1531,7 +1531,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1531 might_sleep(); 1531 might_sleep();
1532 for (;;) { 1532 for (;;) {
1533 error = flock_lock_file(filp, fl); 1533 error = flock_lock_file(filp, fl);
1534 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1534 if (error != FILE_LOCK_DEFERRED)
1535 break; 1535 break;
1536 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1536 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1537 if (!error) 1537 if (!error)
@@ -1716,17 +1716,17 @@ out:
1716 * fl_grant is set. Callers expecting ->lock() to return asynchronously 1716 * fl_grant is set. Callers expecting ->lock() to return asynchronously
1717 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if) 1717 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1718 * the request is for a blocking lock. When ->lock() does return asynchronously, 1718 * the request is for a blocking lock. When ->lock() does return asynchronously,
1719 * it must return -EINPROGRESS, and call ->fl_grant() when the lock 1719 * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
1720 * request completes. 1720 * request completes.
1721 * If the request is for non-blocking lock the file system should return 1721 * If the request is for non-blocking lock the file system should return
1722 * -EINPROGRESS then try to get the lock and call the callback routine with 1722 * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
1723 * the result. If the request timed out the callback routine will return a 1723 * with the result. If the request timed out the callback routine will return a
1724 * nonzero return code and the file system should release the lock. The file 1724 * nonzero return code and the file system should release the lock. The file
1725 * system is also responsible to keep a corresponding posix lock when it 1725 * system is also responsible to keep a corresponding posix lock when it
1726 * grants a lock so the VFS can find out which locks are locally held and do 1726 * grants a lock so the VFS can find out which locks are locally held and do
1727 * the correct lock cleanup when required. 1727 * the correct lock cleanup when required.
1728 * The underlying filesystem must not drop the kernel lock or call 1728 * The underlying filesystem must not drop the kernel lock or call
1729 * ->fl_grant() before returning to the caller with a -EINPROGRESS 1729 * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1730 * return code. 1730 * return code.
1731 */ 1731 */
1732int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) 1732int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
@@ -1738,6 +1738,30 @@ int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, str
1738} 1738}
1739EXPORT_SYMBOL_GPL(vfs_lock_file); 1739EXPORT_SYMBOL_GPL(vfs_lock_file);
1740 1740
1741static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1742 struct file_lock *fl)
1743{
1744 int error;
1745
1746 error = security_file_lock(filp, fl->fl_type);
1747 if (error)
1748 return error;
1749
1750 for (;;) {
1751 error = vfs_lock_file(filp, cmd, fl, NULL);
1752 if (error != FILE_LOCK_DEFERRED)
1753 break;
1754 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1755 if (!error)
1756 continue;
1757
1758 locks_delete_block(fl);
1759 break;
1760 }
1761
1762 return error;
1763}
1764
1741/* Apply the lock described by l to an open file descriptor. 1765/* Apply the lock described by l to an open file descriptor.
1742 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1766 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1743 */ 1767 */
@@ -1795,26 +1819,7 @@ again:
1795 goto out; 1819 goto out;
1796 } 1820 }
1797 1821
1798 error = security_file_lock(filp, file_lock->fl_type); 1822 error = do_lock_file_wait(filp, cmd, file_lock);
1799 if (error)
1800 goto out;
1801
1802 if (filp->f_op && filp->f_op->lock != NULL)
1803 error = filp->f_op->lock(filp, cmd, file_lock);
1804 else {
1805 for (;;) {
1806 error = posix_lock_file(filp, file_lock, NULL);
1807 if (error != -EAGAIN || cmd == F_SETLK)
1808 break;
1809 error = wait_event_interruptible(file_lock->fl_wait,
1810 !file_lock->fl_next);
1811 if (!error)
1812 continue;
1813
1814 locks_delete_block(file_lock);
1815 break;
1816 }
1817 }
1818 1823
1819 /* 1824 /*
1820 * Attempt to detect a close/fcntl race and recover by 1825 * Attempt to detect a close/fcntl race and recover by
@@ -1932,26 +1937,7 @@ again:
1932 goto out; 1937 goto out;
1933 } 1938 }
1934 1939
1935 error = security_file_lock(filp, file_lock->fl_type); 1940 error = do_lock_file_wait(filp, cmd, file_lock);
1936 if (error)
1937 goto out;
1938
1939 if (filp->f_op && filp->f_op->lock != NULL)
1940 error = filp->f_op->lock(filp, cmd, file_lock);
1941 else {
1942 for (;;) {
1943 error = posix_lock_file(filp, file_lock, NULL);
1944 if (error != -EAGAIN || cmd == F_SETLK64)
1945 break;
1946 error = wait_event_interruptible(file_lock->fl_wait,
1947 !file_lock->fl_next);
1948 if (!error)
1949 continue;
1950
1951 locks_delete_block(file_lock);
1952 break;
1953 }
1954 }
1955 1941
1956 /* 1942 /*
1957 * Attempt to detect a close/fcntl race and recover by 1943 * Attempt to detect a close/fcntl race and recover by
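
do_lock_file_wait() is the in-kernel side of the F_SETLK / F_SETLKW split: F_SETLK reports FILE_LOCK_DEFERRED-style contention back to the caller immediately, while F_SETLKW parks the task on fl_wait until the lock can be granted. The userspace-visible behaviour it implements can be exercised with a plain fcntl() record lock (sketch only; /tmp/lockfile is a placeholder path):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct flock fl;
        int fd = open("/tmp/lockfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&fl, 0, sizeof(fl));
        fl.l_type = F_WRLCK;            /* exclusive lock */
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 0;                   /* whole file */

        /* Non-blocking attempt: fails with EAGAIN/EACCES if another process holds it. */
        if (fcntl(fd, F_SETLK, &fl) < 0)
                fprintf(stderr, "F_SETLK: %s\n", strerror(errno));
        else
                printf("got the lock without blocking\n");

        /* Blocking attempt: under contention the kernel waits (the
         * do_lock_file_wait() loop) instead of returning the error to us. */
        if (fcntl(fd, F_SETLKW, &fl) < 0)
                fprintf(stderr, "F_SETLKW: %s\n", strerror(errno));

        close(fd);                      /* closing drops the lock */
        return 0;
}
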
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 84f6242ba6fc..523d73713418 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -256,9 +256,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
256 if (!s->s_root) 256 if (!s->s_root)
257 goto out_iput; 257 goto out_iput;
258 258
259 if (!NO_TRUNCATE)
260 s->s_root->d_op = &minix_dentry_operations;
261
262 if (!(s->s_flags & MS_RDONLY)) { 259 if (!(s->s_flags & MS_RDONLY)) {
263 if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ 260 if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
264 ms->s_state &= ~MINIX_VALID_FS; 261 ms->s_state &= ~MINIX_VALID_FS;
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 326edfe96108..e6a0b193bea4 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -2,11 +2,6 @@
2#include <linux/pagemap.h> 2#include <linux/pagemap.h>
3#include <linux/minix_fs.h> 3#include <linux/minix_fs.h>
4 4
5/*
6 * change the define below to 0 if you want names > info->s_namelen chars to be
7 * truncated. Else they will be disallowed (ENAMETOOLONG).
8 */
9#define NO_TRUNCATE 1
10#define INODE_VERSION(inode) minix_sb(inode->i_sb)->s_version 5#define INODE_VERSION(inode) minix_sb(inode->i_sb)->s_version
11#define MINIX_V1 0x0001 /* original minix fs */ 6#define MINIX_V1 0x0001 /* original minix fs */
12#define MINIX_V2 0x0002 /* minix V2 fs */ 7#define MINIX_V2 0x0002 /* minix V2 fs */
@@ -83,7 +78,6 @@ extern const struct inode_operations minix_file_inode_operations;
83extern const struct inode_operations minix_dir_inode_operations; 78extern const struct inode_operations minix_dir_inode_operations;
84extern const struct file_operations minix_file_operations; 79extern const struct file_operations minix_file_operations;
85extern const struct file_operations minix_dir_operations; 80extern const struct file_operations minix_dir_operations;
86extern struct dentry_operations minix_dentry_operations;
87 81
88static inline struct minix_sb_info *minix_sb(struct super_block *sb) 82static inline struct minix_sb_info *minix_sb(struct super_block *sb)
89{ 83{
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 102241bc9c79..32b131cd6121 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -18,30 +18,6 @@ static int add_nondir(struct dentry *dentry, struct inode *inode)
18 return err; 18 return err;
19} 19}
20 20
21static int minix_hash(struct dentry *dentry, struct qstr *qstr)
22{
23 unsigned long hash;
24 int i;
25 const unsigned char *name;
26
27 i = minix_sb(dentry->d_inode->i_sb)->s_namelen;
28 if (i >= qstr->len)
29 return 0;
30 /* Truncate the name in place, avoids having to define a compare
31 function. */
32 qstr->len = i;
33 name = qstr->name;
34 hash = init_name_hash();
35 while (i--)
36 hash = partial_name_hash(*name++, hash);
37 qstr->hash = end_name_hash(hash);
38 return 0;
39}
40
41struct dentry_operations minix_dentry_operations = {
42 .d_hash = minix_hash,
43};
44
45static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd) 21static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
46{ 22{
47 struct inode * inode = NULL; 23 struct inode * inode = NULL;
diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c
index 1f7f2956412a..e844b9809d27 100644
--- a/fs/msdos/namei.c
+++ b/fs/msdos/namei.c
@@ -14,12 +14,7 @@
14 14
15/* Characters that are undesirable in an MS-DOS file name */ 15/* Characters that are undesirable in an MS-DOS file name */
16static unsigned char bad_chars[] = "*?<>|\""; 16static unsigned char bad_chars[] = "*?<>|\"";
17static unsigned char bad_if_strict_pc[] = "+=,; "; 17static unsigned char bad_if_strict[] = "+=,; ";
18/* GEMDOS is less restrictive */
19static unsigned char bad_if_strict_atari[] = " ";
20
21#define bad_if_strict(opts) \
22 ((opts)->atari ? bad_if_strict_atari : bad_if_strict_pc)
23 18
24/***** Formats an MS-DOS file name. Rejects invalid names. */ 19/***** Formats an MS-DOS file name. Rejects invalid names. */
25static int msdos_format_name(const unsigned char *name, int len, 20static int msdos_format_name(const unsigned char *name, int len,
@@ -40,21 +35,20 @@ static int msdos_format_name(const unsigned char *name, int len,
40 /* Get rid of dot - test for it elsewhere */ 35 /* Get rid of dot - test for it elsewhere */
41 name++; 36 name++;
42 len--; 37 len--;
43 } else if (!opts->atari) 38 } else
44 return -EINVAL; 39 return -EINVAL;
45 } 40 }
46 /* 41 /*
47 * disallow names that _really_ start with a dot for MS-DOS, 42 * disallow names that _really_ start with a dot
48 * GEMDOS does not care
49 */ 43 */
50 space = !opts->atari; 44 space = 1;
51 c = 0; 45 c = 0;
52 for (walk = res; len && walk - res < 8; walk++) { 46 for (walk = res; len && walk - res < 8; walk++) {
53 c = *name++; 47 c = *name++;
54 len--; 48 len--;
55 if (opts->name_check != 'r' && strchr(bad_chars, c)) 49 if (opts->name_check != 'r' && strchr(bad_chars, c))
56 return -EINVAL; 50 return -EINVAL;
57 if (opts->name_check == 's' && strchr(bad_if_strict(opts), c)) 51 if (opts->name_check == 's' && strchr(bad_if_strict, c))
58 return -EINVAL; 52 return -EINVAL;
59 if (c >= 'A' && c <= 'Z' && opts->name_check == 's') 53 if (c >= 'A' && c <= 'Z' && opts->name_check == 's')
60 return -EINVAL; 54 return -EINVAL;
@@ -94,7 +88,7 @@ static int msdos_format_name(const unsigned char *name, int len,
94 if (opts->name_check != 'r' && strchr(bad_chars, c)) 88 if (opts->name_check != 'r' && strchr(bad_chars, c))
95 return -EINVAL; 89 return -EINVAL;
96 if (opts->name_check == 's' && 90 if (opts->name_check == 's' &&
97 strchr(bad_if_strict(opts), c)) 91 strchr(bad_if_strict, c))
98 return -EINVAL; 92 return -EINVAL;
99 if (c < ' ' || c == ':' || c == '\\') 93 if (c < ' ' || c == ':' || c == '\\')
100 return -EINVAL; 94 return -EINVAL;
@@ -243,6 +237,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
243 int is_dir, int is_hid, int cluster, 237 int is_dir, int is_hid, int cluster,
244 struct timespec *ts, struct fat_slot_info *sinfo) 238 struct timespec *ts, struct fat_slot_info *sinfo)
245{ 239{
240 struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
246 struct msdos_dir_entry de; 241 struct msdos_dir_entry de;
247 __le16 time, date; 242 __le16 time, date;
248 int err; 243 int err;
@@ -252,7 +247,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
252 if (is_hid) 247 if (is_hid)
253 de.attr |= ATTR_HIDDEN; 248 de.attr |= ATTR_HIDDEN;
254 de.lcase = 0; 249 de.lcase = 0;
255 fat_date_unix2dos(ts->tv_sec, &time, &date); 250 fat_date_unix2dos(ts->tv_sec, &time, &date, sbi->options.tz_utc);
256 de.cdate = de.adate = 0; 251 de.cdate = de.adate = 0;
257 de.ctime = 0; 252 de.ctime = 0;
258 de.ctime_cs = 0; 253 de.ctime_cs = 0;
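
With the Atari/GEMDOS special cases gone, msdos_format_name() applies a single strict-mode character set, and the per-character rejection logic reduces to a couple of strchr() checks. Roughly, as a userspace sketch (one character check only, not the full 8.3 formatter):

#include <stdio.h>
#include <string.h>

static const char bad_chars[] = "*?<>|\"";
static const char bad_if_strict[] = "+=,; ";

/* Accept or reject one character the way the 'r'/'s' name checks do. */
static int msdos_char_ok(unsigned char c, char name_check)
{
        if (name_check != 'r' && strchr(bad_chars, c))
                return 0;
        if (name_check == 's' && strchr(bad_if_strict, c))
                return 0;
        if (name_check == 's' && c >= 'A' && c <= 'Z')
                return 0;               /* strict mode rejects upper case */
        if (c < ' ' || c == ':' || c == '\\')
                return 0;
        return 1;
}

int main(void)
{
        const char *name = "read me";   /* embedded space fails the strict check */

        printf("'%s' strict-ok at offset 4: %d\n", name,
               msdos_char_ok((unsigned char)name[4], 's'));
        return 0;
}
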
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 46763d1cd397..8478fc25daee 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -127,7 +127,7 @@ enum {
127 Opt_err 127 Opt_err
128}; 128};
129 129
130static match_table_t __initdata tokens = { 130static match_table_t __initconst tokens = {
131 {Opt_port, "port=%u"}, 131 {Opt_port, "port=%u"},
132 {Opt_rsize, "rsize=%u"}, 132 {Opt_rsize, "rsize=%u"},
133 {Opt_wsize, "wsize=%u"}, 133 {Opt_wsize, "wsize=%u"},
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 6b6225ac4926..15c6faeec77c 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -19,6 +19,13 @@
19 19
20#define NFSDDBG_FACILITY NFSDDBG_LOCKD 20#define NFSDDBG_FACILITY NFSDDBG_LOCKD
21 21
22#ifdef CONFIG_LOCKD_V4
23#define nlm_stale_fh nlm4_stale_fh
24#define nlm_failed nlm4_failed
25#else
26#define nlm_stale_fh nlm_lck_denied_nolocks
27#define nlm_failed nlm_lck_denied_nolocks
28#endif
22/* 29/*
23 * Note: we hold the dentry use count while the file is open. 30 * Note: we hold the dentry use count while the file is open.
24 */ 31 */
@@ -47,12 +54,10 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
47 return 0; 54 return 0;
48 case nfserr_dropit: 55 case nfserr_dropit:
49 return nlm_drop_reply; 56 return nlm_drop_reply;
50#ifdef CONFIG_LOCKD_V4
51 case nfserr_stale: 57 case nfserr_stale:
52 return nlm4_stale_fh; 58 return nlm_stale_fh;
53#endif
54 default: 59 default:
55 return nlm_lck_denied; 60 return nlm_failed;
56 } 61 }
57} 62}
58 63
diff --git a/fs/open.c b/fs/open.c
index a99ad09c3197..bb98d2fe809f 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -64,7 +64,8 @@ static int vfs_statfs_native(struct dentry *dentry, struct statfs *buf)
64 memcpy(buf, &st, sizeof(st)); 64 memcpy(buf, &st, sizeof(st));
65 else { 65 else {
66 if (sizeof buf->f_blocks == 4) { 66 if (sizeof buf->f_blocks == 4) {
67 if ((st.f_blocks | st.f_bfree | st.f_bavail) & 67 if ((st.f_blocks | st.f_bfree | st.f_bavail |
68 st.f_bsize | st.f_frsize) &
68 0xffffffff00000000ULL) 69 0xffffffff00000000ULL)
69 return -EOVERFLOW; 70 return -EOVERFLOW;
70 /* 71 /*
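
The widened check in vfs_statfs_native() relies on a common trick: OR all the 64-bit fields together and test the high 32 bits once, instead of comparing each field against UINT32_MAX separately. A small userspace demonstration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if any of the fields cannot be represented in 32 bits. */
static int would_overflow_32(uint64_t blocks, uint64_t bfree,
                             uint64_t bavail, uint64_t bsize, uint64_t frsize)
{
        return ((blocks | bfree | bavail | bsize | frsize) &
                0xffffffff00000000ULL) != 0;
}

int main(void)
{
        /* Made-up numbers: a block count that no longer fits in 32 bits. */
        uint64_t blocks = 1ULL << 32;

        printf("overflow: %d\n",
               would_overflow_32(blocks, blocks / 2, blocks / 2, 4096, 4096));
        return 0;
}
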
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index efef715135d3..7d6b34e201db 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -344,18 +344,18 @@ static ssize_t whole_disk_show(struct device *dev,
344static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, 344static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
345 whole_disk_show, NULL); 345 whole_disk_show, NULL);
346 346
347void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags) 347int add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags)
348{ 348{
349 struct hd_struct *p; 349 struct hd_struct *p;
350 int err; 350 int err;
351 351
352 p = kzalloc(sizeof(*p), GFP_KERNEL); 352 p = kzalloc(sizeof(*p), GFP_KERNEL);
353 if (!p) 353 if (!p)
354 return; 354 return -ENOMEM;
355 355
356 if (!init_part_stats(p)) { 356 if (!init_part_stats(p)) {
357 kfree(p); 357 err = -ENOMEM;
358 return; 358 goto out0;
359 } 359 }
360 p->start_sect = start; 360 p->start_sect = start;
361 p->nr_sects = len; 361 p->nr_sects = len;
@@ -378,15 +378,31 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len,
378 378
379 /* delay uevent until 'holders' subdir is created */ 379 /* delay uevent until 'holders' subdir is created */
380 p->dev.uevent_suppress = 1; 380 p->dev.uevent_suppress = 1;
381 device_add(&p->dev); 381 err = device_add(&p->dev);
382 if (err)
383 goto out1;
382 partition_sysfs_add_subdir(p); 384 partition_sysfs_add_subdir(p);
383 p->dev.uevent_suppress = 0; 385 p->dev.uevent_suppress = 0;
384 if (flags & ADDPART_FLAG_WHOLEDISK) 386 if (flags & ADDPART_FLAG_WHOLEDISK) {
385 err = device_create_file(&p->dev, &dev_attr_whole_disk); 387 err = device_create_file(&p->dev, &dev_attr_whole_disk);
388 if (err)
389 goto out2;
390 }
386 391
 387 /* suppress uevent if the disk suppresses it */ 392 /* suppress uevent if the disk suppresses it */
388 if (!disk->dev.uevent_suppress) 393 if (!disk->dev.uevent_suppress)
389 kobject_uevent(&p->dev.kobj, KOBJ_ADD); 394 kobject_uevent(&p->dev.kobj, KOBJ_ADD);
395
396 return 0;
397
398out2:
399 device_del(&p->dev);
400out1:
401 put_device(&p->dev);
402 free_part_stats(p);
403out0:
404 kfree(p);
405 return err;
390} 406}
391 407
392/* Not exported, helper to add_disk(). */ 408/* Not exported, helper to add_disk(). */
@@ -483,10 +499,16 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
483 if (!size) 499 if (!size)
484 continue; 500 continue;
485 if (from + size > get_capacity(disk)) { 501 if (from + size > get_capacity(disk)) {
486 printk(" %s: p%d exceeds device capacity\n", 502 printk(KERN_ERR " %s: p%d exceeds device capacity\n",
487 disk->disk_name, p); 503 disk->disk_name, p);
504 continue;
505 }
506 res = add_partition(disk, p, from, size, state->parts[p].flags);
507 if (res) {
508 printk(KERN_ERR " %s: p%d could not be added: %d\n",
509 disk->disk_name, p, -res);
510 continue;
488 } 511 }
489 add_partition(disk, p, from, size, state->parts[p].flags);
490#ifdef CONFIG_BLK_DEV_MD 512#ifdef CONFIG_BLK_DEV_MD
491 if (state->parts[p].flags & ADDPART_FLAG_RAID) 513 if (state->parts[p].flags & ADDPART_FLAG_RAID)
492 md_autodetect_dev(bdev->bd_dev+p); 514 md_autodetect_dev(bdev->bd_dev+p);
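
add_partition() now unwinds its partial setup with the usual kernel goto ladder: each later failure jumps to a label that undoes everything allocated before it, in reverse order, and the function finally reports the error instead of returning void. The shape of that pattern in plain C (malloc/free stand in for the device and stats allocations; the step names are invented):

#include <stdio.h>
#include <stdlib.h>

struct thing {
        char *stats;
        char *name;
};

/* Staged construction with reverse-order cleanup on failure. */
static struct thing *thing_create(void)
{
        struct thing *t;

        t = calloc(1, sizeof(*t));
        if (!t)
                goto out0;

        t->stats = malloc(64);
        if (!t->stats)
                goto out1;

        t->name = malloc(32);
        if (!t->name)
                goto out2;

        return t;                       /* fully constructed */

out2:
        free(t->stats);
out1:
        free(t);
out0:
        return NULL;
}

int main(void)
{
        struct thing *t = thing_create();

        printf("create %s\n", t ? "succeeded" : "failed");
        if (t) {
                free(t->name);
                free(t->stats);
                free(t);
        }
        return 0;
}
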
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index e7b07006bc41..038a6022152f 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -95,13 +95,6 @@
95#include "check.h" 95#include "check.h"
96#include "efi.h" 96#include "efi.h"
97 97
98#undef EFI_DEBUG
99#ifdef EFI_DEBUG
100#define Dprintk(x...) printk(KERN_DEBUG x)
101#else
102#define Dprintk(x...)
103#endif
104
105/* This allows a kernel command line option 'gpt' to override 98/* This allows a kernel command line option 'gpt' to override
106 * the test for invalid PMBR. Not __initdata because reloading 99 * the test for invalid PMBR. Not __initdata because reloading
107 * the partition tables happens after init too. 100 * the partition tables happens after init too.
@@ -305,10 +298,10 @@ is_gpt_valid(struct block_device *bdev, u64 lba,
305 298
306 /* Check the GUID Partition Table signature */ 299 /* Check the GUID Partition Table signature */
307 if (le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE) { 300 if (le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE) {
308 Dprintk("GUID Partition Table Header signature is wrong:" 301 pr_debug("GUID Partition Table Header signature is wrong:"
309 "%lld != %lld\n", 302 "%lld != %lld\n",
310 (unsigned long long)le64_to_cpu((*gpt)->signature), 303 (unsigned long long)le64_to_cpu((*gpt)->signature),
311 (unsigned long long)GPT_HEADER_SIGNATURE); 304 (unsigned long long)GPT_HEADER_SIGNATURE);
312 goto fail; 305 goto fail;
313 } 306 }
314 307
@@ -318,9 +311,8 @@ is_gpt_valid(struct block_device *bdev, u64 lba,
318 crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size)); 311 crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size));
319 312
320 if (crc != origcrc) { 313 if (crc != origcrc) {
321 Dprintk 314 pr_debug("GUID Partition Table Header CRC is wrong: %x != %x\n",
322 ("GUID Partition Table Header CRC is wrong: %x != %x\n", 315 crc, origcrc);
323 crc, origcrc);
324 goto fail; 316 goto fail;
325 } 317 }
326 (*gpt)->header_crc32 = cpu_to_le32(origcrc); 318 (*gpt)->header_crc32 = cpu_to_le32(origcrc);
@@ -328,9 +320,9 @@ is_gpt_valid(struct block_device *bdev, u64 lba,
328 /* Check that the my_lba entry points to the LBA that contains 320 /* Check that the my_lba entry points to the LBA that contains
329 * the GUID Partition Table */ 321 * the GUID Partition Table */
330 if (le64_to_cpu((*gpt)->my_lba) != lba) { 322 if (le64_to_cpu((*gpt)->my_lba) != lba) {
331 Dprintk("GPT my_lba incorrect: %lld != %lld\n", 323 pr_debug("GPT my_lba incorrect: %lld != %lld\n",
332 (unsigned long long)le64_to_cpu((*gpt)->my_lba), 324 (unsigned long long)le64_to_cpu((*gpt)->my_lba),
333 (unsigned long long)lba); 325 (unsigned long long)lba);
334 goto fail; 326 goto fail;
335 } 327 }
336 328
@@ -339,15 +331,15 @@ is_gpt_valid(struct block_device *bdev, u64 lba,
339 */ 331 */
340 lastlba = last_lba(bdev); 332 lastlba = last_lba(bdev);
341 if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) { 333 if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
342 Dprintk("GPT: first_usable_lba incorrect: %lld > %lld\n", 334 pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
343 (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba), 335 (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
344 (unsigned long long)lastlba); 336 (unsigned long long)lastlba);
345 goto fail; 337 goto fail;
346 } 338 }
347 if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) { 339 if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) {
348 Dprintk("GPT: last_usable_lba incorrect: %lld > %lld\n", 340 pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
349 (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba), 341 (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
350 (unsigned long long)lastlba); 342 (unsigned long long)lastlba);
351 goto fail; 343 goto fail;
352 } 344 }
353 345
@@ -360,7 +352,7 @@ is_gpt_valid(struct block_device *bdev, u64 lba,
360 le32_to_cpu((*gpt)->sizeof_partition_entry)); 352 le32_to_cpu((*gpt)->sizeof_partition_entry));
361 353
362 if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) { 354 if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
363 Dprintk("GUID Partitition Entry Array CRC check failed.\n"); 355 pr_debug("GUID Partitition Entry Array CRC check failed.\n");
364 goto fail_ptes; 356 goto fail_ptes;
365 } 357 }
366 358
@@ -616,7 +608,7 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev)
616 return 0; 608 return 0;
617 } 609 }
618 610
619 Dprintk("GUID Partition Table is valid! Yea!\n"); 611 pr_debug("GUID Partition Table is valid! Yea!\n");
620 612
621 for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) { 613 for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
622 if (!is_pte_valid(&ptes[i], last_lba(bdev))) 614 if (!is_pte_valid(&ptes[i], last_lba(bdev)))
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 0fdda2e8a4cc..8652fb99e962 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -133,17 +133,17 @@ static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
133 bool is_vista = false; 133 bool is_vista = false;
134 134
135 BUG_ON(!data || !ph); 135 BUG_ON(!data || !ph);
136 if (MAGIC_PRIVHEAD != BE64(data)) { 136 if (MAGIC_PRIVHEAD != get_unaligned_be64(data)) {
137 ldm_error("Cannot find PRIVHEAD structure. LDM database is" 137 ldm_error("Cannot find PRIVHEAD structure. LDM database is"
138 " corrupt. Aborting."); 138 " corrupt. Aborting.");
139 return false; 139 return false;
140 } 140 }
141 ph->ver_major = BE16(data + 0x000C); 141 ph->ver_major = get_unaligned_be16(data + 0x000C);
142 ph->ver_minor = BE16(data + 0x000E); 142 ph->ver_minor = get_unaligned_be16(data + 0x000E);
143 ph->logical_disk_start = BE64(data + 0x011B); 143 ph->logical_disk_start = get_unaligned_be64(data + 0x011B);
144 ph->logical_disk_size = BE64(data + 0x0123); 144 ph->logical_disk_size = get_unaligned_be64(data + 0x0123);
145 ph->config_start = BE64(data + 0x012B); 145 ph->config_start = get_unaligned_be64(data + 0x012B);
146 ph->config_size = BE64(data + 0x0133); 146 ph->config_size = get_unaligned_be64(data + 0x0133);
147 /* Version 2.11 is Win2k/XP and version 2.12 is Vista. */ 147 /* Version 2.11 is Win2k/XP and version 2.12 is Vista. */
148 if (ph->ver_major == 2 && ph->ver_minor == 12) 148 if (ph->ver_major == 2 && ph->ver_minor == 12)
149 is_vista = true; 149 is_vista = true;
@@ -191,14 +191,14 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
191{ 191{
192 BUG_ON (!data || !toc); 192 BUG_ON (!data || !toc);
193 193
194 if (MAGIC_TOCBLOCK != BE64 (data)) { 194 if (MAGIC_TOCBLOCK != get_unaligned_be64(data)) {
195 ldm_crit ("Cannot find TOCBLOCK, database may be corrupt."); 195 ldm_crit ("Cannot find TOCBLOCK, database may be corrupt.");
196 return false; 196 return false;
197 } 197 }
198 strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name)); 198 strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name));
199 toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0; 199 toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0;
200 toc->bitmap1_start = BE64 (data + 0x2E); 200 toc->bitmap1_start = get_unaligned_be64(data + 0x2E);
201 toc->bitmap1_size = BE64 (data + 0x36); 201 toc->bitmap1_size = get_unaligned_be64(data + 0x36);
202 202
203 if (strncmp (toc->bitmap1_name, TOC_BITMAP1, 203 if (strncmp (toc->bitmap1_name, TOC_BITMAP1,
204 sizeof (toc->bitmap1_name)) != 0) { 204 sizeof (toc->bitmap1_name)) != 0) {
@@ -208,8 +208,8 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
208 } 208 }
209 strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name)); 209 strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name));
210 toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0; 210 toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0;
211 toc->bitmap2_start = BE64 (data + 0x50); 211 toc->bitmap2_start = get_unaligned_be64(data + 0x50);
212 toc->bitmap2_size = BE64 (data + 0x58); 212 toc->bitmap2_size = get_unaligned_be64(data + 0x58);
213 if (strncmp (toc->bitmap2_name, TOC_BITMAP2, 213 if (strncmp (toc->bitmap2_name, TOC_BITMAP2,
214 sizeof (toc->bitmap2_name)) != 0) { 214 sizeof (toc->bitmap2_name)) != 0) {
215 ldm_crit ("TOCBLOCK's second bitmap is '%s', should be '%s'.", 215 ldm_crit ("TOCBLOCK's second bitmap is '%s', should be '%s'.",
@@ -237,22 +237,22 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
237{ 237{
238 BUG_ON (!data || !vm); 238 BUG_ON (!data || !vm);
239 239
240 if (MAGIC_VMDB != BE32 (data)) { 240 if (MAGIC_VMDB != get_unaligned_be32(data)) {
241 ldm_crit ("Cannot find the VMDB, database may be corrupt."); 241 ldm_crit ("Cannot find the VMDB, database may be corrupt.");
242 return false; 242 return false;
243 } 243 }
244 244
245 vm->ver_major = BE16 (data + 0x12); 245 vm->ver_major = get_unaligned_be16(data + 0x12);
246 vm->ver_minor = BE16 (data + 0x14); 246 vm->ver_minor = get_unaligned_be16(data + 0x14);
247 if ((vm->ver_major != 4) || (vm->ver_minor != 10)) { 247 if ((vm->ver_major != 4) || (vm->ver_minor != 10)) {
248 ldm_error ("Expected VMDB version %d.%d, got %d.%d. " 248 ldm_error ("Expected VMDB version %d.%d, got %d.%d. "
249 "Aborting.", 4, 10, vm->ver_major, vm->ver_minor); 249 "Aborting.", 4, 10, vm->ver_major, vm->ver_minor);
250 return false; 250 return false;
251 } 251 }
252 252
253 vm->vblk_size = BE32 (data + 0x08); 253 vm->vblk_size = get_unaligned_be32(data + 0x08);
254 vm->vblk_offset = BE32 (data + 0x0C); 254 vm->vblk_offset = get_unaligned_be32(data + 0x0C);
255 vm->last_vblk_seq = BE32 (data + 0x04); 255 vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
256 256
257 ldm_debug ("Parsed VMDB successfully."); 257 ldm_debug ("Parsed VMDB successfully.");
258 return true; 258 return true;
@@ -507,7 +507,7 @@ static bool ldm_validate_vmdb (struct block_device *bdev, unsigned long base,
507 goto out; /* Already logged */ 507 goto out; /* Already logged */
508 508
509 /* Are there uncommitted transactions? */ 509 /* Are there uncommitted transactions? */
510 if (BE16(data + 0x10) != 0x01) { 510 if (get_unaligned_be16(data + 0x10) != 0x01) {
511 ldm_crit ("Database is not in a consistent state. Aborting."); 511 ldm_crit ("Database is not in a consistent state. Aborting.");
512 goto out; 512 goto out;
513 } 513 }
@@ -802,7 +802,7 @@ static bool ldm_parse_cmp3 (const u8 *buffer, int buflen, struct vblk *vb)
802 return false; 802 return false;
803 803
804 len += VBLK_SIZE_CMP3; 804 len += VBLK_SIZE_CMP3;
805 if (len != BE32 (buffer + 0x14)) 805 if (len != get_unaligned_be32(buffer + 0x14))
806 return false; 806 return false;
807 807
808 comp = &vb->vblk.comp; 808 comp = &vb->vblk.comp;
@@ -851,7 +851,7 @@ static int ldm_parse_dgr3 (const u8 *buffer, int buflen, struct vblk *vb)
851 return false; 851 return false;
852 852
853 len += VBLK_SIZE_DGR3; 853 len += VBLK_SIZE_DGR3;
854 if (len != BE32 (buffer + 0x14)) 854 if (len != get_unaligned_be32(buffer + 0x14))
855 return false; 855 return false;
856 856
857 dgrp = &vb->vblk.dgrp; 857 dgrp = &vb->vblk.dgrp;
@@ -895,7 +895,7 @@ static bool ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb)
895 return false; 895 return false;
896 896
897 len += VBLK_SIZE_DGR4; 897 len += VBLK_SIZE_DGR4;
898 if (len != BE32 (buffer + 0x14)) 898 if (len != get_unaligned_be32(buffer + 0x14))
899 return false; 899 return false;
900 900
901 dgrp = &vb->vblk.dgrp; 901 dgrp = &vb->vblk.dgrp;
@@ -931,7 +931,7 @@ static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
931 return false; 931 return false;
932 932
933 len += VBLK_SIZE_DSK3; 933 len += VBLK_SIZE_DSK3;
934 if (len != BE32 (buffer + 0x14)) 934 if (len != get_unaligned_be32(buffer + 0x14))
935 return false; 935 return false;
936 936
937 disk = &vb->vblk.disk; 937 disk = &vb->vblk.disk;
@@ -968,7 +968,7 @@ static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
968 return false; 968 return false;
969 969
970 len += VBLK_SIZE_DSK4; 970 len += VBLK_SIZE_DSK4;
971 if (len != BE32 (buffer + 0x14)) 971 if (len != get_unaligned_be32(buffer + 0x14))
972 return false; 972 return false;
973 973
974 disk = &vb->vblk.disk; 974 disk = &vb->vblk.disk;
@@ -1034,14 +1034,14 @@ static bool ldm_parse_prt3(const u8 *buffer, int buflen, struct vblk *vb)
1034 return false; 1034 return false;
1035 } 1035 }
1036 len += VBLK_SIZE_PRT3; 1036 len += VBLK_SIZE_PRT3;
1037 if (len > BE32(buffer + 0x14)) { 1037 if (len > get_unaligned_be32(buffer + 0x14)) {
1038 ldm_error("len %d > BE32(buffer + 0x14) %d", len, 1038 ldm_error("len %d > BE32(buffer + 0x14) %d", len,
1039 BE32(buffer + 0x14)); 1039 get_unaligned_be32(buffer + 0x14));
1040 return false; 1040 return false;
1041 } 1041 }
1042 part = &vb->vblk.part; 1042 part = &vb->vblk.part;
1043 part->start = BE64(buffer + 0x24 + r_name); 1043 part->start = get_unaligned_be64(buffer + 0x24 + r_name);
1044 part->volume_offset = BE64(buffer + 0x2C + r_name); 1044 part->volume_offset = get_unaligned_be64(buffer + 0x2C + r_name);
1045 part->size = ldm_get_vnum(buffer + 0x34 + r_name); 1045 part->size = ldm_get_vnum(buffer + 0x34 + r_name);
1046 part->parent_id = ldm_get_vnum(buffer + 0x34 + r_size); 1046 part->parent_id = ldm_get_vnum(buffer + 0x34 + r_size);
1047 part->disk_id = ldm_get_vnum(buffer + 0x34 + r_parent); 1047 part->disk_id = ldm_get_vnum(buffer + 0x34 + r_parent);
@@ -1139,9 +1139,9 @@ static bool ldm_parse_vol5(const u8 *buffer, int buflen, struct vblk *vb)
1139 return false; 1139 return false;
1140 } 1140 }
1141 len += VBLK_SIZE_VOL5; 1141 len += VBLK_SIZE_VOL5;
1142 if (len > BE32(buffer + 0x14)) { 1142 if (len > get_unaligned_be32(buffer + 0x14)) {
1143 ldm_error("len %d > BE32(buffer + 0x14) %d", len, 1143 ldm_error("len %d > BE32(buffer + 0x14) %d", len,
1144 BE32(buffer + 0x14)); 1144 get_unaligned_be32(buffer + 0x14));
1145 return false; 1145 return false;
1146 } 1146 }
1147 volu = &vb->vblk.volu; 1147 volu = &vb->vblk.volu;
@@ -1294,9 +1294,9 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
1294 1294
1295 BUG_ON (!data || !frags); 1295 BUG_ON (!data || !frags);
1296 1296
1297 group = BE32 (data + 0x08); 1297 group = get_unaligned_be32(data + 0x08);
1298 rec = BE16 (data + 0x0C); 1298 rec = get_unaligned_be16(data + 0x0C);
1299 num = BE16 (data + 0x0E); 1299 num = get_unaligned_be16(data + 0x0E);
1300 if ((num < 1) || (num > 4)) { 1300 if ((num < 1) || (num > 4)) {
1301 ldm_error ("A VBLK claims to have %d parts.", num); 1301 ldm_error ("A VBLK claims to have %d parts.", num);
1302 return false; 1302 return false;
@@ -1425,12 +1425,12 @@ static bool ldm_get_vblks (struct block_device *bdev, unsigned long base,
1425 } 1425 }
1426 1426
1427 for (v = 0; v < perbuf; v++, data+=size) { /* For each vblk */ 1427 for (v = 0; v < perbuf; v++, data+=size) { /* For each vblk */
1428 if (MAGIC_VBLK != BE32 (data)) { 1428 if (MAGIC_VBLK != get_unaligned_be32(data)) {
1429 ldm_error ("Expected to find a VBLK."); 1429 ldm_error ("Expected to find a VBLK.");
1430 goto out; 1430 goto out;
1431 } 1431 }
1432 1432
1433 recs = BE16 (data + 0x0E); /* Number of records */ 1433 recs = get_unaligned_be16(data + 0x0E); /* Number of records */
1434 if (recs == 1) { 1434 if (recs == 1) {
1435 if (!ldm_ldmdb_add (data, size, ldb)) 1435 if (!ldm_ldmdb_add (data, size, ldb))
1436 goto out; /* Already logged */ 1436 goto out; /* Already logged */
diff --git a/fs/partitions/ldm.h b/fs/partitions/ldm.h
index 80f63b5fdd9f..30e08e809c1d 100644
--- a/fs/partitions/ldm.h
+++ b/fs/partitions/ldm.h
@@ -98,11 +98,6 @@ struct parsed_partitions;
98#define TOC_BITMAP1 "config" /* Names of the two defined */ 98#define TOC_BITMAP1 "config" /* Names of the two defined */
99#define TOC_BITMAP2 "log" /* bitmaps in the TOCBLOCK. */ 99#define TOC_BITMAP2 "log" /* bitmaps in the TOCBLOCK. */
100 100
101/* Most numbers we deal with are big-endian and won't be aligned. */
102#define BE16(x) ((u16)be16_to_cpu(get_unaligned((__be16*)(x))))
103#define BE32(x) ((u32)be32_to_cpu(get_unaligned((__be32*)(x))))
104#define BE64(x) ((u64)be64_to_cpu(get_unaligned((__be64*)(x))))
105
106/* Borrowed from msdos.c */ 101/* Borrowed from msdos.c */
107#define SYS_IND(p) (get_unaligned(&(p)->sys_ind)) 102#define SYS_IND(p) (get_unaligned(&(p)->sys_ind))
108 103
diff --git a/fs/pipe.c b/fs/pipe.c
index 700f4e0d9572..10c4e9aa5c49 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -950,7 +950,7 @@ fail_inode:
950 return NULL; 950 return NULL;
951} 951}
952 952
953struct file *create_write_pipe(void) 953struct file *create_write_pipe(int flags)
954{ 954{
955 int err; 955 int err;
956 struct inode *inode; 956 struct inode *inode;
@@ -983,7 +983,7 @@ struct file *create_write_pipe(void)
983 goto err_dentry; 983 goto err_dentry;
984 f->f_mapping = inode->i_mapping; 984 f->f_mapping = inode->i_mapping;
985 985
986 f->f_flags = O_WRONLY; 986 f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
987 f->f_version = 0; 987 f->f_version = 0;
988 988
989 return f; 989 return f;
@@ -1007,7 +1007,7 @@ void free_write_pipe(struct file *f)
1007 put_filp(f); 1007 put_filp(f);
1008} 1008}
1009 1009
1010struct file *create_read_pipe(struct file *wrf) 1010struct file *create_read_pipe(struct file *wrf, int flags)
1011{ 1011{
1012 struct file *f = get_empty_filp(); 1012 struct file *f = get_empty_filp();
1013 if (!f) 1013 if (!f)
@@ -1019,7 +1019,7 @@ struct file *create_read_pipe(struct file *wrf)
1019 f->f_mapping = wrf->f_path.dentry->d_inode->i_mapping; 1019 f->f_mapping = wrf->f_path.dentry->d_inode->i_mapping;
1020 1020
1021 f->f_pos = 0; 1021 f->f_pos = 0;
1022 f->f_flags = O_RDONLY; 1022 f->f_flags = O_RDONLY | (flags & O_NONBLOCK);
1023 f->f_op = &read_pipe_fops; 1023 f->f_op = &read_pipe_fops;
1024 f->f_mode = FMODE_READ; 1024 f->f_mode = FMODE_READ;
1025 f->f_version = 0; 1025 f->f_version = 0;
@@ -1027,26 +1027,29 @@ struct file *create_read_pipe(struct file *wrf)
1027 return f; 1027 return f;
1028} 1028}
1029 1029
1030int do_pipe(int *fd) 1030int do_pipe_flags(int *fd, int flags)
1031{ 1031{
1032 struct file *fw, *fr; 1032 struct file *fw, *fr;
1033 int error; 1033 int error;
1034 int fdw, fdr; 1034 int fdw, fdr;
1035 1035
1036 fw = create_write_pipe(); 1036 if (flags & ~(O_CLOEXEC | O_NONBLOCK))
1037 return -EINVAL;
1038
1039 fw = create_write_pipe(flags);
1037 if (IS_ERR(fw)) 1040 if (IS_ERR(fw))
1038 return PTR_ERR(fw); 1041 return PTR_ERR(fw);
1039 fr = create_read_pipe(fw); 1042 fr = create_read_pipe(fw, flags);
1040 error = PTR_ERR(fr); 1043 error = PTR_ERR(fr);
1041 if (IS_ERR(fr)) 1044 if (IS_ERR(fr))
1042 goto err_write_pipe; 1045 goto err_write_pipe;
1043 1046
1044 error = get_unused_fd(); 1047 error = get_unused_fd_flags(flags);
1045 if (error < 0) 1048 if (error < 0)
1046 goto err_read_pipe; 1049 goto err_read_pipe;
1047 fdr = error; 1050 fdr = error;
1048 1051
1049 error = get_unused_fd(); 1052 error = get_unused_fd_flags(flags);
1050 if (error < 0) 1053 if (error < 0)
1051 goto err_fdr; 1054 goto err_fdr;
1052 fdw = error; 1055 fdw = error;
@@ -1074,16 +1077,21 @@ int do_pipe(int *fd)
1074 return error; 1077 return error;
1075} 1078}
1076 1079
1080int do_pipe(int *fd)
1081{
1082 return do_pipe_flags(fd, 0);
1083}
1084
1077/* 1085/*
1078 * sys_pipe() is the normal C calling standard for creating 1086 * sys_pipe() is the normal C calling standard for creating
1079 * a pipe. It's not the way Unix traditionally does this, though. 1087 * a pipe. It's not the way Unix traditionally does this, though.
1080 */ 1088 */
1081asmlinkage long __weak sys_pipe(int __user *fildes) 1089asmlinkage long __weak sys_pipe2(int __user *fildes, int flags)
1082{ 1090{
1083 int fd[2]; 1091 int fd[2];
1084 int error; 1092 int error;
1085 1093
1086 error = do_pipe(fd); 1094 error = do_pipe_flags(fd, flags);
1087 if (!error) { 1095 if (!error) {
1088 if (copy_to_user(fildes, fd, sizeof(fd))) { 1096 if (copy_to_user(fildes, fd, sizeof(fd))) {
1089 sys_close(fd[0]); 1097 sys_close(fd[0]);
@@ -1094,6 +1102,11 @@ asmlinkage long __weak sys_pipe(int __user *fildes)
1094 return error; 1102 return error;
1095} 1103}
1096 1104
1105asmlinkage long __weak sys_pipe(int __user *fildes)
1106{
1107 return sys_pipe2(fildes, 0);
1108}
1109
1097/* 1110/*
1098 * pipefs should _never_ be mounted by userland - too much of security hassle, 1111 * pipefs should _never_ be mounted by userland - too much of security hassle,
1099 * no real gain from having the whole whorehouse mounted. So we don't need 1112 * no real gain from having the whole whorehouse mounted. So we don't need
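A hedged userspace sketch of the new sys_pipe2() path (glibc exposes it as pipe2(); requires _GNU_SOURCE and a libc/kernel pair that support it):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];

		/* Both ends are created close-on-exec and non-blocking in one
		 * step, instead of pipe() followed by racy fcntl() calls. */
		if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == -1) {
			perror("pipe2");
			return 1;
		}
		close(fds[0]);
		close(fds[1]);
		return 0;
	}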
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
new file mode 100644
index 000000000000..73cd7a418f06
--- /dev/null
+++ b/fs/proc/Kconfig
@@ -0,0 +1,59 @@
1config PROC_FS
2 bool "/proc file system support" if EMBEDDED
3 default y
4 help
5 This is a virtual file system providing information about the status
6 of the system. "Virtual" means that it doesn't take up any space on
7 your hard disk: the files are created on the fly by the kernel when
8 you try to access them. Also, you cannot read the files with an older
9 version of the program less: you need to use more or cat.
10
11 It's totally cool; for example, "cat /proc/interrupts" gives
12 information about what the different IRQs are used for at the moment
13 (there is a small number of Interrupt ReQuest lines in your computer
14 that are used by the attached devices to gain the CPU's attention --
15 often a source of trouble if two devices are mistakenly configured
16 to use the same IRQ). The program procinfo can display some
17 information about your system gathered from the /proc file system.
18
19 Before you can use the /proc file system, it has to be mounted,
20 meaning it has to be given a location in the directory hierarchy.
21 That location should be /proc. A command such as "mount -t proc proc
22 /proc" or the equivalent line in /etc/fstab does the job.
23
24 The /proc file system is explained in the file
25 <file:Documentation/filesystems/proc.txt> and on the proc(5) manpage
26 ("man 5 proc").
27
28 This option will enlarge your kernel by about 67 KB. Several
29 programs depend on this, so everyone should say Y here.
30
31config PROC_KCORE
32 bool "/proc/kcore support" if !ARM
33 depends on PROC_FS && MMU
34
35config PROC_VMCORE
36 bool "/proc/vmcore support (EXPERIMENTAL)"
37 depends on PROC_FS && CRASH_DUMP
38 default y
39 help
40 Exports the dump image of crashed kernel in ELF format.
41
42config PROC_SYSCTL
43 bool "Sysctl support (/proc/sys)" if EMBEDDED
44 depends on PROC_FS
45 select SYSCTL
46 default y
47 ---help---
48 The sysctl interface provides a means of dynamically changing
49 certain kernel parameters and variables on the fly without requiring
50 a recompile of the kernel or reboot of the system. The primary
51 interface is through /proc/sys. If you say Y here a tree of
52 modifiable sysctl entries will be generated beneath the
53 /proc/sys directory. They are explained in the files
54 in <file:Documentation/sysctl/>. Note that enabling this
55 option will enlarge the kernel by at least 8 KB.
56
57 As it is generally a good thing, you should say Y here unless
58 building a kernel for install/rescue disks or your system is very
59 limited in memory.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 58c3e6a8e15e..a891fe4cb43b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2376,29 +2376,82 @@ static int proc_base_fill_cache(struct file *filp, void *dirent,
2376} 2376}
2377 2377
2378#ifdef CONFIG_TASK_IO_ACCOUNTING 2378#ifdef CONFIG_TASK_IO_ACCOUNTING
2379static int proc_pid_io_accounting(struct task_struct *task, char *buffer) 2379static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
2380{ 2380{
2381 u64 rchar, wchar, syscr, syscw;
2382 struct task_io_accounting ioac;
2383
2384 if (!whole) {
2385 rchar = task->rchar;
2386 wchar = task->wchar;
2387 syscr = task->syscr;
2388 syscw = task->syscw;
2389 memcpy(&ioac, &task->ioac, sizeof(ioac));
2390 } else {
2391 unsigned long flags;
2392 struct task_struct *t = task;
2393 rchar = wchar = syscr = syscw = 0;
2394 memset(&ioac, 0, sizeof(ioac));
2395
2396 rcu_read_lock();
2397 do {
2398 rchar += t->rchar;
2399 wchar += t->wchar;
2400 syscr += t->syscr;
2401 syscw += t->syscw;
2402
2403 ioac.read_bytes += t->ioac.read_bytes;
2404 ioac.write_bytes += t->ioac.write_bytes;
2405 ioac.cancelled_write_bytes +=
2406 t->ioac.cancelled_write_bytes;
2407 t = next_thread(t);
2408 } while (t != task);
2409 rcu_read_unlock();
2410
2411 if (lock_task_sighand(task, &flags)) {
2412 struct signal_struct *sig = task->signal;
2413
2414 rchar += sig->rchar;
2415 wchar += sig->wchar;
2416 syscr += sig->syscr;
2417 syscw += sig->syscw;
2418
2419 ioac.read_bytes += sig->ioac.read_bytes;
2420 ioac.write_bytes += sig->ioac.write_bytes;
2421 ioac.cancelled_write_bytes +=
2422 sig->ioac.cancelled_write_bytes;
2423
2424 unlock_task_sighand(task, &flags);
2425 }
2426 }
2427
2381 return sprintf(buffer, 2428 return sprintf(buffer,
2382#ifdef CONFIG_TASK_XACCT
2383 "rchar: %llu\n" 2429 "rchar: %llu\n"
2384 "wchar: %llu\n" 2430 "wchar: %llu\n"
2385 "syscr: %llu\n" 2431 "syscr: %llu\n"
2386 "syscw: %llu\n" 2432 "syscw: %llu\n"
2387#endif
2388 "read_bytes: %llu\n" 2433 "read_bytes: %llu\n"
2389 "write_bytes: %llu\n" 2434 "write_bytes: %llu\n"
2390 "cancelled_write_bytes: %llu\n", 2435 "cancelled_write_bytes: %llu\n",
2391#ifdef CONFIG_TASK_XACCT 2436 (unsigned long long)rchar,
2392 (unsigned long long)task->rchar, 2437 (unsigned long long)wchar,
2393 (unsigned long long)task->wchar, 2438 (unsigned long long)syscr,
2394 (unsigned long long)task->syscr, 2439 (unsigned long long)syscw,
2395 (unsigned long long)task->syscw, 2440 (unsigned long long)ioac.read_bytes,
2396#endif 2441 (unsigned long long)ioac.write_bytes,
2397 (unsigned long long)task->ioac.read_bytes, 2442 (unsigned long long)ioac.cancelled_write_bytes);
2398 (unsigned long long)task->ioac.write_bytes, 2443}
2399 (unsigned long long)task->ioac.cancelled_write_bytes); 2444
2445static int proc_tid_io_accounting(struct task_struct *task, char *buffer)
2446{
2447 return do_io_accounting(task, buffer, 0);
2400} 2448}
2401#endif 2449
2450static int proc_tgid_io_accounting(struct task_struct *task, char *buffer)
2451{
2452 return do_io_accounting(task, buffer, 1);
2453}
2454#endif /* CONFIG_TASK_IO_ACCOUNTING */
2402 2455
2403/* 2456/*
2404 * Thread groups 2457 * Thread groups
@@ -2470,7 +2523,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2470 REG("coredump_filter", S_IRUGO|S_IWUSR, coredump_filter), 2523 REG("coredump_filter", S_IRUGO|S_IWUSR, coredump_filter),
2471#endif 2524#endif
2472#ifdef CONFIG_TASK_IO_ACCOUNTING 2525#ifdef CONFIG_TASK_IO_ACCOUNTING
2473 INF("io", S_IRUGO, pid_io_accounting), 2526 INF("io", S_IRUGO, tgid_io_accounting),
2474#endif 2527#endif
2475}; 2528};
2476 2529
@@ -2797,6 +2850,9 @@ static const struct pid_entry tid_base_stuff[] = {
2797#ifdef CONFIG_FAULT_INJECTION 2850#ifdef CONFIG_FAULT_INJECTION
2798 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject), 2851 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
2799#endif 2852#endif
2853#ifdef CONFIG_TASK_IO_ACCOUNTING
2854 INF("io", S_IRUGO, tid_io_accounting),
2855#endif
2800}; 2856};
2801 2857
2802static int proc_tid_base_readdir(struct file * filp, 2858static int proc_tid_base_readdir(struct file * filp,
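With the split above, /proc/<pid>/io sums I/O accounting over the whole thread group (tgid_io_accounting) while /proc/<pid>/task/<tid>/io reports a single thread (tid_io_accounting). An illustrative userspace reader, assuming the field names printed by do_io_accounting():

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static void dump(const char *path)
	{
		char line[128];
		FILE *f = fopen(path, "r");

		if (!f)
			return;
		while (fgets(line, sizeof(line), f))
			printf("%s: %s", path, line);
		fclose(f);
	}

	int main(void)
	{
		char path[64];

		dump("/proc/self/io");                    /* whole thread group */
		snprintf(path, sizeof(path), "/proc/self/task/%ld/io",
			 (long)syscall(SYS_gettid));      /* this thread only */
		dump(path);
		return 0;
	}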
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 43e54e86cefd..bc0a0dd2d844 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -597,6 +597,7 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
597 ent->pde_users = 0; 597 ent->pde_users = 0;
598 spin_lock_init(&ent->pde_unload_lock); 598 spin_lock_init(&ent->pde_unload_lock);
599 ent->pde_unload_completion = NULL; 599 ent->pde_unload_completion = NULL;
600 INIT_LIST_HEAD(&ent->pde_openers);
600 out: 601 out:
601 return ent; 602 return ent;
602} 603}
@@ -789,6 +790,19 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
789 spin_unlock(&de->pde_unload_lock); 790 spin_unlock(&de->pde_unload_lock);
790 791
791continue_removing: 792continue_removing:
793 spin_lock(&de->pde_unload_lock);
794 while (!list_empty(&de->pde_openers)) {
795 struct pde_opener *pdeo;
796
797 pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
798 list_del(&pdeo->lh);
799 spin_unlock(&de->pde_unload_lock);
800 pdeo->release(pdeo->inode, pdeo->file);
801 kfree(pdeo);
802 spin_lock(&de->pde_unload_lock);
803 }
804 spin_unlock(&de->pde_unload_lock);
805
792 if (S_ISDIR(de->mode)) 806 if (S_ISDIR(de->mode))
793 parent->nlink--; 807 parent->nlink--;
794 de->nlink = 0; 808 de->nlink = 0;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index b08d10017911..02eca2ed9dd7 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -111,27 +111,25 @@ int __init proc_init_inodecache(void)
111 return 0; 111 return 0;
112} 112}
113 113
114static int proc_remount(struct super_block *sb, int *flags, char *data)
115{
116 *flags |= MS_NODIRATIME;
117 return 0;
118}
119
120static const struct super_operations proc_sops = { 114static const struct super_operations proc_sops = {
121 .alloc_inode = proc_alloc_inode, 115 .alloc_inode = proc_alloc_inode,
122 .destroy_inode = proc_destroy_inode, 116 .destroy_inode = proc_destroy_inode,
123 .drop_inode = generic_delete_inode, 117 .drop_inode = generic_delete_inode,
124 .delete_inode = proc_delete_inode, 118 .delete_inode = proc_delete_inode,
125 .statfs = simple_statfs, 119 .statfs = simple_statfs,
126 .remount_fs = proc_remount,
127}; 120};
128 121
129static void pde_users_dec(struct proc_dir_entry *pde) 122static void __pde_users_dec(struct proc_dir_entry *pde)
130{ 123{
131 spin_lock(&pde->pde_unload_lock);
132 pde->pde_users--; 124 pde->pde_users--;
133 if (pde->pde_unload_completion && pde->pde_users == 0) 125 if (pde->pde_unload_completion && pde->pde_users == 0)
134 complete(pde->pde_unload_completion); 126 complete(pde->pde_unload_completion);
127}
128
129static void pde_users_dec(struct proc_dir_entry *pde)
130{
131 spin_lock(&pde->pde_unload_lock);
132 __pde_users_dec(pde);
135 spin_unlock(&pde->pde_unload_lock); 133 spin_unlock(&pde->pde_unload_lock);
136} 134}
137 135
@@ -318,36 +316,97 @@ static int proc_reg_open(struct inode *inode, struct file *file)
318 struct proc_dir_entry *pde = PDE(inode); 316 struct proc_dir_entry *pde = PDE(inode);
319 int rv = 0; 317 int rv = 0;
320 int (*open)(struct inode *, struct file *); 318 int (*open)(struct inode *, struct file *);
319 int (*release)(struct inode *, struct file *);
320 struct pde_opener *pdeo;
321
322 /*
323 * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
324 * sequence. ->release won't be called because ->proc_fops will be
325 * cleared. Depending on complexity of ->release, consequences vary.
326 *
327 * We can't wait for mercy when close will be done for real, it's
328 * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
329 * by hand in remove_proc_entry(). For this, save opener's credentials
330 * for later.
331 */
332 pdeo = kmalloc(sizeof(struct pde_opener), GFP_KERNEL);
333 if (!pdeo)
334 return -ENOMEM;
321 335
322 spin_lock(&pde->pde_unload_lock); 336 spin_lock(&pde->pde_unload_lock);
323 if (!pde->proc_fops) { 337 if (!pde->proc_fops) {
324 spin_unlock(&pde->pde_unload_lock); 338 spin_unlock(&pde->pde_unload_lock);
339 kfree(pdeo);
325 return rv; 340 return rv;
326 } 341 }
327 pde->pde_users++; 342 pde->pde_users++;
328 open = pde->proc_fops->open; 343 open = pde->proc_fops->open;
344 release = pde->proc_fops->release;
329 spin_unlock(&pde->pde_unload_lock); 345 spin_unlock(&pde->pde_unload_lock);
330 346
331 if (open) 347 if (open)
332 rv = open(inode, file); 348 rv = open(inode, file);
333 349
334 pde_users_dec(pde); 350 spin_lock(&pde->pde_unload_lock);
351 if (rv == 0 && release) {
352 /* To know what to release. */
353 pdeo->inode = inode;
354 pdeo->file = file;
355 /* Strictly for "too late" ->release in proc_reg_release(). */
356 pdeo->release = release;
357 list_add(&pdeo->lh, &pde->pde_openers);
358 } else
359 kfree(pdeo);
360 __pde_users_dec(pde);
361 spin_unlock(&pde->pde_unload_lock);
335 return rv; 362 return rv;
336} 363}
337 364
365static struct pde_opener *find_pde_opener(struct proc_dir_entry *pde,
366 struct inode *inode, struct file *file)
367{
368 struct pde_opener *pdeo;
369
370 list_for_each_entry(pdeo, &pde->pde_openers, lh) {
371 if (pdeo->inode == inode && pdeo->file == file)
372 return pdeo;
373 }
374 return NULL;
375}
376
338static int proc_reg_release(struct inode *inode, struct file *file) 377static int proc_reg_release(struct inode *inode, struct file *file)
339{ 378{
340 struct proc_dir_entry *pde = PDE(inode); 379 struct proc_dir_entry *pde = PDE(inode);
341 int rv = 0; 380 int rv = 0;
342 int (*release)(struct inode *, struct file *); 381 int (*release)(struct inode *, struct file *);
382 struct pde_opener *pdeo;
343 383
344 spin_lock(&pde->pde_unload_lock); 384 spin_lock(&pde->pde_unload_lock);
385 pdeo = find_pde_opener(pde, inode, file);
345 if (!pde->proc_fops) { 386 if (!pde->proc_fops) {
346 spin_unlock(&pde->pde_unload_lock); 387 /*
388 * Can't simply exit, __fput() will think that everything is OK,
389 * and move on to freeing struct file. remove_proc_entry() will
390 * find slacker in opener's list and will try to do non-trivial
391 * things with struct file. Therefore, remove opener from list.
392 *
393 * But if opener is removed from list, who will ->release it?
394 */
395 if (pdeo) {
396 list_del(&pdeo->lh);
397 spin_unlock(&pde->pde_unload_lock);
398 rv = pdeo->release(inode, file);
399 kfree(pdeo);
400 } else
401 spin_unlock(&pde->pde_unload_lock);
347 return rv; 402 return rv;
348 } 403 }
349 pde->pde_users++; 404 pde->pde_users++;
350 release = pde->proc_fops->release; 405 release = pde->proc_fops->release;
406 if (pdeo) {
407 list_del(&pdeo->lh);
408 kfree(pdeo);
409 }
351 spin_unlock(&pde->pde_unload_lock); 410 spin_unlock(&pde->pde_unload_lock);
352 411
353 if (release) 412 if (release)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 28cbca805905..442202314d53 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -63,6 +63,7 @@ extern const struct file_operations proc_smaps_operations;
63extern const struct file_operations proc_clear_refs_operations; 63extern const struct file_operations proc_clear_refs_operations;
64extern const struct file_operations proc_pagemap_operations; 64extern const struct file_operations proc_pagemap_operations;
65extern const struct file_operations proc_net_operations; 65extern const struct file_operations proc_net_operations;
66extern const struct file_operations proc_kmsg_operations;
66extern const struct inode_operations proc_net_inode_operations; 67extern const struct inode_operations proc_net_inode_operations;
67 68
68void free_proc_entry(struct proc_dir_entry *de); 69void free_proc_entry(struct proc_dir_entry *de);
@@ -88,3 +89,10 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *ino,
88 struct dentry *dentry); 89 struct dentry *dentry);
89int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent, 90int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
90 filldir_t filldir); 91 filldir_t filldir);
92
93struct pde_opener {
94 struct inode *inode;
95 struct file *file;
96 int (*release)(struct inode *, struct file *);
97 struct list_head lh;
98};
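The pde_opener bookkeeping above exists for /proc files whose ->release lives in module code: if the module is unloaded while such a file is still open, remove_proc_entry() now calls the saved ->release on behalf of the lingering opener. A hedged module-side sketch of the kind of entry this protects (helper names assume the proc_create()/single_open() API of this kernel; "demo" is hypothetical):

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "hello\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct file_operations demo_fops = {
		.owner   = THIS_MODULE,
		.open    = demo_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,  /* called "by hand" on rmmod if still open */
	};

	static int __init demo_init(void)
	{
		return proc_create("demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
	}

	static void __exit demo_exit(void)
	{
		remove_proc_entry("demo", NULL);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");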
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e78c81fcf547..c2370c76fb71 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -23,6 +23,10 @@
23 23
24#define CORE_STR "CORE" 24#define CORE_STR "CORE"
25 25
26#ifndef ELF_CORE_EFLAGS
27#define ELF_CORE_EFLAGS 0
28#endif
29
26static int open_kcore(struct inode * inode, struct file * filp) 30static int open_kcore(struct inode * inode, struct file * filp)
27{ 31{
28 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; 32 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
@@ -164,11 +168,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
164 elf->e_entry = 0; 168 elf->e_entry = 0;
165 elf->e_phoff = sizeof(struct elfhdr); 169 elf->e_phoff = sizeof(struct elfhdr);
166 elf->e_shoff = 0; 170 elf->e_shoff = 0;
167#if defined(CONFIG_H8300) 171 elf->e_flags = ELF_CORE_EFLAGS;
168 elf->e_flags = ELF_FLAGS;
169#else
170 elf->e_flags = 0;
171#endif
172 elf->e_ehsize = sizeof(struct elfhdr); 172 elf->e_ehsize = sizeof(struct elfhdr);
173 elf->e_phentsize= sizeof(struct elf_phdr); 173 elf->e_phentsize= sizeof(struct elf_phdr);
174 elf->e_phnum = nphdr; 174 elf->e_phnum = nphdr;
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index ff3b90b56e9d..9fd5df3f40ce 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -15,6 +15,8 @@
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <asm/io.h> 16#include <asm/io.h>
17 17
18#include "internal.h"
19
18extern wait_queue_head_t log_wait; 20extern wait_queue_head_t log_wait;
19 21
20extern int do_syslog(int type, char __user *bug, int count); 22extern int do_syslog(int type, char __user *bug, int count);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index c652d469dc08..ded969862960 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -232,7 +232,6 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
232#undef K 232#undef K
233} 233}
234 234
235extern const struct seq_operations fragmentation_op;
236static int fragmentation_open(struct inode *inode, struct file *file) 235static int fragmentation_open(struct inode *inode, struct file *file)
237{ 236{
238 (void)inode; 237 (void)inode;
@@ -246,7 +245,6 @@ static const struct file_operations fragmentation_file_operations = {
246 .release = seq_release, 245 .release = seq_release,
247}; 246};
248 247
249extern const struct seq_operations pagetypeinfo_op;
250static int pagetypeinfo_open(struct inode *inode, struct file *file) 248static int pagetypeinfo_open(struct inode *inode, struct file *file)
251{ 249{
252 return seq_open(file, &pagetypeinfo_op); 250 return seq_open(file, &pagetypeinfo_op);
@@ -259,7 +257,6 @@ static const struct file_operations pagetypeinfo_file_ops = {
259 .release = seq_release, 257 .release = seq_release,
260}; 258};
261 259
262extern const struct seq_operations zoneinfo_op;
263static int zoneinfo_open(struct inode *inode, struct file *file) 260static int zoneinfo_open(struct inode *inode, struct file *file)
264{ 261{
265 return seq_open(file, &zoneinfo_op); 262 return seq_open(file, &zoneinfo_op);
@@ -356,7 +353,6 @@ static const struct file_operations proc_devinfo_operations = {
356 .release = seq_release, 353 .release = seq_release,
357}; 354};
358 355
359extern const struct seq_operations vmstat_op;
360static int vmstat_open(struct inode *inode, struct file *file) 356static int vmstat_open(struct inode *inode, struct file *file)
361{ 357{
362 return seq_open(file, &vmstat_op); 358 return seq_open(file, &vmstat_op);
@@ -468,14 +464,25 @@ static const struct file_operations proc_slabstats_operations = {
468#ifdef CONFIG_MMU 464#ifdef CONFIG_MMU
469static int vmalloc_open(struct inode *inode, struct file *file) 465static int vmalloc_open(struct inode *inode, struct file *file)
470{ 466{
471 return seq_open(file, &vmalloc_op); 467 unsigned int *ptr = NULL;
468 int ret;
469
470 if (NUMA_BUILD)
471 ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
472 ret = seq_open(file, &vmalloc_op);
473 if (!ret) {
474 struct seq_file *m = file->private_data;
475 m->private = ptr;
476 } else
477 kfree(ptr);
478 return ret;
472} 479}
473 480
474static const struct file_operations proc_vmalloc_operations = { 481static const struct file_operations proc_vmalloc_operations = {
475 .open = vmalloc_open, 482 .open = vmalloc_open,
476 .read = seq_read, 483 .read = seq_read,
477 .llseek = seq_lseek, 484 .llseek = seq_lseek,
478 .release = seq_release, 485 .release = seq_release_private,
479}; 486};
480#endif 487#endif
481 488
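The reworked vmalloc_open() follows the standard seq_file per-open private pattern: allocate state at open, park it in seq_file->private, and switch ->release to seq_release_private() so the buffer is freed on close. A minimal sketch of that pattern with assumed names (example_seq_ops is hypothetical):

	#include <linux/fs.h>
	#include <linux/seq_file.h>
	#include <linux/slab.h>

	extern const struct seq_operations example_seq_ops;  /* assumed elsewhere */

	static int example_open(struct inode *inode, struct file *file)
	{
		void *priv = kzalloc(64, GFP_KERNEL);  /* per-open scratch buffer */
		int ret;

		if (!priv)
			return -ENOMEM;
		ret = seq_open(file, &example_seq_ops);
		if (ret) {
			kfree(priv);
			return ret;
		}
		((struct seq_file *)file->private_data)->private = priv;
		return 0;
	}

	static const struct file_operations example_fops = {
		.open    = example_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = seq_release_private,  /* kfree()s seq_file->private */
	};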
diff --git a/fs/quota.c b/fs/quota.c
index db1cc9f3c7aa..7f4386ebc23a 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -186,7 +186,7 @@ static void quota_sync_sb(struct super_block *sb, int type)
186 186
187void sync_dquots(struct super_block *sb, int type) 187void sync_dquots(struct super_block *sb, int type)
188{ 188{
189 int cnt, dirty; 189 int cnt;
190 190
191 if (sb) { 191 if (sb) {
192 if (sb->s_qcop->quota_sync) 192 if (sb->s_qcop->quota_sync)
@@ -198,11 +198,17 @@ void sync_dquots(struct super_block *sb, int type)
198restart: 198restart:
199 list_for_each_entry(sb, &super_blocks, s_list) { 199 list_for_each_entry(sb, &super_blocks, s_list) {
200 /* This test just improves performance so it needn't be reliable... */ 200 /* This test just improves performance so it needn't be reliable... */
201 for (cnt = 0, dirty = 0; cnt < MAXQUOTAS; cnt++) 201 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
202 if ((type == cnt || type == -1) && sb_has_quota_enabled(sb, cnt) 202 if (type != -1 && type != cnt)
203 && info_any_dirty(&sb_dqopt(sb)->info[cnt])) 203 continue;
204 dirty = 1; 204 if (!sb_has_quota_enabled(sb, cnt))
205 if (!dirty) 205 continue;
206 if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
207 list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
208 continue;
209 break;
210 }
211 if (cnt == MAXQUOTAS)
206 continue; 212 continue;
207 sb->s_count++; 213 sb->s_count++;
208 spin_unlock(&sb_lock); 214 spin_unlock(&sb_lock);
diff --git a/fs/quota_v1.c b/fs/quota_v1.c
index a6cf9269105c..5ae15b13eeb0 100644
--- a/fs/quota_v1.c
+++ b/fs/quota_v1.c
@@ -1,6 +1,7 @@
1#include <linux/errno.h> 1#include <linux/errno.h>
2#include <linux/fs.h> 2#include <linux/fs.h>
3#include <linux/quota.h> 3#include <linux/quota.h>
4#include <linux/quotaops.h>
4#include <linux/dqblk_v1.h> 5#include <linux/dqblk_v1.h>
5#include <linux/quotaio_v1.h> 6#include <linux/quotaio_v1.h>
6#include <linux/kernel.h> 7#include <linux/kernel.h>
diff --git a/fs/quota_v2.c b/fs/quota_v2.c
index 234ada903633..b53827dc02d9 100644
--- a/fs/quota_v2.c
+++ b/fs/quota_v2.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/quotaops.h>
14 15
15#include <asm/byteorder.h> 16#include <asm/byteorder.h>
16 17
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index e396b2fa4743..c8f60ee183b5 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -34,15 +34,10 @@
34** from within kupdate, it will ignore the immediate flag 34** from within kupdate, it will ignore the immediate flag
35*/ 35*/
36 36
37#include <asm/uaccess.h>
38#include <asm/system.h>
39
40#include <linux/time.h> 37#include <linux/time.h>
41#include <linux/semaphore.h> 38#include <linux/semaphore.h>
42
43#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
44#include <linux/reiserfs_fs.h> 40#include <linux/reiserfs_fs.h>
45
46#include <linux/kernel.h> 41#include <linux/kernel.h>
47#include <linux/errno.h> 42#include <linux/errno.h>
48#include <linux/fcntl.h> 43#include <linux/fcntl.h>
@@ -54,6 +49,9 @@
54#include <linux/writeback.h> 49#include <linux/writeback.h>
55#include <linux/blkdev.h> 50#include <linux/blkdev.h>
56#include <linux/backing-dev.h> 51#include <linux/backing-dev.h>
52#include <linux/uaccess.h>
53
54#include <asm/system.h>
57 55
58/* gets a struct reiserfs_journal_list * from a list head */ 56/* gets a struct reiserfs_journal_list * from a list head */
59#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ 57#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
@@ -558,13 +556,13 @@ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
558static inline void lock_journal(struct super_block *p_s_sb) 556static inline void lock_journal(struct super_block *p_s_sb)
559{ 557{
560 PROC_INFO_INC(p_s_sb, journal.lock_journal); 558 PROC_INFO_INC(p_s_sb, journal.lock_journal);
561 down(&SB_JOURNAL(p_s_sb)->j_lock); 559 mutex_lock(&SB_JOURNAL(p_s_sb)->j_mutex);
562} 560}
563 561
564/* unlock the current transaction */ 562/* unlock the current transaction */
565static inline void unlock_journal(struct super_block *p_s_sb) 563static inline void unlock_journal(struct super_block *p_s_sb)
566{ 564{
567 up(&SB_JOURNAL(p_s_sb)->j_lock); 565 mutex_unlock(&SB_JOURNAL(p_s_sb)->j_mutex);
568} 566}
569 567
570static inline void get_journal_list(struct reiserfs_journal_list *jl) 568static inline void get_journal_list(struct reiserfs_journal_list *jl)
@@ -1045,9 +1043,9 @@ static int flush_commit_list(struct super_block *s,
1045 } 1043 }
1046 1044
1047 /* make sure nobody is trying to flush this one at the same time */ 1045 /* make sure nobody is trying to flush this one at the same time */
1048 down(&jl->j_commit_lock); 1046 mutex_lock(&jl->j_commit_mutex);
1049 if (!journal_list_still_alive(s, trans_id)) { 1047 if (!journal_list_still_alive(s, trans_id)) {
1050 up(&jl->j_commit_lock); 1048 mutex_unlock(&jl->j_commit_mutex);
1051 goto put_jl; 1049 goto put_jl;
1052 } 1050 }
1053 BUG_ON(jl->j_trans_id == 0); 1051 BUG_ON(jl->j_trans_id == 0);
@@ -1057,7 +1055,7 @@ static int flush_commit_list(struct super_block *s,
1057 if (flushall) { 1055 if (flushall) {
1058 atomic_set(&(jl->j_older_commits_done), 1); 1056 atomic_set(&(jl->j_older_commits_done), 1);
1059 } 1057 }
1060 up(&jl->j_commit_lock); 1058 mutex_unlock(&jl->j_commit_mutex);
1061 goto put_jl; 1059 goto put_jl;
1062 } 1060 }
1063 1061
@@ -1181,7 +1179,7 @@ static int flush_commit_list(struct super_block *s,
1181 if (flushall) { 1179 if (flushall) {
1182 atomic_set(&(jl->j_older_commits_done), 1); 1180 atomic_set(&(jl->j_older_commits_done), 1);
1183 } 1181 }
1184 up(&jl->j_commit_lock); 1182 mutex_unlock(&jl->j_commit_mutex);
1185 put_jl: 1183 put_jl:
1186 put_journal_list(s, jl); 1184 put_journal_list(s, jl);
1187 1185
@@ -1411,8 +1409,8 @@ static int flush_journal_list(struct super_block *s,
1411 1409
1412 /* if flushall == 0, the lock is already held */ 1410 /* if flushall == 0, the lock is already held */
1413 if (flushall) { 1411 if (flushall) {
1414 down(&journal->j_flush_sem); 1412 mutex_lock(&journal->j_flush_mutex);
1415 } else if (!down_trylock(&journal->j_flush_sem)) { 1413 } else if (mutex_trylock(&journal->j_flush_mutex)) {
1416 BUG(); 1414 BUG();
1417 } 1415 }
1418 1416
@@ -1642,7 +1640,7 @@ static int flush_journal_list(struct super_block *s,
1642 jl->j_state = 0; 1640 jl->j_state = 0;
1643 put_journal_list(s, jl); 1641 put_journal_list(s, jl);
1644 if (flushall) 1642 if (flushall)
1645 up(&journal->j_flush_sem); 1643 mutex_unlock(&journal->j_flush_mutex);
1646 put_fs_excl(); 1644 put_fs_excl();
1647 return err; 1645 return err;
1648} 1646}
@@ -1772,12 +1770,12 @@ static int kupdate_transactions(struct super_block *s,
1772 struct reiserfs_journal *journal = SB_JOURNAL(s); 1770 struct reiserfs_journal *journal = SB_JOURNAL(s);
1773 chunk.nr = 0; 1771 chunk.nr = 0;
1774 1772
1775 down(&journal->j_flush_sem); 1773 mutex_lock(&journal->j_flush_mutex);
1776 if (!journal_list_still_alive(s, orig_trans_id)) { 1774 if (!journal_list_still_alive(s, orig_trans_id)) {
1777 goto done; 1775 goto done;
1778 } 1776 }
1779 1777
1780 /* we've got j_flush_sem held, nobody is going to delete any 1778 /* we've got j_flush_mutex held, nobody is going to delete any
1781 * of these lists out from underneath us 1779 * of these lists out from underneath us
1782 */ 1780 */
1783 while ((num_trans && transactions_flushed < num_trans) || 1781 while ((num_trans && transactions_flushed < num_trans) ||
@@ -1812,7 +1810,7 @@ static int kupdate_transactions(struct super_block *s,
1812 } 1810 }
1813 1811
1814 done: 1812 done:
1815 up(&journal->j_flush_sem); 1813 mutex_unlock(&journal->j_flush_mutex);
1816 return ret; 1814 return ret;
1817} 1815}
1818 1816
@@ -2556,7 +2554,7 @@ static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2556 INIT_LIST_HEAD(&jl->j_working_list); 2554 INIT_LIST_HEAD(&jl->j_working_list);
2557 INIT_LIST_HEAD(&jl->j_tail_bh_list); 2555 INIT_LIST_HEAD(&jl->j_tail_bh_list);
2558 INIT_LIST_HEAD(&jl->j_bh_list); 2556 INIT_LIST_HEAD(&jl->j_bh_list);
2559 sema_init(&jl->j_commit_lock, 1); 2557 mutex_init(&jl->j_commit_mutex);
2560 SB_JOURNAL(s)->j_num_lists++; 2558 SB_JOURNAL(s)->j_num_lists++;
2561 get_journal_list(jl); 2559 get_journal_list(jl);
2562 return jl; 2560 return jl;
@@ -2837,8 +2835,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2837 journal->j_last = NULL; 2835 journal->j_last = NULL;
2838 journal->j_first = NULL; 2836 journal->j_first = NULL;
2839 init_waitqueue_head(&(journal->j_join_wait)); 2837 init_waitqueue_head(&(journal->j_join_wait));
2840 sema_init(&journal->j_lock, 1); 2838 mutex_init(&journal->j_mutex);
2841 sema_init(&journal->j_flush_sem, 1); 2839 mutex_init(&journal->j_flush_mutex);
2842 2840
2843 journal->j_trans_id = 10; 2841 journal->j_trans_id = 10;
2844 journal->j_mount_id = 10; 2842 journal->j_mount_id = 10;
@@ -4030,7 +4028,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4030 * the new transaction is fully setup, and we've already flushed the 4028 * the new transaction is fully setup, and we've already flushed the
4031 * ordered bh list 4029 * ordered bh list
4032 */ 4030 */
4033 down(&jl->j_commit_lock); 4031 mutex_lock(&jl->j_commit_mutex);
4034 4032
4035 /* save the transaction id in case we need to commit it later */ 4033 /* save the transaction id in case we need to commit it later */
4036 commit_trans_id = jl->j_trans_id; 4034 commit_trans_id = jl->j_trans_id;
@@ -4196,7 +4194,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4196 lock_kernel(); 4194 lock_kernel();
4197 } 4195 }
4198 BUG_ON(!list_empty(&jl->j_tail_bh_list)); 4196 BUG_ON(!list_empty(&jl->j_tail_bh_list));
4199 up(&jl->j_commit_lock); 4197 mutex_unlock(&jl->j_commit_mutex);
4200 4198
4201 /* honor the flush wishes from the caller, simple commits can 4199 /* honor the flush wishes from the caller, simple commits can
4202 ** be done outside the journal lock, they are done below 4200 ** be done outside the journal lock, they are done below
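The journal locking conversion above swaps the sleeping semaphores (j_lock, j_flush_sem, j_commit_lock) for mutexes. The schematic pattern, illustrative only:

	#include <linux/mutex.h>

	static void example_init(struct mutex *m)
	{
		mutex_init(m);            /* was: sema_init(sem, 1) */
	}

	static void example_critical(struct mutex *m)
	{
		mutex_lock(m);            /* was: down(sem)   */
		/* ... journal work ... */
		mutex_unlock(m);          /* was: up(sem)     */
	}

Note the inverted trylock sense in flush_journal_list(): down_trylock() returns 0 on success while mutex_trylock() returns 1, which is why the BUG() guard changes from !down_trylock(...) to mutex_trylock(...).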
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 1d40f2bd1970..2ec748ba0bd3 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -22,6 +22,7 @@
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/buffer_head.h> 23#include <linux/buffer_head.h>
24#include <linux/exportfs.h> 24#include <linux/exportfs.h>
25#include <linux/quotaops.h>
25#include <linux/vfs.h> 26#include <linux/vfs.h>
26#include <linux/mnt_namespace.h> 27#include <linux/mnt_namespace.h>
27#include <linux/mount.h> 28#include <linux/mount.h>
@@ -182,7 +183,7 @@ static int finish_unfinished(struct super_block *s)
182 int ret = reiserfs_quota_on_mount(s, i); 183 int ret = reiserfs_quota_on_mount(s, i);
183 if (ret < 0) 184 if (ret < 0)
184 reiserfs_warning(s, 185 reiserfs_warning(s,
185 "reiserfs: cannot turn on journalled quota: error %d", 186 "reiserfs: cannot turn on journaled quota: error %d",
186 ret); 187 ret);
187 } 188 }
188 } 189 }
@@ -876,7 +877,9 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
876 mount options were selected. */ 877 mount options were selected. */
877 unsigned long *blocks, /* strtol-ed from NNN of resize=NNN */ 878 unsigned long *blocks, /* strtol-ed from NNN of resize=NNN */
878 char **jdev_name, 879 char **jdev_name,
879 unsigned int *commit_max_age) 880 unsigned int *commit_max_age,
881 char **qf_names,
882 unsigned int *qfmt)
880{ 883{
881 int c; 884 int c;
882 char *arg = NULL; 885 char *arg = NULL;
@@ -992,9 +995,11 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
992 if (c == 'u' || c == 'g') { 995 if (c == 'u' || c == 'g') {
993 int qtype = c == 'u' ? USRQUOTA : GRPQUOTA; 996 int qtype = c == 'u' ? USRQUOTA : GRPQUOTA;
994 997
995 if (sb_any_quota_enabled(s)) { 998 if ((sb_any_quota_enabled(s) ||
999 sb_any_quota_suspended(s)) &&
1000 (!*arg != !REISERFS_SB(s)->s_qf_names[qtype])) {
996 reiserfs_warning(s, 1001 reiserfs_warning(s,
997 "reiserfs_parse_options: cannot change journalled quota options when quota turned on."); 1002 "reiserfs_parse_options: cannot change journaled quota options when quota turned on.");
998 return 0; 1003 return 0;
999 } 1004 }
1000 if (*arg) { /* Some filename specified? */ 1005 if (*arg) { /* Some filename specified? */
@@ -1011,46 +1016,54 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
1011 "reiserfs_parse_options: quotafile must be on filesystem root."); 1016 "reiserfs_parse_options: quotafile must be on filesystem root.");
1012 return 0; 1017 return 0;
1013 } 1018 }
1014 REISERFS_SB(s)->s_qf_names[qtype] = 1019 qf_names[qtype] =
1015 kmalloc(strlen(arg) + 1, GFP_KERNEL); 1020 kmalloc(strlen(arg) + 1, GFP_KERNEL);
1016 if (!REISERFS_SB(s)->s_qf_names[qtype]) { 1021 if (!qf_names[qtype]) {
1017 reiserfs_warning(s, 1022 reiserfs_warning(s,
1018 "reiserfs_parse_options: not enough memory for storing quotafile name."); 1023 "reiserfs_parse_options: not enough memory for storing quotafile name.");
1019 return 0; 1024 return 0;
1020 } 1025 }
1021 strcpy(REISERFS_SB(s)->s_qf_names[qtype], arg); 1026 strcpy(qf_names[qtype], arg);
1022 *mount_options |= 1 << REISERFS_QUOTA; 1027 *mount_options |= 1 << REISERFS_QUOTA;
1023 } else { 1028 } else {
1024 kfree(REISERFS_SB(s)->s_qf_names[qtype]); 1029 if (qf_names[qtype] !=
1025 REISERFS_SB(s)->s_qf_names[qtype] = NULL; 1030 REISERFS_SB(s)->s_qf_names[qtype])
1031 kfree(qf_names[qtype]);
1032 qf_names[qtype] = NULL;
1026 } 1033 }
1027 } 1034 }
1028 if (c == 'f') { 1035 if (c == 'f') {
1029 if (!strcmp(arg, "vfsold")) 1036 if (!strcmp(arg, "vfsold"))
1030 REISERFS_SB(s)->s_jquota_fmt = QFMT_VFS_OLD; 1037 *qfmt = QFMT_VFS_OLD;
1031 else if (!strcmp(arg, "vfsv0")) 1038 else if (!strcmp(arg, "vfsv0"))
1032 REISERFS_SB(s)->s_jquota_fmt = QFMT_VFS_V0; 1039 *qfmt = QFMT_VFS_V0;
1033 else { 1040 else {
1034 reiserfs_warning(s, 1041 reiserfs_warning(s,
1035 "reiserfs_parse_options: unknown quota format specified."); 1042 "reiserfs_parse_options: unknown quota format specified.");
1036 return 0; 1043 return 0;
1037 } 1044 }
1045 if ((sb_any_quota_enabled(s) ||
1046 sb_any_quota_suspended(s)) &&
1047 *qfmt != REISERFS_SB(s)->s_jquota_fmt) {
1048 reiserfs_warning(s,
1049 "reiserfs_parse_options: cannot change journaled quota options when quota turned on.");
1050 return 0;
1051 }
1038 } 1052 }
1039#else 1053#else
1040 if (c == 'u' || c == 'g' || c == 'f') { 1054 if (c == 'u' || c == 'g' || c == 'f') {
1041 reiserfs_warning(s, 1055 reiserfs_warning(s,
1042 "reiserfs_parse_options: journalled quota options not supported."); 1056 "reiserfs_parse_options: journaled quota options not supported.");
1043 return 0; 1057 return 0;
1044 } 1058 }
1045#endif 1059#endif
1046 } 1060 }
1047 1061
1048#ifdef CONFIG_QUOTA 1062#ifdef CONFIG_QUOTA
1049 if (!REISERFS_SB(s)->s_jquota_fmt 1063 if (!REISERFS_SB(s)->s_jquota_fmt && !*qfmt
1050 && (REISERFS_SB(s)->s_qf_names[USRQUOTA] 1064 && (qf_names[USRQUOTA] || qf_names[GRPQUOTA])) {
1051 || REISERFS_SB(s)->s_qf_names[GRPQUOTA])) {
1052 reiserfs_warning(s, 1065 reiserfs_warning(s,
1053 "reiserfs_parse_options: journalled quota format not specified."); 1066 "reiserfs_parse_options: journaled quota format not specified.");
1054 return 0; 1067 return 0;
1055 } 1068 }
1056 /* This checking is not precise wrt the quota type but for our purposes it is sufficient */ 1069 /* This checking is not precise wrt the quota type but for our purposes it is sufficient */
@@ -1130,6 +1143,21 @@ static void handle_attrs(struct super_block *s)
1130 } 1143 }
1131} 1144}
1132 1145
1146#ifdef CONFIG_QUOTA
1147static void handle_quota_files(struct super_block *s, char **qf_names,
1148 unsigned int *qfmt)
1149{
1150 int i;
1151
1152 for (i = 0; i < MAXQUOTAS; i++) {
1153 if (qf_names[i] != REISERFS_SB(s)->s_qf_names[i])
1154 kfree(REISERFS_SB(s)->s_qf_names[i]);
1155 REISERFS_SB(s)->s_qf_names[i] = qf_names[i];
1156 }
1157 REISERFS_SB(s)->s_jquota_fmt = *qfmt;
1158}
1159#endif
1160
1133static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) 1161static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1134{ 1162{
1135 struct reiserfs_super_block *rs; 1163 struct reiserfs_super_block *rs;
@@ -1141,23 +1169,30 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1141 struct reiserfs_journal *journal = SB_JOURNAL(s); 1169 struct reiserfs_journal *journal = SB_JOURNAL(s);
1142 char *new_opts = kstrdup(arg, GFP_KERNEL); 1170 char *new_opts = kstrdup(arg, GFP_KERNEL);
1143 int err; 1171 int err;
1172 char *qf_names[MAXQUOTAS];
1173 unsigned int qfmt = 0;
1144#ifdef CONFIG_QUOTA 1174#ifdef CONFIG_QUOTA
1145 int i; 1175 int i;
1176
1177 memcpy(qf_names, REISERFS_SB(s)->s_qf_names, sizeof(qf_names));
1146#endif 1178#endif
1147 1179
1148 rs = SB_DISK_SUPER_BLOCK(s); 1180 rs = SB_DISK_SUPER_BLOCK(s);
1149 1181
1150 if (!reiserfs_parse_options 1182 if (!reiserfs_parse_options
1151 (s, arg, &mount_options, &blocks, NULL, &commit_max_age)) { 1183 (s, arg, &mount_options, &blocks, NULL, &commit_max_age,
1184 qf_names, &qfmt)) {
1152#ifdef CONFIG_QUOTA 1185#ifdef CONFIG_QUOTA
1153 for (i = 0; i < MAXQUOTAS; i++) { 1186 for (i = 0; i < MAXQUOTAS; i++)
1154 kfree(REISERFS_SB(s)->s_qf_names[i]); 1187 if (qf_names[i] != REISERFS_SB(s)->s_qf_names[i])
1155 REISERFS_SB(s)->s_qf_names[i] = NULL; 1188 kfree(qf_names[i]);
1156 }
1157#endif 1189#endif
1158 err = -EINVAL; 1190 err = -EINVAL;
1159 goto out_err; 1191 goto out_err;
1160 } 1192 }
1193#ifdef CONFIG_QUOTA
1194 handle_quota_files(s, qf_names, &qfmt);
1195#endif
1161 1196
1162 handle_attrs(s); 1197 handle_attrs(s);
1163 1198
@@ -1570,6 +1605,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1570 char *jdev_name; 1605 char *jdev_name;
1571 struct reiserfs_sb_info *sbi; 1606 struct reiserfs_sb_info *sbi;
1572 int errval = -EINVAL; 1607 int errval = -EINVAL;
1608 char *qf_names[MAXQUOTAS] = {};
1609 unsigned int qfmt = 0;
1573 1610
1574 save_mount_options(s, data); 1611 save_mount_options(s, data);
1575 1612
@@ -1597,9 +1634,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1597 jdev_name = NULL; 1634 jdev_name = NULL;
1598 if (reiserfs_parse_options 1635 if (reiserfs_parse_options
1599 (s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name, 1636 (s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
1600 &commit_max_age) == 0) { 1637 &commit_max_age, qf_names, &qfmt) == 0) {
1601 goto error; 1638 goto error;
1602 } 1639 }
1640#ifdef CONFIG_QUOTA
1641 handle_quota_files(s, qf_names, &qfmt);
1642#endif
1603 1643
1604 if (blocks) { 1644 if (blocks) {
1605 SWARN(silent, s, "jmacd-7: reiserfs_fill_super: resize option " 1645 SWARN(silent, s, "jmacd-7: reiserfs_fill_super: resize option "
@@ -1819,7 +1859,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1819 1859
1820 return (0); 1860 return (0);
1821 1861
1822 error: 1862error:
1823 if (jinit_done) { /* kill the commit thread, free journal ram */ 1863 if (jinit_done) { /* kill the commit thread, free journal ram */
1824 journal_release_error(NULL, s); 1864 journal_release_error(NULL, s);
1825 } 1865 }
@@ -1830,10 +1870,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1830#ifdef CONFIG_QUOTA 1870#ifdef CONFIG_QUOTA
1831 { 1871 {
1832 int j; 1872 int j;
1833 for (j = 0; j < MAXQUOTAS; j++) { 1873 for (j = 0; j < MAXQUOTAS; j++)
1834 kfree(sbi->s_qf_names[j]); 1874 kfree(qf_names[j]);
1835 sbi->s_qf_names[j] = NULL;
1836 }
1837 } 1875 }
1838#endif 1876#endif
1839 kfree(sbi); 1877 kfree(sbi);
@@ -1980,7 +2018,7 @@ static int reiserfs_release_dquot(struct dquot *dquot)
1980 2018
1981static int reiserfs_mark_dquot_dirty(struct dquot *dquot) 2019static int reiserfs_mark_dquot_dirty(struct dquot *dquot)
1982{ 2020{
1983 /* Are we journalling quotas? */ 2021 /* Are we journaling quotas? */
1984 if (REISERFS_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] || 2022 if (REISERFS_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
1985 REISERFS_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) { 2023 REISERFS_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
1986 dquot_mark_dquot_dirty(dquot); 2024 dquot_mark_dquot_dirty(dquot);
@@ -2026,6 +2064,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
2026 int err; 2064 int err;
2027 struct nameidata nd; 2065 struct nameidata nd;
2028 struct inode *inode; 2066 struct inode *inode;
2067 struct reiserfs_transaction_handle th;
2029 2068
2030 if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) 2069 if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
2031 return -EINVAL; 2070 return -EINVAL;
@@ -2053,17 +2092,28 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
2053 } 2092 }
2054 mark_inode_dirty(inode); 2093 mark_inode_dirty(inode);
2055 } 2094 }
2056 /* Not journalling quota? No more tests needed... */ 2095 /* Journaling quota? */
2057 if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] && 2096 if (REISERFS_SB(sb)->s_qf_names[type]) {
2058 !REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) { 2097 /* Quotafile not of fs root? */
2059 path_put(&nd.path); 2098 if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
2060 return vfs_quota_on(sb, type, format_id, path, 0); 2099 reiserfs_warning(sb,
2061 }
2062 /* Quotafile not of fs root? */
2063 if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
2064 reiserfs_warning(sb,
2065 "reiserfs: Quota file not on filesystem root. " 2100 "reiserfs: Quota file not on filesystem root. "
2066 "Journalled quota will not work."); 2101 "Journalled quota will not work.");
2102 }
2103
2104 /*
2105 * When we journal data on quota file, we have to flush journal to see
2106 * all updates to the file when we bypass pagecache...
2107 */
2108 if (reiserfs_file_data_log(inode)) {
2109 /* Just start temporary transaction and finish it */
2110 err = journal_begin(&th, sb, 1);
2111 if (err)
2112 return err;
2113 err = journal_end_sync(&th, sb, 1);
2114 if (err)
2115 return err;
2116 }
2067 path_put(&nd.path); 2117 path_put(&nd.path);
2068 return vfs_quota_on(sb, type, format_id, path, 0); 2118 return vfs_quota_on(sb, type, format_id, path, 0);
2069} 2119}
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 5e90a95ad60b..056008db1377 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -6,8 +6,6 @@
6#include <linux/reiserfs_xattr.h> 6#include <linux/reiserfs_xattr.h>
7#include <asm/uaccess.h> 7#include <asm/uaccess.h>
8 8
9#define XATTR_SECURITY_PREFIX "security."
10
11static int 9static int
12security_get(struct inode *inode, const char *name, void *buffer, size_t size) 10security_get(struct inode *inode, const char *name, void *buffer, size_t size)
13{ 11{
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 024a938ca60f..60abe2bb1f98 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -7,8 +7,6 @@
7#include <linux/reiserfs_xattr.h> 7#include <linux/reiserfs_xattr.h>
8#include <asm/uaccess.h> 8#include <asm/uaccess.h>
9 9
10#define XATTR_TRUSTED_PREFIX "trusted."
11
12static int 10static int
13trusted_get(struct inode *inode, const char *name, void *buffer, size_t size) 11trusted_get(struct inode *inode, const char *name, void *buffer, size_t size)
14{ 12{
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index 073f39364b11..1384efcb938e 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -10,8 +10,6 @@
10# include <linux/reiserfs_acl.h> 10# include <linux/reiserfs_acl.h>
11#endif 11#endif
12 12
13#define XATTR_USER_PREFIX "user."
14
15static int 13static int
16user_get(struct inode *inode, const char *name, void *buffer, size_t size) 14user_get(struct inode *inode, const char *name, void *buffer, size_t size)
17{ 15{
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 619725644c75..9c39bc7f8431 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -205,11 +205,19 @@ static const struct file_operations signalfd_fops = {
205 .read = signalfd_read, 205 .read = signalfd_read,
206}; 206};
207 207
208asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask) 208asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask,
209 size_t sizemask, int flags)
209{ 210{
210 sigset_t sigmask; 211 sigset_t sigmask;
211 struct signalfd_ctx *ctx; 212 struct signalfd_ctx *ctx;
212 213
214 /* Check the SFD_* constants for consistency. */
215 BUILD_BUG_ON(SFD_CLOEXEC != O_CLOEXEC);
216 BUILD_BUG_ON(SFD_NONBLOCK != O_NONBLOCK);
217
218 if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK))
219 return -EINVAL;
220
213 if (sizemask != sizeof(sigset_t) || 221 if (sizemask != sizeof(sigset_t) ||
214 copy_from_user(&sigmask, user_mask, sizeof(sigmask))) 222 copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
215 return -EINVAL; 223 return -EINVAL;
@@ -227,7 +235,8 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
227 * When we call this, the initialization must be complete, since 235 * When we call this, the initialization must be complete, since
228 * anon_inode_getfd() will install the fd. 236 * anon_inode_getfd() will install the fd.
229 */ 237 */
230 ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx); 238 ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx,
239 flags & (O_CLOEXEC | O_NONBLOCK));
231 if (ufd < 0) 240 if (ufd < 0)
232 kfree(ctx); 241 kfree(ctx);
233 } else { 242 } else {
@@ -249,3 +258,9 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
249 258
250 return ufd; 259 return ufd;
251} 260}
261
262asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask,
263 size_t sizemask)
264{
265 return sys_signalfd4(ufd, user_mask, sizemask, 0);
266}
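A hedged userspace sketch of the flags path added by sys_signalfd4() (SFD_CLOEXEC/SFD_NONBLOCK come from <sys/signalfd.h> in sufficiently new glibc):

	#include <signal.h>
	#include <stdio.h>
	#include <sys/signalfd.h>
	#include <unistd.h>

	int main(void)
	{
		sigset_t mask;
		int fd;

		sigemptyset(&mask);
		sigaddset(&mask, SIGINT);
		sigprocmask(SIG_BLOCK, &mask, NULL);  /* signals must be blocked */

		fd = signalfd(-1, &mask, SFD_CLOEXEC | SFD_NONBLOCK);
		if (fd == -1) {
			perror("signalfd");
			return 1;
		}
		/* read() of a struct signalfd_siginfo now returns -1/EAGAIN
		 * instead of blocking when no signal is pending. */
		close(fd);
		return 0;
	}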
diff --git a/fs/smbfs/cache.c b/fs/smbfs/cache.c
index 8182f0542a21..8c177eb7e344 100644
--- a/fs/smbfs/cache.c
+++ b/fs/smbfs/cache.c
@@ -13,7 +13,6 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/dirent.h>
17#include <linux/smb_fs.h> 16#include <linux/smb_fs.h>
18#include <linux/pagemap.h> 17#include <linux/pagemap.h>
19#include <linux/net.h> 18#include <linux/net.h>
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index d517a27b7f4b..ee536e8a649a 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -16,7 +16,6 @@
16#include <linux/stat.h> 16#include <linux/stat.h>
17#include <linux/fcntl.h> 17#include <linux/fcntl.h>
18#include <linux/dcache.h> 18#include <linux/dcache.h>
19#include <linux/dirent.h>
20#include <linux/nls.h> 19#include <linux/nls.h>
21#include <linux/smp_lock.h> 20#include <linux/smp_lock.h>
22#include <linux/net.h> 21#include <linux/net.h>
diff --git a/fs/super.c b/fs/super.c
index 453877c5697b..e931ae9511fe 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -70,6 +70,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
70 INIT_LIST_HEAD(&s->s_instances); 70 INIT_LIST_HEAD(&s->s_instances);
71 INIT_HLIST_HEAD(&s->s_anon); 71 INIT_HLIST_HEAD(&s->s_anon);
72 INIT_LIST_HEAD(&s->s_inodes); 72 INIT_LIST_HEAD(&s->s_inodes);
73 INIT_LIST_HEAD(&s->s_dentry_lru);
73 init_rwsem(&s->s_umount); 74 init_rwsem(&s->s_umount);
74 mutex_init(&s->s_lock); 75 mutex_init(&s->s_lock);
75 lockdep_set_class(&s->s_umount, &type->s_umount_key); 76 lockdep_set_class(&s->s_umount, &type->s_umount_key);
diff --git a/fs/sync.c b/fs/sync.c
index 228e17b5e9ee..2967562d416f 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -139,7 +139,8 @@ asmlinkage long sys_fdatasync(unsigned int fd)
139 * before performing the write. 139 * before performing the write.
140 * 140 *
141 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the 141 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
142 * range which are not presently under writeback. 142 * range which are not presently under writeback. Note that this may block for
143 * significant periods due to exhaustion of disk request structures.
143 * 144 *
144 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range 145 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
145 * after performing the write. 146 * after performing the write.
diff --git a/fs/timerfd.c b/fs/timerfd.c
index d87d354ec424..c502c60e4f54 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -184,7 +184,11 @@ asmlinkage long sys_timerfd_create(int clockid, int flags)
184 int ufd; 184 int ufd;
185 struct timerfd_ctx *ctx; 185 struct timerfd_ctx *ctx;
186 186
187 if (flags) 187 /* Check the TFD_* constants for consistency. */
188 BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
189 BUILD_BUG_ON(TFD_NONBLOCK != O_NONBLOCK);
190
191 if (flags & ~(TFD_CLOEXEC | TFD_NONBLOCK))
188 return -EINVAL; 192 return -EINVAL;
189 if (clockid != CLOCK_MONOTONIC && 193 if (clockid != CLOCK_MONOTONIC &&
190 clockid != CLOCK_REALTIME) 194 clockid != CLOCK_REALTIME)
@@ -198,7 +202,8 @@ asmlinkage long sys_timerfd_create(int clockid, int flags)
198 ctx->clockid = clockid; 202 ctx->clockid = clockid;
199 hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS); 203 hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
200 204
201 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx); 205 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
206 flags & (O_CLOEXEC | O_NONBLOCK));
202 if (ufd < 0) 207 if (ufd < 0)
203 kfree(ctx); 208 kfree(ctx);
204 209
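Userspace sketch of the relaxed check above: timerfd_create() now accepts TFD_CLOEXEC and TFD_NONBLOCK (declared in glibc's <sys/timerfd.h> where available; illustrative only):

	#include <stdio.h>
	#include <sys/timerfd.h>
	#include <unistd.h>

	int main(void)
	{
		struct itimerspec its = {
			.it_value = { .tv_sec = 1 },  /* one-shot, fires after 1s */
		};
		int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);

		if (fd == -1) {
			perror("timerfd_create");
			return 1;
		}
		timerfd_settime(fd, 0, &its, NULL);
		close(fd);
		return 0;
	}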
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 85b22b5977fa..227c9d700040 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -76,6 +76,7 @@
76 76
77#include <linux/errno.h> 77#include <linux/errno.h>
78#include <linux/fs.h> 78#include <linux/fs.h>
79#include <linux/quotaops.h>
79#include <linux/slab.h> 80#include <linux/slab.h>
80#include <linux/time.h> 81#include <linux/time.h>
81#include <linux/stat.h> 82#include <linux/stat.h>
@@ -1232,7 +1233,7 @@ static int ufs_show_options(struct seq_file *seq, struct vfsmount *vfs)
1232{ 1233{
1233 struct ufs_sb_info *sbi = UFS_SB(vfs->mnt_sb); 1234 struct ufs_sb_info *sbi = UFS_SB(vfs->mnt_sb);
1234 unsigned mval = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE; 1235 unsigned mval = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE;
1235 struct match_token *tp = tokens; 1236 const struct match_token *tp = tokens;
1236 1237
1237 while (tp->token != Opt_onerror_panic && tp->token != mval) 1238 while (tp->token != Opt_onerror_panic && tp->token != mval)
1238 ++tp; 1239 ++tp;
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index b546ba69be82..155c10b4adbd 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -621,7 +621,7 @@ shortname:
621 memcpy(de->name, msdos_name, MSDOS_NAME); 621 memcpy(de->name, msdos_name, MSDOS_NAME);
622 de->attr = is_dir ? ATTR_DIR : ATTR_ARCH; 622 de->attr = is_dir ? ATTR_DIR : ATTR_ARCH;
623 de->lcase = lcase; 623 de->lcase = lcase;
624 fat_date_unix2dos(ts->tv_sec, &time, &date); 624 fat_date_unix2dos(ts->tv_sec, &time, &date, sbi->options.tz_utc);
625 de->time = de->ctime = time; 625 de->time = de->ctime = time;
626 de->date = de->cdate = de->adate = date; 626 de->date = de->cdate = de->adate = date;
627 de->ctime_cs = 0; 627 de->ctime_cs = 0;
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h
deleted file mode 100644
index f44129abc02c..000000000000
--- a/include/asm-alpha/ide.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * linux/include/asm-alpha/ide.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7/*
8 * This file contains the alpha architecture specific IDE code.
9 */
10
11#ifndef __ASMalpha_IDE_H
12#define __ASMalpha_IDE_H
13
14#ifdef __KERNEL__
15
16static inline int ide_default_irq(unsigned long base)
17{
18 switch (base) {
19 case 0x1f0: return 14;
20 case 0x170: return 15;
21 case 0x1e8: return 11;
22 case 0x168: return 10;
23 default:
24 return 0;
25 }
26}
27
28static inline unsigned long ide_default_io_base(int index)
29{
30 switch (index) {
31 case 0: return 0x1f0;
32 case 1: return 0x170;
33 case 2: return 0x1e8;
34 case 3: return 0x168;
35 default:
36 return 0;
37 }
38}
39
40#include <asm-generic/ide_iops.h>
41
42#endif /* __KERNEL__ */
43
44#endif /* __ASMalpha_IDE_H */
diff --git a/include/asm-alpha/kvm.h b/include/asm-alpha/kvm.h
deleted file mode 100644
index b9daec429689..000000000000
--- a/include/asm-alpha/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_ALPHA_H
2#define __LINUX_KVM_ALPHA_H
3
4/* alpha does not support KVM */
5
6#endif
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index 22ff9762d17b..0995f9d13417 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -80,9 +80,6 @@ typedef struct page *pgtable_t;
80 80
81#endif /* !__ASSEMBLY__ */ 81#endif /* !__ASSEMBLY__ */
82 82
83/* to align the pointer to the (next) page boundary */
84#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
85
86#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 83#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
87#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 84#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
88#ifndef CONFIG_DISCONTIGMEM 85#ifndef CONFIG_DISCONTIGMEM
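The removed PAGE_ALIGN() is the usual round-up-to-the-next-page helper; the per-architecture copy goes away in favour of a shared definition. A self-contained illustration of the arithmetic (the 8 KB page size is the Alpha value, reproduced here only for the demo):

#include <stdio.h>

#define PAGE_SHIFT 13                    /* Alpha uses 8 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* 0x2001 rounds up to the next 8 KB boundary, 0x4000. */
	printf("%#lx -> %#lx\n", 0x2001UL, PAGE_ALIGN(0x2001UL));
	return 0;
}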
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-alpha/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-alpha/socket.h b/include/asm-alpha/socket.h
index 08c979319929..a1057c2d95e7 100644
--- a/include/asm-alpha/socket.h
+++ b/include/asm-alpha/socket.h
@@ -62,4 +62,9 @@
62 62
63#define SO_MARK 36 63#define SO_MARK 36
64 64
65/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
66 * have to define SOCK_NONBLOCK to a different value here.
67 */
68#define SOCK_NONBLOCK 0x40000000
69
65#endif /* _ASM_SOCKET_H */ 70#endif /* _ASM_SOCKET_H */
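The new flag lets user space create a non-blocking socket in one call; Alpha needs its own value because O_NONBLOCK there overlaps the socket-type bits. A minimal sketch (requires a kernel and C library that expose SOCK_NONBLOCK):

#define _GNU_SOURCE
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* The non-blocking request is encoded in the type argument,
	 * using the architecture-specific SOCK_NONBLOCK value above. */
	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	close(fd);
	return 0;
}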
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
index fb3185196298..15fda4344424 100644
--- a/include/asm-alpha/thread_info.h
+++ b/include/asm-alpha/thread_info.h
@@ -50,10 +50,8 @@ register struct thread_info *__current_thread_info __asm__("$8");
50#define current_thread_info() __current_thread_info 50#define current_thread_info() __current_thread_info
51 51
52/* Thread information allocation. */ 52/* Thread information allocation. */
53#define THREAD_SIZE_ORDER 1
53#define THREAD_SIZE (2*PAGE_SIZE) 54#define THREAD_SIZE (2*PAGE_SIZE)
54#define alloc_thread_info(tsk) \
55 ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
56#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
57 55
58#endif /* __ASSEMBLY__ */ 56#endif /* __ASSEMBLY__ */
59 57
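Defining THREAD_SIZE_ORDER lets the architecture drop its private alloc_thread_info()/free_thread_info() pair; roughly, the generic code can then do the allocation itself. A sketch of that shape (not the exact core-kernel implementation):

/* Sketch only: what a generic allocator can do once the arch
 * provides THREAD_SIZE_ORDER (order 1 == two pages here). */
static struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	return (struct thread_info *)__get_free_pages(GFP_KERNEL,
						      THREAD_SIZE_ORDER);
}

static void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}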
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index 90d14ee564f5..ef4f5da2029f 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -198,17 +198,13 @@ iop_chan_memset_slot_count(size_t len, int *slots_per_op)
198static inline int 198static inline int
199iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) 199iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
200{ 200{
201 int num_slots; 201 static const char slot_count_table[] = { 1, 2, 2, 2,
202 /* slots_to_find = 1 for basic descriptor + 1 per 4 sources above 1 202 2, 3, 3, 3,
203 * (1 source => 8 bytes) (1 slot => 32 bytes) 203 3, 4, 4, 4,
204 */ 204 4, 5, 5, 5,
205 num_slots = 1 + (((src_cnt - 1) << 3) >> 5); 205 };
206 if (((src_cnt - 1) << 3) & 0x1f) 206 *slots_per_op = slot_count_table[src_cnt - 1];
207 num_slots++; 207 return *slots_per_op;
208
209 *slots_per_op = num_slots;
210
211 return num_slots;
212} 208}
213 209
214#define ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024) 210#define ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
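The lookup table is intended to be equivalent to the arithmetic it replaces: one slot for the basic descriptor plus one per four sources, rounded up. A small host-side check of that equivalence for src_cnt 1..16:

#include <assert.h>
#include <stdio.h>

static const char slot_count_table[] = { 1, 2, 2, 2,
					 2, 3, 3, 3,
					 3, 4, 4, 4,
					 4, 5, 5, 5, };

int main(void)
{
	for (int src_cnt = 1; src_cnt <= 16; src_cnt++) {
		/* Old formula: 1 slot + 1 per 4 sources, rounded up
		 * (1 source => 8 bytes, 1 slot => 32 bytes). */
		int num_slots = 1 + (((src_cnt - 1) << 3) >> 5);

		if (((src_cnt - 1) << 3) & 0x1f)
			num_slots++;
		assert(num_slots == slot_count_table[src_cnt - 1]);
	}
	printf("table matches the old formula\n");
	return 0;
}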
diff --git a/include/asm-arm/arch-pxa/cm-x270.h b/include/asm-arm/arch-pxa/cm-x270.h
deleted file mode 100644
index f8fac9e18009..000000000000
--- a/include/asm-arm/arch-pxa/cm-x270.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * linux/include/asm/arch-pxa/cm-x270.h
3 *
4 * Copyright Compulab Ltd., 2003, 2007
5 * Mike Rapoport <mike@compulab.co.il>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12
13/* CM-x270 device physical addresses */
14#define CMX270_CS1_PHYS (PXA_CS1_PHYS)
15#define MARATHON_PHYS (PXA_CS2_PHYS)
16#define CMX270_IDE104_PHYS (PXA_CS3_PHYS)
17#define CMX270_IT8152_PHYS (PXA_CS4_PHYS)
18
19/* Statically mapped regions */
20#define CMX270_VIRT_BASE (0xe8000000)
21#define CMX270_IT8152_VIRT (CMX270_VIRT_BASE)
22#define CMX270_IDE104_VIRT (CMX270_IT8152_VIRT + SZ_64M)
23
24/* GPIO related definitions */
25#define GPIO_IT8152_IRQ (22)
26
27#define IRQ_GPIO_IT8152_IRQ IRQ_GPIO(GPIO_IT8152_IRQ)
28#define PME_IRQ IRQ_GPIO(0)
29#define CMX270_IDE_IRQ IRQ_GPIO(100)
30#define CMX270_GPIRQ1 IRQ_GPIO(101)
31#define CMX270_TOUCHIRQ IRQ_GPIO(96)
32#define CMX270_ETHIRQ IRQ_GPIO(10)
33#define CMX270_GFXIRQ IRQ_GPIO(95)
34#define CMX270_NANDIRQ IRQ_GPIO(89)
35#define CMX270_MMC_IRQ IRQ_GPIO(83)
36
37/* PCMCIA related definitions */
38#define PCC_DETECT(x) (GPLR(84 - (x)) & GPIO_bit(84 - (x)))
39#define PCC_READY(x) (GPLR(82 - (x)) & GPIO_bit(82 - (x)))
40
41#define PCMCIA_S0_CD_VALID IRQ_GPIO(84)
42#define PCMCIA_S0_CD_VALID_EDGE GPIO_BOTH_EDGES
43
44#define PCMCIA_S1_CD_VALID IRQ_GPIO(83)
45#define PCMCIA_S1_CD_VALID_EDGE GPIO_BOTH_EDGES
46
47#define PCMCIA_S0_RDYINT IRQ_GPIO(82)
48#define PCMCIA_S1_RDYINT IRQ_GPIO(81)
49
50#define PCMCIA_RESET_GPIO 53
diff --git a/include/asm-arm/arch-pxa/eseries-gpio.h b/include/asm-arm/arch-pxa/eseries-gpio.h
new file mode 100644
index 000000000000..4c90b1310270
--- /dev/null
+++ b/include/asm-arm/arch-pxa/eseries-gpio.h
@@ -0,0 +1,50 @@
1/*
2 * eseries-gpio.h
3 *
4 * Copyright (C) Ian Molton <spyro@f2s.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12/* e-series power button */
13#define GPIO_ESERIES_POWERBTN 0
14
15/* UDC GPIO definitions */
16#define GPIO_E7XX_USB_DISC 13
17#define GPIO_E7XX_USB_PULLUP 3
18
19#define GPIO_E800_USB_DISC 4
20#define GPIO_E800_USB_PULLUP 84
21
22/* e740 PCMCIA GPIO definitions */
23/* Note: PWR1 seems to be inverted */
24#define GPIO_E740_PCMCIA_CD0 8
25#define GPIO_E740_PCMCIA_CD1 44
26#define GPIO_E740_PCMCIA_RDY0 11
27#define GPIO_E740_PCMCIA_RDY1 6
28#define GPIO_E740_PCMCIA_RST0 27
29#define GPIO_E740_PCMCIA_RST1 24
30#define GPIO_E740_PCMCIA_PWR0 20
31#define GPIO_E740_PCMCIA_PWR1 23
32
33/* e750 PCMCIA GPIO definitions */
34#define GPIO_E750_PCMCIA_CD0 8
35#define GPIO_E750_PCMCIA_RDY0 12
36#define GPIO_E750_PCMCIA_RST0 27
37#define GPIO_E750_PCMCIA_PWR0 20
38
39/* e800 PCMCIA GPIO definitions */
40#define GPIO_E800_PCMCIA_RST0 69
41#define GPIO_E800_PCMCIA_RST1 72
42#define GPIO_E800_PCMCIA_PWR0 20
43#define GPIO_E800_PCMCIA_PWR1 73
44
45/* e7xx IrDA power control */
46#define GPIO_E7XX_IR_ON 38
47
48/* ASIC related GPIOs */
49#define GPIO_ESERIES_TMIO_IRQ 5
50#define GPIO_E800_ANGELX_IRQ 8
diff --git a/include/asm-arm/arch-pxa/eseries-irq.h b/include/asm-arm/arch-pxa/eseries-irq.h
new file mode 100644
index 000000000000..f2a93d5e31d3
--- /dev/null
+++ b/include/asm-arm/arch-pxa/eseries-irq.h
@@ -0,0 +1,27 @@
1/*
2 * eseries-irq.h
3 *
4 * Copyright (C) Ian Molton <spyro@f2s.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#define ANGELX_IRQ_BASE (IRQ_BOARD_START+8)
13#define IRQ_ANGELX(n) (ANGELX_IRQ_BASE + (n))
14
15#define ANGELX_RDY0_IRQ IRQ_ANGELX(0)
16#define ANGELX_ST0_IRQ IRQ_ANGELX(1)
17#define ANGELX_CD0_IRQ IRQ_ANGELX(2)
18#define ANGELX_RDY1_IRQ IRQ_ANGELX(3)
19#define ANGELX_ST1_IRQ IRQ_ANGELX(4)
20#define ANGELX_CD1_IRQ IRQ_ANGELX(5)
21
22#define TMIO_IRQ_BASE (IRQ_BOARD_START+0)
23#define IRQ_TMIO(n) (TMIO_IRQ_BASE + (n))
24
25#define TMIO_SD_IRQ IRQ_TMIO(1)
26#define TMIO_USB_IRQ IRQ_TMIO(2)
27
diff --git a/include/asm-arm/arch-pxa/hardware.h b/include/asm-arm/arch-pxa/hardware.h
index d9af6dabc899..979a45695d7d 100644
--- a/include/asm-arm/arch-pxa/hardware.h
+++ b/include/asm-arm/arch-pxa/hardware.h
@@ -69,6 +69,12 @@
69 _id == 0x212; \ 69 _id == 0x212; \
70 }) 70 })
71 71
72#define __cpu_is_pxa255(id) \
73 ({ \
74 unsigned int _id = (id) >> 4 & 0xfff; \
75 _id == 0x2d0; \
76 })
77
72#define __cpu_is_pxa25x(id) \ 78#define __cpu_is_pxa25x(id) \
73 ({ \ 79 ({ \
74 unsigned int _id = (id) >> 4 & 0xfff; \ 80 unsigned int _id = (id) >> 4 & 0xfff; \
@@ -76,6 +82,7 @@
76 }) 82 })
77#else 83#else
78#define __cpu_is_pxa21x(id) (0) 84#define __cpu_is_pxa21x(id) (0)
85#define __cpu_is_pxa255(id) (0)
79#define __cpu_is_pxa25x(id) (0) 86#define __cpu_is_pxa25x(id) (0)
80#endif 87#endif
81 88
@@ -119,11 +126,26 @@
119#define __cpu_is_pxa320(id) (0) 126#define __cpu_is_pxa320(id) (0)
120#endif 127#endif
121 128
129#ifdef CONFIG_CPU_PXA930
130#define __cpu_is_pxa930(id) \
131 ({ \
132 unsigned int _id = (id) >> 4 & 0xfff; \
133 _id == 0x683; \
134 })
135#else
136#define __cpu_is_pxa930(id) (0)
137#endif
138
122#define cpu_is_pxa21x() \ 139#define cpu_is_pxa21x() \
123 ({ \ 140 ({ \
124 __cpu_is_pxa21x(read_cpuid_id()); \ 141 __cpu_is_pxa21x(read_cpuid_id()); \
125 }) 142 })
126 143
144#define cpu_is_pxa255() \
145 ({ \
146 __cpu_is_pxa255(read_cpuid_id()); \
147 })
148
127#define cpu_is_pxa25x() \ 149#define cpu_is_pxa25x() \
128 ({ \ 150 ({ \
129 __cpu_is_pxa25x(read_cpuid_id()); \ 151 __cpu_is_pxa25x(read_cpuid_id()); \
@@ -149,6 +171,12 @@
149 __cpu_is_pxa320(read_cpuid_id()); \ 171 __cpu_is_pxa320(read_cpuid_id()); \
150 }) 172 })
151 173
174#define cpu_is_pxa930() \
175 ({ \
176 unsigned int id = read_cpuid(CPUID_ID); \
177 __cpu_is_pxa930(id); \
178 })
179
152/* 180/*
153 * CPUID Core Generation Bit 181 * CPUID Core Generation Bit
154 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 182 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
@@ -196,6 +224,11 @@ extern void pxa_gpio_set_value(unsigned gpio, int value);
196 */ 224 */
197extern unsigned int get_memclk_frequency_10khz(void); 225extern unsigned int get_memclk_frequency_10khz(void);
198 226
227/*
228 * register GPIO as reset generator
229 */
230extern int init_gpio_reset(int gpio);
231
199#endif 232#endif
200 233
201#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI) 234#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
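A sketch of how board code would typically use the new CPU predicates (the init function name and the branches are illustrative only):

/* Illustrative only: gate SoC-specific setup on the new helpers. */
static void __init board_init(void)
{
	if (cpu_is_pxa930()) {
		/* PXA930-only pin/clock setup would go here. */
	} else if (cpu_is_pxa255()) {
		/* PXA255 fallback path. */
	}
}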
diff --git a/include/asm-arm/arch-pxa/irqs.h b/include/asm-arm/arch-pxa/irqs.h
index b6c8fe377683..9413121b0ed9 100644
--- a/include/asm-arm/arch-pxa/irqs.h
+++ b/include/asm-arm/arch-pxa/irqs.h
@@ -180,10 +180,13 @@
180#define NR_IRQS (IRQ_LOCOMO_SPI_TEND + 1) 180#define NR_IRQS (IRQ_LOCOMO_SPI_TEND + 1)
181#elif defined(CONFIG_ARCH_LUBBOCK) || \ 181#elif defined(CONFIG_ARCH_LUBBOCK) || \
182 defined(CONFIG_MACH_LOGICPD_PXA270) || \ 182 defined(CONFIG_MACH_LOGICPD_PXA270) || \
183 defined(CONFIG_MACH_TOSA) || \
183 defined(CONFIG_MACH_MAINSTONE) || \ 184 defined(CONFIG_MACH_MAINSTONE) || \
184 defined(CONFIG_MACH_PCM027) || \ 185 defined(CONFIG_MACH_PCM027) || \
185 defined(CONFIG_MACH_MAGICIAN) 186 defined(CONFIG_MACH_MAGICIAN)
186#define NR_IRQS (IRQ_BOARD_END) 187#define NR_IRQS (IRQ_BOARD_END)
188#elif defined(CONFIG_MACH_ZYLONITE)
189#define NR_IRQS (IRQ_BOARD_START + 32)
187#else 190#else
188#define NR_IRQS (IRQ_BOARD_START) 191#define NR_IRQS (IRQ_BOARD_START)
189#endif 192#endif
diff --git a/include/asm-arm/arch-pxa/mfp-pxa2xx.h b/include/asm-arm/arch-pxa/mfp-pxa2xx.h
index db8d890d237c..8de1c0dae624 100644
--- a/include/asm-arm/arch-pxa/mfp-pxa2xx.h
+++ b/include/asm-arm/arch-pxa/mfp-pxa2xx.h
@@ -128,5 +128,6 @@
128#define GPIO84_GPIO MFP_CFG_IN(GPIO84, AF0) 128#define GPIO84_GPIO MFP_CFG_IN(GPIO84, AF0)
129 129
130extern void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num); 130extern void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num);
131extern void pxa2xx_mfp_set_lpm(int mfp, unsigned long lpm);
131extern int gpio_set_wake(unsigned int gpio, unsigned int on); 132extern int gpio_set_wake(unsigned int gpio, unsigned int on);
132#endif /* __ASM_ARCH_MFP_PXA2XX_H */ 133#endif /* __ASM_ARCH_MFP_PXA2XX_H */
diff --git a/include/asm-arm/arch-pxa/mfp-pxa930.h b/include/asm-arm/arch-pxa/mfp-pxa930.h
new file mode 100644
index 000000000000..c4e945ab1923
--- /dev/null
+++ b/include/asm-arm/arch-pxa/mfp-pxa930.h
@@ -0,0 +1,491 @@
1/*
2 * linux/include/asm-arm/arch-pxa/mfp-pxa930.h
3 *
4 * PXA930 specific MFP configuration definitions
5 *
6 * Copyright (C) 2007-2008 Marvell International Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __ASM_ARCH_MFP_PXA9xx_H
14#define __ASM_ARCH_MFP_PXA9xx_H
15
16#include <asm/arch/mfp.h>
17#include <asm/arch/mfp-pxa3xx.h>
18
19/* GPIO */
20#define GPIO46_GPIO MFP_CFG(GPIO46, AF0)
21#define GPIO49_GPIO MFP_CFG(GPIO49, AF0)
22#define GPIO50_GPIO MFP_CFG(GPIO50, AF0)
23#define GPIO51_GPIO MFP_CFG(GPIO51, AF0)
24#define GPIO52_GPIO MFP_CFG(GPIO52, AF0)
25#define GPIO56_GPIO MFP_CFG(GPIO56, AF0)
26#define GPIO58_GPIO MFP_CFG(GPIO58, AF0)
27#define GPIO59_GPIO MFP_CFG(GPIO59, AF0)
28#define GPIO60_GPIO MFP_CFG(GPIO60, AF0)
29#define GPIO61_GPIO MFP_CFG(GPIO61, AF0)
30#define GPIO62_GPIO MFP_CFG(GPIO62, AF0)
31
32#define GSIM_UCLK_GPIO_79 MFP_CFG(GSIM_UCLK, AF0)
33#define GSIM_UIO_GPIO_80 MFP_CFG(GSIM_UIO, AF0)
34#define GSIM_nURST_GPIO_81 MFP_CFG(GSIM_nURST, AF0)
35#define GSIM_UDET_GPIO_82 MFP_CFG(GSIM_UDET, AF0)
36
37#define DF_IO15_GPIO_28 MFP_CFG(DF_IO15, AF0)
38#define DF_IO14_GPIO_29 MFP_CFG(DF_IO14, AF0)
39#define DF_IO13_GPIO_30 MFP_CFG(DF_IO13, AF0)
40#define DF_IO12_GPIO_31 MFP_CFG(DF_IO12, AF0)
41#define DF_IO11_GPIO_32 MFP_CFG(DF_IO11, AF0)
42#define DF_IO10_GPIO_33 MFP_CFG(DF_IO10, AF0)
43#define DF_IO9_GPIO_34 MFP_CFG(DF_IO9, AF0)
44#define DF_IO8_GPIO_35 MFP_CFG(DF_IO8, AF0)
45#define DF_IO7_GPIO_36 MFP_CFG(DF_IO7, AF0)
46#define DF_IO6_GPIO_37 MFP_CFG(DF_IO6, AF0)
47#define DF_IO5_GPIO_38 MFP_CFG(DF_IO5, AF0)
48#define DF_IO4_GPIO_39 MFP_CFG(DF_IO4, AF0)
49#define DF_IO3_GPIO_40 MFP_CFG(DF_IO3, AF0)
50#define DF_IO2_GPIO_41 MFP_CFG(DF_IO2, AF0)
51#define DF_IO1_GPIO_42 MFP_CFG(DF_IO1, AF0)
52#define DF_IO0_GPIO_43 MFP_CFG(DF_IO0, AF0)
53#define DF_nCS0_GPIO_44 MFP_CFG(DF_nCS0, AF0)
54#define DF_nCS1_GPIO_45 MFP_CFG(DF_nCS1, AF0)
55#define DF_nWE_GPIO_46 MFP_CFG(DF_nWE, AF0)
56#define DF_nRE_nOE_GPIO_47 MFP_CFG(DF_nRE_nOE, AF0)
57#define DF_CLE_nOE_GPIO_48 MFP_CFG(DF_CLE_nOE, AF0)
58#define DF_nADV1_ALE_GPIO_49 MFP_CFG(DF_nADV1_ALE, AF0)
59#define DF_nADV2_ALE_GPIO_50 MFP_CFG(DF_nADV2_ALE, AF0)
60#define DF_INT_RnB_GPIO_51 MFP_CFG(DF_INT_RnB, AF0)
61#define DF_SCLK_E_GPIO_52 MFP_CFG(DF_SCLK_E, AF0)
62
63#define DF_ADDR0_GPIO_53 MFP_CFG(DF_ADDR0, AF0)
64#define DF_ADDR1_GPIO_54 MFP_CFG(DF_ADDR1, AF0)
65#define DF_ADDR2_GPIO_55 MFP_CFG(DF_ADDR2, AF0)
66#define DF_ADDR3_GPIO_56 MFP_CFG(DF_ADDR3, AF0)
67#define nXCVREN_GPIO_57 MFP_CFG(nXCVREN, AF0)
68#define nLUA_GPIO_58 MFP_CFG(nLUA, AF0)
69#define nLLA_GPIO_59 MFP_CFG(nLLA, AF0)
70#define nBE0_GPIO_60 MFP_CFG(nBE0, AF0)
71#define nBE1_GPIO_61 MFP_CFG(nBE1, AF0)
72#define RDY_GPIO_62 MFP_CFG(RDY, AF0)
73
74/* Chip Select */
75#define DF_nCS0_nCS2 MFP_CFG_LPM(DF_nCS0, AF3, PULL_HIGH)
76#define DF_nCS1_nCS3 MFP_CFG_LPM(DF_nCS1, AF3, PULL_HIGH)
77
78/* AC97 */
79#define GPIO83_BAC97_SYSCLK MFP_CFG(GPIO83, AF3)
80#define GPIO84_BAC97_SDATA_IN0 MFP_CFG(GPIO84, AF3)
81#define GPIO85_BAC97_BITCLK MFP_CFG(GPIO85, AF3)
82#define GPIO86_BAC97_nRESET MFP_CFG(GPIO86, AF3)
83#define GPIO87_BAC97_SYNC MFP_CFG(GPIO87, AF3)
84#define GPIO88_BAC97_SDATA_OUT MFP_CFG(GPIO88, AF3)
85
86/* I2C */
87#define GPIO39_CI2C_SCL MFP_CFG_LPM(GPIO39, AF3, PULL_HIGH)
88#define GPIO40_CI2C_SDA MFP_CFG_LPM(GPIO40, AF3, PULL_HIGH)
89
90#define GPIO51_CI2C_SCL MFP_CFG_LPM(GPIO51, AF3, PULL_HIGH)
91#define GPIO52_CI2C_SDA MFP_CFG_LPM(GPIO52, AF3, PULL_HIGH)
92
93#define GPIO63_CI2C_SCL MFP_CFG_LPM(GPIO63, AF4, PULL_HIGH)
94#define GPIO64_CI2C_SDA MFP_CFG_LPM(GPIO64, AF4, PULL_HIGH)
95
96#define GPIO77_CI2C_SCL MFP_CFG_LPM(GPIO77, AF2, PULL_HIGH)
97#define GPIO78_CI2C_SDA MFP_CFG_LPM(GPIO78, AF2, PULL_HIGH)
98
99#define GPIO89_CI2C_SCL MFP_CFG_LPM(GPIO89, AF1, PULL_HIGH)
100#define GPIO90_CI2C_SDA MFP_CFG_LPM(GPIO90, AF1, PULL_HIGH)
101
102#define GPIO95_CI2C_SCL MFP_CFG_LPM(GPIO95, AF1, PULL_HIGH)
103#define GPIO96_CI2C_SDA MFP_CFG_LPM(GPIO96, AF1, PULL_HIGH)
104
105#define GPIO97_CI2C_SCL MFP_CFG_LPM(GPIO97, AF3, PULL_HIGH)
106#define GPIO98_CI2C_SDA MFP_CFG_LPM(GPIO98, AF3, PULL_HIGH)
107
108/* QCI */
109#define GPIO63_CI_DD_9 MFP_CFG_LPM(GPIO63, AF1, PULL_LOW)
110#define GPIO64_CI_DD_8 MFP_CFG_LPM(GPIO64, AF1, PULL_LOW)
111#define GPIO65_CI_DD_7 MFP_CFG_LPM(GPIO65, AF1, PULL_LOW)
112#define GPIO66_CI_DD_6 MFP_CFG_LPM(GPIO66, AF1, PULL_LOW)
113#define GPIO67_CI_DD_5 MFP_CFG_LPM(GPIO67, AF1, PULL_LOW)
114#define GPIO68_CI_DD_4 MFP_CFG_LPM(GPIO68, AF1, PULL_LOW)
115#define GPIO69_CI_DD_3 MFP_CFG_LPM(GPIO69, AF1, PULL_LOW)
116#define GPIO70_CI_DD_2 MFP_CFG_LPM(GPIO70, AF1, PULL_LOW)
117#define GPIO71_CI_DD_1 MFP_CFG_LPM(GPIO71, AF1, PULL_LOW)
118#define GPIO72_CI_DD_0 MFP_CFG_LPM(GPIO72, AF1, PULL_LOW)
119#define GPIO73_CI_HSYNC MFP_CFG_LPM(GPIO73, AF1, PULL_LOW)
120#define GPIO74_CI_VSYNC MFP_CFG_LPM(GPIO74, AF1, PULL_LOW)
121#define GPIO75_CI_MCLK MFP_CFG_LPM(GPIO75, AF1, PULL_LOW)
122#define GPIO76_CI_PCLK MFP_CFG_LPM(GPIO76, AF1, PULL_LOW)
123
124/* KEYPAD */
125#define GPIO4_KP_DKIN_4 MFP_CFG_LPM(GPIO4, AF3, FLOAT)
126#define GPIO5_KP_DKIN_5 MFP_CFG_LPM(GPIO5, AF3, FLOAT)
127#define GPIO6_KP_DKIN_6 MFP_CFG_LPM(GPIO6, AF3, FLOAT)
128#define GPIO7_KP_DKIN_7 MFP_CFG_LPM(GPIO7, AF3, FLOAT)
129#define GPIO8_KP_DKIN_4 MFP_CFG_LPM(GPIO8, AF3, FLOAT)
130#define GPIO9_KP_DKIN_5 MFP_CFG_LPM(GPIO9, AF3, FLOAT)
131#define GPIO10_KP_DKIN_6 MFP_CFG_LPM(GPIO10, AF3, FLOAT)
132#define GPIO11_KP_DKIN_7 MFP_CFG_LPM(GPIO11, AF3, FLOAT)
133
134#define GPIO12_KP_DKIN_0 MFP_CFG_LPM(GPIO12, AF2, FLOAT)
135#define GPIO13_KP_DKIN_1 MFP_CFG_LPM(GPIO13, AF2, FLOAT)
136#define GPIO14_KP_DKIN_2 MFP_CFG_LPM(GPIO14, AF2, FLOAT)
137#define GPIO15_KP_DKIN_3 MFP_CFG_LPM(GPIO15, AF2, FLOAT)
138
139#define GPIO41_KP_DKIN_0 MFP_CFG_LPM(GPIO41, AF2, FLOAT)
140#define GPIO42_KP_DKIN_1 MFP_CFG_LPM(GPIO42, AF2, FLOAT)
141#define GPIO43_KP_DKIN_2 MFP_CFG_LPM(GPIO43, AF2, FLOAT)
142#define GPIO44_KP_DKIN_3 MFP_CFG_LPM(GPIO44, AF2, FLOAT)
143#define GPIO41_KP_DKIN_4 MFP_CFG_LPM(GPIO41, AF4, FLOAT)
144#define GPIO42_KP_DKIN_5 MFP_CFG_LPM(GPIO42, AF4, FLOAT)
145
146#define GPIO0_KP_MKIN_0 MFP_CFG_LPM(GPIO0, AF1, FLOAT)
147#define GPIO2_KP_MKIN_1 MFP_CFG_LPM(GPIO2, AF1, FLOAT)
148#define GPIO4_KP_MKIN_2 MFP_CFG_LPM(GPIO4, AF1, FLOAT)
149#define GPIO6_KP_MKIN_3 MFP_CFG_LPM(GPIO6, AF1, FLOAT)
150#define GPIO8_KP_MKIN_4 MFP_CFG_LPM(GPIO8, AF1, FLOAT)
151#define GPIO10_KP_MKIN_5 MFP_CFG_LPM(GPIO10, AF1, FLOAT)
152#define GPIO12_KP_MKIN_6 MFP_CFG_LPM(GPIO12, AF1, FLOAT)
153#define GPIO14_KP_MKIN_7 MFP_CFG(GPIO14, AF1)
154#define GPIO35_KP_MKIN_5 MFP_CFG(GPIO35, AF4)
155
156#define GPIO1_KP_MKOUT_0 MFP_CFG_LPM(GPIO1, AF1, DRIVE_HIGH)
157#define GPIO3_KP_MKOUT_1 MFP_CFG_LPM(GPIO3, AF1, DRIVE_HIGH)
158#define GPIO5_KP_MKOUT_2 MFP_CFG_LPM(GPIO5, AF1, DRIVE_HIGH)
159#define GPIO7_KP_MKOUT_3 MFP_CFG_LPM(GPIO7, AF1, DRIVE_HIGH)
160#define GPIO9_KP_MKOUT_4 MFP_CFG_LPM(GPIO9, AF1, DRIVE_HIGH)
161#define GPIO11_KP_MKOUT_5 MFP_CFG_LPM(GPIO11, AF1, DRIVE_HIGH)
162#define GPIO13_KP_MKOUT_6 MFP_CFG_LPM(GPIO13, AF1, DRIVE_HIGH)
163#define GPIO15_KP_MKOUT_7 MFP_CFG_LPM(GPIO15, AF1, DRIVE_HIGH)
164#define GPIO36_KP_MKOUT_5 MFP_CFG_LPM(GPIO36, AF4, DRIVE_HIGH)
165
166/* LCD */
167#define GPIO17_LCD_FCLK_RD MFP_CFG(GPIO17, AF1)
168#define GPIO18_LCD_LCLK_A0 MFP_CFG(GPIO18, AF1)
169#define GPIO19_LCD_PCLK_WR MFP_CFG(GPIO19, AF1)
170#define GPIO20_LCD_BIAS MFP_CFG(GPIO20, AF1)
171#define GPIO21_LCD_CS MFP_CFG(GPIO21, AF1)
172#define GPIO22_LCD_CS2 MFP_CFG(GPIO22, AF2)
173#define GPIO22_LCD_VSYNC MFP_CFG(GPIO22, AF1)
174#define GPIO23_LCD_DD0 MFP_CFG(GPIO23, AF1)
175#define GPIO24_LCD_DD1 MFP_CFG(GPIO24, AF1)
176#define GPIO25_LCD_DD2 MFP_CFG(GPIO25, AF1)
177#define GPIO26_LCD_DD3 MFP_CFG(GPIO26, AF1)
178#define GPIO27_LCD_DD4 MFP_CFG(GPIO27, AF1)
179#define GPIO28_LCD_DD5 MFP_CFG(GPIO28, AF1)
180#define GPIO29_LCD_DD6 MFP_CFG(GPIO29, AF1)
181#define GPIO30_LCD_DD7 MFP_CFG(GPIO30, AF1)
182#define GPIO31_LCD_DD8 MFP_CFG(GPIO31, AF1)
183#define GPIO32_LCD_DD9 MFP_CFG(GPIO32, AF1)
184#define GPIO33_LCD_DD10 MFP_CFG(GPIO33, AF1)
185#define GPIO34_LCD_DD11 MFP_CFG(GPIO34, AF1)
186#define GPIO35_LCD_DD12 MFP_CFG(GPIO35, AF1)
187#define GPIO36_LCD_DD13 MFP_CFG(GPIO36, AF1)
188#define GPIO37_LCD_DD14 MFP_CFG(GPIO37, AF1)
189#define GPIO38_LCD_DD15 MFP_CFG(GPIO38, AF1)
190#define GPIO39_LCD_DD16 MFP_CFG(GPIO39, AF1)
191#define GPIO40_LCD_DD17 MFP_CFG(GPIO40, AF1)
192#define GPIO41_LCD_CS2 MFP_CFG(GPIO41, AF3)
193#define GPIO42_LCD_VSYNC2 MFP_CFG(GPIO42, AF3)
194#define GPIO44_LCD_DD7 MFP_CFG(GPIO44, AF1)
195
196/* Mini-LCD */
197#define GPIO17_MLCD_FCLK MFP_CFG(GPIO17, AF3)
198#define GPIO18_MLCD_LCLK MFP_CFG(GPIO18, AF3)
199#define GPIO19_MLCD_PCLK MFP_CFG(GPIO19, AF3)
200#define GPIO20_MLCD_BIAS MFP_CFG(GPIO20, AF3)
201#define GPIO23_MLCD_DD0 MFP_CFG(GPIO23, AF3)
202#define GPIO24_MLCD_DD1 MFP_CFG(GPIO24, AF3)
203#define GPIO25_MLCD_DD2 MFP_CFG(GPIO25, AF3)
204#define GPIO26_MLCD_DD3 MFP_CFG(GPIO26, AF3)
205#define GPIO27_MLCD_DD4 MFP_CFG(GPIO27, AF3)
206#define GPIO28_MLCD_DD5 MFP_CFG(GPIO28, AF3)
207#define GPIO29_MLCD_DD6 MFP_CFG(GPIO29, AF3)
208#define GPIO30_MLCD_DD7 MFP_CFG(GPIO30, AF3)
209#define GPIO31_MLCD_DD8 MFP_CFG(GPIO31, AF3)
210#define GPIO32_MLCD_DD9 MFP_CFG(GPIO32, AF3)
211#define GPIO33_MLCD_DD10 MFP_CFG(GPIO33, AF3)
212#define GPIO34_MLCD_DD11 MFP_CFG(GPIO34, AF3)
213#define GPIO35_MLCD_DD12 MFP_CFG(GPIO35, AF3)
214#define GPIO36_MLCD_DD13 MFP_CFG(GPIO36, AF3)
215#define GPIO37_MLCD_DD14 MFP_CFG(GPIO37, AF3)
216#define GPIO38_MLCD_DD15 MFP_CFG(GPIO38, AF3)
217#define GPIO44_MLCD_DD7 MFP_CFG(GPIO44, AF5)
218
219/* MMC1 */
220#define GPIO10_MMC1_DAT3 MFP_CFG(GPIO10, AF4)
221#define GPIO11_MMC1_DAT2 MFP_CFG(GPIO11, AF4)
222#define GPIO12_MMC1_DAT1 MFP_CFG(GPIO12, AF4)
223#define GPIO13_MMC1_DAT0 MFP_CFG(GPIO13, AF4)
224#define GPIO14_MMC1_CMD MFP_CFG(GPIO14, AF4)
225#define GPIO15_MMC1_CLK MFP_CFG(GPIO15, AF4)
226#define GPIO55_MMC1_CMD MFP_CFG(GPIO55, AF3)
227#define GPIO56_MMC1_CLK MFP_CFG(GPIO56, AF3)
228#define GPIO57_MMC1_DAT0 MFP_CFG(GPIO57, AF3)
229#define GPIO58_MMC1_DAT1 MFP_CFG(GPIO58, AF3)
230#define GPIO59_MMC1_DAT2 MFP_CFG(GPIO59, AF3)
231#define GPIO60_MMC1_DAT3 MFP_CFG(GPIO60, AF3)
232
233#define DF_ADDR0_MMC1_CLK MFP_CFG(DF_ADDR0, AF2)
234#define DF_ADDR1_MMC1_CMD MFP_CFG(DF_ADDR1, AF2)
235#define DF_ADDR2_MMC1_DAT0 MFP_CFG(DF_ADDR2, AF2)
236#define DF_ADDR3_MMC1_DAT1 MFP_CFG(DF_ADDR3, AF3)
237#define nXCVREN_MMC1_DAT2 MFP_CFG(nXCVREN, AF2)
238
239/* MMC2 */
240#define GPIO31_MMC2_CMD MFP_CFG(GPIO31, AF7)
241#define GPIO32_MMC2_CLK MFP_CFG(GPIO32, AF7)
242#define GPIO33_MMC2_DAT0 MFP_CFG(GPIO33, AF7)
243#define GPIO34_MMC2_DAT1 MFP_CFG(GPIO34, AF7)
244#define GPIO35_MMC2_DAT2 MFP_CFG(GPIO35, AF7)
245#define GPIO36_MMC2_DAT3 MFP_CFG(GPIO36, AF7)
246
247#define GPIO101_MMC2_DAT3 MFP_CFG(GPIO101, AF1)
248#define GPIO102_MMC2_DAT2 MFP_CFG(GPIO102, AF1)
249#define GPIO103_MMC2_DAT1 MFP_CFG(GPIO103, AF1)
250#define GPIO104_MMC2_DAT0 MFP_CFG(GPIO104, AF1)
251#define GPIO105_MMC2_CMD MFP_CFG(GPIO105, AF1)
252#define GPIO106_MMC2_CLK MFP_CFG(GPIO106, AF1)
253
254#define DF_IO10_MMC2_DAT3 MFP_CFG(DF_IO10, AF3)
255#define DF_IO11_MMC2_DAT2 MFP_CFG(DF_IO11, AF3)
256#define DF_IO12_MMC2_DAT1 MFP_CFG(DF_IO12, AF3)
257#define DF_IO13_MMC2_DAT0 MFP_CFG(DF_IO13, AF3)
258#define DF_IO14_MMC2_CLK MFP_CFG(DF_IO14, AF3)
259#define DF_IO15_MMC2_CMD MFP_CFG(DF_IO15, AF3)
260
261/* BSSP1 */
262#define GPIO12_BSSP1_CLK MFP_CFG(GPIO12, AF3)
263#define GPIO13_BSSP1_FRM MFP_CFG(GPIO13, AF3)
264#define GPIO14_BSSP1_RXD MFP_CFG(GPIO14, AF3)
265#define GPIO15_BSSP1_TXD MFP_CFG(GPIO15, AF3)
266#define GPIO97_BSSP1_CLK MFP_CFG(GPIO97, AF5)
267#define GPIO98_BSSP1_FRM MFP_CFG(GPIO98, AF5)
268
269/* BSSP2 */
270#define GPIO84_BSSP2_SDATA_IN MFP_CFG(GPIO84, AF1)
271#define GPIO85_BSSP2_BITCLK MFP_CFG(GPIO85, AF1)
272#define GPIO86_BSSP2_SYSCLK MFP_CFG(GPIO86, AF1)
273#define GPIO87_BSSP2_SYNC MFP_CFG(GPIO87, AF1)
274#define GPIO88_BSSP2_DATA_OUT MFP_CFG(GPIO88, AF1)
275#define GPIO86_BSSP2_SDATA_IN MFP_CFG(GPIO86, AF4)
276
277/* BSSP3 */
278#define GPIO79_BSSP3_CLK MFP_CFG(GPIO79, AF1)
279#define GPIO80_BSSP3_FRM MFP_CFG(GPIO80, AF1)
280#define GPIO81_BSSP3_TXD MFP_CFG(GPIO81, AF1)
281#define GPIO82_BSSP3_RXD MFP_CFG(GPIO82, AF1)
282#define GPIO83_BSSP3_SYSCLK MFP_CFG(GPIO83, AF1)
283
284/* BSSP4 */
285#define GPIO43_BSSP4_CLK MFP_CFG(GPIO43, AF4)
286#define GPIO44_BSSP4_FRM MFP_CFG(GPIO44, AF4)
287#define GPIO45_BSSP4_TXD MFP_CFG(GPIO45, AF4)
288#define GPIO46_BSSP4_RXD MFP_CFG(GPIO46, AF4)
289
290#define GPIO51_BSSP4_CLK MFP_CFG(GPIO51, AF4)
291#define GPIO52_BSSP4_FRM MFP_CFG(GPIO52, AF4)
292#define GPIO53_BSSP4_TXD MFP_CFG(GPIO53, AF4)
293#define GPIO54_BSSP4_RXD MFP_CFG(GPIO54, AF4)
294
295/* GSSP1 */
296#define GPIO79_GSSP1_CLK MFP_CFG(GPIO79, AF2)
297#define GPIO80_GSSP1_FRM MFP_CFG(GPIO80, AF2)
298#define GPIO81_GSSP1_TXD MFP_CFG(GPIO81, AF2)
299#define GPIO82_GSSP1_RXD MFP_CFG(GPIO82, AF2)
300#define GPIO83_GSSP1_SYSCLK MFP_CFG(GPIO83, AF2)
301
302#define GPIO93_GSSP1_CLK MFP_CFG(GPIO93, AF4)
303#define GPIO94_GSSP1_FRM MFP_CFG(GPIO94, AF4)
304#define GPIO95_GSSP1_TXD MFP_CFG(GPIO95, AF4)
305#define GPIO96_GSSP1_RXD MFP_CFG(GPIO96, AF4)
306
307/* GSSP2 */
308#define GPIO47_GSSP2_CLK MFP_CFG(GPIO47, AF4)
309#define GPIO48_GSSP2_FRM MFP_CFG(GPIO48, AF4)
310#define GPIO49_GSSP2_RXD MFP_CFG(GPIO49, AF4)
311#define GPIO50_GSSP2_TXD MFP_CFG(GPIO50, AF4)
312
313#define GPIO69_GSSP2_CLK MFP_CFG(GPIO69, AF4)
314#define GPIO70_GSSP2_FRM MFP_CFG(GPIO70, AF4)
315#define GPIO71_GSSP2_RXD MFP_CFG(GPIO71, AF4)
316#define GPIO72_GSSP2_TXD MFP_CFG(GPIO72, AF4)
317
318#define GPIO84_GSSP2_RXD MFP_CFG(GPIO84, AF2)
319#define GPIO85_GSSP2_CLK MFP_CFG(GPIO85, AF2)
320#define GPIO86_GSSP2_SYSCLK MFP_CFG(GPIO86, AF2)
321#define GPIO87_GSSP2_FRM MFP_CFG(GPIO87, AF2)
322#define GPIO88_GSSP2_TXD MFP_CFG(GPIO88, AF2)
323#define GPIO86_GSSP2_RXD MFP_CFG(GPIO86, AF5)
324
325#define GPIO103_GSSP2_CLK MFP_CFG(GPIO103, AF2)
326#define GPIO104_GSSP2_FRM MFP_CFG(GPIO104, AF2)
327#define GPIO105_GSSP2_RXD MFP_CFG(GPIO105, AF2)
328#define GPIO106_GSSP2_TXD MFP_CFG(GPIO106, AF2)
329
330/* UART1 - FFUART */
331#define GPIO47_UART1_DSR_N MFP_CFG(GPIO47, AF1)
332#define GPIO48_UART1_DTR_N MFP_CFG(GPIO48, AF1)
333#define GPIO49_UART1_RI MFP_CFG(GPIO49, AF1)
334#define GPIO50_UART1_DCD MFP_CFG(GPIO50, AF1)
335#define GPIO51_UART1_CTS MFP_CFG(GPIO51, AF1)
336#define GPIO52_UART1_RTS MFP_CFG(GPIO52, AF1)
337#define GPIO53_UART1_RXD MFP_CFG(GPIO53, AF1)
338#define GPIO54_UART1_TXD MFP_CFG(GPIO54, AF1)
339
340#define GPIO63_UART1_TXD MFP_CFG(GPIO63, AF2)
341#define GPIO64_UART1_RXD MFP_CFG(GPIO64, AF2)
342#define GPIO65_UART1_DSR MFP_CFG(GPIO65, AF2)
343#define GPIO66_UART1_DTR MFP_CFG(GPIO66, AF2)
344#define GPIO67_UART1_RI MFP_CFG(GPIO67, AF2)
345#define GPIO68_UART1_DCD MFP_CFG(GPIO68, AF2)
346#define GPIO69_UART1_CTS MFP_CFG(GPIO69, AF2)
347#define GPIO70_UART1_RTS MFP_CFG(GPIO70, AF2)
348
349/* UART2 - BTUART */
350#define GPIO91_UART2_RXD MFP_CFG(GPIO91, AF1)
351#define GPIO92_UART2_TXD MFP_CFG(GPIO92, AF1)
352#define GPIO93_UART2_CTS MFP_CFG(GPIO93, AF1)
353#define GPIO94_UART2_RTS MFP_CFG(GPIO94, AF1)
354
355/* UART3 - STUART */
356#define GPIO43_UART3_RTS MFP_CFG(GPIO43, AF3)
357#define GPIO44_UART3_CTS MFP_CFG(GPIO44, AF3)
358#define GPIO45_UART3_RXD MFP_CFG(GPIO45, AF3)
359#define GPIO46_UART3_TXD MFP_CFG(GPIO46, AF3)
360
361#define GPIO75_UART3_RTS MFP_CFG(GPIO75, AF5)
362#define GPIO76_UART3_CTS MFP_CFG(GPIO76, AF5)
363#define GPIO77_UART3_TXD MFP_CFG(GPIO77, AF5)
364#define GPIO78_UART3_RXD MFP_CFG(GPIO78, AF5)
365
366/* DFI */
367#define DF_IO0_DF_IO0 MFP_CFG(DF_IO0, AF2)
368#define DF_IO1_DF_IO1 MFP_CFG(DF_IO1, AF2)
369#define DF_IO2_DF_IO2 MFP_CFG(DF_IO2, AF2)
370#define DF_IO3_DF_IO3 MFP_CFG(DF_IO3, AF2)
371#define DF_IO4_DF_IO4 MFP_CFG(DF_IO4, AF2)
372#define DF_IO5_DF_IO5 MFP_CFG(DF_IO5, AF2)
373#define DF_IO6_DF_IO6 MFP_CFG(DF_IO6, AF2)
374#define DF_IO7_DF_IO7 MFP_CFG(DF_IO7, AF2)
375#define DF_IO8_DF_IO8 MFP_CFG(DF_IO8, AF2)
376#define DF_IO9_DF_IO9 MFP_CFG(DF_IO9, AF2)
377#define DF_IO10_DF_IO10 MFP_CFG(DF_IO10, AF2)
378#define DF_IO11_DF_IO11 MFP_CFG(DF_IO11, AF2)
379#define DF_IO12_DF_IO12 MFP_CFG(DF_IO12, AF2)
380#define DF_IO13_DF_IO13 MFP_CFG(DF_IO13, AF2)
381#define DF_IO14_DF_IO14 MFP_CFG(DF_IO14, AF2)
382#define DF_IO15_DF_IO15 MFP_CFG(DF_IO15, AF2)
383#define DF_nADV1_ALE_DF_nADV1 MFP_CFG(DF_nADV1_ALE, AF2)
384#define DF_nADV2_ALE_DF_nADV2 MFP_CFG(DF_nADV2_ALE, AF2)
385#define DF_nCS0_DF_nCS0 MFP_CFG(DF_nCS0, AF2)
386#define DF_nCS1_DF_nCS1 MFP_CFG(DF_nCS1, AF2)
387#define DF_nRE_nOE_DF_nOE MFP_CFG(DF_nRE_nOE, AF2)
388#define DF_nWE_DF_nWE MFP_CFG(DF_nWE, AF2)
389
390/* DFI - NAND */
391#define DF_CLE_nOE_ND_CLE MFP_CFG_LPM(DF_CLE_nOE, AF1, PULL_HIGH)
392#define DF_INT_RnB_ND_INT_RnB MFP_CFG_LPM(DF_INT_RnB, AF1, PULL_LOW)
393#define DF_IO0_ND_IO0 MFP_CFG_LPM(DF_IO0, AF1, PULL_LOW)
394#define DF_IO1_ND_IO1 MFP_CFG_LPM(DF_IO1, AF1, PULL_LOW)
395#define DF_IO2_ND_IO2 MFP_CFG_LPM(DF_IO2, AF1, PULL_LOW)
396#define DF_IO3_ND_IO3 MFP_CFG_LPM(DF_IO3, AF1, PULL_LOW)
397#define DF_IO4_ND_IO4 MFP_CFG_LPM(DF_IO4, AF1, PULL_LOW)
398#define DF_IO5_ND_IO5 MFP_CFG_LPM(DF_IO5, AF1, PULL_LOW)
399#define DF_IO6_ND_IO6 MFP_CFG_LPM(DF_IO6, AF1, PULL_LOW)
400#define DF_IO7_ND_IO7 MFP_CFG_LPM(DF_IO7, AF1, PULL_LOW)
401#define DF_IO8_ND_IO8 MFP_CFG_LPM(DF_IO8, AF1, PULL_LOW)
402#define DF_IO9_ND_IO9 MFP_CFG_LPM(DF_IO9, AF1, PULL_LOW)
403#define DF_IO10_ND_IO10 MFP_CFG_LPM(DF_IO10, AF1, PULL_LOW)
404#define DF_IO11_ND_IO11 MFP_CFG_LPM(DF_IO11, AF1, PULL_LOW)
405#define DF_IO12_ND_IO12 MFP_CFG_LPM(DF_IO12, AF1, PULL_LOW)
406#define DF_IO13_ND_IO13 MFP_CFG_LPM(DF_IO13, AF1, PULL_LOW)
407#define DF_IO14_ND_IO14 MFP_CFG_LPM(DF_IO14, AF1, PULL_LOW)
408#define DF_IO15_ND_IO15 MFP_CFG_LPM(DF_IO15, AF1, PULL_LOW)
409#define DF_nADV1_ALE_ND_ALE MFP_CFG_LPM(DF_nADV1_ALE, AF1, PULL_HIGH)
410#define DF_nADV2_ALE_ND_ALE MFP_CFG_LPM(DF_nADV2_ALE, AF1, PULL_HIGH)
411#define DF_nADV2_ALE_nCS3 MFP_CFG_LPM(DF_nADV2_ALE, AF3, PULL_HIGH)
412#define DF_nCS0_ND_nCS0 MFP_CFG_LPM(DF_nCS0, AF1, PULL_HIGH)
413#define DF_nCS1_ND_nCS1 MFP_CFG_LPM(DF_nCS1, AF1, PULL_HIGH)
414#define DF_nRE_nOE_ND_nRE MFP_CFG_LPM(DF_nRE_nOE, AF1, PULL_HIGH)
415#define DF_nWE_ND_nWE MFP_CFG_LPM(DF_nWE, AF1, PULL_HIGH)
416
417/* PWM */
418#define GPIO41_PWM0 MFP_CFG_LPM(GPIO41, AF1, PULL_LOW)
419#define GPIO42_PWM1 MFP_CFG_LPM(GPIO42, AF1, PULL_LOW)
420#define GPIO43_PWM3 MFP_CFG_LPM(GPIO43, AF1, PULL_LOW)
421#define GPIO20_PWM0 MFP_CFG_LPM(GPIO20, AF2, PULL_LOW)
422#define GPIO21_PWM2 MFP_CFG_LPM(GPIO21, AF3, PULL_LOW)
423#define GPIO22_PWM3 MFP_CFG_LPM(GPIO22, AF3, PULL_LOW)
424
425/* CIR */
426#define GPIO46_CIR_OUT MFP_CFG(GPIO46, AF1)
427#define GPIO77_CIR_OUT MFP_CFG(GPIO77, AF3)
428
429/* USB P2 */
430#define GPIO0_USB_P2_7 MFP_CFG(GPIO0, AF3)
431#define GPIO15_USB_P2_7 MFP_CFG(GPIO15, AF5)
432#define GPIO16_USB_P2_7 MFP_CFG(GPIO16, AF2)
433#define GPIO48_USB_P2_7 MFP_CFG(GPIO48, AF7)
434#define GPIO49_USB_P2_7 MFP_CFG(GPIO49, AF6)
435#define DF_IO9_USB_P2_7 MFP_CFG(DF_IO9, AF3)
436
437#define GPIO48_USB_P2_8 MFP_CFG(GPIO48, AF2)
438#define GPIO50_USB_P2_7 MFP_CFG_X(GPIO50, AF2, DS02X, FLOAT)
439#define GPIO51_USB_P2_5 MFP_CFG(GPIO51, AF2)
440#define GPIO47_USB_P2_4 MFP_CFG(GPIO47, AF2)
441#define GPIO53_USB_P2_3 MFP_CFG(GPIO53, AF2)
442#define GPIO54_USB_P2_6 MFP_CFG(GPIO54, AF2)
443#define GPIO49_USB_P2_2 MFP_CFG(GPIO49, AF2)
444#define GPIO52_USB_P2_1 MFP_CFG(GPIO52, AF2)
445
446#define GPIO63_USB_P2_8 MFP_CFG(GPIO63, AF3)
447#define GPIO64_USB_P2_7 MFP_CFG(GPIO64, AF3)
448#define GPIO65_USB_P2_6 MFP_CFG(GPIO65, AF3)
449#define GPIO66_USG_P2_5 MFP_CFG(GPIO66, AF3)
450#define GPIO67_USB_P2_4 MFP_CFG(GPIO67, AF3)
451#define GPIO68_USB_P2_3 MFP_CFG(GPIO68, AF3)
452#define GPIO69_USB_P2_2 MFP_CFG(GPIO69, AF3)
453#define GPIO70_USB_P2_1 MFP_CFG(GPIO70, AF3)
454
455/* ULPI */
456#define GPIO31_USB_ULPI_D0 MFP_CFG(GPIO31, AF4)
457#define GPIO30_USB_ULPI_D1 MFP_CFG(GPIO30, AF7)
458#define GPIO33_USB_ULPI_D2 MFP_CFG(GPIO33, AF5)
459#define GPIO34_USB_ULPI_D3 MFP_CFG(GPIO34, AF5)
460#define GPIO35_USB_ULPI_D4 MFP_CFG(GPIO35, AF5)
461#define GPIO36_USB_ULPI_D5 MFP_CFG(GPIO36, AF5)
462#define GPIO41_USB_ULPI_D6 MFP_CFG(GPIO41, AF5)
463#define GPIO42_USB_ULPI_D7 MFP_CFG(GPIO42, AF5)
464#define GPIO37_USB_ULPI_DIR MFP_CFG(GPIO37, AF4)
465#define GPIO38_USB_ULPI_CLK MFP_CFG(GPIO38, AF4)
466#define GPIO39_USB_ULPI_STP MFP_CFG(GPIO39, AF4)
467#define GPIO40_USB_ULPI_NXT MFP_CFG(GPIO40, AF4)
468
469#define GPIO3_CLK26MOUTDMD MFP_CFG(GPIO3, AF3)
470#define GPIO40_CLK26MOUTDMD MFP_CFG(GPIO40, AF7)
471#define GPIO94_CLK26MOUTDMD MFP_CFG(GPIO94, AF5)
472#define GPIO104_CLK26MOUTDMD MFP_CFG(GPIO104, AF4)
473#define DF_ADDR1_CLK26MOUTDMD MFP_CFG(DF_ADDR2, AF3)
474#define DF_ADDR3_CLK26MOUTDMD MFP_CFG(DF_ADDR3, AF3)
475
476#define GPIO14_CLK26MOUT MFP_CFG(GPIO14, AF5)
477#define GPIO38_CLK26MOUT MFP_CFG(GPIO38, AF7)
478#define GPIO92_CLK26MOUT MFP_CFG(GPIO92, AF5)
479#define GPIO105_CLK26MOUT MFP_CFG(GPIO105, AF4)
480
481#define GPIO2_CLK13MOUTDMD MFP_CFG(GPIO2, AF3)
482#define GPIO39_CLK13MOUTDMD MFP_CFG(GPIO39, AF7)
483#define GPIO50_CLK13MOUTDMD MFP_CFG(GPIO50, AF3)
484#define GPIO93_CLK13MOUTDMD MFP_CFG(GPIO93, AF5)
485#define GPIO103_CLK13MOUTDMD MFP_CFG(GPIO103, AF4)
486#define DF_ADDR2_CLK13MOUTDMD MFP_CFG(DF_ADDR2, AF3)
487
488/* 1 wire */
489#define GPIO95_OW_DQ_IN MFP_CFG(GPIO95, AF5)
490
491#endif /* __ASM_ARCH_MFP_PXA9xx_H */
diff --git a/include/asm-arm/arch-pxa/mfp.h b/include/asm-arm/arch-pxa/mfp.h
index 02f6157396d3..e7d58798da67 100644
--- a/include/asm-arm/arch-pxa/mfp.h
+++ b/include/asm-arm/arch-pxa/mfp.h
@@ -210,6 +210,14 @@ enum {
210 MFP_PIN_DF_IO14, 210 MFP_PIN_DF_IO14,
211 MFP_PIN_DF_IO15, 211 MFP_PIN_DF_IO15,
212 212
213 /* additional pins on PXA930 */
214 MFP_PIN_GSIM_UIO,
215 MFP_PIN_GSIM_UCLK,
216 MFP_PIN_GSIM_UDET,
217 MFP_PIN_GSIM_nURST,
218 MFP_PIN_PMIC_INT,
219 MFP_PIN_RDY,
220
213 MFP_PIN_MAX, 221 MFP_PIN_MAX,
214}; 222};
215 223
diff --git a/include/asm-arm/arch-pxa/palmtx.h b/include/asm-arm/arch-pxa/palmtx.h
new file mode 100644
index 000000000000..1e8bccbda510
--- /dev/null
+++ b/include/asm-arm/arch-pxa/palmtx.h
@@ -0,0 +1,106 @@
1/*
2 * GPIOs and interrupts for Palm T|X Handheld Computer
3 *
4 * Based on palmld-gpio.h by Alex Osborne
5 *
6 * Authors: Marek Vasut <marek.vasut@gmail.com>
7 * Cristiano P. <cristianop@users.sourceforge.net>
8 * Jan Herman <2hp@seznam.cz>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#ifndef _INCLUDE_PALMTX_H_
17#define _INCLUDE_PALMTX_H_
18
19/** HERE ARE GPIOs **/
20
21/* GPIOs */
22#define GPIO_NR_PALMTX_GPIO_RESET 1
23
24#define GPIO_NR_PALMTX_POWER_DETECT 12 /* 90 */
25#define GPIO_NR_PALMTX_HOTSYNC_BUTTON_N 10
26#define GPIO_NR_PALMTX_EARPHONE_DETECT 107
27
28/* SD/MMC */
29#define GPIO_NR_PALMTX_SD_DETECT_N 14
30#define GPIO_NR_PALMTX_SD_POWER 114 /* probably */
31#define GPIO_NR_PALMTX_SD_READONLY 115 /* probably */
32
33/* TOUCHSCREEN */
34#define GPIO_NR_PALMTX_WM9712_IRQ 27
35
 36/* IRDA - disable GPIO connected to SD pin of transceiver (TFBS4710?) */
37#define GPIO_NR_PALMTX_IR_DISABLE 40
38
39/* USB */
40#define GPIO_NR_PALMTX_USB_DETECT_N 13
41#define GPIO_NR_PALMTX_USB_POWER 95
42#define GPIO_NR_PALMTX_USB_PULLUP 93
43
44/* LCD/BACKLIGHT */
45#define GPIO_NR_PALMTX_BL_POWER 84
46#define GPIO_NR_PALMTX_LCD_POWER 96
47
48/* LCD BORDER */
49#define GPIO_NR_PALMTX_BORDER_SWITCH 98
50#define GPIO_NR_PALMTX_BORDER_SELECT 22
51
52/* BLUETOOTH */
53#define GPIO_NR_PALMTX_BT_POWER 17
54#define GPIO_NR_PALMTX_BT_RESET 83
55
56/* PCMCIA (WiFi) */
57#define GPIO_NR_PALMTX_PCMCIA_POWER1 94
58#define GPIO_NR_PALMTX_PCMCIA_POWER2 108
59#define GPIO_NR_PALMTX_PCMCIA_RESET 79
60#define GPIO_NR_PALMTX_PCMCIA_READY 116
61
62/* NAND Flash ... this GPIO may be incorrect! */
63#define GPIO_NR_PALMTX_NAND_BUFFER_DIR 79
64
65/* INTERRUPTS */
66#define IRQ_GPIO_PALMTX_SD_DETECT_N IRQ_GPIO(GPIO_NR_PALMTX_SD_DETECT_N)
67#define IRQ_GPIO_PALMTX_WM9712_IRQ IRQ_GPIO(GPIO_NR_PALMTX_WM9712_IRQ)
68#define IRQ_GPIO_PALMTX_USB_DETECT IRQ_GPIO(GPIO_NR_PALMTX_USB_DETECT)
69#define IRQ_GPIO_PALMTX_GPIO_RESET IRQ_GPIO(GPIO_NR_PALMTX_GPIO_RESET)
70
71/** HERE ARE INIT VALUES **/
72
73/* Various addresses */
74#define PALMTX_PCMCIA_PHYS 0x28000000
75#define PALMTX_PCMCIA_VIRT 0xf0000000
76#define PALMTX_PCMCIA_SIZE 0x100000
77
78#define PALMTX_PHYS_RAM_START 0xa0000000
79#define PALMTX_PHYS_IO_START 0x40000000
80
81#define PALMTX_PHYS_FLASH_START PXA_CS0_PHYS /* ChipSelect 0 */
82#define PALMTX_PHYS_NAND_START PXA_CS1_PHYS /* ChipSelect 1 */
83
84/* TOUCHSCREEN */
85#define AC97_LINK_FRAME 21
86
87
88/* BATTERY */
89#define PALMTX_BAT_MAX_VOLTAGE 4000 /* 4.00v current voltage */
90#define PALMTX_BAT_MIN_VOLTAGE 3550 /* 3.55v critical voltage */
 91#define PALMTX_BAT_MAX_CURRENT 0 /* unknown */
92#define PALMTX_BAT_MIN_CURRENT 0 /* unknown */
93#define PALMTX_BAT_MAX_CHARGE 1 /* unknown */
94#define PALMTX_BAT_MIN_CHARGE 1 /* unknown */
95#define PALMTX_MAX_LIFE_MINS 360 /* on-life in minutes */
96
97#define PALMTX_BAT_MEASURE_DELAY (HZ * 1)
98
99/* BACKLIGHT */
100#define PALMTX_MAX_INTENSITY 0xFE
101#define PALMTX_DEFAULT_INTENSITY 0x7E
102#define PALMTX_LIMIT_MASK 0x7F
103#define PALMTX_PRESCALER 0x3F
104#define PALMTX_PERIOD_NS 3500
105
106#endif
diff --git a/include/asm-arm/arch-pxa/pxa27x-udc.h b/include/asm-arm/arch-pxa/pxa27x-udc.h
index bc1cf7d0773a..ab1443f8bd89 100644
--- a/include/asm-arm/arch-pxa/pxa27x-udc.h
+++ b/include/asm-arm/arch-pxa/pxa27x-udc.h
@@ -97,7 +97,7 @@
97#define UP2OCR_IDON (1 << 10) /* OTG ID Read Enable */ 97#define UP2OCR_IDON (1 << 10) /* OTG ID Read Enable */
98#define UP2OCR_HXS (1 << 16) /* Host Port 2 Transceiver Output Select */ 98#define UP2OCR_HXS (1 << 16) /* Host Port 2 Transceiver Output Select */
99#define UP2OCR_HXOE (1 << 17) /* Host Port 2 Transceiver Output Enable */ 99#define UP2OCR_HXOE (1 << 17) /* Host Port 2 Transceiver Output Enable */
100#define UP2OCR_SEOS (1 << 24) /* Single-Ended Output Select */ 100#define UP2OCR_SEOS(x) ((x & 7) << 24) /* Single-Ended Output Select */
101 101
102#define UDCCSN(x) __REG2(0x40600100, (x) << 2) 102#define UDCCSN(x) __REG2(0x40600100, (x) << 2)
103#define UDCCSR0 __REG(0x40600100) /* UDC Control/Status register - Endpoint 0 */ 103#define UDCCSR0 __REG(0x40600100) /* UDC Control/Status register - Endpoint 0 */
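UP2OCR_SEOS becomes a field rather than a single bit. A hedged usage sketch; the value 2 is only a placeholder, the real single-ended output select encodings are in the PXA27x developer manual:

static void example_configure_port2(void)
{
	/* Illustrative SEOS encoding only. */
	UP2OCR = UP2OCR_HXOE | UP2OCR_SEOS(2);
}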
diff --git a/include/asm-arm/arch-pxa/pxa2xx_spi.h b/include/asm-arm/arch-pxa/pxa2xx_spi.h
index 3459fb26ce97..2206cb61a9f9 100644
--- a/include/asm-arm/arch-pxa/pxa2xx_spi.h
+++ b/include/asm-arm/arch-pxa/pxa2xx_spi.h
@@ -41,4 +41,6 @@ struct pxa2xx_spi_chip {
41 void (*cs_control)(u32 command); 41 void (*cs_control)(u32 command);
42}; 42};
43 43
44extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
45
44#endif /*PXA2XX_SPI_H_*/ 46#endif /*PXA2XX_SPI_H_*/
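A sketch of how a board file might register an SSP port through the new helper; the master-data field values are assumptions about the board wiring:

static struct pxa2xx_spi_master example_spi_info = {
	.num_chipselect	= 1,	/* assumed board wiring */
	.enable_dma	= 0,
};

static void __init example_board_init(void)
{
	pxa2xx_set_spi_info(1, &example_spi_info);	/* SSP1 */
}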
diff --git a/include/asm-arm/arch-pxa/pxa3xx_nand.h b/include/asm-arm/arch-pxa/pxa3xx_nand.h
index 81a8937486cb..eb4b190b6657 100644
--- a/include/asm-arm/arch-pxa/pxa3xx_nand.h
+++ b/include/asm-arm/arch-pxa/pxa3xx_nand.h
@@ -15,4 +15,6 @@ struct pxa3xx_nand_platform_data {
15 struct mtd_partition *parts; 15 struct mtd_partition *parts;
16 unsigned int nr_parts; 16 unsigned int nr_parts;
17}; 17};
18
19extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info);
18#endif /* __ASM_ARCH_PXA3XX_NAND_H */ 20#endif /* __ASM_ARCH_PXA3XX_NAND_H */
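The platform data fields visible in this hunk (parts/nr_parts) suggest the usual registration pattern; a hedged board-file sketch with an example partition layout:

static struct mtd_partition example_nand_parts[] = {
	{ .name = "bootloader", .offset = 0,     .size = SZ_1M },
	{ .name = "filesystem", .offset = SZ_1M, .size = MTDPART_SIZ_FULL },
};

static struct pxa3xx_nand_platform_data example_nand_info = {
	.parts		= example_nand_parts,
	.nr_parts	= ARRAY_SIZE(example_nand_parts),
};

static void __init example_board_init(void)
{
	pxa3xx_set_nand_info(&example_nand_info);
}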
diff --git a/include/asm-arm/arch-pxa/pxafb.h b/include/asm-arm/arch-pxa/pxafb.h
index bbd22396841a..daf018d0c604 100644
--- a/include/asm-arm/arch-pxa/pxafb.h
+++ b/include/asm-arm/arch-pxa/pxafb.h
@@ -71,7 +71,8 @@ struct pxafb_mode_info {
71 71
72 u_char bpp; 72 u_char bpp;
73 u_int cmap_greyscale:1, 73 u_int cmap_greyscale:1,
74 unused:31; 74 depth:8,
75 unused:23;
75 76
76 /* Parallel Mode Timing */ 77 /* Parallel Mode Timing */
77 u_char hsync_len; 78 u_char hsync_len;
diff --git a/include/asm-arm/arch-pxa/regs-lcd.h b/include/asm-arm/arch-pxa/regs-lcd.h
index 3ba464c913a5..820a189684a9 100644
--- a/include/asm-arm/arch-pxa/regs-lcd.h
+++ b/include/asm-arm/arch-pxa/regs-lcd.h
@@ -27,6 +27,12 @@
27#define LCCR3_4BPP (2 << 24) 27#define LCCR3_4BPP (2 << 24)
28#define LCCR3_8BPP (3 << 24) 28#define LCCR3_8BPP (3 << 24)
29#define LCCR3_16BPP (4 << 24) 29#define LCCR3_16BPP (4 << 24)
30#define LCCR3_18BPP (5 << 24)
31#define LCCR3_18BPP_P (6 << 24)
32#define LCCR3_19BPP (7 << 24)
33#define LCCR3_19BPP_P (1 << 29)
34#define LCCR3_24BPP ((1 << 29) | (1 << 24))
35#define LCCR3_25BPP ((1 << 29) | (2 << 24))
30 36
31#define LCCR3_PDFOR_0 (0 << 30) 37#define LCCR3_PDFOR_0 (0 << 30)
32#define LCCR3_PDFOR_1 (1 << 30) 38#define LCCR3_PDFOR_1 (1 << 30)
diff --git a/include/asm-arm/arch-pxa/regs-ssp.h b/include/asm-arm/arch-pxa/regs-ssp.h
index 0255328c3c18..3c04cde2cf1f 100644
--- a/include/asm-arm/arch-pxa/regs-ssp.h
+++ b/include/asm-arm/arch-pxa/regs-ssp.h
@@ -20,6 +20,10 @@
20#define SSTSS (0x38) /* SSP Timeslot Status */ 20#define SSTSS (0x38) /* SSP Timeslot Status */
21#define SSACD (0x3C) /* SSP Audio Clock Divider */ 21#define SSACD (0x3C) /* SSP Audio Clock Divider */
22 22
23#if defined(CONFIG_PXA3xx)
24#define SSACDD (0x40) /* SSP Audio Clock Dither Divider */
25#endif
26
23/* Common PXA2xx bits first */ 27/* Common PXA2xx bits first */
24#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */ 28#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
25#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */ 29#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
@@ -29,10 +33,12 @@
29#define SSCR0_National (0x2 << 4) /* National Microwire */ 33#define SSCR0_National (0x2 << 4) /* National Microwire */
30#define SSCR0_ECS (1 << 6) /* External clock select */ 34#define SSCR0_ECS (1 << 6) /* External clock select */
31#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */ 35#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
36
32#if defined(CONFIG_PXA25x) 37#if defined(CONFIG_PXA25x)
33#define SSCR0_SCR (0x0000ff00) /* Serial Clock Rate (mask) */ 38#define SSCR0_SCR (0x0000ff00) /* Serial Clock Rate (mask) */
34#define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */ 39#define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */
35#elif defined(CONFIG_PXA27x) 40
41#elif defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
36#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */ 42#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */
37#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */ 43#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
38#define SSCR0_EDSS (1 << 20) /* Extended data size select */ 44#define SSCR0_EDSS (1 << 20) /* Extended data size select */
@@ -45,6 +51,10 @@
45#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */ 51#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
46#endif 52#endif
47 53
54#if defined(CONFIG_PXA3xx)
55#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */
56#endif
57
48#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */ 58#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
49#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */ 59#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
50#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */ 60#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
@@ -109,5 +119,9 @@
109#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ 119#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
110#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ 120#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
111#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ 121#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
122#if defined(CONFIG_PXA3xx)
123#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
124#endif
125
112 126
113#endif /* __ASM_ARCH_REGS_SSP_H */ 127#endif /* __ASM_ARCH_REGS_SSP_H */
diff --git a/include/asm-arm/arch-pxa/system.h b/include/asm-arm/arch-pxa/system.h
index ba7e132de1b3..6956fc5235f8 100644
--- a/include/asm-arm/arch-pxa/system.h
+++ b/include/asm-arm/arch-pxa/system.h
@@ -21,19 +21,4 @@ static inline void arch_idle(void)
21} 21}
22 22
23 23
24static inline void arch_reset(char mode) 24void arch_reset(char mode);
25{
26 if (cpu_is_pxa2xx())
27 RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR;
28
29 if (mode == 's') {
30 /* Jump into ROM at address 0 */
31 cpu_reset(0);
32 } else {
33 /* Initialize the watchdog and let it fire */
34 OWER = OWER_WME;
35 OSSR = OSSR_M3;
36 OSMR3 = OSCR + 368640; /* ... in 100 ms */
37 }
38}
39
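The inline body removed here moves behind an out-of-line arch_reset() (the new arch/arm/mach-pxa/reset.c in this series). A sketch of what that replacement has to preserve, based on the deleted code; the real version may add GPIO-based reset handling via init_gpio_reset():

/* Sketch of the out-of-line replacement in mach-pxa/reset.c. */
void arch_reset(char mode)
{
	if (cpu_is_pxa2xx())
		RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR;

	if (mode == 's') {
		cpu_reset(0);		/* jump into ROM at address 0 */
	} else {
		OWER = OWER_WME;	/* arm the watchdog ... */
		OSSR = OSSR_M3;
		OSMR3 = OSCR + 368640;	/* ... and let it fire in 100 ms */
	}
}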
diff --git a/include/asm-arm/arch-pxa/tosa.h b/include/asm-arm/arch-pxa/tosa.h
index c5b6fde6907c..a72803f0461b 100644
--- a/include/asm-arm/arch-pxa/tosa.h
+++ b/include/asm-arm/arch-pxa/tosa.h
@@ -25,21 +25,18 @@
25 */ 25 */
26#define TOSA_SCOOP_GPIO_BASE NR_BUILTIN_GPIO 26#define TOSA_SCOOP_GPIO_BASE NR_BUILTIN_GPIO
27#define TOSA_SCOOP_PXA_VCORE1 SCOOP_GPCR_PA11 27#define TOSA_SCOOP_PXA_VCORE1 SCOOP_GPCR_PA11
28#define TOSA_SCOOP_TC6393_REST_IN SCOOP_GPCR_PA12 28#define TOSA_GPIO_TC6393XB_REST_IN (TOSA_SCOOP_GPIO_BASE + 1)
29#define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2) 29#define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2)
30#define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3) 30#define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3)
31#define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4) 31#define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4)
32#define TOSA_SCOOP_AUD_PWR_ON SCOOP_GPCR_PA16 32#define TOSA_SCOOP_AUD_PWR_ON SCOOP_GPCR_PA16
33#define TOSA_SCOOP_BT_RESET SCOOP_GPCR_PA17 33#define TOSA_GPIO_BT_RESET (TOSA_SCOOP_GPIO_BASE + 6)
34#define TOSA_SCOOP_BT_PWR_EN SCOOP_GPCR_PA18 34#define TOSA_GPIO_BT_PWR_EN (TOSA_SCOOP_GPIO_BASE + 7)
35#define TOSA_SCOOP_AC_IN_OL SCOOP_GPCR_PA19 35#define TOSA_SCOOP_AC_IN_OL SCOOP_GPCR_PA19
36 36
37/* GPIO Direction 1 : output mode / 0:input mode */ 37/* GPIO Direction 1 : output mode / 0:input mode */
38#define TOSA_SCOOP_IO_DIR ( TOSA_SCOOP_PXA_VCORE1 | TOSA_SCOOP_TC6393_REST_IN | \ 38#define TOSA_SCOOP_IO_DIR (TOSA_SCOOP_PXA_VCORE1 | \
39 TOSA_SCOOP_AUD_PWR_ON |\ 39 TOSA_SCOOP_AUD_PWR_ON)
40 TOSA_SCOOP_BT_RESET | TOSA_SCOOP_BT_PWR_EN )
41/* GPIO out put level when init 1: Hi */
42#define TOSA_SCOOP_IO_OUT ( TOSA_SCOOP_TC6393_REST_IN )
43 40
44/* 41/*
45 * SCOOP2 jacket GPIOs 42 * SCOOP2 jacket GPIOs
@@ -49,16 +46,34 @@
49#define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1) 46#define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1)
50#define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2) 47#define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2)
51#define TOSA_GPIO_USB_PULLUP (TOSA_SCOOP_JC_GPIO_BASE + 3) 48#define TOSA_GPIO_USB_PULLUP (TOSA_SCOOP_JC_GPIO_BASE + 3)
52#define TOSA_SCOOP_JC_TC6393_SUSPEND SCOOP_GPCR_PA15 49#define TOSA_GPIO_TC6393XB_SUSPEND (TOSA_SCOOP_JC_GPIO_BASE + 4)
53#define TOSA_SCOOP_JC_TC3693_L3V_ON SCOOP_GPCR_PA16 50#define TOSA_GPIO_TC6393XB_L3V_ON (TOSA_SCOOP_JC_GPIO_BASE + 5)
54#define TOSA_SCOOP_JC_WLAN_DETECT SCOOP_GPCR_PA17 51#define TOSA_SCOOP_JC_WLAN_DETECT SCOOP_GPCR_PA17
55#define TOSA_GPIO_WLAN_LED (TOSA_SCOOP_JC_GPIO_BASE + 7) 52#define TOSA_GPIO_WLAN_LED (TOSA_SCOOP_JC_GPIO_BASE + 7)
56#define TOSA_SCOOP_JC_CARD_LIMIT_SEL SCOOP_GPCR_PA19 53#define TOSA_SCOOP_JC_CARD_LIMIT_SEL SCOOP_GPCR_PA19
57 54
58/* GPIO Direction 1 : output mode / 0:input mode */ 55/* GPIO Direction 1 : output mode / 0:input mode */
59#define TOSA_SCOOP_JC_IO_DIR ( \ 56#define TOSA_SCOOP_JC_IO_DIR (TOSA_SCOOP_JC_CARD_LIMIT_SEL)
60 TOSA_SCOOP_JC_TC6393_SUSPEND | TOSA_SCOOP_JC_TC3693_L3V_ON | \ 57
61 TOSA_SCOOP_JC_CARD_LIMIT_SEL ) 58/*
59 * TC6393XB GPIOs
60 */
61#define TOSA_TC6393XB_GPIO_BASE (NR_BUILTIN_GPIO + 2 * 12)
62#define TOSA_TC6393XB_GPIO(i) (TOSA_TC6393XB_GPIO_BASE + (i))
63#define TOSA_TC6393XB_GPIO_BIT(gpio) (1 << (gpio - TOSA_TC6393XB_GPIO_BASE))
64
65#define TOSA_GPIO_TG_ON (TOSA_TC6393XB_GPIO_BASE + 0)
66#define TOSA_GPIO_L_MUTE (TOSA_TC6393XB_GPIO_BASE + 1)
67#define TOSA_GPIO_BL_C20MA (TOSA_TC6393XB_GPIO_BASE + 3)
68#define TOSA_GPIO_CARD_VCC_ON (TOSA_TC6393XB_GPIO_BASE + 4)
69#define TOSA_GPIO_CHARGE_OFF (TOSA_TC6393XB_GPIO_BASE + 6)
70#define TOSA_GPIO_CHARGE_OFF_JC (TOSA_TC6393XB_GPIO_BASE + 7)
71#define TOSA_GPIO_BAT0_V_ON (TOSA_TC6393XB_GPIO_BASE + 9)
72#define TOSA_GPIO_BAT1_V_ON (TOSA_TC6393XB_GPIO_BASE + 10)
73#define TOSA_GPIO_BU_CHRG_ON (TOSA_TC6393XB_GPIO_BASE + 11)
74#define TOSA_GPIO_BAT_SW_ON (TOSA_TC6393XB_GPIO_BASE + 12)
75#define TOSA_GPIO_BAT0_TH_ON (TOSA_TC6393XB_GPIO_BASE + 14)
76#define TOSA_GPIO_BAT1_TH_ON (TOSA_TC6393XB_GPIO_BASE + 15)
62 77
63/* 78/*
64 * Timing Generator 79 * Timing Generator
@@ -84,13 +99,13 @@
84#define TOSA_GPIO_JACKET_DETECT (7) 99#define TOSA_GPIO_JACKET_DETECT (7)
85#define TOSA_GPIO_nSD_DETECT (9) 100#define TOSA_GPIO_nSD_DETECT (9)
86#define TOSA_GPIO_nSD_INT (10) 101#define TOSA_GPIO_nSD_INT (10)
87#define TOSA_GPIO_TC6393_CLK (11) 102#define TOSA_GPIO_TC6393XB_CLK (11)
88#define TOSA_GPIO_BAT1_CRG (12) 103#define TOSA_GPIO_BAT1_CRG (12)
89#define TOSA_GPIO_CF_CD (13) 104#define TOSA_GPIO_CF_CD (13)
90#define TOSA_GPIO_BAT0_CRG (14) 105#define TOSA_GPIO_BAT0_CRG (14)
91#define TOSA_GPIO_TC6393_INT (15) 106#define TOSA_GPIO_TC6393XB_INT (15)
92#define TOSA_GPIO_BAT0_LOW (17) 107#define TOSA_GPIO_BAT0_LOW (17)
93#define TOSA_GPIO_TC6393_RDY (18) 108#define TOSA_GPIO_TC6393XB_RDY (18)
94#define TOSA_GPIO_ON_RESET (19) 109#define TOSA_GPIO_ON_RESET (19)
95#define TOSA_GPIO_EAR_IN (20) 110#define TOSA_GPIO_EAR_IN (20)
96#define TOSA_GPIO_CF_IRQ (21) /* CF slot0 Ready */ 111#define TOSA_GPIO_CF_IRQ (21) /* CF slot0 Ready */
@@ -99,6 +114,7 @@
99#define TOSA_GPIO_TP_INT (32) /* Touch Panel pen down interrupt */ 114#define TOSA_GPIO_TP_INT (32) /* Touch Panel pen down interrupt */
100#define TOSA_GPIO_JC_CF_IRQ (36) /* CF slot1 Ready */ 115#define TOSA_GPIO_JC_CF_IRQ (36) /* CF slot1 Ready */
101#define TOSA_GPIO_BAT_LOCKED (38) /* Battery locked */ 116#define TOSA_GPIO_BAT_LOCKED (38) /* Battery locked */
117#define TOSA_GPIO_IRDA_TX (47)
102#define TOSA_GPIO_TG_SPI_SCLK (81) 118#define TOSA_GPIO_TG_SPI_SCLK (81)
103#define TOSA_GPIO_TG_SPI_CS (82) 119#define TOSA_GPIO_TG_SPI_CS (82)
104#define TOSA_GPIO_TG_SPI_MOSI (83) 120#define TOSA_GPIO_TG_SPI_MOSI (83)
@@ -137,7 +153,7 @@
137#define TOSA_IRQ_GPIO_BAT1_CRG IRQ_GPIO(TOSA_GPIO_BAT1_CRG) 153#define TOSA_IRQ_GPIO_BAT1_CRG IRQ_GPIO(TOSA_GPIO_BAT1_CRG)
138#define TOSA_IRQ_GPIO_CF_CD IRQ_GPIO(TOSA_GPIO_CF_CD) 154#define TOSA_IRQ_GPIO_CF_CD IRQ_GPIO(TOSA_GPIO_CF_CD)
139#define TOSA_IRQ_GPIO_BAT0_CRG IRQ_GPIO(TOSA_GPIO_BAT0_CRG) 155#define TOSA_IRQ_GPIO_BAT0_CRG IRQ_GPIO(TOSA_GPIO_BAT0_CRG)
140#define TOSA_IRQ_GPIO_TC6393_INT IRQ_GPIO(TOSA_GPIO_TC6393_INT) 156#define TOSA_IRQ_GPIO_TC6393XB_INT IRQ_GPIO(TOSA_GPIO_TC6393XB_INT)
141#define TOSA_IRQ_GPIO_BAT0_LOW IRQ_GPIO(TOSA_GPIO_BAT0_LOW) 157#define TOSA_IRQ_GPIO_BAT0_LOW IRQ_GPIO(TOSA_GPIO_BAT0_LOW)
142#define TOSA_IRQ_GPIO_EAR_IN IRQ_GPIO(TOSA_GPIO_EAR_IN) 158#define TOSA_IRQ_GPIO_EAR_IN IRQ_GPIO(TOSA_GPIO_EAR_IN)
143#define TOSA_IRQ_GPIO_CF_IRQ IRQ_GPIO(TOSA_GPIO_CF_IRQ) 159#define TOSA_IRQ_GPIO_CF_IRQ IRQ_GPIO(TOSA_GPIO_CF_IRQ)
diff --git a/include/asm-arm/arch-pxa/tosa_bt.h b/include/asm-arm/arch-pxa/tosa_bt.h
new file mode 100644
index 000000000000..efc3c3d3b75d
--- /dev/null
+++ b/include/asm-arm/arch-pxa/tosa_bt.h
@@ -0,0 +1,22 @@
1/*
2 * Tosa bluetooth built-in chip control.
3 *
4 * Later it may be shared with some other platforms.
5 *
6 * Copyright (c) 2008 Dmitry Baryshkov
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13#ifndef TOSA_BT_H
14#define TOSA_BT_H
15
16struct tosa_bt_data {
17 int gpio_pwr;
18 int gpio_reset;
19};
20
21#endif
22
diff --git a/include/asm-arm/arch-pxa/uncompress.h b/include/asm-arm/arch-pxa/uncompress.h
index dadf4c20b622..f4551269aaf2 100644
--- a/include/asm-arm/arch-pxa/uncompress.h
+++ b/include/asm-arm/arch-pxa/uncompress.h
@@ -11,11 +11,11 @@
11 11
12#include <linux/serial_reg.h> 12#include <linux/serial_reg.h>
13#include <asm/arch/pxa-regs.h> 13#include <asm/arch/pxa-regs.h>
14#include <asm/mach-types.h>
14 15
15#define __REG(x) ((volatile unsigned long *)x) 16#define __REG(x) ((volatile unsigned long *)x)
16
17#define UART FFUART
18 17
18static volatile unsigned long *UART = FFUART;
19 19
20static inline void putc(char c) 20static inline void putc(char c)
21{ 21{
@@ -33,8 +33,13 @@ static inline void flush(void)
33{ 33{
34} 34}
35 35
36static inline void arch_decomp_setup(void)
37{
38 if (machine_is_littleton())
39 UART = STUART;
40}
41
36/* 42/*
37 * nothing to do 43 * nothing to do
38 */ 44 */
39#define arch_decomp_setup()
40#define arch_decomp_wdog() 45#define arch_decomp_wdog()
diff --git a/include/asm-arm/arch-pxa/zylonite.h b/include/asm-arm/arch-pxa/zylonite.h
index de577de8d18c..0d35ca04731e 100644
--- a/include/asm-arm/arch-pxa/zylonite.h
+++ b/include/asm-arm/arch-pxa/zylonite.h
@@ -16,6 +16,8 @@ struct platform_mmc_slot {
16extern struct platform_mmc_slot zylonite_mmc_slot[]; 16extern struct platform_mmc_slot zylonite_mmc_slot[];
17 17
18extern int gpio_eth_irq; 18extern int gpio_eth_irq;
19extern int gpio_debug_led1;
20extern int gpio_debug_led2;
19 21
20extern int wm9713_irq; 22extern int wm9713_irq;
21 23
diff --git a/include/asm-arm/arch-sa1100/h3600.h b/include/asm-arm/arch-sa1100/h3600.h
index 1b6355971574..3ca0ecf095e6 100644
--- a/include/asm-arm/arch-sa1100/h3600.h
+++ b/include/asm-arm/arch-sa1100/h3600.h
@@ -23,6 +23,11 @@
23#ifndef _INCLUDE_H3600_H_ 23#ifndef _INCLUDE_H3600_H_
24#define _INCLUDE_H3600_H_ 24#define _INCLUDE_H3600_H_
25 25
26typedef int __bitwise pm_request_t;
27
28#define PM_SUSPEND ((__force pm_request_t) 1) /* enter D1-D3 */
29#define PM_RESUME ((__force pm_request_t) 2) /* enter D0 */
30
26/* generalized support for H3xxx series Compaq Pocket PC's */ 31/* generalized support for H3xxx series Compaq Pocket PC's */
27#define machine_is_h3xxx() (machine_is_h3100() || machine_is_h3600() || machine_is_h3800()) 32#define machine_is_h3xxx() (machine_is_h3100() || machine_is_h3600() || machine_is_h3800())
28 33
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index a32b86ac62aa..af64676650a2 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -260,7 +260,7 @@ static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
260static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt, 260static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
261 int *slots_per_op) 261 int *slots_per_op)
262{ 262{
263 static const int slot_count_table[] = { 0, 263 static const char slot_count_table[] = {
264 1, 1, 1, 1, /* 01 - 04 */ 264 1, 1, 1, 1, /* 01 - 04 */
265 2, 2, 2, 2, /* 05 - 08 */ 265 2, 2, 2, 2, /* 05 - 08 */
266 4, 4, 4, 4, /* 09 - 12 */ 266 4, 4, 4, 4, /* 09 - 12 */
@@ -270,7 +270,7 @@ static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
270 8, 8, 8, 8, /* 25 - 28 */ 270 8, 8, 8, 8, /* 25 - 28 */
271 8, 8, 8, 8, /* 29 - 32 */ 271 8, 8, 8, 8, /* 29 - 32 */
272 }; 272 };
273 *slots_per_op = slot_count_table[src_cnt]; 273 *slots_per_op = slot_count_table[src_cnt - 1];
274 return *slots_per_op; 274 return *slots_per_op;
275} 275}
276 276
diff --git a/include/asm-arm/ide.h b/include/asm-arm/ide.h
index 88f4d231ce4f..a48019f99d08 100644
--- a/include/asm-arm/ide.h
+++ b/include/asm-arm/ide.h
@@ -13,10 +13,6 @@
13 13
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15 15
16#ifndef MAX_HWIFS
17#define MAX_HWIFS 4
18#endif
19
20#define __ide_mm_insw(port,addr,len) readsw(port,addr,len) 16#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
21#define __ide_mm_insl(port,addr,len) readsl(port,addr,len) 17#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
22#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len) 18#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len)
diff --git a/include/asm-arm/kgdb.h b/include/asm-arm/kgdb.h
new file mode 100644
index 000000000000..67af4b841984
--- /dev/null
+++ b/include/asm-arm/kgdb.h
@@ -0,0 +1,104 @@
1/*
2 * ARM KGDB support
3 *
4 * Author: Deepak Saxena <dsaxena@mvista.com>
5 *
6 * Copyright (C) 2002 MontaVista Software Inc.
7 *
8 */
9
10#ifndef __ARM_KGDB_H__
11#define __ARM_KGDB_H__
12
13#include <linux/ptrace.h>
14
15/*
16 * GDB assumes that we're a user process being debugged, so
17 * it will send us an SWI command to write into memory as the
18 * debug trap. When an SWI occurs, the next instruction addr is
19 * placed into R14_svc before jumping to the vector trap.
20 * This doesn't work for kernel debugging as we are already in SVC
21 * mode: we would lose the kernel's LR in the trap, and losing the
22 * kernel's LR is a bad thing.
23 *
24 * By doing this as an undefined instruction trap, we force a mode
25 * switch from SVC to UND mode, allowing us to save full kernel state.
26 *
27 * We also define a KGDB_COMPILED_BREAK which can be used to compile
28 * in breakpoints. This is important for things like sysrq-G and for
29 * the initial breakpoint from trap_init().
30 *
31 * Note to ARM HW designers: Add real trap support like SH && PPC to
32 * make our lives much much simpler. :)
33 */
34#define BREAK_INSTR_SIZE 4
35#define GDB_BREAKINST 0xef9f0001
36#define KGDB_BREAKINST 0xe7ffdefe
37#define KGDB_COMPILED_BREAK 0xe7ffdeff
38#define CACHE_FLUSH_IS_SAFE 1
39
40#ifndef __ASSEMBLY__
41
42static inline void arch_kgdb_breakpoint(void)
43{
44 asm(".word 0xe7ffdeff");
45}
46
47extern void kgdb_handle_bus_error(void);
48extern int kgdb_fault_expected;
49
50#endif /* !__ASSEMBLY__ */
51
52/*
53 * From Kevin Hilman:
54 *
55 * gdb is expecting the following registers layout.
56 *
57 * r0-r15: 1 long word each
58 * f0-f7: unused, 3 long words each !!
59 * fps: unused, 1 long word
60 * cpsr: 1 long word
61 *
62 * Even though f0-f7 and fps are not used, they need to be
63 * present in the registers sent for correct processing in
64 * the host-side gdb.
65 *
66 * In particular, it is crucial that CPSR is in the right place,
67 * otherwise gdb will not be able to correctly interpret stepping over
68 * conditional branches.
69 */
70#define _GP_REGS 16
71#define _FP_REGS 8
72#define _EXTRA_REGS 2
73#define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
74
75#define KGDB_MAX_NO_CPUS 1
76#define BUFMAX 400
77#define NUMREGBYTES (GDB_MAX_REGS << 2)
78#define NUMCRITREGBYTES (32 << 2)
79
80#define _R0 0
81#define _R1 1
82#define _R2 2
83#define _R3 3
84#define _R4 4
85#define _R5 5
86#define _R6 6
87#define _R7 7
88#define _R8 8
89#define _R9 9
90#define _R10 10
91#define _FP 11
92#define _IP 12
93#define _SPT 13
94#define _LR 14
95#define _PC 15
96#define _CPSR (GDB_MAX_REGS - 1)
97
98/*
99 * So that we can denote the end of a frame for tracing,
100 * in the simple case:
101 */
102#define CFI_END_FRAME(func) __CFI_END_FRAME(_PC, _SPT, func)
103
104#endif /* __ASM_KGDB_H__ */
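Editor's note: to make the header's undefined-instruction scheme concrete, here is a rough sketch of hooking one of the breakpoint encodings on ARM. It mirrors what arch/arm/kernel/kgdb.c in this series does, but the handler and function names below are illustrative, not quoted from the patch (needs <linux/kgdb.h>, <linux/signal.h>, <asm/traps.h>, <asm/kgdb.h>):

/* Illustrative handler: the real ones live in arch/arm/kernel/kgdb.c. */
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
{
        kgdb_handle_exception(1, SIGTRAP, 0, regs);     /* enter the debugger */
        return 0;
}

static struct undef_hook kgdb_brkpt_hook = {
        .instr_mask     = 0xffffffff,
        .instr_val      = KGDB_BREAKINST,
        .fn             = kgdb_brk_fn,
};

static void example_kgdb_hook_init(void)
{
        /* Hitting the encoding now forces the SVC -> UND mode switch
         * described above, so full kernel state can be saved. */
        register_undef_hook(&kgdb_brkpt_hook);
}

The KGDB_COMPILED_BREAK encoding is registered the same way with its own handler, which additionally steps the PC past the compiled-in breakpoint word.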
diff --git a/include/asm-arm/kvm.h b/include/asm-arm/kvm.h
deleted file mode 100644
index cb3c08cbcb9e..000000000000
--- a/include/asm-arm/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_ARM_H
2#define __LINUX_KVM_ARM_H
3
4/* arm does not support KVM */
5
6#endif
diff --git a/include/asm-arm/mach/udc_pxa2xx.h b/include/asm-arm/mach/udc_pxa2xx.h
index f9f3606986c2..9e5ed7c0f27f 100644
--- a/include/asm-arm/mach/udc_pxa2xx.h
+++ b/include/asm-arm/mach/udc_pxa2xx.h
@@ -23,6 +23,7 @@ struct pxa2xx_udc_mach_info {
23 */ 23 */
24 bool gpio_vbus_inverted; 24 bool gpio_vbus_inverted;
25 u16 gpio_vbus; /* high == vbus present */ 25 u16 gpio_vbus; /* high == vbus present */
26 bool gpio_pullup_inverted;
26 u16 gpio_pullup; /* high == pullup activated */ 27 u16 gpio_pullup; /* high == pullup activated */
27}; 28};
28 29
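Editor's note: a hedged example of how board support code might describe an active-low D+ pullup with the new flag; the GPIO numbers and structure name are invented for illustration:

static struct pxa2xx_udc_mach_info example_udc_info __initdata = {
        .gpio_vbus              = 83,   /* VBUS sense, example GPIO */
        .gpio_pullup            = 84,   /* D+ pullup control, example GPIO */
        .gpio_pullup_inverted   = true, /* driving the line low enables the pullup */
};

/* handed to the UDC driver via pxa_set_udc_info(&example_udc_info); */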
diff --git a/include/asm-arm/page-nommu.h b/include/asm-arm/page-nommu.h
index a1bcad060480..ea1cde84f500 100644
--- a/include/asm-arm/page-nommu.h
+++ b/include/asm-arm/page-nommu.h
@@ -7,6 +7,7 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10
10#ifndef _ASMARM_PAGE_NOMMU_H 11#ifndef _ASMARM_PAGE_NOMMU_H
11#define _ASMARM_PAGE_NOMMU_H 12#define _ASMARM_PAGE_NOMMU_H
12 13
@@ -42,9 +43,6 @@ typedef unsigned long pgprot_t;
42#define __pmd(x) (x) 43#define __pmd(x) (x)
43#define __pgprot(x) (x) 44#define __pgprot(x) (x)
44 45
45/* to align the pointer to the (next) page boundary */
46#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
47
48extern unsigned long memory_start; 46extern unsigned long memory_start;
49extern unsigned long memory_end; 47extern unsigned long memory_end;
50 48
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index 8e05bdb5f12f..7c5fc5582e5d 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -15,9 +15,6 @@
15#define PAGE_SIZE (1UL << PAGE_SHIFT) 15#define PAGE_SIZE (1UL << PAGE_SHIFT)
16#define PAGE_MASK (~(PAGE_SIZE-1)) 16#define PAGE_MASK (~(PAGE_SIZE-1))
17 17
18/* to align the pointer to the (next) page boundary */
19#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
20
21#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
22 19
23#ifndef CONFIG_MMU 20#ifndef CONFIG_MMU
diff --git a/include/asm-arm/plat-orion/mv_xor.h b/include/asm-arm/plat-orion/mv_xor.h
new file mode 100644
index 000000000000..c349e8ff5cc0
--- /dev/null
+++ b/include/asm-arm/plat-orion/mv_xor.h
@@ -0,0 +1,28 @@
1/*
2 * Marvell XOR platform device data definition file.
3 */
4
5#ifndef __ASM_PLAT_ORION_MV_XOR_H
6#define __ASM_PLAT_ORION_MV_XOR_H
7
8#include <linux/dmaengine.h>
9#include <linux/mbus.h>
10
11#define MV_XOR_SHARED_NAME "mv_xor_shared"
12#define MV_XOR_NAME "mv_xor"
13
14struct mbus_dram_target_info;
15
16struct mv_xor_platform_shared_data {
17 struct mbus_dram_target_info *dram;
18};
19
20struct mv_xor_platform_data {
21 struct platform_device *shared;
22 int hw_id;
23 dma_cap_mask_t cap_mask;
24 size_t pool_size;
25};
26
27
28#endif
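Editor's note: a sketch of how SoC setup code might feed these structures to the XOR driver; the device and variable names are assumptions for illustration, not taken from the patch (needs <linux/platform_device.h> and <asm/page.h> in addition to this header):

static struct mbus_dram_target_info example_dram_info;  /* filled in by SoC setup */

static struct mv_xor_platform_shared_data xor_shared_data = {
        .dram = &example_dram_info,
};

static struct platform_device xor_shared_device = {
        .name   = MV_XOR_SHARED_NAME,
        .id     = 0,
        .dev    = { .platform_data = &xor_shared_data },
};

static struct mv_xor_platform_data xor_chan0_data = {
        .shared         = &xor_shared_device,
        .hw_id          = 0,
        .pool_size      = PAGE_SIZE,
};

static void __init example_xor_init(void)
{
        dma_cap_set(DMA_MEMCPY, xor_chan0_data.cap_mask);
        dma_cap_set(DMA_XOR, xor_chan0_data.cap_mask);
        /* ...then register xor_shared_device and a MV_XOR_NAME device
         * carrying xor_chan0_data as its platform_data. */
}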
diff --git a/include/asm-arm/ptrace.h b/include/asm-arm/ptrace.h
index 7aaa206cb54e..8382b7510f94 100644
--- a/include/asm-arm/ptrace.h
+++ b/include/asm-arm/ptrace.h
@@ -139,8 +139,6 @@ static inline int valid_user_regs(struct pt_regs *regs)
139 return 0; 139 return 0;
140} 140}
141 141
142#endif /* __KERNEL__ */
143
144#define pc_pointer(v) \ 142#define pc_pointer(v) \
145 ((v) & ~PCMASK) 143 ((v) & ~PCMASK)
146 144
@@ -153,10 +151,10 @@ extern unsigned long profile_pc(struct pt_regs *regs);
153#define profile_pc(regs) instruction_pointer(regs) 151#define profile_pc(regs) instruction_pointer(regs)
154#endif 152#endif
155 153
156#ifdef __KERNEL__
157#define predicate(x) ((x) & 0xf0000000) 154#define predicate(x) ((x) & 0xf0000000)
158#define PREDICATE_ALWAYS 0xe0000000 155#define PREDICATE_ALWAYS 0xe0000000
159#endif 156
157#endif /* __KERNEL__ */
160 158
161#endif /* __ASSEMBLY__ */ 159#endif /* __ASSEMBLY__ */
162 160
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-arm/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index f5a664786311..d4be2d646160 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -97,19 +97,6 @@ static inline struct thread_info *current_thread_info(void)
97 return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); 97 return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
98} 98}
99 99
100/* thread information allocation */
101#ifdef CONFIG_DEBUG_STACK_USAGE
102#define alloc_thread_info(tsk) \
103 ((struct thread_info *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, \
104 THREAD_SIZE_ORDER))
105#else
106#define alloc_thread_info(tsk) \
107 ((struct thread_info *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
108#endif
109
110#define free_thread_info(info) \
111 free_pages((unsigned long)info, THREAD_SIZE_ORDER);
112
113#define thread_saved_pc(tsk) \ 100#define thread_saved_pc(tsk) \
114 ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc))) 101 ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
115#define thread_saved_fp(tsk) \ 102#define thread_saved_fp(tsk) \
diff --git a/include/asm-arm/traps.h b/include/asm-arm/traps.h
index f1541afcf85c..aa399aec568e 100644
--- a/include/asm-arm/traps.h
+++ b/include/asm-arm/traps.h
@@ -24,4 +24,6 @@ static inline int in_exception_text(unsigned long ptr)
24 ptr < (unsigned long)&__exception_text_end; 24 ptr < (unsigned long)&__exception_text_end;
25} 25}
26 26
27extern void __init early_trap_init(void);
28
27#endif 29#endif
diff --git a/include/asm-avr32/arch-at32ap/at32ap700x.h b/include/asm-avr32/arch-at32ap/at32ap700x.h
index 31e48b0e7324..d18a3053be0d 100644
--- a/include/asm-avr32/arch-at32ap/at32ap700x.h
+++ b/include/asm-avr32/arch-at32ap/at32ap700x.h
@@ -30,4 +30,20 @@
30#define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N)) 30#define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N))
31#define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N)) 31#define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N))
32 32
33
34/*
35 * DMAC peripheral hardware handshaking interfaces, used with dw_dmac
36 */
37#define DMAC_MCI_RX 0
38#define DMAC_MCI_TX 1
39#define DMAC_DAC_TX 2
40#define DMAC_AC97_A_RX 3
41#define DMAC_AC97_A_TX 4
42#define DMAC_AC97_B_RX 5
43#define DMAC_AC97_B_TX 6
44#define DMAC_DMAREQ_0 7
45#define DMAC_DMAREQ_1 8
46#define DMAC_DMAREQ_2 9
47#define DMAC_DMAREQ_3 10
48
33#endif /* __ASM_ARCH_AT32AP700X_H__ */ 49#endif /* __ASM_ARCH_AT32AP700X_H__ */
diff --git a/include/asm-avr32/kvm.h b/include/asm-avr32/kvm.h
deleted file mode 100644
index 8c5777020e2c..000000000000
--- a/include/asm-avr32/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_AVR32_H
2#define __LINUX_KVM_AVR32_H
3
4/* avr32 does not support KVM */
5
6#endif
diff --git a/include/asm-avr32/page.h b/include/asm-avr32/page.h
index cbbc5ca9728b..f805d1cb11bc 100644
--- a/include/asm-avr32/page.h
+++ b/include/asm-avr32/page.h
@@ -57,9 +57,6 @@ static inline int get_order(unsigned long size)
57 57
58#endif /* !__ASSEMBLY__ */ 58#endif /* !__ASSEMBLY__ */
59 59
60/* Align the pointer to the (next) page boundary */
61#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
62
63/* 60/*
64 * The hardware maps the virtual addresses 0x80000000 -> 0x9fffffff 61 * The hardware maps the virtual addresses 0x80000000 -> 0x9fffffff
65 * permanently to the physical addresses 0x00000000 -> 0x1fffffff when 62 * permanently to the physical addresses 0x00000000 -> 0x1fffffff when
diff --git a/include/asm-avr32/semaphore.h b/include/asm-avr32/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-avr32/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-avr32/thread_info.h b/include/asm-avr32/thread_info.h
index df68631b7b27..294b25f9323d 100644
--- a/include/asm-avr32/thread_info.h
+++ b/include/asm-avr32/thread_info.h
@@ -61,10 +61,6 @@ static inline struct thread_info *current_thread_info(void)
61 return (struct thread_info *)addr; 61 return (struct thread_info *)addr;
62} 62}
63 63
64/* thread information allocation */
65#define alloc_thread_info(ti) \
66 ((struct thread_info *) __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
67#define free_thread_info(ti) free_pages((unsigned long)(ti), 1)
68#define get_thread_info(ti) get_task_struct((ti)->task) 64#define get_thread_info(ti) get_task_struct((ti)->task)
69#define put_thread_info(ti) put_task_struct((ti)->task) 65#define put_thread_info(ti) put_task_struct((ti)->task)
70 66
diff --git a/include/asm-blackfin/ide.h b/include/asm-blackfin/ide.h
deleted file mode 100644
index 5b88de115bf4..000000000000
--- a/include/asm-blackfin/ide.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/****************************************************************************/
2
3/*
4 * linux/include/asm-blackfin/ide.h
5 *
6 * Copyright (C) 1994-1996 Linus Torvalds & authors
7 * Copyright (C) 2001 Lineo Inc., davidm@snapgear.com
8 * Copyright (C) 2002 Greg Ungerer (gerg@snapgear.com)
9 * Copyright (C) 2002 Yoshinori Sato (ysato@users.sourceforge.jp)
10 * Copyright (C) 2005 Hennerich Michael (hennerich@blackfin.uclinux.org)
11 */
12
13/****************************************************************************/
14#ifndef _BLACKFIN_IDE_H
15#define _BLACKFIN_IDE_H
16/****************************************************************************/
17#ifdef __KERNEL__
18/****************************************************************************/
19
20#define MAX_HWIFS 1
21
22#include <asm-generic/ide_iops.h>
23
24/****************************************************************************/
25#endif /* __KERNEL__ */
26#endif /* _BLACKFIN_IDE_H */
27/****************************************************************************/
diff --git a/include/asm-blackfin/kvm.h b/include/asm-blackfin/kvm.h
deleted file mode 100644
index e3477d77c014..000000000000
--- a/include/asm-blackfin/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_BLACKFIN_H
2#define __LINUX_KVM_BLACKFIN_H
3
4/* blackfin does not support KVM */
5
6#endif
diff --git a/include/asm-blackfin/page.h b/include/asm-blackfin/page.h
index c7db0220fbd6..344f6a8c1f22 100644
--- a/include/asm-blackfin/page.h
+++ b/include/asm-blackfin/page.h
@@ -51,9 +51,6 @@ typedef struct page *pgtable_t;
51#define __pgd(x) ((pgd_t) { (x) } ) 51#define __pgd(x) ((pgd_t) { (x) } )
52#define __pgprot(x) ((pgprot_t) { (x) } ) 52#define __pgprot(x) ((pgprot_t) { (x) } )
53 53
54/* to align the pointer to the (next) page boundary */
55#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
56
57extern unsigned long memory_start; 54extern unsigned long memory_start;
58extern unsigned long memory_end; 55extern unsigned long memory_end;
59 56
diff --git a/include/asm-blackfin/ptrace.h b/include/asm-blackfin/ptrace.h
index b8346cd3a6f6..a45a80e54adc 100644
--- a/include/asm-blackfin/ptrace.h
+++ b/include/asm-blackfin/ptrace.h
@@ -83,14 +83,14 @@ struct pt_regs {
83#define PTRACE_GETREGS 12 83#define PTRACE_GETREGS 12
84#define PTRACE_SETREGS 13 /* ptrace signal */ 84#define PTRACE_SETREGS 13 /* ptrace signal */
85 85
86#ifdef CONFIG_BINFMT_ELF_FDPIC
87#define PTRACE_GETFDPIC 31 86#define PTRACE_GETFDPIC 31
88#define PTRACE_GETFDPIC_EXEC 0 87#define PTRACE_GETFDPIC_EXEC 0
89#define PTRACE_GETFDPIC_INTERP 1 88#define PTRACE_GETFDPIC_INTERP 1
90#endif
91 89
92#define PS_S (0x0002) 90#define PS_S (0x0002)
93 91
92#ifdef __KERNEL__
93
94/* user_mode returns true if only one bit is set in IPEND, other than the 94/* user_mode returns true if only one bit is set in IPEND, other than the
95 master interrupt enable. */ 95 master interrupt enable. */
96#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1))) 96#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1)))
@@ -98,6 +98,8 @@ struct pt_regs {
98#define profile_pc(regs) instruction_pointer(regs) 98#define profile_pc(regs) instruction_pointer(regs)
99extern void show_regs(struct pt_regs *); 99extern void show_regs(struct pt_regs *);
100 100
101#endif /* __KERNEL__ */
102
101#endif /* __ASSEMBLY__ */ 103#endif /* __ASSEMBLY__ */
102 104
103/* 105/*
diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-blackfin/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-blackfin/thread_info.h b/include/asm-blackfin/thread_info.h
index bc2fe5accf20..642769329d12 100644
--- a/include/asm-blackfin/thread_info.h
+++ b/include/asm-blackfin/thread_info.h
@@ -42,6 +42,7 @@
42/* 42/*
43 * Size of kernel stack for each process. This must be a power of 2... 43 * Size of kernel stack for each process. This must be a power of 2...
44 */ 44 */
45#define THREAD_SIZE_ORDER 1
45#define THREAD_SIZE 8192 /* 2 pages */ 46#define THREAD_SIZE 8192 /* 2 pages */
46 47
47#ifndef __ASSEMBLY__ 48#ifndef __ASSEMBLY__
@@ -94,10 +95,6 @@ static inline struct thread_info *current_thread_info(void)
94 return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1)); 95 return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1));
95} 96}
96 97
97/* thread information allocation */
98#define alloc_thread_info(tsk) ((struct thread_info *) \
99 __get_free_pages(GFP_KERNEL, 1))
100#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
101#endif /* __ASSEMBLY__ */ 98#endif /* __ASSEMBLY__ */
102 99
103/* 100/*
diff --git a/include/asm-cris/arch-v10/Kbuild b/include/asm-cris/arch-v10/Kbuild
index 60e7e1b73cec..7a192e1290b1 100644
--- a/include/asm-cris/arch-v10/Kbuild
+++ b/include/asm-cris/arch-v10/Kbuild
@@ -1,4 +1,3 @@
1header-y += ptrace.h
2header-y += user.h 1header-y += user.h
3header-y += svinto.h 2header-y += svinto.h
4header-y += sv_addr_ag.h 3header-y += sv_addr_ag.h
diff --git a/include/asm-cris/arch-v10/ide.h b/include/asm-cris/arch-v10/ide.h
deleted file mode 100644
index 5366e6239328..000000000000
--- a/include/asm-cris/arch-v10/ide.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * linux/include/asm-cris/ide.h
3 *
4 * Copyright (C) 2000, 2001, 2002 Axis Communications AB
5 *
6 * Authors: Bjorn Wesen
7 *
8 */
9
10/*
11 * This file contains the ETRAX 100LX specific IDE code.
12 */
13
14#ifndef __ASMCRIS_IDE_H
15#define __ASMCRIS_IDE_H
16
17#ifdef __KERNEL__
18
19#include <asm/arch/svinto.h>
20#include <asm/io.h>
21#include <asm-generic/ide_iops.h>
22
23
24/* ETRAX 100 can support 4 IDE busses on the same pins (serialized) */
25
26#define MAX_HWIFS 4
27
28static inline int ide_default_irq(unsigned long base)
29{
30 /* all IDE busses share the same IRQ, number 4.
31 * this has the side-effect that ide-probe.c will cluster our 4 interfaces
32 * together in a hwgroup, and will serialize accesses. this is good, because
33 * we can't access more than one interface at the same time on ETRAX100.
34 */
35 return 4;
36}
37
38static inline unsigned long ide_default_io_base(int index)
39{
40 /* we have no real I/O base address per interface, since all go through the
41 * same register. but in a bitfield in that register, we have the i/f number.
42 * so we can use the io_base to remember that bitfield.
43 */
44 static const unsigned long io_bases[MAX_HWIFS] = {
45 IO_FIELD(R_ATA_CTRL_DATA, sel, 0),
46 IO_FIELD(R_ATA_CTRL_DATA, sel, 1),
47 IO_FIELD(R_ATA_CTRL_DATA, sel, 2),
48 IO_FIELD(R_ATA_CTRL_DATA, sel, 3)
49 };
50 return io_bases[index];
51}
52
53/* this is called once for each interface, to setup the port addresses. data_port is the result
54 * of the ide_default_io_base call above. ctrl_port will be 0, but that is don't care for us.
55 */
56
57static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, unsigned long ctrl_port, int *irq)
58{
59 int i;
60
61 /* fill in ports for ATA addresses 0 to 7 */
62 for (i = 0; i <= 7; i++) {
63 hw->io_ports_array[i] = data_port |
64 IO_FIELD(R_ATA_CTRL_DATA, addr, i) |
65 IO_STATE(R_ATA_CTRL_DATA, cs0, active);
66 }
67
68 /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
69 hw->io_ports.ctl_addr = data_port |
70 IO_FIELD(R_ATA_CTRL_DATA, addr, 6) |
71 IO_STATE(R_ATA_CTRL_DATA, cs1, active);
72
73 /* whats this for ? */
74 hw->io_ports.irq_addr = 0;
75}
76
77static inline void ide_init_default_hwifs(void)
78{
79 hw_regs_t hw;
80 int index;
81
82 for(index = 0; index < MAX_HWIFS; index++) {
83 ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
84 hw.irq = ide_default_irq(ide_default_io_base(index));
85 ide_register_hw(&hw, NULL);
86 }
87}
88
89#endif /* __KERNEL__ */
90
91#endif /* __ASMCRIS_IDE_H */
diff --git a/include/asm-cris/arch-v10/ptrace.h b/include/asm-cris/arch-v10/ptrace.h
index fb14c5ee37f9..2f464eab3a51 100644
--- a/include/asm-cris/arch-v10/ptrace.h
+++ b/include/asm-cris/arch-v10/ptrace.h
@@ -106,10 +106,14 @@ struct switch_stack {
106 unsigned long return_ip; /* ip that _resume will return to */ 106 unsigned long return_ip; /* ip that _resume will return to */
107}; 107};
108 108
109#ifdef __KERNEL__
110
109/* bit 8 is user-mode flag */ 111/* bit 8 is user-mode flag */
110#define user_mode(regs) (((regs)->dccr & 0x100) != 0) 112#define user_mode(regs) (((regs)->dccr & 0x100) != 0)
111#define instruction_pointer(regs) ((regs)->irp) 113#define instruction_pointer(regs) ((regs)->irp)
112#define profile_pc(regs) instruction_pointer(regs) 114#define profile_pc(regs) instruction_pointer(regs)
113extern void show_regs(struct pt_regs *); 115extern void show_regs(struct pt_regs *);
114 116
117#endif /* __KERNEL__ */
118
115#endif 119#endif
diff --git a/include/asm-cris/arch-v32/Kbuild b/include/asm-cris/arch-v32/Kbuild
index a0ec545e242e..35f2fc4f993e 100644
--- a/include/asm-cris/arch-v32/Kbuild
+++ b/include/asm-cris/arch-v32/Kbuild
@@ -1,3 +1,2 @@
1header-y += ptrace.h
2header-y += user.h 1header-y += user.h
3header-y += cryptocop.h 2header-y += cryptocop.h
diff --git a/include/asm-cris/arch-v32/ide.h b/include/asm-cris/arch-v32/ide.h
deleted file mode 100644
index fb9c3627a5b4..000000000000
--- a/include/asm-cris/arch-v32/ide.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * linux/include/asm-cris/ide.h
3 *
4 * Copyright (C) 2000-2004 Axis Communications AB
5 *
6 * Authors: Bjorn Wesen, Mikael Starvik
7 *
8 */
9
10/*
11 * This file contains the ETRAX FS specific IDE code.
12 */
13
14#ifndef __ASMCRIS_IDE_H
15#define __ASMCRIS_IDE_H
16
17#ifdef __KERNEL__
18
19#include <asm/arch/hwregs/intr_vect.h>
20#include <asm/arch/hwregs/ata_defs.h>
21#include <asm/io.h>
22#include <asm-generic/ide_iops.h>
23
24
25/* ETRAX FS can support 4 IDE busses on the same pins (serialized) */
26
27#define MAX_HWIFS 4
28
29static inline int ide_default_irq(unsigned long base)
30{
31 /* all IDE busses share the same IRQ,
32 * this has the side-effect that ide-probe.c will cluster our 4 interfaces
33 * together in a hwgroup, and will serialize accesses. this is good, because
34 * we can't access more than one interface at the same time on ETRAX100.
35 */
36 return ATA_INTR_VECT;
37}
38
39static inline unsigned long ide_default_io_base(int index)
40{
41 reg_ata_rw_ctrl2 ctrl2 = {.sel = index};
42 /* we have no real I/O base address per interface, since all go through the
43 * same register. but in a bitfield in that register, we have the i/f number.
44 * so we can use the io_base to remember that bitfield.
45 */
46 ctrl2.sel = index;
47
48 return REG_TYPE_CONV(unsigned long, reg_ata_rw_ctrl2, ctrl2);
49}
50
51#define IDE_ARCH_ACK_INTR
52#define ide_ack_intr(hwif) ((hwif)->ack_intr(hwif))
53
54#endif /* __KERNEL__ */
55
56#endif /* __ASMCRIS_IDE_H */
diff --git a/include/asm-cris/arch-v32/ptrace.h b/include/asm-cris/arch-v32/ptrace.h
index 516cc7062d94..41f4e8662bc2 100644
--- a/include/asm-cris/arch-v32/ptrace.h
+++ b/include/asm-cris/arch-v32/ptrace.h
@@ -106,9 +106,13 @@ struct switch_stack {
106 unsigned long return_ip; /* ip that _resume will return to */ 106 unsigned long return_ip; /* ip that _resume will return to */
107}; 107};
108 108
109#ifdef __KERNEL__
110
109#define user_mode(regs) (((regs)->ccs & (1 << (U_CCS_BITNR + CCS_SHIFT))) != 0) 111#define user_mode(regs) (((regs)->ccs & (1 << (U_CCS_BITNR + CCS_SHIFT))) != 0)
110#define instruction_pointer(regs) ((regs)->erp) 112#define instruction_pointer(regs) ((regs)->erp)
111extern void show_regs(struct pt_regs *); 113extern void show_regs(struct pt_regs *);
112#define profile_pc(regs) instruction_pointer(regs) 114#define profile_pc(regs) instruction_pointer(regs)
113 115
116#endif /* __KERNEL__ */
117
114#endif 118#endif
diff --git a/include/asm-cris/cacheflush.h b/include/asm-cris/cacheflush.h
index 01af2de27c5b..cf60e3f69f8d 100644
--- a/include/asm-cris/cacheflush.h
+++ b/include/asm-cris/cacheflush.h
@@ -26,7 +26,6 @@
26#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 26#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
27 memcpy(dst, src, len) 27 memcpy(dst, src, len)
28 28
29void global_flush_tlb(void);
30int change_page_attr(struct page *page, int numpages, pgprot_t prot); 29int change_page_attr(struct page *page, int numpages, pgprot_t prot);
31 30
32#endif /* _CRIS_CACHEFLUSH_H */ 31#endif /* _CRIS_CACHEFLUSH_H */
diff --git a/include/asm-cris/ide.h b/include/asm-cris/ide.h
deleted file mode 100644
index a894f66665f8..000000000000
--- a/include/asm-cris/ide.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm/arch/ide.h>
diff --git a/include/asm-cris/kvm.h b/include/asm-cris/kvm.h
deleted file mode 100644
index c860f51149f0..000000000000
--- a/include/asm-cris/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_CRIS_H
2#define __LINUX_KVM_CRIS_H
3
4/* cris does not support KVM */
5
6#endif
diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h
index c45bb1ef397c..d19272ba6b69 100644
--- a/include/asm-cris/page.h
+++ b/include/asm-cris/page.h
@@ -60,9 +60,6 @@ typedef struct page *pgtable_t;
60 60
61#define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) 61#define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
62 62
63/* to align the pointer to the (next) page boundary */
64#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
65
66#ifndef __ASSEMBLY__ 63#ifndef __ASSEMBLY__
67 64
68#endif /* __ASSEMBLY__ */ 65#endif /* __ASSEMBLY__ */
diff --git a/include/asm-cris/ptrace.h b/include/asm-cris/ptrace.h
index 1ec69a7ea836..d910925e3174 100644
--- a/include/asm-cris/ptrace.h
+++ b/include/asm-cris/ptrace.h
@@ -4,11 +4,13 @@
4#include <asm/arch/ptrace.h> 4#include <asm/arch/ptrace.h>
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7
7/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ 8/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
8#define PTRACE_GETREGS 12 9#define PTRACE_GETREGS 12
9#define PTRACE_SETREGS 13 10#define PTRACE_SETREGS 13
10#endif
11 11
12#define profile_pc(regs) instruction_pointer(regs) 12#define profile_pc(regs) instruction_pointer(regs)
13 13
14#endif /* __KERNEL__ */
15
14#endif /* _CRIS_PTRACE_H */ 16#endif /* _CRIS_PTRACE_H */
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-cris/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-cris/thread_info.h b/include/asm-cris/thread_info.h
index 784668ab0fa2..7efe1000f99d 100644
--- a/include/asm-cris/thread_info.h
+++ b/include/asm-cris/thread_info.h
@@ -11,6 +11,8 @@
11 11
12#ifdef __KERNEL__ 12#ifdef __KERNEL__
13 13
14#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
15
14#ifndef __ASSEMBLY__ 16#ifndef __ASSEMBLY__
15#include <asm/types.h> 17#include <asm/types.h>
16#include <asm/processor.h> 18#include <asm/processor.h>
diff --git a/include/asm-frv/Kbuild b/include/asm-frv/Kbuild
index bc3f12c5b7e0..0f8956def738 100644
--- a/include/asm-frv/Kbuild
+++ b/include/asm-frv/Kbuild
@@ -3,4 +3,3 @@ include include/asm-generic/Kbuild.asm
3header-y += registers.h 3header-y += registers.h
4 4
5unifdef-y += termios.h 5unifdef-y += termios.h
6unifdef-y += ptrace.h
diff --git a/include/asm-frv/ide.h b/include/asm-frv/ide.h
index 8c9a540d4344..7ebcc56a2229 100644
--- a/include/asm-frv/ide.h
+++ b/include/asm-frv/ide.h
@@ -18,10 +18,6 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/irq.h> 19#include <asm/irq.h>
20 20
21#ifndef MAX_HWIFS
22#define MAX_HWIFS 8
23#endif
24
25/****************************************************************************/ 21/****************************************************************************/
26/* 22/*
27 * some bits needed for parts of the IDE subsystem to compile 23 * some bits needed for parts of the IDE subsystem to compile
diff --git a/include/asm-frv/kvm.h b/include/asm-frv/kvm.h
deleted file mode 100644
index 9c8a4f08d0a9..000000000000
--- a/include/asm-frv/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_FRV_H
2#define __LINUX_KVM_FRV_H
3
4/* frv does not support KVM */
5
6#endif
diff --git a/include/asm-frv/page.h b/include/asm-frv/page.h
index c2c1e89e747d..bd9c220094c7 100644
--- a/include/asm-frv/page.h
+++ b/include/asm-frv/page.h
@@ -40,9 +40,6 @@ typedef struct page *pgtable_t;
40#define __pgprot(x) ((pgprot_t) { (x) } ) 40#define __pgprot(x) ((pgprot_t) { (x) } )
41#define PTE_MASK PAGE_MASK 41#define PTE_MASK PAGE_MASK
42 42
43/* to align the pointer to the (next) page boundary */
44#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
45
46#define devmem_is_allowed(pfn) 1 43#define devmem_is_allowed(pfn) 1
47 44
48#define __pa(vaddr) virt_to_phys((void *) (unsigned long) (vaddr)) 45#define __pa(vaddr) virt_to_phys((void *) (unsigned long) (vaddr))
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-frv/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index 348b8f1df17e..b7ac6bf2844c 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -82,6 +82,8 @@ register struct thread_info *__current_thread_info asm("gr15");
82 82
83#define current_thread_info() ({ __current_thread_info; }) 83#define current_thread_info() ({ __current_thread_info; })
84 84
85#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
86
85/* thread information allocation */ 87/* thread information allocation */
86#ifdef CONFIG_DEBUG_STACK_USAGE 88#ifdef CONFIG_DEBUG_STACK_USAGE
87#define alloc_thread_info(tsk) \ 89#define alloc_thread_info(tsk) \
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 7cd25b8e7c9a..1170dc60e638 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,4 +1,6 @@
1ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
1header-y += kvm.h 2header-y += kvm.h
3endif
2 4
3ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),) 5ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
4unifdef-y += a.out.h 6unifdef-y += a.out.h
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 2632328d8646..a3f738cffdb6 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -34,9 +34,14 @@ struct bug_entry {
34#ifndef __WARN 34#ifndef __WARN
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36extern void warn_on_slowpath(const char *file, const int line); 36extern void warn_on_slowpath(const char *file, const int line);
37extern void warn_slowpath(const char *file, const int line,
38 const char *fmt, ...) __attribute__((format(printf, 3, 4)));
37#define WANT_WARN_ON_SLOWPATH 39#define WANT_WARN_ON_SLOWPATH
38#endif 40#endif
39#define __WARN() warn_on_slowpath(__FILE__, __LINE__) 41#define __WARN() warn_on_slowpath(__FILE__, __LINE__)
42#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
43#else
44#define __WARN_printf(arg...) __WARN()
40#endif 45#endif
41 46
42#ifndef WARN_ON 47#ifndef WARN_ON
@@ -48,6 +53,15 @@ extern void warn_on_slowpath(const char *file, const int line);
48}) 53})
49#endif 54#endif
50 55
56#ifndef WARN
57#define WARN(condition, format...) ({ \
58 int __ret_warn_on = !!(condition); \
59 if (unlikely(__ret_warn_on)) \
60 __WARN_printf(format); \
61 unlikely(__ret_warn_on); \
62})
63#endif
64
51#else /* !CONFIG_BUG */ 65#else /* !CONFIG_BUG */
52#ifndef HAVE_ARCH_BUG 66#ifndef HAVE_ARCH_BUG
53#define BUG() 67#define BUG()
@@ -63,6 +77,14 @@ extern void warn_on_slowpath(const char *file, const int line);
63 unlikely(__ret_warn_on); \ 77 unlikely(__ret_warn_on); \
64}) 78})
65#endif 79#endif
80
81#ifndef WARN
82#define WARN(condition, format...) ({ \
83 int __ret_warn_on = !!(condition); \
84 unlikely(__ret_warn_on); \
85})
86#endif
87
66#endif 88#endif
67 89
68#define WARN_ON_ONCE(condition) ({ \ 90#define WARN_ON_ONCE(condition) ({ \
@@ -75,6 +97,9 @@ extern void warn_on_slowpath(const char *file, const int line);
75 unlikely(__ret_warn_once); \ 97 unlikely(__ret_warn_once); \
76}) 98})
77 99
100#define WARN_ON_RATELIMIT(condition, state) \
101 WARN_ON((condition) && __ratelimit(state))
102
78#ifdef CONFIG_SMP 103#ifdef CONFIG_SMP
79# define WARN_ON_SMP(x) WARN_ON(x) 104# define WARN_ON_SMP(x) WARN_ON(x)
80#else 105#else
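Editor's note: the new WARN() evaluates to the condition, like WARN_ON(), but also passes a printk-style message to the slow path; a minimal usage sketch (the function and values are made up):

int ret = example_setup();      /* hypothetical call */

if (WARN(ret < 0, "example_setup() failed: %d\n", ret))
        return ret;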
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 6be061d09da9..a3034d20ebd5 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#ifdef CONFIG_HAVE_GPIO_LIB 6#ifdef CONFIG_GPIOLIB
7 7
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9 9
@@ -32,6 +32,8 @@ struct module;
32/** 32/**
33 * struct gpio_chip - abstract a GPIO controller 33 * struct gpio_chip - abstract a GPIO controller
34 * @label: for diagnostics 34 * @label: for diagnostics
35 * @dev: optional device providing the GPIOs
36 * @owner: helps prevent removal of modules exporting active GPIOs
35 * @direction_input: configures signal "offset" as input, or returns error 37 * @direction_input: configures signal "offset" as input, or returns error
36 * @get: returns value for signal "offset"; for output signals this 38 * @get: returns value for signal "offset"; for output signals this
37 * returns either the value actually sensed, or zero 39 * returns either the value actually sensed, or zero
@@ -59,6 +61,7 @@ struct module;
59 */ 61 */
60struct gpio_chip { 62struct gpio_chip {
61 char *label; 63 char *label;
64 struct device *dev;
62 struct module *owner; 65 struct module *owner;
63 66
64 int (*direction_input)(struct gpio_chip *chip, 67 int (*direction_input)(struct gpio_chip *chip,
@@ -74,6 +77,7 @@ struct gpio_chip {
74 int base; 77 int base;
75 u16 ngpio; 78 u16 ngpio;
76 unsigned can_sleep:1; 79 unsigned can_sleep:1;
80 unsigned exported:1;
77}; 81};
78 82
79extern const char *gpiochip_is_requested(struct gpio_chip *chip, 83extern const char *gpiochip_is_requested(struct gpio_chip *chip,
@@ -108,7 +112,18 @@ extern void __gpio_set_value(unsigned gpio, int value);
108extern int __gpio_cansleep(unsigned gpio); 112extern int __gpio_cansleep(unsigned gpio);
109 113
110 114
111#else 115#ifdef CONFIG_GPIO_SYSFS
116
117/*
118 * A sysfs interface can be exported by individual drivers if they want,
119 * but more typically is configured entirely from userspace.
120 */
121extern int gpio_export(unsigned gpio, bool direction_may_change);
122extern void gpio_unexport(unsigned gpio);
123
124#endif /* CONFIG_GPIO_SYSFS */
125
126#else /* !CONFIG_HAVE_GPIO_LIB */
112 127
113static inline int gpio_is_valid(int number) 128static inline int gpio_is_valid(int number)
114{ 129{
@@ -137,6 +152,20 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
137 gpio_set_value(gpio, value); 152 gpio_set_value(gpio, value);
138} 153}
139 154
140#endif 155#endif /* !CONFIG_HAVE_GPIO_LIB */
156
157#ifndef CONFIG_GPIO_SYSFS
158
159/* sysfs support is only available with gpiolib, where it's optional */
160
161static inline int gpio_export(unsigned gpio, bool direction_may_change)
162{
163 return -ENOSYS;
164}
165
166static inline void gpio_unexport(unsigned gpio)
167{
168}
169#endif /* CONFIG_GPIO_SYSFS */
141 170
142#endif /* _ASM_GENERIC_GPIO_H */ 171#endif /* _ASM_GENERIC_GPIO_H */
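Editor's note: a minimal sketch of a driver exporting a requested GPIO through the new sysfs hooks; GPIO 42 and the label are arbitrary examples:

static int example_export_led(void)
{
        int err;

        err = gpio_request(42, "status-led");
        if (err)
                return err;
        gpio_direction_output(42, 0);

        /* creates /sys/class/gpio/gpio42; "true" lets userspace flip direction */
        err = gpio_export(42, true);
        if (err)
                gpio_free(42);
        return err;
}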
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index 260948905e4e..f9bc9ac29b36 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -26,7 +26,7 @@ typedef unsigned int __u32;
26#ifdef __GNUC__ 26#ifdef __GNUC__
27__extension__ typedef __signed__ long long __s64; 27__extension__ typedef __signed__ long long __s64;
28__extension__ typedef unsigned long long __u64; 28__extension__ typedef unsigned long long __u64;
29#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L 29#else
30typedef __signed__ long long __s64; 30typedef __signed__ long long __s64;
31typedef unsigned long long __u64; 31typedef unsigned long long __u64;
32#endif 32#endif
diff --git a/include/asm-h8300/elf.h b/include/asm-h8300/elf.h
index 26bfc7e641da..a8b57d1f4128 100644
--- a/include/asm-h8300/elf.h
+++ b/include/asm-h8300/elf.h
@@ -26,10 +26,10 @@ typedef unsigned long elf_fpregset_t;
26#define ELF_DATA ELFDATA2MSB 26#define ELF_DATA ELFDATA2MSB
27#define ELF_ARCH EM_H8_300 27#define ELF_ARCH EM_H8_300
28#if defined(__H8300H__) 28#if defined(__H8300H__)
29#define ELF_FLAGS 0x810000 29#define ELF_CORE_EFLAGS 0x810000
30#endif 30#endif
31#if defined(__H8300S__) 31#if defined(__H8300S__)
32#define ELF_FLAGS 0x820000 32#define ELF_CORE_EFLAGS 0x820000
33#endif 33#endif
34 34
35#define ELF_PLAT_INIT(_r) _r->er1 = 0 35#define ELF_PLAT_INIT(_r) _r->er1 = 0
diff --git a/include/asm-h8300/ide.h b/include/asm-h8300/ide.h
deleted file mode 100644
index f8535ce7476e..000000000000
--- a/include/asm-h8300/ide.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/****************************************************************************/
2
3/*
4 * linux/include/asm-h8300/ide.h
5 *
6 * Copyright (C) 1994-1996 Linus Torvalds & authors
7 * Copyright (C) 2001 Lineo Inc., davidm@snapgear.com
8 * Copyright (C) 2002 Greg Ungerer (gerg@snapgear.com)
9 * Copyright (C) 2002 Yoshinori Sato (ysato@users.sourceforge.jp)
10 */
11
12/****************************************************************************/
13#ifndef _H8300_IDE_H
14#define _H8300_IDE_H
15/****************************************************************************/
16#ifdef __KERNEL__
17/****************************************************************************/
18
19#define MAX_HWIFS 1
20
21#include <asm-generic/ide_iops.h>
22
23/****************************************************************************/
24#endif /* __KERNEL__ */
25#endif /* _H8300_IDE_H */
26/****************************************************************************/
diff --git a/include/asm-h8300/keyboard.h b/include/asm-h8300/keyboard.h
deleted file mode 100644
index 90efbd655390..000000000000
--- a/include/asm-h8300/keyboard.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * linux/include/asm-h8300/keyboard.h
3 * Created 04 Dec 2001 by Khaled Hassounah <khassounah@mediumware.net>
4 * This file contains the Dragonball architecture specific keyboard definitions
5 */
6
7#ifndef _H8300_KEYBOARD_H
8#define _H8300_KEYBOARD_H
9
10
11/* dummy i.e. no real keyboard */
12#define kbd_setkeycode(x...) (-ENOSYS)
13#define kbd_getkeycode(x...) (-ENOSYS)
14#define kbd_translate(x...) (0)
15#define kbd_unexpected_up(x...) (1)
16#define kbd_leds(x...) do {;} while (0)
17#define kbd_init_hw(x...) do {;} while (0)
18#define kbd_enable_irq(x...) do {;} while (0)
19#define kbd_disable_irq(x...) do {;} while (0)
20
21#endif /* _H8300_KEYBOARD_H */
22
23
24
diff --git a/include/asm-h8300/kvm.h b/include/asm-h8300/kvm.h
deleted file mode 100644
index bdbed7b987e1..000000000000
--- a/include/asm-h8300/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_H8300_H
2#define __LINUX_KVM_H8300_H
3
4/* h8300 does not support KVM */
5
6#endif
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h
index d6a3eaf3b27e..0b6acf0b03aa 100644
--- a/include/asm-h8300/page.h
+++ b/include/asm-h8300/page.h
@@ -43,9 +43,6 @@ typedef struct page *pgtable_t;
43#define __pgd(x) ((pgd_t) { (x) } ) 43#define __pgd(x) ((pgd_t) { (x) } )
44#define __pgprot(x) ((pgprot_t) { (x) } ) 44#define __pgprot(x) ((pgprot_t) { (x) } )
45 45
46/* to align the pointer to the (next) page boundary */
47#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
48
49extern unsigned long memory_start; 46extern unsigned long memory_start;
50extern unsigned long memory_end; 47extern unsigned long memory_end;
51 48
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-h8300/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-h8300/thread_info.h b/include/asm-h8300/thread_info.h
index 27bb95e2944c..aafd4d322ec3 100644
--- a/include/asm-h8300/thread_info.h
+++ b/include/asm-h8300/thread_info.h
@@ -49,6 +49,7 @@ struct thread_info {
49/* 49/*
50 * Size of kernel stack for each process. This must be a power of 2... 50 * Size of kernel stack for each process. This must be a power of 2...
51 */ 51 */
52#define THREAD_SIZE_ORDER 1
52#define THREAD_SIZE 8192 /* 2 pages */ 53#define THREAD_SIZE 8192 /* 2 pages */
53 54
54 55
@@ -65,10 +66,6 @@ static inline struct thread_info *current_thread_info(void)
65 return ti; 66 return ti;
66} 67}
67 68
68/* thread information allocation */
69#define alloc_thread_info(tsk) ((struct thread_info *) \
70 __get_free_pages(GFP_KERNEL, 1))
71#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
72#endif /* __ASSEMBLY__ */ 69#endif /* __ASSEMBLY__ */
73 70
74/* 71/*
diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h
index f28a9701f1cf..da55c63728e0 100644
--- a/include/asm-ia64/hugetlb.h
+++ b/include/asm-ia64/hugetlb.h
@@ -4,11 +4,12 @@
4#include <asm/page.h> 4#include <asm/page.h>
5 5
6 6
7void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, 7void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
8 unsigned long end, unsigned long floor, 8 unsigned long end, unsigned long floor,
9 unsigned long ceiling); 9 unsigned long ceiling);
10 10
11int prepare_hugepage_range(unsigned long addr, unsigned long len); 11int prepare_hugepage_range(struct file *file,
12 unsigned long addr, unsigned long len);
12 13
13static inline int is_hugepage_only_range(struct mm_struct *mm, 14static inline int is_hugepage_only_range(struct mm_struct *mm,
14 unsigned long addr, 15 unsigned long addr,
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h
deleted file mode 100644
index 8fa3f8cd067a..000000000000
--- a/include/asm-ia64/ide.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * linux/include/asm-ia64/ide.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7/*
8 * This file contains the ia64 architecture specific IDE code.
9 */
10
11#ifndef __ASM_IA64_IDE_H
12#define __ASM_IA64_IDE_H
13
14#ifdef __KERNEL__
15
16
17#include <linux/irq.h>
18
19static inline int ide_default_irq(unsigned long base)
20{
21 switch (base) {
22 case 0x1f0: return isa_irq_to_vector(14);
23 case 0x170: return isa_irq_to_vector(15);
24 case 0x1e8: return isa_irq_to_vector(11);
25 case 0x168: return isa_irq_to_vector(10);
26 case 0x1e0: return isa_irq_to_vector(8);
27 case 0x160: return isa_irq_to_vector(12);
28 default:
29 return 0;
30 }
31}
32
33static inline unsigned long ide_default_io_base(int index)
34{
35 switch (index) {
36 case 0: return 0x1f0;
37 case 1: return 0x170;
38 case 2: return 0x1e8;
39 case 3: return 0x168;
40 case 4: return 0x1e0;
41 case 5: return 0x160;
42 default:
43 return 0;
44 }
45}
46
47#include <asm-generic/ide_iops.h>
48
49#endif /* __KERNEL__ */
50
51#endif /* __ASM_IA64_IDE_H */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 36f39321b768..5f271bc712ee 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -40,7 +40,6 @@
40 40
41#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT) 41#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
42#define PAGE_MASK (~(PAGE_SIZE - 1)) 42#define PAGE_MASK (~(PAGE_SIZE - 1))
43#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
44 43
45#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */ 44#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
46#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT) 45#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-ia64/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 2422ac61658a..7c60fcdd2efd 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -54,6 +54,8 @@ struct thread_info {
54 }, \ 54 }, \
55} 55}
56 56
57#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
58
57#ifndef ASM_OFFSETS_C 59#ifndef ASM_OFFSETS_C
58/* how to get the thread information struct from C */ 60/* how to get the thread information struct from C */
59#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) 61#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index e60314716122..d535833aab5e 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -302,11 +302,17 @@
302#define __NR_timerfd_create 1310 302#define __NR_timerfd_create 1310
303#define __NR_timerfd_settime 1311 303#define __NR_timerfd_settime 1311
304#define __NR_timerfd_gettime 1312 304#define __NR_timerfd_gettime 1312
305#define __NR_signalfd4 1313
306#define __NR_eventfd2 1314
307#define __NR_epoll_create1 1315
308#define __NR_dup3 1316
309#define __NR_pipe2 1317
310#define __NR_inotify_init1 1318
305 311
306#ifdef __KERNEL__ 312#ifdef __KERNEL__
307 313
308 314
309#define NR_syscalls 289 /* length of syscall table */ 315#define NR_syscalls 295 /* length of syscall table */
310 316
311/* 317/*
312 * The following defines stop scripts/checksyscalls.sh from complaining about 318 * The following defines stop scripts/checksyscalls.sh from complaining about
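Editor's note: the new table length follows from the numbering. ia64 syscalls start at 1024, so with the last entry __NR_inotify_init1 = 1318 the table holds 1318 - 1024 + 1 = 295 entries; the six syscalls added above (1313 through 1318) account for the jump from 289 to 295.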
diff --git a/include/asm-m32r/ide.h b/include/asm-m32r/ide.h
deleted file mode 100644
index 1e7f6474d130..000000000000
--- a/include/asm-m32r/ide.h
+++ /dev/null
@@ -1,70 +0,0 @@
1#ifndef _ASM_M32R_IDE_H
2#define _ASM_M32R_IDE_H
3
4/*
5 * linux/include/asm-m32r/ide.h
6 *
7 * Copyright (C) 1994-1996 Linus Torvalds & authors
8 */
9
10/*
11 * This file contains the i386 architecture specific IDE code.
12 */
13
14#ifdef __KERNEL__
15
16#include <asm/m32r.h>
17
18#ifndef MAX_HWIFS
19# ifdef CONFIG_BLK_DEV_IDEPCI
20#define MAX_HWIFS 10
21# else
22#define MAX_HWIFS 2
23# endif
24#endif
25
26static __inline__ int ide_default_irq(unsigned long base)
27{
28 switch (base) {
29#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) \
30 || defined(CONFIG_PLAT_OPSPUT)
31 case 0x1f0: return PLD_IRQ_CFIREQ;
32 default:
33 return 0;
34#elif defined(CONFIG_PLAT_MAPPI3)
35 case 0x1f0: return PLD_IRQ_CFIREQ;
36 case 0x170: return PLD_IRQ_IDEIREQ;
37 default:
38 return 0;
39#else
40 case 0x1f0: return 14;
41 case 0x170: return 15;
42 case 0x1e8: return 11;
43 case 0x168: return 10;
44 case 0x1e0: return 8;
45 case 0x160: return 12;
46 default:
47 return 0;
48#endif
49 }
50}
51
52static __inline__ unsigned long ide_default_io_base(int index)
53{
54 switch (index) {
55 case 0: return 0x1f0;
56 case 1: return 0x170;
57 case 2: return 0x1e8;
58 case 3: return 0x168;
59 case 4: return 0x1e0;
60 case 5: return 0x160;
61 default:
62 return 0;
63 }
64}
65
66#include <asm-generic/ide_iops.h>
67
68#endif /* __KERNEL__ */
69
70#endif /* _ASM_M32R_IDE_H */
diff --git a/include/asm-m32r/kvm.h b/include/asm-m32r/kvm.h
deleted file mode 100644
index 99a40515b77e..000000000000
--- a/include/asm-m32r/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_M32R_H
2#define __LINUX_KVM_M32R_H
3
4/* m32r does not support KVM */
5
6#endif
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h
index 8a677f3fca68..c9333089fe11 100644
--- a/include/asm-m32r/page.h
+++ b/include/asm-m32r/page.h
@@ -41,9 +41,6 @@ typedef struct page *pgtable_t;
41 41
42#endif /* !__ASSEMBLY__ */ 42#endif /* !__ASSEMBLY__ */
43 43
44/* to align the pointer to the (next) page boundary */
45#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
46
47/* 44/*
48 * This handles the memory map.. We could make this a config 45 * This handles the memory map.. We could make this a config
49 * option, but too many people screw it up, and too few need 46 * option, but too many people screw it up, and too few need
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-m32r/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-m32r/thread_info.h b/include/asm-m32r/thread_info.h
index 1effcd0f5e63..8589d462df27 100644
--- a/include/asm-m32r/thread_info.h
+++ b/include/asm-m32r/thread_info.h
@@ -94,6 +94,8 @@ static inline struct thread_info *current_thread_info(void)
94 return ti; 94 return ti;
95} 95}
96 96
97#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
98
97/* thread information allocation */ 99/* thread information allocation */
98#ifdef CONFIG_DEBUG_STACK_USAGE 100#ifdef CONFIG_DEBUG_STACK_USAGE
99#define alloc_thread_info(tsk) \ 101#define alloc_thread_info(tsk) \
diff --git a/include/asm-m68k/dvma.h b/include/asm-m68k/dvma.h
index 4fff408d0150..890bbf7e7758 100644
--- a/include/asm-m68k/dvma.h
+++ b/include/asm-m68k/dvma.h
@@ -13,7 +13,7 @@
13#define DVMA_PAGE_SHIFT 13 13#define DVMA_PAGE_SHIFT 13
14#define DVMA_PAGE_SIZE (1UL << DVMA_PAGE_SHIFT) 14#define DVMA_PAGE_SIZE (1UL << DVMA_PAGE_SHIFT)
15#define DVMA_PAGE_MASK (~(DVMA_PAGE_SIZE-1)) 15#define DVMA_PAGE_MASK (~(DVMA_PAGE_SIZE-1))
16#define DVMA_PAGE_ALIGN(addr) (((addr)+DVMA_PAGE_SIZE-1)&DVMA_PAGE_MASK) 16#define DVMA_PAGE_ALIGN(addr) ALIGN(addr, DVMA_PAGE_SIZE)
17 17
18extern void dvma_init(void); 18extern void dvma_init(void);
19extern int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, 19extern int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
diff --git a/include/asm-m68k/ide.h b/include/asm-m68k/ide.h
index 909c6dfd3851..1daf6cbdd9f0 100644
--- a/include/asm-m68k/ide.h
+++ b/include/asm-m68k/ide.h
@@ -45,10 +45,6 @@
45#include <asm/macints.h> 45#include <asm/macints.h>
46#endif 46#endif
47 47
48#ifndef MAX_HWIFS
49#define MAX_HWIFS 4 /* same as the other archs */
50#endif
51
52/* 48/*
53 * Get rid of defs from io.h - ide has its private and conflicting versions 49 * Get rid of defs from io.h - ide has its private and conflicting versions
54 * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we 50 * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we
diff --git a/include/asm-m68k/kvm.h b/include/asm-m68k/kvm.h
deleted file mode 100644
index 7ed27fce5240..000000000000
--- a/include/asm-m68k/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_M68K_H
2#define __LINUX_KVM_M68K_H
3
4/* m68k does not support KVM */
5
6#endif
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h
index 880c2cbff8a6..a34b8bad7847 100644
--- a/include/asm-m68k/page.h
+++ b/include/asm-m68k/page.h
@@ -103,9 +103,6 @@ typedef struct page *pgtable_t;
103#define __pgd(x) ((pgd_t) { (x) } ) 103#define __pgd(x) ((pgd_t) { (x) } )
104#define __pgprot(x) ((pgprot_t) { (x) } ) 104#define __pgprot(x) ((pgprot_t) { (x) } )
105 105
106/* to align the pointer to the (next) page boundary */
107#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
108
109#endif /* !__ASSEMBLY__ */ 106#endif /* !__ASSEMBLY__ */
110 107
111#include <asm/page_offset.h> 108#include <asm/page_offset.h>
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-m68k/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index d635a3752488..abc002798a2b 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -25,13 +25,7 @@ struct thread_info {
25} 25}
26 26
27/* THREAD_SIZE should be 8k, so handle differently for 4k and 8k machines */ 27/* THREAD_SIZE should be 8k, so handle differently for 4k and 8k machines */
28#if PAGE_SHIFT == 13 /* 8k machines */ 28#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
29#define alloc_thread_info(tsk) ((struct thread_info *)__get_free_pages(GFP_KERNEL,0))
30#define free_thread_info(ti) free_pages((unsigned long)(ti),0)
31#else /* otherwise assume 4k pages */
32#define alloc_thread_info(tsk) ((struct thread_info *)__get_free_pages(GFP_KERNEL,1))
33#define free_thread_info(ti) free_pages((unsigned long)(ti),1)
34#endif /* PAGE_SHIFT == 13 */
35 29
36#define init_thread_info (init_task.thread.info) 30#define init_thread_info (init_task.thread.info)
37#define init_stack (init_thread_union.stack) 31#define init_stack (init_thread_union.stack)
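Editor's note: the THREAD_SIZE_ORDER expression replaces the two hand-written allocators because the generic, order-based allocator ends up with the same 8k stack either way. A worked check:

        PAGE_SHIFT == 12 (4k pages): THREAD_SIZE_ORDER = 13 - 12 = 1 -> 2 pages = 8k
        PAGE_SHIFT == 13 (8k pages): THREAD_SIZE_ORDER = 13 - 13 = 0 -> 1 page  = 8k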
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h
index c142fbf2f376..6f3685eab44c 100644
--- a/include/asm-m68knommu/bitops.h
+++ b/include/asm-m68knommu/bitops.h
@@ -14,8 +14,38 @@
14#error only <linux/bitops.h> can be included directly 14#error only <linux/bitops.h> can be included directly
15#endif 15#endif
16 16
17#if defined (__mcfisaaplus__) || defined (__mcfisac__)
18static inline int ffs(unsigned int val)
19{
20 if (!val)
21 return 0;
22
23 asm volatile(
24 "bitrev %0\n\t"
25 "ff1 %0\n\t"
26 : "=d" (val)
27 : "0" (val)
28 );
29 val++;
30 return val;
31}
32
33static inline int __ffs(unsigned int val)
34{
35 asm volatile(
36 "bitrev %0\n\t"
37 "ff1 %0\n\t"
38 : "=d" (val)
39 : "0" (val)
40 );
41 return val;
42}
43
44#else
17#include <asm-generic/bitops/ffs.h> 45#include <asm-generic/bitops/ffs.h>
18#include <asm-generic/bitops/__ffs.h> 46#include <asm-generic/bitops/__ffs.h>
47#endif
48
19#include <asm-generic/bitops/sched.h> 49#include <asm-generic/bitops/sched.h>
20#include <asm-generic/bitops/ffz.h> 50#include <asm-generic/bitops/ffz.h>
21 51
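Editor's note: for readers unfamiliar with the ColdFire instructions used above, bitrev reverses the 32-bit value and ff1 returns the bit offset of the first set bit counting from the MSB, so the pair locates the least significant set bit. A portable C sketch of the same semantics (illustrative only, not an alternative from the patch):

/* ffs(): 1-based index of the least significant set bit, 0 if none set. */
static inline int ffs_reference(unsigned int val)
{
        int i;

        if (!val)
                return 0;
        for (i = 0; i < 32; i++)
                if (val & (1u << i))
                        return i + 1;
        return 0;       /* not reached */
}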
diff --git a/include/asm-m68knommu/byteorder.h b/include/asm-m68knommu/byteorder.h
index 8fcde907b0f9..20bb4426b610 100644
--- a/include/asm-m68knommu/byteorder.h
+++ b/include/asm-m68knommu/byteorder.h
@@ -1,13 +1,27 @@
1#ifndef _M68KNOMMU_BYTEORDER_H 1#ifndef _M68KNOMMU_BYTEORDER_H
2#define _M68KNOMMU_BYTEORDER_H 2#define _M68KNOMMU_BYTEORDER_H
3 3
4#include <asm/types.h> 4#include <linux/types.h>
5 5
6#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) 6#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
7# define __BYTEORDER_HAS_U64__ 7# define __BYTEORDER_HAS_U64__
8# define __SWAB_64_THRU_32__ 8# define __SWAB_64_THRU_32__
9#endif 9#endif
10 10
11#if defined (__mcfisaaplus__) || defined (__mcfisac__)
12static inline __attribute_const__ __u32 ___arch__swab32(__u32 val)
13{
14 asm(
15 "byterev %0"
16 : "=d" (val)
17 : "0" (val)
18 );
19 return val;
20}
21
22#define __arch__swab32(x) ___arch__swab32(x)
23#endif
24
11#include <linux/byteorder/big_endian.h> 25#include <linux/byteorder/big_endian.h>
12 26
13#endif /* _M68KNOMMU_BYTEORDER_H */ 27#endif /* _M68KNOMMU_BYTEORDER_H */
diff --git a/include/asm-m68knommu/commproc.h b/include/asm-m68knommu/commproc.h
index 36e870b468ef..edf5eb6c08d2 100644
--- a/include/asm-m68knommu/commproc.h
+++ b/include/asm-m68knommu/commproc.h
@@ -519,25 +519,6 @@ typedef struct scc_enet {
519#define SICR_ENET_CLKRT ((uint)0x00002c00) 519#define SICR_ENET_CLKRT ((uint)0x00002c00)
520#endif 520#endif
521 521
522#ifdef CONFIG_RPXCLASSIC
523/* Bits in parallel I/O port registers that have to be set/cleared
524 * to configure the pins for SCC1 use.
525 */
526#define PA_ENET_RXD ((ushort)0x0001)
527#define PA_ENET_TXD ((ushort)0x0002)
528#define PA_ENET_TCLK ((ushort)0x0200)
529#define PA_ENET_RCLK ((ushort)0x0800)
530#define PB_ENET_TENA ((uint)0x00001000)
531#define PC_ENET_CLSN ((ushort)0x0010)
532#define PC_ENET_RENA ((ushort)0x0020)
533
534/* Control bits in the SICR to route TCLK (CLK2) and RCLK (CLK4) to
535 * SCC1. Also, make sure GR1 (bit 24) and SC1 (bit 25) are zero.
536 */
537#define SICR_ENET_MASK ((uint)0x000000ff)
538#define SICR_ENET_CLKRT ((uint)0x0000003d)
539#endif
540
541/* SCC Event register as used by Ethernet. 522/* SCC Event register as used by Ethernet.
542*/ 523*/
543#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */ 524#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
diff --git a/include/asm-m68knommu/kvm.h b/include/asm-m68knommu/kvm.h
deleted file mode 100644
index b49d4258dabb..000000000000
--- a/include/asm-m68knommu/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_M68KNOMMU_H
2#define __LINUX_KVM_M68KNOMMU_H
3
4/* m68knommu does not support KVM */
5
6#endif
diff --git a/include/asm-m68knommu/page.h b/include/asm-m68knommu/page.h
index 1e82ebb7d644..3a1ede4544cb 100644
--- a/include/asm-m68knommu/page.h
+++ b/include/asm-m68knommu/page.h
@@ -43,9 +43,6 @@ typedef struct page *pgtable_t;
43#define __pgd(x) ((pgd_t) { (x) } ) 43#define __pgd(x) ((pgd_t) { (x) } )
44#define __pgprot(x) ((pgprot_t) { (x) } ) 44#define __pgprot(x) ((pgprot_t) { (x) } )
45 45
46/* to align the pointer to the (next) page boundary */
47#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
48
49extern unsigned long memory_start; 46extern unsigned long memory_start;
50extern unsigned long memory_end; 47extern unsigned long memory_end;
51 48
diff --git a/include/asm-m68knommu/ptrace.h b/include/asm-m68knommu/ptrace.h
index 47258e86e8c4..8c9194b98548 100644
--- a/include/asm-m68knommu/ptrace.h
+++ b/include/asm-m68knommu/ptrace.h
@@ -68,10 +68,8 @@ struct switch_stack {
68/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ 68/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
69#define PTRACE_GETREGS 12 69#define PTRACE_GETREGS 12
70#define PTRACE_SETREGS 13 70#define PTRACE_SETREGS 13
71#ifdef CONFIG_FPU
72#define PTRACE_GETFPREGS 14 71#define PTRACE_GETFPREGS 14
73#define PTRACE_SETFPREGS 15 72#define PTRACE_SETFPREGS 15
74#endif
75 73
76#ifdef __KERNEL__ 74#ifdef __KERNEL__
77 75
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-m68knommu/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h
index 64c64432bbb8..40f49de69821 100644
--- a/include/asm-m68knommu/system.h
+++ b/include/asm-m68knommu/system.h
@@ -118,6 +118,8 @@ asmlinkage void resume(void);
118#define smp_read_barrier_depends() do { } while(0) 118#define smp_read_barrier_depends() do { } while(0)
119#endif 119#endif
120 120
121#define read_barrier_depends() ((void)0)
122
121#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 123#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
122 124
123struct __xchg_dummy { unsigned long a[100]; }; 125struct __xchg_dummy { unsigned long a[100]; };
@@ -310,4 +312,13 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
310#endif 312#endif
311#define arch_align_stack(x) (x) 313#define arch_align_stack(x) (x)
312 314
315
316static inline int irqs_disabled_flags(unsigned long flags)
317{
318 if (flags & 0x0700)
319 return 0;
320 else
321 return 1;
322}
323
313#endif /* _M68KNOMMU_SYSTEM_H */ 324#endif /* _M68KNOMMU_SYSTEM_H */
diff --git a/include/asm-m68knommu/thread_info.h b/include/asm-m68knommu/thread_info.h
index 95996d978bed..0c9bc095f3f0 100644
--- a/include/asm-m68knommu/thread_info.h
+++ b/include/asm-m68knommu/thread_info.h
@@ -71,10 +71,6 @@ static inline struct thread_info *current_thread_info(void)
71 return ti; 71 return ti;
72} 72}
73 73
74/* thread information allocation */
75#define alloc_thread_info(tsk) ((struct thread_info *) \
76 __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
77#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_SIZE_ORDER)
78#endif /* __ASSEMBLY__ */ 74#endif /* __ASSEMBLY__ */
79 75
80#define PREEMPT_ACTIVE 0x4000000 76#define PREEMPT_ACTIVE 0x4000000
diff --git a/include/asm-mips/kvm.h b/include/asm-mips/kvm.h
deleted file mode 100644
index 093a5b7f796b..000000000000
--- a/include/asm-mips/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_MIPS_H
2#define __LINUX_KVM_MIPS_H
3
4/* mips does not support KVM */
5
6#endif
diff --git a/include/asm-mips/mach-au1x00/au1550_spi.h b/include/asm-mips/mach-au1x00/au1550_spi.h
index 40e6c489833a..08e1958e9410 100644
--- a/include/asm-mips/mach-au1x00/au1550_spi.h
+++ b/include/asm-mips/mach-au1x00/au1550_spi.h
@@ -6,7 +6,6 @@
6#define _AU1550_SPI_H_ 6#define _AU1550_SPI_H_
7 7
8struct au1550_spi_info { 8struct au1550_spi_info {
9 s16 bus_num; /* defines which PSC and IRQ to use */
10 u32 mainclk_hz; /* main input clock frequency of PSC */ 9 u32 mainclk_hz; /* main input clock frequency of PSC */
11 u16 num_chipselect; /* number of chipselects supported */ 10 u16 num_chipselect; /* number of chipselects supported */
12 void (*activate_cs)(struct au1550_spi_info *spi, int cs, int polarity); 11 void (*activate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
diff --git a/include/asm-mips/mach-generic/gpio.h b/include/asm-mips/mach-generic/gpio.h
index e6b376bd9d06..b4e70208da64 100644
--- a/include/asm-mips/mach-generic/gpio.h
+++ b/include/asm-mips/mach-generic/gpio.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_MACH_GENERIC_GPIO_H 1#ifndef __ASM_MACH_GENERIC_GPIO_H
2#define __ASM_MACH_GENERIC_GPIO_H 2#define __ASM_MACH_GENERIC_GPIO_H
3 3
4#ifdef CONFIG_HAVE_GPIO_LIB 4#ifdef CONFIG_GPIOLIB
5#define gpio_get_value __gpio_get_value 5#define gpio_get_value __gpio_get_value
6#define gpio_set_value __gpio_set_value 6#define gpio_set_value __gpio_set_value
7#define gpio_cansleep __gpio_cansleep 7#define gpio_cansleep __gpio_cansleep
diff --git a/include/asm-mips/mach-generic/ide.h b/include/asm-mips/mach-generic/ide.h
index 0f6c251f5fec..73008f7bdc93 100644
--- a/include/asm-mips/mach-generic/ide.h
+++ b/include/asm-mips/mach-generic/ide.h
@@ -19,14 +19,6 @@
19#include <linux/stddef.h> 19#include <linux/stddef.h>
20#include <asm/processor.h> 20#include <asm/processor.h>
21 21
22#ifndef MAX_HWIFS
23# ifdef CONFIG_BLK_DEV_IDEPCI
24#define MAX_HWIFS 10
25# else
26#define MAX_HWIFS 6
27# endif
28#endif
29
30static __inline__ int ide_probe_legacy(void) 22static __inline__ int ide_probe_legacy(void)
31{ 23{
32#ifdef CONFIG_PCI 24#ifdef CONFIG_PCI
@@ -56,46 +48,6 @@ found:
56#endif 48#endif
57} 49}
58 50
59static __inline__ int ide_default_irq(unsigned long base)
60{
61 switch (base) {
62 case 0x1f0: return 14;
63 case 0x170: return 15;
64 case 0x1e8: return 11;
65 case 0x168: return 10;
66 case 0x1e0: return 8;
67 case 0x160: return 12;
68 default:
69 return 0;
70 }
71}
72
73static __inline__ unsigned long ide_default_io_base(int index)
74{
75 if (!ide_probe_legacy())
76 return 0;
77 /*
78 * If PCI is present then it is not safe to poke around
79 * the other legacy IDE ports. Only 0x1f0 and 0x170 are
80 * defined compatibility mode ports for PCI. A user can
81 * override this using ide= but we must default safe.
82 */
83 if (no_pci_devices()) {
84 switch (index) {
85 case 2: return 0x1e8;
86 case 3: return 0x168;
87 case 4: return 0x1e0;
88 case 5: return 0x160;
89 }
90 }
91 switch (index) {
92 case 0: return 0x1f0;
93 case 1: return 0x170;
94 default:
95 return 0;
96 }
97}
98
99/* MIPS port and memory-mapped I/O string operations. */ 51/* MIPS port and memory-mapped I/O string operations. */
100static inline void __ide_flush_prologue(void) 52static inline void __ide_flush_prologue(void)
101{ 53{
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 494f00ba9541..fe7a88ea066e 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -137,9 +137,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
137 137
138#endif /* !__ASSEMBLY__ */ 138#endif /* !__ASSEMBLY__ */
139 139
140/* to align the pointer to the (next) page boundary */
141#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
142
143/* 140/*
144 * __pa()/__va() should be used only during mem init. 141 * __pa()/__va() should be used only during mem init.
145 */ 142 */
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index 58cbac5a64e4..a1e4453469f9 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count;
45 * This decides where the kernel will search for a free chunk of vm 45 * This decides where the kernel will search for a free chunk of vm
46 * space during mmap's. 46 * space during mmap's.
47 */ 47 */
48#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) 48#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE))
49#endif 49#endif
50 50
51#ifdef CONFIG_64BIT 51#ifdef CONFIG_64BIT
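The old TASK_UNMAPPED_BASE expression only cleared the one bit equal to PAGE_SIZE, which does not give a page-aligned address; the PAGE_ALIGN() form rounds up to a real page boundary. A hedged worked example (the 2 GiB TASK_SIZE and 4 KiB page size are illustrative values, not taken from the hunk):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long task_size = 0x80000000UL;	/* example: 2 GiB */
	unsigned long old = (task_size / 3) & ~(PAGE_SIZE);
	unsigned long new = PAGE_ALIGN(task_size / 3);

	/* old: 0x2aaaaaaa (bit 12 happened to be clear, nothing changed) */
	/* new: 0x2aaab000 (a genuine page boundary)                       */
	printf("old=0x%lx new=0x%lx\n", old, new);
	return 0;
}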
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-mips/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-mips/socket.h b/include/asm-mips/socket.h
index 63f60254d308..facc2d7a87ca 100644
--- a/include/asm-mips/socket.h
+++ b/include/asm-mips/socket.h
@@ -102,6 +102,13 @@ enum sock_type {
102}; 102};
103 103
104#define SOCK_MAX (SOCK_PACKET + 1) 104#define SOCK_MAX (SOCK_PACKET + 1)
 105/* Mask which covers at least up to SOCK_MAX-1. The
 106 * remaining bits are used as flags. */
107#define SOCK_TYPE_MASK 0xf
108
109/* Flags for socket, socketpair, paccept */
110#define SOCK_CLOEXEC O_CLOEXEC
111#define SOCK_NONBLOCK O_NONBLOCK
105 112
106#define ARCH_HAS_SOCKET_TYPES 1 113#define ARCH_HAS_SOCKET_TYPES 1
107 114
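With SOCK_CLOEXEC and SOCK_NONBLOCK defined, the socket type stays in the low bits covered by SOCK_TYPE_MASK and the flags are OR'ed on top in a single socket() call. A minimal userspace sketch, assuming a Linux toolchain that already exposes these flags:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* Type in the low bits, flags OR'ed on top, one syscall. */
	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	close(fd);
	return 0;
}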
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index b2772df1a1bd..bb3060699df2 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -82,6 +82,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
82#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 82#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
83#define THREAD_MASK (THREAD_SIZE - 1UL) 83#define THREAD_MASK (THREAD_SIZE - 1UL)
84 84
85#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
86
85#ifdef CONFIG_DEBUG_STACK_USAGE 87#ifdef CONFIG_DEBUG_STACK_USAGE
86#define alloc_thread_info(tsk) \ 88#define alloc_thread_info(tsk) \
87({ \ 89({ \
diff --git a/include/asm-mn10300/ide.h b/include/asm-mn10300/ide.h
index dc235121ec42..6adcdd92e83d 100644
--- a/include/asm-mn10300/ide.h
+++ b/include/asm-mn10300/ide.h
@@ -23,10 +23,6 @@
23#undef SUPPORT_VLB_SYNC 23#undef SUPPORT_VLB_SYNC
24#define SUPPORT_VLB_SYNC 0 24#define SUPPORT_VLB_SYNC 0
25 25
26#ifndef MAX_HWIFS
27#define MAX_HWIFS 8
28#endif
29
30/* 26/*
31 * some bits needed for parts of the IDE subsystem to compile 27 * some bits needed for parts of the IDE subsystem to compile
32 */ 28 */
diff --git a/include/asm-mn10300/kvm.h b/include/asm-mn10300/kvm.h
deleted file mode 100644
index f6b609ff4a57..000000000000
--- a/include/asm-mn10300/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_MN10300_H
2#define __LINUX_KVM_MN10300_H
3
4/* mn10300 does not support KVM */
5
6#endif
diff --git a/include/asm-mn10300/page.h b/include/asm-mn10300/page.h
index 124971b9fb9b..8288e124165b 100644
--- a/include/asm-mn10300/page.h
+++ b/include/asm-mn10300/page.h
@@ -61,9 +61,6 @@ typedef struct page *pgtable_t;
61 61
62#endif /* !__ASSEMBLY__ */ 62#endif /* !__ASSEMBLY__ */
63 63
64/* to align the pointer to the (next) page boundary */
65#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
66
67/* 64/*
68 * This handles the memory map.. We could make this a config 65 * This handles the memory map.. We could make this a config
69 * option, but too many people screw it up, and too few need 66 * option, but too many people screw it up, and too few need
diff --git a/include/asm-mn10300/pci.h b/include/asm-mn10300/pci.h
index 205192c52bb5..cd9cc5c89cea 100644
--- a/include/asm-mn10300/pci.h
+++ b/include/asm-mn10300/pci.h
@@ -74,15 +74,6 @@ struct pci_dev;
74/* This is always fine. */ 74/* This is always fine. */
75#define pci_dac_dma_supported(pci_dev, mask) (0) 75#define pci_dac_dma_supported(pci_dev, mask) (0)
76 76
77/*
78 * These macros should be used after a pci_map_sg call has been done
79 * to get bus addresses of each of the SG entries and their lengths.
80 * You should only work with the number of sg entries pci_map_sg
81 * returns.
82 */
83#define sg_dma_address(sg) ((sg)->dma_address)
84#define sg_dma_len(sg) ((sg)->length)
85
86/* Return the index of the PCI controller for device. */ 77/* Return the index of the PCI controller for device. */
87static inline int pci_controller_num(struct pci_dev *dev) 78static inline int pci_controller_num(struct pci_dev *dev)
88{ 79{
diff --git a/include/asm-mn10300/ptrace.h b/include/asm-mn10300/ptrace.h
index b3684689fcce..7b06cc623d8b 100644
--- a/include/asm-mn10300/ptrace.h
+++ b/include/asm-mn10300/ptrace.h
@@ -88,12 +88,16 @@ extern struct pt_regs *__frame; /* current frame pointer */
88/* options set using PTRACE_SETOPTIONS */ 88/* options set using PTRACE_SETOPTIONS */
89#define PTRACE_O_TRACESYSGOOD 0x00000001 89#define PTRACE_O_TRACESYSGOOD 0x00000001
90 90
91#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 91#if defined(__KERNEL__)
92
93#if !defined(__ASSEMBLY__)
92#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL) 94#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
93#define instruction_pointer(regs) ((regs)->pc) 95#define instruction_pointer(regs) ((regs)->pc)
94extern void show_regs(struct pt_regs *); 96extern void show_regs(struct pt_regs *);
95#endif 97#endif /* !__ASSEMBLY */
96 98
97#define profile_pc(regs) ((regs)->pc) 99#define profile_pc(regs) ((regs)->pc)
98 100
101#endif /* __KERNEL__ */
102
99#endif /* _ASM_PTRACE_H */ 103#endif /* _ASM_PTRACE_H */
diff --git a/include/asm-mn10300/scatterlist.h b/include/asm-mn10300/scatterlist.h
index e29d91dbcf2b..67535901b9ff 100644
--- a/include/asm-mn10300/scatterlist.h
+++ b/include/asm-mn10300/scatterlist.h
@@ -43,4 +43,13 @@ struct scatterlist {
43 43
44#define ISA_DMA_THRESHOLD (0x00ffffff) 44#define ISA_DMA_THRESHOLD (0x00ffffff)
45 45
46/*
47 * These macros should be used after a pci_map_sg call has been done
48 * to get bus addresses of each of the SG entries and their lengths.
49 * You should only work with the number of sg entries pci_map_sg
50 * returns.
51 */
52#define sg_dma_address(sg) ((sg)->dma_address)
53#define sg_dma_len(sg) ((sg)->length)
54
46#endif /* _ASM_SCATTERLIST_H */ 55#endif /* _ASM_SCATTERLIST_H */
diff --git a/include/asm-mn10300/semaphore.h b/include/asm-mn10300/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-mn10300/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-mn10300/thread_info.h b/include/asm-mn10300/thread_info.h
index e397e7192785..78a3881f3c12 100644
--- a/include/asm-mn10300/thread_info.h
+++ b/include/asm-mn10300/thread_info.h
@@ -112,6 +112,8 @@ static inline unsigned long current_stack_pointer(void)
112 return sp; 112 return sp;
113} 113}
114 114
115#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
116
115/* thread information allocation */ 117/* thread information allocation */
116#ifdef CONFIG_DEBUG_STACK_USAGE 118#ifdef CONFIG_DEBUG_STACK_USAGE
117#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) 119#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL)
diff --git a/include/asm-parisc/ide.h b/include/asm-parisc/ide.h
index db0c94410095..c246ef75017d 100644
--- a/include/asm-parisc/ide.h
+++ b/include/asm-parisc/ide.h
@@ -13,10 +13,6 @@
13 13
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15 15
16#ifndef MAX_HWIFS
17#define MAX_HWIFS 2
18#endif
19
20#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id)) 16#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
21#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id)) 17#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
22#define ide_request_region(from,extent,name) request_region((from), (extent), (name)) 18#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
diff --git a/include/asm-parisc/kvm.h b/include/asm-parisc/kvm.h
deleted file mode 100644
index 00cc45812547..000000000000
--- a/include/asm-parisc/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_PARISC_H
2#define __LINUX_KVM_PARISC_H
3
4/* parisc does not support KVM */
5
6#endif
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index 27d50b859541..c3941f09a878 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -119,10 +119,6 @@ extern int npmem_ranges;
119#define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY) 119#define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY)
120#define PTE_ENTRY_SIZE (1UL << BITS_PER_PTE_ENTRY) 120#define PTE_ENTRY_SIZE (1UL << BITS_PER_PTE_ENTRY)
121 121
122/* to align the pointer to the (next) page boundary */
123#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
124
125
126#define LINUX_GATEWAY_SPACE 0 122#define LINUX_GATEWAY_SPACE 0
127 123
128/* This governs the relationship between virtual and physical addresses. 124/* This governs the relationship between virtual and physical addresses.
diff --git a/include/asm-parisc/ptrace.h b/include/asm-parisc/ptrace.h
index 93f990e418f1..3e94c5d85ff5 100644
--- a/include/asm-parisc/ptrace.h
+++ b/include/asm-parisc/ptrace.h
@@ -33,7 +33,6 @@ struct pt_regs {
33 unsigned long ipsw; /* CR22 */ 33 unsigned long ipsw; /* CR22 */
34}; 34};
35 35
36#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
37/* 36/*
38 * The numbers chosen here are somewhat arbitrary but absolutely MUST 37 * The numbers chosen here are somewhat arbitrary but absolutely MUST
39 * not overlap with any of the number assigned in <linux/ptrace.h>. 38 * not overlap with any of the number assigned in <linux/ptrace.h>.
@@ -43,8 +42,11 @@ struct pt_regs {
43 * since we have taken branch traps too) 42 * since we have taken branch traps too)
44 */ 43 */
45#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */ 44#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
45
46#ifdef __KERNEL__ 46#ifdef __KERNEL__
47 47
48#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
49
48/* XXX should we use iaoq[1] or iaoq[0] ? */ 50/* XXX should we use iaoq[1] or iaoq[0] ? */
49#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) 51#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0)
50#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) 52#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-parisc/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-parisc/socket.h b/include/asm-parisc/socket.h
index 69a7a0d30b02..fba402c95ac2 100644
--- a/include/asm-parisc/socket.h
+++ b/include/asm-parisc/socket.h
@@ -54,4 +54,9 @@
54 54
55#define SO_MARK 0x401f 55#define SO_MARK 0x401f
56 56
57/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
58 * have to define SOCK_NONBLOCK to a different value here.
59 */
60#define SOCK_NONBLOCK 0x40000000
61
57#endif /* _ASM_SOCKET_H */ 62#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-parisc/thread_info.h b/include/asm-parisc/thread_info.h
index 2d9c7500867b..9f812741c355 100644
--- a/include/asm-parisc/thread_info.h
+++ b/include/asm-parisc/thread_info.h
@@ -34,15 +34,11 @@ struct thread_info {
34 34
35/* thread information allocation */ 35/* thread information allocation */
36 36
37#define THREAD_ORDER 2 37#define THREAD_SIZE_ORDER 2
38/* Be sure to hunt all references to this down when you change the size of 38/* Be sure to hunt all references to this down when you change the size of
39 * the kernel stack */ 39 * the kernel stack */
40#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 40#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
41#define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER) 41#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
42
43#define alloc_thread_info(tsk) ((struct thread_info *) \
44 __get_free_pages(GFP_KERNEL, THREAD_ORDER))
45#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
46 42
47/* how to get the thread information struct from C */ 43/* how to get the thread information struct from C */
48#define current_thread_info() ((struct thread_info *)mfctl(30)) 44#define current_thread_info() ((struct thread_info *)mfctl(30))
diff --git a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
index 04ce8f8a2ee7..5ab7d7fe198c 100644
--- a/include/asm-powerpc/Kbuild
+++ b/include/asm-powerpc/Kbuild
@@ -29,7 +29,6 @@ unifdef-y += elf.h
29unifdef-y += nvram.h 29unifdef-y += nvram.h
30unifdef-y += param.h 30unifdef-y += param.h
31unifdef-y += posix_types.h 31unifdef-y += posix_types.h
32unifdef-y += ptrace.h
33unifdef-y += seccomp.h 32unifdef-y += seccomp.h
34unifdef-y += signal.h 33unifdef-y += signal.h
35unifdef-y += spu_info.h 34unifdef-y += spu_info.h
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 2a3e9075a5a0..ef8a248dfd55 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -127,6 +127,8 @@ extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
127extern void do_feature_fixups(unsigned long value, void *fixup_start, 127extern void do_feature_fixups(unsigned long value, void *fixup_start,
128 void *fixup_end); 128 void *fixup_end);
129 129
130extern const char *powerpc_base_platform;
131
130#endif /* __ASSEMBLY__ */ 132#endif /* __ASSEMBLY__ */
131 133
132/* CPU kernel features */ 134/* CPU kernel features */
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 89664675b469..80d1f399ee51 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -217,6 +217,14 @@ typedef elf_vrregset_t elf_fpxregset_t;
217 217
218#define ELF_PLATFORM (cur_cpu_spec->platform) 218#define ELF_PLATFORM (cur_cpu_spec->platform)
219 219
220/* While ELF_PLATFORM indicates the ISA supported by the platform, it
221 * may not accurately reflect the underlying behavior of the hardware
222 * (as in the case of running in Power5+ compatibility mode on a
223 * Power6 machine). ELF_BASE_PLATFORM allows ld.so to load libraries
224 * that are tuned for the real hardware.
225 */
226#define ELF_BASE_PLATFORM (powerpc_base_platform)
227
220#ifdef __powerpc64__ 228#ifdef __powerpc64__
221# define ELF_PLAT_INIT(_r, load_addr) do { \ 229# define ELF_PLAT_INIT(_r, load_addr) do { \
222 _r->gpr[2] = load_addr; \ 230 _r->gpr[2] = load_addr; \
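ELF_BASE_PLATFORM feeds the new AT_BASE_PLATFORM aux-vector entry added elsewhere in this series; that mapping is our reading of the series, not spelled out in this hunk. A hedged userspace sketch of inspecting both strings, assuming a libc that provides getauxval() and defines AT_BASE_PLATFORM:

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), assumed available */
#include <elf.h>		/* AT_PLATFORM / AT_BASE_PLATFORM */

int main(void)
{
	const char *plat = (const char *)getauxval(AT_PLATFORM);
	const char *base = (const char *)getauxval(AT_BASE_PLATFORM);

	/* e.g. "power5+" vs "power6" when running in compatibility mode. */
	printf("AT_PLATFORM=%s\n", plat ? plat : "(none)");
	printf("AT_BASE_PLATFORM=%s\n", base ? base : "(none)");
	return 0;
}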
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index ef328995ba9d..3a179827528d 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -46,6 +46,7 @@
46#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000) 46#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
47#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) 47#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
48#define FW_FEATURE_BULK_REMOVE ASM_CONST(0x0000000002000000) 48#define FW_FEATURE_BULK_REMOVE ASM_CONST(0x0000000002000000)
49#define FW_FEATURE_CMO ASM_CONST(0x0000000004000000)
49 50
50#ifndef __ASSEMBLY__ 51#ifndef __ASSEMBLY__
51 52
@@ -58,7 +59,7 @@ enum {
58 FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ | 59 FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
59 FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | 60 FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
60 FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE | 61 FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
61 FW_FEATURE_SPLPAR | FW_FEATURE_LPAR, 62 FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | FW_FEATURE_CMO,
62 FW_FEATURE_PSERIES_ALWAYS = 0, 63 FW_FEATURE_PSERIES_ALWAYS = 0,
63 FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, 64 FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
64 FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, 65 FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
diff --git a/include/asm-powerpc/gpio.h b/include/asm-powerpc/gpio.h
index 77ad3a890f30..ea04632399d8 100644
--- a/include/asm-powerpc/gpio.h
+++ b/include/asm-powerpc/gpio.h
@@ -17,7 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <asm-generic/gpio.h> 18#include <asm-generic/gpio.h>
19 19
20#ifdef CONFIG_HAVE_GPIO_LIB 20#ifdef CONFIG_GPIOLIB
21 21
22/* 22/*
23 * We don't (yet) implement inlined/rapid versions for on-chip gpios. 23 * We don't (yet) implement inlined/rapid versions for on-chip gpios.
@@ -51,6 +51,6 @@ static inline int irq_to_gpio(unsigned int irq)
51 return -EINVAL; 51 return -EINVAL;
52} 52}
53 53
54#endif /* CONFIG_HAVE_GPIO_LIB */ 54#endif /* CONFIG_GPIOLIB */
55 55
56#endif /* __ASM_POWERPC_GPIO_H */ 56#endif /* __ASM_POWERPC_GPIO_H */
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
index be32ff02f4a0..26f0d0ab27a5 100644
--- a/include/asm-powerpc/hugetlb.h
+++ b/include/asm-powerpc/hugetlb.h
@@ -7,7 +7,7 @@
7int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, 7int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
8 unsigned long len); 8 unsigned long len);
9 9
10void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, 10void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
11 unsigned long end, unsigned long floor, 11 unsigned long end, unsigned long floor,
12 unsigned long ceiling); 12 unsigned long ceiling);
13 13
@@ -21,11 +21,13 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
21 * If the arch doesn't supply something else, assume that hugepage 21 * If the arch doesn't supply something else, assume that hugepage
22 * size aligned regions are ok without further preparation. 22 * size aligned regions are ok without further preparation.
23 */ 23 */
24static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) 24static inline int prepare_hugepage_range(struct file *file,
25 unsigned long addr, unsigned long len)
25{ 26{
26 if (len & ~HPAGE_MASK) 27 struct hstate *h = hstate_file(file);
28 if (len & ~huge_page_mask(h))
27 return -EINVAL; 29 return -EINVAL;
28 if (addr & ~HPAGE_MASK) 30 if (addr & ~huge_page_mask(h))
29 return -EINVAL; 31 return -EINVAL;
30 return 0; 32 return 0;
31} 33}
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index bf6cd7cb996c..fbe2932fa9e9 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -92,6 +92,11 @@
92#define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */ 92#define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */
93#define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */ 93#define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */
94#define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */ 94#define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */
95#define H_PAGE_STATE_CHANGE (1UL<<(63-28))
96#define H_PAGE_UNUSED ((1UL<<(63-29)) | (1UL<<(63-30)))
97#define H_PAGE_SET_UNUSED (H_PAGE_STATE_CHANGE | H_PAGE_UNUSED)
98#define H_PAGE_SET_LOANED (H_PAGE_SET_UNUSED | (1UL<<(63-31)))
99#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE
95#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */ 100#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
96#define H_ANDCOND (1UL<<(63-33)) 101#define H_ANDCOND (1UL<<(63-33))
97#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ 102#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
@@ -210,7 +215,9 @@
210#define H_JOIN 0x298 215#define H_JOIN 0x298
211#define H_VASI_STATE 0x2A4 216#define H_VASI_STATE 0x2A4
212#define H_ENABLE_CRQ 0x2B0 217#define H_ENABLE_CRQ 0x2B0
213#define MAX_HCALL_OPCODE H_ENABLE_CRQ 218#define H_SET_MPP 0x2D0
219#define H_GET_MPP 0x2D4
220#define MAX_HCALL_OPCODE H_GET_MPP
214 221
215#ifndef __ASSEMBLY__ 222#ifndef __ASSEMBLY__
216 223
@@ -270,6 +277,20 @@ struct hcall_stats {
270}; 277};
271#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) 278#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1)
272 279
280struct hvcall_mpp_data {
281 unsigned long entitled_mem;
282 unsigned long mapped_mem;
283 unsigned short group_num;
284 unsigned short pool_num;
285 unsigned char mem_weight;
286 unsigned char unallocated_mem_weight;
287 unsigned long unallocated_entitlement; /* value in bytes */
288 unsigned long pool_size;
289 signed long loan_request;
290 unsigned long backing_mem;
291};
292
293int h_get_mpp(struct hvcall_mpp_data *);
273#endif /* __ASSEMBLY__ */ 294#endif /* __ASSEMBLY__ */
274#endif /* __KERNEL__ */ 295#endif /* __KERNEL__ */
275#endif /* _ASM_POWERPC_HVCALL_H */ 296#endif /* _ASM_POWERPC_HVCALL_H */
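h_get_mpp() wraps the new H_GET_MPP hcall and fills in struct hvcall_mpp_data. The fragment below is a rough kernel-context sketch of reading the entitlement numbers on pseries; the helper name is hypothetical and only h_get_mpp() and the structure fields come from the header above:

/* Kernel-context sketch only; depends on the pseries hcall plumbing. */
#include <linux/kernel.h>
#include <asm/hvcall.h>

static void report_entitlement(void)	/* hypothetical helper */
{
	struct hvcall_mpp_data mpp;

	if (h_get_mpp(&mpp))
		return;		/* nonzero return treated as failure here */

	pr_info("entitled %lu bytes, backing %lu bytes, loan request %ld\n",
		mpp.entitled_mem, mpp.backing_mem, mpp.loan_request);
}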
diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h
index 3d90bf7d3d73..1aaf27be8741 100644
--- a/include/asm-powerpc/ide.h
+++ b/include/asm-powerpc/ide.h
@@ -14,14 +14,6 @@
14#endif 14#endif
15#include <asm/io.h> 15#include <asm/io.h>
16 16
17#ifndef MAX_HWIFS
18#ifdef __powerpc64__
19#define MAX_HWIFS 10
20#else
21#define MAX_HWIFS 8
22#endif
23#endif
24
25#define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c)) 17#define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c))
26#define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c)) 18#define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c))
27#define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c)) 19#define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c))
@@ -40,16 +32,6 @@ static __inline__ int ide_default_irq(unsigned long base)
40 case 0x170: return 15; 32 case 0x170: return 15;
41 } 33 }
42#endif 34#endif
43#ifdef CONFIG_PPC_PREP
44 switch (base) {
45 case 0x1f0: return 13;
46 case 0x170: return 13;
47 case 0x1e8: return 11;
48 case 0x168: return 10;
49 case 0xfff0: return 14; /* MCP(N)750 ide0 */
50 case 0xffe0: return 15; /* MCP(N)750 ide1 */
51 }
52#endif
53 return 0; 35 return 0;
54} 36}
55 37
@@ -62,14 +44,6 @@ static __inline__ unsigned long ide_default_io_base(int index)
62 case 1: return 0x170; 44 case 1: return 0x170;
63 } 45 }
64#endif 46#endif
65#ifdef CONFIG_PPC_PREP
66 switch (index) {
67 case 0: return 0x1f0;
68 case 1: return 0x170;
69 case 2: return 0x1e8;
70 case 3: return 0x168;
71 }
72#endif
73 return 0; 47 return 0;
74} 48}
75 49
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 8b627823f5f9..77c7fa025e65 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -617,7 +617,8 @@ static inline void iosync(void)
617 * and can be hooked by the platform via ppc_md 617 * and can be hooked by the platform via ppc_md
618 * 618 *
619 * * ioremap_flags allows to specify the page flags as an argument and can 619 * * ioremap_flags allows to specify the page flags as an argument and can
620 * also be hooked by the platform via ppc_md 620 * also be hooked by the platform via ppc_md. ioremap_prot is the exact
621 * same thing as ioremap_flags.
621 * 622 *
622 * * ioremap_nocache is identical to ioremap 623 * * ioremap_nocache is identical to ioremap
623 * 624 *
@@ -639,6 +640,8 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
639extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size, 640extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
640 unsigned long flags); 641 unsigned long flags);
641#define ioremap_nocache(addr, size) ioremap((addr), (size)) 642#define ioremap_nocache(addr, size) ioremap((addr), (size))
643#define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot))
644
642extern void iounmap(volatile void __iomem *addr); 645extern void iounmap(volatile void __iomem *addr);
643 646
644extern void __iomem *__ioremap(phys_addr_t, unsigned long size, 647extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
diff --git a/include/asm-powerpc/kgdb.h b/include/asm-powerpc/kgdb.h
index b617dac82969..1399caf719ae 100644
--- a/include/asm-powerpc/kgdb.h
+++ b/include/asm-powerpc/kgdb.h
@@ -1,57 +1,65 @@
1/* 1/*
2 * kgdb.h: Defines and declarations for serial line source level 2 * include/asm-powerpc/kgdb.h
3 * remote debugging of the Linux kernel using gdb.
4 * 3 *
4 * The PowerPC (32/64) specific defines / externs for KGDB. Based on
5 * the previous 32bit and 64bit specific files, which had the following
6 * copyrights:
7 *
8 * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
9 * PPC Mods (C) 2004 Tom Rini (trini@mvista.com)
10 * PPC Mods (C) 2003 John Whitney (john.whitney@timesys.com)
5 * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu) 11 * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu)
6 * 12 *
13 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 14 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
15 * Author: Tom Rini <trini@kernel.crashing.org>
16 *
17 * 2006 (c) MontaVista Software, Inc. This file is licensed under
18 * the terms of the GNU General Public License version 2. This program
19 * is licensed "as is" without any warranty of any kind, whether express
20 * or implied.
8 */ 21 */
9#ifdef __KERNEL__ 22#ifdef __KERNEL__
10#ifndef _PPC_KGDB_H 23#ifndef __POWERPC_KGDB_H__
11#define _PPC_KGDB_H 24#define __POWERPC_KGDB_H__
12 25
13#ifndef __ASSEMBLY__ 26#ifndef __ASSEMBLY__
14 27
15/* Things specific to the gen550 backend. */ 28#define BREAK_INSTR_SIZE 4
16struct uart_port; 29#define BUFMAX ((NUMREGBYTES * 2) + 512)
17 30#define OUTBUFMAX ((NUMREGBYTES * 2) + 512)
18extern void gen550_progress(char *, unsigned short); 31static inline void arch_kgdb_breakpoint(void)
19extern void gen550_kgdb_map_scc(void); 32{
20extern void gen550_init(int, struct uart_port *); 33 asm(".long 0x7d821008"); /* twge r2, r2 */
21 34}
22/* Things specific to the pmac backend. */ 35#define CACHE_FLUSH_IS_SAFE 1
23extern void zs_kgdb_hook(int tty_num);
24
25/* To init the kgdb engine. (called by serial hook)*/
26extern void set_debug_traps(void);
27
28/* To enter the debugger explicitly. */
29extern void breakpoint(void);
30
31/* For taking exceptions
32 * these are defined in traps.c
33 */
34extern int (*debugger)(struct pt_regs *regs);
35extern int (*debugger_bpt)(struct pt_regs *regs);
36extern int (*debugger_sstep)(struct pt_regs *regs);
37extern int (*debugger_iabr_match)(struct pt_regs *regs);
38extern int (*debugger_dabr_match)(struct pt_regs *regs);
39extern void (*debugger_fault_handler)(struct pt_regs *regs);
40
41/* What we bring to the party */
42int kgdb_bpt(struct pt_regs *regs);
43int kgdb_sstep(struct pt_regs *regs);
44void kgdb(struct pt_regs *regs);
45int kgdb_iabr_match(struct pt_regs *regs);
46int kgdb_dabr_match(struct pt_regs *regs);
47 36
 37/* The number of bytes of registers we have to save depends on a few
38 * things. For 64bit we default to not including vector registers and
39 * vector state registers. */
40#ifdef CONFIG_PPC64
48/* 41/*
49 * external low-level support routines (ie macserial.c) 42 * 64 bit (8 byte) registers:
43 * 32 gpr, 32 fpr, nip, msr, link, ctr
44 * 32 bit (4 byte) registers:
45 * ccr, xer, fpscr
50 */ 46 */
51extern void kgdb_interruptible(int); /* control interrupts from serial */ 47#define NUMREGBYTES ((68 * 8) + (3 * 4))
52extern void putDebugChar(char); /* write a single character */ 48#define NUMCRITREGBYTES 184
53extern char getDebugChar(void); /* read and return a single char */ 49#else /* CONFIG_PPC32 */
54 50/* On non-E500 family PPC32 we determine the size by picking the last
51 * register we need, but on E500 we skip sections so we list what we
52 * need to store, and add it up. */
53#ifndef CONFIG_E500
54#define MAXREG (PT_FPSCR+1)
55#else
56/* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/
57#define MAXREG ((32*2)+6+2+1)
58#endif
59#define NUMREGBYTES (MAXREG * sizeof(int))
60/* CR/LR, R1, R2, R13-R31 inclusive. */
61#define NUMCRITREGBYTES (23 * sizeof(int))
62#endif /* 32/64 */
55#endif /* !(__ASSEMBLY__) */ 63#endif /* !(__ASSEMBLY__) */
56#endif /* !(_PPC_KGDB_H) */ 64#endif /* !__POWERPC_KGDB_H__ */
57#endif /* __KERNEL__ */ 65#endif /* __KERNEL__ */
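To make the 64-bit register buffer size concrete: 68 eight-byte registers (32 gpr, 32 fpr, nip, msr, link, ctr) plus three four-byte ones (ccr, xer, fpscr) give 556 bytes. A one-line compile-time check (the C11 _Static_assert is our addition, not something the header uses):

/* 32 gpr + 32 fpr + nip + msr + link + ctr = 68 8-byte regs,
 * plus ccr, xer and fpscr as 4-byte regs. */
_Static_assert((68 * 8) + (3 * 4) == 556, "ppc64 NUMREGBYTES");

int main(void) { return 0; }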
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h
index 567ed92cd91f..2fe268b10333 100644
--- a/include/asm-powerpc/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -125,7 +125,10 @@ struct lppaca {
125 // NOTE: This value will ALWAYS be zero for dedicated processors and 125 // NOTE: This value will ALWAYS be zero for dedicated processors and
126 // will NEVER be zero for shared processors (ie, initialized to a 1). 126 // will NEVER be zero for shared processors (ie, initialized to a 1).
127 volatile u32 yield_count; // PLIC increments each dispatchx00-x03 127 volatile u32 yield_count; // PLIC increments each dispatchx00-x03
128 u8 reserved6[124]; // Reserved x04-x7F 128 u32 reserved6;
129 volatile u64 cmo_faults; // CMO page fault count x08-x0F
130 volatile u64 cmo_fault_time; // CMO page fault time x10-x17
131 u8 reserved7[104]; // Reserved x18-x7F
129 132
130//============================================================================= 133//=============================================================================
131// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data 134// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 1233d735fd28..893aafd87fde 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -76,7 +76,7 @@ struct machdep_calls {
76 * destroyed as well */ 76 * destroyed as well */
77 void (*hpte_clear_all)(void); 77 void (*hpte_clear_all)(void);
78 78
79 void (*tce_build)(struct iommu_table * tbl, 79 int (*tce_build)(struct iommu_table *tbl,
80 long index, 80 long index,
81 long npages, 81 long npages,
82 unsigned long uaddr, 82 unsigned long uaddr,
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index d1dc16afb118..19c7a9403490 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -194,9 +194,9 @@ extern int mmu_ci_restrictions;
194 194
195#ifdef CONFIG_HUGETLB_PAGE 195#ifdef CONFIG_HUGETLB_PAGE
196/* 196/*
197 * The page size index of the huge pages for use by hugetlbfs 197 * The page size indexes of the huge pages for use by hugetlbfs
198 */ 198 */
199extern int mmu_huge_psize; 199extern unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];
200 200
201#endif /* CONFIG_HUGETLB_PAGE */ 201#endif /* CONFIG_HUGETLB_PAGE */
202 202
@@ -281,6 +281,8 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
281 unsigned long pstart, unsigned long mode, 281 unsigned long pstart, unsigned long mode,
282 int psize, int ssize); 282 int psize, int ssize);
283extern void set_huge_psize(int psize); 283extern void set_huge_psize(int psize);
284extern void add_gpage(unsigned long addr, unsigned long page_size,
285 unsigned long number_of_pages);
284extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr); 286extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
285 287
286extern void htab_initialize(void); 288extern void htab_initialize(void);
diff --git a/include/asm-powerpc/mpc52xx_psc.h b/include/asm-powerpc/mpc52xx_psc.h
index 710c5d36efaa..8917ed630565 100644
--- a/include/asm-powerpc/mpc52xx_psc.h
+++ b/include/asm-powerpc/mpc52xx_psc.h
@@ -60,10 +60,12 @@
60#define MPC52xx_PSC_RXTX_FIFO_ALARM 0x0002 60#define MPC52xx_PSC_RXTX_FIFO_ALARM 0x0002
61#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001 61#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001
62 62
63/* PSC interrupt mask bits */ 63/* PSC interrupt status/mask bits */
64#define MPC52xx_PSC_IMR_TXRDY 0x0100 64#define MPC52xx_PSC_IMR_TXRDY 0x0100
65#define MPC52xx_PSC_IMR_RXRDY 0x0200 65#define MPC52xx_PSC_IMR_RXRDY 0x0200
66#define MPC52xx_PSC_IMR_DB 0x0400 66#define MPC52xx_PSC_IMR_DB 0x0400
67#define MPC52xx_PSC_IMR_TXEMP 0x0800
68#define MPC52xx_PSC_IMR_ORERR 0x1000
67#define MPC52xx_PSC_IMR_IPC 0x8000 69#define MPC52xx_PSC_IMR_IPC 0x8000
68 70
69/* PSC input port change bit */ 71/* PSC input port change bit */
@@ -92,6 +94,34 @@
92 94
93#define MPC52xx_PSC_RFNUM_MASK 0x01ff 95#define MPC52xx_PSC_RFNUM_MASK 0x01ff
94 96
97#define MPC52xx_PSC_SICR_DTS1 (1 << 29)
98#define MPC52xx_PSC_SICR_SHDR (1 << 28)
99#define MPC52xx_PSC_SICR_SIM_MASK (0xf << 24)
100#define MPC52xx_PSC_SICR_SIM_UART (0x0 << 24)
101#define MPC52xx_PSC_SICR_SIM_UART_DCD (0x8 << 24)
102#define MPC52xx_PSC_SICR_SIM_CODEC_8 (0x1 << 24)
103#define MPC52xx_PSC_SICR_SIM_CODEC_16 (0x2 << 24)
104#define MPC52xx_PSC_SICR_SIM_AC97 (0x3 << 24)
105#define MPC52xx_PSC_SICR_SIM_SIR (0x8 << 24)
106#define MPC52xx_PSC_SICR_SIM_SIR_DCD (0xc << 24)
107#define MPC52xx_PSC_SICR_SIM_MIR (0x5 << 24)
108#define MPC52xx_PSC_SICR_SIM_FIR (0x6 << 24)
109#define MPC52xx_PSC_SICR_SIM_CODEC_24 (0x7 << 24)
110#define MPC52xx_PSC_SICR_SIM_CODEC_32 (0xf << 24)
111#define MPC52xx_PSC_SICR_GENCLK (1 << 23)
112#define MPC52xx_PSC_SICR_I2S (1 << 22)
113#define MPC52xx_PSC_SICR_CLKPOL (1 << 21)
114#define MPC52xx_PSC_SICR_SYNCPOL (1 << 20)
115#define MPC52xx_PSC_SICR_CELLSLAVE (1 << 19)
116#define MPC52xx_PSC_SICR_CELL2XCLK (1 << 18)
117#define MPC52xx_PSC_SICR_ESAI (1 << 17)
118#define MPC52xx_PSC_SICR_ENAC97 (1 << 16)
119#define MPC52xx_PSC_SICR_SPI (1 << 15)
120#define MPC52xx_PSC_SICR_MSTR (1 << 14)
121#define MPC52xx_PSC_SICR_CPOL (1 << 13)
122#define MPC52xx_PSC_SICR_CPHA (1 << 12)
123#define MPC52xx_PSC_SICR_USEEOF (1 << 11)
124#define MPC52xx_PSC_SICR_DISABLEEOF (1 << 10)
95 125
96/* Structure of the hardware registers */ 126/* Structure of the hardware registers */
97struct mpc52xx_psc { 127struct mpc52xx_psc {
@@ -132,8 +162,12 @@ struct mpc52xx_psc {
132 u8 reserved5[3]; 162 u8 reserved5[3];
133 u8 ctlr; /* PSC + 0x1c */ 163 u8 ctlr; /* PSC + 0x1c */
134 u8 reserved6[3]; 164 u8 reserved6[3];
135 u16 ccr; /* PSC + 0x20 */ 165 /* BitClkDiv field of CCR is byte swapped in
136 u8 reserved7[14]; 166 * the hardware for mpc5200/b compatibility */
167 u32 ccr; /* PSC + 0x20 */
168 u32 ac97_slots; /* PSC + 0x24 */
169 u32 ac97_cmd; /* PSC + 0x28 */
170 u32 ac97_data; /* PSC + 0x2c */
137 u8 ivr; /* PSC + 0x30 */ 171 u8 ivr; /* PSC + 0x30 */
138 u8 reserved8[3]; 172 u8 reserved8[3];
139 u8 ip; /* PSC + 0x34 */ 173 u8 ip; /* PSC + 0x34 */
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index cffdf0eb0df6..e088545cb3f5 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -119,9 +119,6 @@ extern phys_addr_t kernstart_addr;
119/* align addr on a size boundary - adjust address up if needed */ 119/* align addr on a size boundary - adjust address up if needed */
120#define _ALIGN(addr,size) _ALIGN_UP(addr,size) 120#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
121 121
122/* to align the pointer to the (next) page boundary */
123#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
124
125/* 122/*
126 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for 123 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
127 * "kernelness", use is_kernel_addr() - it should do what you want. 124 * "kernelness", use is_kernel_addr() - it should do what you want.
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 02fd80710e9d..043bfdfe4f73 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -90,6 +90,7 @@ extern unsigned int HPAGE_SHIFT;
90#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 90#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
91#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 91#define HPAGE_MASK (~(HPAGE_SIZE - 1))
92#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 92#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
93#define HUGE_MAX_HSTATE 3
93 94
94#endif /* __ASSEMBLY__ */ 95#endif /* __ASSEMBLY__ */
95 96
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 68980990f62a..812a1d8f35cb 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -22,7 +22,7 @@ extern struct kmem_cache *pgtable_cache[];
22#define PUD_CACHE_NUM 1 22#define PUD_CACHE_NUM 1
23#define PMD_CACHE_NUM 1 23#define PMD_CACHE_NUM 1
24#define HUGEPTE_CACHE_NUM 2 24#define HUGEPTE_CACHE_NUM 2
25#define PTE_NONCACHE_NUM 3 /* from GFP rather than kmem_cache */ 25#define PTE_NONCACHE_NUM 7 /* from GFP rather than kmem_cache */
26 26
27static inline pgd_t *pgd_alloc(struct mm_struct *mm) 27static inline pgd_t *pgd_alloc(struct mm_struct *mm)
28{ 28{
@@ -119,7 +119,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
119 __free_page(ptepage); 119 __free_page(ptepage);
120} 120}
121 121
122#define PGF_CACHENUM_MASK 0x3 122#define PGF_CACHENUM_MASK 0x7
123 123
124typedef struct pgtable_free { 124typedef struct pgtable_free {
125 unsigned long val; 125 unsigned long val;
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index fd2090dc1dce..c9601dfb4a1e 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -51,6 +51,9 @@
51#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ 51#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
52 _PAGE_SECONDARY | _PAGE_GROUP_IX) 52 _PAGE_SECONDARY | _PAGE_GROUP_IX)
53 53
54/* There is no 4K PFN hack on 4K pages */
55#define _PAGE_4K_PFN 0
56
54/* PAGE_MASK gives the right answer below, but only by accident */ 57/* PAGE_MASK gives the right answer below, but only by accident */
55/* It should be preserving the high 48 bits and then specifically */ 58/* It should be preserving the high 48 bits and then specifically */
56/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */ 59/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index c5007712473f..7e54adb35596 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -138,7 +138,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
138 unsigned __split = (psize == MMU_PAGE_4K || \ 138 unsigned __split = (psize == MMU_PAGE_4K || \
139 psize == MMU_PAGE_64K_AP); \ 139 psize == MMU_PAGE_64K_AP); \
140 shift = mmu_psize_defs[psize].shift; \ 140 shift = mmu_psize_defs[psize].shift; \
141 for (index = 0; va < __end; index++, va += (1 << shift)) { \ 141 for (index = 0; va < __end; index++, va += (1L << shift)) { \
142 if (!__split || __rpte_sub_valid(rpte, index)) do { \ 142 if (!__split || __rpte_sub_valid(rpte, index)) do { \
143 143
144#define pte_iterate_hashed_end() } while(0); } } while(0) 144#define pte_iterate_hashed_end() } while(0); } } while(0)
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 3a96d001cb75..bdbab72f3ebc 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -395,6 +395,12 @@ extern int icache_44x_need_flush;
395#ifndef _PAGE_EXEC 395#ifndef _PAGE_EXEC
396#define _PAGE_EXEC 0 396#define _PAGE_EXEC 0
397#endif 397#endif
398#ifndef _PAGE_ENDIAN
399#define _PAGE_ENDIAN 0
400#endif
401#ifndef _PAGE_COHERENT
402#define _PAGE_COHERENT 0
403#endif
398#ifndef _PMD_PRESENT_MASK 404#ifndef _PMD_PRESENT_MASK
399#define _PMD_PRESENT_MASK _PMD_PRESENT 405#define _PMD_PRESENT_MASK _PMD_PRESENT
400#endif 406#endif
@@ -405,6 +411,12 @@ extern int icache_44x_need_flush;
405 411
406#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 412#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
407 413
414
415#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
416 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
417 _PAGE_USER | _PAGE_ACCESSED | \
418 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
419 _PAGE_EXEC | _PAGE_HWEXEC)
408/* 420/*
409 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware 421 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
410 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need 422 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -538,6 +550,10 @@ static inline pte_t pte_mkyoung(pte_t pte) {
538 pte_val(pte) |= _PAGE_ACCESSED; return pte; } 550 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
539static inline pte_t pte_mkspecial(pte_t pte) { 551static inline pte_t pte_mkspecial(pte_t pte) {
540 return pte; } 552 return pte; }
553static inline unsigned long pte_pgprot(pte_t pte)
554{
555 return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
556}
541 557
542static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 558static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
543{ 559{
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index ab98a9c80b28..ba8000352b9a 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -117,6 +117,10 @@
117#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) 117#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
118#define HAVE_PAGE_AGP 118#define HAVE_PAGE_AGP
119 119
120#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
121 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
122 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
123 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
120/* PTEIDX nibble */ 124/* PTEIDX nibble */
121#define _PTEIDX_SECONDARY 0x8 125#define _PTEIDX_SECONDARY 0x8
122#define _PTEIDX_GROUP_IX 0x7 126#define _PTEIDX_GROUP_IX 0x7
@@ -262,6 +266,10 @@ static inline pte_t pte_mkhuge(pte_t pte) {
262 return pte; } 266 return pte; }
263static inline pte_t pte_mkspecial(pte_t pte) { 267static inline pte_t pte_mkspecial(pte_t pte) {
264 return pte; } 268 return pte; }
269static inline unsigned long pte_pgprot(pte_t pte)
270{
271 return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
272}
265 273
266/* Atomic PTE updates */ 274/* Atomic PTE updates */
267static inline unsigned long pte_update(struct mm_struct *mm, 275static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index d18ffe7bc7c4..dbb8ca172e44 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -38,6 +38,19 @@ extern void paging_init(void);
38 remap_pfn_range(vma, vaddr, pfn, size, prot) 38 remap_pfn_range(vma, vaddr, pfn, size, prot)
39 39
40#include <asm-generic/pgtable.h> 40#include <asm-generic/pgtable.h>
41
42
43/*
44 * This gets called at the end of handling a page fault, when
45 * the kernel has put a new PTE into the page table for the process.
46 * We use it to ensure coherency between the i-cache and d-cache
47 * for the page which has just been mapped in.
48 * On machines which use an MMU hash table, we use this to put a
49 * corresponding HPTE into the hash table ahead of time, instead of
50 * waiting for the inevitable extra hash-table miss exception.
51 */
52extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
53
41#endif /* __ASSEMBLY__ */ 54#endif /* __ASSEMBLY__ */
42 55
43#endif /* __KERNEL__ */ 56#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-powerpc/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-powerpc/syscalls.h b/include/asm-powerpc/syscalls.h
index 2b8a458f990a..eb8eb400c664 100644
--- a/include/asm-powerpc/syscalls.h
+++ b/include/asm-powerpc/syscalls.h
@@ -31,6 +31,7 @@ asmlinkage int sys_vfork(unsigned long p1, unsigned long p2,
31 unsigned long p3, unsigned long p4, unsigned long p5, 31 unsigned long p3, unsigned long p4, unsigned long p5,
32 unsigned long p6, struct pt_regs *regs); 32 unsigned long p6, struct pt_regs *regs);
33asmlinkage long sys_pipe(int __user *fildes); 33asmlinkage long sys_pipe(int __user *fildes);
34asmlinkage long sys_pipe2(int __user *fildes, int flags);
34asmlinkage long sys_rt_sigaction(int sig, 35asmlinkage long sys_rt_sigaction(int sig,
35 const struct sigaction __user *act, 36 const struct sigaction __user *act,
36 struct sigaction __user *oact, size_t sigsetsize); 37 struct sigaction __user *oact, size_t sigsetsize);
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
index ae7085c65692..e084272ed1c2 100644
--- a/include/asm-powerpc/systbl.h
+++ b/include/asm-powerpc/systbl.h
@@ -316,3 +316,9 @@ COMPAT_SYS(fallocate)
316SYSCALL(subpage_prot) 316SYSCALL(subpage_prot)
317COMPAT_SYS_SPU(timerfd_settime) 317COMPAT_SYS_SPU(timerfd_settime)
318COMPAT_SYS_SPU(timerfd_gettime) 318COMPAT_SYS_SPU(timerfd_gettime)
319COMPAT_SYS_SPU(signalfd4)
320SYSCALL_SPU(eventfd2)
321SYSCALL_SPU(epoll_create1)
322SYSCALL_SPU(dup3)
323SYSCALL_SPU(pipe2)
324SYSCALL(inotify_init1)
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index e6e25e2364eb..d6648c143322 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -110,6 +110,8 @@ static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
110#endif 110#endif
111 111
112extern int set_dabr(unsigned long dabr); 112extern int set_dabr(unsigned long dabr);
113extern void do_dabr(struct pt_regs *regs, unsigned long address,
114 unsigned long error_code);
113extern void print_backtrace(unsigned long *); 115extern void print_backtrace(unsigned long *);
114extern void show_regs(struct pt_regs * regs); 116extern void show_regs(struct pt_regs * regs);
115extern void flush_instruction_cache(void); 117extern void flush_instruction_cache(void);
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index b705c2a7651a..a9db562df69a 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -66,20 +66,12 @@ struct thread_info {
66 66
67#if THREAD_SHIFT >= PAGE_SHIFT 67#if THREAD_SHIFT >= PAGE_SHIFT
68 68
69#define THREAD_ORDER (THREAD_SHIFT - PAGE_SHIFT) 69#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
70
71#ifdef CONFIG_DEBUG_STACK_USAGE
72#define alloc_thread_info(tsk) \
73 ((struct thread_info *)__get_free_pages(GFP_KERNEL | \
74 __GFP_ZERO, THREAD_ORDER))
75#else
76#define alloc_thread_info(tsk) \
77 ((struct thread_info *)__get_free_pages(GFP_KERNEL, THREAD_ORDER))
78#endif
79#define free_thread_info(ti) free_pages((unsigned long)ti, THREAD_ORDER)
80 70
81#else /* THREAD_SHIFT < PAGE_SHIFT */ 71#else /* THREAD_SHIFT < PAGE_SHIFT */
82 72
73#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
74
83extern struct thread_info *alloc_thread_info(struct task_struct *tsk); 75extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
84extern void free_thread_info(struct thread_info *ti); 76extern void free_thread_info(struct thread_info *ti);
85 77
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 5c9108147644..361cd5c7a32b 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -162,16 +162,5 @@ extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
162 162
163#endif 163#endif
164 164
165/*
166 * This gets called at the end of handling a page fault, when
167 * the kernel has put a new PTE into the page table for the process.
168 * We use it to ensure coherency between the i-cache and d-cache
169 * for the page which has just been mapped in.
170 * On machines which use an MMU hash table, we use this to put a
171 * corresponding HPTE into the hash table ahead of time, instead of
172 * waiting for the inevitable extra hash-table miss exception.
173 */
174extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
175
176#endif /*__KERNEL__ */ 165#endif /*__KERNEL__ */
177#endif /* _ASM_POWERPC_TLBFLUSH_H */ 166#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index ce91bb662063..e07d0c76ed77 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -335,10 +335,16 @@
335#define __NR_subpage_prot 310 335#define __NR_subpage_prot 310
336#define __NR_timerfd_settime 311 336#define __NR_timerfd_settime 311
337#define __NR_timerfd_gettime 312 337#define __NR_timerfd_gettime 312
338#define __NR_signalfd4 313
339#define __NR_eventfd2 314
340#define __NR_epoll_create1 315
341#define __NR_dup3 316
342#define __NR_pipe2 317
343#define __NR_inotify_init1 318
338 344
339#ifdef __KERNEL__ 345#ifdef __KERNEL__
340 346
341#define __NR_syscalls 313 347#define __NR_syscalls 319
342 348
343#define __NR__exit __NR_exit 349#define __NR__exit __NR_exit
344#define NR_syscalls __NR_syscalls 350#define NR_syscalls __NR_syscalls
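The six new numbers are the flag-taking variants of existing syscalls (signalfd4, eventfd2, epoll_create1, dup3, pipe2, inotify_init1). A minimal userspace sketch of the pipe2() flavour, assuming glibc wrappers for these calls:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	/* Both ends created close-on-exec and non-blocking in one call. */
	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0) {
		perror("pipe2");
		return 1;
	}
	close(fds[0]);
	close(fds[1]);
	return 0;
}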
diff --git a/include/asm-powerpc/vio.h b/include/asm-powerpc/vio.h
index 56512a968dab..0a290a195946 100644
--- a/include/asm-powerpc/vio.h
+++ b/include/asm-powerpc/vio.h
@@ -39,16 +39,32 @@
39#define VIO_IRQ_DISABLE 0UL 39#define VIO_IRQ_DISABLE 0UL
40#define VIO_IRQ_ENABLE 1UL 40#define VIO_IRQ_ENABLE 1UL
41 41
42/*
43 * VIO CMO minimum entitlement for all devices and spare entitlement
44 */
45#define VIO_CMO_MIN_ENT 1562624
46
42struct iommu_table; 47struct iommu_table;
43 48
44/* 49/**
45 * The vio_dev structure is used to describe virtual I/O devices. 50 * vio_dev - This structure is used to describe virtual I/O devices.
51 *
52 * @desired: set from return of driver's get_desired_dma() function
53 * @entitled: bytes of IO data that has been reserved for this device.
54 * @allocated: bytes of IO data currently in use by the device.
55 * @allocs_failed: number of DMA failures due to insufficient entitlement.
46 */ 56 */
47struct vio_dev { 57struct vio_dev {
48 const char *name; 58 const char *name;
49 const char *type; 59 const char *type;
50 uint32_t unit_address; 60 uint32_t unit_address;
51 unsigned int irq; 61 unsigned int irq;
62 struct {
63 size_t desired;
64 size_t entitled;
65 size_t allocated;
66 atomic_t allocs_failed;
67 } cmo;
52 struct device dev; 68 struct device dev;
53}; 69};
54 70
@@ -56,12 +72,19 @@ struct vio_driver {
56 const struct vio_device_id *id_table; 72 const struct vio_device_id *id_table;
57 int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); 73 int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
58 int (*remove)(struct vio_dev *dev); 74 int (*remove)(struct vio_dev *dev);
75 /* A driver must have a get_desired_dma() function to
76 * be loaded in a CMO environment if it uses DMA.
77 */
78 unsigned long (*get_desired_dma)(struct vio_dev *dev);
59 struct device_driver driver; 79 struct device_driver driver;
60}; 80};
61 81
62extern int vio_register_driver(struct vio_driver *drv); 82extern int vio_register_driver(struct vio_driver *drv);
63extern void vio_unregister_driver(struct vio_driver *drv); 83extern void vio_unregister_driver(struct vio_driver *drv);
64 84
85extern int vio_cmo_entitlement_update(size_t);
86extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired);
87
65extern void __devinit vio_unregister_device(struct vio_dev *dev); 88extern void __devinit vio_unregister_device(struct vio_dev *dev);
66 89
67struct device_node; 90struct device_node;
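
The vio.h changes above add per-device CMO (Cooperative Memory Overcommitment) accounting and require any DMA-using driver to report its desired entitlement through get_desired_dma() before it can be loaded in a CMO environment. A minimal sketch of such a driver registration follows; the driver name, id table entries and byte count are invented for illustration and are not taken from this patch:

	#include <linux/module.h>
	#include <asm/page.h>
	#include <asm/vio.h>

	static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
	{
		return 0;	/* a real driver would map resources and register itself */
	}

	static int example_remove(struct vio_dev *vdev)
	{
		return 0;
	}

	/* Reports how much IO memory (in bytes) the device wants entitled under
	 * CMO; the estimate below is invented for the sketch. */
	static unsigned long example_get_desired_dma(struct vio_dev *vdev)
	{
		return 4 * PAGE_SIZE;
	}

	static const struct vio_device_id example_id_table[] = {
		{ "example", "IBM,example" },	/* hypothetical device type/compat */
		{ "", "" },
	};

	static struct vio_driver example_driver = {
		.id_table        = example_id_table,
		.probe           = example_probe,
		.remove          = example_remove,
		.get_desired_dma = example_get_desired_dma,
		.driver          = { .name = "example-vio", .owner = THIS_MODULE },
	};

	static int __init example_init(void)
	{
		return vio_register_driver(&example_driver);
	}
	module_init(example_init);

A real driver would base the returned estimate on its ring and buffer sizes; the vio_cmo_set_dev_desired() helper declared above presumably exists so that the figure can be adjusted after registration.
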
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
index bb5e9edb9825..63a23415fba6 100644
--- a/include/asm-s390/Kbuild
+++ b/include/asm-s390/Kbuild
@@ -7,7 +7,6 @@ header-y += tape390.h
7header-y += ucontext.h 7header-y += ucontext.h
8header-y += vtoc.h 8header-y += vtoc.h
9header-y += zcrypt.h 9header-y += zcrypt.h
10header-y += kvm.h
11header-y += chsc.h 10header-y += chsc.h
12 11
13unifdef-y += cmb.h 12unifdef-y += cmb.h
diff --git a/include/asm-s390/hugetlb.h b/include/asm-s390/hugetlb.h
index 600a776f8f75..670a1d1745d2 100644
--- a/include/asm-s390/hugetlb.h
+++ b/include/asm-s390/hugetlb.h
@@ -22,7 +22,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
22 * If the arch doesn't supply something else, assume that hugepage 22 * If the arch doesn't supply something else, assume that hugepage
23 * size aligned regions are ok without further preparation. 23 * size aligned regions are ok without further preparation.
24 */ 24 */
25static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) 25static inline int prepare_hugepage_range(struct file *file,
26 unsigned long addr, unsigned long len)
26{ 27{
27 if (len & ~HPAGE_MASK) 28 if (len & ~HPAGE_MASK)
28 return -EINVAL; 29 return -EINVAL;
diff --git a/include/asm-s390/kvm_virtio.h b/include/asm-s390/kvm_virtio.h
index 5c871a990c29..146100224def 100644
--- a/include/asm-s390/kvm_virtio.h
+++ b/include/asm-s390/kvm_virtio.h
@@ -50,4 +50,14 @@ struct kvm_vqconfig {
50#define KVM_S390_VIRTIO_RESET 1 50#define KVM_S390_VIRTIO_RESET 1
51#define KVM_S390_VIRTIO_SET_STATUS 2 51#define KVM_S390_VIRTIO_SET_STATUS 2
52 52
53#ifdef __KERNEL__
54/* early virtio console setup */
55#ifdef CONFIG_VIRTIO_CONSOLE
56extern void s390_virtio_console_init(void);
57#else
58static inline void s390_virtio_console_init(void)
59{
60}
61#endif /* CONFIG_VIRTIO_CONSOLE */
62#endif /* __KERNEL__ */
53#endif 63#endif
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 12fd9c4f0f15..991ba939408c 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -138,9 +138,6 @@ void arch_alloc_page(struct page *page, int order);
138 138
139#endif /* !__ASSEMBLY__ */ 139#endif /* !__ASSEMBLY__ */
140 140
141/* to align the pointer to the (next) page boundary */
142#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
143
144#define __PAGE_OFFSET 0x0UL 141#define __PAGE_OFFSET 0x0UL
145#define PAGE_OFFSET 0x0UL 142#define PAGE_OFFSET 0x0UL
146#define __pa(x) (unsigned long)(x) 143#define __pa(x) (unsigned long)(x)
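
This hunk, and the matching ones for the asm-sh, asm-sparc and asm-um page.h headers further down, drop the per-architecture copy of PAGE_ALIGN; the comment and arithmetic were identical everywhere, so the macro is evidently being consolidated into common code rather than removed. Assuming the shared definition lands in include/linux/mm.h (not visible in this diff), it would look roughly like:

	#define PAGE_ALIGN(addr)	ALIGN(addr, PAGE_SIZE)

which keeps the intent of the deleted (((addr)+PAGE_SIZE-1)&PAGE_MASK) form while reusing the generic ALIGN() helper from linux/kernel.h.
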
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-s390/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 99bbed99a3b2..91a8f93ad355 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -78,10 +78,7 @@ static inline struct thread_info *current_thread_info(void)
78 return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE); 78 return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE);
79} 79}
80 80
81/* thread information allocation */ 81#define THREAD_SIZE_ORDER THREAD_ORDER
82#define alloc_thread_info(tsk) ((struct thread_info *) \
83 __get_free_pages(GFP_KERNEL,THREAD_ORDER))
84#define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
85 82
86#endif 83#endif
87 84
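
Several thread_info.h hunks in this series follow one of two patterns: drop the arch-private stack allocator and supply THREAD_SIZE_ORDER instead (s390 here, um further down), or keep the private allocator and announce it with __HAVE_ARCH_THREAD_INFO_ALLOCATOR (sh, sparc32, sparc64). The generic fallback that consumes THREAD_SIZE_ORDER lives outside this diff, in kernel/fork.c; paraphrased from memory rather than quoted, it is roughly:

	#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
	static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
	{
	#ifdef CONFIG_DEBUG_STACK_USAGE
		gfp_t mask = GFP_KERNEL | __GFP_ZERO;
	#else
		gfp_t mask = GFP_KERNEL;
	#endif
		return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
	}

	static inline void free_thread_info(struct thread_info *ti)
	{
		free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
	}
	#endif

so an architecture only needs the one #define when plain page allocation is good enough for its kernel stacks.
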
diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h
index 02402303d89b..967068fb79ac 100644
--- a/include/asm-sh/hugetlb.h
+++ b/include/asm-sh/hugetlb.h
@@ -14,7 +14,8 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
14 * If the arch doesn't supply something else, assume that hugepage 14 * If the arch doesn't supply something else, assume that hugepage
15 * size aligned regions are ok without further preparation. 15 * size aligned regions are ok without further preparation.
16 */ 16 */
17static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) 17static inline int prepare_hugepage_range(struct file *file,
18 unsigned long addr, unsigned long len)
18{ 19{
19 if (len & ~HPAGE_MASK) 20 if (len & ~HPAGE_MASK)
20 return -EINVAL; 21 return -EINVAL;
@@ -26,7 +27,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
26static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { 27static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
27} 28}
28 29
29static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb, 30static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
30 unsigned long addr, unsigned long end, 31 unsigned long addr, unsigned long end,
31 unsigned long floor, 32 unsigned long floor,
32 unsigned long ceiling) 33 unsigned long ceiling)
diff --git a/include/asm-sh/ide.h b/include/asm-sh/ide.h
deleted file mode 100644
index 58e0bdd52be4..000000000000
--- a/include/asm-sh/ide.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * linux/include/asm-sh/ide.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7/*
8 * This file contains the i386 architecture specific IDE code.
9 * In future, SuperH code.
10 */
11
12#ifndef __ASM_SH_IDE_H
13#define __ASM_SH_IDE_H
14
15#ifdef __KERNEL__
16
17#include <asm-generic/ide_iops.h>
18
19#endif /* __KERNEL__ */
20
21#endif /* __ASM_SH_IDE_H */
diff --git a/include/asm-sh/kvm.h b/include/asm-sh/kvm.h
deleted file mode 100644
index 6af51dbab2d0..000000000000
--- a/include/asm-sh/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_SH_H
2#define __LINUX_KVM_SH_H
3
4/* sh does not support KVM */
5
6#endif
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 304c30b5d947..5dc01d2fcc4c 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -22,9 +22,6 @@
22#define PAGE_MASK (~(PAGE_SIZE-1)) 22#define PAGE_MASK (~(PAGE_SIZE-1))
23#define PTE_MASK PAGE_MASK 23#define PTE_MASK PAGE_MASK
24 24
25/* to align the pointer to the (next) page boundary */
26#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
27
28#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) 25#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
29#define HPAGE_SHIFT 16 26#define HPAGE_SHIFT 16
30#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) 27#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
diff --git a/include/asm-sh/ptrace.h b/include/asm-sh/ptrace.h
index 8d6c92b3e770..7d36dc3bee69 100644
--- a/include/asm-sh/ptrace.h
+++ b/include/asm-sh/ptrace.h
@@ -5,7 +5,7 @@
5 * Copyright (C) 1999, 2000 Niibe Yutaka 5 * Copyright (C) 1999, 2000 Niibe Yutaka
6 * 6 *
7 */ 7 */
8#if defined(__SH5__) || defined(CONFIG_SUPERH64) 8#if defined(__SH5__)
9struct pt_regs { 9struct pt_regs {
10 unsigned long long pc; 10 unsigned long long pc;
11 unsigned long long sr; 11 unsigned long long sr;
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-sh/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-sh/sh7760fb.h b/include/asm-sh/sh7760fb.h
new file mode 100644
index 000000000000..8767f61aceca
--- /dev/null
+++ b/include/asm-sh/sh7760fb.h
@@ -0,0 +1,197 @@
1/*
2 * sh7760fb.h -- platform data for SH7760/SH7763 LCDC framebuffer driver.
3 *
4 * (c) 2006-2008 MSC Vertriebsges.m.b.H.,
5 * Manuel Lauss <mano@roarinelk.homelinux.net>
6 * (c) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 */
8
9#ifndef _ASM_SH_SH7760FB_H
10#define _ASM_SH_SH7760FB_H
11
12/*
13 * some bits of the colormap registers should be written as zero.
14 * create a mask for that.
15 */
16#define SH7760FB_PALETTE_MASK 0x00f8fcf8
17
18/* The LCDC dma engine always sets bits 27-26 to 1: this is Area3 */
19#define SH7760FB_DMA_MASK 0x0C000000
20
21/* palette */
22#define LDPR(x) (((x) << 2))
23
24/* framebuffer registers and bits */
25#define LDICKR 0x400
26#define LDMTR 0x402
27/* see sh7760fb.h for LDMTR bits */
28#define LDDFR 0x404
29#define LDDFR_PABD (1 << 8)
30#define LDDFR_COLOR_MASK 0x7F
31#define LDSMR 0x406
32#define LDSMR_ROT (1 << 13)
33#define LDSARU 0x408
34#define LDSARL 0x40c
35#define LDLAOR 0x410
36#define LDPALCR 0x412
37#define LDPALCR_PALS (1 << 4)
38#define LDPALCR_PALEN (1 << 0)
39#define LDHCNR 0x414
40#define LDHSYNR 0x416
41#define LDVDLNR 0x418
42#define LDVTLNR 0x41a
43#define LDVSYNR 0x41c
44#define LDACLNR 0x41e
45#define LDINTR 0x420
46#define LDPMMR 0x424
47#define LDPSPR 0x426
48#define LDCNTR 0x428
49#define LDCNTR_DON (1 << 0)
50#define LDCNTR_DON2 (1 << 4)
51
52#ifdef CONFIG_CPU_SUBTYPE_SH7763
53# define LDLIRNR 0x440
54/* LDINTR bit */
55# define LDINTR_MINTEN (1 << 15)
56# define LDINTR_FINTEN (1 << 14)
57# define LDINTR_VSINTEN (1 << 13)
58# define LDINTR_VEINTEN (1 << 12)
59# define LDINTR_MINTS (1 << 11)
60# define LDINTR_FINTS (1 << 10)
61# define LDINTR_VSINTS (1 << 9)
62# define LDINTR_VEINTS (1 << 8)
63# define VINT_START (LDINTR_VSINTEN)
64# define VINT_CHECK (LDINTR_VSINTS)
65#else
66/* LDINTR bit */
67# define LDINTR_VINTSEL (1 << 12)
68# define LDINTR_VINTE (1 << 8)
69# define LDINTR_VINTS (1 << 0)
70# define VINT_START (LDINTR_VINTSEL)
71# define VINT_CHECK (LDINTR_VINTS)
72#endif
73
74/* HSYNC polarity inversion */
75#define LDMTR_FLMPOL (1 << 15)
76
77/* VSYNC polarity inversion */
78#define LDMTR_CL1POL (1 << 14)
79
80/* DISPLAY-ENABLE polarity inversion */
81#define LDMTR_DISPEN_LOWACT (1 << 13)
82
83/* DISPLAY DATA BUS polarity inversion */
84#define LDMTR_DPOL_LOWACT (1 << 12)
85
86/* AC modulation signal enable */
87#define LDMTR_MCNT (1 << 10)
88
89/* Disable output of HSYNC during VSYNC period */
90#define LDMTR_CL1CNT (1 << 9)
91
92/* Disable output of VSYNC during VSYNC period */
93#define LDMTR_CL2CNT (1 << 8)
94
95/* Display types supported by the LCDC */
96#define LDMTR_STN_MONO_4 0x00
97#define LDMTR_STN_MONO_8 0x01
98#define LDMTR_STN_COLOR_4 0x08
99#define LDMTR_STN_COLOR_8 0x09
100#define LDMTR_STN_COLOR_12 0x0A
101#define LDMTR_STN_COLOR_16 0x0B
102#define LDMTR_DSTN_MONO_8 0x11
103#define LDMTR_DSTN_MONO_16 0x13
104#define LDMTR_DSTN_COLOR_8 0x19
105#define LDMTR_DSTN_COLOR_12 0x1A
106#define LDMTR_DSTN_COLOR_16 0x1B
107#define LDMTR_TFT_COLOR_16 0x2B
108
109/* framebuffer color layout */
110#define LDDFR_1BPP_MONO 0x00
111#define LDDFR_2BPP_MONO 0x01
112#define LDDFR_4BPP_MONO 0x02
113#define LDDFR_6BPP_MONO 0x04
114#define LDDFR_4BPP 0x0A
115#define LDDFR_8BPP 0x0C
116#define LDDFR_16BPP_RGB555 0x1D
117#define LDDFR_16BPP_RGB565 0x2D
118
119/* LCDC Pixclock sources */
120#define LCDC_CLKSRC_BUSCLOCK 0
121#define LCDC_CLKSRC_PERIPHERAL 1
122#define LCDC_CLKSRC_EXTERNAL 2
123
124#define LDICKR_CLKSRC(x) \
125 (((x) & 3) << 12)
126
127/* LCDC pixclock input divider. Set to 1 at a minimum! */
128#define LDICKR_CLKDIV(x) \
129 ((x) & 0x1f)
130
131struct sh7760fb_platdata {
132
133 /* Set this member to a valid fb_videomode for the display you
134 * wish to use. The following members must be initialized:
135 * xres, yres, hsync_len, vsync_len, sync,
136 * {left,right,upper,lower}_margin.
137 * The driver uses the above members to calculate register values
138 * and memory requirements. Other members are ignored but may
139 * be used by other framebuffer layer components.
140 */
141 struct fb_videomode *def_mode;
142
143 /* LDMTR includes display type and signal polarity. The
144 * HSYNC/VSYNC polarities are derived from the fb_var_screeninfo
145 * data above; however the polarities of the following signals
146 * must be encoded in the ldmtr member:
147 * Display Enable signal (default high-active) DISPEN_LOWACT
148 * Display Data signals (default high-active) DPOL_LOWACT
149 * AC Modulation signal (default off) MCNT
150 * Hsync-During-Vsync suppression (default off) CL1CNT
151 * Vsync-during-vsync suppression (default off) CL2CNT
152 * NOTE: also set a display type!
153 * (one of LDMTR_{STN,DSTN,TFT}_{MONO,COLOR}_{4,8,12,16})
154 */
155 u16 ldmtr;
156
157 /* LDDFR controls framebuffer image format (depth, organization)
158 * Use ONE of the LDDFR_?BPP_* macros!
159 */
160 u16 lddfr;
161
162 /* LDPMMR and LDPSPR control the timing of the power signals
163 * for the display. Please read the SH7760 Hardware Manual,
164 * Chapters 30.3.17, 30.3.18 and 30.4.6!
165 */
166 u16 ldpmmr;
167 u16 ldpspr;
168
169 /* LDACLNR contains the line numbers after which the AC modulation
170 * signal is to toggle. Set to ZERO for TFTs or displays which
171 * do not need it. (Chapter 30.3.15 in SH7760 Hardware Manual).
172 */
173 u16 ldaclnr;
174
175 /* LDICKR contains information on pixelclock source and config.
176 * Please use the LDICKR_CLKSRC() and LDICKR_CLKDIV() macros.
177 * The minimum value for CLKDIV() is 1.
178 */
179 u16 ldickr;
180
181 /* set this member to 1 if you wish to use the LCDC's hardware
182 * rotation function. This is limited to displays <= 320x200
183 * pixels resolution!
184 */
185 int rotate; /* set to 1 to rotate 90 CCW */
186
187 /* set this to 1 to suppress vsync irq use. */
188 int novsync;
189
190 /* blanking hook for platform. Set this if your platform can do
191 * more than the LCDC in terms of blanking (e.g. disable clock
192 * generator / backlight power supply / etc.)
193 */
194 void (*blank) (int);
195};
196
197#endif /* _ASM_SH_SH7760FB_H */
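
The comments in sh7760fb_platdata above spell out which fields board code must fill in. As a sketch only, with every timing and register value invented rather than taken from a real panel, a board's platform data could look roughly like this:

	#include <linux/fb.h>
	#include <asm/sh7760fb.h>

	/* Hypothetical 640x480 TFT; take real numbers from the panel datasheet
	 * and the SH7760 Hardware Manual before using this on hardware. */
	static struct fb_videomode example_lcd_mode = {
		.xres		= 640,
		.yres		= 480,
		.hsync_len	= 96,
		.vsync_len	= 2,
		.left_margin	= 48,
		.right_margin	= 16,
		.upper_margin	= 33,
		.lower_margin	= 10,
		.sync		= 0,
	};

	static struct sh7760fb_platdata example_fb_pdata = {
		.def_mode	= &example_lcd_mode,
		.ldmtr		= LDMTR_TFT_COLOR_16,	/* display type is mandatory */
		.lddfr		= LDDFR_16BPP_RGB565,	/* exactly one LDDFR_?BPP_* value */
		.ldpmmr		= 0x0070,		/* invented power-signal timings */
		.ldpspr		= 0x0500,
		.ldaclnr	= 0,			/* TFT: no AC modulation needed */
		.ldickr		= LDICKR_CLKSRC(LCDC_CLKSRC_BUSCLOCK) | LDICKR_CLKDIV(2),
		.rotate		= 0,
		.novsync	= 0,
		.blank		= NULL,
	};

The structure would then typically be handed to the LCDC platform device as its platform_data; the registration itself is board-specific and not shown here.
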
diff --git a/include/asm-sh/sh_mobile_lcdc.h b/include/asm-sh/sh_mobile_lcdc.h
new file mode 100644
index 000000000000..27677727df4d
--- /dev/null
+++ b/include/asm-sh/sh_mobile_lcdc.h
@@ -0,0 +1,66 @@
1#ifndef __ASM_SH_MOBILE_LCDC_H__
2#define __ASM_SH_MOBILE_LCDC_H__
3
4#include <linux/fb.h>
5
6enum { RGB8, /* 24bpp, 8:8:8 */
7 RGB9, /* 18bpp, 9:9 */
8 RGB12A, /* 24bpp, 12:12 */
9 RGB12B, /* 12bpp */
10 RGB16, /* 16bpp */
11 RGB18, /* 18bpp */
12 RGB24, /* 24bpp */
13 SYS8A, /* 24bpp, 8:8:8 */
14 SYS8B, /* 18bpp, 8:8:2 */
15 SYS8C, /* 18bpp, 2:8:8 */
16 SYS8D, /* 16bpp, 8:8 */
17 SYS9, /* 18bpp, 9:9 */
18 SYS12, /* 24bpp, 12:12 */
19 SYS16A, /* 16bpp */
20 SYS16B, /* 18bpp, 16:2 */
21 SYS16C, /* 18bpp, 2:16 */
22 SYS18, /* 18bpp */
23 SYS24 };/* 24bpp */
24
25enum { LCDC_CHAN_DISABLED = 0,
26 LCDC_CHAN_MAINLCD,
27 LCDC_CHAN_SUBLCD };
28
29enum { LCDC_CLK_BUS, LCDC_CLK_PERIPHERAL, LCDC_CLK_EXTERNAL };
30
31struct sh_mobile_lcdc_sys_bus_cfg {
32 unsigned long ldmt2r;
33 unsigned long ldmt3r;
34};
35
36struct sh_mobile_lcdc_sys_bus_ops {
37 void (*write_index)(void *handle, unsigned long data);
38 void (*write_data)(void *handle, unsigned long data);
39 unsigned long (*read_data)(void *handle);
40};
41
42struct sh_mobile_lcdc_board_cfg {
43 void *board_data;
44 int (*setup_sys)(void *board_data, void *sys_ops_handle,
45 struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
46 void (*display_on)(void *board_data);
47 void (*display_off)(void *board_data);
48};
49
50struct sh_mobile_lcdc_chan_cfg {
51 int chan;
52 int bpp;
53 int interface_type; /* selects RGBn or SYSn I/F, see above */
54 int clock_divider;
55 struct fb_videomode lcd_cfg;
56 struct sh_mobile_lcdc_board_cfg board_cfg;
57 struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */
58};
59
60struct sh_mobile_lcdc_info {
61 unsigned long lddckr;
62 int clock_source;
63 struct sh_mobile_lcdc_chan_cfg ch[2];
64};
65
66#endif /* __ASM_SH_MOBILE_LCDC_H__ */
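
As with the SH7760 header above, boards describe their panels to this driver through platform data. A sketch with invented timings for a single QVGA panel on the parallel RGB16 interface might look like:

	#include <asm/sh_mobile_lcdc.h>

	static struct sh_mobile_lcdc_info example_lcdc_info = {
		.clock_source	= LCDC_CLK_BUS,
		.ch[0] = {
			.chan		= LCDC_CHAN_MAINLCD,
			.bpp		= 16,
			.interface_type	= RGB16,	/* parallel RGB, see the enum above */
			.clock_divider	= 2,
			.lcd_cfg	= {		/* struct fb_videomode, embedded by value */
				.name		= "example-qvga",
				.xres		= 320,
				.yres		= 240,
				.left_margin	= 20,
				.right_margin	= 68,
				.hsync_len	= 20,
				.upper_margin	= 2,
				.lower_margin	= 14,
				.vsync_len	= 2,
			},
		},
		/* ch[1] is left zero-initialized, i.e. LCDC_CHAN_DISABLED */
	};

The board_cfg and sys_bus_cfg members are only needed for SYSn interfaces or for panels that require extra display_on/display_off handling.
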
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index c50e5d35fe84..5131e3907525 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -92,6 +92,8 @@ static inline struct thread_info *current_thread_info(void)
92 return ti; 92 return ti;
93} 93}
94 94
95#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
96
95/* thread information allocation */ 97/* thread information allocation */
96#ifdef CONFIG_DEBUG_STACK_USAGE 98#ifdef CONFIG_DEBUG_STACK_USAGE
97#define alloc_thread_info(ti) kzalloc(THREAD_SIZE, GFP_KERNEL) 99#define alloc_thread_info(ti) kzalloc(THREAD_SIZE, GFP_KERNEL)
diff --git a/include/asm-sparc/hugetlb.h b/include/asm-sparc/hugetlb.h
index 412af58926a0..177061064ee6 100644
--- a/include/asm-sparc/hugetlb.h
+++ b/include/asm-sparc/hugetlb.h
@@ -22,7 +22,8 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
22 * If the arch doesn't supply something else, assume that hugepage 22 * If the arch doesn't supply something else, assume that hugepage
23 * size aligned regions are ok without further preparation. 23 * size aligned regions are ok without further preparation.
24 */ 24 */
25static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) 25static inline int prepare_hugepage_range(struct file *file,
26 unsigned long addr, unsigned long len)
26{ 27{
27 if (len & ~HPAGE_MASK) 28 if (len & ~HPAGE_MASK)
28 return -EINVAL; 29 return -EINVAL;
@@ -31,7 +32,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
31 return 0; 32 return 0;
32} 33}
33 34
34static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb, 35static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
35 unsigned long addr, unsigned long end, 36 unsigned long addr, unsigned long end,
36 unsigned long floor, 37 unsigned long floor,
37 unsigned long ceiling) 38 unsigned long ceiling)
diff --git a/include/asm-sparc/ide.h b/include/asm-sparc/ide.h
index 879fcec72dc1..b7af3d658239 100644
--- a/include/asm-sparc/ide.h
+++ b/include/asm-sparc/ide.h
@@ -21,9 +21,6 @@
21#include <asm/psr.h> 21#include <asm/psr.h>
22#endif 22#endif
23 23
24#undef MAX_HWIFS
25#define MAX_HWIFS 2
26
27#define __ide_insl(data_reg, buffer, wcount) \ 24#define __ide_insl(data_reg, buffer, wcount) \
28 __ide_insw(data_reg, buffer, (wcount)<<1) 25 __ide_insw(data_reg, buffer, (wcount)<<1)
29#define __ide_outsl(data_reg, buffer, wcount) \ 26#define __ide_outsl(data_reg, buffer, wcount) \
diff --git a/include/asm-sparc/kvm.h b/include/asm-sparc/kvm.h
deleted file mode 100644
index 2e5478da3819..000000000000
--- a/include/asm-sparc/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_SPARC_H
2#define __LINUX_KVM_SPARC_H
3
4/* sparc does not support KVM */
5
6#endif
diff --git a/include/asm-sparc/page_32.h b/include/asm-sparc/page_32.h
index 14de518cc38f..cf5fb70ca1c1 100644
--- a/include/asm-sparc/page_32.h
+++ b/include/asm-sparc/page_32.h
@@ -134,9 +134,6 @@ BTFIXUPDEF_SETHI(sparc_unmapped_base)
134 134
135#endif /* !(__ASSEMBLY__) */ 135#endif /* !(__ASSEMBLY__) */
136 136
137/* to align the pointer to the (next) page boundary */
138#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
139
140#define PAGE_OFFSET 0xf0000000 137#define PAGE_OFFSET 0xf0000000
141#ifndef __ASSEMBLY__ 138#ifndef __ASSEMBLY__
142extern unsigned long phys_base; 139extern unsigned long phys_base;
diff --git a/include/asm-sparc/page_64.h b/include/asm-sparc/page_64.h
index a8a2bba032c1..b579b910ef51 100644
--- a/include/asm-sparc/page_64.h
+++ b/include/asm-sparc/page_64.h
@@ -106,9 +106,6 @@ typedef struct page *pgtable_t;
106 106
107#endif /* !(__ASSEMBLY__) */ 107#endif /* !(__ASSEMBLY__) */
108 108
109/* to align the pointer to the (next) page boundary */
110#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
111
112/* We used to stick this into a hard-coded global register (%g4) 109/* We used to stick this into a hard-coded global register (%g4)
113 * but that does not make sense anymore. 110 * but that does not make sense anymore.
114 */ 111 */
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-sparc/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-sparc/thread_info_32.h b/include/asm-sparc/thread_info_32.h
index 91b9f5888c85..2cf9db044055 100644
--- a/include/asm-sparc/thread_info_32.h
+++ b/include/asm-sparc/thread_info_32.h
@@ -86,6 +86,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
86#define THREAD_INFO_ORDER 1 86#define THREAD_INFO_ORDER 1
87#endif 87#endif
88 88
89#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
90
89BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void) 91BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
90#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)() 92#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)()
91 93
diff --git a/include/asm-sparc/thread_info_64.h b/include/asm-sparc/thread_info_64.h
index c6d2e6c7f844..960969d5ad06 100644
--- a/include/asm-sparc/thread_info_64.h
+++ b/include/asm-sparc/thread_info_64.h
@@ -155,6 +155,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
155#define __THREAD_INFO_ORDER 0 155#define __THREAD_INFO_ORDER 0
156#endif /* PAGE_SHIFT == 13 */ 156#endif /* PAGE_SHIFT == 13 */
157 157
158#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
159
158#ifdef CONFIG_DEBUG_STACK_USAGE 160#ifdef CONFIG_DEBUG_STACK_USAGE
159#define alloc_thread_info(tsk) \ 161#define alloc_thread_info(tsk) \
160({ \ 162({ \
diff --git a/include/asm-sparc/unistd_32.h b/include/asm-sparc/unistd_32.h
index 2338a0276377..648643a9f139 100644
--- a/include/asm-sparc/unistd_32.h
+++ b/include/asm-sparc/unistd_32.h
@@ -332,8 +332,14 @@
332#define __NR_fallocate 314 332#define __NR_fallocate 314
333#define __NR_timerfd_settime 315 333#define __NR_timerfd_settime 315
334#define __NR_timerfd_gettime 316 334#define __NR_timerfd_gettime 316
335#define __NR_signalfd4 317
336#define __NR_eventfd2 318
337#define __NR_epoll_create1 319
338#define __NR_dup3 320
339#define __NR_pipe2 321
340#define __NR_inotify_init1 322
335 341
336#define NR_SYSCALLS 317 342#define NR_SYSCALLS 323
337 343
338/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 344/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
339 * it never had the plain ones and there is no value to adding those 345 * it never had the plain ones and there is no value to adding those
diff --git a/include/asm-sparc/unistd_64.h b/include/asm-sparc/unistd_64.h
index 13be4453a1f0..c5cc0e052321 100644
--- a/include/asm-sparc/unistd_64.h
+++ b/include/asm-sparc/unistd_64.h
@@ -334,8 +334,14 @@
334#define __NR_fallocate 314 334#define __NR_fallocate 314
335#define __NR_timerfd_settime 315 335#define __NR_timerfd_settime 315
336#define __NR_timerfd_gettime 316 336#define __NR_timerfd_gettime 316
337#define __NR_signalfd4 317
338#define __NR_eventfd2 318
339#define __NR_epoll_create1 319
340#define __NR_dup3 320
341#define __NR_pipe2 321
342#define __NR_inotify_init1 322
337 343
338#define NR_SYSCALLS 317 344#define NR_SYSCALLS 323
339 345
340#ifdef __KERNEL__ 346#ifdef __KERNEL__
341#define __ARCH_WANT_IPC_PARSE_VERSION 347#define __ARCH_WANT_IPC_PARSE_VERSION
diff --git a/include/asm-sparc64/kvm.h b/include/asm-sparc64/kvm.h
deleted file mode 100644
index 53564ad86b15..000000000000
--- a/include/asm-sparc64/kvm.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-sparc/kvm.h>
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
deleted file mode 100644
index 39362afde5fe..000000000000
--- a/include/asm-sparc64/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-sparc/semaphore.h>
diff --git a/include/asm-um/kvm.h b/include/asm-um/kvm.h
deleted file mode 100644
index 66aa77094551..000000000000
--- a/include/asm-um/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_UM_H
2#define __LINUX_KVM_UM_H
3
4/* um does not support KVM */
5
6#endif
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index 916e1a61999f..a6df1f13d732 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -92,9 +92,6 @@ typedef struct page *pgtable_t;
92#define __pgd(x) ((pgd_t) { (x) } ) 92#define __pgd(x) ((pgd_t) { (x) } )
93#define __pgprot(x) ((pgprot_t) { (x) } ) 93#define __pgprot(x) ((pgprot_t) { (x) } )
94 94
95/* to align the pointer to the (next) page boundary */
96#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
97
98extern unsigned long uml_physmem; 95extern unsigned long uml_physmem;
99 96
100#define PAGE_OFFSET (uml_physmem) 97#define PAGE_OFFSET (uml_physmem)
@@ -118,9 +115,6 @@ extern unsigned long uml_physmem;
118#define pfn_valid(pfn) ((pfn) < max_mapnr) 115#define pfn_valid(pfn) ((pfn) < max_mapnr)
119#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v))) 116#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
120 117
121extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
122#define HAVE_ARCH_VALIDATE
123
124#include <asm-generic/memory_model.h> 118#include <asm-generic/memory_model.h>
125#include <asm-generic/page.h> 119#include <asm-generic/page.h>
126 120
diff --git a/include/asm-um/ptrace-generic.h b/include/asm-um/ptrace-generic.h
index 6aefcd32fc61..315749705ea1 100644
--- a/include/asm-um/ptrace-generic.h
+++ b/include/asm-um/ptrace-generic.h
@@ -47,9 +47,6 @@ extern int set_fpregs(struct user_i387_struct __user *buf,
47 47
48extern void show_regs(struct pt_regs *regs); 48extern void show_regs(struct pt_regs *regs);
49 49
50extern void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs,
51 int error_code);
52
53extern int arch_copy_tls(struct task_struct *new); 50extern int arch_copy_tls(struct task_struct *new);
54extern void clear_flushed_tls(struct task_struct *task); 51extern void clear_flushed_tls(struct task_struct *task);
55 52
diff --git a/include/asm-um/semaphore.h b/include/asm-um/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-um/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 356b83e2c22e..e07e72846c7a 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -53,21 +53,7 @@ static inline struct thread_info *current_thread_info(void)
53 return ti; 53 return ti;
54} 54}
55 55
56#ifdef CONFIG_DEBUG_STACK_USAGE 56#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
57
58#define alloc_thread_info(tsk) \
59 ((struct thread_info *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, \
60 CONFIG_KERNEL_STACK_ORDER))
61#else
62
63/* thread information allocation */
64#define alloc_thread_info(tsk) \
65 ((struct thread_info *) __get_free_pages(GFP_KERNEL, \
66 CONFIG_KERNEL_STACK_ORDER))
67#endif
68
69#define free_thread_info(ti) \
70 free_pages((unsigned long)(ti),CONFIG_KERNEL_STACK_ORDER)
71 57
72#endif 58#endif
73 59
diff --git a/include/asm-v850/Kbuild b/include/asm-v850/Kbuild
deleted file mode 100644
index c68e1680da01..000000000000
--- a/include/asm-v850/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
1include include/asm-generic/Kbuild.asm
diff --git a/include/asm-v850/a.out.h b/include/asm-v850/a.out.h
deleted file mode 100644
index e9439a0708f6..000000000000
--- a/include/asm-v850/a.out.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef __V850_A_OUT_H__
2#define __V850_A_OUT_H__
3
4struct exec
5{
6 unsigned long a_info; /* Use macros N_MAGIC, etc for access */
7 unsigned a_text; /* length of text, in bytes */
8 unsigned a_data; /* length of data, in bytes */
9 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
10 unsigned a_syms; /* length of symbol table data in file, in bytes */
11 unsigned a_entry; /* start address */
12 unsigned a_trsize; /* length of relocation info for text, in bytes */
13 unsigned a_drsize; /* length of relocation info for data, in bytes */
14};
15
16#define N_TRSIZE(a) ((a).a_trsize)
17#define N_DRSIZE(a) ((a).a_drsize)
18#define N_SYMSIZE(a) ((a).a_syms)
19
20
21#endif /* __V850_A_OUT_H__ */
diff --git a/include/asm-v850/anna.h b/include/asm-v850/anna.h
deleted file mode 100644
index cd5eaee103b0..000000000000
--- a/include/asm-v850/anna.h
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * include/asm-v850/anna.h -- Anna V850E2 evaluation cpu chip/board
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_ANNA_H__
15#define __V850_ANNA_H__
16
17#include <asm/v850e2.h> /* Based on V850E2 core. */
18
19
20#define CPU_MODEL "v850e2/anna"
21#define CPU_MODEL_LONG "NEC V850E2/Anna"
22#define PLATFORM "anna"
23#define PLATFORM_LONG "NEC/Midas lab V850E2/Anna evaluation board"
24
25#define CPU_CLOCK_FREQ 200000000 /* 200MHz */
26#define SYS_CLOCK_FREQ 33300000 /* 33.3MHz */
27
28
29/* 1MB of static RAM. This memory is mirrored 64 times. */
30#define SRAM_ADDR 0x04000000
31#define SRAM_SIZE 0x00100000 /* 1MB */
32/* 64MB of DRAM. */
33#define SDRAM_ADDR 0x08000000
34#define SDRAM_SIZE 0x04000000 /* 64MB */
35
36
37/* For <asm/page.h> */
38#define PAGE_OFFSET SRAM_ADDR
39
40/* We use on-chip RAM, for a few miscellaneous variables that must be
41 accessible using a load instruction relative to R0. The Anna chip has
42 128K of `dLB' ram nominally located at 0xFFF00000, but it's mirrored
43 every 128K, so we can use the `last mirror' (except for the portion at
44 the top which is overridden by I/O space). In addition, the early
45 sample chip we're using has lots of memory errors in the dLB ram, so we
46 use a specially chosen location that has at least 20 bytes of contiguous
47 valid memory (xxxF0020 - xxxF003F). */
48#define R0_RAM_ADDR 0xFFFF8020
49
50
51/* Anna specific control registers. */
52#define ANNA_ILBEN_ADDR 0xFFFFF7F2
53#define ANNA_ILBEN (*(volatile u16 *)ANNA_ILBEN_ADDR)
54
55
56/* I/O port P0-P3. */
57/* Direct I/O. Bits 0-7 are pins Pn0-Pn7. */
58#define ANNA_PORT_IO_ADDR(n) (0xFFFFF400 + (n) * 2)
59#define ANNA_PORT_IO(n) (*(volatile u8 *)ANNA_PORT_IO_ADDR(n))
60/* Port mode (for direct I/O, 0 = output, 1 = input). */
61#define ANNA_PORT_PM_ADDR(n) (0xFFFFF410 + (n) * 2)
62#define ANNA_PORT_PM(n) (*(volatile u8 *)ANNA_PORT_PM_ADDR(n))
63
64
65/* Hardware-specific interrupt numbers (in the kernel IRQ namespace). */
66#define IRQ_INTP(n) (n) /* Pnnn (pin) interrupts 0-15 */
67#define IRQ_INTP_NUM 16
68#define IRQ_INTOV(n) (0x10 + (n)) /* 0-2 */
69#define IRQ_INTOV_NUM 2
70#define IRQ_INTCCC(n) (0x12 + (n))
71#define IRQ_INTCCC_NUM 4
72#define IRQ_INTCMD(n) (0x16 + (n)) /* interval timer interrupts 0-5 */
73#define IRQ_INTCMD_NUM 6
74#define IRQ_INTDMA(n) (0x1C + (n)) /* DMA interrupts 0-3 */
75#define IRQ_INTDMA_NUM 4
76#define IRQ_INTDMXER 0x20
77#define IRQ_INTSRE(n) (0x21 + (n)*3) /* UART 0-1 reception error */
78#define IRQ_INTSRE_NUM 2
79#define IRQ_INTSR(n) (0x22 + (n)*3) /* UART 0-1 reception completion */
80#define IRQ_INTSR_NUM 2
81#define IRQ_INTST(n) (0x23 + (n)*3) /* UART 0-1 transmission completion */
82#define IRQ_INTST_NUM 2
83
84#define NUM_CPU_IRQS 64
85
86#ifndef __ASSEMBLY__
87/* Initialize chip interrupts. */
88extern void anna_init_irqs (void);
89#endif
90
91
92/* Anna UART details (basically the same as the V850E/MA1, but 2 channels). */
93#define V850E_UART_NUM_CHANNELS 2
94#define V850E_UART_BASE_FREQ (SYS_CLOCK_FREQ / 2)
95#define V850E_UART_CHIP_NAME "V850E2/NA85E2A"
96
97/* This is the UART channel that's actually connected on the board. */
98#define V850E_UART_CONSOLE_CHANNEL 1
99
100/* This is a function that gets called before configuring the UART. */
101#define V850E_UART_PRE_CONFIGURE anna_uart_pre_configure
102#ifndef __ASSEMBLY__
103extern void anna_uart_pre_configure (unsigned chan,
104 unsigned cflags, unsigned baud);
105#endif
106
107/* This board supports RTS/CTS for the on-chip UART, but only for channel 1. */
108
109/* CTS for UART channel 1 is pin P37 (bit 7 of port 3). */
110#define V850E_UART_CTS(chan) ((chan) == 1 ? !(ANNA_PORT_IO(3) & 0x80) : 1)
111/* RTS for UART channel 1 is pin P07 (bit 7 of port 0). */
112#define V850E_UART_SET_RTS(chan, val) \
113 do { \
114 if (chan == 1) { \
115 unsigned old = ANNA_PORT_IO(0); \
116 if (val) \
117 ANNA_PORT_IO(0) = old & ~0x80; \
118 else \
119 ANNA_PORT_IO(0) = old | 0x80; \
120 } \
121 } while (0)
122
123
124/* Timer C details. */
125#define V850E_TIMER_C_BASE_ADDR 0xFFFFF600
126
127/* Timer D details (the Anna actually has 5 of these; should change later). */
128#define V850E_TIMER_D_BASE_ADDR 0xFFFFF540
129#define V850E_TIMER_D_TMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x0)
130#define V850E_TIMER_D_CMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x2)
131#define V850E_TIMER_D_TMCD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x4)
132
133#define V850E_TIMER_D_BASE_FREQ SYS_CLOCK_FREQ
134#define V850E_TIMER_D_TMCD_CS_MIN 1 /* min 2^1 divider */
135
136
137#endif /* __V850_ANNA_H__ */
diff --git a/include/asm-v850/as85ep1.h b/include/asm-v850/as85ep1.h
deleted file mode 100644
index 5a5ca9073d09..000000000000
--- a/include/asm-v850/as85ep1.h
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * include/asm-v850/as85ep1.h -- AS85EP1 evaluation CPU chip/board
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_AS85EP1_H__
15#define __V850_AS85EP1_H__
16
17#include <asm/v850e.h>
18
19
20#define CPU_MODEL "as85ep1"
21#define CPU_MODEL_LONG "NEC V850E/AS85EP1"
22#define PLATFORM "AS85EP1"
23#define PLATFORM_LONG "NEC V850E/AS85EP1 evaluation board"
24
25#define CPU_CLOCK_FREQ 96000000 /* 96MHz */
26#define SYS_CLOCK_FREQ CPU_CLOCK_FREQ
27
28
29/* 1MB of static RAM. */
30#define SRAM_ADDR 0x00400000
31#define SRAM_SIZE 0x00100000 /* 1MB */
32/* About 58MB of DRAM. This can actually be at one of two positions,
33 determined by jump JP3; we have to use the first position because the
34 second is partially out of processor instruction addressing range
35 (though in the second position there's actually 64MB available). */
36#define SDRAM_ADDR 0x00600000
37#define SDRAM_SIZE 0x039F8000 /* approx 58MB */
38
39/* For <asm/page.h> */
40#define PAGE_OFFSET SRAM_ADDR
41
42/* We use on-chip RAM, for a few miscellaneous variables that must be
43 accessible using a load instruction relative to R0. The AS85EP1 chip
44 16K of internal RAM located slightly before I/O space. */
45#define R0_RAM_ADDR 0xFFFF8000
46
47
48/* AS85EP1 specific control registers. */
49#define AS85EP1_CSC_ADDR(n) (0xFFFFF060 + (n) * 2)
50#define AS85EP1_CSC(n) (*(volatile u16 *)AS85EP1_CSC_ADDR(n))
51#define AS85EP1_BSC_ADDR 0xFFFFF066
52#define AS85EP1_BSC (*(volatile u16 *)AS85EP1_BSC_ADDR)
53#define AS85EP1_BCT_ADDR(n) (0xFFFFF480 + (n) * 2)
54#define AS85EP1_BCT(n) (*(volatile u16 *)AS85EP1_BCT_ADDR(n))
55#define AS85EP1_DWC_ADDR(n) (0xFFFFF484 + (n) * 2)
56#define AS85EP1_DWC(n) (*(volatile u16 *)AS85EP1_DWC_ADDR(n))
57#define AS85EP1_BCC_ADDR 0xFFFFF488
58#define AS85EP1_BCC (*(volatile u16 *)AS85EP1_BCC_ADDR)
59#define AS85EP1_ASC_ADDR 0xFFFFF48A
60#define AS85EP1_ASC (*(volatile u16 *)AS85EP1_ASC_ADDR)
61#define AS85EP1_BCP_ADDR 0xFFFFF48C
62#define AS85EP1_BCP (*(volatile u16 *)AS85EP1_BCP_ADDR)
63#define AS85EP1_LBS_ADDR 0xFFFFF48E
64#define AS85EP1_LBS (*(volatile u16 *)AS85EP1_LBS_ADDR)
65#define AS85EP1_BMC_ADDR 0xFFFFF498
66#define AS85EP1_BMC (*(volatile u16 *)AS85EP1_BMC_ADDR)
67#define AS85EP1_PRC_ADDR 0xFFFFF49A
68#define AS85EP1_PRC (*(volatile u16 *)AS85EP1_PRC_ADDR)
69#define AS85EP1_SCR_ADDR(n) (0xFFFFF4A0 + (n) * 4)
70#define AS85EP1_SCR(n) (*(volatile u16 *)AS85EP1_SCR_ADDR(n))
71#define AS85EP1_RFS_ADDR(n) (0xFFFFF4A2 + (n) * 4)
72#define AS85EP1_RFS(n) (*(volatile u16 *)AS85EP1_RFS_ADDR(n))
73#define AS85EP1_IRAMM_ADDR 0xFFFFF80A
74#define AS85EP1_IRAMM (*(volatile u8 *)AS85EP1_IRAMM_ADDR)
75
76
77
78/* I/O port P0-P13. */
79/* Direct I/O. Bits 0-7 are pins Pn0-Pn7. */
80#define AS85EP1_PORT_IO_ADDR(n) (0xFFFFF400 + (n) * 2)
81#define AS85EP1_PORT_IO(n) (*(volatile u8 *)AS85EP1_PORT_IO_ADDR(n))
82/* Port mode (for direct I/O, 0 = output, 1 = input). */
83#define AS85EP1_PORT_PM_ADDR(n) (0xFFFFF420 + (n) * 2)
84#define AS85EP1_PORT_PM(n) (*(volatile u8 *)AS85EP1_PORT_PM_ADDR(n))
85/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
86#define AS85EP1_PORT_PMC_ADDR(n) (0xFFFFF440 + (n) * 2)
87#define AS85EP1_PORT_PMC(n) (*(volatile u8 *)AS85EP1_PORT_PMC_ADDR(n))
88
89
90/* Hardware-specific interrupt numbers (in the kernel IRQ namespace). */
91#define IRQ_INTCCC(n) (0x0C + (n))
92#define IRQ_INTCCC_NUM 8
93#define IRQ_INTCMD(n) (0x14 + (n)) /* interval timer interrupts 0-5 */
94#define IRQ_INTCMD_NUM 6
95#define IRQ_INTSRE(n) (0x1E + (n)*3) /* UART 0-1 reception error */
96#define IRQ_INTSRE_NUM 2
97#define IRQ_INTSR(n) (0x1F + (n)*3) /* UART 0-1 reception completion */
98#define IRQ_INTSR_NUM 2
99#define IRQ_INTST(n) (0x20 + (n)*3) /* UART 0-1 transmission completion */
100#define IRQ_INTST_NUM 2
101
102#define NUM_CPU_IRQS 64
103
104#ifndef __ASSEMBLY__
105/* Initialize chip interrupts. */
106extern void as85ep1_init_irqs (void);
107#endif
108
109
110/* AS85EP1 UART details (basically the same as the V850E/MA1, but 2 channels). */
111#define V850E_UART_NUM_CHANNELS 2
112#define V850E_UART_BASE_FREQ (SYS_CLOCK_FREQ / 4)
113#define V850E_UART_CHIP_NAME "V850E/NA85E"
114
115/* This is a function that gets called before configuring the UART. */
116#define V850E_UART_PRE_CONFIGURE as85ep1_uart_pre_configure
117#ifndef __ASSEMBLY__
118extern void as85ep1_uart_pre_configure (unsigned chan,
119 unsigned cflags, unsigned baud);
120#endif
121
122/* This board supports RTS/CTS for the on-chip UART, but only for channel 1. */
123
124/* CTS for UART channel 1 is pin P54 (bit 4 of port 5). */
125#define V850E_UART_CTS(chan) ((chan) == 1 ? !(AS85EP1_PORT_IO(5) & 0x10) : 1)
126/* RTS for UART channel 1 is pin P53 (bit 3 of port 5). */
127#define V850E_UART_SET_RTS(chan, val) \
128 do { \
129 if (chan == 1) { \
130 unsigned old = AS85EP1_PORT_IO(5); \
131 if (val) \
132 AS85EP1_PORT_IO(5) = old & ~0x8; \
133 else \
134 AS85EP1_PORT_IO(5) = old | 0x8; \
135 } \
136 } while (0)
137
138
139/* Timer C details. */
140#define V850E_TIMER_C_BASE_ADDR 0xFFFFF600
141
142/* Timer D details (the AS85EP1 actually has 5 of these; should change later). */
143#define V850E_TIMER_D_BASE_ADDR 0xFFFFF540
144#define V850E_TIMER_D_TMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x0)
145#define V850E_TIMER_D_CMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x2)
146#define V850E_TIMER_D_TMCD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x4)
147
148#define V850E_TIMER_D_BASE_FREQ SYS_CLOCK_FREQ
149#define V850E_TIMER_D_TMCD_CS_MIN 2 /* min 2^2 divider */
150
151
152#endif /* __V850_AS85EP1_H__ */
diff --git a/include/asm-v850/asm.h b/include/asm-v850/asm.h
deleted file mode 100644
index bf1e785a5dde..000000000000
--- a/include/asm-v850/asm.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * include/asm-v850/asm.h -- Macros for writing assembly code
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#define G_ENTRY(name) \
15 .balign 4; \
16 .globl name; \
17 .type name,@function; \
18 name
19#define G_DATA(name) \
20 .globl name; \
21 .type name,@object; \
22 name
23#define END(name) \
24 .size name,.-name
25
26#define L_ENTRY(name) \
27 .balign 4; \
28 .type name,@function; \
29 name
30#define L_DATA(name) \
31 .type name,@object; \
32 name
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
deleted file mode 100644
index e4e57de08f73..000000000000
--- a/include/asm-v850/atomic.h
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * include/asm-v850/atomic.h -- Atomic operations
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_ATOMIC_H__
15#define __V850_ATOMIC_H__
16
17
18#include <asm/system.h>
19
20#ifdef CONFIG_SMP
21#error SMP not supported
22#endif
23
24typedef struct { int counter; } atomic_t;
25
26#define ATOMIC_INIT(i) { (i) }
27
28#ifdef __KERNEL__
29
30#define atomic_read(v) ((v)->counter)
31#define atomic_set(v,i) (((v)->counter) = (i))
32
33static inline int atomic_add_return (int i, volatile atomic_t *v)
34{
35 unsigned long flags;
36 int res;
37
38 local_irq_save (flags);
39 res = v->counter + i;
40 v->counter = res;
41 local_irq_restore (flags);
42
43 return res;
44}
45
46static __inline__ int atomic_sub_return (int i, volatile atomic_t *v)
47{
48 unsigned long flags;
49 int res;
50
51 local_irq_save (flags);
52 res = v->counter - i;
53 v->counter = res;
54 local_irq_restore (flags);
55
56 return res;
57}
58
59static __inline__ void atomic_clear_mask (unsigned long mask, unsigned long *addr)
60{
61 unsigned long flags;
62
63 local_irq_save (flags);
64 *addr &= ~mask;
65 local_irq_restore (flags);
66}
67
68#endif
69
70#define atomic_add(i, v) atomic_add_return ((i), (v))
71#define atomic_sub(i, v) atomic_sub_return ((i), (v))
72
73#define atomic_dec_return(v) atomic_sub_return (1, (v))
74#define atomic_inc_return(v) atomic_add_return (1, (v))
75#define atomic_inc(v) atomic_inc_return (v)
76#define atomic_dec(v) atomic_dec_return (v)
77
78/*
79 * atomic_inc_and_test - increment and test
80 * @v: pointer of type atomic_t
81 *
82 * Atomically increments @v by 1
83 * and returns true if the result is zero, or false for all
84 * other cases.
85 */
86#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
87
88#define atomic_sub_and_test(i,v) (atomic_sub_return ((i), (v)) == 0)
89#define atomic_dec_and_test(v) (atomic_sub_return (1, (v)) == 0)
90#define atomic_add_negative(i,v) (atomic_add_return ((i), (v)) < 0)
91
92static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
93{
94 int ret;
95 unsigned long flags;
96
97 local_irq_save(flags);
98 ret = v->counter;
99 if (likely(ret == old))
100 v->counter = new;
101 local_irq_restore(flags);
102
103 return ret;
104}
105
106#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
107
108static inline int atomic_add_unless(atomic_t *v, int a, int u)
109{
110 int ret;
111 unsigned long flags;
112
113 local_irq_save(flags);
114 ret = v->counter;
115 if (ret != u)
116 v->counter += a;
117 local_irq_restore(flags);
118
119 return ret != u;
120}
121
122#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
123
124/* Atomic operations are already serializing on ARM */
125#define smp_mb__before_atomic_dec() barrier()
126#define smp_mb__after_atomic_dec() barrier()
127#define smp_mb__before_atomic_inc() barrier()
128#define smp_mb__after_atomic_inc() barrier()
129
130#include <asm-generic/atomic.h>
131#endif /* __V850_ATOMIC_H__ */
diff --git a/include/asm-v850/auxvec.h b/include/asm-v850/auxvec.h
deleted file mode 100644
index f493232d0224..000000000000
--- a/include/asm-v850/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef __V850_AUXVEC_H__
2#define __V850_AUXVEC_H__
3
4#endif /* __V850_AUXVEC_H__ */
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h
deleted file mode 100644
index f82f5b4a56e0..000000000000
--- a/include/asm-v850/bitops.h
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * include/asm-v850/bitops.h -- Bit operations
3 *
4 * Copyright (C) 2001,02,03,04,05 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03,04,05 Miles Bader <miles@gnu.org>
6 * Copyright (C) 1992 Linus Torvalds.
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 */
12
13#ifndef __V850_BITOPS_H__
14#define __V850_BITOPS_H__
15
16#ifndef _LINUX_BITOPS_H
17#error only <linux/bitops.h> can be included directly
18#endif
19
20#include <linux/compiler.h> /* unlikely */
21#include <asm/byteorder.h> /* swab32 */
22#include <asm/system.h> /* interrupt enable/disable */
23
24
25#ifdef __KERNEL__
26
27#include <asm-generic/bitops/ffz.h>
28
29/*
30 * The __ functions are not atomic
31 */
32
33/* In the following constant-bit-op macros, a "g" constraint is used when
34 we really need an integer ("i" constraint). This is to avoid
35 warnings/errors from the compiler in the case where the associated
36 operand _isn't_ an integer, and shouldn't produce bogus assembly because
37 use of that form is protected by a guard statement that checks for
38 constants, and should otherwise be removed by the optimizer. This
39 _usually_ works -- however, __builtin_constant_p returns true for a
40 variable with a known constant value too, and unfortunately gcc will
41 happily put the variable in a register and use the register for the "g"
42 constraint'd asm operand. To avoid the latter problem, we add a
43 constant offset to the operand and subtract it back in the asm code;
44 forcing gcc to do arithmetic on the value is usually enough to get it
45 to use a real constant value. This is horrible, and ultimately
46 unreliable too, but it seems to work for now (hopefully gcc will offer
47 us more control in the future, so we can do a better job). */
48
49#define __const_bit_op(op, nr, addr) \
50 ({ __asm__ (op " (%0 - 0x123), %1" \
51 :: "g" (((nr) & 0x7) + 0x123), \
52 "m" (*((char *)(addr) + ((nr) >> 3))) \
53 : "memory"); })
54#define __var_bit_op(op, nr, addr) \
55 ({ int __nr = (nr); \
56 __asm__ (op " %0, [%1]" \
57 :: "r" (__nr & 0x7), \
58 "r" ((char *)(addr) + (__nr >> 3)) \
59 : "memory"); })
60#define __bit_op(op, nr, addr) \
61 ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \
62 ? __const_bit_op (op, nr, addr) \
63 : __var_bit_op (op, nr, addr))
64
65#define __set_bit(nr, addr) __bit_op ("set1", nr, addr)
66#define __clear_bit(nr, addr) __bit_op ("clr1", nr, addr)
67#define __change_bit(nr, addr) __bit_op ("not1", nr, addr)
68
69/* The bit instructions used by `non-atomic' variants are actually atomic. */
70#define set_bit __set_bit
71#define clear_bit __clear_bit
72#define change_bit __change_bit
73
74
75#define __const_tns_bit_op(op, nr, addr) \
76 ({ int __tns_res; \
77 __asm__ __volatile__ ( \
78 "tst1 (%1 - 0x123), %2; setf nz, %0; " op " (%1 - 0x123), %2" \
79 : "=&r" (__tns_res) \
80 : "g" (((nr) & 0x7) + 0x123), \
81 "m" (*((char *)(addr) + ((nr) >> 3))) \
82 : "memory"); \
83 __tns_res; \
84 })
85#define __var_tns_bit_op(op, nr, addr) \
86 ({ int __nr = (nr); \
87 int __tns_res; \
88 __asm__ __volatile__ ( \
89 "tst1 %1, [%2]; setf nz, %0; " op " %1, [%2]" \
90 : "=&r" (__tns_res) \
91 : "r" (__nr & 0x7), \
92 "r" ((char *)(addr) + (__nr >> 3)) \
93 : "memory"); \
94 __tns_res; \
95 })
96#define __tns_bit_op(op, nr, addr) \
97 ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \
98 ? __const_tns_bit_op (op, nr, addr) \
99 : __var_tns_bit_op (op, nr, addr))
100#define __tns_atomic_bit_op(op, nr, addr) \
101 ({ int __tns_atomic_res, __tns_atomic_flags; \
102 local_irq_save (__tns_atomic_flags); \
103 __tns_atomic_res = __tns_bit_op (op, nr, addr); \
104 local_irq_restore (__tns_atomic_flags); \
105 __tns_atomic_res; \
106 })
107
108#define __test_and_set_bit(nr, addr) __tns_bit_op ("set1", nr, addr)
109#define test_and_set_bit(nr, addr) __tns_atomic_bit_op ("set1", nr, addr)
110
111#define __test_and_clear_bit(nr, addr) __tns_bit_op ("clr1", nr, addr)
112#define test_and_clear_bit(nr, addr) __tns_atomic_bit_op ("clr1", nr, addr)
113
114#define __test_and_change_bit(nr, addr) __tns_bit_op ("not1", nr, addr)
115#define test_and_change_bit(nr, addr) __tns_atomic_bit_op ("not1", nr, addr)
116
117
118#define __const_test_bit(nr, addr) \
119 ({ int __test_bit_res; \
120 __asm__ __volatile__ ("tst1 (%1 - 0x123), %2; setf nz, %0" \
121 : "=r" (__test_bit_res) \
122 : "g" (((nr) & 0x7) + 0x123), \
123 "m" (*((const char *)(addr) + ((nr) >> 3)))); \
124 __test_bit_res; \
125 })
126static inline int __test_bit (int nr, const void *addr)
127{
128 int res;
129 __asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0"
130 : "=r" (res)
131 : "r" (nr & 0x7), "r" (addr + (nr >> 3)));
132 return res;
133}
134#define test_bit(nr,addr) \
135 ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \
136 ? __const_test_bit ((nr), (addr)) \
137 : __test_bit ((nr), (addr)))
138
139
140/* clear_bit doesn't provide any barrier for the compiler. */
141#define smp_mb__before_clear_bit() barrier ()
142#define smp_mb__after_clear_bit() barrier ()
143
144#include <asm-generic/bitops/ffs.h>
145#include <asm-generic/bitops/fls.h>
146#include <asm-generic/bitops/fls64.h>
147#include <asm-generic/bitops/__ffs.h>
148#include <asm-generic/bitops/find.h>
149#include <asm-generic/bitops/sched.h>
150#include <asm-generic/bitops/hweight.h>
151#include <asm-generic/bitops/lock.h>
152
153#include <asm-generic/bitops/ext2-non-atomic.h>
154#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
155#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
156
157#include <asm-generic/bitops/minix.h>
158
159#endif /* __KERNEL__ */
160
161#endif /* __V850_BITOPS_H__ */
diff --git a/include/asm-v850/bug.h b/include/asm-v850/bug.h
deleted file mode 100644
index b0ed2d35f3e8..000000000000
--- a/include/asm-v850/bug.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * include/asm-v850/bug.h -- Bug reporting
3 *
4 * Copyright (C) 2003 NEC Electronics Corporation
5 * Copyright (C) 2003 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_BUG_H__
15#define __V850_BUG_H__
16
17#ifdef CONFIG_BUG
18extern void __bug (void) __attribute__ ((noreturn));
19#define BUG() __bug()
20#define HAVE_ARCH_BUG
21#endif
22
23#include <asm-generic/bug.h>
24
25#endif /* __V850_BUG_H__ */
diff --git a/include/asm-v850/bugs.h b/include/asm-v850/bugs.h
deleted file mode 100644
index 71110a65c1d7..000000000000
--- a/include/asm-v850/bugs.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/*
2 * include/asm-v850e/bugs.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 */
6
7/*
8 * This is included by init/main.c to check for architecture-dependent bugs.
9 *
10 * Needs:
11 * void check_bugs(void);
12 */
13
14static void check_bugs(void)
15{
16}
diff --git a/include/asm-v850/byteorder.h b/include/asm-v850/byteorder.h
deleted file mode 100644
index a6f07530050e..000000000000
--- a/include/asm-v850/byteorder.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * include/asm-v850/byteorder.h -- Endian id and conversion ops
3 *
4 * Copyright (C) 2001 NEC Corporation
5 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_BYTEORDER_H__
15#define __V850_BYTEORDER_H__
16
17#include <asm/types.h>
18#include <linux/compiler.h>
19
20#ifdef __GNUC__
21
22static __inline__ __attribute_const__ __u32 ___arch__swab32 (__u32 word)
23{
24 __u32 res;
25 __asm__ ("bsw %1, %0" : "=r" (res) : "r" (word));
26 return res;
27}
28
29static __inline__ __attribute_const__ __u16 ___arch__swab16 (__u16 half_word)
30{
31 __u16 res;
32 __asm__ ("bsh %1, %0" : "=r" (res) : "r" (half_word));
33 return res;
34}
35
36#define __arch__swab32(x) ___arch__swab32(x)
37#define __arch__swab16(x) ___arch__swab16(x)
38
39#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
40# define __BYTEORDER_HAS_U64__
41# define __SWAB_64_THRU_32__
42#endif
43
44#endif /* __GNUC__ */
45
46#include <linux/byteorder/little_endian.h>
47
48#endif /* __V850_BYTEORDER_H__ */
diff --git a/include/asm-v850/cache.h b/include/asm-v850/cache.h
deleted file mode 100644
index 8832c7ea3242..000000000000
--- a/include/asm-v850/cache.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * include/asm-v850/cache.h -- Cache operations
3 *
4 * Copyright (C) 2001,05 NEC Corporation
5 * Copyright (C) 2001,05 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_CACHE_H__
15#define __V850_CACHE_H__
16
17/* All cache operations are machine-dependent. */
18#include <asm/machdep.h>
19
20#ifndef L1_CACHE_BYTES
21/* This processor has no cache, so just choose an arbitrary value. */
22#define L1_CACHE_BYTES 16
23#define L1_CACHE_SHIFT 4
24#endif
25
26#endif /* __V850_CACHE_H__ */
diff --git a/include/asm-v850/cacheflush.h b/include/asm-v850/cacheflush.h
deleted file mode 100644
index 9ece05a202ef..000000000000
--- a/include/asm-v850/cacheflush.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * include/asm-v850/cacheflush.h
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_CACHEFLUSH_H__
15#define __V850_CACHEFLUSH_H__
16
17/* Somebody depends on this; sigh... */
18#include <linux/mm.h>
19
20#include <asm/machdep.h>
21
22
23/* The following are all used by the kernel in ways that only affect
24 systems with MMUs, so we don't need them. */
25#define flush_cache_all() ((void)0)
26#define flush_cache_mm(mm) ((void)0)
27#define flush_cache_dup_mm(mm) ((void)0)
28#define flush_cache_range(vma, start, end) ((void)0)
29#define flush_cache_page(vma, vmaddr, pfn) ((void)0)
30#define flush_dcache_page(page) ((void)0)
31#define flush_dcache_mmap_lock(mapping) ((void)0)
32#define flush_dcache_mmap_unlock(mapping) ((void)0)
33#define flush_cache_vmap(start, end) ((void)0)
34#define flush_cache_vunmap(start, end) ((void)0)
35
36#ifdef CONFIG_NO_CACHE
37
38/* Some systems have no cache at all, in which case we don't need these
39 either. */
40#define flush_icache() ((void)0)
41#define flush_icache_range(start, end) ((void)0)
42#define flush_icache_page(vma,pg) ((void)0)
43#define flush_icache_user_range(vma,pg,adr,len) ((void)0)
44#define flush_cache_sigtramp(vaddr) ((void)0)
45
46#else /* !CONFIG_NO_CACHE */
47
48struct page;
49struct mm_struct;
50struct vm_area_struct;
51
52/* Otherwise, somebody had better define them. */
53extern void flush_icache (void);
54extern void flush_icache_range (unsigned long start, unsigned long end);
55extern void flush_icache_page (struct vm_area_struct *vma, struct page *page);
56extern void flush_icache_user_range (struct vm_area_struct *vma,
57 struct page *page,
58 unsigned long adr, int len);
59extern void flush_cache_sigtramp (unsigned long addr);
60
61#endif /* CONFIG_NO_CACHE */
62
63#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
64do { memcpy(dst, src, len); \
65 flush_icache_user_range(vma, page, vaddr, len); \
66} while (0)
67#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
68 memcpy(dst, src, len)
69
70#endif /* __V850_CACHEFLUSH_H__ */
diff --git a/include/asm-v850/checksum.h b/include/asm-v850/checksum.h
deleted file mode 100644
index d1dddd938262..000000000000
--- a/include/asm-v850/checksum.h
+++ /dev/null
@@ -1,112 +0,0 @@
1/*
2 * include/asm-v850/checksum.h -- Checksum ops
3 *
4 * Copyright (C) 2001,2005 NEC Corporation
5 * Copyright (C) 2001,2005 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_CHECKSUM_H__
15#define __V850_CHECKSUM_H__
16
17/*
18 * computes the checksum of a memory block at buff, length len,
19 * and adds in "sum" (32-bit)
20 *
21 * returns a 32-bit number suitable for feeding into itself
22 * or csum_tcpudp_magic
23 *
24 * this function must be called with even lengths, except
25 * for the last fragment, which may be odd
26 *
27 * it's best to have buff aligned on a 32-bit boundary
28 */
29extern __wsum csum_partial(const void *buff, int len, __wsum sum);
30
31/*
32 * the same as csum_partial, but copies from src while it
33 * checksums
34 *
35 * here even more important to align src and dst on a 32-bit (or even
36 * better 64-bit) boundary
37 */
38extern __wsum csum_partial_copy_nocheck(const void *src,
39 void *dst, int len, __wsum sum);
40
41
42/*
43 * the same as csum_partial_copy, but copies from user space.
44 *
45 * here even more important to align src and dst on a 32-bit (or even
46 * better 64-bit) boundary
47 */
48extern __wsum csum_partial_copy_from_user (const void *src,
49 void *dst,
50 int len, __wsum sum,
51 int *csum_err);
52
53__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
54
55/*
56 * Fold a partial checksum
57 */
58static inline __sum16 csum_fold (__wsum sum)
59{
60 unsigned int result;
61 /*
62                     %1 (sum)      %0 (result)
63       hsw %1, %0    H  L          L  H
64       add %1, %0    H  L          H+L+C   H+L
65 */
66 asm ("hsw %1, %0; add %1, %0" : "=&r" (result) : "r" (sum));
67 return (__force __sum16)(~result >> 16);
68}
69
70
71/*
72 * computes the checksum of the TCP/UDP pseudo-header
73 * returns a 16-bit checksum, already complemented
74 */
75static inline __wsum
76csum_tcpudp_nofold (__be32 saddr, __be32 daddr,
77 unsigned short len,
78 unsigned short proto, __wsum sum)
79{
80 int __carry;
81 __asm__ ("add %2, %0;"
82 "setf c, %1;"
83 "add %1, %0;"
84 "add %3, %0;"
85 "setf c, %1;"
86 "add %1, %0;"
87 "add %4, %0;"
88 "setf c, %1;"
89 "add %1, %0"
90 : "=&r" (sum), "=&r" (__carry)
91 : "r" (daddr), "r" (saddr),
92 "r" ((len + proto) << 8),
93 "0" (sum));
94 return sum;
95}
96
97static inline __sum16
98csum_tcpudp_magic (__be32 saddr, __be32 daddr,
99 unsigned short len,
100 unsigned short proto, __wsum sum)
101{
102 return csum_fold (csum_tcpudp_nofold (saddr, daddr, len, proto, sum));
103}
104
105/*
106 * this routine is used for miscellaneous IP-like checksums, mainly
107 * in icmp.c
108 */
109extern __sum16 ip_compute_csum(const void *buff, int len);
110
111
112#endif /* __V850_CHECKSUM_H__ */
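The csum_fold() comment above describes an end-around-carry fold: the two halfwords of the 32-bit partial sum are added together, any carry out of bit 15 is added back in, and the complement of the folded value is the 16-bit checksum. Purely as an illustration of that arithmetic (not code from this header), a roughly equivalent portable C version would be:

    static inline unsigned short csum_fold_portable(unsigned int sum)
    {
            /* add the high and low halfwords; the second addition folds
               any carry out of bit 15 back in (end-around carry) */
            sum = (sum >> 16) + (sum & 0xffff);
            sum += sum >> 16;
            /* one's-complement of the folded 16-bit value */
            return (unsigned short) ~sum;
    }

The v850 asm reaches the same result in two instructions: hsw leaves the halfword-swapped sum in %0, add then places H+L (plus the carry) in the upper half of %0, and ~result >> 16 extracts and complements it.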
diff --git a/include/asm-v850/clinkage.h b/include/asm-v850/clinkage.h
deleted file mode 100644
index c389691d6f86..000000000000
--- a/include/asm-v850/clinkage.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * include/asm-v850/clinkage.h -- Macros to reflect C symbol-naming conventions
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_CLINKAGE_H__
15#define __V850_CLINKAGE_H__
16
17#include <asm/macrology.h>
18#include <asm/asm.h>
19
20#define C_SYMBOL_NAME(name) macrology_paste(_, name)
21#define C_SYMBOL_STRING(name) macrology_stringify(C_SYMBOL_NAME(name))
22#define C_ENTRY(name) G_ENTRY(C_SYMBOL_NAME(name))
23#define C_DATA(name) G_DATA(C_SYMBOL_NAME(name))
24#define C_END(name) END(C_SYMBOL_NAME(name))
25
26#endif /* __V850_CLINKAGE_H__ */
diff --git a/include/asm-v850/cputime.h b/include/asm-v850/cputime.h
deleted file mode 100644
index 7c799c33b8a9..000000000000
--- a/include/asm-v850/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_CPUTIME_H
2#define __V850_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __V850_CPUTIME_H */
diff --git a/include/asm-v850/current.h b/include/asm-v850/current.h
deleted file mode 100644
index 30aae5673770..000000000000
--- a/include/asm-v850/current.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * include/asm-v850/current.h -- Current task
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_CURRENT_H__
15#define __V850_CURRENT_H__
16
17#ifndef __ASSEMBLY__ /* <linux/thread_info.h> is not asm-safe. */
18#include <linux/thread_info.h>
19#endif
20
21#include <asm/macrology.h>
22
23
24/* Register used to hold the current task pointer while in the kernel.
25 Any `call clobbered' register without a special meaning should be OK,
26 but check asm/v850/kernel/entry.S to be sure. */
27#define CURRENT_TASK_REGNUM 16
28#define CURRENT_TASK macrology_paste (r, CURRENT_TASK_REGNUM)
29
30
31#ifdef __ASSEMBLY__
32
33/* Put a pointer to the current task structure into REG. */
34#define GET_CURRENT_TASK(reg) \
35 GET_CURRENT_THREAD(reg); \
36 ld.w TI_TASK[reg], reg
37
38#else /* !__ASSEMBLY__ */
39
40/* A pointer to the current task. */
41register struct task_struct *current \
42 __asm__ (macrology_stringify (CURRENT_TASK));
43
44#endif /* __ASSEMBLY__ */
45
46
47#endif /* __V850_CURRENT_H__ */
diff --git a/include/asm-v850/delay.h b/include/asm-v850/delay.h
deleted file mode 100644
index 6d028e6b2354..000000000000
--- a/include/asm-v850/delay.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * include/asm-v850/delay.h -- Delay routines, using a pre-computed
3 * "loops_per_second" value
4 *
5 * Copyright (C) 2001,03 NEC Corporation
6 * Copyright (C) 2001,03 Miles Bader <miles@gnu.org>
7 * Copyright (C) 1994 Hamish Macdonald
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file COPYING in the main directory of this
11 * archive for more details.
12 */
13
14#ifndef __V850_DELAY_H__
15#define __V850_DELAY_H__
16
17#include <asm/param.h>
18
19static inline void __delay(unsigned long loops)
20{
21 if (loops)
22 __asm__ __volatile__ ("1: add -1, %0; bnz 1b"
23 : "=r" (loops) : "0" (loops));
24}
25
26/*
27 * Use only for very small delays ( < 1 msec). Should probably use a
28 * lookup table, really, as the multiplications take much too long with
29 * short delays. This is a "reasonable" implementation, though (and the
30 * first constant multiplication gets optimized away if the delay is
31 * a constant)
32 */
33
34extern unsigned long loops_per_jiffy;
35
36static inline void udelay(unsigned long usecs)
37{
38 register unsigned long full_loops, part_loops;
39
40 full_loops = ((usecs * HZ) / 1000000) * loops_per_jiffy;
41 usecs %= (1000000 / HZ);
42 part_loops = (usecs * HZ * loops_per_jiffy) / 1000000;
43
44 __delay(full_loops + part_loops);
45}
46
47#endif /* __V850_DELAY_H__ */
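To make the arithmetic in udelay() above concrete, here is the same split into whole-jiffy loops plus a scaled remainder, with assumed example values (HZ = 100 and loops_per_jiffy = 50000 are illustrative, not taken from any real v850 board; at 50000 loops per 10 ms jiffy that is 5 delay loops per microsecond):

    /* Illustrative only: HZ and loops_per_jiffy values are assumed. */
    #define EXAMPLE_HZ              100UL
    #define EXAMPLE_LOOPS_PER_JIFFY 50000UL

    static unsigned long example_udelay_loops(unsigned long usecs)
    {
            unsigned long full_loops, part_loops;

            /* whole jiffies worth of delay; zero for the sub-millisecond
               delays udelay() is intended for (one jiffy here is 10000 us) */
            full_loops = ((usecs * EXAMPLE_HZ) / 1000000)
                    * EXAMPLE_LOOPS_PER_JIFFY;
            usecs %= 1000000 / EXAMPLE_HZ;

            /* remainder scaled to loop iterations: for usecs = 800,
               800 * 100 * 50000 / 1000000 = 4000 iterations */
            part_loops = (usecs * EXAMPLE_HZ * EXAMPLE_LOOPS_PER_JIFFY)
                    / 1000000;

            return full_loops + part_loops;
    }

The returned count is what __delay() then burns off in its decrement-and-branch loop.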
diff --git a/include/asm-v850/device.h b/include/asm-v850/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/include/asm-v850/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#include <asm-generic/device.h>
7
diff --git a/include/asm-v850/div64.h b/include/asm-v850/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/include/asm-v850/div64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/div64.h>
diff --git a/include/asm-v850/dma-mapping.h b/include/asm-v850/dma-mapping.h
deleted file mode 100644
index 1cc42c603a1b..000000000000
--- a/include/asm-v850/dma-mapping.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __V850_DMA_MAPPING_H__
2#define __V850_DMA_MAPPING_H__
3
4
5#ifdef CONFIG_PCI
6#include <asm-generic/dma-mapping.h>
7#else
8#include <asm-generic/dma-mapping-broken.h>
9#endif
10
11#endif /* __V850_DMA_MAPPING_H__ */
diff --git a/include/asm-v850/dma.h b/include/asm-v850/dma.h
deleted file mode 100644
index 2369849e2d0a..000000000000
--- a/include/asm-v850/dma.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef __V850_DMA_H__
2#define __V850_DMA_H__
3
4/* What should this be? */
5#define MAX_DMA_ADDRESS 0xFFFFFFFF
6
7/* reserve a DMA channel */
8extern int request_dma (unsigned int dmanr, const char * device_id);
9/* release it again */
10extern void free_dma (unsigned int dmanr);
11
12#ifdef CONFIG_PCI
13extern int isa_dma_bridge_buggy;
14#else
15#define isa_dma_bridge_buggy (0)
16#endif
17
18#endif /* __V850_DMA_H__ */
diff --git a/include/asm-v850/elf.h b/include/asm-v850/elf.h
deleted file mode 100644
index 28f5b176ff1a..000000000000
--- a/include/asm-v850/elf.h
+++ /dev/null
@@ -1,99 +0,0 @@
1#ifndef __V850_ELF_H__
2#define __V850_ELF_H__
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/user.h>
10#include <asm/byteorder.h>
11
12typedef unsigned long elf_greg_t;
13
14#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
15typedef elf_greg_t elf_gregset_t[ELF_NGREG];
16
17typedef struct user_fpu_struct elf_fpregset_t;
18
19/*
20 * This is used to ensure we don't load something for the wrong architecture.
21 */
22#define elf_check_arch(x) \
23 ((x)->e_machine == EM_V850 || (x)->e_machine == EM_CYGNUS_V850)
24
25
26/* v850 relocation types. */
27#define R_V850_NONE 0
28#define R_V850_9_PCREL 1
29#define R_V850_22_PCREL 2
30#define R_V850_HI16_S 3
31#define R_V850_HI16 4
32#define R_V850_LO16 5
33#define R_V850_32 6
34#define R_V850_16 7
35#define R_V850_8 8
36#define R_V850_SDA_16_16_OFFSET 9 /* For ld.b, st.b, set1, clr1,
37 not1, tst1, movea, movhi */
38#define R_V850_SDA_15_16_OFFSET 10 /* For ld.w, ld.h, ld.hu, st.w, st.h */
39#define R_V850_ZDA_16_16_OFFSET 11 /* For ld.b, st.b, set1, clr1,
40 not1, tst1, movea, movhi */
41#define R_V850_ZDA_15_16_OFFSET 12 /* For ld.w, ld.h, ld.hu, st.w, st.h */
42#define R_V850_TDA_6_8_OFFSET 13 /* For sst.w, sld.w */
43#define R_V850_TDA_7_8_OFFSET 14 /* For sst.h, sld.h */
44#define R_V850_TDA_7_7_OFFSET 15 /* For sst.b, sld.b */
45#define R_V850_TDA_16_16_OFFSET 16 /* For set1, clr1, not1, tst1,
46 movea, movhi */
47#define R_V850_NUM 17
48
49
50/*
51 * These are used to set parameters in the core dumps.
52 */
53#define ELF_CLASS ELFCLASS32
54#ifdef __LITTLE_ENDIAN__
55#define ELF_DATA ELFDATA2LSB
56#else
57#define ELF_DATA ELFDATA2MSB
58#endif
59#define ELF_ARCH EM_V850
60
61#define USE_ELF_CORE_DUMP
62#define ELF_EXEC_PAGESIZE 4096
63
64
65#define ELF_CORE_COPY_REGS(_dest,_regs) \
66 memcpy((char *) &_dest, (char *) _regs, \
67 sizeof(struct pt_regs));
68
69/* This yields a mask that user programs can use to figure out what
70 instruction set this CPU supports. This could be done in user space,
71 but it's not easy, and we've already done it here. */
72
73#define ELF_HWCAP (0)
74
75/* This yields a string that ld.so will use to load implementation
76 specific libraries for optimization. This is more specific in
77 intent than poking at uname or /proc/cpuinfo.
78
79 For the moment there are no v850-specific optimized libraries,
80 but that could change... */
81
82#define ELF_PLATFORM (NULL)
83
84#define ELF_PLAT_INIT(_r, load_addr) \
85 do { \
86 _r->gpr[0] = _r->gpr[1] = _r->gpr[2] = _r->gpr[3] = \
87 _r->gpr[4] = _r->gpr[5] = _r->gpr[6] = _r->gpr[7] = \
88 _r->gpr[8] = _r->gpr[9] = _r->gpr[10] = _r->gpr[11] = \
89 _r->gpr[12] = _r->gpr[13] = _r->gpr[14] = _r->gpr[15] = \
90 _r->gpr[16] = _r->gpr[17] = _r->gpr[18] = _r->gpr[19] = \
91 _r->gpr[20] = _r->gpr[21] = _r->gpr[22] = _r->gpr[23] = \
92 _r->gpr[24] = _r->gpr[25] = _r->gpr[26] = _r->gpr[27] = \
93 _r->gpr[28] = _r->gpr[29] = _r->gpr[30] = _r->gpr[31] = \
94 0; \
95 } while (0)
96
97#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
98
99#endif /* __V850_ELF_H__ */
diff --git a/include/asm-v850/emergency-restart.h b/include/asm-v850/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/include/asm-v850/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-v850/entry.h b/include/asm-v850/entry.h
deleted file mode 100644
index d9df8ac48584..000000000000
--- a/include/asm-v850/entry.h
+++ /dev/null
@@ -1,113 +0,0 @@
1/*
2 * include/asm-v850/entry.h -- Definitions used by low-level trap handlers
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_ENTRY_H__
15#define __V850_ENTRY_H__
16
17
18#include <asm/ptrace.h>
19#include <asm/machdep.h>
20
21
22/* These are special variables used by the kernel trap/interrupt code
23 to save registers in, at a time when there are no spare registers we
24 can use to do so, and we can't depend on the value of the stack
25 pointer. This means that they must be within a signed 16-bit
26 displacement of 0x00000000. */
27
28#define KERNEL_VAR_SPACE_ADDR R0_RAM_ADDR
29
30#ifdef __ASSEMBLY__
31#define KERNEL_VAR(addr) addr[r0]
32#else
33#define KERNEL_VAR(addr) (*(volatile unsigned long *)(addr))
34#endif
35
36/* Kernel stack pointer, 4 bytes. */
37#define KSP_ADDR (KERNEL_VAR_SPACE_ADDR + 0)
38#define KSP KERNEL_VAR (KSP_ADDR)
39/* 1 if in kernel-mode, 0 if in user mode, 1 byte. */
40#define KM_ADDR (KERNEL_VAR_SPACE_ADDR + 4)
41#define KM KERNEL_VAR (KM_ADDR)
42/* Temporary storage for interrupt handlers, 4 bytes. */
43#define INT_SCRATCH_ADDR (KERNEL_VAR_SPACE_ADDR + 8)
44#define INT_SCRATCH KERNEL_VAR (INT_SCRATCH_ADDR)
45/* Where the stack-pointer is saved when jumping to various sorts of
46 interrupt handlers. ENTRY_SP is used by everything except NMIs,
47 which have their own location. Higher-priority NMIs can clobber the
48 value written by a lower priority NMI, since they can't be disabled,
49 but that's OK, because only NMI0 (the lowest-priority one) is allowed
50 to return. */
51#define ENTRY_SP_ADDR (KERNEL_VAR_SPACE_ADDR + 12)
52#define ENTRY_SP KERNEL_VAR (ENTRY_SP_ADDR)
53#define NMI_ENTRY_SP_ADDR (KERNEL_VAR_SPACE_ADDR + 16)
54#define NMI_ENTRY_SP KERNEL_VAR (NMI_ENTRY_SP_ADDR)
55
56#ifdef CONFIG_RESET_GUARD
57/* Used to detect unexpected resets (since the v850 has no MMU, any call
58 through a null pointer will jump to the reset vector). We detect
59 such resets by checking for a magic value, RESET_GUARD_ACTIVE, in
60 this location. Properly resetting the machine stores zero there, so
61 it shouldn't trigger the guard; the power-on value is uncertain, but
62 it's unlikely to be RESET_GUARD_ACTIVE. */
63#define RESET_GUARD_ADDR (KERNEL_VAR_SPACE_ADDR + 28)
64#define RESET_GUARD KERNEL_VAR (RESET_GUARD_ADDR)
65#define RESET_GUARD_ACTIVE 0xFAB4BEEF
66#endif /* CONFIG_RESET_GUARD */
67
68#ifdef CONFIG_V850E_HIGHRES_TIMER
69#define HIGHRES_TIMER_SLOW_TICKS_ADDR (KERNEL_VAR_SPACE_ADDR + 32)
70#define HIGHRES_TIMER_SLOW_TICKS KERNEL_VAR (HIGHRES_TIMER_SLOW_TICKS_ADDR)
71#endif /* CONFIG_V850E_HIGHRES_TIMER */
72
73#ifndef __ASSEMBLY__
74
75#ifdef CONFIG_RESET_GUARD
76/* Turn off reset guard, so that resetting the machine works normally.
77 This should be called in the various machine_halt, etc., functions. */
78static inline void disable_reset_guard (void)
79{
80 RESET_GUARD = 0;
81}
82#endif /* CONFIG_RESET_GUARD */
83
84#endif /* !__ASSEMBLY__ */
85
86
87/* A `state save frame' is a struct pt_regs preceded by some extra space
88 suitable for a function call stack frame. */
89
90/* Amount of room on the stack reserved for arguments and to satisfy the
91 C calling conventions, in addition to the space used by the struct
92 pt_regs that actually holds saved values. */
93#define STATE_SAVE_ARG_SPACE (6*4) /* Up to six arguments. */
94
95
96#ifdef __ASSEMBLY__
97
98/* The size of a state save frame. */
99#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
100
101#else /* !__ASSEMBLY__ */
102
103/* The size of a state save frame. */
104#define STATE_SAVE_SIZE (sizeof (struct pt_regs) + STATE_SAVE_ARG_SPACE)
105
106#endif /* __ASSEMBLY__ */
107
108
109/* Offset of the struct pt_regs in a state save frame. */
110#define STATE_SAVE_PT_OFFSET STATE_SAVE_ARG_SPACE
111
112
113#endif /* __V850_ENTRY_H__ */
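Putting the STATE_SAVE_* definitions above together, the stack layout they imply is the following (a sketch of the offsets only, not additional definitions from the original header):

    /*
     *   sp ->  +--------------------------------------+
     *          | argument / call scratch area         |  STATE_SAVE_ARG_SPACE
     *          | (6 words = 24 bytes)                 |  (= STATE_SAVE_PT_OFFSET)
     *          +--------------------------------------+
     *          | struct pt_regs (saved machine state) |  PT_SIZE /
     *          |                                      |  sizeof (struct pt_regs)
     *          +--------------------------------------+
     *
     *   STATE_SAVE_SIZE = STATE_SAVE_ARG_SPACE + sizeof (struct pt_regs)
     */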
diff --git a/include/asm-v850/errno.h b/include/asm-v850/errno.h
deleted file mode 100644
index 31c91df01205..000000000000
--- a/include/asm-v850/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_ERRNO_H__
2#define __V850_ERRNO_H__
3
4#include <asm-generic/errno.h>
5
6#endif /* __V850_ERRNO_H__ */
diff --git a/include/asm-v850/fb.h b/include/asm-v850/fb.h
deleted file mode 100644
index c7df38030992..000000000000
--- a/include/asm-v850/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-v850/fcntl.h b/include/asm-v850/fcntl.h
deleted file mode 100644
index 3af4d56776dd..000000000000
--- a/include/asm-v850/fcntl.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __V850_FCNTL_H__
2#define __V850_FCNTL_H__
3
4#define O_DIRECTORY 040000 /* must be a directory */
5#define O_NOFOLLOW 0100000 /* don't follow links */
6#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
7#define O_LARGEFILE 0400000
8
9#include <asm-generic/fcntl.h>
10
11#endif /* __V850_FCNTL_H__ */
diff --git a/include/asm-v850/flat.h b/include/asm-v850/flat.h
deleted file mode 100644
index 17f0ea566611..000000000000
--- a/include/asm-v850/flat.h
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2 * include/asm-v850/flat.h -- uClinux flat-format executables
3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_FLAT_H__
15#define __V850_FLAT_H__
16
17/* The amount by which a relocation can exceed the program image limits
18 without being regarded as an error. On the v850, the relocations of
19 some base-pointers can be offset by 0x8000 (to allow better usage of the
20 space offered by 16-bit signed offsets -- in most cases the offsets used
21 with such a base-pointer will be negative). */
22
23#define flat_reloc_valid(reloc, size) ((reloc) <= (size + 0x8000))
24
25#define flat_stack_align(sp) /* nothing needed */
26#define flat_argvp_envp_on_stack() 0
27#define flat_old_ram_flag(flags) (flags)
28#define flat_set_persistent(relval, p) 0
29
30/* We store the type of relocation in the top 4 bits of the `relval.' */
31
32/* Convert a relocation entry into an address. */
33static inline unsigned long
34flat_get_relocate_addr (unsigned long relval)
35{
36 return relval & 0x0fffffff; /* Mask out top 4-bits */
37}
38
39#define flat_v850_get_reloc_type(relval) ((relval) >> 28)
40
41#define FLAT_V850_R_32 0 /* Normal 32-bit reloc */
42#define FLAT_V850_R_HI16S_LO15 1 /* High 16-bits + signed 15-bit low field */
43#define FLAT_V850_R_HI16S_LO16 2 /* High 16-bits + signed 16-bit low field */
44
45/* Extract the address to be relocated from the symbol reference at RP;
46 RELVAL is the raw relocation-table entry from which RP is derived.
47 For the v850, RP should always be half-word aligned. */
48static inline unsigned long flat_get_addr_from_rp (unsigned long *rp,
49 unsigned long relval,
50 unsigned long flags,
51 unsigned long *persistent)
52{
53 short *srp = (short *)rp;
54
55 switch (flat_v850_get_reloc_type (relval))
56 {
57 case FLAT_V850_R_32:
58 /* Simple 32-bit address. */
59 return srp[0] | (srp[1] << 16);
60
61 case FLAT_V850_R_HI16S_LO16:
62 /* The high and low halves of the address are in the 16
63 bits at RP, and the 2nd word of the 32-bit instruction
64 following that, respectively. The low half is _signed_
65 so we have to sign-extend it and add it to the upper
66 half instead of simply or-ing them together.
67
68 Unlike most relocated addresses, this one is stored in
69 native (little-endian) byte-order to avoid problems with
70 trashing the low-order bit, so we have to convert to
71 network-byte-order before returning, as that's what the
72 caller expects. */
73 return htonl ((srp[0] << 16) + srp[2]);
74
75 case FLAT_V850_R_HI16S_LO15:
76 /* The high and low halves of the address are in the 16
77 bits at RP, and the upper 15 bits of the 2nd word of the
78 32-bit instruction following that, respectively. The
79 low half is _signed_ so we have to sign-extend it and
80 add it to the upper half instead of simply or-ing them
81 together. The lowest bit is always zero.
82
83 Unlike most relocated addresses, this one is stored in
84 native (little-endian) byte-order to avoid problems with
85 trashing the low-order bit, so we have to convert to
86 network-byte-order before returning, as that's what the
87 caller expects. */
88 return htonl ((srp[0] << 16) + (srp[2] & ~0x1));
89
90 default:
91 return ~0; /* bogus value */
92 }
93}
94
95/* Insert the address ADDR into the symbol reference at RP;
96 RELVAL is the raw relocation-table entry from which RP is derived.
97 For the v850, RP should always be half-word aligned. */
98static inline void flat_put_addr_at_rp (unsigned long *rp, unsigned long addr,
99 unsigned long relval)
100{
101 short *srp = (short *)rp;
102
103 switch (flat_v850_get_reloc_type (relval)) {
104 case FLAT_V850_R_32:
105 /* Simple 32-bit address. */
106 srp[0] = addr & 0xFFFF;
107 srp[1] = (addr >> 16);
108 break;
109
110 case FLAT_V850_R_HI16S_LO16:
111 /* The high and low halves of the address are in the 16
112 bits at RP, and the 2nd word of the 32-bit instruction
113 following that, respectively. The low half is _signed_
114 so we must carry its sign bit to the upper half before
115 writing the upper half. */
116 srp[0] = (addr >> 16) + ((addr >> 15) & 0x1);
117 srp[2] = addr & 0xFFFF;
118 break;
119
120 case FLAT_V850_R_HI16S_LO15:
121 /* The high and low halves of the address are in the 16
122 bits at RP, and the upper 15 bits of the 2nd word of the
123 32-bit instruction following that, respectively. The
124 low half is _signed_ so we must carry its sign bit to
125 the upper half before writing the upper half. The
126 lowest bit we preserve from the existing instruction. */
127 srp[0] = (addr >> 16) + ((addr >> 15) & 0x1);
128 srp[2] = (addr & 0xFFFE) | (srp[2] & 0x1);
129 break;
130 }
131}
132
133#endif /* __V850_FLAT_H__ */
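The FLAT_V850_R_HI16S_LO16 case above works because the stored high halfword is pre-biased by the sign bit of the low halfword, so the sign extension applied when the low halfword is read back as a signed short cancels out exactly. A small host-side sketch of that round trip (the helper names are hypothetical, and the htonl() byte-order step done by the real code is omitted):

    #include <stdio.h>

    /* Store ADDR the way flat_put_addr_at_rp() does for HI16S_LO16:
       bias the high halfword by the sign bit of the low halfword. */
    static void put_hi16s_lo16(short *srp, unsigned long addr)
    {
            srp[0] = (addr >> 16) + ((addr >> 15) & 0x1);
            srp[2] = addr & 0xFFFF;
    }

    /* Read it back: srp[2] is sign-extended, which undoes the bias. */
    static unsigned long get_hi16s_lo16(const short *srp)
    {
            return (unsigned long)((srp[0] << 16) + srp[2]);
    }

    int main(void)
    {
            short words[3] = { 0, 0, 0 };

            put_hi16s_lo16(words, 0x12348000UL);
            /* stores 0x1235 and 0x8000; prints 0x12348000, since
               0x12350000 + (-0x8000) recovers the original address */
            printf("0x%lx\n", get_hi16s_lo16(words));
            return 0;
    }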
diff --git a/include/asm-v850/fpga85e2c.h b/include/asm-v850/fpga85e2c.h
deleted file mode 100644
index 23aae666c718..000000000000
--- a/include/asm-v850/fpga85e2c.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * include/asm-v850/fpga85e2c.h -- Machine-dependent defs for
3 * FPGA implementation of V850E2/NA85E2C
4 *
5 * Copyright (C) 2002,03 NEC Electronics Corporation
6 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#ifndef __V850_FPGA85E2C_H__
16#define __V850_FPGA85E2C_H__
17
18#include <asm/v850e2.h>
19#include <asm/clinkage.h>
20
21
22#define CPU_MODEL "v850e2/fpga85e2c"
23#define CPU_MODEL_LONG "NEC V850E2/NA85E2C"
24#define PLATFORM "fpga85e2c"
25#define PLATFORM_LONG "NA85E2C FPGA implementation"
26
27
28/* `external ram'. */
29#define ERAM_ADDR 0
30#define ERAM_SIZE 0x00100000 /* 1MB */
31
32
33/* FPGA specific control registers. */
34
35/* Writing a non-zero value to FLGREG(0) will signal the controlling CPU
36 to stop execution. */
37#define FLGREG_ADDR(n) (0xFFE80100 + 2*(n))
38#define FLGREG(n) (*(volatile unsigned char *)FLGREG_ADDR (n))
39#define FLGREG_NUM 2
40
41#define CSDEV_ADDR(n) (0xFFE80110 + 2*(n))
42#define CSDEV(n) (*(volatile unsigned char *)CSDEV_ADDR (n))
43
44
45/* Timer interrupts 0-3, interrupt at intervals from CLK/4096 to CLK/16384. */
46#define IRQ_RPU(n) (60 + (n))
47#define IRQ_RPU_NUM 4
48
49/* For <asm/irq.h> */
50#define NUM_CPU_IRQS 64
51
52
53/* General-purpose timer. */
54/* control/status register (can only be read/written via bit insns) */
55#define RPU_GTMC_ADDR 0xFFFFFB00
56#define RPU_GTMC (*(volatile unsigned char *)RPU_GTMC_ADDR)
57#define RPU_GTMC_CE_BIT 7 /* clock enable (control) */
58#define RPU_GTMC_OV_BIT 6 /* overflow (status) */
59#define RPU_GTMC_CLK_BIT 1 /* 0 = .5 MHz CLK, 1 = 1 MHz (control) */
60/* 32-bit count (8 least-significant bits are always zero). */
61#define RPU_GTM_ADDR 0xFFFFFB28
62#define RPU_GTM (*(volatile unsigned long *)RPU_GTM_ADDR)
63
64
65/* For <asm/page.h> */
66#define PAGE_OFFSET ERAM_ADDR /* minimum allocatable address */
67
68
69/* For <asm/entry.h> */
70/* `R0 RAM', used for a few miscellaneous variables that must be accessible
71 using a load instruction relative to R0. The FPGA implementation
72 actually has no on-chip RAM, so we use part of main ram just after the
73 interrupt vectors. */
74#ifdef __ASSEMBLY__
75#define R0_RAM_ADDR lo(C_SYMBOL_NAME(_r0_ram))
76#else
77extern char _r0_ram;
78#define R0_RAM_ADDR ((unsigned long)&_r0_ram)
79#endif
80
81
82#endif /* __V850_FPGA85E2C_H__ */
diff --git a/include/asm-v850/futex.h b/include/asm-v850/futex.h
deleted file mode 100644
index 6a332a9f099c..000000000000
--- a/include/asm-v850/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#include <asm-generic/futex.h>
5
6#endif
diff --git a/include/asm-v850/gbus_int.h b/include/asm-v850/gbus_int.h
deleted file mode 100644
index 0c4bce753c7e..000000000000
--- a/include/asm-v850/gbus_int.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/*
2 * include/asm-v850/gbus_int.h -- Midas labs GBUS interrupt support
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_GBUS_INT_H__
15#define __V850_GBUS_INT_H__
16
17
18/* The GBUS interrupt interface has 32 interrupts shared among 4
19 processor interrupts. The 32 GBUS interrupts are divided into two
20 sets of 16 each, for allocating among control registers, etc (there
21 are two of each control register, with bits 0-15 controlling an
22 interrupt each). */
23
24/* The GBUS interrupts themselves. */
25#define IRQ_GBUS_INT(n) (GBUS_INT_BASE_IRQ + (n))
26#define IRQ_GBUS_INT_NUM 32
27
28/* Control registers. */
29#define GBUS_INT_STATUS_ADDR(w) (GBUS_INT_BASE_ADDR + (w)*0x40)
30#define GBUS_INT_STATUS(w) (*(volatile u16 *)GBUS_INT_STATUS_ADDR(w))
31#define GBUS_INT_CLEAR_ADDR(w) (GBUS_INT_BASE_ADDR + 0x10 + (w)*0x40)
32#define GBUS_INT_CLEAR(w) (*(volatile u16 *)GBUS_INT_CLEAR_ADDR(w))
33#define GBUS_INT_EDGE_ADDR(w) (GBUS_INT_BASE_ADDR + 0x20 + (w)*0x40)
34#define GBUS_INT_EDGE(w) (*(volatile u16 *)GBUS_INT_EDGE_ADDR(w))
35#define GBUS_INT_POLARITY_ADDR(w) (GBUS_INT_BASE_ADDR + 0x30 + (w)*0x40)
36#define GBUS_INT_POLARITY(w) (*(volatile u16 *)GBUS_INT_POLARITY_ADDR(w))
37/* This allows enabling interrupt bits in word W for interrupt GINTn. */
38#define GBUS_INT_ENABLE_ADDR(w, n) \
39 (GBUS_INT_BASE_ADDR + 0x100 + (w)*0x10 + (n)*0x20)
40#define GBUS_INT_ENABLE(w, n) (*(volatile u16 *)GBUS_INT_ENABLE_ADDR(w, n))
41
42/* Mapping between kernel interrupt numbers and hardware control regs/bits. */
43#define GBUS_INT_BITS_PER_WORD 16
44#define GBUS_INT_NUM_WORDS (IRQ_GBUS_INT_NUM / GBUS_INT_BITS_PER_WORD)
45#define GBUS_INT_IRQ_WORD(irq) (((irq) - GBUS_INT_BASE_IRQ) >> 4)
46#define GBUS_INT_IRQ_BIT(irq) (((irq) - GBUS_INT_BASE_IRQ) & 0xF)
47#define GBUS_INT_IRQ_MASK(irq) (1 << GBUS_INT_IRQ_BIT(irq))
48
49
50/* Possible priorities for GBUS interrupts. */
51#define GBUS_INT_PRIORITY_HIGH 2
52#define GBUS_INT_PRIORITY_MEDIUM 4
53#define GBUS_INT_PRIORITY_LOW 6
54
55
56#ifndef __ASSEMBLY__
57
58/* Enable interrupt handling for interrupt IRQ. */
59extern void gbus_int_enable_irq (unsigned irq);
60/* Disable interrupt handling for interrupt IRQ. Note that any
61 interrupts received while disabled will be delivered once the
62 interrupt is enabled again, unless they are explicitly cleared using
63 `gbus_int_clear_pending_irq'. */
64extern void gbus_int_disable_irq (unsigned irq);
65/* Return true if interrupt handling for interrupt IRQ is enabled. */
66extern int gbus_int_irq_enabled (unsigned irq);
67/* Disable all GBUS irqs. */
68extern void gbus_int_disable_irqs (void);
69/* Clear any pending interrupts for IRQ. */
70extern void gbus_int_clear_pending_irq (unsigned irq);
71/* Return true if interrupt IRQ is pending (but disabled). */
72extern int gbus_int_irq_pending (unsigned irq);
73
74
75struct gbus_int_irq_init {
76 const char *name; /* name of interrupt type */
77
78 /* Range of kernel irq numbers for this type:
79 BASE, BASE+INTERVAL, ..., BASE+INTERVAL*NUM */
80 unsigned base, num, interval;
81
82 unsigned priority; /* interrupt priority to assign */
83};
84struct hw_interrupt_type; /* fwd decl */
85
86/* Initialize HW_IRQ_TYPES for GBUS irqs described in array
87 INITS (which is terminated by an entry with the name field == 0). */
88extern void gbus_int_init_irq_types (struct gbus_int_irq_init *inits,
89 struct hw_interrupt_type *hw_irq_types);
90
91/* Initialize GBUS interrupts. */
92extern void gbus_int_init_irqs (void);
93
94#endif /* !__ASSEMBLY__ */
95
96
97#endif /* __V850_GBUS_INT_H__ */
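As a worked example of the irq-to-register mapping macros above (the offset 21 is arbitrary, and GBUS_INT_BASE_IRQ is platform-defined):

    /* for irq = GBUS_INT_BASE_IRQ + 21:
     *   GBUS_INT_IRQ_WORD(irq) = 21 >> 4  = 1
     *   GBUS_INT_IRQ_BIT(irq)  = 21 & 0xF = 5
     *   GBUS_INT_IRQ_MASK(irq) = 1 << 5   = 0x20
     * i.e. bit 5 of the second 16-bit status/clear/enable word. */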
diff --git a/include/asm-v850/hardirq.h b/include/asm-v850/hardirq.h
deleted file mode 100644
index 04e20127c5af..000000000000
--- a/include/asm-v850/hardirq.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef __V850_HARDIRQ_H__
2#define __V850_HARDIRQ_H__
3
4#include <linux/threads.h>
5#include <linux/cache.h>
6
7#include <asm/irq.h>
8
9typedef struct {
10 unsigned int __softirq_pending;
11} ____cacheline_aligned irq_cpustat_t;
12
13#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
14
15#define HARDIRQ_BITS 8
16
17/*
18 * The hardirq mask has to be large enough to have
19 * space for potentially all IRQ sources in the system
20 * nesting on a single CPU:
21 */
22#if (1 << HARDIRQ_BITS) < NR_IRQS
23# error HARDIRQ_BITS is too low!
24#endif
25
26void ack_bad_irq(unsigned int irq);
27
28#endif /* __V850_HARDIRQ_H__ */
diff --git a/include/asm-v850/highres_timer.h b/include/asm-v850/highres_timer.h
deleted file mode 100644
index 486fb49ceab6..000000000000
--- a/include/asm-v850/highres_timer.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * include/asm-v850/highres_timer.h -- High resolution timing routines
3 *
4 * Copyright (C) 2001,03 NEC Electronics Corporation
5 * Copyright (C) 2001,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_HIGHRES_TIMER_H__
15#define __V850_HIGHRES_TIMER_H__
16
17#ifndef __ASSEMBLY__
18#include <linux/time.h>
19#endif
20
21#include <asm/entry.h>
22
23
24/* Frequency of the `slow ticks' (one tick each time the fast-tick
25 counter overflows). */
26#define HIGHRES_TIMER_SLOW_TICK_RATE 25
27
28/* Which timer in the V850E `Timer D' we use. */
29#define HIGHRES_TIMER_TIMER_D_UNIT 3
30
31
32#ifndef __ASSEMBLY__
33
34extern void highres_timer_start (void), highres_timer_stop (void);
35extern void highres_timer_reset (void);
36extern void highres_timer_read_ticks (u32 *slow_ticks, u32 *fast_ticks);
37extern void highres_timer_ticks_to_timeval (u32 slow_ticks, u32 fast_ticks,
38 struct timeval *tv);
39extern void highres_timer_read (struct timeval *tv);
40
41#endif /* !__ASSEMBLY__ */
42
43
44#endif /* __V850_HIGHRES_TIMER_H__ */
diff --git a/include/asm-v850/hw_irq.h b/include/asm-v850/hw_irq.h
deleted file mode 100644
index 043e94bb6bd8..000000000000
--- a/include/asm-v850/hw_irq.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef __V850_HW_IRQ_H__
2#define __V850_HW_IRQ_H__
3
4#endif /* __V850_HW_IRQ_H__ */
diff --git a/include/asm-v850/io.h b/include/asm-v850/io.h
deleted file mode 100644
index cdad251fba9f..000000000000
--- a/include/asm-v850/io.h
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * include/asm-v850/io.h -- Misc I/O operations
3 *
4 * Copyright (C) 2001,02,03,04,05 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03,04,05 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_IO_H__
15#define __V850_IO_H__
16
17#define IO_SPACE_LIMIT 0xFFFFFFFF
18
19#define readb(addr) \
20 ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
21#define readw(addr) \
22 ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
23#define readl(addr) \
24 ({ unsigned long __v = (*(volatile unsigned long *) (addr)); __v; })
25
26#define readb_relaxed(a) readb(a)
27#define readw_relaxed(a) readw(a)
28#define readl_relaxed(a) readl(a)
29
30#define writeb(val, addr) \
31 (void)((*(volatile unsigned char *) (addr)) = (val))
32#define writew(val, addr) \
33 (void)((*(volatile unsigned short *) (addr)) = (val))
34#define writel(val, addr) \
35 (void)((*(volatile unsigned int *) (addr)) = (val))
36
37#define __raw_readb readb
38#define __raw_readw readw
39#define __raw_readl readl
40#define __raw_writeb writeb
41#define __raw_writew writew
42#define __raw_writel writel
43
44#define inb(addr) readb (addr)
45#define inw(addr) readw (addr)
46#define inl(addr) readl (addr)
47#define outb(x, addr) ((void) writeb (x, addr))
48#define outw(x, addr) ((void) writew (x, addr))
49#define outl(x, addr) ((void) writel (x, addr))
50
51#define inb_p(port) inb((port))
52#define outb_p(val, port) outb((val), (port))
53#define inw_p(port) inw((port))
54#define outw_p(val, port) outw((val), (port))
55#define inl_p(port) inl((port))
56#define outl_p(val, port) outl((val), (port))
57
58static inline void insb (unsigned long port, void *dst, unsigned long count)
59{
60 unsigned char *p = dst;
61 while (count--)
62 *p++ = inb (port);
63}
64static inline void insw (unsigned long port, void *dst, unsigned long count)
65{
66 unsigned short *p = dst;
67 while (count--)
68 *p++ = inw (port);
69}
70static inline void insl (unsigned long port, void *dst, unsigned long count)
71{
72 unsigned long *p = dst;
73 while (count--)
74 *p++ = inl (port);
75}
76
77static inline void
78outsb (unsigned long port, const void *src, unsigned long count)
79{
80 const unsigned char *p = src;
81 while (count--)
82 outb (*p++, port);
83}
84static inline void
85outsw (unsigned long port, const void *src, unsigned long count)
86{
87 const unsigned short *p = src;
88 while (count--)
89 outw (*p++, port);
90}
91static inline void
92outsl (unsigned long port, const void *src, unsigned long count)
93{
94 const unsigned long *p = src;
95 while (count--)
96 outl (*p++, port);
97}
98
99
100/* Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to
101 long before casting it to a pointer to avoid compiler warnings. */
102#define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr))
103#define iounmap(addr) ((void)0)
104
105#define ioremap_nocache(physaddr, size) ioremap (physaddr, size)
106#define ioremap_writethrough(physaddr, size) ioremap (physaddr, size)
107#define ioremap_fullcache(physaddr, size) ioremap (physaddr, size)
108
109#define ioread8(addr) readb (addr)
110#define ioread16(addr) readw (addr)
111#define ioread32(addr) readl (addr)
112#define iowrite8(val, addr) writeb (val, addr)
113#define iowrite16(val, addr) writew (val, addr)
114#define iowrite32(val, addr) writel (val, addr)
115
116#define mmiowb()
117
118#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
119#if 0
120/* This is really stupid; don't define it. */
121#define page_to_bus(page) page_to_phys (page)
122#endif
123
124/* Conversion between virtual and physical mappings. */
125#define phys_to_virt(addr) ((void *)__phys_to_virt (addr))
126#define virt_to_phys(addr) ((unsigned long)__virt_to_phys (addr))
127
128#define memcpy_fromio(dst, src, len) memcpy (dst, (void *)src, len)
129#define memcpy_toio(dst, src, len) memcpy ((void *)dst, src, len)
130
131/*
132 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
133 * access
134 */
135#define xlate_dev_mem_ptr(p) __va(p)
136
137/*
138 * Convert a virtual cached pointer to an uncached pointer
139 */
140#define xlate_dev_kmem_ptr(p) p
141
142#endif /* __V850_IO_H__ */
diff --git a/include/asm-v850/ioctl.h b/include/asm-v850/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/include/asm-v850/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/include/asm-v850/ioctls.h b/include/asm-v850/ioctls.h
deleted file mode 100644
index 5313abd5f388..000000000000
--- a/include/asm-v850/ioctls.h
+++ /dev/null
@@ -1,84 +0,0 @@
1#ifndef __V850_IOCTLS_H__
2#define __V850_IOCTLS_H__
3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCSBRK 0x5427 /* BSD compatibility */
47#define TIOCCBRK 0x5428 /* BSD compatibility */
48#define TIOCGSID 0x5429 /* Return the session ID of FD */
49#define TCGETS2 _IOR('T',0x2A, struct termios2)
50#define TCSETS2 _IOW('T',0x2B, struct termios2)
51#define TCSETSW2 _IOW('T',0x2C, struct termios2)
52#define TCSETSF2 _IOW('T',0x2D, struct termios2)
53#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
54#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
55
56#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
57#define FIOCLEX 0x5451
58#define FIOASYNC 0x5452
59#define TIOCSERCONFIG 0x5453
60#define TIOCSERGWILD 0x5454
61#define TIOCSERSWILD 0x5455
62#define TIOCGLCKTRMIOS 0x5456
63#define TIOCSLCKTRMIOS 0x5457
64#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
65#define TIOCSERGETLSR 0x5459 /* Get line status register */
66#define TIOCSERGETMULTI 0x545A /* Get multiport config */
67#define TIOCSERSETMULTI 0x545B /* Set multiport config */
68
69#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
70#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
71#define FIOQSIZE 0x545E
72
73/* Used for packet mode */
74#define TIOCPKT_DATA 0
75#define TIOCPKT_FLUSHREAD 1
76#define TIOCPKT_FLUSHWRITE 2
77#define TIOCPKT_STOP 4
78#define TIOCPKT_START 8
79#define TIOCPKT_NOSTOP 16
80#define TIOCPKT_DOSTOP 32
81
82#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
83
84#endif /* __V850_IOCTLS_H__ */
diff --git a/include/asm-v850/ipcbuf.h b/include/asm-v850/ipcbuf.h
deleted file mode 100644
index d8cbe9886d95..000000000000
--- a/include/asm-v850/ipcbuf.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __V850E_IPCBUF_H__
2#define __V850E_IPCBUF_H__
3
4/*
5 * The user_ipc_perm structure for the v850e architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode_t and seq
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid32_t uid;
18 __kernel_gid32_t gid;
19 __kernel_uid32_t cuid;
20 __kernel_gid32_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned short __pad2;
25 unsigned long __unused1;
26 unsigned long __unused2;
27};
28
29#endif /* __V850E_IPCBUF_H__ */
diff --git a/include/asm-v850/irq.h b/include/asm-v850/irq.h
deleted file mode 100644
index 7d0d4cd1ce54..000000000000
--- a/include/asm-v850/irq.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * include/asm-v850/irq.h -- Machine interrupt handling
3 *
4 * Copyright (C) 2001,02,04 NEC Electronics Corporation
5 * Copyright (C) 2001,02,04 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_IRQ_H__
15#define __V850_IRQ_H__
16
17#include <asm/machdep.h>
18
19/* Default NUM_MACH_IRQS. */
20#ifndef NUM_MACH_IRQS
21#define NUM_MACH_IRQS NUM_CPU_IRQS
22#endif
23
24/* NMIs have IRQ numbers from FIRST_NMI to FIRST_NMI+NUM_NMIS-1. */
25#define FIRST_NMI NUM_MACH_IRQS
26#define IRQ_NMI(n) (FIRST_NMI + (n))
27/* v850 processors have 3 non-maskable interrupts. */
28#define NUM_NMIS 3
29
30/* Includes both maskable and non-maskable irqs. */
31#define NR_IRQS (NUM_MACH_IRQS + NUM_NMIS)
32
33
34#ifndef __ASSEMBLY__
35
36struct pt_regs;
37struct hw_interrupt_type;
38struct irqaction;
39
40#define irq_canonicalize(irq) (irq)
41
42/* Initialize irq handling for IRQs
43 BASE_IRQ, BASE_IRQ+INTERVAL, ..., BASE_IRQ+NUM*INTERVAL
44 to IRQ_TYPE. An IRQ_TYPE of 0 means to use a generic interrupt type. */
45extern void
46init_irq_handlers (int base_irq, int num, int interval,
47 struct hw_interrupt_type *irq_type);
48
49/* Handle interrupt IRQ. REGS are the registers at the time of the
50 interrupt. */
51extern unsigned int handle_irq (int irq, struct pt_regs *regs);
52
53#endif /* !__ASSEMBLY__ */
54
55#endif /* __V850_IRQ_H__ */
diff --git a/include/asm-v850/irq_regs.h b/include/asm-v850/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/include/asm-v850/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/include/asm-v850/kdebug.h b/include/asm-v850/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/include/asm-v850/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kdebug.h>
diff --git a/include/asm-v850/kmap_types.h b/include/asm-v850/kmap_types.h
deleted file mode 100644
index 3288976b161f..000000000000
--- a/include/asm-v850/kmap_types.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef __V850_KMAP_TYPES_H__
2#define __V850_KMAP_TYPES_H__
3
4enum km_type {
5 KM_BOUNCE_READ,
6 KM_SKB_SUNRPC_DATA,
7 KM_SKB_DATA_SOFTIRQ,
8 KM_USER0,
9 KM_USER1,
10 KM_BIO_SRC_IRQ,
11 KM_BIO_DST_IRQ,
12 KM_PTE0,
13 KM_PTE1,
14 KM_IRQ0,
15 KM_IRQ1,
16 KM_TYPE_NR
17};
18
19#endif /* __V850_KMAP_TYPES_H__ */
diff --git a/include/asm-v850/kvm.h b/include/asm-v850/kvm.h
deleted file mode 100644
index 3f729b79febc..000000000000
--- a/include/asm-v850/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_V850_H
2#define __LINUX_KVM_V850_H
3
4/* v850 does not support KVM */
5
6#endif
diff --git a/include/asm-v850/linkage.h b/include/asm-v850/linkage.h
deleted file mode 100644
index b6185d3cfe68..000000000000
--- a/include/asm-v850/linkage.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H
3
4#ifdef __ASSEMBLY__
5#include <asm/asm.h>
6#endif
7
8#endif
diff --git a/include/asm-v850/local.h b/include/asm-v850/local.h
deleted file mode 100644
index 705148abe276..000000000000
--- a/include/asm-v850/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_LOCAL_H__
2#define __V850_LOCAL_H__
3
4#include <asm-generic/local.h>
5
6#endif /* __V850_LOCAL_H__ */
diff --git a/include/asm-v850/ma.h b/include/asm-v850/ma.h
deleted file mode 100644
index 89e66473a176..000000000000
--- a/include/asm-v850/ma.h
+++ /dev/null
@@ -1,101 +0,0 @@
1/*
2 * include/asm-v850/ma.h -- V850E/MA series of cpu chips
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_MA_H__
15#define __V850_MA_H__
16
17/* The MA series uses the V850E cpu core. */
18#include <asm/v850e.h>
19
20
21/* For <asm/entry.h> */
22/* We use on-chip RAM, for a few miscellaneous variables that must be
23 accessible using a load instruction relative to R0. The amount
24 varies between chip models, but there's always at least 4K, and it
25 should always start at FFFFC000. */
26#define R0_RAM_ADDR 0xFFFFC000
27
28
29/* MA series UART details. */
30#define V850E_UART_BASE_FREQ CPU_CLOCK_FREQ
31
32/* This is a function that gets called before configuring the UART. */
33#define V850E_UART_PRE_CONFIGURE ma_uart_pre_configure
34#ifndef __ASSEMBLY__
35extern void ma_uart_pre_configure (unsigned chan,
36 unsigned cflags, unsigned baud);
37#endif
38
39
40/* MA series timer C details. */
41#define V850E_TIMER_C_BASE_ADDR 0xFFFFF600
42
43
44/* MA series timer D details. */
45#define V850E_TIMER_D_BASE_ADDR 0xFFFFF540
46#define V850E_TIMER_D_TMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x0)
47#define V850E_TIMER_D_CMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x2)
48#define V850E_TIMER_D_TMCD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x4)
49
50#define V850E_TIMER_D_BASE_FREQ CPU_CLOCK_FREQ
51
52
53/* Port 0 */
54/* Direct I/O. Bits 0-7 are pins P00-P07. */
55#define MA_PORT0_IO_ADDR 0xFFFFF400
56#define MA_PORT0_IO (*(volatile u8 *)MA_PORT0_IO_ADDR)
57/* Port mode (for direct I/O, 0 = output, 1 = input). */
58#define MA_PORT0_PM_ADDR 0xFFFFF420
59#define MA_PORT0_PM (*(volatile u8 *)MA_PORT0_PM_ADDR)
60/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
61#define MA_PORT0_PMC_ADDR 0xFFFFF440
62#define MA_PORT0_PMC (*(volatile u8 *)MA_PORT0_PMC_ADDR)
63/* Port function control (for P04-P07, 0 = IRQ, 1 = DMARQ). */
64#define MA_PORT0_PFC_ADDR 0xFFFFF460
65#define MA_PORT0_PFC (*(volatile u8 *)MA_PORT0_PFC_ADDR)
66
67/* Port 1 */
68/* Direct I/O. Bits 0-3 are pins P10-P13. */
69#define MA_PORT1_IO_ADDR 0xFFFFF402
70#define MA_PORT1_IO (*(volatile u8 *)MA_PORT1_IO_ADDR)
71/* Port mode (for direct I/O, 0 = output, 1 = input). */
72#define MA_PORT1_PM_ADDR 0xFFFFF420
73#define MA_PORT1_PM (*(volatile u8 *)MA_PORT1_PM_ADDR)
74/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
75#define MA_PORT1_PMC_ADDR 0xFFFFF442
76#define MA_PORT1_PMC (*(volatile u8 *)MA_PORT1_PMC_ADDR)
77
78/* Port 4 */
79/* Direct I/O. Bits 0-5 are pins P40-P45. */
80#define MA_PORT4_IO_ADDR 0xFFFFF408
81#define MA_PORT4_IO (*(volatile u8 *)MA_PORT4_IO_ADDR)
82/* Port mode (for direct I/O, 0 = output, 1 = input). */
83#define MA_PORT4_PM_ADDR 0xFFFFF428
84#define MA_PORT4_PM (*(volatile u8 *)MA_PORT4_PM_ADDR)
85/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
86#define MA_PORT4_PMC_ADDR 0xFFFFF448
87#define MA_PORT4_PMC (*(volatile u8 *)MA_PORT4_PMC_ADDR)
88/* Port function control (for serial interfaces, 0 = CSI, 1 = UART). */
89#define MA_PORT4_PFC_ADDR 0xFFFFF468
90#define MA_PORT4_PFC (*(volatile u8 *)MA_PORT4_PFC_ADDR)
91
92
93#ifndef __ASSEMBLY__
94
95/* Initialize MA chip interrupts. */
96extern void ma_init_irqs (void);
97
98#endif /* !__ASSEMBLY__ */
99
100
101#endif /* __V850_MA_H__ */
diff --git a/include/asm-v850/ma1.h b/include/asm-v850/ma1.h
deleted file mode 100644
index ede1f1de2b7a..000000000000
--- a/include/asm-v850/ma1.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * include/asm-v850/ma1.h -- V850E/MA1 cpu chip
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_MA1_H__
15#define __V850_MA1_H__
16
17/* Inherit more generic details from MA series. */
18#include <asm/ma.h>
19
20
21#define CPU_MODEL "v850e/ma1"
22#define CPU_MODEL_LONG "NEC V850E/MA1"
23
24
25/* Hardware-specific interrupt numbers (in the kernel IRQ namespace). */
26#define IRQ_INTOV(n) (n) /* 0-3 */
27#define IRQ_INTOV_NUM 4
28#define IRQ_INTP(n) (0x4 + (n)) /* Pnnn (pin) interrupts */
29#define IRQ_INTP_NUM 24
30#define IRQ_INTCMD(n) (0x1c + (n)) /* interval timer interrupts 0-3 */
31#define IRQ_INTCMD_NUM 4
32#define IRQ_INTDMA(n) (0x20 + (n)) /* DMA interrupts 0-3 */
33#define IRQ_INTDMA_NUM 4
34#define IRQ_INTCSI(n) (0x24 + (n)*4)/* CSI 0-2 transmit/receive completion */
35#define IRQ_INTCSI_NUM 3
36#define IRQ_INTSER(n) (0x25 + (n)*4) /* UART 0-2 reception error */
37#define IRQ_INTSER_NUM 3
38#define IRQ_INTSR(n) (0x26 + (n)*4) /* UART 0-2 reception completion */
39#define IRQ_INTSR_NUM 3
40#define IRQ_INTST(n) (0x27 + (n)*4) /* UART 0-2 transmission completion */
41#define IRQ_INTST_NUM 3
42
43#define NUM_CPU_IRQS 0x30
44
45
46/* The MA1 has a UART with 3 channels. */
47#define V850E_UART_NUM_CHANNELS 3
48
49
50#endif /* __V850_MA1_H__ */
diff --git a/include/asm-v850/machdep.h b/include/asm-v850/machdep.h
deleted file mode 100644
index f1e3b8b91508..000000000000
--- a/include/asm-v850/machdep.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * include/asm-v850/machdep.h -- Machine-dependent definitions
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_MACHDEP_H__
15#define __V850_MACHDEP_H__
16
17
18/* chips */
19#ifdef CONFIG_V850E_MA1
20#include <asm/ma1.h>
21#endif
22#ifdef CONFIG_V850E_ME2
23#include <asm/me2.h>
24#endif
25#ifdef CONFIG_V850E_TEG
26#include <asm/teg.h>
27#endif
28
29/* These are both chips _and_ platforms, so put them in the middle... */
30#ifdef CONFIG_V850E2_ANNA
31#include <asm/anna.h>
32#endif
33#ifdef CONFIG_V850E_AS85EP1
34#include <asm/as85ep1.h>
35#endif
36
37/* platforms */
38#ifdef CONFIG_RTE_CB_MA1
39#include <asm/rte_ma1_cb.h>
40#endif
41#ifdef CONFIG_RTE_CB_ME2
42#include <asm/rte_me2_cb.h>
43#endif
44#ifdef CONFIG_RTE_CB_NB85E
45#include <asm/rte_nb85e_cb.h>
46#endif
47#ifdef CONFIG_V850E_SIM
48#include <asm/sim.h>
49#endif
50#ifdef CONFIG_V850E2_SIM85E2C
51#include <asm/sim85e2c.h>
52#endif
53#ifdef CONFIG_V850E2_SIM85E2S
54#include <asm/sim85e2s.h>
55#endif
56#ifdef CONFIG_V850E2_FPGA85E2C
57#include <asm/fpga85e2c.h>
58#endif
59
60#endif /* __V850_MACHDEP_H__ */
diff --git a/include/asm-v850/macrology.h b/include/asm-v850/macrology.h
deleted file mode 100644
index 37abf874832c..000000000000
--- a/include/asm-v850/macrology.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * include/asm-v850/macrology.h -- Various useful CPP macros
3 *
4 * Copyright (C) 2001 NEC Corporation
5 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#define macrology_paste(arg1, arg2) macrology_paste_1(arg1, arg2)
15#define macrology_paste_1(arg1, arg2) arg1 ## arg2
16#define macrology_stringify(sym) macrology_stringify_1(sym)
17#define macrology_stringify_1(sym) #sym
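These two-level definitions are the usual preprocessor trick for making sure macro arguments are fully expanded before ## and # are applied; pasting or stringifying in a single step would operate on the unexpanded argument text. A short illustration (REG is a hypothetical example macro here, but the same effect is what lets current.h turn CURRENT_TASK_REGNUM into the register name r16):

    #define REG 16

    /* single level: ## and # see the literal token REG */
    #define paste_direct(a, b)   a ## b
    #define stringify_direct(x)  #x

    /* two levels, as in macrology.h: REG expands to 16 first */
    #define paste(a, b)   paste_direct(a, b)
    #define stringify(x)  stringify_direct(x)

    /* paste_direct(r, REG) -> rREG      stringify_direct(REG) -> "REG" */
    /* paste(r, REG)        -> r16       stringify(REG)        -> "16"  */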
diff --git a/include/asm-v850/me2.h b/include/asm-v850/me2.h
deleted file mode 100644
index ac7c9ce0bdc1..000000000000
--- a/include/asm-v850/me2.h
+++ /dev/null
@@ -1,182 +0,0 @@
1/*
2 * include/asm-v850/me2.h -- V850E/ME2 cpu chip
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_ME2_H__
15#define __V850_ME2_H__
16
17#include <asm/v850e.h>
18#include <asm/v850e_cache.h>
19
20
21#define CPU_MODEL "v850e/me2"
22#define CPU_MODEL_LONG "NEC V850E/ME2"
23
24
25/* Hardware-specific interrupt numbers (in the kernel IRQ namespace). */
26#define IRQ_INTP(n) (n) /* Pnnn (pin) interrupts */
27#define IRQ_INTP_NUM 31
28#define IRQ_INTCMD(n) (0x31 + (n)) /* interval timer interrupts 0-3 */
29#define IRQ_INTCMD_NUM 4
30#define IRQ_INTDMA(n) (0x41 + (n)) /* DMA interrupts 0-3 */
31#define IRQ_INTDMA_NUM 4
32#define IRQ_INTUBTIRE(n) (0x49 + (n)*5)/* UARTB 0-1 reception error */
33#define IRQ_INTUBTIRE_NUM 2
34#define IRQ_INTUBTIR(n) (0x4a + (n)*5) /* UARTB 0-1 reception complete */
35#define IRQ_INTUBTIR_NUM 2
36#define IRQ_INTUBTIT(n) (0x4b + (n)*5) /* UARTB 0-1 transmission complete */
37#define IRQ_INTUBTIT_NUM 2
38#define IRQ_INTUBTIF(n) (0x4c + (n)*5) /* UARTB 0-1 FIFO trans. complete */
39#define IRQ_INTUBTIF_NUM 2
40#define IRQ_INTUBTITO(n) (0x4d + (n)*5) /* UARTB 0-1 reception timeout */
41#define IRQ_INTUBTITO_NUM 2
42
43/* For <asm/irq.h> */
44#define NUM_CPU_IRQS 0x59 /* V850E/ME2 */
45
46
47/* For <asm/entry.h> */
48/* We use on-chip RAM, for a few miscellaneous variables that must be
49 accessible using a load instruction relative to R0. */
50#define R0_RAM_ADDR 0xFFFFB000 /* V850E/ME2 */
51
52
53/* V850E/ME2 UARTB details.*/
54#define V850E_UART_NUM_CHANNELS 2
55#define V850E_UARTB_BASE_FREQ (CPU_CLOCK_FREQ / 4)
56
57/* This is a function that gets called before configuring the UART. */
58#define V850E_UART_PRE_CONFIGURE me2_uart_pre_configure
59#ifndef __ASSEMBLY__
60extern void me2_uart_pre_configure (unsigned chan,
61 unsigned cflags, unsigned baud);
62#endif /* __ASSEMBLY__ */
63
64
65/* V850E/ME2 timer C details. */
66#define V850E_TIMER_C_BASE_ADDR 0xFFFFF600
67
68
69/* V850E/ME2 timer D details. */
70#define V850E_TIMER_D_BASE_ADDR 0xFFFFF540
71#define V850E_TIMER_D_TMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x0)
72#define V850E_TIMER_D_CMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x2)
73#define V850E_TIMER_D_TMCD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x4)
74
75#define V850E_TIMER_D_BASE_FREQ (CPU_CLOCK_FREQ / 2)
76
77
78/* Select iRAM mode. */
79#define ME2_IRAMM_ADDR 0xFFFFF80A
80#define ME2_IRAMM (*(volatile u8*)ME2_IRAMM_ADDR)
81
82
83/* Interrupt edge-detection configuration. INTF(n) and INTR(n) are only
84 valid for n == 1, 2, or 5. */
85#define ME2_INTF_ADDR(n) (0xFFFFFC00 + (n) * 0x2)
86#define ME2_INTF(n) (*(volatile u8*)ME2_INTF_ADDR(n))
87#define ME2_INTR_ADDR(n) (0xFFFFFC20 + (n) * 0x2)
88#define ME2_INTR(n) (*(volatile u8*)ME2_INTR_ADDR(n))
89#define ME2_INTFAL_ADDR 0xFFFFFC10
90#define ME2_INTFAL (*(volatile u8*)ME2_INTFAL_ADDR)
91#define ME2_INTRAL_ADDR 0xFFFFFC30
92#define ME2_INTRAL (*(volatile u8*)ME2_INTRAL_ADDR)
93#define ME2_INTFDH_ADDR 0xFFFFFC16
94#define ME2_INTFDH (*(volatile u16*)ME2_INTFDH_ADDR)
95#define ME2_INTRDH_ADDR 0xFFFFFC36
96#define ME2_INTRDH (*(volatile u16*)ME2_INTRDH_ADDR)
97#define ME2_SESC_ADDR(n) (0xFFFFF609 + (n) * 0x10)
98#define ME2_SESC(n) (*(volatile u8*)ME2_SESC_ADDR(n))
99#define ME2_SESA10_ADDR 0xFFFFF5AD
100#define ME2_SESA10 (*(volatile u8*)ME2_SESA10_ADDR)
101#define ME2_SESA11_ADDR 0xFFFFF5DD
102#define ME2_SESA11 (*(volatile u8*)ME2_SESA11_ADDR)
103
104
105/* Port 1 */
106/* Direct I/O. Bits 0-3 are pins P10-P13. */
107#define ME2_PORT1_IO_ADDR 0xFFFFF402
108#define ME2_PORT1_IO (*(volatile u8 *)ME2_PORT1_IO_ADDR)
109/* Port mode (for direct I/O, 0 = output, 1 = input). */
110#define ME2_PORT1_PM_ADDR 0xFFFFF422
111#define ME2_PORT1_PM (*(volatile u8 *)ME2_PORT1_PM_ADDR)
112/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
113#define ME2_PORT1_PMC_ADDR 0xFFFFF442
114#define ME2_PORT1_PMC (*(volatile u8 *)ME2_PORT1_PMC_ADDR)
115/* Port function control (for serial interfaces, 0 = CSI30, 1 = UARTB0 ). */
116#define ME2_PORT1_PFC_ADDR 0xFFFFF462
117#define ME2_PORT1_PFC (*(volatile u8 *)ME2_PORT1_PFC_ADDR)
118
119/* Port 2 */
120/* Direct I/O. Bits 0-5 are pins P20-P25. */
121#define ME2_PORT2_IO_ADDR 0xFFFFF404
122#define ME2_PORT2_IO (*(volatile u8 *)ME2_PORT2_IO_ADDR)
123/* Port mode (for direct I/O, 0 = output, 1 = input). */
124#define ME2_PORT2_PM_ADDR 0xFFFFF424
125#define ME2_PORT2_PM (*(volatile u8 *)ME2_PORT2_PM_ADDR)
126/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
127#define ME2_PORT2_PMC_ADDR 0xFFFFF444
128#define ME2_PORT2_PMC (*(volatile u8 *)ME2_PORT2_PMC_ADDR)
129/* Port function control (for serial interfaces, 0 = INTP2x, 1 = UARTB1 ). */
130#define ME2_PORT2_PFC_ADDR 0xFFFFF464
131#define ME2_PORT2_PFC (*(volatile u8 *)ME2_PORT2_PFC_ADDR)
132
133/* Port 5 */
134/* Direct I/O. Bits 0-5 are pins P50-P55. */
135#define ME2_PORT5_IO_ADDR 0xFFFFF40A
136#define ME2_PORT5_IO (*(volatile u8 *)ME2_PORT5_IO_ADDR)
137/* Port mode (for direct I/O, 0 = output, 1 = input). */
138#define ME2_PORT5_PM_ADDR 0xFFFFF42A
139#define ME2_PORT5_PM (*(volatile u8 *)ME2_PORT5_PM_ADDR)
140/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
141#define ME2_PORT5_PMC_ADDR 0xFFFFF44A
142#define ME2_PORT5_PMC (*(volatile u8 *)ME2_PORT5_PMC_ADDR)
143/* Port function control. */
144#define ME2_PORT5_PFC_ADDR 0xFFFFF46A
145#define ME2_PORT5_PFC (*(volatile u8 *)ME2_PORT5_PFC_ADDR)
146
147/* Port 6 */
148/* Direct I/O. Bits 5-7 are pins P65-P67. */
149#define ME2_PORT6_IO_ADDR 0xFFFFF40C
150#define ME2_PORT6_IO (*(volatile u8 *)ME2_PORT6_IO_ADDR)
151/* Port mode (for direct I/O, 0 = output, 1 = input). */
152#define ME2_PORT6_PM_ADDR 0xFFFFF42C
153#define ME2_PORT6_PM (*(volatile u8 *)ME2_PORT6_PM_ADDR)
154/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
155#define ME2_PORT6_PMC_ADDR 0xFFFFF44C
156#define ME2_PORT6_PMC (*(volatile u8 *)ME2_PORT6_PMC_ADDR)
157/* Port function control. */
158#define ME2_PORT6_PFC_ADDR 0xFFFFF46C
159#define ME2_PORT6_PFC (*(volatile u8 *)ME2_PORT6_PFC_ADDR)
160
161/* Port 7 */
162/* Direct I/O. Bits 2-7 are pins P72-P77. */
163#define ME2_PORT7_IO_ADDR 0xFFFFF40E
164#define ME2_PORT7_IO (*(volatile u8 *)ME2_PORT7_IO_ADDR)
165/* Port mode (for direct I/O, 0 = output, 1 = input). */
166#define ME2_PORT7_PM_ADDR 0xFFFFF42E
167#define ME2_PORT7_PM (*(volatile u8 *)ME2_PORT7_PM_ADDR)
168/* Port mode control (0 = direct I/O mode, 1 = alternative I/O mode). */
169#define ME2_PORT7_PMC_ADDR 0xFFFFF44E
170#define ME2_PORT7_PMC (*(volatile u8 *)ME2_PORT7_PMC_ADDR)
172/* Port function control. */
172#define ME2_PORT7_PFC_ADDR 0xFFFFF46E
173#define ME2_PORT7_PFC (*(volatile u8 *)ME2_PORT7_PFC_ADDR)
174
175
176#ifndef __ASSEMBLY__
177/* Initialize V850E/ME2 chip interrupts. */
178extern void me2_init_irqs (void);
179#endif /* !__ASSEMBLY__ */
180
181
182#endif /* __V850_ME2_H__ */
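
The PM/PMC comments above describe the usual two-step pin setup on these ports: PMC selects direct I/O versus the alternative function, and PM then selects the direction. The fragment below is a minimal sketch of that sequence for pin P10 and is not part of the original header; the helper name me2_p10_set is hypothetical, and it assumes the ME2_PORT1_* macros above are visible and that <asm/types.h> provides the u8 type they use.

#include <asm/types.h>

static inline void me2_p10_set (int value)
{
	ME2_PORT1_PMC &= ~0x1;		/* bit 0: direct I/O mode, not alternative */
	ME2_PORT1_PM  &= ~0x1;		/* bit 0: 0 = output */
	if (value)
		ME2_PORT1_IO |= 0x1;	/* drive P10 high */
	else
		ME2_PORT1_IO &= ~0x1;	/* drive P10 low */
}
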
diff --git a/include/asm-v850/mman.h b/include/asm-v850/mman.h
deleted file mode 100644
index edbf6edbfb37..000000000000
--- a/include/asm-v850/mman.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef __V850_MMAN_H__
2#define __V850_MMAN_H__
3
4#include <asm-generic/mman.h>
5
6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
8#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
9#define MAP_LOCKED 0x2000 /* pages are locked */
10#define MAP_NORESERVE 0x4000 /* don't check for reservations */
11
12#define MCL_CURRENT 1 /* lock all current mappings */
13#define MCL_FUTURE 2 /* lock all future mappings */
14
15#endif /* __V850_MMAN_H__ */
diff --git a/include/asm-v850/mmu.h b/include/asm-v850/mmu.h
deleted file mode 100644
index 267768c66ef6..000000000000
--- a/include/asm-v850/mmu.h
+++ /dev/null
@@ -1,11 +0,0 @@
1/* Copyright (C) 2002, 2005, David McCullough <davidm@snapgear.com> */
2
3#ifndef __V850_MMU_H__
4#define __V850_MMU_H__
5
6typedef struct {
7 struct vm_list_struct *vmlist;
8 unsigned long end_brk;
9} mm_context_t;
10
11#endif /* __V850_MMU_H__ */
diff --git a/include/asm-v850/mmu_context.h b/include/asm-v850/mmu_context.h
deleted file mode 100644
index 01daacd5474e..000000000000
--- a/include/asm-v850/mmu_context.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __V850_MMU_CONTEXT_H__
2#define __V850_MMU_CONTEXT_H__
3
4#include <asm-generic/mm_hooks.h>
5
6#define destroy_context(mm) ((void)0)
7#define init_new_context(tsk,mm) 0
8#define switch_mm(prev,next,tsk) ((void)0)
9#define deactivate_mm(tsk,mm) do { } while (0)
10#define activate_mm(prev,next) ((void)0)
11#define enter_lazy_tlb(mm,tsk) ((void)0)
12
13#endif /* __V850_MMU_CONTEXT_H__ */
diff --git a/include/asm-v850/module.h b/include/asm-v850/module.h
deleted file mode 100644
index 2c2f4944f09f..000000000000
--- a/include/asm-v850/module.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * include/asm-v850/module.h -- Architecture-specific module hooks
3 *
4 * Copyright (C) 2001,02,03,04 NEC Corporation
5 * Copyright (C) 2001,02,03,04 Miles Bader <miles@gnu.org>
6 * Copyright (C) 2001,03 Rusty Russell
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 *
14 * Derived in part from include/asm-ppc/module.h
15 */
16
17#ifndef __V850_MODULE_H__
18#define __V850_MODULE_H__
19
20#define MODULE_SYMBOL_PREFIX "_"
21
22struct v850_plt_entry
23{
24 /* Indirect jump instruction sequence (6-byte mov + 2-byte jr). */
25 unsigned long tramp[2];
26};
27
28struct mod_arch_specific
29{
30 /* Indices of PLT sections within module. */
31 unsigned int core_plt_section, init_plt_section;
32};
33
34#define Elf_Shdr Elf32_Shdr
35#define Elf_Sym Elf32_Sym
36#define Elf_Ehdr Elf32_Ehdr
37
38/* Make empty sections for module_frob_arch_sections to expand. */
39#ifdef MODULE
40asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
41asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
42#endif
43
44/* We don't do exception tables. */
45struct exception_table_entry;
46static inline const struct exception_table_entry *
47search_extable(const struct exception_table_entry *first,
48 const struct exception_table_entry *last,
49 unsigned long value)
50{
51 return 0;
52}
53#define ARCH_HAS_SEARCH_EXTABLE
54static inline void
55sort_extable(struct exception_table_entry *start,
56 struct exception_table_entry *finish)
57{
58 /* nada */
59}
60#define ARCH_HAS_SORT_EXTABLE
61
62#endif /* __V850_MODULE_H__ */
diff --git a/include/asm-v850/msgbuf.h b/include/asm-v850/msgbuf.h
deleted file mode 100644
index ed07dbd01637..000000000000
--- a/include/asm-v850/msgbuf.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef __V850_MSGBUF_H__
2#define __V850_MSGBUF_H__
3
4/*
5 * The msqid64_ds structure for v850 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct msqid64_ds {
15 struct ipc64_perm msg_perm;
16 __kernel_time_t msg_stime; /* last msgsnd time */
17 unsigned long __unused1;
18 __kernel_time_t msg_rtime; /* last msgrcv time */
19 unsigned long __unused2;
20 __kernel_time_t msg_ctime; /* last change time */
21 unsigned long __unused3;
22 unsigned long msg_cbytes; /* current number of bytes on queue */
23 unsigned long msg_qnum; /* number of messages in queue */
24 unsigned long msg_qbytes; /* max number of bytes on queue */
25 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
26 __kernel_pid_t msg_lrpid; /* last receive pid */
27 unsigned long __unused4;
28 unsigned long __unused5;
29};
30
31#endif /* __V850_MSGBUF_H__ */
diff --git a/include/asm-v850/mutex.h b/include/asm-v850/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/include/asm-v850/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-v850/page.h b/include/asm-v850/page.h
deleted file mode 100644
index 74a539a9bd59..000000000000
--- a/include/asm-v850/page.h
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * include/asm-v850/page.h -- VM ops
3 *
4 * Copyright (C) 2001,02,03,05 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03,05 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_PAGE_H__
15#define __V850_PAGE_H__
16
17#include <asm/machdep.h>
18
19
20#define PAGE_SHIFT 12
21#define PAGE_SIZE (1UL << PAGE_SHIFT)
22#define PAGE_MASK (~(PAGE_SIZE-1))
23
24
25/*
26 * PAGE_OFFSET -- the first address of the first page of memory. For archs with
27 * no MMU this corresponds to the first free page in physical memory (aligned
28 * on a page boundary).
29 */
30#ifndef PAGE_OFFSET
31#define PAGE_OFFSET 0x0000000
32#endif
33
34
35#ifndef __ASSEMBLY__
36
37#define STRICT_MM_TYPECHECKS
38
39#define clear_page(page) memset ((void *)(page), 0, PAGE_SIZE)
40#define copy_page(to, from) memcpy ((void *)(to), (void *)from, PAGE_SIZE)
41
42#define clear_user_page(addr, vaddr, page) \
43 do { clear_page(addr); \
44 flush_dcache_page(page); \
45 } while (0)
46#define copy_user_page(to, from, vaddr, page) \
47 do { copy_page(to, from); \
48 flush_dcache_page(page); \
49 } while (0)
50
51#ifdef STRICT_MM_TYPECHECKS
52/*
53 * These are used to make use of C type-checking..
54 */
55
56typedef struct { unsigned long pte; } pte_t;
57typedef struct { unsigned long pmd; } pmd_t;
58typedef struct { unsigned long pgd; } pgd_t;
59typedef struct { unsigned long pgprot; } pgprot_t;
60typedef struct page *pgtable_t;
61
62#define pte_val(x) ((x).pte)
63#define pmd_val(x) ((x).pmd)
64#define pgd_val(x) ((x).pgd)
65#define pgprot_val(x) ((x).pgprot)
66
67#define __pte(x) ((pte_t) { (x) } )
68#define __pmd(x) ((pmd_t) { (x) } )
69#define __pgd(x) ((pgd_t) { (x) } )
70#define __pgprot(x) ((pgprot_t) { (x) } )
71
72#else /* !STRICT_MM_TYPECHECKS */
73/*
74 * .. while these make it easier on the compiler
75 */
76
77typedef unsigned long pte_t;
78typedef unsigned long pmd_t;
79typedef unsigned long pgd_t;
80typedef unsigned long pgprot_t;
81
82#define pte_val(x) (x)
83#define pmd_val(x) (x)
84#define pgd_val(x) (x)
85#define pgprot_val(x) (x)
86
87#define __pte(x) (x)
88#define __pmd(x) (x)
89#define __pgd(x) (x)
90#define __pgprot(x) (x)
91
92#endif /* STRICT_MM_TYPECHECKS */
93
94#endif /* !__ASSEMBLY__ */
95
96
97/* to align the pointer to the (next) page boundary */
98#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
99
100
101/* No current v850 processor has virtual memory. */
102#define __virt_to_phys(addr) (addr)
103#define __phys_to_virt(addr) (addr)
104
105#define virt_to_pfn(kaddr) (__virt_to_phys (kaddr) >> PAGE_SHIFT)
106#define pfn_to_virt(pfn) __phys_to_virt ((pfn) << PAGE_SHIFT)
107
108#define MAP_NR(kaddr) \
109 (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)
110#define virt_to_page(kaddr) (mem_map + MAP_NR (kaddr))
111#define page_to_virt(page) \
112 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
113
114#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
115#define pfn_valid(pfn) ((pfn) < max_mapnr)
116
117#define virt_addr_valid(kaddr) \
118 (((void *)(kaddr) >= (void *)PAGE_OFFSET) && MAP_NR (kaddr) < max_mapnr)
119
120
121#define __pa(x) __virt_to_phys ((unsigned long)(x))
122#define __va(x) ((void *)__phys_to_virt ((unsigned long)(x)))
123
124
125#include <asm-generic/memory_model.h>
126#include <asm-generic/page.h>
127
128#endif /* __V850_PAGE_H__ */
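
The STRICT_MM_TYPECHECKS comment above is the whole point of the struct wrappers: page-table values must be wrapped with __pte()/__pgd() and unwrapped with pte_val()/pgd_val(), so mixing levels by accident is a compile error rather than a silent bug. A minimal sketch, not part of the header, with a hypothetical function name:

#include <asm/page.h>

static inline unsigned long example_pte_roundtrip (unsigned long bits)
{
	pte_t pte = __pte (bits);	/* wrap a raw value explicitly */
	/* pgd_t pgd = pte; */		/* would fail to compile with the
					   struct-based types */
	return pte_val (pte);		/* unwrap explicitly */
}
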
diff --git a/include/asm-v850/param.h b/include/asm-v850/param.h
deleted file mode 100644
index 4391f5fe0204..000000000000
--- a/include/asm-v850/param.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * include/asm-v850/param.h -- Various kernel parameters
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_PARAM_H__
15#define __V850_PARAM_H__
16
17#define EXEC_PAGESIZE 4096
18
19#ifndef NOGROUP
20#define NOGROUP (-1)
21#endif
22
23#define MAXHOSTNAMELEN 64 /* max length of hostname */
24
25#ifdef __KERNEL__
26# define HZ CONFIG_HZ
27# define USER_HZ 100
28# define CLOCKS_PER_SEC USER_HZ
29#else
30# define HZ 100
31#endif
32
33#endif /* __V850_PARAM_H__ */
diff --git a/include/asm-v850/pci.h b/include/asm-v850/pci.h
deleted file mode 100644
index de2a7d0a81cc..000000000000
--- a/include/asm-v850/pci.h
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * include/asm-v850/pci.h -- PCI support
3 *
4 * Copyright (C) 2001,02,05 NEC Corporation
5 * Copyright (C) 2001,02,05 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_PCI_H__
15#define __V850_PCI_H__
16
17/* Get any platform-dependent definitions. */
18#include <asm/machdep.h>
19
20#define pcibios_scan_all_fns(a, b) 0
21
22/* Generic declarations. */
23
24struct scatterlist;
25
26extern void pcibios_set_master (struct pci_dev *dev);
27
28/* `Grant' to PDEV the memory block at CPU_ADDR, for doing DMA. The
30 32-bit PCI bus mastering address to use is returned. The device owns
30 this memory until either pci_unmap_single or pci_dma_sync_single_for_cpu is
31 performed. */
32extern dma_addr_t
33pci_map_single (struct pci_dev *pdev, void *cpu_addr, size_t size, int dir);
34
35/* Return to the CPU the PCI DMA memory block previously `granted' to
36 PDEV, at DMA_ADDR. */
37extern void
38pci_unmap_single (struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
39 int dir);
40
41/* Make physical memory consistent for a single streaming mode DMA
42 translation after a transfer.
43
44 If you perform a pci_map_single() but wish to interrogate the
45 buffer using the cpu, yet do not wish to tear down the PCI dma
46 mapping, you must call this function before doing so. At the next
47 point you give the PCI dma address back to the card, you must first
48 perform a pci_dma_sync_for_device, and then the device again owns
49 the buffer. */
50extern void
51pci_dma_sync_single_for_cpu (struct pci_dev *dev, dma_addr_t dma_addr,
52 size_t size, int dir);
53
54extern void
55pci_dma_sync_single_for_device (struct pci_dev *dev, dma_addr_t dma_addr,
56 size_t size, int dir);
57
58
59/* Do multiple DMA mappings at once. */
60extern int
61pci_map_sg (struct pci_dev *pdev, struct scatterlist *sg, int sg_len, int dir);
62
63/* Unmap multiple DMA mappings at once. */
64extern void
65pci_unmap_sg (struct pci_dev *pdev, struct scatterlist *sg, int sg_len,
66 int dir);
67
68/* SG-list versions of pci_dma_sync functions. */
69extern void
70pci_dma_sync_sg_for_cpu (struct pci_dev *dev,
71 struct scatterlist *sg, int sg_len,
72 int dir);
73extern void
74pci_dma_sync_sg_for_device (struct pci_dev *dev,
75 struct scatterlist *sg, int sg_len,
76 int dir);
77
78#define pci_map_page(dev, page, offs, size, dir) \
79 pci_map_single(dev, (page_address(page) + (offs)), size, dir)
80#define pci_unmap_page(dev,addr,sz,dir) \
81 pci_unmap_single(dev, addr, sz, dir)
82
83/* Test for pci_map_single or pci_map_page having generated an error. */
84static inline int
85pci_dma_mapping_error (dma_addr_t dma_addr)
86{
87 return dma_addr == 0;
88}
89
90/* Allocate and map kernel buffer using consistent mode DMA for PCI
91 device. Returns non-NULL cpu-view pointer to the buffer if
92 successful and sets *DMA_ADDR to the pci side dma address as well,
93 else DMA_ADDR is undefined. */
94extern void *
95pci_alloc_consistent (struct pci_dev *pdev, size_t size, dma_addr_t *dma_addr);
96
97/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
98 be values that were returned from pci_alloc_consistent. SIZE must be
99 the same as what was passed into pci_alloc_consistent. References to
100 the memory and mappings associated with CPU_ADDR or DMA_ADDR past
101 this call are illegal. */
102extern void
103pci_free_consistent (struct pci_dev *pdev, size_t size, void *cpu_addr,
104 dma_addr_t dma_addr);
105
106#ifdef CONFIG_PCI
107static inline void pci_dma_burst_advice(struct pci_dev *pdev,
108 enum pci_dma_burst_strategy *strat,
109 unsigned long *strategy_parameter)
110{
111 *strat = PCI_DMA_BURST_INFINITY;
112 *strategy_parameter = ~0UL;
113}
114#endif
115
116extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
117extern void pci_iounmap (struct pci_dev *dev, void __iomem *addr);
118
119#endif /* __V850_PCI_H__ */
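
The comments above spell out an ownership hand-off for streaming mappings: after pci_map_single() the device owns the buffer, pci_dma_sync_single_for_cpu() gives it back to the CPU for inspection, and pci_dma_sync_single_for_device() (or a fresh mapping) returns it to the device. A minimal sketch of that protocol in a hypothetical driver, not taken from this header:

#include <linux/pci.h>

static void example_rx (struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma = pci_map_single (pdev, buf, len, PCI_DMA_FROMDEVICE);

	/* ... the device DMAs into the buffer ... */

	pci_dma_sync_single_for_cpu (pdev, dma, len, PCI_DMA_FROMDEVICE);
	/* ... the CPU may now look at buf ... */
	pci_dma_sync_single_for_device (pdev, dma, len, PCI_DMA_FROMDEVICE);

	/* ... once the transfer is really finished, tear the mapping down ... */
	pci_unmap_single (pdev, dma, len, PCI_DMA_FROMDEVICE);
}
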
diff --git a/include/asm-v850/percpu.h b/include/asm-v850/percpu.h
deleted file mode 100644
index 755ac6522b63..000000000000
--- a/include/asm-v850/percpu.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __V850_PERCPU_H__
2#define __V850_PERCPU_H__
3
4#include <asm-generic/percpu.h>
5
6/* This is a stupid hack to satisfy some grotty implicit include-file
7 dependency; basically, <linux/smp.h> uses BUG_ON, which calls BUG, but
8 doesn't include the necessary headers to define it. In the twisted
9 festering mess of includes this must all be resolved somehow on other
10 platforms, but I haven't the faintest idea how, and don't care; here will
11 do, even though it doesn't actually make any sense. */
12#include <asm/page.h>
13
14#endif /* __V850_PERCPU_H__ */
diff --git a/include/asm-v850/pgalloc.h b/include/asm-v850/pgalloc.h
deleted file mode 100644
index b91eb2d02bfd..000000000000
--- a/include/asm-v850/pgalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * include/asm-v850/pgalloc.h
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_PGALLOC_H__
15#define __V850_PGALLOC_H__
16
17#include <linux/mm.h> /* some crap code expects this */
18
19/* ... and then, there was one. */
20#define check_pgt_cache() ((void)0)
21
22#endif /* __V850_PGALLOC_H__ */
diff --git a/include/asm-v850/pgtable.h b/include/asm-v850/pgtable.h
deleted file mode 100644
index 1ea2a900f0f8..000000000000
--- a/include/asm-v850/pgtable.h
+++ /dev/null
@@ -1,59 +0,0 @@
1#ifndef __V850_PGTABLE_H__
2#define __V850_PGTABLE_H__
3
4#include <asm-generic/4level-fixup.h>
5
6#include <asm/page.h>
7
8
9#define pgd_present(pgd) (1) /* pages are always present on NO_MM */
10#define pgd_none(pgd) (0)
11#define pgd_bad(pgd) (0)
12#define pgd_clear(pgdp) ((void)0)
13
14#define pmd_offset(a, b) ((void *)0)
15
16#define kern_addr_valid(addr) (1)
17
18
19#define __swp_type(x) (0)
20#define __swp_offset(x) (0)
21#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
22#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
23#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
24
25static inline int pte_file (pte_t pte) { return 0; }
26
27
28/* These mean nothing to !CONFIG_MMU. */
29#define PAGE_NONE __pgprot(0)
30#define PAGE_SHARED __pgprot(0)
31#define PAGE_COPY __pgprot(0)
32#define PAGE_READONLY __pgprot(0)
33#define PAGE_KERNEL __pgprot(0)
34
35
36/*
37 * ZERO_PAGE is a global shared page that is always zero: used
38 * for zero-mapped memory areas etc. When CONFIG_MMU is not defined, this
39 * should never actually be used, so just define it to something that
40 * will hopefully cause a bus error if it is.
41 */
42#define ZERO_PAGE(vaddr) ((void *)0x87654321)
43
44
45/* Some bogus code in procfs uses these; whatever. */
46#define VMALLOC_START 0
47#define VMALLOC_END (~0)
48
49
50extern void paging_init (void);
51#define swapper_pg_dir ((pgd_t *) 0)
52
53#define pgtable_cache_init() ((void)0)
54
55
56extern unsigned int kobjsize(const void *objp);
57
58
59#endif /* __V850_PGTABLE_H__ */
diff --git a/include/asm-v850/poll.h b/include/asm-v850/poll.h
deleted file mode 100644
index 803cad0b9b59..000000000000
--- a/include/asm-v850/poll.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __V850_POLL_H__
2#define __V850_POLL_H__
3
4#define POLLWRNORM POLLOUT
5#define POLLWRBAND 0x0100
6
7#include <asm-generic/poll.h>
8
9#endif /* __V850_POLL_H__ */
diff --git a/include/asm-v850/posix_types.h b/include/asm-v850/posix_types.h
deleted file mode 100644
index 7f403b765390..000000000000
--- a/include/asm-v850/posix_types.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * include/asm-v850/posix_types.h -- Kernel versions of standard types
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_POSIX_TYPES_H__
15#define __V850_POSIX_TYPES_H__
16
17typedef unsigned long __kernel_ino_t;
18typedef unsigned long long __kernel_ino64_t;
19typedef unsigned int __kernel_mode_t;
20typedef unsigned int __kernel_nlink_t;
21typedef long __kernel_off_t;
22typedef long long __kernel_loff_t;
23typedef int __kernel_pid_t;
24typedef unsigned short __kernel_ipc_pid_t;
25typedef unsigned int __kernel_uid_t;
26typedef unsigned int __kernel_gid_t;
27typedef unsigned int __kernel_size_t;
28typedef int __kernel_ssize_t;
29typedef int __kernel_ptrdiff_t;
30typedef long __kernel_time_t;
31typedef long __kernel_suseconds_t;
32typedef long __kernel_clock_t;
33typedef int __kernel_timer_t;
34typedef int __kernel_clockid_t;
35typedef int __kernel_daddr_t;
36typedef char * __kernel_caddr_t;
37typedef unsigned short __kernel_uid16_t;
38typedef unsigned short __kernel_gid16_t;
39typedef unsigned int __kernel_uid32_t;
40typedef unsigned int __kernel_gid32_t;
41
42/* Some bogus code depends on this; we don't care. */
43typedef __kernel_uid_t __kernel_old_uid_t;
44typedef unsigned int __kernel_old_dev_t;
45
46typedef struct {
47 int val[2];
48} __kernel_fsid_t;
49
50
51#if defined(__KERNEL__)
52
53/* We used to include <asm/bitops.h> here, which seems the right thing, but
54 it caused nasty include-file definition order problems. Removing the
55 include seems to work, so fingers crossed... */
56
57#undef __FD_SET
58#define __FD_SET(fd, fd_set) \
59 __set_bit (fd, (void *)&((__kernel_fd_set *)fd_set)->fds_bits)
60#undef __FD_CLR
61#define __FD_CLR(fd, fd_set) \
62 __clear_bit (fd, (void *)&((__kernel_fd_set *)fd_set)->fds_bits)
63#undef __FD_ISSET
64#define __FD_ISSET(fd, fd_set) \
65 __test_bit (fd, (void *)&((__kernel_fd_set *)fd_set)->fds_bits)
66#undef __FD_ZERO
67#define __FD_ZERO(fd_set) \
68 memset (fd_set, 0, sizeof (*(fd_set *)fd_set))
69
70#endif /* defined(__KERNEL__) */
71
72#endif /* __V850_POSIX_TYPES_H__ */
diff --git a/include/asm-v850/processor.h b/include/asm-v850/processor.h
deleted file mode 100644
index 979e3467f9af..000000000000
--- a/include/asm-v850/processor.h
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * include/asm-v850/processor.h
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_PROCESSOR_H__
15#define __V850_PROCESSOR_H__
16
17#ifndef __ASSEMBLY__ /* <linux/thread_info.h> is not asm-safe. */
18#include <linux/thread_info.h>
19#endif
20
21#include <linux/compiler.h>
22#include <asm/ptrace.h>
23#include <asm/entry.h>
24
25/* Some code expects `segment' stuff to be defined here. */
26#include <asm/segment.h>
27
28
29/*
30 * The only places this is used seem to be horrible bletcherous kludges,
31 * so we just define it to be as large as possible.
32 */
33#define TASK_SIZE (0xFFFFFFFF)
34
35/*
36 * This decides where the kernel will search for a free chunk of vm
37 * space during mmap's. We won't be using it.
38 */
39#define TASK_UNMAPPED_BASE 0
40
41
42#ifndef __ASSEMBLY__
43
44
45/*
46 * Default implementation of macro that returns current
47 * instruction pointer ("program counter").
48 */
49#define current_text_addr() ({ __label__ _l; _l: &&_l;})
50
51/* If you change this, you must change the associated assembly-language
52 constants defined below, THREAD_*. */
53struct thread_struct {
54 /* kernel stack pointer (must be first field in structure) */
55 unsigned long ksp;
56};
57
58#define INIT_THREAD { sizeof init_stack + (unsigned long)init_stack }
59
60
61/* Do necessary setup to start up a newly executed thread. */
62static inline void start_thread (struct pt_regs *regs,
63 unsigned long pc, unsigned long usp)
64{
65 regs->pc = pc;
66 regs->gpr[GPR_SP] = usp;
67 regs->kernel_mode = 0;
68}
69
70/* Free all resources held by a thread. */
71static inline void release_thread (struct task_struct *dead_task)
72{
73}
74
75/* Prepare to copy thread state - unlazy all lazy status */
76#define prepare_to_copy(tsk) do { } while (0)
77
78extern int kernel_thread (int (*fn)(void *), void * arg, unsigned long flags);
79
80/* Free current thread data structures etc. */
81static inline void exit_thread (void)
82{
83}
84
85
86/* Return the registers saved during context-switch by the currently
87 not-running thread T. Note that this only includes some registers!
88 See entry.S for details. */
89#define thread_saved_regs(t) \
90 ((struct pt_regs*)((t)->thread.ksp + STATE_SAVE_PT_OFFSET))
91/* Return saved (kernel) PC of a blocked thread. Actually, we return the
92 LP register, because the thread is actually blocked in switch_thread,
93 and we're interested in the PC it will _return_ to. */
94#define thread_saved_pc(t) (thread_saved_regs(t)->gpr[GPR_LP])
95
96
97unsigned long get_wchan (struct task_struct *p);
98
99
100/* Return some info about the user process TASK. */
101#define task_tos(task) ((unsigned long)task_stack_page(task) + THREAD_SIZE)
102#define task_pt_regs(task) ((struct pt_regs *)task_tos (task) - 1)
103#define task_sp(task) (task_pt_regs (task)->gpr[GPR_SP])
104#define task_pc(task) (task_pt_regs (task)->pc)
105/* Grotty old names for some. */
106#define KSTK_EIP(task) task_pc (task)
107#define KSTK_ESP(task) task_sp (task)
108
109
110#define cpu_relax() barrier()
111
112
113#else /* __ASSEMBLY__ */
114
115#define THREAD_KSP 0
116
117#endif /* !__ASSEMBLY__ */
118
119
120#endif /* __V850_PROCESSOR_H__ */
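
The task accessors above all rest on the same fact: the user-mode pt_regs is stored at the top of the task's kernel stack, so task_pt_regs() is just "stack top minus one pt_regs", and KSTK_EIP()/KSTK_ESP() read pc and sp out of it. A minimal sketch, hypothetical and not from the header, assuming <linux/sched.h> for struct task_struct:

#include <linux/kernel.h>
#include <linux/sched.h>

static void example_report (struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs (tsk);	/* frame at top of kernel stack */

	printk ("pid %d: pc=%08lx sp=%08lx\n",
		tsk->pid, regs->pc, regs->gpr[GPR_SP]);
}
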
diff --git a/include/asm-v850/ptrace.h b/include/asm-v850/ptrace.h
deleted file mode 100644
index 4f35cf2cd641..000000000000
--- a/include/asm-v850/ptrace.h
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * include/asm-v850/ptrace.h -- Access to CPU registers
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_PTRACE_H__
15#define __V850_PTRACE_H__
16
17
18/* v850 general purpose registers with special meanings. */
19#define GPR_ZERO 0 /* constant zero */
20#define GPR_ASM 1 /* reserved for assembler */
21#define GPR_SP 3 /* stack pointer */
22#define GPR_GP 4 /* global data pointer */
23#define GPR_TP 5 /* `text pointer' */
24#define GPR_EP 30 /* `element pointer' */
25#define GPR_LP 31 /* link pointer (current return address) */
26
27/* These aren't official names, but they make some code more descriptive. */
28#define GPR_ARG0 6
29#define GPR_ARG1 7
30#define GPR_ARG2 8
31#define GPR_ARG3 9
32#define GPR_RVAL0 10
33#define GPR_RVAL1 11
34#define GPR_RVAL GPR_RVAL0
35
36#define NUM_GPRS 32
37
38/* v850 `system' registers. */
39#define SR_EIPC 0
40#define SR_EIPSW 1
41#define SR_FEPC 2
42#define SR_FEPSW 3
43#define SR_ECR 4
44#define SR_PSW 5
45#define SR_CTPC 16
46#define SR_CTPSW 17
47#define SR_DBPC 18
48#define SR_DBPSW 19
49#define SR_CTBP 20
50#define SR_DIR 21
51#define SR_ASID 23
52
53
54#ifndef __ASSEMBLY__
55
56typedef unsigned long v850_reg_t;
57
58/* How processor state is stored on the stack during a syscall/signal.
59 If you change this structure, change the associated assembly-language
60 macros below too (PT_*)! */
61struct pt_regs
62{
63 /* General purpose registers. */
64 v850_reg_t gpr[NUM_GPRS];
65
66 v850_reg_t pc; /* program counter */
67 v850_reg_t psw; /* program status word */
68
69 /* Registers used by `callt' instruction: */
70 v850_reg_t ctpc; /* saved program counter */
71 v850_reg_t ctpsw; /* saved psw */
72 v850_reg_t ctbp; /* base pointer for callt table */
73
74 char kernel_mode; /* 1 if in `kernel mode', 0 if user mode */
75};
76
77
78#define instruction_pointer(regs) ((regs)->pc)
79#define profile_pc(regs) instruction_pointer(regs)
80#define user_mode(regs) (!(regs)->kernel_mode)
81
82/* When a struct pt_regs is used to save user state for a system call in
83 the kernel, the system call is stored in the space for R0 (since it's
84 never used otherwise, R0 being a constant 0). Non-system-calls
85 simply store 0 there. */
86#define PT_REGS_SYSCALL(regs) (regs)->gpr[0]
87#define PT_REGS_SET_SYSCALL(regs, val) ((regs)->gpr[0] = (val))
88
89#endif /* !__ASSEMBLY__ */
90
91
92/* The number of bytes used to store each register. */
93#define _PT_REG_SIZE 4
94
95/* Offset of a general purpose register in a struct pt_regs. */
96#define PT_GPR(num) ((num) * _PT_REG_SIZE)
97
98/* Offsets of various special registers & fields in a struct pt_regs. */
99#define PT_PC ((NUM_GPRS + 0) * _PT_REG_SIZE)
100#define PT_PSW ((NUM_GPRS + 1) * _PT_REG_SIZE)
101#define PT_CTPC ((NUM_GPRS + 2) * _PT_REG_SIZE)
102#define PT_CTPSW ((NUM_GPRS + 3) * _PT_REG_SIZE)
103#define PT_CTBP ((NUM_GPRS + 4) * _PT_REG_SIZE)
104#define PT_KERNEL_MODE ((NUM_GPRS + 5) * _PT_REG_SIZE)
105
106/* Where the current syscall number is stashed; obviously only valid in
107 the kernel! */
108#define PT_CUR_SYSCALL PT_GPR(0)
109
110/* Size of struct pt_regs, including alignment. */
111#define PT_SIZE ((NUM_GPRS + 6) * _PT_REG_SIZE)
112
113
114/* These are `magic' values for PTRACE_PEEKUSR that return info about where
115 a process is located in memory. */
116#define PT_TEXT_ADDR (PT_SIZE + 1)
117#define PT_TEXT_LEN (PT_SIZE + 2)
118#define PT_DATA_ADDR (PT_SIZE + 3)
119
120
121#endif /* __V850_PTRACE_H__ */
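
The comment above struct pt_regs warns that the PT_* byte offsets must be kept in sync with the C layout by hand. A minimal sketch, not from the header, of a compile-time check that would catch the two drifting apart (the function name is hypothetical):

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

static inline void pt_regs_offsets_check (void)
{
	BUILD_BUG_ON (PT_GPR (GPR_SP) != offsetof (struct pt_regs, gpr[GPR_SP]));
	BUILD_BUG_ON (PT_PC  != offsetof (struct pt_regs, pc));
	BUILD_BUG_ON (PT_PSW != offsetof (struct pt_regs, psw));
	BUILD_BUG_ON (PT_KERNEL_MODE != offsetof (struct pt_regs, kernel_mode));
}
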
diff --git a/include/asm-v850/resource.h b/include/asm-v850/resource.h
deleted file mode 100644
index 4b9dcd44f8d1..000000000000
--- a/include/asm-v850/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_RESOURCE_H__
2#define __V850_RESOURCE_H__
3
4#include <asm-generic/resource.h>
5
6#endif /* __V850_RESOURCE_H__ */
diff --git a/include/asm-v850/rte_cb.h b/include/asm-v850/rte_cb.h
deleted file mode 100644
index db9879f00aa7..000000000000
--- a/include/asm-v850/rte_cb.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * include/asm-v850/rte_cb.h -- Midas labs RTE-CB series of evaluation boards
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_RTE_CB_H__
15#define __V850_RTE_CB_H__
16
17
18/* The SRAM on the Mother-A motherboard. */
19#define MB_A_SRAM_ADDR GCS0_ADDR
20#define MB_A_SRAM_SIZE 0x00200000 /* 2MB */
21
22
23#ifdef CONFIG_RTE_GBUS_INT
24/* GBUS interrupt support. */
25
26# include <asm/gbus_int.h>
27
28# define GBUS_INT_BASE_IRQ NUM_RTE_CB_IRQS
29# define GBUS_INT_BASE_ADDR (GCS2_ADDR + 0x00006000)
30
31/* Some specific interrupts. */
32# define IRQ_MB_A_LAN IRQ_GBUS_INT(10)
33# define IRQ_MB_A_PCI1(n) (IRQ_GBUS_INT(16) + (n))
34# define IRQ_MB_A_PCI1_NUM 4
35# define IRQ_MB_A_PCI2(n) (IRQ_GBUS_INT(20) + (n))
36# define IRQ_MB_A_PCI2_NUM 4
37# define IRQ_MB_A_EXT(n) (IRQ_GBUS_INT(24) + (n))
38# define IRQ_MB_A_EXT_NUM 4
39# define IRQ_MB_A_USB_OC(n) (IRQ_GBUS_INT(28) + (n))
40# define IRQ_MB_A_USB_OC_NUM 2
41# define IRQ_MB_A_PCMCIA_OC IRQ_GBUS_INT(30)
42
43/* We define NUM_MACH_IRQS to include extra interrupts from the GBUS. */
44# define NUM_MACH_IRQS (NUM_RTE_CB_IRQS + IRQ_GBUS_INT_NUM)
45
46#else /* !CONFIG_RTE_GBUS_INT */
47
48# define NUM_MACH_IRQS NUM_RTE_CB_IRQS
49
50#endif /* CONFIG_RTE_GBUS_INT */
51
52
53#ifdef CONFIG_RTE_MB_A_PCI
54/* Mother-A PCI bus support. */
55
56# include <asm/rte_mb_a_pci.h>
57
58/* These are the base addresses used for allocating device address
59 space. 512K of the motherboard SRAM is in the same space, so we have
60 to be careful not to let it be allocated. */
61# define PCIBIOS_MIN_MEM (MB_A_PCI_MEM_ADDR + 0x80000)
62# define PCIBIOS_MIN_IO MB_A_PCI_IO_ADDR
63
64/* As we don't really support PCI DMA to cpu memory, and use bounce-buffers
65 instead, perversely enough, this is always true! */
66# define pci_dma_supported(dev, mask) 1
67# define pcibios_assign_all_busses() 1
68
69#endif /* CONFIG_RTE_MB_A_PCI */
70
71
72#ifndef __ASSEMBLY__
73extern void rte_cb_early_init (void);
74extern void rte_cb_init_irqs (void);
75#endif /* !__ASSEMBLY__ */
76
77
78#endif /* __V850_RTE_CB_H__ */
diff --git a/include/asm-v850/rte_ma1_cb.h b/include/asm-v850/rte_ma1_cb.h
deleted file mode 100644
index bd3162ab9844..000000000000
--- a/include/asm-v850/rte_ma1_cb.h
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * include/asm-v850/rte_ma1_cb.h -- Midas labs RTE-V850/MA1-CB board
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_RTE_MA1_CB_H__
15#define __V850_RTE_MA1_CB_H__
16
17#include <asm/rte_cb.h> /* Common defs for Midas RTE-CB boards. */
18
19
20#define PLATFORM "rte-v850e/ma1-cb"
21#define PLATFORM_LONG "Midas lab RTE-V850E/MA1-CB"
22
23#define CPU_CLOCK_FREQ 50000000 /* 50MHz */
24
25/* 1MB of onboard SRAM. Note that the monitor ROM uses parts of this
26 for its own purposes, so care must be taken. Some address lines are
27 not decoded, so the SRAM area is mirrored every 1MB from 0x400000 to
28 0x800000 (exclusive). */
29#define SRAM_ADDR 0x00400000
30#define SRAM_SIZE 0x00100000 /* 1MB */
31
32/* 32MB of onboard SDRAM. */
33#define SDRAM_ADDR 0x00800000
34#define SDRAM_SIZE 0x02000000 /* 32MB */
35
36
37/* CPU addresses of GBUS memory spaces. */
38#define GCS0_ADDR 0x05000000 /* GCS0 - Common SRAM (2MB) */
39#define GCS0_SIZE 0x00200000 /* 2MB */
40#define GCS1_ADDR 0x06000000 /* GCS1 - Flash ROM (8MB) */
41#define GCS1_SIZE 0x00800000 /* 8MB */
42#define GCS2_ADDR 0x07900000 /* GCS2 - I/O registers */
43#define GCS2_SIZE 0x00400000 /* 4MB */
44#define GCS5_ADDR 0x04000000 /* GCS5 - PCI bus space */
45#define GCS5_SIZE 0x01000000 /* 16MB */
46#define GCS6_ADDR 0x07980000 /* GCS6 - PCI control registers */
47#define GCS6_SIZE 0x00000200 /* 512B */
48
49
50/* For <asm/page.h> */
51#define PAGE_OFFSET SRAM_ADDR
52
53
54/* The GBUS GINT0 - GINT3 interrupts are connected to the INTP000 - INTP011
55 pins on the CPU. These are shared among the GBUS interrupts. */
56#define IRQ_GINT(n) IRQ_INTP(n)
57#define IRQ_GINT_NUM 4
58
59/* Used by <asm/rte_cb.h> to derive NUM_MACH_IRQS. */
60#define NUM_RTE_CB_IRQS NUM_CPU_IRQS
61
62
63#ifdef CONFIG_ROM_KERNEL
64/* Kernel is in ROM, starting at address 0. */
65
66#define INTV_BASE 0
67
68#else /* !CONFIG_ROM_KERNEL */
69
70#ifdef CONFIG_RTE_CB_MULTI
71/* Using RAM kernel with ROM monitor for Multi debugger. */
72
73/* The chip's real interrupt vectors are in ROM, but they jump to a
74 secondary interrupt vector table in RAM. */
75#define INTV_BASE 0x004F8000
76
77/* Scratch memory used by the ROM monitor, which shouldn't be used by
78 linux (except for the alternate interrupt vector area, defined
79 above). */
80#define MON_SCRATCH_ADDR 0x004F8000
81#define MON_SCRATCH_SIZE 0x00008000 /* 32KB */
82
83#else /* !CONFIG_RTE_CB_MULTI */
84/* Using RAM-kernel. Assume some sort of boot-loader got us loaded at
85 address 0. */
86
87#define INTV_BASE 0
88
89#endif /* CONFIG_RTE_CB_MULTI */
90
91#endif /* CONFIG_ROM_KERNEL */
92
93
94/* Some misc. on-board devices. */
95
96/* Seven-segment LED display (two digits). Write-only. */
97#define LED_ADDR(n) (0x07802000 + (n))
98#define LED(n) (*(volatile unsigned char *)LED_ADDR(n))
99#define LED_NUM_DIGITS 2
100
101
102/* Override the basic MA uart pre-initialization so that we can
103 initialize extra stuff. */
104#undef V850E_UART_PRE_CONFIGURE /* should be defined by <asm/ma.h> */
105#define V850E_UART_PRE_CONFIGURE rte_ma1_cb_uart_pre_configure
106#ifndef __ASSEMBLY__
107extern void rte_ma1_cb_uart_pre_configure (unsigned chan,
108 unsigned cflags, unsigned baud);
109#endif
110
111/* This board supports RTS/CTS for the on-chip UART, but only for channel 0. */
112
113/* CTS for UART channel 0 is pin P43 (bit 3 of port 4). */
114#define V850E_UART_CTS(chan) ((chan) == 0 ? !(MA_PORT4_IO & 0x8) : 1)
115/* RTS for UART channel 0 is pin P42 (bit 2 of port 4). */
116#define V850E_UART_SET_RTS(chan, val) \
117 do { \
118 if (chan == 0) { \
119 unsigned old = MA_PORT4_IO; \
120 if (val) \
121 MA_PORT4_IO = old & ~0x4; \
122 else \
123 MA_PORT4_IO = old | 0x4; \
124 } \
125 } while (0)
126
127
128#endif /* __V850_RTE_MA1_CB_H__ */
diff --git a/include/asm-v850/rte_mb_a_pci.h b/include/asm-v850/rte_mb_a_pci.h
deleted file mode 100644
index 41ac185ca9cd..000000000000
--- a/include/asm-v850/rte_mb_a_pci.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * include/asm-v850/mb_a_pci.h -- PCI support for Midas lab RTE-MOTHER-A board
3 *
4 * Copyright (C) 2001 NEC Corporation
5 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_MB_A_PCI_H__
15#define __V850_MB_A_PCI_H__
16
17
18#define MB_A_PCI_MEM_ADDR GCS5_ADDR
19#define MB_A_PCI_MEM_SIZE (GCS5_SIZE / 2)
20#define MB_A_PCI_IO_ADDR (GCS5_ADDR + MB_A_PCI_MEM_SIZE)
21#define MB_A_PCI_IO_SIZE (GCS5_SIZE / 2)
22#define MB_A_PCI_REG_BASE_ADDR GCS6_ADDR
23
24#define MB_A_PCI_PCICR_ADDR (MB_A_PCI_REG_BASE_ADDR + 0x4)
25#define MB_A_PCI_PCICR (*(volatile u16 *)MB_A_PCI_PCICR_ADDR)
26#define MB_A_PCI_PCISR_ADDR (MB_A_PCI_REG_BASE_ADDR + 0x6)
27#define MB_A_PCI_PCISR (*(volatile u16 *)MB_A_PCI_PCISR_ADDR)
28#define MB_A_PCI_PCILTR_ADDR (MB_A_PCI_REG_BASE_ADDR + 0xD)
29#define MB_A_PCI_PCILTR (*(volatile u8 *)MB_A_PCI_PCILTR_ADDR)
30#define MB_A_PCI_PCIBAR0_ADDR (MB_A_PCI_REG_BASE_ADDR + 0x10)
31#define MB_A_PCI_PCIBAR0 (*(volatile u32 *)MB_A_PCI_PCIBAR0_ADDR)
32#define MB_A_PCI_PCIBAR1_ADDR (MB_A_PCI_REG_BASE_ADDR + 0x14)
33#define MB_A_PCI_PCIBAR1 (*(volatile u32 *)MB_A_PCI_PCIBAR1_ADDR)
34#define MB_A_PCI_PCIBAR2_ADDR (MB_A_PCI_REG_BASE_ADDR + 0x18)
35#define MB_A_PCI_PCIBAR2 (*(volatile u32 *)MB_A_PCI_PCIBAR2_ADDR)
36#define MB_A_PCI_VENDOR_ID_ADDR (MB_A_PCI_REG_BASE_ADDR + 0x2C)
37#define MB_A_PCI_VENDOR_ID (*(volatile u16 *)MB_A_PCI_VENDOR_ID_ADDR)
38#define MB_A_PCI_DEVICE_ID_ADDR (MB_A_PCI_REG_BASE_ADDR + 0x2E)
39#define MB_A_PCI_DEVICE_ID (*(volatile u16 *)MB_A_PCI_DEVICE_ID_ADDR)
40#define MB_A_PCI_DMRR_ADDR (MB_A_PCI_REG_BASE_ADDR + 0x9C)
41#define MB_A_PCI_DMRR (*(volatile u32 *)MB_A_PCI_DMRR_ADDR)
42#define MB_A_PCI_DMLBAM_ADDR (MB_A_PCI_REG_BASE_ADDR + 0xA0)
43#define MB_A_PCI_DMLBAM (*(volatile u32 *)MB_A_PCI_DMLBAM_ADDR)
44#define MB_A_PCI_DMLBAI_ADDR (MB_A_PCI_REG_BASE_ADDR + 0xA4)
45#define MB_A_PCI_DMLBAI (*(volatile u32 *)MB_A_PCI_DMLBAI_ADDR)
46#define MB_A_PCI_PCIPBAM_ADDR (MB_A_PCI_REG_BASE_ADDR + 0xA8)
47#define MB_A_PCI_PCIPBAM (*(volatile u32 *)MB_A_PCI_PCIPBAM_ADDR)
48/* `PCI Configuration Address Register for Direct Master to PCI IO/CFG' */
49#define MB_A_PCI_DMCFGA_ADDR (MB_A_PCI_REG_BASE_ADDR + 0xAC)
50#define MB_A_PCI_DMCFGA (*(volatile u32 *)MB_A_PCI_DMCFGA_ADDR)
51/* `PCI Permanent Configuration ID Register' */
52#define MB_A_PCI_PCIHIDR_ADDR (MB_A_PCI_REG_BASE_ADDR + 0xF0)
53#define MB_A_PCI_PCIHIDR (*(volatile u32 *)MB_A_PCI_PCIHIDR_ADDR)
54
55
56#endif /* __V850_MB_A_PCI_H__ */
diff --git a/include/asm-v850/rte_me2_cb.h b/include/asm-v850/rte_me2_cb.h
deleted file mode 100644
index 9922c85c85a8..000000000000
--- a/include/asm-v850/rte_me2_cb.h
+++ /dev/null
@@ -1,202 +0,0 @@
1/*
2 * include/asm-v850/rte_me2_cb.h -- Midas labs RTE-V850E/ME2-CB board
3 *
4 * Copyright (C) 2001,02,03 NEC Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_RTE_ME2_CB_H__
15#define __V850_RTE_ME2_CB_H__
16
17#include <asm/rte_cb.h> /* Common defs for Midas RTE-CB boards. */
18
19
20#define PLATFORM "rte-v850e/me2-cb"
21#define PLATFORM_LONG "Midas lab RTE-V850E/ME2-CB"
22
23#define CPU_CLOCK_FREQ 150000000 /* 150MHz */
24#define FIXED_BOGOMIPS 50
25
26/* 32MB of onboard SDRAM. */
27#define SDRAM_ADDR 0x00800000
28#define SDRAM_SIZE 0x02000000 /* 32MB */
29
30
31/* CPU addresses of GBUS memory spaces. */
32#define GCS0_ADDR 0x04000000 /* GCS0 - Common SRAM (2MB) */
33#define GCS0_SIZE 0x00800000 /* 8MB */
34#define GCS1_ADDR 0x04800000 /* GCS1 - Flash ROM (8MB) */
35#define GCS1_SIZE 0x00800000 /* 8MB */
36#define GCS2_ADDR 0x07000000 /* GCS2 - I/O registers */
37#define GCS2_SIZE 0x00800000 /* 8MB */
38#define GCS5_ADDR 0x08000000 /* GCS5 - PCI bus space */
39#define GCS5_SIZE 0x02000000 /* 32MB */
40#define GCS6_ADDR 0x07800000 /* GCS6 - PCI control registers */
41#define GCS6_SIZE 0x00800000 /* 8MB */
42
43
44/* For <asm/page.h> */
45#define PAGE_OFFSET SDRAM_ADDR
46
47
48#ifdef CONFIG_ROM_KERNEL
49/* Kernel is in ROM, starting at address 0. */
50
51#define INTV_BASE 0
52#define ROOT_FS_IMAGE_RW 0
53
54#else /* !CONFIG_ROM_KERNEL */
55/* Using RAM-kernel. Assume some sort of boot-loader got us loaded at
56 address 0. */
57
58#define INTV_BASE 0
59#define ROOT_FS_IMAGE_RW 1
60
61#endif /* CONFIG_ROM_KERNEL */
62
63
64/* Some misc. on-board devices. */
65
66/* Seven-segment LED display (four digits). */
67#define LED_ADDR(n) (0x0FE02000 + (n))
68#define LED(n) (*(volatile unsigned char *)LED_ADDR(n))
69#define LED_NUM_DIGITS 4
70
71
72/* On-board PIC. */
73
74#define CB_PIC_BASE_ADDR 0x0FE04000
75
76#define CB_PIC_INT0M_ADDR (CB_PIC_BASE_ADDR + 0x00)
77#define CB_PIC_INT0M (*(volatile u16 *)CB_PIC_INT0M_ADDR)
78#define CB_PIC_INT1M_ADDR (CB_PIC_BASE_ADDR + 0x10)
79#define CB_PIC_INT1M (*(volatile u16 *)CB_PIC_INT1M_ADDR)
80#define CB_PIC_INTR_ADDR (CB_PIC_BASE_ADDR + 0x20)
81#define CB_PIC_INTR (*(volatile u16 *)CB_PIC_INTR_ADDR)
82#define CB_PIC_INTEN_ADDR (CB_PIC_BASE_ADDR + 0x30)
83#define CB_PIC_INTEN (*(volatile u16 *)CB_PIC_INTEN_ADDR)
84
85#define CB_PIC_INT0EN 0x0001
86#define CB_PIC_INT1EN 0x0002
87#define CB_PIC_INT0SEL 0x0080
88
89/* The PIC interrupts themselves. */
90#define CB_PIC_BASE_IRQ NUM_CPU_IRQS
91#define IRQ_CB_PIC_NUM 10
92
93/* Some specific CB_PIC interrupts. */
94#define IRQ_CB_EXTTM0 (CB_PIC_BASE_IRQ + 0)
95#define IRQ_CB_EXTSIO (CB_PIC_BASE_IRQ + 1)
96#define IRQ_CB_TOVER (CB_PIC_BASE_IRQ + 2)
97#define IRQ_CB_GINT0 (CB_PIC_BASE_IRQ + 3)
98#define IRQ_CB_USB (CB_PIC_BASE_IRQ + 4)
99#define IRQ_CB_LANC (CB_PIC_BASE_IRQ + 5)
100#define IRQ_CB_USB_VBUS_ON (CB_PIC_BASE_IRQ + 6)
101#define IRQ_CB_USB_VBUS_OFF (CB_PIC_BASE_IRQ + 7)
102#define IRQ_CB_EXTTM1 (CB_PIC_BASE_IRQ + 8)
103#define IRQ_CB_EXTTM2 (CB_PIC_BASE_IRQ + 9)
104
105/* The GBUS GINT1 - GINT3 (note, not GINT0!) interrupts are connected to
106 the INTP65 - INTP67 pins on the CPU. These are shared among the GBUS
107 interrupts. */
108#define IRQ_GINT(n) IRQ_INTP((n) + 9) /* 0 is unused! */
109#define IRQ_GINT_NUM 4 /* 0 is unused! */
110
111/* The shared interrupt line from the PIC is connected to CPU pin INTP23. */
112#define IRQ_CB_PIC IRQ_INTP(4) /* P23 */
113
114/* Used by <asm/rte_cb.h> to derive NUM_MACH_IRQS. */
115#define NUM_RTE_CB_IRQS (NUM_CPU_IRQS + IRQ_CB_PIC_NUM)
116
117
118#ifndef __ASSEMBLY__
119struct cb_pic_irq_init {
120 const char *name; /* name of interrupt type */
121
122 /* Range of kernel irq numbers for this type:
123 BASE, BASE+INTERVAL, ..., BASE+INTERVAL*NUM */
124 unsigned base, num, interval;
125
126 unsigned priority; /* interrupt priority to assign */
127};
128struct hw_interrupt_type; /* fwd decl */
129
130/* Enable interrupt handling for interrupt IRQ. */
131extern void cb_pic_enable_irq (unsigned irq);
132/* Disable interrupt handling for interrupt IRQ. Note that any interrupts
133 received while disabled will be delivered once the interrupt is enabled
134 again, unless they are explicitly cleared using `cb_pic_clear_pending_irq'. */
135extern void cb_pic_disable_irq (unsigned irq);
136/* Initialize HW_IRQ_TYPES for PIC irqs described in array INITS (which is
137 terminated by an entry with the name field == 0). */
138extern void cb_pic_init_irq_types (struct cb_pic_irq_init *inits,
139 struct hw_interrupt_type *hw_irq_types);
140/* Initialize PIC interrupts. */
141extern void cb_pic_init_irqs (void);
142#endif /* __ASSEMBLY__ */
143
144
145/* TL16C550C on-board UART; see also asm/serial.h. */
146#define CB_UART_BASE 0x0FE08000
147#define CB_UART_REG_GAP 0x10
148#define CB_UART_CLOCK 0x16000000
149
150/* CompactFlash setting */
151#define CB_CF_BASE 0x0FE0C000
152#define CB_CF_CCR_ADDR (CB_CF_BASE+0x200)
153#define CB_CF_CCR (*(volatile u8 *)CB_CF_CCR_ADDR)
154#define CB_CF_REG0_ADDR (CB_CF_BASE+0x1000)
155#define CB_CF_REG0 (*(volatile u16 *)CB_CF_REG0_ADDR)
156#define CB_CF_STS0_ADDR (CB_CF_BASE+0x1004)
157#define CB_CF_STS0 (*(volatile u16 *)CB_CF_STS0_ADDR)
158#define CB_PCATA_BASE (CB_CF_BASE+0x800)
159#define CB_IDE_BASE (CB_CF_BASE+0x9F0)
160#define CB_IDE_CTRL (CB_CF_BASE+0xBF6)
161#define CB_IDE_REG_OFFS 0x1
162
163
164/* SMSC LAN91C111 setting */
165#if defined(CONFIG_SMC91111)
166#define CB_LANC_BASE 0x0FE10300
167#define CONFIG_SMC16BITONLY
168#define ETH0_ADDR CB_LANC_BASE
169#define ETH0_IRQ IRQ_CB_LANC
170#endif /* CONFIG_SMC91111 */
171
172
173#undef V850E_UART_PRE_CONFIGURE
174#define V850E_UART_PRE_CONFIGURE rte_me2_cb_uart_pre_configure
175#ifndef __ASSEMBLY__
176extern void rte_me2_cb_uart_pre_configure (unsigned chan,
177 unsigned cflags, unsigned baud);
178#endif /* __ASSEMBLY__ */
179
180/* This board supports RTS/CTS for the on-chip UART, but only for channel 0. */
181
182/* CTS for UART channel 0 is pin P22 (bit 2 of port 2). */
183#define V850E_UART_CTS(chan) ((chan) == 0 ? !(ME2_PORT2_IO & 0x4) : 1)
184/* RTS for UART channel 0 is pin P21 (bit 1 of port 2). */
185#define V850E_UART_SET_RTS(chan, val) \
186 do { \
187 if (chan == 0) { \
188 unsigned old = ME2_PORT2_IO; \
189 if (val) \
190 ME2_PORT2_IO = old & ~0x2; \
191 else \
192 ME2_PORT2_IO = old | 0x2; \
193 } \
194 } while (0)
195
196
197#ifndef __ASSEMBLY__
198extern void rte_me2_cb_init_irqs (void);
199#endif /* !__ASSEMBLY__ */
200
201
202#endif /* __V850_RTE_ME2_CB_H__ */
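
The RTS/CTS macros above only act on channel 0 and drive the modem-control lines through ME2 port 2 GPIO bits. A minimal sketch of how a serial driver might use them, hypothetical and not from the header; it assumes <asm/processor.h> for cpu_relax() and that this board header has been pulled in (e.g. via <asm/machdep.h>):

#include <asm/processor.h>

static void example_tx_with_flow_control (unsigned chan)
{
	V850E_UART_SET_RTS (chan, 1);	/* assert RTS (pulls P21 low); no-op for chan != 0 */

	while (!V850E_UART_CTS (chan))	/* wait for CTS on P22; always true for chan != 0 */
		cpu_relax ();

	/* ... hand characters to the UART transmitter here ... */

	V850E_UART_SET_RTS (chan, 0);	/* drop RTS again */
}
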
diff --git a/include/asm-v850/rte_nb85e_cb.h b/include/asm-v850/rte_nb85e_cb.h
deleted file mode 100644
index f56591cad90a..000000000000
--- a/include/asm-v850/rte_nb85e_cb.h
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * include/asm-v850/rte_nb85e_cb.h -- Midas labs RTE-V850/NB85E-CB board
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_RTE_NB85E_CB_H__
15#define __V850_RTE_NB85E_CB_H__
16
17#include <asm/rte_cb.h> /* Common defs for Midas RTE-CB boards. */
18
19
20#define PLATFORM "rte-v850e/nb85e-cb"
21#define PLATFORM_LONG "Midas lab RTE-V850E/NB85E-CB"
22
23#define CPU_CLOCK_FREQ 50000000 /* 50MHz */
24
25/* 1MB of onboard SRAM. Note that the monitor ROM uses parts of this
26 for its own purposes, so care must be taken. */
27#define SRAM_ADDR 0x03C00000
28#define SRAM_SIZE 0x00100000 /* 1MB */
29
30/* 16MB of onboard SDRAM. */
31#define SDRAM_ADDR 0x01000000
32#define SDRAM_SIZE 0x01000000 /* 16MB */
33
34
35/* CPU addresses of GBUS memory spaces. */
36#define GCS0_ADDR 0x00400000 /* GCS0 - Common SRAM (2MB) */
37#define GCS0_SIZE 0x00400000 /* 4MB */
38#define GCS1_ADDR 0x02000000 /* GCS1 - Flash ROM (8MB) */
39#define GCS1_SIZE 0x00800000 /* 8MB */
40#define GCS2_ADDR 0x03900000 /* GCS2 - I/O registers */
41#define GCS2_SIZE 0x00080000 /* 512KB */
42#define GCS3_ADDR 0x02800000 /* GCS3 - EXT-bus: memory space */
43#define GCS3_SIZE 0x00800000 /* 8MB */
44#define GCS4_ADDR 0x03A00000 /* GCS4 - EXT-bus: I/O space */
45#define GCS4_SIZE 0x00200000 /* 2MB */
46#define GCS5_ADDR 0x00800000 /* GCS5 - PCI bus space */
47#define GCS5_SIZE 0x00800000 /* 8MB */
48#define GCS6_ADDR 0x03980000 /* GCS6 - PCI control registers */
49#define GCS6_SIZE 0x00010000 /* 64KB */
50
51
52/* The GBUS GINT0 - GINT3 interrupts are connected to CPU interrupts 10-12.
53 These are shared among the GBUS interrupts. */
54#define IRQ_GINT(n) (10 + (n))
55#define IRQ_GINT_NUM 3
56
57/* Used by <asm/rte_cb.h> to derive NUM_MACH_IRQS. */
58#define NUM_RTE_CB_IRQS NUM_CPU_IRQS
59
60
61#ifdef CONFIG_ROM_KERNEL
62/* Kernel is in ROM, starting at address 0. */
63
64#define INTV_BASE 0
65
66#else /* !CONFIG_ROM_KERNEL */
67/* We're using the ROM monitor. */
68
69/* The chip's real interrupt vectors are in ROM, but they jump to a
70 secondary interrupt vector table in RAM. */
71#define INTV_BASE 0x03CF8000
72
73/* Scratch memory used by the ROM monitor, which shouldn't be used by
74 linux (except for the alternate interrupt vector area, defined
75 above). */
76#define MON_SCRATCH_ADDR 0x03CE8000
77#define MON_SCRATCH_SIZE 0x00018000 /* 96KB */
78
79#endif /* CONFIG_ROM_KERNEL */
80
81
82/* Some misc. on-board devices. */
83
84/* Seven-segment LED display (two digits). Write-only. */
85#define LED_ADDR(n) (0x03802000 + (n))
86#define LED(n) (*(volatile unsigned char *)LED_ADDR(n))
87#define LED_NUM_DIGITS 4
88
89
90/* Override the basic TEG UART pre-initialization so that we can
91 initialize extra stuff. */
92#undef V850E_UART_PRE_CONFIGURE /* should be defined by <asm/teg.h> */
93#define V850E_UART_PRE_CONFIGURE rte_nb85e_cb_uart_pre_configure
94#ifndef __ASSEMBLY__
95extern void rte_nb85e_cb_uart_pre_configure (unsigned chan,
96 unsigned cflags, unsigned baud);
97#endif
98
99/* This board supports RTS/CTS for the on-chip UART. */
100
101/* CTS is pin P00. */
102#define V850E_UART_CTS(chan) (! (TEG_PORT0_IO & 0x1))
103/* RTS is pin P02. */
104#define V850E_UART_SET_RTS(chan, val) \
105 do { \
106 unsigned old = TEG_PORT0_IO; \
107 TEG_PORT0_IO = val ? (old & ~0x4) : (old | 0x4); \
108 } while (0)
109
110
111#endif /* __V850_RTE_NB85E_CB_H__ */
diff --git a/include/asm-v850/scatterlist.h b/include/asm-v850/scatterlist.h
deleted file mode 100644
index 02d27b3fb061..000000000000
--- a/include/asm-v850/scatterlist.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * include/asm-v850/scatterlist.h
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_SCATTERLIST_H__
15#define __V850_SCATTERLIST_H__
16
17#include <asm/types.h>
18
19struct scatterlist {
20#ifdef CONFIG_DEBUG_SG
21 unsigned long sg_magic;
22#endif
23 unsigned long page_link;
24 unsigned offset;
25 dma_addr_t dma_address;
26 unsigned length;
27};
28
29#define ISA_DMA_THRESHOLD (~0UL)
30
31#endif /* __V850_SCATTERLIST_H__ */
diff --git a/include/asm-v850/sections.h b/include/asm-v850/sections.h
deleted file mode 100644
index e0238253a0d0..000000000000
--- a/include/asm-v850/sections.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_SECTIONS_H__
2#define __V850_SECTIONS_H__
3
4#include <asm-generic/sections.h>
5
6#endif /* __V850_SECTIONS_H__ */
diff --git a/include/asm-v850/segment.h b/include/asm-v850/segment.h
deleted file mode 100644
index 5e2b15dcf3d9..000000000000
--- a/include/asm-v850/segment.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef __V850_SEGMENT_H__
2#define __V850_SEGMENT_H__
3
4
5#ifndef __ASSEMBLY__
6
7typedef unsigned long mm_segment_t; /* domain register */
8
9#endif /* !__ASSEMBLY__ */
10
11
12#define __KERNEL_CS 0x0
13#define __KERNEL_DS 0x0
14
15#define __USER_CS 0x1
16#define __USER_DS 0x1
17
18#define KERNEL_DS __KERNEL_DS
19#define KERNEL_CS __KERNEL_CS
20#define USER_DS __USER_DS
21#define USER_CS __USER_CS
22
23#define segment_eq(a,b) ((a) == (b))
24
25#define get_ds() (KERNEL_DS)
26#define get_fs() (USER_DS)
27
28#define set_fs(seg) ((void)(seg))
29
30
31#define copy_segments(task, mm) ((void)((void)(task), (mm)))
32#define release_segments(mm) ((void)(mm))
33#define forget_segments() ((void)0)
34
35
36#endif /* __V850_SEGMENT_H__ */
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-v850/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-v850/sembuf.h b/include/asm-v850/sembuf.h
deleted file mode 100644
index 1622231a8b85..000000000000
--- a/include/asm-v850/sembuf.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __V850_SEMBUF_H__
2#define __V850_SEMBUF_H__
3
4/*
5 * The semid64_ds structure for v850 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct semid64_ds {
15 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
16 __kernel_time_t sem_otime; /* last semop time */
17 unsigned long __unused1;
18 __kernel_time_t sem_ctime; /* last change time */
19 unsigned long __unused2;
20 unsigned long sem_nsems; /* no. of semaphores in array */
21 unsigned long __unused3;
22 unsigned long __unused4;
23};
24
25#endif /* __V850_SEMBUF_H__ */
diff --git a/include/asm-v850/serial.h b/include/asm-v850/serial.h
deleted file mode 100644
index 36d8f4cbbf39..000000000000
--- a/include/asm-v850/serial.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9
10#ifdef CONFIG_RTE_CB_ME2
11
12#include <asm/rte_me2_cb.h>
13
14#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
15
16#define irq_cannonicalize(x) (x)
17#define BASE_BAUD 250000 /* (16MHz / (16 * 38400)) * 9600 */
18#define SERIAL_PORT_DFNS \
19 { 0, BASE_BAUD, CB_UART_BASE, IRQ_CB_EXTSIO, STD_COM_FLAGS },
20
21/* Redefine UART register offsets. */
22#undef UART_RX
23#undef UART_TX
24#undef UART_DLL
25#undef UART_TRG
26#undef UART_DLM
27#undef UART_IER
28#undef UART_FCTR
29#undef UART_IIR
30#undef UART_FCR
31#undef UART_EFR
32#undef UART_LCR
33#undef UART_MCR
34#undef UART_LSR
35#undef UART_MSR
36#undef UART_SCR
37#undef UART_EMSR
38
39#define UART_RX (0 * CB_UART_REG_GAP)
40#define UART_TX (0 * CB_UART_REG_GAP)
41#define UART_DLL (0 * CB_UART_REG_GAP)
42#define UART_TRG (0 * CB_UART_REG_GAP)
43#define UART_DLM (1 * CB_UART_REG_GAP)
44#define UART_IER (1 * CB_UART_REG_GAP)
45#define UART_FCTR (1 * CB_UART_REG_GAP)
46#define UART_IIR (2 * CB_UART_REG_GAP)
47#define UART_FCR (2 * CB_UART_REG_GAP)
48#define UART_EFR (2 * CB_UART_REG_GAP)
49#define UART_LCR (3 * CB_UART_REG_GAP)
50#define UART_MCR (4 * CB_UART_REG_GAP)
51#define UART_LSR (5 * CB_UART_REG_GAP)
52#define UART_MSR (6 * CB_UART_REG_GAP)
53#define UART_SCR (7 * CB_UART_REG_GAP)
54#define UART_EMSR (7 * CB_UART_REG_GAP)
55
56#endif /* CONFIG_RTE_CB_ME2 */
diff --git a/include/asm-v850/setup.h b/include/asm-v850/setup.h
deleted file mode 100644
index c48a9b97d05b..000000000000
--- a/include/asm-v850/setup.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _V850_SETUP_H
2#define _V850_SETUP_H
3
4#define COMMAND_LINE_SIZE 512
5
6#endif /* __SETUP_H */
diff --git a/include/asm-v850/shmbuf.h b/include/asm-v850/shmbuf.h
deleted file mode 100644
index 3d085c9c418e..000000000000
--- a/include/asm-v850/shmbuf.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef __V850_SHMBUF_H__
2#define __V850_SHMBUF_H__
3
4/*
5 * The shmid64_ds structure for v850 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct shmid64_ds {
15 struct ipc64_perm shm_perm; /* operation perms */
16 size_t shm_segsz; /* size of segment (bytes) */
17 __kernel_time_t shm_atime; /* last attach time */
18 unsigned long __unused1;
19 __kernel_time_t shm_dtime; /* last detach time */
20 unsigned long __unused2;
21 __kernel_time_t shm_ctime; /* last change time */
22 unsigned long __unused3;
23 __kernel_pid_t shm_cpid; /* pid of creator */
24 __kernel_pid_t shm_lpid; /* pid of last operator */
25 unsigned long shm_nattch; /* no. of current attaches */
26 unsigned long __unused4;
27 unsigned long __unused5;
28};
29
30struct shminfo64 {
31 unsigned long shmmax;
32 unsigned long shmmin;
33 unsigned long shmmni;
34 unsigned long shmseg;
35 unsigned long shmall;
36 unsigned long __unused1;
37 unsigned long __unused2;
38 unsigned long __unused3;
39 unsigned long __unused4;
40};
41
42#endif /* __V850_SHMBUF_H__ */
diff --git a/include/asm-v850/shmparam.h b/include/asm-v850/shmparam.h
deleted file mode 100644
index 7dcb6739073e..000000000000
--- a/include/asm-v850/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_SHMPARAM_H__
2#define __V850_SHMPARAM_H__
3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5
6#endif /* __V850_SHMPARAM_H__ */
diff --git a/include/asm-v850/sigcontext.h b/include/asm-v850/sigcontext.h
deleted file mode 100644
index e0890f6f4bc9..000000000000
--- a/include/asm-v850/sigcontext.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * include/asm-v850/sigcontext.h -- Signal contexts
3 *
4 * Copyright (C) 2001 NEC Corporation
5 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_SIGCONTEXT_H__
15#define __V850_SIGCONTEXT_H__
16
17#include <asm/ptrace.h>
18
19struct sigcontext
20{
21 struct pt_regs regs;
22 unsigned long oldmask;
23};
24
25#endif /* __V850_SIGCONTEXT_H__ */
diff --git a/include/asm-v850/siginfo.h b/include/asm-v850/siginfo.h
deleted file mode 100644
index 7eb94703dce0..000000000000
--- a/include/asm-v850/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_SIGINFO_H__
2#define __V850_SIGINFO_H__
3
4#include <asm-generic/siginfo.h>
5
6#endif /* __V850_SIGINFO_H__ */
diff --git a/include/asm-v850/signal.h b/include/asm-v850/signal.h
deleted file mode 100644
index a38df0834bbf..000000000000
--- a/include/asm-v850/signal.h
+++ /dev/null
@@ -1,168 +0,0 @@
1#ifndef __V850_SIGNAL_H__
2#define __V850_SIGNAL_H__
3
4#include <linux/types.h>
5
6/* Avoid too many header ordering problems. */
7struct siginfo;
8
9
10#ifdef __KERNEL__
11
12/* Most things should be clean enough to redefine this at will, if care
13 is taken to make libc match. */
14#define _NSIG 64
15#define _NSIG_BPW 32
16#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
17
18typedef unsigned long old_sigset_t; /* at least 32 bits */
19
20typedef struct {
21 unsigned long sig[_NSIG_WORDS];
22} sigset_t;
23
24#else /* !__KERNEL__ */
25
26/* Here we must cater to libcs that poke about in kernel headers. */
27
28#define NSIG 32
29typedef unsigned long sigset_t;
30
31#endif /* __KERNEL__ */
32
33
34#define SIGHUP 1
35#define SIGINT 2
36#define SIGQUIT 3
37#define SIGILL 4
38#define SIGTRAP 5
39#define SIGABRT 6
40#define SIGIOT 6
41#define SIGBUS 7
42#define SIGFPE 8
43#define SIGKILL 9
44#define SIGUSR1 10
45#define SIGSEGV 11
46#define SIGUSR2 12
47#define SIGPIPE 13
48#define SIGALRM 14
49#define SIGTERM 15
50#define SIGSTKFLT 16
51#define SIGCHLD 17
52#define SIGCONT 18
53#define SIGSTOP 19
54#define SIGTSTP 20
55#define SIGTTIN 21
56#define SIGTTOU 22
57#define SIGURG 23
58#define SIGXCPU 24
59#define SIGXFSZ 25
60#define SIGVTALRM 26
61#define SIGPROF 27
62#define SIGWINCH 28
63#define SIGIO 29
64#define SIGPOLL SIGIO
65/*
66#define SIGLOST 29
67*/
68#define SIGPWR 30
69#define SIGSYS 31
70#define SIGUNUSED 31
71
72/* These should not be considered constants from userland. */
73#define SIGRTMIN 32
74#define SIGRTMAX _NSIG
75
76/*
77 * SA_FLAGS values:
78 *
79 * SA_ONSTACK indicates that a registered stack_t will be used.
80 * SA_RESTART flag to get restarting signals (which were the default long ago)
81 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
82 * SA_RESETHAND clears the handler when the signal is delivered.
83 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
84 * SA_NODEFER prevents the current signal from being masked in the handler.
85 *
86 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
87 * Unix names RESETHAND and NODEFER respectively.
88 */
89#define SA_NOCLDSTOP 0x00000001
90#define SA_NOCLDWAIT 0x00000002
91#define SA_SIGINFO 0x00000004
92#define SA_ONSTACK 0x08000000
93#define SA_RESTART 0x10000000
94#define SA_NODEFER 0x40000000
95#define SA_RESETHAND 0x80000000
96
97#define SA_NOMASK SA_NODEFER
98#define SA_ONESHOT SA_RESETHAND
99
100#define SA_RESTORER 0x04000000
101
102/*
103 * sigaltstack controls
104 */
105#define SS_ONSTACK 1
106#define SS_DISABLE 2
107
108#define MINSIGSTKSZ 2048
109#define SIGSTKSZ 8192
110
111#include <asm-generic/signal.h>
112
113#ifdef __KERNEL__
114
115struct old_sigaction {
116 __sighandler_t sa_handler;
117 old_sigset_t sa_mask;
118 unsigned long sa_flags;
119 void (*sa_restorer)(void);
120};
121
122struct sigaction {
123 __sighandler_t sa_handler;
124 unsigned long sa_flags;
125 void (*sa_restorer)(void);
126 sigset_t sa_mask; /* mask last for extensibility */
127};
128
129struct k_sigaction {
130 struct sigaction sa;
131};
132
133#else /* !__KERNEL__ */
134
135/* Here we must cater to libcs that poke about in kernel headers. */
136
137struct sigaction {
138 union {
139 __sighandler_t _sa_handler;
140 void (*_sa_sigaction)(int, struct siginfo *, void *);
141 } _u;
142 sigset_t sa_mask;
143 unsigned long sa_flags;
144 void (*sa_restorer)(void);
145};
146
147#define sa_handler _u._sa_handler
148#define sa_sigaction _u._sa_sigaction
149
150#endif /* __KERNEL__ */
151
152
153typedef struct sigaltstack {
154 void *ss_sp;
155 int ss_flags;
156 size_t ss_size;
157} stack_t;
158
159#ifdef __KERNEL__
160
161#include <asm/sigcontext.h>
162#undef __HAVE_ARCH_SIG_BITOPS
163
164#define ptrace_signal_deliver(regs, cookie) do { } while (0)
165
166#endif /* __KERNEL__ */
167
168#endif /* __V850_SIGNAL_H__ */
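
For illustration only (a sketch, not part of the deleted header): with _NSIG = 64 and _NSIG_BPW = 32 as defined above, the kernel-side sigset_t holds two 32-bit words, and a signal number selects a word and a bit within it. The helper below is hypothetical and only mirrors that arithmetic.

    /* Hypothetical sketch of the word/bit split used by the sigset_t above. */
    #define EX_NSIG_BPW 32
    static inline void example_sigaddset(unsigned long *words, int sig)
    {
            unsigned long s = sig - 1;            /* signal numbers are 1-based */
            words[s / EX_NSIG_BPW] |= 1UL << (s % EX_NSIG_BPW);
            /* e.g. sig 33 (the second realtime signal) lands in word 1, bit 0 */
    }
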
diff --git a/include/asm-v850/sim.h b/include/asm-v850/sim.h
deleted file mode 100644
index 026932d476cd..000000000000
--- a/include/asm-v850/sim.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * include/asm-v850/sim.h -- Machine-dependent defs for GDB v850e simulator
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_SIM_H__
15#define __V850_SIM_H__
16
17
18#define CPU_ARCH "v850e"
19#define CPU_MODEL "v850e"
20#define CPU_MODEL_LONG "NEC V850E"
21#define PLATFORM "gdb/v850e"
22#define PLATFORM_LONG "GDB V850E simulator"
23
24
25/* We use a weird value for RAM, not just 0, for testing purposes.
26 These must match the values used in the linker script. */
27#define RAM_ADDR 0x8F000000
28#define RAM_SIZE 0x03000000
29
30
31/* For <asm/page.h> */
32#define PAGE_OFFSET RAM_ADDR
33
34
35/* For <asm/entry.h> */
36/* `R0 RAM', used for a few miscellaneous variables that must be
37 accessible using a load instruction relative to R0. On real
38 processors, this usually is on-chip RAM, but here we just
39 choose an arbitrary address that meets the above constraint. */
40#define R0_RAM_ADDR 0xFFFFF000
41
42
43/* For <asm/irq.h> */
44#define NUM_CPU_IRQS 6
45
46
47#endif /* __V850_SIM_H__ */
diff --git a/include/asm-v850/sim85e2.h b/include/asm-v850/sim85e2.h
deleted file mode 100644
index 8b4d6974066c..000000000000
--- a/include/asm-v850/sim85e2.h
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
2 * include/asm-v850/sim85e2.h -- Machine-dependent defs for
3 * V850E2 RTL simulator
4 *
5 * Copyright (C) 2002,03 NEC Electronics Corporation
6 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#ifndef __V850_SIM85E2_H__
16#define __V850_SIM85E2_H__
17
18
19#include <asm/v850e2.h> /* Based on V850E2 core. */
20
21
22/* Various memory areas supported by the simulator.
23 These should match the corresponding definitions in the linker script. */
24
25/* `instruction RAM'; instruction fetches are much faster from IRAM than
26 from DRAM. */
27#define IRAM_ADDR 0
28#define IRAM_SIZE 0x00100000 /* 1MB */
29/* `data RAM', below and contiguous with the I/O space.
30 Data fetches are much faster from DRAM than from IRAM. */
31#define DRAM_ADDR 0xfff00000
32#define DRAM_SIZE 0x000ff000 /* 1020KB */
33/* `external ram'. Unlike the above RAM areas, this memory is cached,
34 so both instruction and data fetches should be (mostly) fast --
35 however, currently only write-through caching is supported, so writes
36 to ERAM will be slow. */
37#define ERAM_ADDR 0x00100000
38#define ERAM_SIZE 0x07f00000 /* 127MB (max) */
39/* Dynamic RAM; uses memory controller. */
40#define SDRAM_ADDR 0x10000000
41#define SDRAM_SIZE 0x01000000 /* 16MB */
42
43
44/* Simulator specific control registers. */
45/* NOTHAL controls whether the simulator will stop at a `halt' insn. */
46#define SIM85E2_NOTHAL_ADDR 0xffffff22
47#define SIM85E2_NOTHAL (*(volatile u8 *)SIM85E2_NOTHAL_ADDR)
48/* The simulator will stop N cycles after N is written to SIMFIN. */
49#define SIM85E2_SIMFIN_ADDR 0xffffff24
50#define SIM85E2_SIMFIN (*(volatile u16 *)SIM85E2_SIMFIN_ADDR)
51
52
53/* For <asm/irq.h> */
54#define NUM_CPU_IRQS 64
55
56
57/* For <asm/page.h> */
58#define PAGE_OFFSET SDRAM_ADDR
59
60
61/* For <asm/entry.h> */
62/* `R0 RAM', used for a few miscellaneous variables that must be accessible
63 using a load instruction relative to R0. The sim85e2 simulator
64 actually puts 1020K of RAM from FFF00000 to FFFFF000, so we arbitrarily
65 choose a small portion at the end of that. */
66#define R0_RAM_ADDR 0xFFFFE000
67
68
69#endif /* __V850_SIM85E2_H__ */
diff --git a/include/asm-v850/sim85e2c.h b/include/asm-v850/sim85e2c.h
deleted file mode 100644
index eee543ff3af8..000000000000
--- a/include/asm-v850/sim85e2c.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * include/asm-v850/sim85e2c.h -- Machine-dependent defs for
3 * V850E2 RTL simulator
4 *
5 * Copyright (C) 2002 NEC Corporation
6 * Copyright (C) 2002 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#ifndef __V850_SIM85E2C_H__
16#define __V850_SIM85E2C_H__
17
18/* Use generic sim85e2 settings, other than the various names. */
19#include <asm/sim85e2.h>
20
21#define CPU_MODEL "v850e2"
22#define CPU_MODEL_LONG "NEC V850E2"
23#define PLATFORM "sim85e2c"
24#define PLATFORM_LONG "SIM85E2C V850E2 simulator"
25
26#endif /* __V850_SIM85E2C_H__ */
diff --git a/include/asm-v850/sim85e2s.h b/include/asm-v850/sim85e2s.h
deleted file mode 100644
index ee066d5d3c51..000000000000
--- a/include/asm-v850/sim85e2s.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * include/asm-v850/sim85e2s.h -- Machine-dependent defs for
3 * V850E2 RTL simulator
4 *
5 * Copyright (C) 2003 NEC Electronics Corporation
6 * Copyright (C) 2003 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#ifndef __V850_SIM85E2S_H__
16#define __V850_SIM85E2S_H__
17
18#include <asm/sim85e2.h> /* Use generic sim85e2 settings. */
19#if 0
20#include <asm/v850e2_cache.h> /* + cache */
21#endif
22
23#define CPU_MODEL "v850e2"
24#define CPU_MODEL_LONG "NEC V850E2"
25#define PLATFORM "sim85e2s"
26#define PLATFORM_LONG "SIM85E2S V850E2 simulator"
27
28#endif /* __V850_SIM85E2S_H__ */
diff --git a/include/asm-v850/simsyscall.h b/include/asm-v850/simsyscall.h
deleted file mode 100644
index 4a19d5ae9d17..000000000000
--- a/include/asm-v850/simsyscall.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * include/asm-v850/simsyscall.h -- `System calls' under the v850e emulator
3 *
4 * Copyright (C) 2001 NEC Corporation
5 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_SIMSYSCALL_H__
15#define __V850_SIMSYSCALL_H__
16
17#define V850_SIM_SYS_exit(a...) V850_SIM_SYSCALL_1 (1 , ##a)
18#define V850_SIM_SYS_fork(a...) V850_SIM_SYSCALL_0 (2 , ##a)
19#define V850_SIM_SYS_read(a...) V850_SIM_SYSCALL_3 (3 , ##a)
20#define V850_SIM_SYS_write(a...) V850_SIM_SYSCALL_3 (4 , ##a)
21#define V850_SIM_SYS_open(a...) V850_SIM_SYSCALL_2 (5 , ##a)
22#define V850_SIM_SYS_close(a...) V850_SIM_SYSCALL_1 (6 , ##a)
23#define V850_SIM_SYS_wait4(a...) V850_SIM_SYSCALL_4 (7 , ##a)
24/* #define V850_SIM_SYS_creat(a...) V850_SIM_SYSCALL_1 (8 , ##a) */
25/* #define V850_SIM_SYS_link(a...) V850_SIM_SYSCALL_1 (9 , ##a) */
26/* #define V850_SIM_SYS_unlink(a...) V850_SIM_SYSCALL_1 (10 , ##a) */
27#define V850_SIM_SYS_execv(a...) V850_SIM_SYSCALL_2 (11 , ##a)
28/* #define V850_SIM_SYS_chdir(a...) V850_SIM_SYSCALL_1 (12 , ##a) */
29/* #define V850_SIM_SYS_mknod(a...) V850_SIM_SYSCALL_1 (14 , ##a) */
30#define V850_SIM_SYS_chmod(a...) V850_SIM_SYSCALL_2 (15 , ##a)
31#define V850_SIM_SYS_chown(a...) V850_SIM_SYSCALL_2 (16 , ##a)
32#define V850_SIM_SYS_lseek(a...) V850_SIM_SYSCALL_3 (19 , ##a)
33/* #define V850_SIM_SYS_getpid(a...) V850_SIM_SYSCALL_1 (20 , ##a) */
34/* #define V850_SIM_SYS_isatty(a...) V850_SIM_SYSCALL_1 (21 , ##a) */
35/* #define V850_SIM_SYS_fstat(a...) V850_SIM_SYSCALL_1 (22 , ##a) */
36#define V850_SIM_SYS_time(a...) V850_SIM_SYSCALL_1 (23 , ##a)
37#define V850_SIM_SYS_poll(a...) V850_SIM_SYSCALL_3 (24 , ##a)
38#define V850_SIM_SYS_stat(a...) V850_SIM_SYSCALL_2 (38 , ##a)
39#define V850_SIM_SYS_pipe(a...) V850_SIM_SYSCALL_1 (42 , ##a)
40#define V850_SIM_SYS_times(a...) V850_SIM_SYSCALL_1 (43 , ##a)
41#define V850_SIM_SYS_execve(a...) V850_SIM_SYSCALL_3 (59 , ##a)
42#define V850_SIM_SYS_gettimeofday(a...) V850_SIM_SYSCALL_2 (116 , ##a)
43/* #define V850_SIM_SYS_utime(a...) V850_SIM_SYSCALL_2 (201 , ##a) */
44/* #define V850_SIM_SYS_wait(a...) V850_SIM_SYSCALL_1 (202 , ##a) */
45
46#define V850_SIM_SYS_make_raw(a...) V850_SIM_SYSCALL_1 (1024 , ##a)
47
48
49#define V850_SIM_SYSCALL_0(_call) \
50({ \
51 register int call __asm__ ("r6") = _call; \
52 register int rval __asm__ ("r10"); \
53 __asm__ __volatile__ ("trap 31" \
54 : "=r" (rval) \
55 : "r" (call) \
56 : "r11", "memory"); \
57 rval; \
58})
59#define V850_SIM_SYSCALL_1(_call, _arg0) \
60({ \
61 register int call __asm__ ("r6") = _call; \
62 register long arg0 __asm__ ("r7") = (long)_arg0; \
63 register int rval __asm__ ("r10"); \
64 __asm__ __volatile__ ("trap 31" \
65 : "=r" (rval) \
66 : "r" (call), "r" (arg0) \
67 : "r11", "memory"); \
68 rval; \
69})
70#define V850_SIM_SYSCALL_2(_call, _arg0, _arg1) \
71({ \
72 register int call __asm__ ("r6") = _call; \
73 register long arg0 __asm__ ("r7") = (long)_arg0; \
74 register long arg1 __asm__ ("r8") = (long)_arg1; \
75 register int rval __asm__ ("r10"); \
76 __asm__ __volatile__ ("trap 31" \
77 : "=r" (rval) \
78 : "r" (call), "r" (arg0), "r" (arg1) \
79 : "r11", "memory"); \
80 rval; \
81})
82#define V850_SIM_SYSCALL_3(_call, _arg0, _arg1, _arg2) \
83({ \
84 register int call __asm__ ("r6") = _call; \
85 register long arg0 __asm__ ("r7") = (long)_arg0; \
86 register long arg1 __asm__ ("r8") = (long)_arg1; \
87 register long arg2 __asm__ ("r9") = (long)_arg2; \
88 register int rval __asm__ ("r10"); \
89 __asm__ __volatile__ ("trap 31" \
90 : "=r" (rval) \
91 : "r" (call), "r" (arg0), "r" (arg1), "r" (arg2)\
92 : "r11", "memory"); \
93 rval; \
94})
95
96#define V850_SIM_SYSCALL(call, args...) \
97 V850_SIM_SYS_##call (args)
98
99#endif /* __V850_SIMSYSCALL_H__ */
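
A hedged usage sketch (not part of the deleted header): the macros above place the call number in r6, the arguments in r7-r9, and the result in r10, then execute "trap 31", which the GDB v850e simulator intercepts. A console write under the simulator could therefore be issued as below; the wrapper name is hypothetical.

    /* Hypothetical wrapper around the simulator syscall macros above. */
    static int sim_console_write(const char *buf, unsigned long len)
    {
            /* Expands to V850_SIM_SYS_write(...), i.e. V850_SIM_SYSCALL_3(4, ...). */
            return V850_SIM_SYSCALL(write, 1 /* stdout */, buf, len);
    }
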
diff --git a/include/asm-v850/socket.h b/include/asm-v850/socket.h
deleted file mode 100644
index e199a2bf12aa..000000000000
--- a/include/asm-v850/socket.h
+++ /dev/null
@@ -1,57 +0,0 @@
1#ifndef __V850_SOCKET_H__
2#define __V850_SOCKET_H__
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7#define SOL_SOCKET 1
8
9#define SO_DEBUG 1
10#define SO_REUSEADDR 2
11#define SO_TYPE 3
12#define SO_ERROR 4
13#define SO_DONTROUTE 5
14#define SO_BROADCAST 6
15#define SO_SNDBUF 7
16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
19#define SO_KEEPALIVE 9
20#define SO_OOBINLINE 10
21#define SO_NO_CHECK 11
22#define SO_PRIORITY 12
23#define SO_LINGER 13
24#define SO_BSDCOMPAT 14
25/* To add :#define SO_REUSEPORT 15 */
26#define SO_PASSCRED 16
27#define SO_PEERCRED 17
28#define SO_RCVLOWAT 18
29#define SO_SNDLOWAT 19
30#define SO_RCVTIMEO 20
31#define SO_SNDTIMEO 21
32
33/* Security levels - as per NRL IPv6 - don't actually do anything */
34#define SO_SECURITY_AUTHENTICATION 22
35#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
36#define SO_SECURITY_ENCRYPTION_NETWORK 24
37
38#define SO_BINDTODEVICE 25
39
40/* Socket filtering */
41#define SO_ATTACH_FILTER 26
42#define SO_DETACH_FILTER 27
43
44#define SO_PEERNAME 28
45#define SO_TIMESTAMP 29
46#define SCM_TIMESTAMP SO_TIMESTAMP
47
48#define SO_ACCEPTCONN 30
49
50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
52#define SO_TIMESTAMPNS 35
53#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
54
55#define SO_MARK 36
56
57#endif /* __V850_SOCKET_H__ */
diff --git a/include/asm-v850/sockios.h b/include/asm-v850/sockios.h
deleted file mode 100644
index 823e106e6cd0..000000000000
--- a/include/asm-v850/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __V850_SOCKIOS_H__
2#define __V850_SOCKIOS_H__
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif /* __V850_SOCKIOS_H__ */
diff --git a/include/asm-v850/stat.h b/include/asm-v850/stat.h
deleted file mode 100644
index c68c60d06e2f..000000000000
--- a/include/asm-v850/stat.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * include/asm-v850/stat.h -- v850 stat structure
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_STAT_H__
15#define __V850_STAT_H__
16
17#include <asm/posix_types.h>
18
19struct stat {
20 unsigned int st_dev;
21 unsigned long st_ino;
22 unsigned int st_mode;
23 unsigned int st_nlink;
24 unsigned int st_uid;
25 unsigned int st_gid;
26 unsigned int st_rdev;
27 long st_size;
28 unsigned long st_blksize;
29 unsigned long st_blocks;
30 unsigned long st_atime;
31 unsigned long __unused1;
32 unsigned long st_mtime;
33 unsigned long __unused2;
34 unsigned long st_ctime;
35 unsigned long __unused3;
36 unsigned long __unused4;
37 unsigned long __unused5;
38};
39
40struct stat64 {
41 unsigned long long st_dev;
42 unsigned long __unused1;
43
44 unsigned long long st_ino;
45
46 unsigned int st_mode;
47 unsigned int st_nlink;
48
49 unsigned int st_uid;
50 unsigned int st_gid;
51
52 unsigned long long st_rdev;
53 unsigned long __unused3;
54
55 long long st_size;
56 unsigned long st_blksize;
57
58 unsigned long st_blocks; /* No. of 512-byte blocks allocated */
59 unsigned long __unused4; /* future possible st_blocks high bits */
60
61 unsigned long st_atime;
62 unsigned long st_atime_nsec;
63
64 unsigned long st_mtime;
65 unsigned long st_mtime_nsec;
66
67 unsigned long st_ctime;
68 unsigned long st_ctime_nsec;
69
70 unsigned long __unused8;
71};
72
73#endif /* __V850_STAT_H__ */
diff --git a/include/asm-v850/statfs.h b/include/asm-v850/statfs.h
deleted file mode 100644
index ea1596607f26..000000000000
--- a/include/asm-v850/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_STATFS_H__
2#define __V850_STATFS_H__
3
4#include <asm-generic/statfs.h>
5
6#endif /* __V850_STATFS_H__ */
diff --git a/include/asm-v850/string.h b/include/asm-v850/string.h
deleted file mode 100644
index 478e234789d6..000000000000
--- a/include/asm-v850/string.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * include/asm-v850/string.h -- Architecture specific string routines
3 *
4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_STRING_H__
15#define __V850_STRING_H__
16
17#define __HAVE_ARCH_MEMCPY
18#define __HAVE_ARCH_MEMSET
19#define __HAVE_ARCH_MEMMOVE
20
21extern void *memcpy (void *, const void *, __kernel_size_t);
22extern void *memset (void *, int, __kernel_size_t);
23extern void *memmove (void *, const void *, __kernel_size_t);
24
25#endif /* __V850_STRING_H__ */
diff --git a/include/asm-v850/system.h b/include/asm-v850/system.h
deleted file mode 100644
index 7daf1fdee119..000000000000
--- a/include/asm-v850/system.h
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * include/asm-v850/system.h -- Low-level interrupt/thread ops
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_SYSTEM_H__
15#define __V850_SYSTEM_H__
16
17#include <linux/linkage.h>
18#include <asm/ptrace.h>
19
20
21/*
22 * switch_to(n) should switch tasks to task ptr, first checking that
23 * ptr isn't the current task, in which case it does nothing.
24 */
25struct thread_struct;
26extern void *switch_thread (struct thread_struct *last,
27 struct thread_struct *next);
28#define switch_to(prev,next,last) \
29 do { \
30 if (prev != next) { \
31 (last) = switch_thread (&prev->thread, &next->thread); \
32 } \
33 } while (0)
34
35
36/* Enable/disable interrupts. */
37#define local_irq_enable() __asm__ __volatile__ ("ei")
38#define local_irq_disable() __asm__ __volatile__ ("di")
39
40#define local_save_flags(flags) \
41 __asm__ __volatile__ ("stsr %1, %0" : "=r" (flags) : "i" (SR_PSW))
42#define local_restore_flags(flags) \
43 __asm__ __volatile__ ("ldsr %0, %1" :: "r" (flags), "i" (SR_PSW))
44
45/* For spinlocks etc */
46#define local_irq_save(flags) \
47 do { local_save_flags (flags); local_irq_disable (); } while (0)
48#define local_irq_restore(flags) \
49 local_restore_flags (flags);
50
51
52static inline int irqs_disabled (void)
53{
54 unsigned flags;
55 local_save_flags (flags);
56 return !!(flags & 0x20);
57}
58
59
60/*
61 * Force strict CPU ordering.
62 * Not really required on v850...
63 */
64#define nop() __asm__ __volatile__ ("nop")
65#define mb() __asm__ __volatile__ ("" ::: "memory")
66#define rmb() mb ()
67#define wmb() mb ()
68#define read_barrier_depends() ((void)0)
69#define set_mb(var, value) do { xchg (&var, value); } while (0)
70
71#define smp_mb() mb ()
72#define smp_rmb() rmb ()
73#define smp_wmb() wmb ()
74#define smp_read_barrier_depends() read_barrier_depends()
75
76#define xchg(ptr, with) \
77 ((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr))))
78
79static inline unsigned long __xchg (unsigned long with,
80 __volatile__ void *ptr, int size)
81{
82 unsigned long tmp, flags;
83
84 local_irq_save (flags);
85
86 switch (size) {
87 case 1:
88 tmp = *(unsigned char *)ptr;
89 *(unsigned char *)ptr = with;
90 break;
91 case 2:
92 tmp = *(unsigned short *)ptr;
93 *(unsigned short *)ptr = with;
94 break;
95 case 4:
96 tmp = *(unsigned long *)ptr;
97 *(unsigned long *)ptr = with;
98 break;
99 }
100
101 local_irq_restore (flags);
102
103 return tmp;
104}
105
106#include <asm-generic/cmpxchg-local.h>
107
108/*
109 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
110 * them available.
111 */
112#define cmpxchg_local(ptr, o, n) \
113 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
114 (unsigned long)(n), sizeof(*(ptr))))
115#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
116
117#ifndef CONFIG_SMP
118#include <asm-generic/cmpxchg.h>
119#endif
120
121#define arch_align_stack(x) (x)
122
123#endif /* __V850_SYSTEM_H__ */
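
A minimal sketch of how the xchg() above behaves on this uniprocessor port (illustrative, not from the deleted file): atomicity comes from disabling interrupts around a plain load/store pair, so a trivial test-and-set word is only meaningful against interrupt context. The names below are hypothetical.

    /* Hypothetical test-and-set built on the xchg() defined above. */
    static volatile unsigned long ex_lock_word;

    static inline void ex_lock(void)
    {
            while (xchg(&ex_lock_word, 1) != 0)
                    ;       /* spin; on UP this only guards against interrupt handlers */
    }

    static inline void ex_unlock(void)
    {
            xchg(&ex_lock_word, 0);
    }
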
diff --git a/include/asm-v850/teg.h b/include/asm-v850/teg.h
deleted file mode 100644
index acc8c7d95329..000000000000
--- a/include/asm-v850/teg.h
+++ /dev/null
@@ -1,101 +0,0 @@
1/*
2 * include/asm-v850/teg.h -- NB85E-TEG cpu chip
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_TEG_H__
15#define __V850_TEG_H__
16
17
18/* The TEG uses the V850E cpu core. */
19#include <asm/v850e.h>
20#include <asm/v850e_cache.h>
21
22
23#define CPU_MODEL "v850e/nb85e-teg"
24#define CPU_MODEL_LONG "NEC V850E/NB85E TEG"
25
26
27/* For <asm/entry.h> */
28/* We use on-chip RAM, for a few miscellaneous variables that must be
29 accessible using a load instruction relative to R0. On the NB85E/TEG,
30 There's 60KB of iRAM starting at 0xFFFF0000, however we need the base
31 address to be addressable by a 16-bit signed offset, so we only use the
32 second half of it starting from 0xFFFF8000. */
33#define R0_RAM_ADDR 0xFFFF8000
34
35
36/* Hardware-specific interrupt numbers (in the kernel IRQ namespace).
37 Some of these are parameterized even though there's only a single
38 interrupt, for compatibility with some generic code that works on other
39 processor models. */
40#define IRQ_INTCMD(n) 6 /* interval timer interrupt */
41#define IRQ_INTCMD_NUM 1
42#define IRQ_INTSER(n) 16 /* UART reception error */
43#define IRQ_INTSER_NUM 1
44#define IRQ_INTSR(n) 17 /* UART reception completion */
45#define IRQ_INTSR_NUM 1
46#define IRQ_INTST(n) 18 /* UART transmission completion */
47#define IRQ_INTST_NUM 1
48
49/* For <asm/irq.h> */
50#define NUM_CPU_IRQS 64
51
52
53/* TEG UART details. */
54#define V850E_UART_BASE_ADDR(n) (0xFFFFF600 + 0x10 * (n))
55#define V850E_UART_ASIM_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x0)
56#define V850E_UART_ASIS_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x2)
57#define V850E_UART_ASIF_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x4)
58#define V850E_UART_CKSR_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x6)
59#define V850E_UART_BRGC_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x8)
60#define V850E_UART_TXB_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0xA)
61#define V850E_UART_RXB_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0xC)
62#define V850E_UART_NUM_CHANNELS 1
63#define V850E_UART_BASE_FREQ CPU_CLOCK_FREQ
64/* This is a function that gets called before configuring the UART. */
65#define V850E_UART_PRE_CONFIGURE teg_uart_pre_configure
66#ifndef __ASSEMBLY__
67extern void teg_uart_pre_configure (unsigned chan,
68 unsigned cflags, unsigned baud);
69#endif
70
71
72/* The TEG RTPU. */
73#define V850E_RTPU_BASE_ADDR 0xFFFFF210
74
75
76/* TEG series timer D details. */
77#define V850E_TIMER_D_BASE_ADDR 0xFFFFF210
78#define V850E_TIMER_D_TMCD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x0)
79#define V850E_TIMER_D_TMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x4)
80#define V850E_TIMER_D_CMD_BASE_ADDR (V850E_TIMER_D_BASE_ADDR + 0x8)
81#define V850E_TIMER_D_BASE_FREQ CPU_CLOCK_FREQ
82
83
84/* `Interrupt Source Select' control register. */
85#define TEG_ISS_ADDR 0xFFFFF7FA
86#define TEG_ISS (*(volatile u8 *)TEG_ISS_ADDR)
87
88/* Port 0 I/O register (bits 0-3 used). */
89#define TEG_PORT0_IO_ADDR 0xFFFFF7F2
90#define TEG_PORT0_IO (*(volatile u8 *)TEG_PORT0_IO_ADDR)
91/* Port 0 control register (bits 0-3 control mode, 0 = output, 1 = input). */
92#define TEG_PORT0_PM_ADDR 0xFFFFF7F4
93#define TEG_PORT0_PM (*(volatile u8 *)TEG_PORT0_PM_ADDR)
94
95
96#ifndef __ASSEMBLY__
97extern void teg_init_irqs (void);
98#endif
99
100
101#endif /* __V850_TEG_H__ */
diff --git a/include/asm-v850/termbits.h b/include/asm-v850/termbits.h
deleted file mode 100644
index 295d7bf69451..000000000000
--- a/include/asm-v850/termbits.h
+++ /dev/null
@@ -1,200 +0,0 @@
1#ifndef __V850_TERMBITS_H__
2#define __V850_TERMBITS_H__
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61
62/* c_iflag bits */
63#define IGNBRK 0000001
64#define BRKINT 0000002
65#define IGNPAR 0000004
66#define PARMRK 0000010
67#define INPCK 0000020
68#define ISTRIP 0000040
69#define INLCR 0000100
70#define IGNCR 0000200
71#define ICRNL 0000400
72#define IUCLC 0001000
73#define IXON 0002000
74#define IXANY 0004000
75#define IXOFF 0010000
76#define IMAXBEL 0020000
77#define IUTF8 0040000
78
79/* c_oflag bits */
80#define OPOST 0000001
81#define OLCUC 0000002
82#define ONLCR 0000004
83#define OCRNL 0000010
84#define ONOCR 0000020
85#define ONLRET 0000040
86#define OFILL 0000100
87#define OFDEL 0000200
88#define NLDLY 0000400
89#define NL0 0000000
90#define NL1 0000400
91#define CRDLY 0003000
92#define CR0 0000000
93#define CR1 0001000
94#define CR2 0002000
95#define CR3 0003000
96#define TABDLY 0014000
97#define TAB0 0000000
98#define TAB1 0004000
99#define TAB2 0010000
100#define TAB3 0014000
101#define XTABS 0014000
102#define BSDLY 0020000
103#define BS0 0000000
104#define BS1 0020000
105#define VTDLY 0040000
106#define VT0 0000000
107#define VT1 0040000
108#define FFDLY 0100000
109#define FF0 0000000
110#define FF1 0100000
111
112/* c_cflag bit meaning */
113#define CBAUD 0010017
114#define B0 0000000 /* hang up */
115#define B50 0000001
116#define B75 0000002
117#define B110 0000003
118#define B134 0000004
119#define B150 0000005
120#define B200 0000006
121#define B300 0000007
122#define B600 0000010
123#define B1200 0000011
124#define B1800 0000012
125#define B2400 0000013
126#define B4800 0000014
127#define B9600 0000015
128#define B19200 0000016
129#define B38400 0000017
130#define EXTA B19200
131#define EXTB B38400
132#define CSIZE 0000060
133#define CS5 0000000
134#define CS6 0000020
135#define CS7 0000040
136#define CS8 0000060
137#define CSTOPB 0000100
138#define CREAD 0000200
139#define PARENB 0000400
140#define PARODD 0001000
141#define HUPCL 0002000
142#define CLOCAL 0004000
143#define CBAUDEX 0010000
144#define BOTHER 0010000
145#define B57600 0010001
146#define B115200 0010002
147#define B230400 0010003
148#define B460800 0010004
149#define B500000 0010005
150#define B576000 0010006
151#define B921600 0010007
152#define B1000000 0010010
153#define B1152000 0010011
154#define B1500000 0010012
155#define B2000000 0010013
156#define B2500000 0010014
157#define B3000000 0010015
158#define B3500000 0010016
159#define B4000000 0010017
160#define CIBAUD 002003600000 /* input baud rate */
161#define CMSPAR 010000000000 /* mark or space (stick) parity */
162#define CRTSCTS 020000000000 /* flow control */
163
164#define IBSHIFT	16		/* Shift from CBAUD to CIBAUD */
165
166/* c_lflag bits */
167#define ISIG 0000001
168#define ICANON 0000002
169#define XCASE 0000004
170#define ECHO 0000010
171#define ECHOE 0000020
172#define ECHOK 0000040
173#define ECHONL 0000100
174#define NOFLSH 0000200
175#define TOSTOP 0000400
176#define ECHOCTL 0001000
177#define ECHOPRT 0002000
178#define ECHOKE 0004000
179#define FLUSHO 0010000
180#define PENDIN 0040000
181#define IEXTEN 0100000
182
183
184/* tcflow() and TCXONC use these */
185#define TCOOFF 0
186#define TCOON 1
187#define TCIOFF 2
188#define TCION 3
189
190/* tcflush() and TCFLSH use these */
191#define TCIFLUSH 0
192#define TCOFLUSH 1
193#define TCIOFLUSH 2
194
195/* tcsetattr uses these */
196#define TCSANOW 0
197#define TCSADRAIN 1
198#define TCSAFLUSH 2
199
200#endif /* __V850_TERMBITS_H__ */
diff --git a/include/asm-v850/termios.h b/include/asm-v850/termios.h
deleted file mode 100644
index fcd171838d9c..000000000000
--- a/include/asm-v850/termios.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef __V850_TERMIOS_H__
2#define __V850_TERMIOS_H__
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
42#ifdef __KERNEL__
43
44/* intr=^C quit=^\ erase=del kill=^U
45 eof=^D vtime=\0 vmin=\1 sxtc=\0
46 start=^Q stop=^S susp=^Z eol=\0
47 reprint=^R discard=^U werase=^W lnext=^V
48 eol2=\0
49*/
50#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
51
52/*
53 * Translate a "termio" structure into a "termios". Ugh.
54 */
55#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
56 unsigned short __tmp; \
57 get_user(__tmp,&(termio)->x); \
58 *(unsigned short *) &(termios)->x = __tmp; \
59}
60
61#define user_termio_to_kernel_termios(termios, termio) \
62({ \
63 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
64 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
65 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
66 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
67 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
68})
69
70/*
71 * Translate a "termios" structure into a "termio". Ugh.
72 */
73#define kernel_termios_to_user_termio(termio, termios) \
74({ \
75 put_user((termios)->c_iflag, &(termio)->c_iflag); \
76 put_user((termios)->c_oflag, &(termio)->c_oflag); \
77 put_user((termios)->c_cflag, &(termio)->c_cflag); \
78 put_user((termios)->c_lflag, &(termio)->c_lflag); \
79 put_user((termios)->c_line, &(termio)->c_line); \
80 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
81})
82
83#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
84#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
85#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
86#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
87
88#endif /* __KERNEL__ */
89
90#endif /* __V850_TERMIOS_H__ */
diff --git a/include/asm-v850/thread_info.h b/include/asm-v850/thread_info.h
deleted file mode 100644
index 1a9e6ae0c5fd..000000000000
--- a/include/asm-v850/thread_info.h
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * include/asm-v850/thread_info.h -- v850 low-level thread information
3 *
4 * Copyright (C) 2002 NEC Corporation
5 * Copyright (C) 2002 Miles Bader <miles@gnu.org>
6 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
7 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file COPYING in the main directory of this
11 * archive for more details.
12 *
13 * This file was derived from the PPC version, include/asm-ppc/thread_info.h
14 * which was adapted from the i386 version by Paul Mackerras
15 */
16
17#ifndef __V850_THREAD_INFO_H__
18#define __V850_THREAD_INFO_H__
19
20#ifdef __KERNEL__
21
22#ifndef __ASSEMBLY__
23
24/*
25 * low level task data.
26 * If you change this, change the TI_* offsets below to match.
27 */
28struct thread_info {
29 struct task_struct *task; /* main task structure */
30 struct exec_domain *exec_domain; /* execution domain */
31 unsigned long flags; /* low level flags */
32 int cpu; /* cpu we're on */
33 int preempt_count; /* 0 => preemptable,
34 <0 => BUG */
35 struct restart_block restart_block;
36};
37
38#define INIT_THREAD_INFO(tsk) \
39{ \
40 .task = &tsk, \
41 .exec_domain = &default_exec_domain, \
42 .flags = 0, \
43 .cpu = 0, \
44 .preempt_count = 1, \
45 .restart_block = { \
46 .fn = do_no_restart_syscall, \
47 }, \
48}
49
50#define init_thread_info (init_thread_union.thread_info)
51#define init_stack (init_thread_union.stack)
52
53/*
54 * macros/functions for gaining access to the thread information structure
55 */
56
57/* thread information allocation */
58#define alloc_thread_info(tsk) ((struct thread_info *) \
59 __get_free_pages(GFP_KERNEL, 1))
60#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
61
62#endif /* __ASSEMBLY__ */
63
64
65/*
66 * Offsets in thread_info structure, used in assembly code
67 */
68#define TI_TASK 0
69#define TI_EXECDOMAIN 4
70#define TI_FLAGS 8
71#define TI_CPU 12
72#define TI_PREEMPT 16
73
74#define PREEMPT_ACTIVE 0x4000000
75
76/*
77 * thread information flag bit numbers
78 */
79#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
80#define TIF_SIGPENDING 1 /* signal pending */
81#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
82#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
83 TIF_NEED_RESCHED */
84#define TIF_MEMDIE 4
85
86/* as above, but as bit values */
87#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
88#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
89#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
90#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
91
92
93/* Size of kernel stack for each process. */
94#define THREAD_SIZE 0x2000
95
96/* The alignment of kernel threads, with thread_info structures at their
97 base. Thus, a pointer for a task's task structure can be derived from
98 its kernel stack pointer. */
99#define THREAD_ALIGNMENT THREAD_SIZE
100#define THREAD_MASK (-THREAD_ALIGNMENT)
101
102
103#ifdef __ASSEMBLY__
104
105/* Put a pointer to the current thread_info structure into REG. Note that
106 this definition requires THREAD_MASK to be representable as a signed
107 16-bit value. */
108#define GET_CURRENT_THREAD(reg) \
109 /* Use `addi' and then `and' instead of just `andi', because \
110 `addi' sign-extends the immediate value, whereas `andi' \
111 zero-extends it. */ \
112 addi THREAD_MASK, r0, reg; \
113 and sp, reg
114
115#else
116
117/* Return a pointer to the current thread_info structure. */
118static inline struct thread_info *current_thread_info (void)
119{
120 register unsigned long sp __asm__ ("sp");
121 return (struct thread_info *)(sp & THREAD_MASK);
122}
123
124#endif /* __ASSEMBLY__ */
125
126
127#endif /* __KERNEL__ */
128
129#endif /* __V850_THREAD_INFO_H__ */
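
For illustration (not part of the deleted header): because kernel stacks are THREAD_SIZE (0x2000) bytes and aligned to that size, current_thread_info() recovers the thread_info at the stack base by masking the stack pointer, exactly as GET_CURRENT_THREAD does in assembly. A standalone sketch of the arithmetic, using a made-up stack pointer value:

    /* Hypothetical, host-runnable illustration of the masking above. */
    #include <stdio.h>
    #define EX_THREAD_ALIGNMENT 0x2000UL
    #define EX_THREAD_MASK      (-EX_THREAD_ALIGNMENT)

    int main(void)
    {
            unsigned long sp = 0x8F123ABCUL;          /* made-up kernel stack pointer */
            unsigned long ti = sp & EX_THREAD_MASK;   /* base of the 8KB stack */
            printf("thread_info at 0x%08lx\n", ti);   /* prints 0x8f122000 */
            return 0;
    }
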
diff --git a/include/asm-v850/timex.h b/include/asm-v850/timex.h
deleted file mode 100644
index 6279e5a0ee8e..000000000000
--- a/include/asm-v850/timex.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * linux/include/asm-v850/timex.h
3 *
4 * v850 architecture timex specifications
5 */
6#ifndef __V850_TIMEX_H__
7#define __V850_TIMEX_H__
8
9#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
10
11typedef unsigned long cycles_t;
12
13static inline cycles_t get_cycles(void)
14{
15 return 0;
16}
17
18#endif /* __V850_TIMEX_H__ */
diff --git a/include/asm-v850/tlb.h b/include/asm-v850/tlb.h
deleted file mode 100644
index 73bc9ead40dd..000000000000
--- a/include/asm-v850/tlb.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * include/asm-v850/tlb.h
3 *
4 * Copyright (C) 2002 NEC Corporation
5 * Copyright (C) 2002 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_TLB_H__
15#define __V850_TLB_H__
16
17#define tlb_flush(tlb) ((void)0)
18
19#include <asm-generic/tlb.h>
20
21#endif /* __V850_TLB_H__ */
diff --git a/include/asm-v850/tlbflush.h b/include/asm-v850/tlbflush.h
deleted file mode 100644
index c44aa64449c8..000000000000
--- a/include/asm-v850/tlbflush.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * include/asm-v850/tlbflush.h
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_TLBFLUSH_H__
15#define __V850_TLBFLUSH_H__
16
17#include <asm/machdep.h>
18
19
20/*
21 * flush all user-space atc entries.
22 */
23static inline void __flush_tlb(void)
24{
25 BUG ();
26}
27
28static inline void __flush_tlb_one(unsigned long addr)
29{
30 BUG ();
31}
32
33#define flush_tlb() __flush_tlb()
34
35/*
36 * flush all atc entries (both kernel and user-space entries).
37 */
38static inline void flush_tlb_all(void)
39{
40 BUG ();
41}
42
43static inline void flush_tlb_mm(struct mm_struct *mm)
44{
45 BUG ();
46}
47
48static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
49{
50 BUG ();
51}
52
53static inline void flush_tlb_range(struct vm_area_struct *vma,
54 unsigned long start, unsigned long end)
55{
56 BUG ();
57}
58
59static inline void flush_tlb_kernel_page(unsigned long addr)
60{
61 BUG ();
62}
63
64#endif /* __V850_TLBFLUSH_H__ */
diff --git a/include/asm-v850/topology.h b/include/asm-v850/topology.h
deleted file mode 100644
index 6040e41d7945..000000000000
--- a/include/asm-v850/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __V850_TOPOLOGY_H__
2#define __V850_TOPOLOGY_H__
3
4#include <asm-generic/topology.h>
5
6#endif /* __V850_TOPOLOGY_H__ */
diff --git a/include/asm-v850/types.h b/include/asm-v850/types.h
deleted file mode 100644
index 89f735ee41dd..000000000000
--- a/include/asm-v850/types.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef __V850_TYPES_H__
2#define __V850_TYPES_H__
3
4#ifndef __ASSEMBLY__
5
6/*
7 * This file is never included by application software unless
8 * explicitly requested (e.g., via linux/types.h) in which case the
9 * application is Linux specific so (user-) name space pollution is
10 * not a major issue. However, for interoperability, libraries still
11 * need to be careful to avoid name clashes.
12 */
13#include <asm-generic/int-ll64.h>
14
15typedef unsigned short umode_t;
16
17#endif /* !__ASSEMBLY__ */
18
19/*
20 * These aren't exported outside the kernel to avoid name space clashes
21 */
22#ifdef __KERNEL__
23
24#define BITS_PER_LONG 32
25
26#ifndef __ASSEMBLY__
27
28/* Dma addresses are 32-bits wide. */
29
30typedef u32 dma_addr_t;
31
32#endif /* !__ASSEMBLY__ */
33
34#endif /* __KERNEL__ */
35
36#endif /* __V850_TYPES_H__ */
diff --git a/include/asm-v850/uaccess.h b/include/asm-v850/uaccess.h
deleted file mode 100644
index 64563c409bb2..000000000000
--- a/include/asm-v850/uaccess.h
+++ /dev/null
@@ -1,159 +0,0 @@
1#ifndef __V850_UACCESS_H__
2#define __V850_UACCESS_H__
3
4/*
5 * User space memory access functions
6 */
7
8#include <linux/errno.h>
9#include <linux/string.h>
10
11#include <asm/segment.h>
12#include <asm/machdep.h>
13
14#define VERIFY_READ 0
15#define VERIFY_WRITE 1
16
17static inline int access_ok (int type, const void *addr, unsigned long size)
18{
19 /* XXX I guess we should check against real ram bounds at least, and
20 possibly make sure ADDR is not within the kernel.
21 For now we just check to make sure it's not a small positive
22 or negative value, as that will at least catch some kinds of
23 error. In particular, we make sure that ADDR's not within the
24 interrupt vector area, which we know starts at zero, or within the
25 peripheral-I/O area, which is located just _before_ zero. */
26 unsigned long val = (unsigned long)addr;
27 return val >= (0x80 + NUM_CPU_IRQS*16) && val < 0xFFFFF000;
28}
29
30/*
31 * The exception table consists of pairs of addresses: the first is the
32 * address of an instruction that is allowed to fault, and the second is
33 * the address at which the program should continue. No registers are
34 * modified, so it is entirely up to the continuation code to figure out
35 * what to do.
36 *
37 * All the routines below use bits of fixup code that are out of line
38 * with the main instruction path. This means when everything is well,
39 * we don't even have to jump over them. Further, they do not intrude
40 * on our cache or tlb entries.
41 */
42
43struct exception_table_entry
44{
45 unsigned long insn, fixup;
46};
47
48/* Returns 0 if exception not found and fixup otherwise. */
49extern unsigned long search_exception_table (unsigned long);
50
51
52/*
53 * These are the main single-value transfer routines. They automatically
54 * use the right size if we just have the right pointer type.
55 */
56
57extern int bad_user_access_length (void);
58
59#define __get_user(var, ptr) \
60 ({ \
61 int __gu_err = 0; \
62 typeof(*(ptr)) __gu_val = 0; \
63 switch (sizeof (*(ptr))) { \
64 case 1: \
65 case 2: \
66 case 4: \
67 __gu_val = *(ptr); \
68 break; \
69 case 8: \
70 memcpy(&__gu_val, ptr, sizeof(__gu_val)); \
71 break; \
72 default: \
73 __gu_val = 0; \
74 __gu_err = __get_user_bad (); \
75 break; \
76 } \
77 (var) = __gu_val; \
78 __gu_err; \
79 })
80#define __get_user_bad() (bad_user_access_length (), (-EFAULT))
81
82#define __put_user(var, ptr) \
83 ({ \
84 int __pu_err = 0; \
85 switch (sizeof (*(ptr))) { \
86 case 1: \
87 case 2: \
88 case 4: \
89 *(ptr) = (var); \
90 break; \
91 case 8: { \
92 typeof(*(ptr)) __pu_val = 0; \
93 memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
94 } \
95 break; \
96 default: \
97 __pu_err = __put_user_bad (); \
98 break; \
99 } \
100 __pu_err; \
101 })
102#define __put_user_bad() (bad_user_access_length (), (-EFAULT))
103
104#define put_user(x, ptr) __put_user(x, ptr)
105#define get_user(x, ptr) __get_user(x, ptr)
106
107#define __copy_from_user(to, from, n) (memcpy (to, from, n), 0)
108#define __copy_to_user(to, from, n) (memcpy(to, from, n), 0)
109
110#define __copy_to_user_inatomic __copy_to_user
111#define __copy_from_user_inatomic __copy_from_user
112
113#define copy_from_user(to, from, n) __copy_from_user (to, from, n)
114#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
115
116#define copy_to_user_ret(to,from,n,retval) \
117 ({ if (copy_to_user (to,from,n)) return retval; })
118
119#define copy_from_user_ret(to,from,n,retval) \
120 ({ if (copy_from_user (to,from,n)) return retval; })
121
122/*
123 * Copy a null terminated string from userspace.
124 */
125
126static inline long
127strncpy_from_user (char *dst, const char *src, long count)
128{
129 char *tmp;
130 strncpy (dst, src, count);
131 for (tmp = dst; *tmp && count > 0; tmp++, count--)
132 ;
133 return tmp - dst;
134}
135
136/*
137 * Return the size of a string (including the ending 0)
138 *
139 * Return 0 on exception, a value greater than N if too long
140 */
141static inline long strnlen_user (const char *src, long n)
142{
143 return strlen (src) + 1;
144}
145
146#define strlen_user(str) strnlen_user (str, 32767)
147
148/*
149 * Zero Userspace
150 */
151
152static inline unsigned long
153clear_user (void *to, unsigned long n)
154{
155 memset (to, 0, n);
156 return 0;
157}
158
159#endif /* __V850_UACCESS_H__ */
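
A hedged usage sketch (not from the deleted header): on this MMU-less port the accessors reduce to plain loads, stores, and memcpy(), so a typical call site looks the same as on MMU architectures, with access_ok() providing only the coarse range check above. The function and its arguments are hypothetical.

    /* Hypothetical call site using the accessors defined above. */
    static int example_bump_counter(int *uptr)
    {
            int val;

            if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
                    return -EFAULT;
            if (get_user(val, uptr))           /* plain dereference on v850 */
                    return -EFAULT;
            return put_user(val + 1, uptr);    /* write back the incremented value */
    }
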
diff --git a/include/asm-v850/ucontext.h b/include/asm-v850/ucontext.h
deleted file mode 100644
index 303c21590cff..000000000000
--- a/include/asm-v850/ucontext.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __V850_UCONTEXT_H__
2#define __V850_UCONTEXT_H__
3
4#include <asm/sigcontext.h>
5
6struct ucontext {
7 unsigned long uc_flags;
8 struct ucontext *uc_link;
9 stack_t uc_stack;
10 struct sigcontext uc_mcontext;
11 sigset_t uc_sigmask; /* mask last for extensibility */
12};
13
14#endif /* __V850_UCONTEXT_H__ */
diff --git a/include/asm-v850/unaligned.h b/include/asm-v850/unaligned.h
deleted file mode 100644
index 53122b28491e..000000000000
--- a/include/asm-v850/unaligned.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) 2001 NEC Corporation
3 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
4 *
5 * This file is subject to the terms and conditions of the GNU General
6 * Public License. See the file COPYING in the main directory of this
7 * archive for more details.
8 *
9 * Note that some v850 chips support unaligned access, but it seems too
10 * annoying to use.
11 */
12#ifndef _ASM_V850_UNALIGNED_H
13#define _ASM_V850_UNALIGNED_H
14
15#include <linux/unaligned/be_byteshift.h>
16#include <linux/unaligned/le_byteshift.h>
17#include <linux/unaligned/generic.h>
18
19#define get_unaligned __get_unaligned_le
20#define put_unaligned __put_unaligned_le
21
22#endif /* _ASM_V850_UNALIGNED_H */
diff --git a/include/asm-v850/unistd.h b/include/asm-v850/unistd.h
deleted file mode 100644
index 2241ed45ecfe..000000000000
--- a/include/asm-v850/unistd.h
+++ /dev/null
@@ -1,244 +0,0 @@
1/*
2 * include/asm-v850/unistd.h -- System call numbers and invocation mechanism
3 *
4 * Copyright (C) 2001,02,03,04 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03,04 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_UNISTD_H__
15#define __V850_UNISTD_H__
16
17#define __NR_restart_syscall 0
18#define __NR_exit 1
19#define __NR_fork 2
20#define __NR_read 3
21#define __NR_write 4
22#define __NR_open 5
23#define __NR_close 6
24#define __NR_waitpid 7
25#define __NR_creat 8
26#define __NR_link 9
27#define __NR_unlink 10
28#define __NR_execve 11
29#define __NR_chdir 12
30#define __NR_time 13
31#define __NR_mknod 14
32#define __NR_chmod 15
33#define __NR_chown 16
34#define __NR_break 17
35#define __NR_lseek 19
36#define __NR_getpid 20
37#define __NR_mount 21
38#define __NR_umount 22
39#define __NR_setuid 23
40#define __NR_getuid 24
41#define __NR_stime 25
42#define __NR_ptrace 26
43#define __NR_alarm 27
44#define __NR_pause 29
45#define __NR_utime 30
46#define __NR_stty 31
47#define __NR_gtty 32
48#define __NR_access 33
49#define __NR_nice 34
50#define __NR_ftime 35
51#define __NR_sync 36
52#define __NR_kill 37
53#define __NR_rename 38
54#define __NR_mkdir 39
55#define __NR_rmdir 40
56#define __NR_dup 41
57#define __NR_pipe 42
58#define __NR_times 43
59#define __NR_prof 44
60#define __NR_brk 45
61#define __NR_setgid 46
62#define __NR_getgid 47
63#define __NR_signal 48
64#define __NR_geteuid 49
65#define __NR_getegid 50
66#define __NR_acct 51
67#define __NR_umount2 52
68#define __NR_lock 53
69#define __NR_ioctl 54
70#define __NR_fcntl 55
71#define __NR_setpgid 57
72#define __NR_umask 60
73#define __NR_chroot 61
74#define __NR_ustat 62
75#define __NR_dup2 63
76#define __NR_getppid 64
77#define __NR_getpgrp 65
78#define __NR_setsid 66
79#define __NR_sigaction 67
80#define __NR_sgetmask 68
81#define __NR_ssetmask 69
82#define __NR_setreuid 70
83#define __NR_setregid 71
84#define __NR_sigsuspend 72
85#define __NR_sigpending 73
86#define __NR_sethostname 74
87#define __NR_setrlimit 75
88#define __NR_ugetrlimit 76
89#define __NR_getrusage 77
90#define __NR_gettimeofday 78
91#define __NR_settimeofday 79
92#define __NR_getgroups 80
93#define __NR_setgroups 81
94#define __NR_select 82
95#define __NR_symlink 83
96#define __NR_readlink 85
97#define __NR_uselib 86
98#define __NR_swapon 87
99#define __NR_reboot 88
100#define __NR_readdir 89
101#define __NR_mmap 90
102#define __NR_munmap 91
103#define __NR_truncate 92
104#define __NR_ftruncate 93
105#define __NR_fchmod 94
106#define __NR_fchown 95
107#define __NR_getpriority 96
108#define __NR_setpriority 97
109#define __NR_profil 98
110#define __NR_statfs 99
111#define __NR_fstatfs 100
112#define __NR_socketcall 102
113#define __NR_syslog 103
114#define __NR_setitimer 104
115#define __NR_getitimer 105
116#define __NR_stat 106
117#define __NR_lstat 107
118#define __NR_fstat 108
119#define __NR_vhangup 111
120#define __NR_wait4 114
121#define __NR_swapoff 115
122#define __NR_sysinfo 116
123#define __NR_ipc 117
124#define __NR_fsync 118
125#define __NR_sigreturn 119
126#define __NR_clone 120
127#define __NR_setdomainname 121
128#define __NR_uname 122
129#define __NR_cacheflush 123
130#define __NR_adjtimex 124
131#define __NR_mprotect 125
132#define __NR_sigprocmask 126
133#define __NR_create_module 127
134#define __NR_init_module 128
135#define __NR_delete_module 129
136#define __NR_get_kernel_syms 130
137#define __NR_quotactl 131
138#define __NR_getpgid 132
139#define __NR_fchdir 133
140#define __NR_bdflush 134
141#define __NR_sysfs 135
142#define __NR_personality 136
143#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
144#define __NR_setfsuid 138
145#define __NR_setfsgid 139
146#define __NR__llseek 140
147#define __NR_getdents 141
148#define __NR_flock 143
149#define __NR_msync 144
150#define __NR_readv 145
151#define __NR_writev 146
152#define __NR_getsid 147
153#define __NR_fdatasync 148
154#define __NR__sysctl 149
155#define __NR_mlock 150
156#define __NR_munlock 151
157#define __NR_mlockall 152
158#define __NR_munlockall 153
159#define __NR_sched_setparam 154
160#define __NR_sched_getparam 155
161#define __NR_sched_setscheduler 156
162#define __NR_sched_getscheduler 157
163#define __NR_sched_yield 158
164#define __NR_sched_get_priority_max 159
165#define __NR_sched_get_priority_min 160
166#define __NR_sched_rr_get_interval 161
167#define __NR_nanosleep 162
168#define __NR_mremap 163
169#define __NR_setresuid 164
170#define __NR_getresuid 165
171#define __NR_query_module 167
172#define __NR_poll 168
173#define __NR_nfsservctl 169
174#define __NR_setresgid 170
175#define __NR_getresgid 171
176#define __NR_prctl 172
177#define __NR_rt_sigreturn 173
178#define __NR_rt_sigaction 174
179#define __NR_rt_sigprocmask 175
180#define __NR_rt_sigpending 176
181#define __NR_rt_sigtimedwait 177
182#define __NR_rt_sigqueueinfo 178
183#define __NR_rt_sigsuspend 179
184#define __NR_pread 180
185#define __NR_pwrite 181
186#define __NR_lchown 182
187#define __NR_getcwd 183
188#define __NR_capget 184
189#define __NR_capset 185
190#define __NR_sigaltstack 186
191#define __NR_sendfile 187
192#define __NR_getpmsg 188 /* some people actually want streams */
193#define __NR_putpmsg 189 /* some people actually want streams */
194#define __NR_vfork 190
195#define __NR_mmap2 192
196#define __NR_truncate64 193
197#define __NR_ftruncate64 194
198#define __NR_stat64 195
199#define __NR_lstat64 196
200#define __NR_fstat64 197
201#define __NR_fcntl64 198
202#define __NR_getdents64 199
203#define __NR_pivot_root 200
204#define __NR_gettid 201
205#define __NR_tkill 202
206
207#ifdef __KERNEL__
208
209#define __ARCH_WANT_IPC_PARSE_VERSION
210#define __ARCH_WANT_OLD_READDIR
211#define __ARCH_WANT_STAT64
212#define __ARCH_WANT_SYS_ALARM
213#define __ARCH_WANT_SYS_GETHOSTNAME
214#define __ARCH_WANT_SYS_PAUSE
215#define __ARCH_WANT_SYS_SGETMASK
216#define __ARCH_WANT_SYS_SIGNAL
217#define __ARCH_WANT_SYS_TIME
218#define __ARCH_WANT_SYS_UTIME
219#define __ARCH_WANT_SYS_WAITPID
220#define __ARCH_WANT_SYS_SOCKETCALL
221#define __ARCH_WANT_SYS_FADVISE64
222#define __ARCH_WANT_SYS_GETPGRP
223#define __ARCH_WANT_SYS_LLSEEK
224#define __ARCH_WANT_SYS_NICE
225#define __ARCH_WANT_SYS_OLDUMOUNT
226#define __ARCH_WANT_SYS_SIGPENDING
227#define __ARCH_WANT_SYS_SIGPROCMASK
228#define __ARCH_WANT_SYS_RT_SIGACTION
229
230/*
231 * "Conditional" syscalls
232 */
233#define cond_syscall(name) \
234 asm (".weak\t" C_SYMBOL_STRING(name) ";" \
235 ".set\t" C_SYMBOL_STRING(name) "," C_SYMBOL_STRING(sys_ni_syscall))
236#if 0
237/* This doesn't work if there's a function prototype for NAME visible,
238 because the argument types probably won't match. */
239#define cond_syscall(name) \
240 void name (void) __attribute__ ((weak, alias ("sys_ni_syscall")));
241#endif
242
243#endif /* __KERNEL__ */
244#endif /* __V850_UNISTD_H__ */
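(For reference, not part of the patch: the cond_syscall() helper defined above is invoked once per optional syscall so that the syscall-table entry weakly resolves to sys_ni_syscall whenever the real implementation is configured out. A minimal sketch of that pattern, using sys_swapon purely as an illustration:)

    /* Typically placed in a sys_ni.c-style file that is always built in. */
    cond_syscall(sys_swapon);   /* falls back to sys_ni_syscall if swap support is absent */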
diff --git a/include/asm-v850/user.h b/include/asm-v850/user.h
deleted file mode 100644
index 63cdc567d272..000000000000
--- a/include/asm-v850/user.h
+++ /dev/null
@@ -1,52 +0,0 @@
1#ifndef __V850_USER_H__
2#define __V850_USER_H__
3
4/* Adapted from <asm-ppc/user.h>. */
5
6#include <linux/ptrace.h>
7#include <asm/page.h>
8
9/*
10 * Core file format: The core file is written in such a way that gdb
11 * can understand it and provide useful information to the user (under
12 * linux we use the `trad-core' bfd, NOT the osf-core). The file contents
13 * are as follows:
14 *
15 * upage: 1 page consisting of a user struct that tells gdb
16 * what is present in the file. Directly after this is a
17 * copy of the task_struct, which is currently not used by gdb,
18 * but it may come in handy at some point. All of the registers
19 * are stored as part of the upage. The upage should always be
20 * only one page long.
21 * data: The data segment follows next. We use current->end_text to
22 * current->brk to pick up all of the user variables, plus any memory
23 * that may have been sbrk'ed. No attempt is made to determine if a
24 * page is demand-zero or if a page is totally unused, we just cover
25 * the entire range. All of the addresses are rounded in such a way
26 * that an integral number of pages is written.
27 * stack: We need the stack information in order to get a meaningful
28 * backtrace. We need to write the data from usp to
29 * current->start_stack, so we round each of these in order to be able
30 * to write an integer number of pages.
31 */
32struct user {
33 struct pt_regs regs; /* entire machine state */
34 size_t u_tsize; /* text size (pages) */
35 size_t u_dsize; /* data size (pages) */
36 size_t u_ssize; /* stack size (pages) */
37 unsigned long start_code; /* text starting address */
38 unsigned long start_data; /* data starting address */
39 unsigned long start_stack; /* stack starting address */
40 long int signal; /* signal causing core dump */
41 unsigned long u_ar0; /* help gdb find registers */
42 unsigned long magic; /* identifies a core file */
43 char u_comm[32]; /* user command name */
44};
45
46#define NBPG PAGE_SIZE
47#define UPAGES 1
48#define HOST_TEXT_START_ADDR (u.start_code)
49#define HOST_DATA_START_ADDR (u.start_data)
50#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
51
52#endif /* __V850_USER_H__ */
diff --git a/include/asm-v850/v850e.h b/include/asm-v850/v850e.h
deleted file mode 100644
index 5a222eb5117f..000000000000
--- a/include/asm-v850/v850e.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * include/asm-v850/v850e.h -- V850E CPU
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_V850E_H__
15#define __V850_V850E_H__
16
17#include <asm/v850e_intc.h>
18
19#define CPU_ARCH "v850e"
20
21#endif /* __V850_V850E_H__ */
diff --git a/include/asm-v850/v850e2.h b/include/asm-v850/v850e2.h
deleted file mode 100644
index 48680408ab7e..000000000000
--- a/include/asm-v850/v850e2.h
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
2 * include/asm-v850/v850e2.h -- Machine-dependent defs for V850E2 CPUs
3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_V850E2_H__
15#define __V850_V850E2_H__
16
17#include <asm/v850e_intc.h> /* v850e-style interrupt system. */
18
19
20#define CPU_ARCH "v850e2"
21
22
23/* Control registers. */
24
25/* Chip area select control */
26#define V850E2_CSC_ADDR(n) (0xFFFFF060 + (n) * 2)
27#define V850E2_CSC(n) (*(volatile u16 *)V850E2_CSC_ADDR(n))
28/* I/O area select control */
29#define V850E2_BPC_ADDR 0xFFFFF064
30#define V850E2_BPC (*(volatile u16 *)V850E2_BPC_ADDR)
31/* Bus size configuration */
32#define V850E2_BSC_ADDR 0xFFFFF066
33#define V850E2_BSC (*(volatile u16 *)V850E2_BSC_ADDR)
34/* Endian configuration */
35#define V850E2_BEC_ADDR 0xFFFFF068
36#define V850E2_BEC (*(volatile u16 *)V850E2_BEC_ADDR)
37/* Cache configuration */
38#define V850E2_BHC_ADDR 0xFFFFF06A
39#define V850E2_BHC (*(volatile u16 *)V850E2_BHC_ADDR)
40/* NPB strobe-wait configuration */
41#define V850E2_VSWC_ADDR 0xFFFFF06E
42#define V850E2_VSWC (*(volatile u16 *)V850E2_VSWC_ADDR)
43/* Bus cycle type */
44#define V850E2_BCT_ADDR(n) (0xFFFFF480 + (n) * 2)
45#define V850E2_BCT(n) (*(volatile u16 *)V850E2_BCT_ADDR(n))
46/* Data wait control */
47#define V850E2_DWC_ADDR(n) (0xFFFFF484 + (n) * 2)
48#define V850E2_DWC(n) (*(volatile u16 *)V850E2_DWC_ADDR(n))
49/* Bus cycle control */
50#define V850E2_BCC_ADDR 0xFFFFF488
51#define V850E2_BCC (*(volatile u16 *)V850E2_BCC_ADDR)
52/* Address wait control */
53#define V850E2_ASC_ADDR 0xFFFFF48A
54#define V850E2_ASC (*(volatile u16 *)V850E2_ASC_ADDR)
55/* Local bus sizing control */
56#define V850E2_LBS_ADDR 0xFFFFF48E
57#define V850E2_LBS (*(volatile u16 *)V850E2_LBS_ADDR)
58/* Line buffer control */
59#define V850E2_LBC_ADDR(n) (0xFFFFF490 + (n) * 2)
60#define V850E2_LBC(n) (*(volatile u16 *)V850E2_LBC_ADDR(n))
61/* SDRAM configuration */
62#define V850E2_SCR_ADDR(n) (0xFFFFF4A0 + (n) * 4)
63#define V850E2_SCR(n) (*(volatile u16 *)V850E2_SCR_ADDR(n))
64/* SDRAM refresh cycle control */
65#define V850E2_RFS_ADDR(n) (0xFFFFF4A2 + (n) * 4)
66#define V850E2_RFS(n) (*(volatile u16 *)V850E2_RFS_ADDR(n))
67
68
69#endif /* __V850_V850E2_H__ */
diff --git a/include/asm-v850/v850e2_cache.h b/include/asm-v850/v850e2_cache.h
deleted file mode 100644
index 87edf0d311d5..000000000000
--- a/include/asm-v850/v850e2_cache.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * include/asm-v850/v850e2_cache.h -- Cache control for V850E2
3 * cache memories
4 *
5 * Copyright (C) 2003,05 NEC Electronics Corporation
6 * Copyright (C) 2003,05 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#ifndef __V850_V850E2_CACHE_H__
16#define __V850_V850E2_CACHE_H__
17
18#include <asm/types.h>
19
20
21/* Cache control registers. */
22
23/* Bus Transaction Control */
24#define V850E2_CACHE_BTSC_ADDR 0xFFFFF070
25#define V850E2_CACHE_BTSC (*(volatile u16 *)V850E2_CACHE_BTSC_ADDR)
26#define V850E2_CACHE_BTSC_ICM 0x0001 /* icache enable */
27#define V850E2_CACHE_BTSC_DCM0 0x0004 /* dcache enable, bit 0 */
28#define V850E2_CACHE_BTSC_DCM1 0x0008 /* dcache enable, bit 1 */
29#define V850E2_CACHE_BTSC_DCM_WT /* write-through */ \
30 V850E2_CACHE_BTSC_DCM0
31#ifdef CONFIG_V850E2_V850E2S
32# define V850E2_CACHE_BTSC_DCM_WB_NO_ALLOC /* write-back, non-alloc */ \
33 V850E2_CACHE_BTSC_DCM1
34# define V850E2_CACHE_BTSC_DCM_WB_ALLOC /* write-back, alloc */ \
35 (V850E2_CACHE_BTSC_DCM1 | V850E2_CACHE_BTSC_DCM0)
36# define V850E2_CACHE_BTSC_ISEQ 0x0010 /* icache `address sequence mode' */
37# define V850E2_CACHE_BTSC_DSEQ 0x0020 /* dcache `address sequence mode' */
38# define V850E2_CACHE_BTSC_IRFC 0x0030
39# define V850E2_CACHE_BTSC_ILCD 0x4000
40# define V850E2_CACHE_BTSC_VABE 0x8000
41#endif /* CONFIG_V850E2_V850E2S */
42
43/* Cache operation start address register (low-bits). */
44#define V850E2_CACHE_CADL_ADDR 0xFFFFF074
45#define V850E2_CACHE_CADL (*(volatile u16 *)V850E2_CACHE_CADL_ADDR)
46/* Cache operation start address register (high-bits). */
47#define V850E2_CACHE_CADH_ADDR 0xFFFFF076
48#define V850E2_CACHE_CADH (*(volatile u16 *)V850E2_CACHE_CADH_ADDR)
49/* Cache operation count register. */
50#define V850E2_CACHE_CCNT_ADDR 0xFFFFF078
51#define V850E2_CACHE_CCNT (*(volatile u16 *)V850E2_CACHE_CCNT_ADDR)
52/* Cache operation specification register. */
53#define V850E2_CACHE_COPR_ADDR 0xFFFFF07A
54#define V850E2_CACHE_COPR (*(volatile u16 *)V850E2_CACHE_COPR_ADDR)
55#define V850E2_CACHE_COPR_STRT 0x0001 /* start cache operation */
56#define V850E2_CACHE_COPR_LBSL 0x0100 /* 0 = icache, 1 = dcache */
57#define V850E2_CACHE_COPR_WSLE 0x0200 /* operate on cache way */
58#define V850E2_CACHE_COPR_WSL(way) ((way) * 0x0400) /* way select */
59#define V850E2_CACHE_COPR_CFC(op) ((op) * 0x1000) /* cache function code */
60
61
62/* Size of a cache line in bytes. */
63#define V850E2_CACHE_LINE_SIZE_BITS 4
64#define V850E2_CACHE_LINE_SIZE (1 << V850E2_CACHE_LINE_SIZE_BITS)
65
66/* The size of each cache `way' in lines. */
67#define V850E2_CACHE_WAY_SIZE 256
68
69
70/* For <asm/cache.h> */
71#define L1_CACHE_BYTES V850E2_CACHE_LINE_SIZE
72#define L1_CACHE_SHIFT V850E2_CACHE_LINE_SIZE_BITS
73
74
75#endif /* __V850_V850E2_CACHE_H__ */
diff --git a/include/asm-v850/v850e_cache.h b/include/asm-v850/v850e_cache.h
deleted file mode 100644
index aa7d7eb9da50..000000000000
--- a/include/asm-v850/v850e_cache.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * include/asm-v850/v850e_cache.h -- Cache control for V850E cache memories
3 *
4 * Copyright (C) 2001,03 NEC Electronics Corporation
5 * Copyright (C) 2001,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14/* This file implements cache control for the rather simple cache used on
15 some V850E CPUs, specifically the NB85E/TEG CPU-core and the V850E/ME2
16 CPU. V850E2 processors have their own (better) cache
17 implementation. */
18
19#ifndef __V850_V850E_CACHE_H__
20#define __V850_V850E_CACHE_H__
21
22#include <asm/types.h>
23
24
25/* Cache control registers. */
26#define V850E_CACHE_BHC_ADDR 0xFFFFF06A
27#define V850E_CACHE_BHC (*(volatile u16 *)V850E_CACHE_BHC_ADDR)
28#define V850E_CACHE_ICC_ADDR 0xFFFFF070
29#define V850E_CACHE_ICC (*(volatile u16 *)V850E_CACHE_ICC_ADDR)
30#define V850E_CACHE_ISI_ADDR 0xFFFFF072
31#define V850E_CACHE_ISI (*(volatile u16 *)V850E_CACHE_ISI_ADDR)
32#define V850E_CACHE_DCC_ADDR 0xFFFFF078
33#define V850E_CACHE_DCC (*(volatile u16 *)V850E_CACHE_DCC_ADDR)
34
35/* Size of a cache line in bytes. */
36#define V850E_CACHE_LINE_SIZE 16
37
38/* For <asm/cache.h> */
39#define L1_CACHE_BYTES V850E_CACHE_LINE_SIZE
40
41
42#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
43/* Set caching params via the BHC, ICC, and DCC registers. */
44void v850e_cache_enable (u16 bhc, u16 icc, u16 dcc);
45#endif /* __KERNEL__ && !__ASSEMBLY__ */
46
47
48#endif /* __V850_V850E_CACHE_H__ */
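(For reference, not part of the patch: a hedged sketch of how a machine port could call the hook declared above during early setup. The register values are hypothetical, since valid BHC/ICC/DCC settings are chip-specific:)

    #include <asm/v850e_cache.h>

    static void example_cache_init (void)          /* illustrative only */
    {
            /* Hypothetical values: enable the icache via BHC, leave ICC/DCC at reset defaults. */
            v850e_cache_enable (0x0101 /* bhc, made-up value */, 0 /* icc */, 0 /* dcc */);
    }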
diff --git a/include/asm-v850/v850e_intc.h b/include/asm-v850/v850e_intc.h
deleted file mode 100644
index 6fdf95708317..000000000000
--- a/include/asm-v850/v850e_intc.h
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2 * include/asm-v850/v850e_intc.h -- V850E CPU interrupt controller (INTC)
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14#ifndef __V850_V850E_INTC_H__
15#define __V850_V850E_INTC_H__
16
17
18/* There are 4 16-bit `Interrupt Mask Registers' located contiguously
19 starting from this base. Each interrupt uses a single bit to
20   indicate enabled/disabled status. */
21#define V850E_INTC_IMR_BASE_ADDR 0xFFFFF100
22#define V850E_INTC_IMR_ADDR(irq) (V850E_INTC_IMR_BASE_ADDR + ((irq) >> 3))
23#define V850E_INTC_IMR_BIT(irq) ((irq) & 0x7)
24
25/* Each maskable interrupt has a single-byte control register at this
26 address. */
27#define V850E_INTC_IC_BASE_ADDR 0xFFFFF110
28#define V850E_INTC_IC_ADDR(irq) (V850E_INTC_IC_BASE_ADDR + ((irq) << 1))
29#define V850E_INTC_IC(irq) (*(volatile u8 *)V850E_INTC_IC_ADDR(irq))
30/* Encode priority PR for storing in an interrupt control register. */
31#define V850E_INTC_IC_PR(pr) (pr)
32/* Interrupt disable bit in an interrupt control register. */
33#define V850E_INTC_IC_MK_BIT 6
34#define V850E_INTC_IC_MK (1 << V850E_INTC_IC_MK_BIT)
35/* Interrupt pending flag in an interrupt control register. */
36#define V850E_INTC_IC_IF_BIT 7
37#define V850E_INTC_IC_IF (1 << V850E_INTC_IC_IF_BIT)
38
39/* The ISPR (In-service priority register) contains one bit for each interrupt
40 priority level, which is set to one when that level is currently being
41 serviced (and thus blocking any interrupts of equal or lesser level). */
42#define V850E_INTC_ISPR_ADDR 0xFFFFF1FA
43#define V850E_INTC_ISPR (*(volatile u8 *)V850E_INTC_ISPR_ADDR)
44
45
46#ifndef __ASSEMBLY__
47
48/* Enable interrupt handling for interrupt IRQ. */
49static inline void v850e_intc_enable_irq (unsigned irq)
50{
51 __asm__ __volatile__ ("clr1 %0, [%1]"
52 :: "r" (V850E_INTC_IMR_BIT (irq)),
53 "r" (V850E_INTC_IMR_ADDR (irq))
54 : "memory");
55}
56
57/* Disable interrupt handling for interrupt IRQ. Note that any
58 interrupts received while disabled will be delivered once the
59 interrupt is enabled again, unless they are explicitly cleared using
60 `v850e_intc_clear_pending_irq'. */
61static inline void v850e_intc_disable_irq (unsigned irq)
62{
63 __asm__ __volatile__ ("set1 %0, [%1]"
64 :: "r" (V850E_INTC_IMR_BIT (irq)),
65 "r" (V850E_INTC_IMR_ADDR (irq))
66 : "memory");
67}
68
69/* Return true if interrupt handling for interrupt IRQ is enabled. */
70static inline int v850e_intc_irq_enabled (unsigned irq)
71{
72 int rval;
73 __asm__ __volatile__ ("tst1 %1, [%2]; setf z, %0"
74 : "=r" (rval)
75 : "r" (V850E_INTC_IMR_BIT (irq)),
76 "r" (V850E_INTC_IMR_ADDR (irq)));
77 return rval;
78}
79
80/* Disable irqs from 0 until LIMIT. LIMIT must be a multiple of 8. */
81static inline void _v850e_intc_disable_irqs (unsigned limit)
82{
83 unsigned long addr;
84 for (addr = V850E_INTC_IMR_BASE_ADDR; limit >= 8; addr++, limit -= 8)
85 *(char *)addr = 0xFF;
86}
87
88/* Disable all irqs. This is purposely a macro, because NUM_MACH_IRQS
89   will only be defined later. */
90#define v850e_intc_disable_irqs() _v850e_intc_disable_irqs (NUM_MACH_IRQS)
91
92/* Clear any pending interrupts for IRQ. */
93static inline void v850e_intc_clear_pending_irq (unsigned irq)
94{
95 __asm__ __volatile__ ("clr1 %0, 0[%1]"
96 :: "i" (V850E_INTC_IC_IF_BIT),
97 "r" (V850E_INTC_IC_ADDR (irq))
98 : "memory");
99}
100
101/* Return true if interrupt IRQ is pending (but disabled). */
102static inline int v850e_intc_irq_pending (unsigned irq)
103{
104 int rval;
105 __asm__ __volatile__ ("tst1 %1, 0[%2]; setf nz, %0"
106 : "=r" (rval)
107 : "i" (V850E_INTC_IC_IF_BIT),
108 "r" (V850E_INTC_IC_ADDR (irq)));
109 return rval;
110}
111
112
113struct v850e_intc_irq_init {
114 const char *name; /* name of interrupt type */
115
116 /* Range of kernel irq numbers for this type:
117 BASE, BASE+INTERVAL, ..., BASE+INTERVAL*NUM */
118 unsigned base, num, interval;
119
120 unsigned priority; /* interrupt priority to assign */
121};
122struct hw_interrupt_type; /* fwd decl */
123
124/* Initialize HW_IRQ_TYPES for INTC-controlled irqs described in array
125 INITS (which is terminated by an entry with the name field == 0). */
126extern void v850e_intc_init_irq_types (struct v850e_intc_irq_init *inits,
127 struct hw_interrupt_type *hw_irq_types);
128
129
130#endif /* !__ASSEMBLY__ */
131
132
133#endif /* __V850_V850E_INTC_H__ */
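(For reference, not part of the patch: a minimal sketch of the ack-then-unmask pattern the inline helpers above support. The wrapper function and its name are made up for illustration:)

    #include <asm/v850e_intc.h>

    static void example_unmask_irq (unsigned irq)  /* illustrative only */
    {
            /* Drop anything latched while the source was masked, then unmask it. */
            v850e_intc_clear_pending_irq (irq);
            v850e_intc_enable_irq (irq);
    }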
diff --git a/include/asm-v850/v850e_timer_c.h b/include/asm-v850/v850e_timer_c.h
deleted file mode 100644
index f70575df6ea9..000000000000
--- a/include/asm-v850/v850e_timer_c.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * include/asm-v850/v850e_timer_c.h -- `Timer C' component often used
3 * with the V850E cpu core
4 *
5 * Copyright (C) 2001,03 NEC Electronics Corporation
6 * Copyright (C) 2001,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15/* NOTE: this include file currently contains only enough to allow us to
16 use timer C as an interrupt pass-through. */
17
18#ifndef __V850_V850E_TIMER_C_H__
19#define __V850_V850E_TIMER_C_H__
20
21#include <asm/types.h>
22#include <asm/machdep.h> /* Pick up chip-specific defs. */
23
24
25/* Timer C (16-bit interval timers). */
26
27/* Control register 0 for timer C. */
28#define V850E_TIMER_C_TMCC0_ADDR(n) (V850E_TIMER_C_BASE_ADDR + 0x6 + 0x10 *(n))
29#define V850E_TIMER_C_TMCC0(n) (*(volatile u8 *)V850E_TIMER_C_TMCC0_ADDR(n))
30#define V850E_TIMER_C_TMCC0_CAE 0x01 /* clock action enable */
31#define V850E_TIMER_C_TMCC0_CE 0x02 /* count enable */
32/* ... */
33
34/* Control register 1 for timer C. */
35#define V850E_TIMER_C_TMCC1_ADDR(n) (V850E_TIMER_C_BASE_ADDR + 0x8 + 0x10 *(n))
36#define V850E_TIMER_C_TMCC1(n) (*(volatile u8 *)V850E_TIMER_C_TMCC1_ADDR(n))
37#define V850E_TIMER_C_TMCC1_CMS0 0x01 /* capture/compare mode select (ccc0) */
38#define V850E_TIMER_C_TMCC1_CMS1 0x02 /* capture/compare mode select (ccc1) */
39/* ... */
40
41/* Interrupt edge-sensitivity control for timer C. */
42#define V850E_TIMER_C_SESC_ADDR(n) (V850E_TIMER_C_BASE_ADDR + 0x9 + 0x10 *(n))
43#define V850E_TIMER_C_SESC(n) (*(volatile u8 *)V850E_TIMER_C_SESC_ADDR(n))
44
45/* ...etc... */
46
47
48#endif /* __V850_V850E_TIMER_C_H__ */
diff --git a/include/asm-v850/v850e_timer_d.h b/include/asm-v850/v850e_timer_d.h
deleted file mode 100644
index 417612c5b22f..000000000000
--- a/include/asm-v850/v850e_timer_d.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * include/asm-v850/v850e_timer_d.h -- `Timer D' component often used
3 * with the V850E cpu core
4 *
5 * Copyright (C) 2001,02,03 NEC Electronics Corporation
6 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#ifndef __V850_V850E_TIMER_D_H__
16#define __V850_V850E_TIMER_D_H__
17
18#include <asm/types.h>
19#include <asm/machdep.h> /* Pick up chip-specific defs. */
20
21
22/* Timer D (16-bit interval timers). */
23
24/* Count registers for timer D. */
25#define V850E_TIMER_D_TMD_ADDR(n) (V850E_TIMER_D_TMD_BASE_ADDR + 0x10 * (n))
26#define V850E_TIMER_D_TMD(n) (*(volatile u16 *)V850E_TIMER_D_TMD_ADDR(n))
27
28/* Count compare registers for timer D. */
29#define V850E_TIMER_D_CMD_ADDR(n) (V850E_TIMER_D_CMD_BASE_ADDR + 0x10 * (n))
30#define V850E_TIMER_D_CMD(n) (*(volatile u16 *)V850E_TIMER_D_CMD_ADDR(n))
31
32/* Control registers for timer D. */
33#define V850E_TIMER_D_TMCD_ADDR(n) (V850E_TIMER_D_TMCD_BASE_ADDR + 0x10 * (n))
34#define V850E_TIMER_D_TMCD(n) (*(volatile u8 *)V850E_TIMER_D_TMCD_ADDR(n))
35/* Control bits for timer D. */
36#define V850E_TIMER_D_TMCD_CE 0x2 /* count enable */
37#define V850E_TIMER_D_TMCD_CAE 0x1 /* clock action enable */
38/* Clock divider setting (log2). */
39#define V850E_TIMER_D_TMCD_CS(divlog2) (((divlog2) - V850E_TIMER_D_TMCD_CS_MIN) << 4)
40/* Minimum clock divider setting (log2). */
41#ifndef V850E_TIMER_D_TMCD_CS_MIN /* Can be overridden by mach-specific hdrs */
42#define V850E_TIMER_D_TMCD_CS_MIN 2 /* Default is correct for the v850e/ma1 */
43#endif
44/* Maximum clock divider setting (log2). */
45#define V850E_TIMER_D_TMCD_CS_MAX (V850E_TIMER_D_TMCD_CS_MIN + 7)
46
47/* Return the clock-divider (log2) of timer D unit N. */
48#define V850E_TIMER_D_DIVLOG2(n) \
49 (((V850E_TIMER_D_TMCD(n) >> 4) & 0x7) + V850E_TIMER_D_TMCD_CS_MIN)
50
51
52#ifndef __ASSEMBLY__
53
54/* Start interval timer TIMER (0-3). The timer will issue the
55 corresponding INTCMD interrupt RATE times per second. This function
56 does not enable the interrupt. */
57extern void v850e_timer_d_configure (unsigned timer, unsigned rate);
58
59#endif /* !__ASSEMBLY__ */
60
61
62#endif /* __V850_V850E_TIMER_D_H__ */
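(For reference, not part of the patch: a hedged sketch of driving the interval timer declared above for a periodic tick. The surrounding function is illustrative, and enabling the INTCMD interrupt is left to the INTC helpers:)

    #include <linux/param.h>          /* HZ */
    #include <asm/v850e_timer_d.h>

    static void example_tick_init (void)           /* illustrative only */
    {
            /* Fire INTCMD0 HZ times per second; the interrupt itself must be enabled separately. */
            v850e_timer_d_configure (0, HZ);
    }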
diff --git a/include/asm-v850/v850e_uart.h b/include/asm-v850/v850e_uart.h
deleted file mode 100644
index 5182fb4cc989..000000000000
--- a/include/asm-v850/v850e_uart.h
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * include/asm-v850/v850e_uart.h -- common V850E on-chip UART driver
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14/* There's not actually a single UART implementation used by V850E CPUs,
15 but rather a series of implementations that are all `close' to one
16 another. This file corresponds to the single driver which handles all
17 of them. */
18
19#ifndef __V850_V850E_UART_H__
20#define __V850_V850E_UART_H__
21
22#include <linux/termios.h>
23
24#include <asm/v850e_utils.h>
25#include <asm/types.h>
26#include <asm/machdep.h> /* Pick up chip-specific defs. */
27
28
29/* Include model-specific definitions. */
30#ifdef CONFIG_V850E_UART
31# ifdef CONFIG_V850E_UARTB
32# include <asm-v850/v850e_uartb.h>
33# else
34# include <asm-v850/v850e_uarta.h> /* original V850E UART */
35# endif
36#endif
37
38
39/* Optional capabilities some hardware provides. */
40
41/* This UART doesn't implement RTS/CTS by default, but some platforms
42 implement them externally, so check to see if <asm/machdep.h> defined
43 anything. */
44#ifdef V850E_UART_CTS
45#define v850e_uart_cts(n) V850E_UART_CTS(n)
46#else
47#define v850e_uart_cts(n) (1)
48#endif
49
50/* Do the same for RTS. */
51#ifdef V850E_UART_SET_RTS
52#define v850e_uart_set_rts(n,v) V850E_UART_SET_RTS(n,v)
53#else
54#define v850e_uart_set_rts(n,v) ((void)0)
55#endif
56
57
58/* This is the serial channel to use for the boot console (if desired). */
59#ifndef V850E_UART_CONSOLE_CHANNEL
60# define V850E_UART_CONSOLE_CHANNEL 0
61#endif
62
63
64#ifndef __ASSEMBLY__
65
66/* Set up a console using channel CHAN of the builtin uart. */
67extern void v850e_uart_cons_init (unsigned chan);
68
69/* Configure and turn on uart channel CHAN, using the termios `control
70 modes' bits in CFLAGS, and a baud-rate of BAUD. */
71void v850e_uart_configure (unsigned chan, unsigned cflags, unsigned baud);
72
73#endif /* !__ASSEMBLY__ */
74
75
76#endif /* __V850_V850E_UART_H__ */
diff --git a/include/asm-v850/v850e_uarta.h b/include/asm-v850/v850e_uarta.h
deleted file mode 100644
index e483e0950725..000000000000
--- a/include/asm-v850/v850e_uarta.h
+++ /dev/null
@@ -1,278 +0,0 @@
1/*
2 * include/asm-v850/v850e_uarta.h -- original V850E on-chip UART
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14/* The original V850E UART implementation is called just `UART' in
15 the docs, but we name this header file <asm/v850e_uarta.h> because the
16 name <asm/v850e_uart.h> is used for the common driver that handles both
17 `UART' and `UARTB' implementations. */
18
19#ifndef __V850_V850E_UARTA_H__
20#define __V850_V850E_UARTA_H__
21
22
23/* Raw hardware interface. */
24
25/* The base address of the UART control registers for channel N.
26 The default is the address used on the V850E/MA1. */
27#ifndef V850E_UART_BASE_ADDR
28#define V850E_UART_BASE_ADDR(n) (0xFFFFFA00 + 0x10 * (n))
29#endif
30
31/* Addresses of specific UART control registers for channel N.
32 The defaults are the addresses used on the V850E/MA1; if a platform
33 wants to redefine any of these, it must redefine them all. */
34#ifndef V850E_UART_ASIM_ADDR
35#define V850E_UART_ASIM_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x0)
36#define V850E_UART_RXB_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x2)
37#define V850E_UART_ASIS_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x3)
38#define V850E_UART_TXB_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x4)
39#define V850E_UART_ASIF_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x5)
40#define V850E_UART_CKSR_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x6)
41#define V850E_UART_BRGC_ADDR(n) (V850E_UART_BASE_ADDR(n) + 0x7)
42#endif
43
44/* UART config registers. */
45#define V850E_UART_ASIM(n) (*(volatile u8 *)V850E_UART_ASIM_ADDR(n))
46/* Control bits for config registers. */
47#define V850E_UART_ASIM_CAE 0x80 /* clock enable */
48#define V850E_UART_ASIM_TXE 0x40 /* transmit enable */
49#define V850E_UART_ASIM_RXE 0x20 /* receive enable */
50#define V850E_UART_ASIM_PS_MASK 0x18 /* mask covering parity-select bits */
51#define V850E_UART_ASIM_PS_NONE 0x00 /* no parity */
52#define V850E_UART_ASIM_PS_ZERO 0x08 /* zero parity */
53#define V850E_UART_ASIM_PS_ODD 0x10 /* odd parity */
54#define V850E_UART_ASIM_PS_EVEN 0x18 /* even parity */
55#define V850E_UART_ASIM_CL_8 0x04 /* char len is 8 bits (otherwise, 7) */
56#define V850E_UART_ASIM_SL_2 0x02 /* 2 stop bits (otherwise, 1) */
57#define V850E_UART_ASIM_ISRM 0x01 /* generate INTSR interrupt on errors
58 (otherwise, generate INTSER) */
59
60/* UART serial interface status registers. */
61#define V850E_UART_ASIS(n) (*(volatile u8 *)V850E_UART_ASIS_ADDR(n))
62/* Control bits for status registers. */
63#define V850E_UART_ASIS_PE 0x04 /* parity error */
64#define V850E_UART_ASIS_FE 0x02 /* framing error */
65#define V850E_UART_ASIS_OVE 0x01 /* overrun error */
66
67/* UART serial interface transmission status registers. */
68#define V850E_UART_ASIF(n) (*(volatile u8 *)V850E_UART_ASIF_ADDR(n))
69#define V850E_UART_ASIF_TXBF 0x02 /* transmit buffer flag (data in TXB) */
70#define V850E_UART_ASIF_TXSF 0x01 /* transmit shift flag (sending data) */
71
72/* UART receive buffer register. */
73#define V850E_UART_RXB(n) (*(volatile u8 *)V850E_UART_RXB_ADDR(n))
74
75/* UART transmit buffer register. */
76#define V850E_UART_TXB(n) (*(volatile u8 *)V850E_UART_TXB_ADDR(n))
77
78/* UART baud-rate generator control registers. */
79#define V850E_UART_CKSR(n) (*(volatile u8 *)V850E_UART_CKSR_ADDR(n))
80#define V850E_UART_CKSR_MAX 11
81#define V850E_UART_BRGC(n) (*(volatile u8 *)V850E_UART_BRGC_ADDR(n))
82#define V850E_UART_BRGC_MIN 8
83
84
85#ifndef V850E_UART_CKSR_MAX_FREQ
86#define V850E_UART_CKSR_MAX_FREQ (25*1000*1000)
87#endif
88
89/* Calculate the minimum value for CKSR on this processor. */
90static inline unsigned v850e_uart_cksr_min (void)
91{
92 int min = 0;
93 unsigned freq = V850E_UART_BASE_FREQ;
94 while (freq > V850E_UART_CKSR_MAX_FREQ) {
95 freq >>= 1;
96 min++;
97 }
98 return min;
99}
100
101
102/* Slightly abstract interface used by driver. */
103
104
105/* Interrupts used by the UART. */
106
107/* Received when the most recently transmitted character has been sent. */
108#define V850E_UART_TX_IRQ(chan) IRQ_INTST (chan)
109/* Received when a new character has been received. */
110#define V850E_UART_RX_IRQ(chan) IRQ_INTSR (chan)
111
112
113/* UART clock generator interface. */
114
115/* This type encapsulates a particular uart frequency. */
116typedef struct {
117 unsigned clk_divlog2;
118 unsigned brgen_count;
119} v850e_uart_speed_t;
120
121/* Calculate a uart speed from BAUD for this uart. */
122static inline v850e_uart_speed_t v850e_uart_calc_speed (unsigned baud)
123{
124 v850e_uart_speed_t speed;
125
126 /* Calculate the log2 clock divider and baud-rate counter values
127 (note that the UART divides the resulting clock by 2, so
128 multiply BAUD by 2 here to compensate). */
129 calc_counter_params (V850E_UART_BASE_FREQ, baud * 2,
130 v850e_uart_cksr_min(),
131 V850E_UART_CKSR_MAX, 8/*bits*/,
132 &speed.clk_divlog2, &speed.brgen_count);
133
134 return speed;
135}
136
137/* Return the current speed of uart channel CHAN. */
138static inline v850e_uart_speed_t v850e_uart_speed (unsigned chan)
139{
140 v850e_uart_speed_t speed;
141 speed.clk_divlog2 = V850E_UART_CKSR (chan);
142 speed.brgen_count = V850E_UART_BRGC (chan);
143 return speed;
144}
145
146/* Set the current speed of uart channel CHAN. */
147static inline void v850e_uart_set_speed(unsigned chan,v850e_uart_speed_t speed)
148{
149 V850E_UART_CKSR (chan) = speed.clk_divlog2;
150 V850E_UART_BRGC (chan) = speed.brgen_count;
151}
152
153static inline int
154v850e_uart_speed_eq (v850e_uart_speed_t speed1, v850e_uart_speed_t speed2)
155{
156 return speed1.clk_divlog2 == speed2.clk_divlog2
157 && speed1.brgen_count == speed2.brgen_count;
158}
159
160/* Minimum baud rate possible. */
161#define v850e_uart_min_baud() \
162 ((V850E_UART_BASE_FREQ >> V850E_UART_CKSR_MAX) / (2 * 255) + 1)
163
164/* Maximum baud rate possible. The error is quite high at max, though. */
165#define v850e_uart_max_baud() \
166 ((V850E_UART_BASE_FREQ >> v850e_uart_cksr_min()) / (2 *V850E_UART_BRGC_MIN))
167
168/* The `maximum' clock rate the uart can use, which is wanted (though not
169 really used in any useful way) by the serial framework. */
170#define v850e_uart_max_clock() \
171 ((V850E_UART_BASE_FREQ >> v850e_uart_cksr_min()) / 2)
172
173
174/* UART configuration interface. */
175
176/* Type of the uart config register; must be a scalar. */
177typedef u16 v850e_uart_config_t;
178
179/* The uart hardware config register for channel CHAN. */
180#define V850E_UART_CONFIG(chan) V850E_UART_ASIM (chan)
181
182/* This config bit set if the uart is enabled. */
183#define V850E_UART_CONFIG_ENABLED V850E_UART_ASIM_CAE
184/* If the uart _isn't_ enabled, store this value to it to do so. */
185#define V850E_UART_CONFIG_INIT V850E_UART_ASIM_CAE
186/* Store this config value to disable the uart channel completely. */
187#define V850E_UART_CONFIG_FINI 0
188
189/* Setting/clearing these bits enable/disable TX/RX, respectively (but
190 otherwise generally leave things running). */
191#define V850E_UART_CONFIG_RX_ENABLE V850E_UART_ASIM_RXE
192#define V850E_UART_CONFIG_TX_ENABLE V850E_UART_ASIM_TXE
193
194/* These masks define which config bits affect TX/RX modes, respectively. */
195#define V850E_UART_CONFIG_RX_BITS \
196 (V850E_UART_ASIM_PS_MASK | V850E_UART_ASIM_CL_8 | V850E_UART_ASIM_ISRM)
197#define V850E_UART_CONFIG_TX_BITS \
198 (V850E_UART_ASIM_PS_MASK | V850E_UART_ASIM_CL_8 | V850E_UART_ASIM_SL_2)
199
200static inline v850e_uart_config_t v850e_uart_calc_config (unsigned cflags)
201{
202 v850e_uart_config_t config = 0;
203
204 /* Figure out new configuration of control register. */
205 if (cflags & CSTOPB)
206 /* Number of stop bits, 1 or 2. */
207 config |= V850E_UART_ASIM_SL_2;
208 if ((cflags & CSIZE) == CS8)
209 /* Number of data bits, 7 or 8. */
210 config |= V850E_UART_ASIM_CL_8;
211 if (! (cflags & PARENB))
212 /* No parity check/generation. */
213 config |= V850E_UART_ASIM_PS_NONE;
214 else if (cflags & PARODD)
215 /* Odd parity check/generation. */
216 config |= V850E_UART_ASIM_PS_ODD;
217 else
218 /* Even parity check/generation. */
219 config |= V850E_UART_ASIM_PS_EVEN;
220 if (cflags & CREAD)
221 /* Reading enabled. */
222 config |= V850E_UART_ASIM_RXE;
223
224 config |= V850E_UART_ASIM_CAE;
225 config |= V850E_UART_ASIM_TXE; /* Writing is always enabled. */
226 config |= V850E_UART_ASIM_ISRM; /* Errors generate a read-irq. */
227
228 return config;
229}
230
231/* This should delay as long as necessary for a recently written config
232 setting to settle, before we turn the uart back on. */
233static inline void
234v850e_uart_config_delay (v850e_uart_config_t config, v850e_uart_speed_t speed)
235{
236 /* The UART may not be reset properly unless we wait at least 2
237 `basic-clocks' until turning on the TXE/RXE bits again.
238 A `basic clock' is the clock used by the baud-rate generator,
239   i.e., the cpu clock divided by 2^new_clk_divlog2.
240 The loop takes 2 insns, so loop CYCLES / 2 times. */
241 register unsigned count = 1 << speed.clk_divlog2;
242 while (--count != 0)
243 /* nothing */;
244}
245
246
247/* RX/TX interface. */
248
249/* Return true if all characters awaiting transmission on uart channel N
250 have been transmitted. */
251#define v850e_uart_xmit_done(n) \
252 (! (V850E_UART_ASIF(n) & V850E_UART_ASIF_TXBF))
253/* Wait for this to be true. */
254#define v850e_uart_wait_for_xmit_done(n) \
255 do { } while (! v850e_uart_xmit_done (n))
256
257/* Return true if uart channel N is ready to transmit a character. */
258#define v850e_uart_xmit_ok(n) \
259 (v850e_uart_xmit_done(n) && v850e_uart_cts(n))
260/* Wait for this to be true. */
261#define v850e_uart_wait_for_xmit_ok(n) \
262 do { } while (! v850e_uart_xmit_ok (n))
263
264/* Write character CH to uart channel CHAN. */
265#define v850e_uart_putc(chan, ch) (V850E_UART_TXB(chan) = (ch))
266
267/* Return latest character read on channel CHAN. */
268#define v850e_uart_getc(chan) V850E_UART_RXB (chan)
269
270/* Return bit-mask of uart error status. */
271#define v850e_uart_err(chan) V850E_UART_ASIS (chan)
272/* Various error bits set in the error result. */
273#define V850E_UART_ERR_OVERRUN V850E_UART_ASIS_OVE
274#define V850E_UART_ERR_FRAME V850E_UART_ASIS_FE
275#define V850E_UART_ERR_PARITY V850E_UART_ASIS_PE
276
277
278#endif /* __V850_V850E_UARTA_H__ */
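(For reference, not part of the patch: a short sketch of a polled transmit path built only from the macros defined in this header. The function itself is illustrative:)

    static void example_poll_putc (unsigned chan, char ch)  /* illustrative only */
    {
            /* Wait until the TX buffer drains (and CTS, if wired up, is asserted)... */
            v850e_uart_wait_for_xmit_ok (chan);
            /* ...then hand the character to the transmit buffer register. */
            v850e_uart_putc (chan, ch);
    }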
diff --git a/include/asm-v850/v850e_uartb.h b/include/asm-v850/v850e_uartb.h
deleted file mode 100644
index 6d4767d5a835..000000000000
--- a/include/asm-v850/v850e_uartb.h
+++ /dev/null
@@ -1,262 +0,0 @@
1/*
2 * include/asm-v850/v850e_uartb.h -- V850E on-chip `UARTB' UART
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
13
14/* The V850E UARTB is basically a superset of the original V850E UART, but
15 even where it's the same, the names and details have changed a bit.
16 It's similar enough to use the same driver (v850e_uart.c), but the
17 details have been abstracted slightly to do so. */
18
19#ifndef __V850_V850E_UARTB_H__
20#define __V850_V850E_UARTB_H__
21
22
23/* Raw hardware interface. */
24
25#define V850E_UARTB_BASE_ADDR(n) (0xFFFFFA00 + 0x10 * (n))
26
27/* Addresses of specific UART control registers for channel N. */
28#define V850E_UARTB_CTL0_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0x0)
29#define V850E_UARTB_CTL2_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0x2)
30#define V850E_UARTB_STR_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0x4)
31#define V850E_UARTB_RX_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0x6)
32#define V850E_UARTB_RXAP_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0x6)
33#define V850E_UARTB_TX_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0x8)
34#define V850E_UARTB_FIC0_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0xA)
35#define V850E_UARTB_FIC1_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0xB)
36#define V850E_UARTB_FIC2_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0xC)
37#define V850E_UARTB_FIS0_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0xE)
38#define V850E_UARTB_FIS1_ADDR(n) (V850E_UARTB_BASE_ADDR(n) + 0xF)
39
40/* UARTB control register 0 (general config). */
41#define V850E_UARTB_CTL0(n) (*(volatile u8 *)V850E_UARTB_CTL0_ADDR(n))
42/* Control bits for config registers. */
43#define V850E_UARTB_CTL0_PWR 0x80 /* clock enable */
44#define V850E_UARTB_CTL0_TXE 0x40 /* transmit enable */
45#define V850E_UARTB_CTL0_RXE 0x20 /* receive enable */
46#define V850E_UARTB_CTL0_DIR 0x10 /* transfer direction (set = LSB first) */
47#define V850E_UARTB_CTL0_PS1 0x08 /* parity */
48#define V850E_UARTB_CTL0_PS0 0x04 /* parity */
49#define V850E_UARTB_CTL0_CL 0x02 /* char len 1:8bit, 0:7bit */
50#define V850E_UARTB_CTL0_SL 0x01 /* stop bit 1:2bit, 0:1bit */
51#define V850E_UARTB_CTL0_PS_MASK 0x0C /* mask covering parity bits */
52#define V850E_UARTB_CTL0_PS_NONE 0x00 /* no parity */
53#define V850E_UARTB_CTL0_PS_ZERO 0x04 /* zero parity */
54#define V850E_UARTB_CTL0_PS_ODD 0x08 /* odd parity */
55#define V850E_UARTB_CTL0_PS_EVEN 0x0C /* even parity */
56#define V850E_UARTB_CTL0_CL_8 0x02 /* char len 1:8bit, 0:7bit */
57#define V850E_UARTB_CTL0_SL_2 0x01 /* stop bit 1:2bit, 0:1bit */
58
59/* UARTB control register 2 (clock divider). */
60#define V850E_UARTB_CTL2(n) (*(volatile u16 *)V850E_UARTB_CTL2_ADDR(n))
61#define V850E_UARTB_CTL2_MIN 4
62#define V850E_UARTB_CTL2_MAX 0xFFFF
63
64/* UARTB serial interface status register. */
65#define V850E_UARTB_STR(n) (*(volatile u8 *)V850E_UARTB_STR_ADDR(n))
66/* Control bits for status registers. */
67#define V850E_UARTB_STR_TSF 0x80 /* data pending in UBTX or FIFO */
68#define V850E_UARTB_STR_OVF 0x08 /* overflow error */
69#define V850E_UARTB_STR_PE 0x04 /* parity error */
70#define V850E_UARTB_STR_FE 0x02 /* framing error */
71#define V850E_UARTB_STR_OVE 0x01 /* overrun error */
72
73/* UARTB receive data register. */
74#define V850E_UARTB_RX(n) (*(volatile u8 *)V850E_UARTB_RX_ADDR(n))
75#define V850E_UARTB_RXAP(n) (*(volatile u16 *)V850E_UARTB_RXAP_ADDR(n))
76/* Control bits for status registers. */
77#define V850E_UARTB_RXAP_PEF 0x0200 /* parity error */
78#define V850E_UARTB_RXAP_FEF 0x0100 /* framing error */
79
80/* UARTB transmit data register. */
81#define V850E_UARTB_TX(n) (*(volatile u8 *)V850E_UARTB_TX_ADDR(n))
82
83/* UARTB FIFO control register 0. */
84#define V850E_UARTB_FIC0(n) (*(volatile u8 *)V850E_UARTB_FIC0_ADDR(n))
85
86/* UARTB FIFO control register 1. */
87#define V850E_UARTB_FIC1(n) (*(volatile u8 *)V850E_UARTB_FIC1_ADDR(n))
88
89/* UARTB FIFO control register 2. */
90#define V850E_UARTB_FIC2(n) (*(volatile u16 *)V850E_UARTB_FIC2_ADDR(n))
91
92/* UARTB FIFO status register 0. */
93#define V850E_UARTB_FIS0(n) (*(volatile u8 *)V850E_UARTB_FIS0_ADDR(n))
94
95/* UARTB FIFO status register 1. */
96#define V850E_UARTB_FIS1(n) (*(volatile u8 *)V850E_UARTB_FIS1_ADDR(n))
97
98
99/* Slightly abstract interface used by driver. */
100
101
102/* Interrupts used by the UART. */
103
104/* Received when the most recently transmitted character has been sent. */
105#define V850E_UART_TX_IRQ(chan) IRQ_INTUBTIT (chan)
106/* Received when a new character has been received. */
107#define V850E_UART_RX_IRQ(chan) IRQ_INTUBTIR (chan)
108
109/* Used by the serial driver for informational purposes. */
110#define V850E_UART_BASE_ADDR(chan) V850E_UARTB_BASE_ADDR(chan)
111
112
113/* UART clock generator interface. */
114
115/* This type encapsulates a particular uart frequency. */
116typedef u16 v850e_uart_speed_t;
117
118/* Calculate a uart speed from BAUD for this uart. */
119static inline v850e_uart_speed_t v850e_uart_calc_speed (unsigned baud)
120{
121 v850e_uart_speed_t speed;
122
123 /*
124 * V850E/ME2 UARTB baud rate is determined by the value of UBCTL2
125 * fx = V850E_UARTB_BASE_FREQ = CPU_CLOCK_FREQ/4
126	 * baud = fx / (2 * speed)   [ speed >= 4 ]
127 */
128 speed = V850E_UARTB_CTL2_MIN;
129 while (((V850E_UARTB_BASE_FREQ / 2) / speed ) > baud)
130 speed++;
131
132 return speed;
133}
134
135/* Return the current speed of uart channel CHAN. */
136#define v850e_uart_speed(chan) V850E_UARTB_CTL2 (chan)
137
138/* Set the current speed of uart channel CHAN. */
139#define v850e_uart_set_speed(chan, speed) (V850E_UARTB_CTL2 (chan) = (speed))
140
141/* Return true if SPEED1 and SPEED2 are the same. */
142#define v850e_uart_speed_eq(speed1, speed2) ((speed1) == (speed2))
143
144/* Minimum baud rate possible. */
145#define v850e_uart_min_baud() \
146 ((V850E_UARTB_BASE_FREQ / 2) / V850E_UARTB_CTL2_MAX)
147
148/* Maximum baud rate possible. The error is quite high at max, though. */
149#define v850e_uart_max_baud() \
150 ((V850E_UARTB_BASE_FREQ / 2) / V850E_UARTB_CTL2_MIN)
151
152/* The `maximum' clock rate the uart can use, which is wanted (though not
153 really used in any useful way) by the serial framework. */
154#define v850e_uart_max_clock() \
155 (V850E_UARTB_BASE_FREQ / 2)
156
157
158/* UART configuration interface. */
159
160/* Type of the uart config register; must be a scalar. */
161typedef u16 v850e_uart_config_t;
162
163/* The uart hardware config register for channel CHAN. */
164#define V850E_UART_CONFIG(chan) V850E_UARTB_CTL0 (chan)
165
166/* This config bit set if the uart is enabled. */
167#define V850E_UART_CONFIG_ENABLED V850E_UARTB_CTL0_PWR
168/* If the uart _isn't_ enabled, store this value to it to do so. */
169#define V850E_UART_CONFIG_INIT V850E_UARTB_CTL0_PWR
170/* Store this config value to disable the uart channel completely. */
171#define V850E_UART_CONFIG_FINI 0
172
173/* Setting/clearing these bits enable/disable TX/RX, respectively (but
174 otherwise generally leave things running). */
175#define V850E_UART_CONFIG_RX_ENABLE V850E_UARTB_CTL0_RXE
176#define V850E_UART_CONFIG_TX_ENABLE V850E_UARTB_CTL0_TXE
177
178/* These masks define which config bits affect TX/RX modes, respectively. */
179#define V850E_UART_CONFIG_RX_BITS \
180 (V850E_UARTB_CTL0_PS_MASK | V850E_UARTB_CTL0_CL_8)
181#define V850E_UART_CONFIG_TX_BITS \
182 (V850E_UARTB_CTL0_PS_MASK | V850E_UARTB_CTL0_CL_8 | V850E_UARTB_CTL0_SL_2)
183
184static inline v850e_uart_config_t v850e_uart_calc_config (unsigned cflags)
185{
186 v850e_uart_config_t config = 0;
187
188 /* Figure out new configuration of control register. */
189 if (cflags & CSTOPB)
190 /* Number of stop bits, 1 or 2. */
191 config |= V850E_UARTB_CTL0_SL_2;
192 if ((cflags & CSIZE) == CS8)
193 /* Number of data bits, 7 or 8. */
194 config |= V850E_UARTB_CTL0_CL_8;
195 if (! (cflags & PARENB))
196 /* No parity check/generation. */
197 config |= V850E_UARTB_CTL0_PS_NONE;
198 else if (cflags & PARODD)
199 /* Odd parity check/generation. */
200 config |= V850E_UARTB_CTL0_PS_ODD;
201 else
202 /* Even parity check/generation. */
203 config |= V850E_UARTB_CTL0_PS_EVEN;
204 if (cflags & CREAD)
205 /* Reading enabled. */
206 config |= V850E_UARTB_CTL0_RXE;
207
208 config |= V850E_UARTB_CTL0_PWR;
209 config |= V850E_UARTB_CTL0_TXE; /* Writing is always enabled. */
210 config |= V850E_UARTB_CTL0_DIR; /* LSB first. */
211
212 return config;
213}
214
215/* This should delay as long as necessary for a recently written config
216 setting to settle, before we turn the uart back on. */
217static inline void
218v850e_uart_config_delay (v850e_uart_config_t config, v850e_uart_speed_t speed)
219{
220 /* The UART may not be reset properly unless we wait at least 2
221 `basic-clocks' until turning on the TXE/RXE bits again.
222 A `basic clock' is the clock used by the baud-rate generator,
223   i.e., the cpu clock divided by 2^new_clk_divlog2.
224 The loop takes 2 insns, so loop CYCLES / 2 times. */
225 register unsigned count = 1 << speed;
226 while (--count != 0)
227 /* nothing */;
228}
229
230
231/* RX/TX interface. */
232
233/* Return true if all characters awaiting transmission on uart channel N
234 have been transmitted. */
235#define v850e_uart_xmit_done(n) \
236 (! (V850E_UARTB_STR(n) & V850E_UARTB_STR_TSF))
237/* Wait for this to be true. */
238#define v850e_uart_wait_for_xmit_done(n) \
239 do { } while (! v850e_uart_xmit_done (n))
240
241/* Return true if uart channel N is ready to transmit a character. */
242#define v850e_uart_xmit_ok(n) \
243 (v850e_uart_xmit_done(n) && v850e_uart_cts(n))
244/* Wait for this to be true. */
245#define v850e_uart_wait_for_xmit_ok(n) \
246 do { } while (! v850e_uart_xmit_ok (n))
247
248/* Write character CH to uart channel CHAN. */
249#define v850e_uart_putc(chan, ch) (V850E_UARTB_TX(chan) = (ch))
250
251/* Return latest character read on channel CHAN. */
252#define v850e_uart_getc(chan) V850E_UARTB_RX (chan)
253
254/* Return bit-mask of uart error status. */
255#define v850e_uart_err(chan) V850E_UARTB_STR (chan)
256/* Various error bits set in the error result. */
257#define V850E_UART_ERR_OVERRUN V850E_UARTB_STR_OVE
258#define V850E_UART_ERR_FRAME V850E_UARTB_STR_FE
259#define V850E_UART_ERR_PARITY V850E_UARTB_STR_PE
260
261
262#endif /* __V850_V850E_UARTB_H__ */
diff --git a/include/asm-v850/v850e_utils.h b/include/asm-v850/v850e_utils.h
deleted file mode 100644
index 52eb72822d3d..000000000000
--- a/include/asm-v850/v850e_utils.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * include/asm-v850/v850e_utils.h -- Utility functions associated with
3 * V850E CPUs
4 *
5 * Copyright (C) 2001,03 NEC Electronics Corporation
6 * Copyright (C) 2001,03 Miles Bader <miles@gnu.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * Written by Miles Bader <miles@gnu.org>
13 */
14
15#ifndef __V850_V850E_UTILS_H__
16#define __V850_V850E_UTILS_H__
17
18/* Calculate counter clock-divider and count values to attain the
19 desired frequency RATE from the base frequency BASE_FREQ. The
20 counter is expected to have a clock-divider, which can divide the
21 system cpu clock by a power of two value from MIN_DIVLOG2 to
22   MAX_DIVLOG2, and a word-size of COUNTER_SIZE bits (the counter
23 counts up and resets whenever it's equal to the compare register,
24 generating an interrupt or whatever when it does so). The returned
25 values are: *DIVLOG2 -- log2 of the desired clock divider and *COUNT
26 -- the counter compare value to use. Returns true if it was possible
27 to find a reasonable value, otherwise false (and the other return
28 values will be set to be as good as possible). */
29extern int calc_counter_params (unsigned long base_freq,
30 unsigned long rate,
31 unsigned min_divlog2, unsigned max_divlog2,
32 unsigned counter_size,
33 unsigned *divlog2, unsigned *count);
34
35#endif /* __V850_V850E_UTILS_H__ */
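(For reference, not part of the patch: a brief sketch of the helper declared above, deriving a divider and compare value for a 100 Hz rate from an assumed 20 MHz base clock and a 16-bit counter. All of the numbers are illustrative:)

    static void example_calc (void)                /* illustrative only */
    {
            unsigned divlog2, count;
            int ok;

            /* Hypothetical counter: 16 bits wide, divider selectable from 2^2 to 2^9. */
            ok = calc_counter_params (20000000 /* base_freq */, 100 /* rate */,
                                      2, 9, 16, &divlog2, &count);
            /* On success (ok != 0), divlog2 and count would be programmed into the timer. */
            (void)ok;
    }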
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 1e3554596f72..4a8e80cdcfa5 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm
3header-y += boot.h 3header-y += boot.h
4header-y += bootparam.h 4header-y += bootparam.h
5header-y += debugreg.h 5header-y += debugreg.h
6header-y += kvm.h
7header-y += ldt.h 6header-y += ldt.h
8header-y += msr-index.h 7header-y += msr-index.h
9header-y += prctl.h 8header-y += prctl.h
@@ -19,7 +18,6 @@ unifdef-y += msr.h
19unifdef-y += mtrr.h 18unifdef-y += mtrr.h
20unifdef-y += posix_types_32.h 19unifdef-y += posix_types_32.h
21unifdef-y += posix_types_64.h 20unifdef-y += posix_types_64.h
22unifdef-y += ptrace.h
23unifdef-y += unistd_32.h 21unifdef-y += unistd_32.h
24unifdef-y += unistd_64.h 22unifdef-y += unistd_64.h
25unifdef-y += vm86.h 23unifdef-y += vm86.h
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index ff87fca0caf9..116e9147fe66 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -1,6 +1,62 @@
1/*
2 * Generic GPIO API implementation for x86.
3 *
4 * Derived from the generic GPIO API for powerpc:
5 *
6 * Copyright (c) 2007-2008 MontaVista Software, Inc.
7 *
8 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
1#ifndef _ASM_I386_GPIO_H 16#ifndef _ASM_I386_GPIO_H
2#define _ASM_I386_GPIO_H 17#define _ASM_I386_GPIO_H
3 18
19#ifdef CONFIG_X86_RDC321X
4#include <gpio.h> 20#include <gpio.h>
21#else /* CONFIG_X86_RDC321X */
22
23#include <asm-generic/gpio.h>
24
25#ifdef CONFIG_GPIOLIB
26
27/*
28 * Just call gpiolib.
29 */
30static inline int gpio_get_value(unsigned int gpio)
31{
32 return __gpio_get_value(gpio);
33}
34
35static inline void gpio_set_value(unsigned int gpio, int value)
36{
37 __gpio_set_value(gpio, value);
38}
39
40static inline int gpio_cansleep(unsigned int gpio)
41{
42 return __gpio_cansleep(gpio);
43}
44
45/*
46 * Not implemented, yet.
47 */
48static inline int gpio_to_irq(unsigned int gpio)
49{
50 return -ENOSYS;
51}
52
53static inline int irq_to_gpio(unsigned int irq)
54{
55 return -EINVAL;
56}
57
58#endif /* CONFIG_GPIOLIB */
59
60#endif /* CONFIG_X86_RDC321X */
5 61
6#endif /* _ASM_I386_GPIO_H */ 62#endif /* _ASM_I386_GPIO_H */
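(For context, not part of the patch: with the wrappers added above, drivers simply use the generic gpiolib calls. A hedged sketch in which the GPIO number and label are placeholders:)

    #include <asm/gpio.h>                   /* pulls in asm-generic/gpio.h when GPIOLIB is set */

    static int example_blink_once(void)     /* illustrative only */
    {
            int err = gpio_request(16, "example-led");  /* 16 is a made-up GPIO number */
            if (err)
                    return err;
            gpio_direction_output(16, 0);
            gpio_set_value(16, 1);          /* ends up in __gpio_set_value() via the wrapper above */
            gpio_free(16);
            return 0;
    }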
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
index 14171a4924f6..439a9acc132d 100644
--- a/include/asm-x86/hugetlb.h
+++ b/include/asm-x86/hugetlb.h
@@ -14,11 +14,13 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
14 * If the arch doesn't supply something else, assume that hugepage 14 * If the arch doesn't supply something else, assume that hugepage
15 * size aligned regions are ok without further preparation. 15 * size aligned regions are ok without further preparation.
16 */ 16 */
17static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) 17static inline int prepare_hugepage_range(struct file *file,
18 unsigned long addr, unsigned long len)
18{ 19{
19 if (len & ~HPAGE_MASK) 20 struct hstate *h = hstate_file(file);
21 if (len & ~huge_page_mask(h))
20 return -EINVAL; 22 return -EINVAL;
21 if (addr & ~HPAGE_MASK) 23 if (addr & ~huge_page_mask(h))
22 return -EINVAL; 24 return -EINVAL;
23 return 0; 25 return 0;
24} 26}
@@ -26,7 +28,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
26static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { 28static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
27} 29}
28 30
29static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb, 31static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
30 unsigned long addr, unsigned long end, 32 unsigned long addr, unsigned long end,
31 unsigned long floor, 33 unsigned long floor,
32 unsigned long ceiling) 34 unsigned long ceiling)
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 37672f79dcc8..96fa8449ff11 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -137,60 +137,6 @@ static inline void __save_init_fpu(struct task_struct *tsk)
137 task_thread_info(tsk)->status &= ~TS_USEDFPU; 137 task_thread_info(tsk)->status &= ~TS_USEDFPU;
138} 138}
139 139
140/*
141 * Signal frame handlers.
142 */
143
144static inline int save_i387(struct _fpstate __user *buf)
145{
146 struct task_struct *tsk = current;
147 int err = 0;
148
149 BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
150 sizeof(tsk->thread.xstate->fxsave));
151
152 if ((unsigned long)buf % 16)
153 printk("save_i387: bad fpstate %p\n", buf);
154
155 if (!used_math())
156 return 0;
157 clear_used_math(); /* trigger finit */
158 if (task_thread_info(tsk)->status & TS_USEDFPU) {
159 err = save_i387_checking((struct i387_fxsave_struct __user *)
160 buf);
161 if (err)
162 return err;
163 task_thread_info(tsk)->status &= ~TS_USEDFPU;
164 stts();
165 } else {
166 if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
167 sizeof(struct i387_fxsave_struct)))
168 return -1;
169 }
170 return 1;
171}
172
173/*
174 * This restores directly out of user space. Exceptions are handled.
175 */
176static inline int restore_i387(struct _fpstate __user *buf)
177{
178 struct task_struct *tsk = current;
179 int err;
180
181 if (!used_math()) {
182 err = init_fpu(tsk);
183 if (err)
184 return err;
185 }
186
187 if (!(task_thread_info(current)->status & TS_USEDFPU)) {
188 clts();
189 task_thread_info(current)->status |= TS_USEDFPU;
190 }
191 return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
192}
193
194#else /* CONFIG_X86_32 */ 140#else /* CONFIG_X86_32 */
195 141
196extern void finit(void); 142extern void finit(void);
diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h
deleted file mode 100644
index cf9c98e5bdb5..000000000000
--- a/include/asm-x86/ide.h
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * Copyright (C) 1994-1996 Linus Torvalds & authors
3 */
4
5/*
6 * This file contains the i386 architecture specific IDE code.
7 */
8
9#ifndef __ASMi386_IDE_H
10#define __ASMi386_IDE_H
11
12#ifdef __KERNEL__
13
14
15#ifndef MAX_HWIFS
16# ifdef CONFIG_BLK_DEV_IDEPCI
17#define MAX_HWIFS 10
18# else
19#define MAX_HWIFS 6
20# endif
21#endif
22
23static __inline__ int ide_default_irq(unsigned long base)
24{
25 switch (base) {
26 case 0x1f0: return 14;
27 case 0x170: return 15;
28 case 0x1e8: return 11;
29 case 0x168: return 10;
30 case 0x1e0: return 8;
31 case 0x160: return 12;
32 default:
33 return 0;
34 }
35}
36
37static __inline__ unsigned long ide_default_io_base(int index)
38{
39 /*
40 * If PCI is present then it is not safe to poke around
41 * the other legacy IDE ports. Only 0x1f0 and 0x170 are
42 * defined compatibility mode ports for PCI. A user can
43 * override this using ide= but we must default safe.
44 */
45 if (no_pci_devices()) {
46 switch(index) {
47 case 2: return 0x1e8;
48 case 3: return 0x168;
49 case 4: return 0x1e0;
50 case 5: return 0x160;
51 }
52 }
53 switch (index) {
54 case 0: return 0x1f0;
55 case 1: return 0x170;
56 default:
57 return 0;
58 }
59}
60
61#include <asm-generic/ide_iops.h>
62
63#endif /* __KERNEL__ */
64
65#endif /* __ASMi386_IDE_H */
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 4df44ed54077..e876d89ac156 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -110,6 +110,8 @@ static inline void *phys_to_virt(unsigned long address)
110 */ 110 */
111extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 111extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
112extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 112extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
113extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
114 unsigned long prot_val);
113 115
114/* 116/*
115 * The default ioremap() behavior is non-cached: 117 * The default ioremap() behavior is non-cached:
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index ddd8058a5026..22995c5c5adc 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -175,6 +175,8 @@ extern void early_iounmap(void *addr, unsigned long size);
175 */ 175 */
176extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 176extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
177extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 177extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
178extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
179 unsigned long prot_val);
178 180
179/* 181/*
180 * The default ioremap() behavior is non-cached: 182 * The default ioremap() behavior is non-cached:
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index 196d63c28aa4..bb1c09f7a76c 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -122,7 +122,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
122 * - mbligh 122 * - mbligh
123 */ 123 */
124 local_irq_save(flags); 124 local_irq_save(flags);
125 for_each_cpu_mask(query_cpu, mask) { 125 for_each_cpu_mask_nr(query_cpu, mask) {
126 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), 126 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
127 vector, APIC_DEST_PHYSICAL); 127 vector, APIC_DEST_PHYSICAL);
128 } 128 }
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 6c846228948d..49982110e4d9 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -32,8 +32,7 @@
32#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 32#define HPAGE_MASK (~(HPAGE_SIZE - 1))
33#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 33#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
34 34
35/* to align the pointer to the (next) page boundary */ 35#define HUGE_MAX_HSTATE 2
36#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
37 36
38#ifndef __ASSEMBLY__ 37#ifndef __ASSEMBLY__
39#include <linux/types.h> 38#include <linux/types.h>
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 092b39b3a7e6..eff2ecd7fff0 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -88,10 +88,12 @@
88#define CX86_ARR_BASE 0xc4 88#define CX86_ARR_BASE 0xc4
89#define CX86_RCR_BASE 0xdc 89#define CX86_RCR_BASE 0xdc
90 90
91#ifdef __KERNEL__
91#ifdef CONFIG_VM86 92#ifdef CONFIG_VM86
92#define X86_VM_MASK X86_EFLAGS_VM 93#define X86_VM_MASK X86_EFLAGS_VM
93#else 94#else
94#define X86_VM_MASK 0 /* No VM86 support */ 95#define X86_VM_MASK 0 /* No VM86 support */
95#endif 96#endif
97#endif
96 98
97#endif /* __ASM_I386_PROCESSOR_FLAGS_H */ 99#endif /* __ASM_I386_PROCESSOR_FLAGS_H */
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 15cb82a44e89..5f58da401b43 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -134,7 +134,7 @@ extern __u32 cleared_cpu_caps[NCAPINTS];
134#ifdef CONFIG_SMP 134#ifdef CONFIG_SMP
135DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); 135DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
136#define cpu_data(cpu) per_cpu(cpu_info, cpu) 136#define cpu_data(cpu) per_cpu(cpu_info, cpu)
137#define current_cpu_data cpu_data(smp_processor_id()) 137#define current_cpu_data __get_cpu_var(cpu_info)
138#else 138#else
139#define cpu_data(cpu) boot_cpu_data 139#define cpu_data(cpu) boot_cpu_data
140#define current_cpu_data boot_cpu_data 140#define current_cpu_data boot_cpu_data
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-x86/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 0a8f27d31d0d..da0a675adf94 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -79,7 +79,6 @@ struct thread_info {
79#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 79#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
80#define TIF_SECCOMP 8 /* secure computing */ 80#define TIF_SECCOMP 8 /* secure computing */
81#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ 81#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
82#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */
83#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 82#define TIF_NOTSC 16 /* TSC is not accessible in userland */
84#define TIF_IA32 17 /* 32bit process */ 83#define TIF_IA32 17 /* 32bit process */
85#define TIF_FORK 18 /* ret_from_fork */ 84#define TIF_FORK 18 /* ret_from_fork */
@@ -102,7 +101,6 @@ struct thread_info {
102#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 101#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
103#define _TIF_SECCOMP (1 << TIF_SECCOMP) 102#define _TIF_SECCOMP (1 << TIF_SECCOMP)
104#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) 103#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
105#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
106#define _TIF_NOTSC (1 << TIF_NOTSC) 104#define _TIF_NOTSC (1 << TIF_NOTSC)
107#define _TIF_IA32 (1 << TIF_IA32) 105#define _TIF_IA32 (1 << TIF_IA32)
108#define _TIF_FORK (1 << TIF_FORK) 106#define _TIF_FORK (1 << TIF_FORK)
@@ -135,7 +133,7 @@ struct thread_info {
135 133
136/* Only used for 64 bit */ 134/* Only used for 64 bit */
137#define _TIF_DO_NOTIFY_MASK \ 135#define _TIF_DO_NOTIFY_MASK \
138 (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) 136 (_TIF_SIGPENDING|_TIF_MCE_NOTIFY)
139 137
140/* flags to check in __switch_to() */ 138/* flags to check in __switch_to() */
141#define _TIF_WORK_CTXSW \ 139#define _TIF_WORK_CTXSW \
@@ -154,6 +152,8 @@ struct thread_info {
154#define THREAD_FLAGS GFP_KERNEL 152#define THREAD_FLAGS GFP_KERNEL
155#endif 153#endif
156 154
155#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
156
157#define alloc_thread_info(tsk) \ 157#define alloc_thread_info(tsk) \
158 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) 158 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
159 159
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index 8317d94771d3..d7394673b772 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -332,6 +332,12 @@
332#define __NR_fallocate 324 332#define __NR_fallocate 324
333#define __NR_timerfd_settime 325 333#define __NR_timerfd_settime 325
334#define __NR_timerfd_gettime 326 334#define __NR_timerfd_gettime 326
335#define __NR_signalfd4 327
336#define __NR_eventfd2 328
337#define __NR_epoll_create1 329
338#define __NR_dup3 330
339#define __NR_pipe2 331
340#define __NR_inotify_init1 332
335 341
336#ifdef __KERNEL__ 342#ifdef __KERNEL__
337 343
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index 9c1a4a3470d9..3a341d791792 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -639,6 +639,20 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
640#define __NR_timerfd_gettime 287 640#define __NR_timerfd_gettime 287
641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
642#define __NR_paccept 288
643__SYSCALL(__NR_paccept, sys_paccept)
644#define __NR_signalfd4 289
645__SYSCALL(__NR_signalfd4, sys_signalfd4)
646#define __NR_eventfd2 290
647__SYSCALL(__NR_eventfd2, sys_eventfd2)
648#define __NR_epoll_create1 291
649__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
650#define __NR_dup3 292
651__SYSCALL(__NR_dup3, sys_dup3)
652#define __NR_pipe2 293
653__SYSCALL(__NR_pipe2, sys_pipe2)
654#define __NR_inotify_init1 294
655__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
642 656
643 657
644#ifndef __NO_STUBS 658#ifndef __NO_STUBS
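
The additions to unistd_32.h and unistd_64.h above assign x86 numbers for the new descriptor-creating syscalls that take a flags argument up front (signalfd4, eventfd2, epoll_create1, dup3, pipe2, inotify_init1, plus paccept on 64-bit). A small userspace sketch, assuming installed headers that define __NR_pipe2 and a kernel carrying this patch (the libc wrapper may not exist yet), invoking one of them directly:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fds[2];
	long ret;

#ifdef __NR_pipe2
	/* Create a pipe with O_NONBLOCK already set, in one syscall. */
	ret = syscall(__NR_pipe2, fds, O_NONBLOCK);
#else
	errno = ENOSYS;
	ret = -1;
#endif
	if (ret < 0) {
		fprintf(stderr, "pipe2: %s\n", strerror(errno));
		return 1;
	}
	printf("pipe2() created fds %d and %d\n", fds[0], fds[1]);
	return 0;
}
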
diff --git a/include/asm-xtensa/ide.h b/include/asm-xtensa/ide.h
deleted file mode 100644
index 6b912742a42d..000000000000
--- a/include/asm-xtensa/ide.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * include/asm-xtensa/ide.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1994 - 1996 Linus Torvalds & authors
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 */
11
12#ifndef _XTENSA_IDE_H
13#define _XTENSA_IDE_H
14
15#ifdef __KERNEL__
16
17
18#ifndef MAX_HWIFS
19# define MAX_HWIFS 1
20#endif
21
22static __inline__ int ide_default_irq(unsigned long base)
23{
24 /* Unsupported! */
25 return 0;
26}
27
28static __inline__ unsigned long ide_default_io_base(int index)
29{
30 /* Unsupported! */
31 return 0;
32}
33
34#endif /* __KERNEL__ */
35#endif /* _XTENSA_IDE_H */
diff --git a/include/asm-xtensa/kvm.h b/include/asm-xtensa/kvm.h
deleted file mode 100644
index bda4e331e98c..000000000000
--- a/include/asm-xtensa/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_XTENSA_H
2#define __LINUX_KVM_XTENSA_H
3
4/* xtensa does not support KVM */
5
6#endif
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 80a6ae0dd259..11f7dc2dbec7 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -26,13 +26,11 @@
26 26
27/* 27/*
28 * PAGE_SHIFT determines the page size 28 * PAGE_SHIFT determines the page size
29 * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
30 */ 29 */
31 30
32#define PAGE_SHIFT 12 31#define PAGE_SHIFT 12
33#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) 32#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
34#define PAGE_MASK (~(PAGE_SIZE-1)) 33#define PAGE_MASK (~(PAGE_SIZE-1))
35#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
36 34
37#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR 35#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
38#define MAX_MEM_PFN XCHAL_KSEG_SIZE 36#define MAX_MEM_PFN XCHAL_KSEG_SIZE
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
index 422c73e26937..089b0db44816 100644
--- a/include/asm-xtensa/ptrace.h
+++ b/include/asm-xtensa/ptrace.h
@@ -73,10 +73,10 @@
73#define PTRACE_GETXTREGS 18 73#define PTRACE_GETXTREGS 18
74#define PTRACE_SETXTREGS 19 74#define PTRACE_SETXTREGS 19
75 75
76#ifndef __ASSEMBLY__
77
78#ifdef __KERNEL__ 76#ifdef __KERNEL__
79 77
78#ifndef __ASSEMBLY__
79
80/* 80/*
81 * This struct defines the way the registers are stored on the 81 * This struct defines the way the registers are stored on the
82 * kernel stack during a system call or other kernel entry. 82 * kernel stack during a system call or other kernel entry.
@@ -122,14 +122,14 @@ extern void show_regs(struct pt_regs *);
122# ifndef CONFIG_SMP 122# ifndef CONFIG_SMP
123# define profile_pc(regs) instruction_pointer(regs) 123# define profile_pc(regs) instruction_pointer(regs)
124# endif 124# endif
125#endif /* __KERNEL__ */
126 125
127#else /* __ASSEMBLY__ */ 126#else /* __ASSEMBLY__ */
128 127
129#ifdef __KERNEL__
130# include <asm/asm-offsets.h> 128# include <asm/asm-offsets.h>
131#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE) 129#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
132#endif
133 130
134#endif /* !__ASSEMBLY__ */ 131#endif /* !__ASSEMBLY__ */
132
133#endif /* __KERNEL__ */
134
135#endif /* _XTENSA_PTRACE_H */ 135#endif /* _XTENSA_PTRACE_H */
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-xtensa/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h
index a2c640682ed9..7e4131dd546c 100644
--- a/include/asm-xtensa/thread_info.h
+++ b/include/asm-xtensa/thread_info.h
@@ -111,10 +111,6 @@ static inline struct thread_info *current_thread_info(void)
111 return ti; 111 return ti;
112} 112}
113 113
114/* thread information allocation */
115#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
116#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
117
118#else /* !__ASSEMBLY__ */ 114#else /* !__ASSEMBLY__ */
119 115
120/* how to get the thread information struct from ASM */ 116/* how to get the thread information struct from ASM */
@@ -160,6 +156,7 @@ static inline struct thread_info *current_thread_info(void)
160#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ 156#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
161 157
162#define THREAD_SIZE 8192 //(2*PAGE_SIZE) 158#define THREAD_SIZE 8192 //(2*PAGE_SIZE)
159#define THREAD_SIZE_ORDER 1
163 160
164#endif /* __KERNEL__ */ 161#endif /* __KERNEL__ */
165#endif /* _XTENSA_THREAD_INFO */ 162#endif /* _XTENSA_THREAD_INFO */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 71d70d1fbce2..4c4142c5aa6e 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -189,7 +189,6 @@ unifdef-y += connector.h
189unifdef-y += cuda.h 189unifdef-y += cuda.h
190unifdef-y += cyclades.h 190unifdef-y += cyclades.h
191unifdef-y += dccp.h 191unifdef-y += dccp.h
192unifdef-y += dirent.h
193unifdef-y += dlm.h 192unifdef-y += dlm.h
194unifdef-y += dlm_plock.h 193unifdef-y += dlm_plock.h
195unifdef-y += edd.h 194unifdef-y += edd.h
@@ -256,7 +255,9 @@ unifdef-y += kd.h
256unifdef-y += kernelcapi.h 255unifdef-y += kernelcapi.h
257unifdef-y += kernel.h 256unifdef-y += kernel.h
258unifdef-y += keyboard.h 257unifdef-y += keyboard.h
258ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
259unifdef-y += kvm.h 259unifdef-y += kvm.h
260endif
260unifdef-y += llc.h 261unifdef-y += llc.h
261unifdef-y += loop.h 262unifdef-y += loop.h
262unifdef-y += lp.h 263unifdef-y += lp.h
diff --git a/include/linux/acct.h b/include/linux/acct.h
index e8cae54e8d88..882dc7248766 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -120,17 +120,20 @@ struct acct_v3
120struct vfsmount; 120struct vfsmount;
121struct super_block; 121struct super_block;
122struct pacct_struct; 122struct pacct_struct;
123struct pid_namespace;
123extern void acct_auto_close_mnt(struct vfsmount *m); 124extern void acct_auto_close_mnt(struct vfsmount *m);
124extern void acct_auto_close(struct super_block *sb); 125extern void acct_auto_close(struct super_block *sb);
125extern void acct_init_pacct(struct pacct_struct *pacct); 126extern void acct_init_pacct(struct pacct_struct *pacct);
126extern void acct_collect(long exitcode, int group_dead); 127extern void acct_collect(long exitcode, int group_dead);
127extern void acct_process(void); 128extern void acct_process(void);
129extern void acct_exit_ns(struct pid_namespace *);
128#else 130#else
129#define acct_auto_close_mnt(x) do { } while (0) 131#define acct_auto_close_mnt(x) do { } while (0)
130#define acct_auto_close(x) do { } while (0) 132#define acct_auto_close(x) do { } while (0)
131#define acct_init_pacct(x) do { } while (0) 133#define acct_init_pacct(x) do { } while (0)
132#define acct_collect(x,y) do { } while (0) 134#define acct_collect(x,y) do { } while (0)
133#define acct_process() do { } while (0) 135#define acct_process() do { } while (0)
136#define acct_exit_ns(ns) do { } while (0)
134#endif 137#endif
135 138
136/* 139/*
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a17177639376..702f79dad16a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -236,6 +236,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
236 const char *name); 236 const char *name);
237 237
238#ifdef CONFIG_PM_SLEEP 238#ifdef CONFIG_PM_SLEEP
239void __init acpi_no_s4_hw_signature(void);
239void __init acpi_old_suspend_ordering(void); 240void __init acpi_old_suspend_ordering(void);
240#endif /* CONFIG_PM_SLEEP */ 241#endif /* CONFIG_PM_SLEEP */
241#else /* CONFIG_ACPI */ 242#else /* CONFIG_ACPI */
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 6129e58ca7c9..e0a0cdc2da43 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -9,7 +9,7 @@
9#define _LINUX_ANON_INODES_H 9#define _LINUX_ANON_INODES_H
10 10
11int anon_inode_getfd(const char *name, const struct file_operations *fops, 11int anon_inode_getfd(const char *name, const struct file_operations *fops,
12 void *priv); 12 void *priv, int flags);
13 13
14#endif /* _LINUX_ANON_INODES_H */ 14#endif /* _LINUX_ANON_INODES_H */
15 15
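
With the extra argument, anon_inode_getfd() callers can request descriptor flags such as O_CLOEXEC at creation time instead of needing a separate fcntl(). A hedged kernel-side sketch -- example_fops and the "[example]" name are hypothetical, only the prototype comes from the hunk above:

#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

/* Returns a new fd (or a negative errno), honouring flags like O_CLOEXEC. */
static int example_create_fd(void *priv, int flags)
{
	return anon_inode_getfd("[example]", &example_fops, priv, flags);
}
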
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index eb640f0acfac..0f50d4cc4360 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -101,21 +101,14 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
101 101
102/** 102/**
103 * async_tx_sync_epilog - actions to take if an operation is run synchronously 103 * async_tx_sync_epilog - actions to take if an operation is run synchronously
104 * @flags: async_tx flags
105 * @depend_tx: transaction depends on depend_tx
106 * @cb_fn: function to call when the transaction completes 104 * @cb_fn: function to call when the transaction completes
107 * @cb_fn_param: parameter to pass to the callback routine 105 * @cb_fn_param: parameter to pass to the callback routine
108 */ 106 */
109static inline void 107static inline void
110async_tx_sync_epilog(unsigned long flags, 108async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
111 struct dma_async_tx_descriptor *depend_tx,
112 dma_async_tx_callback cb_fn, void *cb_fn_param)
113{ 109{
114 if (cb_fn) 110 if (cb_fn)
115 cb_fn(cb_fn_param); 111 cb_fn(cb_fn_param);
116
117 if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
118 async_tx_ack(depend_tx);
119} 112}
120 113
121void 114void
@@ -152,4 +145,6 @@ struct dma_async_tx_descriptor *
152async_trigger_callback(enum async_tx_flags flags, 145async_trigger_callback(enum async_tx_flags flags,
153 struct dma_async_tx_descriptor *depend_tx, 146 struct dma_async_tx_descriptor *depend_tx,
154 dma_async_tx_callback cb_fn, void *cb_fn_param); 147 dma_async_tx_callback cb_fn, void *cb_fn_param);
148
149void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
155#endif /* _ASYNC_TX_H_ */ 150#endif /* _ASYNC_TX_H_ */
diff --git a/include/linux/atmel-pwm-bl.h b/include/linux/atmel-pwm-bl.h
new file mode 100644
index 000000000000..0153a47806c2
--- /dev/null
+++ b/include/linux/atmel-pwm-bl.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2007 Atmel Corporation
3 *
 4 * Platform data for the Atmel PWM backlight driver (atmel-pwm-bl).
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11#ifndef __INCLUDE_ATMEL_PWM_BL_H
12#define __INCLUDE_ATMEL_PWM_BL_H
13
14/**
15 * struct atmel_pwm_bl_platform_data
16 * @pwm_channel: which PWM channel in the PWM module to use.
17 * @pwm_frequency: PWM frequency to generate, the driver will try to be as
18 * close as the prescaler allows.
19 * @pwm_compare_max: value to use in the PWM channel compare register.
20 * @pwm_duty_max: maximum duty cycle value, must be less than or equal to
21 * pwm_compare_max.
22 * @pwm_duty_min: minimum duty cycle value, must be less than pwm_duty_max.
23 * @pwm_active_low: set to one if the low part of the PWM signal increases the
24 * brightness of the backlight.
25 * @gpio_on: GPIO line to control the backlight on/off, set to -1 if not used.
26 * @on_active_low: set to one if the on/off signal is on when GPIO is low.
27 *
28 * This struct must be added to the platform device in the board code. It is
29 * used by the atmel-pwm-bl driver to setup the GPIO to control on/off and the
30 * PWM device.
31 */
32struct atmel_pwm_bl_platform_data {
33 unsigned int pwm_channel;
34 unsigned int pwm_frequency;
35 unsigned int pwm_compare_max;
36 unsigned int pwm_duty_max;
37 unsigned int pwm_duty_min;
38 unsigned int pwm_active_low;
39 int gpio_on;
40 unsigned int on_active_low;
41};
42
43#endif /* __INCLUDE_ATMEL_PWM_BL_H */
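
A board-code sketch of how this platform data might be wired up; every numeric value below is illustrative, and the "atmel-pwm-bl" device name is assumed to match the corresponding backlight driver:

#include <linux/platform_device.h>
#include <linux/atmel-pwm-bl.h>

static struct atmel_pwm_bl_platform_data example_pwm_bl_data = {
	.pwm_channel	 = 2,
	.pwm_frequency	 = 200000,
	.pwm_compare_max = 345,
	.pwm_duty_max	 = 345,
	.pwm_duty_min	 = 90,
	.pwm_active_low	 = 1,
	.gpio_on	 = -1,	/* no separate on/off GPIO */
	.on_active_low	 = 0,
};

static struct platform_device example_pwm_bl_device = {
	.name	= "atmel-pwm-bl",
	.id	= 0,
	.dev	= {
		.platform_data = &example_pwm_bl_data,
	},
};
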
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 8b82974bdc12..6272a395d43c 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -286,7 +286,6 @@
286#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 286#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
287#define AUDIT_ARCH_SPARC (EM_SPARC) 287#define AUDIT_ARCH_SPARC (EM_SPARC)
288#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT) 288#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT)
289#define AUDIT_ARCH_V850 (EM_V850|__AUDIT_ARCH_LE)
290#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 289#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
291 290
292#define AUDIT_PERM_EXEC 1 291#define AUDIT_PERM_EXEC 1
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h
index 31a29541b504..b785c6f8644d 100644
--- a/include/linux/auto_fs4.h
+++ b/include/linux/auto_fs4.h
@@ -98,8 +98,6 @@ union autofs_v5_packet_union {
98#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI 98#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI
99#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI 99#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI
100#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) 100#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int)
101#define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int)
102#define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int)
103#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93,0x70,int) 101#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93,0x70,int)
104 102
105 103
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 0da17d14fd13..d7afa9dd6635 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -26,9 +26,13 @@
26 26
27#define AT_SECURE 23 /* secure mode boolean */ 27#define AT_SECURE 23 /* secure mode boolean */
28 28
29#define AT_BASE_PLATFORM 24 /* string identifying real platform, may
30 * differ from AT_PLATFORM. */
31
29#define AT_EXECFN 31 /* filename of program */ 32#define AT_EXECFN 31 /* filename of program */
33
30#ifdef __KERNEL__ 34#ifdef __KERNEL__
31#define AT_VECTOR_SIZE_BASE 17 /* NEW_AUX_ENT entries in auxiliary table */ 35#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */
32 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ 36 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
33#endif 37#endif
34 38
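
AT_BASE_PLATFORM (and the bumped AT_VECTOR_SIZE_BASE) surfaces in each process's auxiliary vector. A userspace sketch that dumps the new entry by reading /proc/self/auxv, assuming a 64-bit process; the fallback #defines cover libc headers that predate this patch:

#include <stdio.h>
#include <elf.h>

#ifndef AT_BASE_PLATFORM
#define AT_BASE_PLATFORM 24
#endif
#ifndef AT_EXECFN
#define AT_EXECFN 31
#endif

int main(void)
{
	Elf64_auxv_t aux;
	FILE *f = fopen("/proc/self/auxv", "rb");

	if (!f)
		return 1;
	while (fread(&aux, sizeof(aux), 1, f) == 1 && aux.a_type != AT_NULL) {
		/* Both entries carry pointers to strings in our address space. */
		if (aux.a_type == AT_BASE_PLATFORM)
			printf("AT_BASE_PLATFORM: %s\n", (char *)aux.a_un.a_val);
		if (aux.a_type == AT_EXECFN)
			printf("AT_EXECFN:        %s\n", (char *)aux.a_un.a_val);
	}
	fclose(f);
	return 0;
}
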
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
index c545308125b0..7ac518e3c152 100644
--- a/include/linux/bcd.h
+++ b/include/linux/bcd.h
@@ -10,8 +10,13 @@
10#ifndef _BCD_H 10#ifndef _BCD_H
11#define _BCD_H 11#define _BCD_H
12 12
13#define BCD2BIN(val) (((val) & 0x0f) + ((val)>>4)*10) 13#include <linux/compiler.h>
14#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10) 14
15unsigned bcd2bin(unsigned char val) __attribute_const__;
16unsigned char bin2bcd(unsigned val) __attribute_const__;
17
18#define BCD2BIN(val) bcd2bin(val)
19#define BIN2BCD(val) bin2bcd(val)
15 20
16/* backwards compat */ 21/* backwards compat */
17#define BCD_TO_BIN(val) ((val)=BCD2BIN(val)) 22#define BCD_TO_BIN(val) ((val)=BCD2BIN(val))
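
The BCD macros become real const functions, which type-check their argument and avoid multiple evaluation; the arithmetic is unchanged. A standalone userspace sketch of the same conversions, using an RTC-style register value as the example input:

#include <stdio.h>

/* Same arithmetic as the kernel helpers declared above. */
static unsigned bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	unsigned char secs = 0x59;	/* "59" as an RTC register encodes it */

	printf("bcd 0x%02x -> %u\n", secs, bcd2bin(secs));
	printf("bin 37     -> 0x%02x\n", bin2bcd(37));
	return 0;
}
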
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index ee0ed48e8348..826f62350805 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -38,7 +38,7 @@ struct linux_binprm{
38 misc_bang:1; 38 misc_bang:1;
39 struct file * file; 39 struct file * file;
40 int e_uid, e_gid; 40 int e_uid, e_gid;
41 kernel_cap_t cap_inheritable, cap_permitted; 41 kernel_cap_t cap_post_exec_permitted;
42 bool cap_effective; 42 bool cap_effective;
43 void *security; 43 void *security;
44 int argc, envc; 44 int argc, envc;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index a1d9b79078ea..652470b687c9 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -28,52 +28,73 @@ extern unsigned long saved_max_pfn;
28 * memory pages (including holes) on the node. 28 * memory pages (including holes) on the node.
29 */ 29 */
30typedef struct bootmem_data { 30typedef struct bootmem_data {
31 unsigned long node_boot_start; 31 unsigned long node_min_pfn;
32 unsigned long node_low_pfn; 32 unsigned long node_low_pfn;
33 void *node_bootmem_map; 33 void *node_bootmem_map;
34 unsigned long last_offset; 34 unsigned long last_end_off;
35 unsigned long last_pos; 35 unsigned long hint_idx;
36 unsigned long last_success; /* Previous allocation point. To speed
37 * up searching */
38 struct list_head list; 36 struct list_head list;
39} bootmem_data_t; 37} bootmem_data_t;
40 38
39extern bootmem_data_t bootmem_node_data[];
40
41extern unsigned long bootmem_bootmap_pages(unsigned long); 41extern unsigned long bootmem_bootmap_pages(unsigned long);
42
43extern unsigned long init_bootmem_node(pg_data_t *pgdat,
44 unsigned long freepfn,
45 unsigned long startpfn,
46 unsigned long endpfn);
42extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); 47extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
48
49extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
50extern unsigned long free_all_bootmem(void);
51
52extern void free_bootmem_node(pg_data_t *pgdat,
53 unsigned long addr,
54 unsigned long size);
43extern void free_bootmem(unsigned long addr, unsigned long size); 55extern void free_bootmem(unsigned long addr, unsigned long size);
44extern void *__alloc_bootmem(unsigned long size, 56
57/*
58 * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
59 * the architecture-specific code should honor this).
60 *
61 * If flags is 0, then the return value is always 0 (success). If
62 * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the
63 * memory already was reserved.
64 */
65#define BOOTMEM_DEFAULT 0
66#define BOOTMEM_EXCLUSIVE (1<<0)
67
68extern int reserve_bootmem_node(pg_data_t *pgdat,
69 unsigned long physaddr,
70 unsigned long size,
71 int flags);
72#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
73extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
74#endif
75
76extern void *__alloc_bootmem_nopanic(unsigned long size,
45 unsigned long align, 77 unsigned long align,
46 unsigned long goal); 78 unsigned long goal);
47extern void *__alloc_bootmem_nopanic(unsigned long size, 79extern void *__alloc_bootmem(unsigned long size,
48 unsigned long align, 80 unsigned long align,
49 unsigned long goal); 81 unsigned long goal);
50extern void *__alloc_bootmem_low(unsigned long size, 82extern void *__alloc_bootmem_low(unsigned long size,
51 unsigned long align, 83 unsigned long align,
52 unsigned long goal); 84 unsigned long goal);
85extern void *__alloc_bootmem_node(pg_data_t *pgdat,
86 unsigned long size,
87 unsigned long align,
88 unsigned long goal);
89extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
90 unsigned long size,
91 unsigned long align,
92 unsigned long goal);
53extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, 93extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
54 unsigned long size, 94 unsigned long size,
55 unsigned long align, 95 unsigned long align,
56 unsigned long goal); 96 unsigned long goal);
57extern void *__alloc_bootmem_core(struct bootmem_data *bdata,
58 unsigned long size,
59 unsigned long align,
60 unsigned long goal,
61 unsigned long limit);
62
63/*
64 * flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
65 * the architecture-specific code should honor this)
66 */
67#define BOOTMEM_DEFAULT 0
68#define BOOTMEM_EXCLUSIVE (1<<0)
69
70#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE 97#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
71/*
72 * If flags is 0, then the return value is always 0 (success). If
73 * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the
74 * memory already was reserved.
75 */
76extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
77#define alloc_bootmem(x) \ 98#define alloc_bootmem(x) \
78 __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 99 __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
79#define alloc_bootmem_low(x) \ 100#define alloc_bootmem_low(x) \
@@ -82,31 +103,6 @@ extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
82 __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 103 __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
83#define alloc_bootmem_low_pages(x) \ 104#define alloc_bootmem_low_pages(x) \
84 __alloc_bootmem_low(x, PAGE_SIZE, 0) 105 __alloc_bootmem_low(x, PAGE_SIZE, 0)
85#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
86
87extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
88 int flags);
89extern unsigned long free_all_bootmem(void);
90extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
91extern void *__alloc_bootmem_node(pg_data_t *pgdat,
92 unsigned long size,
93 unsigned long align,
94 unsigned long goal);
95extern unsigned long init_bootmem_node(pg_data_t *pgdat,
96 unsigned long freepfn,
97 unsigned long startpfn,
98 unsigned long endpfn);
99extern int reserve_bootmem_node(pg_data_t *pgdat,
100 unsigned long physaddr,
101 unsigned long size,
102 int flags);
103extern void free_bootmem_node(pg_data_t *pgdat,
104 unsigned long addr,
105 unsigned long size);
106extern void *alloc_bootmem_section(unsigned long size,
107 unsigned long section_nr);
108
109#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
110#define alloc_bootmem_node(pgdat, x) \ 106#define alloc_bootmem_node(pgdat, x) \
111 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 107 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
112#define alloc_bootmem_pages_node(pgdat, x) \ 108#define alloc_bootmem_pages_node(pgdat, x) \
@@ -115,6 +111,12 @@ extern void *alloc_bootmem_section(unsigned long size,
115 __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) 111 __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
116#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ 112#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
117 113
114extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
115 int flags);
116
117extern void *alloc_bootmem_section(unsigned long size,
118 unsigned long section_nr);
119
118#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP 120#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
119extern void *alloc_remap(int nid, unsigned long size); 121extern void *alloc_remap(int nid, unsigned long size);
120#else 122#else
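
The reshuffled bootmem declarations keep the calling conventions intact. A kernel-context sketch of the common early-boot pattern -- reserve a region, then grab a small buffer -- assuming an architecture where reserve_bootmem() is available (no CONFIG_HAVE_ARCH_BOOTMEM_NODE); the addresses and sizes are illustrative:

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>

void __init example_bootmem_setup(void)
{
	void *buf;

	/* Hypothetical firmware region kept away from the allocator;
	 * with BOOTMEM_EXCLUSIVE, -EBUSY means it was already reserved. */
	if (reserve_bootmem(0x100000, 0x1000, BOOTMEM_EXCLUSIVE) == -EBUSY)
		printk(KERN_WARNING "example: region was already reserved\n");

	/* 4 KiB of boot memory; __alloc_bootmem() panics on failure,
	 * hence the separate _nopanic variant declared above. */
	buf = alloc_bootmem(4096);
	memset(buf, 0, 4096);
}
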
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 961ed4b48d8e..44f95b92393b 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -94,12 +94,12 @@ static inline __u16 __be16_to_cpup(const __be16 *p)
94#define __le32_to_cpus(x) __swab32s((x)) 94#define __le32_to_cpus(x) __swab32s((x))
95#define __cpu_to_le16s(x) __swab16s((x)) 95#define __cpu_to_le16s(x) __swab16s((x))
96#define __le16_to_cpus(x) __swab16s((x)) 96#define __le16_to_cpus(x) __swab16s((x))
97#define __cpu_to_be64s(x) do {} while (0) 97#define __cpu_to_be64s(x) do { (void)(x); } while (0)
98#define __be64_to_cpus(x) do {} while (0) 98#define __be64_to_cpus(x) do { (void)(x); } while (0)
99#define __cpu_to_be32s(x) do {} while (0) 99#define __cpu_to_be32s(x) do { (void)(x); } while (0)
100#define __be32_to_cpus(x) do {} while (0) 100#define __be32_to_cpus(x) do { (void)(x); } while (0)
101#define __cpu_to_be16s(x) do {} while (0) 101#define __cpu_to_be16s(x) do { (void)(x); } while (0)
102#define __be16_to_cpus(x) do {} while (0) 102#define __be16_to_cpus(x) do { (void)(x); } while (0)
103 103
104#ifdef __KERNEL__ 104#ifdef __KERNEL__
105#include <linux/byteorder/generic.h> 105#include <linux/byteorder/generic.h>
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 05dc7c35b3b2..4cc170a31762 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -88,12 +88,12 @@ static inline __u16 __be16_to_cpup(const __be16 *p)
88{ 88{
89 return __swab16p((__u16 *)p); 89 return __swab16p((__u16 *)p);
90} 90}
91#define __cpu_to_le64s(x) do {} while (0) 91#define __cpu_to_le64s(x) do { (void)(x); } while (0)
92#define __le64_to_cpus(x) do {} while (0) 92#define __le64_to_cpus(x) do { (void)(x); } while (0)
93#define __cpu_to_le32s(x) do {} while (0) 93#define __cpu_to_le32s(x) do { (void)(x); } while (0)
94#define __le32_to_cpus(x) do {} while (0) 94#define __le32_to_cpus(x) do { (void)(x); } while (0)
95#define __cpu_to_le16s(x) do {} while (0) 95#define __cpu_to_le16s(x) do { (void)(x); } while (0)
96#define __le16_to_cpus(x) do {} while (0) 96#define __le16_to_cpus(x) do { (void)(x); } while (0)
97#define __cpu_to_be64s(x) __swab64s((x)) 97#define __cpu_to_be64s(x) __swab64s((x))
98#define __be64_to_cpus(x) __swab64s((x)) 98#define __be64_to_cpus(x) __swab64s((x))
99#define __cpu_to_be32s(x) __swab32s((x)) 99#define __cpu_to_be32s(x) __swab32s((x))
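
The two byteorder hunks above turn the no-op swap macros into statements that still reference (and type-check) their argument while emitting no code, presumably to silence unused-variable style warnings on the endianness where the swap degenerates to nothing. A tiny userspace sketch of the same pattern:

#include <stdio.h>

/* No-op "conversion": evaluates x (type check, side effects), emits no code. */
#define to_cpu_inplace_noop(x) do { (void)(x); } while (0)

int main(void)
{
	unsigned int reg = 0x12345678;

	to_cpu_inplace_noop(reg);	/* compiles cleanly, changes nothing */
	printf("0x%08x\n", reg);
	return 0;
}
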
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e155aa78d859..c98dd7cb7076 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -21,11 +21,13 @@
21struct cgroupfs_root; 21struct cgroupfs_root;
22struct cgroup_subsys; 22struct cgroup_subsys;
23struct inode; 23struct inode;
24struct cgroup;
24 25
25extern int cgroup_init_early(void); 26extern int cgroup_init_early(void);
26extern int cgroup_init(void); 27extern int cgroup_init(void);
27extern void cgroup_init_smp(void); 28extern void cgroup_init_smp(void);
28extern void cgroup_lock(void); 29extern void cgroup_lock(void);
30extern bool cgroup_lock_live_group(struct cgroup *cgrp);
29extern void cgroup_unlock(void); 31extern void cgroup_unlock(void);
30extern void cgroup_fork(struct task_struct *p); 32extern void cgroup_fork(struct task_struct *p);
31extern void cgroup_fork_callbacks(struct task_struct *p); 33extern void cgroup_fork_callbacks(struct task_struct *p);
@@ -205,50 +207,64 @@ struct cftype {
205 * subsystem, followed by a period */ 207 * subsystem, followed by a period */
206 char name[MAX_CFTYPE_NAME]; 208 char name[MAX_CFTYPE_NAME];
207 int private; 209 int private;
208 int (*open) (struct inode *inode, struct file *file); 210
209 ssize_t (*read) (struct cgroup *cgrp, struct cftype *cft, 211 /*
210 struct file *file, 212 * If non-zero, defines the maximum length of string that can
211 char __user *buf, size_t nbytes, loff_t *ppos); 213 * be passed to write_string; defaults to 64
214 */
215 size_t max_write_len;
216
217 int (*open)(struct inode *inode, struct file *file);
218 ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
219 struct file *file,
220 char __user *buf, size_t nbytes, loff_t *ppos);
212 /* 221 /*
213 * read_u64() is a shortcut for the common case of returning a 222 * read_u64() is a shortcut for the common case of returning a
214 * single integer. Use it in place of read() 223 * single integer. Use it in place of read()
215 */ 224 */
216 u64 (*read_u64) (struct cgroup *cgrp, struct cftype *cft); 225 u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
217 /* 226 /*
218 * read_s64() is a signed version of read_u64() 227 * read_s64() is a signed version of read_u64()
219 */ 228 */
220 s64 (*read_s64) (struct cgroup *cgrp, struct cftype *cft); 229 s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
221 /* 230 /*
222 * read_map() is used for defining a map of key/value 231 * read_map() is used for defining a map of key/value
223 * pairs. It should call cb->fill(cb, key, value) for each 232 * pairs. It should call cb->fill(cb, key, value) for each
224 * entry. The key/value pairs (and their ordering) should not 233 * entry. The key/value pairs (and their ordering) should not
225 * change between reboots. 234 * change between reboots.
226 */ 235 */
227 int (*read_map) (struct cgroup *cont, struct cftype *cft, 236 int (*read_map)(struct cgroup *cont, struct cftype *cft,
228 struct cgroup_map_cb *cb); 237 struct cgroup_map_cb *cb);
229 /* 238 /*
230 * read_seq_string() is used for outputting a simple sequence 239 * read_seq_string() is used for outputting a simple sequence
231 * using seqfile. 240 * using seqfile.
232 */ 241 */
233 int (*read_seq_string) (struct cgroup *cont, struct cftype *cft, 242 int (*read_seq_string)(struct cgroup *cont, struct cftype *cft,
234 struct seq_file *m); 243 struct seq_file *m);
235 244
236 ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft, 245 ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
237 struct file *file, 246 struct file *file,
238 const char __user *buf, size_t nbytes, loff_t *ppos); 247 const char __user *buf, size_t nbytes, loff_t *ppos);
239 248
240 /* 249 /*
241 * write_u64() is a shortcut for the common case of accepting 250 * write_u64() is a shortcut for the common case of accepting
242 * a single integer (as parsed by simple_strtoull) from 251 * a single integer (as parsed by simple_strtoull) from
243 * userspace. Use in place of write(); return 0 or error. 252 * userspace. Use in place of write(); return 0 or error.
244 */ 253 */
245 int (*write_u64) (struct cgroup *cgrp, struct cftype *cft, u64 val); 254 int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
246 /* 255 /*
247 * write_s64() is a signed version of write_u64() 256 * write_s64() is a signed version of write_u64()
248 */ 257 */
249 int (*write_s64) (struct cgroup *cgrp, struct cftype *cft, s64 val); 258 int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);
250 259
251 /* 260 /*
261 * write_string() is passed a nul-terminated kernelspace
262 * buffer of maximum length determined by max_write_len.
263 * Returns 0 or -ve error code.
264 */
265 int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
266 const char *buffer);
267 /*
252 * trigger() callback can be used to get some kick from the 268 * trigger() callback can be used to get some kick from the
253 * userspace, when the actual string written is not important 269 * userspace, when the actual string written is not important
254 * at all. The private field can be used to determine the 270 * at all. The private field can be used to determine the
@@ -256,7 +272,7 @@ struct cftype {
256 */ 272 */
257 int (*trigger)(struct cgroup *cgrp, unsigned int event); 273 int (*trigger)(struct cgroup *cgrp, unsigned int event);
258 274
259 int (*release) (struct inode *inode, struct file *file); 275 int (*release)(struct inode *inode, struct file *file);
260}; 276};
261 277
262struct cgroup_scanner { 278struct cgroup_scanner {
@@ -348,7 +364,8 @@ static inline struct cgroup* task_cgroup(struct task_struct *task,
348 return task_subsys_state(task, subsys_id)->cgroup; 364 return task_subsys_state(task, subsys_id)->cgroup;
349} 365}
350 366
351int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss); 367int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss,
368 char *nodename);
352 369
353/* A cgroup_iter should be treated as an opaque object */ 370/* A cgroup_iter should be treated as an opaque object */
354struct cgroup_iter { 371struct cgroup_iter {
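
write_string() hands a control file a pre-copied, nul-terminated kernel buffer, with max_write_len bounding its size. A hedged sketch of a cftype that could use it -- the "label" file and its handlers are hypothetical, not part of any real subsystem:

#include <linux/cgroup.h>
#include <linux/types.h>
#include <linux/string.h>

static char example_label[64];

static int example_label_write(struct cgroup *cgrp, struct cftype *cft,
			       const char *buffer)
{
	/* buffer is already nul-terminated and at most max_write_len long. */
	strlcpy(example_label, buffer, sizeof(example_label));
	return 0;
}

static u64 example_label_len_read(struct cgroup *cgrp, struct cftype *cft)
{
	return strlen(example_label);
}

static struct cftype example_files[] = {
	{
		.name		= "label",
		.max_write_len	= sizeof(example_label),
		.write_string	= example_label_write,
		.read_u64	= example_label_len_read,
	},
};
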
diff --git a/include/linux/coda.h b/include/linux/coda.h
index b5cf0780c51a..96c87693800b 100644
--- a/include/linux/coda.h
+++ b/include/linux/coda.h
@@ -199,28 +199,6 @@ typedef u_int32_t vuid_t;
199typedef u_int32_t vgid_t; 199typedef u_int32_t vgid_t;
200#endif /*_VUID_T_ */ 200#endif /*_VUID_T_ */
201 201
202#ifdef CONFIG_CODA_FS_OLD_API
203struct CodaFid {
204 u_int32_t opaque[3];
205};
206
207static __inline__ ino_t coda_f2i(struct CodaFid *fid)
208{
209 if ( ! fid )
210 return 0;
211 if (fid->opaque[1] == 0xfffffffe || fid->opaque[1] == 0xffffffff)
212 return ((fid->opaque[0] << 20) | (fid->opaque[2] & 0xfffff));
213 else
214 return (fid->opaque[2] + (fid->opaque[1]<<10) + (fid->opaque[0]<<20));
215}
216
217struct coda_cred {
218 vuid_t cr_uid, cr_euid, cr_suid, cr_fsuid; /* Real, efftve, set, fs uid*/
219 vgid_t cr_groupid, cr_egid, cr_sgid, cr_fsgid; /* same for groups */
220};
221
222#else /* not defined(CONFIG_CODA_FS_OLD_API) */
223
224struct CodaFid { 202struct CodaFid {
225 u_int32_t opaque[4]; 203 u_int32_t opaque[4];
226}; 204};
@@ -228,8 +206,6 @@ struct CodaFid {
228#define coda_f2i(fid)\ 206#define coda_f2i(fid)\
229 (fid ? (fid->opaque[3] ^ (fid->opaque[2]<<10) ^ (fid->opaque[1]<<20) ^ fid->opaque[0]) : 0) 207 (fid ? (fid->opaque[3] ^ (fid->opaque[2]<<10) ^ (fid->opaque[1]<<20) ^ fid->opaque[0]) : 0)
230 208
231#endif
232
233#ifndef _VENUS_VATTR_T_ 209#ifndef _VENUS_VATTR_T_
234#define _VENUS_VATTR_T_ 210#define _VENUS_VATTR_T_
235/* 211/*
@@ -313,15 +289,7 @@ struct coda_statfs {
313 289
314#define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t) 290#define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t)
315 291
316#if 0
317#define CODA_KERNEL_VERSION 0 /* don't care about kernel version number */
318#define CODA_KERNEL_VERSION 1 /* The old venus 4.6 compatible interface */
319#endif
320#ifdef CONFIG_CODA_FS_OLD_API
321#define CODA_KERNEL_VERSION 2 /* venus_lookup got an extra parameter */
322#else
323#define CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */ 292#define CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */
324#endif
325 293
326/* 294/*
327 * Venus <-> Coda RPC arguments 295 * Venus <-> Coda RPC arguments
@@ -329,16 +297,9 @@ struct coda_statfs {
329struct coda_in_hdr { 297struct coda_in_hdr {
330 u_int32_t opcode; 298 u_int32_t opcode;
331 u_int32_t unique; /* Keep multiple outstanding msgs distinct */ 299 u_int32_t unique; /* Keep multiple outstanding msgs distinct */
332#ifdef CONFIG_CODA_FS_OLD_API
333 u_int16_t pid; /* Common to all */
334 u_int16_t pgid; /* Common to all */
335 u_int16_t sid; /* Common to all */
336 struct coda_cred cred; /* Common to all */
337#else
338 pid_t pid; 300 pid_t pid;
339 pid_t pgid; 301 pid_t pgid;
340 vuid_t uid; 302 vuid_t uid;
341#endif
342}; 303};
343 304
344/* Really important that opcode and unique are 1st two fields! */ 305/* Really important that opcode and unique are 1st two fields! */
@@ -613,11 +574,7 @@ struct coda_vget_out {
613/* CODA_PURGEUSER is a venus->kernel call */ 574/* CODA_PURGEUSER is a venus->kernel call */
614struct coda_purgeuser_out { 575struct coda_purgeuser_out {
615 struct coda_out_hdr oh; 576 struct coda_out_hdr oh;
616#ifdef CONFIG_CODA_FS_OLD_API
617 struct coda_cred cred;
618#else
619 vuid_t uid; 577 vuid_t uid;
620#endif
621}; 578};
622 579
623/* coda_zapfile: */ 580/* coda_zapfile: */
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
index e2bf7e5db39a..c4811da1338b 100644
--- a/include/linux/consolemap.h
+++ b/include/linux/consolemap.h
@@ -3,6 +3,9 @@
3 * 3 *
4 * Interface between console.c, selection.c and consolemap.c 4 * Interface between console.c, selection.c and consolemap.c
5 */ 5 */
6#ifndef __LINUX_CONSOLEMAP_H__
7#define __LINUX_CONSOLEMAP_H__
8
6#define LAT1_MAP 0 9#define LAT1_MAP 0
7#define GRAF_MAP 1 10#define GRAF_MAP 1
8#define IBMPC_MAP 2 11#define IBMPC_MAP 2
@@ -10,6 +13,7 @@
10 13
11#include <linux/types.h> 14#include <linux/types.h>
12 15
16#ifdef CONFIG_CONSOLE_TRANSLATIONS
13struct vc_data; 17struct vc_data;
14 18
15extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode); 19extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode);
@@ -18,3 +22,13 @@ extern int conv_uni_to_pc(struct vc_data *conp, long ucs);
18extern u32 conv_8bit_to_uni(unsigned char c); 22extern u32 conv_8bit_to_uni(unsigned char c);
19extern int conv_uni_to_8bit(u32 uni); 23extern int conv_uni_to_8bit(u32 uni);
20void console_map_init(void); 24void console_map_init(void);
25#else
26#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph)
27#define set_translate(m, vc) ((unsigned short *)NULL)
28#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs))
29#define conv_8bit_to_uni(c) ((uint32_t)(c))
30#define conv_uni_to_8bit(c) ((int) ((c) & 0xff))
31#define console_map_init(c) do { ; } while (0)
32#endif /* CONFIG_CONSOLE_TRANSLATIONS */
33
34#endif /* __LINUX_CONSOLEMAP_H__ */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7464ba3b4333..d7faf8808497 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -69,10 +69,11 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
69#endif 69#endif
70 70
71int cpu_up(unsigned int cpu); 71int cpu_up(unsigned int cpu);
72
73extern void cpu_hotplug_init(void); 72extern void cpu_hotplug_init(void);
73extern void cpu_maps_update_begin(void);
74extern void cpu_maps_update_done(void);
74 75
75#else 76#else /* CONFIG_SMP */
76 77
77static inline int register_cpu_notifier(struct notifier_block *nb) 78static inline int register_cpu_notifier(struct notifier_block *nb)
78{ 79{
@@ -87,10 +88,16 @@ static inline void cpu_hotplug_init(void)
87{ 88{
88} 89}
89 90
91static inline void cpu_maps_update_begin(void)
92{
93}
94
95static inline void cpu_maps_update_done(void)
96{
97}
98
90#endif /* CONFIG_SMP */ 99#endif /* CONFIG_SMP */
91extern struct sysdev_class cpu_sysdev_class; 100extern struct sysdev_class cpu_sysdev_class;
92extern void cpu_maps_update_begin(void);
93extern void cpu_maps_update_done(void);
94 101
95#ifdef CONFIG_HOTPLUG_CPU 102#ifdef CONFIG_HOTPLUG_CPU
96/* Stop CPUs going up and down. */ 103/* Stop CPUs going up and down. */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c24875bd9c5b..1b5c98e7fef7 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -17,6 +17,20 @@
17 * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. 17 * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
18 * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. 18 * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
19 * 19 *
20 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
21 * Note: The alternate operations with the suffix "_nr" are used
22 * to limit the range of the loop to nr_cpu_ids instead of
23 * NR_CPUS when NR_CPUS > 64 for performance reasons.
24 * If NR_CPUS is <= 64 then most assembler bitmask
25 * operators execute faster with a constant range, so
26 * the operator will continue to use NR_CPUS.
27 *
28 * Another consideration is that nr_cpu_ids is initialized
29 * to NR_CPUS and isn't lowered until the possible cpus are
30 * discovered (including any disabled cpus). So early uses
31 * will span the entire range of NR_CPUS.
32 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
33 *
20 * The available cpumask operations are: 34 * The available cpumask operations are:
21 * 35 *
22 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask 36 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
@@ -38,18 +52,60 @@
 38 * int cpus_empty(mask) Is mask empty (no bits set)? 52 * int cpus_empty(mask) Is mask empty (no bits set)?
 39 * int cpus_full(mask) Is mask full (all bits set)? 53 * int cpus_full(mask) Is mask full (all bits set)?
 40 * int cpus_weight(mask) Hamming weight - number of set bits 54 * int cpus_weight(mask) Hamming weight - number of set bits
55 * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS
41 * 56 *
42 * void cpus_shift_right(dst, src, n) Shift right 57 * void cpus_shift_right(dst, src, n) Shift right
43 * void cpus_shift_left(dst, src, n) Shift left 58 * void cpus_shift_left(dst, src, n) Shift left
44 * 59 *
45 * int first_cpu(mask) Number lowest set bit, or NR_CPUS 60 * int first_cpu(mask) Number lowest set bit, or NR_CPUS
46 * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS 61 * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
62 * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
47 * 63 *
48 * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set 64 * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
65 *ifdef CONFIG_HAS_CPUMASK_OF_CPU
66 * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v
67 * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu]
68 * cpumask_of_cpu_ptr(v, cpu) Combines above two operations
69 *else
70 * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v
71 * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu)
72 * cpumask_of_cpu_ptr(v, cpu) Combines above two operations
73 *endif
49 * CPU_MASK_ALL Initializer - all bits set 74 * CPU_MASK_ALL Initializer - all bits set
50 * CPU_MASK_NONE Initializer - no bits set 75 * CPU_MASK_NONE Initializer - no bits set
51 * unsigned long *cpus_addr(mask) Array of unsigned long's in mask 76 * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
52 * 77 *
78 * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
79 * variables, and CPUMASK_PTR provides pointers to each field.
80 *
81 * The structure should be defined something like this:
82 * struct my_cpumasks {
83 * cpumask_t mask1;
84 * cpumask_t mask2;
85 * };
86 *
87 * Usage is then:
88 * CPUMASK_ALLOC(my_cpumasks);
89 * CPUMASK_PTR(mask1, my_cpumasks);
90 * CPUMASK_PTR(mask2, my_cpumasks);
91 *
92 * --- DO NOT reference cpumask_t pointers until this check ---
93 * if (my_cpumasks == NULL)
94 * "kmalloc failed"...
95 *
96 * References are now pointers to the cpumask_t variables (*mask1, ...)
97 *
98 *if NR_CPUS > BITS_PER_LONG
99 * CPUMASK_ALLOC(m) Declares and allocates struct m *m =
100 * kmalloc(sizeof(*m), GFP_KERNEL)
101 * CPUMASK_FREE(m) Macro for kfree(m)
102 *else
103 * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m
104 * CPUMASK_FREE(m) Nop
105 *endif
106 * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v)
107 * ------------------------------------------------------------------------
108 *
53 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing 109 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
54 * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask 110 * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
55 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing 111 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
@@ -59,7 +115,8 @@
59 * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap 115 * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
60 * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz 116 * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
61 * 117 *
62 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask 118 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS
119 * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids
63 * 120 *
64 * int num_online_cpus() Number of online CPUs 121 * int num_online_cpus() Number of online CPUs
65 * int num_possible_cpus() Number of all possible CPUs 122 * int num_possible_cpus() Number of all possible CPUs
@@ -216,23 +273,19 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
216 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 273 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
217} 274}
218 275
219#ifdef CONFIG_SMP
220int __first_cpu(const cpumask_t *srcp);
221#define first_cpu(src) __first_cpu(&(src))
222int __next_cpu(int n, const cpumask_t *srcp);
223#define next_cpu(n, src) __next_cpu((n), &(src))
224#else
225#define first_cpu(src) ({ (void)(src); 0; })
226#define next_cpu(n, src) ({ (void)(src); 1; })
227#endif
228 276
229#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP 277#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
230extern cpumask_t *cpumask_of_cpu_map; 278extern cpumask_t *cpumask_of_cpu_map;
231#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) 279#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
232 280#define cpumask_of_cpu_ptr(v, cpu) \
281 const cpumask_t *v = &cpumask_of_cpu(cpu)
282#define cpumask_of_cpu_ptr_declare(v) \
283 const cpumask_t *v
284#define cpumask_of_cpu_ptr_next(v, cpu) \
285 v = &cpumask_of_cpu(cpu)
233#else 286#else
234#define cpumask_of_cpu(cpu) \ 287#define cpumask_of_cpu(cpu) \
235(*({ \ 288({ \
236 typeof(_unused_cpumask_arg_) m; \ 289 typeof(_unused_cpumask_arg_) m; \
237 if (sizeof(m) == sizeof(unsigned long)) { \ 290 if (sizeof(m) == sizeof(unsigned long)) { \
238 m.bits[0] = 1UL<<(cpu); \ 291 m.bits[0] = 1UL<<(cpu); \
@@ -240,8 +293,16 @@ extern cpumask_t *cpumask_of_cpu_map;
240 cpus_clear(m); \ 293 cpus_clear(m); \
241 cpu_set((cpu), m); \ 294 cpu_set((cpu), m); \
242 } \ 295 } \
243 &m; \ 296 m; \
244})) 297})
298#define cpumask_of_cpu_ptr(v, cpu) \
299 cpumask_t _##v = cpumask_of_cpu(cpu); \
300 const cpumask_t *v = &_##v
301#define cpumask_of_cpu_ptr_declare(v) \
302 cpumask_t _##v; \
303 const cpumask_t *v = &_##v
304#define cpumask_of_cpu_ptr_next(v, cpu) \
305 _##v = cpumask_of_cpu(cpu)
245#endif 306#endif
246 307
247#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) 308#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
@@ -281,6 +342,15 @@ extern cpumask_t cpu_mask_all;
281 342
282#define cpus_addr(src) ((src).bits) 343#define cpus_addr(src) ((src).bits)
283 344
345#if NR_CPUS > BITS_PER_LONG
346#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
347#define CPUMASK_FREE(m) kfree(m)
348#else
349#define CPUMASK_ALLOC(m) struct m _m, *m = &_m
350#define CPUMASK_FREE(m)
351#endif
352#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
353
284#define cpumask_scnprintf(buf, len, src) \ 354#define cpumask_scnprintf(buf, len, src) \
285 __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) 355 __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
286static inline int __cpumask_scnprintf(char *buf, int len, 356static inline int __cpumask_scnprintf(char *buf, int len,
@@ -343,29 +413,59 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
343 bitmap_fold(dstp->bits, origp->bits, sz, nbits); 413 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
344} 414}
345 415
346#if NR_CPUS > 1 416#if NR_CPUS == 1
347#define for_each_cpu_mask(cpu, mask) \ 417
348 for ((cpu) = first_cpu(mask); \ 418#define nr_cpu_ids 1
349 (cpu) < NR_CPUS; \ 419#define first_cpu(src) ({ (void)(src); 0; })
350 (cpu) = next_cpu((cpu), (mask))) 420#define next_cpu(n, src) ({ (void)(src); 1; })
351#else /* NR_CPUS == 1 */ 421#define any_online_cpu(mask) 0
352#define for_each_cpu_mask(cpu, mask) \ 422#define for_each_cpu_mask(cpu, mask) \
353 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 423 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
354#endif /* NR_CPUS */ 424
425#else /* NR_CPUS > 1 */
426
427extern int nr_cpu_ids;
428int __first_cpu(const cpumask_t *srcp);
429int __next_cpu(int n, const cpumask_t *srcp);
430int __any_online_cpu(const cpumask_t *mask);
431
432#define first_cpu(src) __first_cpu(&(src))
433#define next_cpu(n, src) __next_cpu((n), &(src))
434#define any_online_cpu(mask) __any_online_cpu(&(mask))
435#define for_each_cpu_mask(cpu, mask) \
436 for ((cpu) = -1; \
437 (cpu) = next_cpu((cpu), (mask)), \
438 (cpu) < NR_CPUS; )
439#endif
440
441#if NR_CPUS <= 64
355 442
356#define next_cpu_nr(n, src) next_cpu(n, src) 443#define next_cpu_nr(n, src) next_cpu(n, src)
357#define cpus_weight_nr(cpumask) cpus_weight(cpumask) 444#define cpus_weight_nr(cpumask) cpus_weight(cpumask)
358#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) 445#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
359 446
447#else /* NR_CPUS > 64 */
448
449int __next_cpu_nr(int n, const cpumask_t *srcp);
450#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src))
451#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
452#define for_each_cpu_mask_nr(cpu, mask) \
453 for ((cpu) = -1; \
454 (cpu) = next_cpu_nr((cpu), (mask)), \
455 (cpu) < nr_cpu_ids; )
456
457#endif /* NR_CPUS > 64 */
458
360/* 459/*
361 * The following particular system cpumasks and operations manage 460 * The following particular system cpumasks and operations manage
362 * possible, present and online cpus. Each of them is a fixed size 461 * possible, present, active and online cpus. Each of them is a fixed size
363 * bitmap of size NR_CPUS. 462 * bitmap of size NR_CPUS.
364 * 463 *
365 * #ifdef CONFIG_HOTPLUG_CPU 464 * #ifdef CONFIG_HOTPLUG_CPU
366 * cpu_possible_map - has bit 'cpu' set iff cpu is populatable 465 * cpu_possible_map - has bit 'cpu' set iff cpu is populatable
367 * cpu_present_map - has bit 'cpu' set iff cpu is populated 466 * cpu_present_map - has bit 'cpu' set iff cpu is populated
368 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler 467 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
468 * cpu_active_map - has bit 'cpu' set iff cpu available to migration
369 * #else 469 * #else
370 * cpu_possible_map - has bit 'cpu' set iff cpu is populated 470 * cpu_possible_map - has bit 'cpu' set iff cpu is populated
371 * cpu_present_map - copy of cpu_possible_map 471 * cpu_present_map - copy of cpu_possible_map
@@ -416,14 +516,16 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
416extern cpumask_t cpu_possible_map; 516extern cpumask_t cpu_possible_map;
417extern cpumask_t cpu_online_map; 517extern cpumask_t cpu_online_map;
418extern cpumask_t cpu_present_map; 518extern cpumask_t cpu_present_map;
519extern cpumask_t cpu_active_map;
419 520
420#if NR_CPUS > 1 521#if NR_CPUS > 1
421#define num_online_cpus() cpus_weight(cpu_online_map) 522#define num_online_cpus() cpus_weight_nr(cpu_online_map)
422#define num_possible_cpus() cpus_weight(cpu_possible_map) 523#define num_possible_cpus() cpus_weight_nr(cpu_possible_map)
423#define num_present_cpus() cpus_weight(cpu_present_map) 524#define num_present_cpus() cpus_weight_nr(cpu_present_map)
424#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) 525#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
425#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) 526#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
426#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) 527#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
528#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map)
427#else 529#else
428#define num_online_cpus() 1 530#define num_online_cpus() 1
429#define num_possible_cpus() 1 531#define num_possible_cpus() 1
@@ -431,21 +533,13 @@ extern cpumask_t cpu_present_map;
431#define cpu_online(cpu) ((cpu) == 0) 533#define cpu_online(cpu) ((cpu) == 0)
432#define cpu_possible(cpu) ((cpu) == 0) 534#define cpu_possible(cpu) ((cpu) == 0)
433#define cpu_present(cpu) ((cpu) == 0) 535#define cpu_present(cpu) ((cpu) == 0)
536#define cpu_active(cpu) ((cpu) == 0)
434#endif 537#endif
435 538
436#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 539#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
437 540
438#ifdef CONFIG_SMP 541#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
439extern int nr_cpu_ids; 542#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map)
440#define any_online_cpu(mask) __any_online_cpu(&(mask)) 543#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
441int __any_online_cpu(const cpumask_t *mask);
442#else
443#define nr_cpu_ids 1
444#define any_online_cpu(mask) 0
445#endif
446
447#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
448#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
449#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
450 544
451#endif /* __LINUX_CPUMASK_H */ 545#endif /* __LINUX_CPUMASK_H */
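
The hunk above replaces the NR_CPUS-bounded iterators with nr_cpu_ids-aware "_nr" variants and points num_*_cpus() at cpus_weight_nr(). A minimal sketch of a caller, using only the macros declared above (the printk reporting is illustrative, not part of the patch):

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	static void report_cpus(void)
	{
		int cpu, n = 0;

		/* for_each_online_cpu() now expands to for_each_cpu_mask_nr(),
		 * so the walk stops at nr_cpu_ids rather than NR_CPUS. */
		for_each_online_cpu(cpu)
			n++;

		/* cpus_weight_nr() gives the same count without the loop. */
		WARN_ON(n != num_online_cpus());
		printk(KERN_INFO "%d online of %d possible CPUs\n",
		       n, num_possible_cpus());
	}
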
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 038578362b47..e8f450c499b0 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -78,6 +78,8 @@ extern void cpuset_track_online_nodes(void);
78 78
79extern int current_cpuset_is_being_rebound(void); 79extern int current_cpuset_is_being_rebound(void);
80 80
81extern void rebuild_sched_domains(void);
82
81#else /* !CONFIG_CPUSETS */ 83#else /* !CONFIG_CPUSETS */
82 84
83static inline int cpuset_init_early(void) { return 0; } 85static inline int cpuset_init_early(void) { return 0; }
@@ -156,6 +158,11 @@ static inline int current_cpuset_is_being_rebound(void)
156 return 0; 158 return 0;
157} 159}
158 160
161static inline void rebuild_sched_domains(void)
162{
163 partition_sched_domains(0, NULL, NULL);
164}
165
159#endif /* !CONFIG_CPUSETS */ 166#endif /* !CONFIG_CPUSETS */
160 167
161#endif /* _LINUX_CPUSET_H */ 168#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 22c7ac5cd80c..6cd39a927e1f 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -22,5 +22,13 @@ extern struct proc_dir_entry *proc_vmcore;
22 22
23#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) 23#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
24 24
25static inline int is_kdump_kernel(void)
26{
27 return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
28}
29#else /* !CONFIG_CRASH_DUMP */
30static inline int is_kdump_kernel(void) { return 0; }
25#endif /* CONFIG_CRASH_DUMP */ 31#endif /* CONFIG_CRASH_DUMP */
32
33extern unsigned long saved_max_pfn;
26#endif /* LINUX_CRASHDUMP_H */ 34#endif /* LINUX_CRASHDUMP_H */
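
is_kdump_kernel() now has a stub for !CONFIG_CRASH_DUMP, so callers no longer need their own #ifdefs. A hedged sketch of the intended use; the driver scenario is hypothetical:

	#include <linux/crash_dump.h>

	static int example_probe(void)
	{
		/* In the capture kernel, skip resets that would disturb
		 * device state the crashed kernel left behind. */
		if (is_kdump_kernel())
			return 0;

		/* normal probe path */
		return 1;
	}
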
diff --git a/include/linux/dca.h b/include/linux/dca.h
index af61cd1f37e9..b00a753eda53 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -10,6 +10,7 @@ void dca_unregister_notify(struct notifier_block *nb);
10#define DCA_PROVIDER_REMOVE 0x0002 10#define DCA_PROVIDER_REMOVE 0x0002
11 11
12struct dca_provider { 12struct dca_provider {
13 struct list_head node;
13 struct dca_ops *ops; 14 struct dca_ops *ops;
14 struct device *cd; 15 struct device *cd;
15 int id; 16 int id;
@@ -18,7 +19,9 @@ struct dca_provider {
18struct dca_ops { 19struct dca_ops {
19 int (*add_requester) (struct dca_provider *, struct device *); 20 int (*add_requester) (struct dca_provider *, struct device *);
20 int (*remove_requester) (struct dca_provider *, struct device *); 21 int (*remove_requester) (struct dca_provider *, struct device *);
21 u8 (*get_tag) (struct dca_provider *, int cpu); 22 u8 (*get_tag) (struct dca_provider *, struct device *,
23 int cpu);
24 int (*dev_managed) (struct dca_provider *, struct device *);
22}; 25};
23 26
24struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); 27struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
@@ -32,9 +35,11 @@ static inline void *dca_priv(struct dca_provider *dca)
32} 35}
33 36
34/* Requester API */ 37/* Requester API */
38#define DCA_GET_TAG_TWO_ARGS
35int dca_add_requester(struct device *dev); 39int dca_add_requester(struct device *dev);
36int dca_remove_requester(struct device *dev); 40int dca_remove_requester(struct device *dev);
37u8 dca_get_tag(int cpu); 41u8 dca_get_tag(int cpu);
42u8 dca3_get_tag(struct device *dev, int cpu);
38 43
39/* internal stuff */ 44/* internal stuff */
40int __init dca_sysfs_init(void); 45int __init dca_sysfs_init(void);
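
dca3_get_tag() and the extra struct device argument to ->get_tag() let a provider compute per-requester tags. A sketch of the requester side only, assuming the provider supports the new interface; programming the tag into hardware is left as a comment:

	#include <linux/dca.h>

	static int example_setup_dca(struct device *reqdev, int cpu)
	{
		int err;
		u8 tag;

		err = dca_add_requester(reqdev);
		if (err < 0)
			return err;

		/* new 3-arg style: the provider sees which device is asking */
		tag = dca3_get_tag(reqdev, cpu);
		/* ... write 'tag' into the adapter's DCA control register ... */

		return 0;
	}
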
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index ab94bc083558..f352f06fa063 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -39,6 +39,8 @@ extern void __delayacct_blkio_start(void);
39extern void __delayacct_blkio_end(void); 39extern void __delayacct_blkio_end(void);
40extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); 40extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
41extern __u64 __delayacct_blkio_ticks(struct task_struct *); 41extern __u64 __delayacct_blkio_ticks(struct task_struct *);
42extern void __delayacct_freepages_start(void);
43extern void __delayacct_freepages_end(void);
42 44
43static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) 45static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
44{ 46{
@@ -107,6 +109,18 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
107 return 0; 109 return 0;
108} 110}
109 111
112static inline void delayacct_freepages_start(void)
113{
114 if (current->delays)
115 __delayacct_freepages_start();
116}
117
118static inline void delayacct_freepages_end(void)
119{
120 if (current->delays)
121 __delayacct_freepages_end();
122}
123
110#else 124#else
111static inline void delayacct_set_flag(int flag) 125static inline void delayacct_set_flag(int flag)
112{} 126{}
@@ -129,6 +143,11 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
129{ return 0; } 143{ return 0; }
130static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) 144static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
131{ return 0; } 145{ return 0; }
146static inline void delayacct_freepages_start(void)
147{}
148static inline void delayacct_freepages_end(void)
149{}
150
132#endif /* CONFIG_TASK_DELAY_ACCT */ 151#endif /* CONFIG_TASK_DELAY_ACCT */
133 152
134#endif 153#endif
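
The new freepages hooks are meant to bracket direct reclaim so the stall can be reported through taskstats, following the pattern of the existing delayacct hooks. A sketch only; the reclaim body is a stand-in:

	#include <linux/delayacct.h>

	static void example_direct_reclaim(void)
	{
		delayacct_freepages_start();

		/* stand-in for the real work, e.g. try_to_free_pages() */
		;

		delayacct_freepages_end();
	}
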
diff --git a/include/linux/dirent.h b/include/linux/dirent.h
index 5d6023b87800..f072fb8d10a3 100644
--- a/include/linux/dirent.h
+++ b/include/linux/dirent.h
@@ -1,23 +1,6 @@
1#ifndef _LINUX_DIRENT_H 1#ifndef _LINUX_DIRENT_H
2#define _LINUX_DIRENT_H 2#define _LINUX_DIRENT_H
3 3
4struct dirent {
5 long d_ino;
6 __kernel_off_t d_off;
7 unsigned short d_reclen;
8 char d_name[256]; /* We must not include limits.h! */
9};
10
11struct dirent64 {
12 __u64 d_ino;
13 __s64 d_off;
14 unsigned short d_reclen;
15 unsigned char d_type;
16 char d_name[256];
17};
18
19#ifdef __KERNEL__
20
21struct linux_dirent64 { 4struct linux_dirent64 {
22 u64 d_ino; 5 u64 d_ino;
23 s64 d_off; 6 s64 d_off;
@@ -26,7 +9,4 @@ struct linux_dirent64 {
26 char d_name[0]; 9 char d_name[0];
27}; 10};
28 11
29#endif /* __KERNEL__ */
30
31
32#endif 12#endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d08a5c5eb928..adb0b084eb5a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -89,10 +89,23 @@ enum dma_transaction_type {
89 DMA_MEMSET, 89 DMA_MEMSET,
90 DMA_MEMCPY_CRC32C, 90 DMA_MEMCPY_CRC32C,
91 DMA_INTERRUPT, 91 DMA_INTERRUPT,
92 DMA_SLAVE,
92}; 93};
93 94
94/* last transaction type for creation of the capabilities mask */ 95/* last transaction type for creation of the capabilities mask */
95#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) 96#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
97
98/**
99 * enum dma_slave_width - DMA slave register access width.
100 * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
101 * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
102 * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
103 */
104enum dma_slave_width {
105 DMA_SLAVE_WIDTH_8BIT,
106 DMA_SLAVE_WIDTH_16BIT,
107 DMA_SLAVE_WIDTH_32BIT,
108};
96 109
97/** 110/**
98 * enum dma_ctrl_flags - DMA flags to augment operation preparation, 111 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -102,10 +115,14 @@ enum dma_transaction_type {
102 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client 115 * acknowledges receipt, i.e. has had a chance to establish any
103 * acknowledges receipt, i.e. has had a chance to establish any 116 * dependency chains
104 * dependency chains 117 * dependency chains
118 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
119 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
105 */ 120 */
106enum dma_ctrl_flags { 121enum dma_ctrl_flags {
107 DMA_PREP_INTERRUPT = (1 << 0), 122 DMA_PREP_INTERRUPT = (1 << 0),
108 DMA_CTRL_ACK = (1 << 1), 123 DMA_CTRL_ACK = (1 << 1),
124 DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
125 DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
109}; 126};
110 127
111/** 128/**
@@ -115,6 +132,32 @@ enum dma_ctrl_flags {
115typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; 132typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
116 133
117/** 134/**
135 * struct dma_slave - Information about a DMA slave
136 * @dev: device acting as DMA slave
137 * @dma_dev: required DMA master device. If non-NULL, the client can not be
138 * bound to other masters than this.
139 * @tx_reg: physical address of data register used for
140 * memory-to-peripheral transfers
141 * @rx_reg: physical address of data register used for
142 * peripheral-to-memory transfers
143 * @reg_width: peripheral register width
144 *
145 * If dma_dev is non-NULL, the client can not be bound to other DMA
146 * masters than the one corresponding to this device. The DMA master
147 * driver may use this to determine if there is controller-specific
148 * data wrapped around this struct. Drivers or platform code that set
149 * the dma_dev field must therefore make sure to use an appropriate
150 * controller-specific dma slave structure wrapping this struct.
151 */
152struct dma_slave {
153 struct device *dev;
154 struct device *dma_dev;
155 dma_addr_t tx_reg;
156 dma_addr_t rx_reg;
157 enum dma_slave_width reg_width;
158};
159
160/**
118 * struct dma_chan_percpu - the per-CPU part of struct dma_chan 161 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
119 * @refcount: local_t used for open-coded "bigref" counting 162 * @refcount: local_t used for open-coded "bigref" counting
120 * @memcpy_count: transaction counter 163 * @memcpy_count: transaction counter
@@ -139,6 +182,7 @@ struct dma_chan_percpu {
139 * @rcu: the DMA channel's RCU head 182 * @rcu: the DMA channel's RCU head
140 * @device_node: used to add this to the device chan list 183 * @device_node: used to add this to the device chan list
141 * @local: per-cpu pointer to a struct dma_chan_percpu 184 * @local: per-cpu pointer to a struct dma_chan_percpu
185 * @client-count: how many clients are using this channel
142 */ 186 */
143struct dma_chan { 187struct dma_chan {
144 struct dma_device *device; 188 struct dma_device *device;
@@ -154,6 +198,7 @@ struct dma_chan {
154 198
155 struct list_head device_node; 199 struct list_head device_node;
156 struct dma_chan_percpu *local; 200 struct dma_chan_percpu *local;
201 int client_count;
157}; 202};
158 203
159#define to_dma_chan(p) container_of(p, struct dma_chan, dev) 204#define to_dma_chan(p) container_of(p, struct dma_chan, dev)
@@ -202,11 +247,14 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
202 * @event_callback: func ptr to call when something happens 247 * @event_callback: func ptr to call when something happens
203 * @cap_mask: only return channels that satisfy the requested capabilities 248 * @cap_mask: only return channels that satisfy the requested capabilities
204 * a value of zero corresponds to any capability 249 * a value of zero corresponds to any capability
250 * @slave: data for preparing slave transfer. Must be non-NULL iff the
251 * DMA_SLAVE capability is requested.
205 * @global_node: list_head for global dma_client_list 252 * @global_node: list_head for global dma_client_list
206 */ 253 */
207struct dma_client { 254struct dma_client {
208 dma_event_callback event_callback; 255 dma_event_callback event_callback;
209 dma_cap_mask_t cap_mask; 256 dma_cap_mask_t cap_mask;
257 struct dma_slave *slave;
210 struct list_head global_node; 258 struct list_head global_node;
211}; 259};
212 260
@@ -263,6 +311,8 @@ struct dma_async_tx_descriptor {
263 * @device_prep_dma_zero_sum: prepares a zero_sum operation 311 * @device_prep_dma_zero_sum: prepares a zero_sum operation
264 * @device_prep_dma_memset: prepares a memset operation 312 * @device_prep_dma_memset: prepares a memset operation
265 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 313 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
314 * @device_prep_slave_sg: prepares a slave dma operation
315 * @device_terminate_all: terminate all pending operations
266 * @device_issue_pending: push pending transactions to hardware 316 * @device_issue_pending: push pending transactions to hardware
267 */ 317 */
268struct dma_device { 318struct dma_device {
@@ -279,7 +329,8 @@ struct dma_device {
279 int dev_id; 329 int dev_id;
280 struct device *dev; 330 struct device *dev;
281 331
282 int (*device_alloc_chan_resources)(struct dma_chan *chan); 332 int (*device_alloc_chan_resources)(struct dma_chan *chan,
333 struct dma_client *client);
283 void (*device_free_chan_resources)(struct dma_chan *chan); 334 void (*device_free_chan_resources)(struct dma_chan *chan);
284 335
285 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( 336 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -297,6 +348,12 @@ struct dma_device {
297 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 348 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
298 struct dma_chan *chan, unsigned long flags); 349 struct dma_chan *chan, unsigned long flags);
299 350
351 struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
352 struct dma_chan *chan, struct scatterlist *sgl,
353 unsigned int sg_len, enum dma_data_direction direction,
354 unsigned long flags);
355 void (*device_terminate_all)(struct dma_chan *chan);
356
300 enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, 357 enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
301 dma_cookie_t cookie, dma_cookie_t *last, 358 dma_cookie_t cookie, dma_cookie_t *last,
302 dma_cookie_t *used); 359 dma_cookie_t *used);
@@ -318,16 +375,14 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
318void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 375void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
319 struct dma_chan *chan); 376 struct dma_chan *chan);
320 377
321static inline void 378static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
322async_tx_ack(struct dma_async_tx_descriptor *tx)
323{ 379{
324 tx->flags |= DMA_CTRL_ACK; 380 tx->flags |= DMA_CTRL_ACK;
325} 381}
326 382
327static inline int 383static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
328async_tx_test_ack(struct dma_async_tx_descriptor *tx)
329{ 384{
330 return tx->flags & DMA_CTRL_ACK; 385 return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
331} 386}
332 387
333#define first_dma_cap(mask) __first_dma_cap(&(mask)) 388#define first_dma_cap(mask) __first_dma_cap(&(mask))
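
Putting the new slave pieces together: a peripheral driver describes its FIFO registers in a struct dma_slave, sets the DMA_SLAVE capability bit and registers a dma_client. dma_cap_set(), dma_async_client_register() and enum dma_state/DMA_ACK are from the existing dmaengine API rather than this hunk, and the register addresses are invented, so treat this as a sketch only:

	#include <linux/dmaengine.h>

	static struct dma_slave example_slave = {
		.tx_reg		= 0xfff02000,	/* hypothetical TX data register */
		.rx_reg		= 0xfff02004,	/* hypothetical RX data register */
		.reg_width	= DMA_SLAVE_WIDTH_32BIT,
	};

	static enum dma_state_client example_event(struct dma_client *client,
						   struct dma_chan *chan,
						   enum dma_state state)
	{
		/* accept channels as they are offered, release on removal */
		return DMA_ACK;
	}

	static struct dma_client example_client = {
		.event_callback	= example_event,
		.slave		= &example_slave,	/* required with DMA_SLAVE */
	};

	static void example_request_dma(struct device *dev)
	{
		example_slave.dev = dev;
		dma_cap_set(DMA_SLAVE, example_client.cap_mask);
		dma_async_client_register(&example_client);
	}
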
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
new file mode 100644
index 000000000000..04d217b442bf
--- /dev/null
+++ b/include/linux/dw_dmac.h
@@ -0,0 +1,62 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
3 * AVR32 systems.)
4 *
5 * Copyright (C) 2007 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef DW_DMAC_H
12#define DW_DMAC_H
13
14#include <linux/dmaengine.h>
15
16/**
17 * struct dw_dma_platform_data - Controller configuration parameters
18 * @nr_channels: Number of channels supported by hardware (max 8)
19 */
20struct dw_dma_platform_data {
21 unsigned int nr_channels;
22};
23
24/**
25 * struct dw_dma_slave - Controller-specific information about a slave
26 * @slave: Generic information about the slave
27 * @ctl_lo: Platform-specific initializer for the CTL_LO register
28 * @cfg_hi: Platform-specific initializer for the CFG_HI register
29 * @cfg_lo: Platform-specific initializer for the CFG_LO register
30 */
31struct dw_dma_slave {
32 struct dma_slave slave;
33 u32 cfg_hi;
34 u32 cfg_lo;
35};
36
37/* Platform-configurable bits in CFG_HI */
38#define DWC_CFGH_FCMODE (1 << 0)
39#define DWC_CFGH_FIFO_MODE (1 << 1)
40#define DWC_CFGH_PROTCTL(x) ((x) << 2)
41#define DWC_CFGH_SRC_PER(x) ((x) << 7)
42#define DWC_CFGH_DST_PER(x) ((x) << 11)
43
44/* Platform-configurable bits in CFG_LO */
45#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */
46#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
47#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
48#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
49#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
50#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14)
51#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
52#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
53#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
54#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
55#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
56
57static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
58{
59 return container_of(slave, struct dw_dma_slave, slave);
60}
61
62#endif /* DW_DMAC_H */
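
A sketch of how platform code might fill in the wrapper for one peripheral, and how the controller driver gets back to it from the generic pointer; the request-line numbers and register addresses are made up for the example:

	#include <linux/dw_dmac.h>

	static struct dw_dma_slave example_dws = {
		.slave = {
			.tx_reg		= 0xfff01000,
			.rx_reg		= 0xfff01004,
			.reg_width	= DMA_SLAVE_WIDTH_16BIT,
		},
		.cfg_hi	= DWC_CFGH_SRC_PER(1) | DWC_CFGH_DST_PER(2),
		.cfg_lo	= 0,
	};

	/* controller side: recover the controller-specific wrapper */
	static u32 example_cfg_hi(struct dma_slave *slave)
	{
		return to_dw_dma_slave(slave)->cfg_hi;
	}
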
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index a701399b7fed..a667637b54e3 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -10,6 +10,13 @@
10 10
11#ifdef CONFIG_EVENTFD 11#ifdef CONFIG_EVENTFD
12 12
13/* For O_CLOEXEC and O_NONBLOCK */
14#include <linux/fcntl.h>
15
16/* Flags for eventfd2. */
17#define EFD_CLOEXEC O_CLOEXEC
18#define EFD_NONBLOCK O_NONBLOCK
19
13struct file *eventfd_fget(int fd); 20struct file *eventfd_fget(int fd);
14int eventfd_signal(struct file *file, int n); 21int eventfd_signal(struct file *file, int n);
15 22
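
EFD_CLOEXEC and EFD_NONBLOCK are the flags userspace passes to the new eventfd2() syscall. A userspace-side sketch, assuming a glibc that exposes the two-argument eventfd() wrapper:

	#include <sys/eventfd.h>

	/* Set close-on-exec and non-blocking atomically at creation time,
	 * instead of racing with a later fcntl(). */
	int make_event_fd(void)
	{
		return eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
	}
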
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index cf79853967ff..f1e1d3c47125 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -14,8 +14,12 @@
14#ifndef _LINUX_EVENTPOLL_H 14#ifndef _LINUX_EVENTPOLL_H
15#define _LINUX_EVENTPOLL_H 15#define _LINUX_EVENTPOLL_H
16 16
17/* For O_CLOEXEC */
18#include <linux/fcntl.h>
17#include <linux/types.h> 19#include <linux/types.h>
18 20
21/* Flags for epoll_create1. */
22#define EPOLL_CLOEXEC O_CLOEXEC
19 23
20/* Valid opcodes to issue to sys_epoll_ctl() */ 24/* Valid opcodes to issue to sys_epoll_ctl() */
21#define EPOLL_CTL_ADD 1 25#define EPOLL_CTL_ADD 1
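
Likewise for epoll: EPOLL_CLOEXEC is consumed by the new epoll_create1() syscall. A userspace-side sketch, assuming a libc wrapper for epoll_create1():

	#include <sys/epoll.h>

	int make_epoll_fd(void)
	{
		/* no window between creation and setting FD_CLOEXEC */
		return epoll_create1(EPOLL_CLOEXEC);
	}
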
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 84cec2aa9f1e..2efe7b863cff 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -284,8 +284,8 @@ struct ext2_inode {
284 284
285#ifdef __hurd__ 285#ifdef __hurd__
286#define i_translator osd1.hurd1.h_i_translator 286#define i_translator osd1.hurd1.h_i_translator
287#define i_frag osd2.hurd2.h_i_frag; 287#define i_frag osd2.hurd2.h_i_frag
288#define i_fsize osd2.hurd2.h_i_fsize; 288#define i_fsize osd2.hurd2.h_i_fsize
289#define i_uid_high osd2.hurd2.h_i_uid_high 289#define i_uid_high osd2.hurd2.h_i_uid_high
290#define i_gid_high osd2.hurd2.h_i_gid_high 290#define i_gid_high osd2.hurd2.h_i_gid_high
291#define i_author osd2.hurd2.h_i_author 291#define i_author osd2.hurd2.h_i_author
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 36c540396377..80171ee89a22 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -832,6 +832,7 @@ extern void ext3_discard_reservation (struct inode *);
832extern void ext3_dirty_inode(struct inode *); 832extern void ext3_dirty_inode(struct inode *);
833extern int ext3_change_inode_journal_flag(struct inode *, int); 833extern int ext3_change_inode_journal_flag(struct inode *, int);
834extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *); 834extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
835extern int ext3_can_truncate(struct inode *inode);
835extern void ext3_truncate (struct inode *); 836extern void ext3_truncate (struct inode *);
836extern void ext3_set_inode_flags(struct inode *); 837extern void ext3_set_inode_flags(struct inode *);
837extern void ext3_get_inode_flags(struct ext3_inode_info *); 838extern void ext3_get_inode_flags(struct ext3_inode_info *);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 72295b099228..3b8870e32afd 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -8,7 +8,6 @@ struct dentry;
8 8
9/* Definitions of frame buffers */ 9/* Definitions of frame buffers */
10 10
11#define FB_MAJOR 29
12#define FB_MAX 32 /* sufficient for now */ 11#define FB_MAX 32 /* sufficient for now */
13 12
14/* ioctls 13/* ioctls
@@ -120,6 +119,10 @@ struct dentry;
120#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ 119#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */
121#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ 120#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */
122#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ 121#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */
122#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */
123#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */
124#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */
125#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */
123#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ 126#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
124#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ 127#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
125#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ 128#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
diff --git a/include/linux/fd1772.h b/include/linux/fd1772.h
deleted file mode 100644
index 871d6e4c677e..000000000000
--- a/include/linux/fd1772.h
+++ /dev/null
@@ -1,80 +0,0 @@
1#ifndef _LINUX_FD1772REG_H
2#define _LINUX_FD1772REG_H
3
4/*
5** WD1772 stuff - originally from the M68K Linux
6 * Modified for Archimedes by Dave Gilbert (gilbertd@cs.man.ac.uk)
7 */
8
9/* register codes */
10
11#define FDC1772SELREG_STP (0x80) /* command/status register */
12#define FDC1772SELREG_TRA (0x82) /* track register */
13#define FDC1772SELREG_SEC (0x84) /* sector register */
14#define FDC1772SELREG_DTA (0x86) /* data register */
15
16/* register names for FDC1772_READ/WRITE macros */
17
18#define FDC1772REG_CMD 0
19#define FDC1772REG_STATUS 0
20#define FDC1772REG_TRACK 2
21#define FDC1772REG_SECTOR 4
22#define FDC1772REG_DATA 6
23
24/* command opcodes */
25
26#define FDC1772CMD_RESTORE (0x00) /* - */
27#define FDC1772CMD_SEEK (0x10) /* | */
28#define FDC1772CMD_STEP (0x20) /* | TYP 1 Commands */
29#define FDC1772CMD_STIN (0x40) /* | */
30#define FDC1772CMD_STOT (0x60) /* - */
31#define FDC1772CMD_RDSEC (0x80) /* - TYP 2 Commands */
32#define FDC1772CMD_WRSEC (0xa0) /* - " */
33#define FDC1772CMD_RDADR (0xc0) /* - */
34#define FDC1772CMD_RDTRA (0xe0) /* | TYP 3 Commands */
35#define FDC1772CMD_WRTRA (0xf0) /* - */
36#define FDC1772CMD_FORCI (0xd0) /* - TYP 4 Command */
37
38/* command modifier bits */
39
40#define FDC1772CMDADD_SR6 (0x00) /* step rate settings */
41#define FDC1772CMDADD_SR12 (0x01)
42#define FDC1772CMDADD_SR2 (0x02)
43#define FDC1772CMDADD_SR3 (0x03)
44#define FDC1772CMDADD_V (0x04) /* verify */
45#define FDC1772CMDADD_H (0x08) /* wait for spin-up */
46#define FDC1772CMDADD_U (0x10) /* update track register */
47#define FDC1772CMDADD_M (0x10) /* multiple sector access */
48#define FDC1772CMDADD_E (0x04) /* head settling flag */
49#define FDC1772CMDADD_P (0x02) /* precompensation */
50#define FDC1772CMDADD_A0 (0x01) /* DAM flag */
51
52/* status register bits */
53
54#define FDC1772STAT_MOTORON (0x80) /* motor on */
55#define FDC1772STAT_WPROT (0x40) /* write protected (FDC1772CMD_WR*) */
56#define FDC1772STAT_SPINUP (0x20) /* motor speed stable (Type I) */
57#define FDC1772STAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */
58#define FDC1772STAT_RECNF (0x10) /* record not found */
59#define FDC1772STAT_CRC (0x08) /* CRC error */
60#define FDC1772STAT_TR00 (0x04) /* Track 00 flag (Type I) */
61#define FDC1772STAT_LOST (0x04) /* Lost Data (Type II+III) */
62#define FDC1772STAT_IDX (0x02) /* Index status (Type I) */
63#define FDC1772STAT_DRQ (0x02) /* DRQ status (Type II+III) */
64#define FDC1772STAT_BUSY (0x01) /* FDC1772 is busy */
65
66
67/* PSG Port A Bit Nr 0 .. Side Sel .. 0 -> Side 1 1 -> Side 2 */
68#define DSKSIDE (0x01)
69
70#define DSKDRVNONE (0x06)
71#define DSKDRV0 (0x02)
72#define DSKDRV1 (0x04)
73
74/* step rates */
75#define FDC1772STEP_6 0x00
76#define FDC1772STEP_12 0x01
77#define FDC1772STEP_2 0x02
78#define FDC1772STEP_3 0x03
79
80#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9c2ac5c0ef5c..49d8eb7a71be 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -886,6 +886,12 @@ static inline int file_check_writeable(struct file *filp)
886#define FL_SLEEP 128 /* A blocking lock */ 886#define FL_SLEEP 128 /* A blocking lock */
887 887
888/* 888/*
889 * Special return value from posix_lock_file() and vfs_lock_file() for
890 * asynchronous locking.
891 */
892#define FILE_LOCK_DEFERRED 1
893
894/*
889 * The POSIX file lock owner is determined by 895 * The POSIX file lock owner is determined by
890 * the "struct files_struct" in the thread group 896 * the "struct files_struct" in the thread group
891 * (or NULL for no owner - BSD locks). 897 * (or NULL for no owner - BSD locks).
@@ -1025,6 +1031,7 @@ extern int send_sigurg(struct fown_struct *fown);
1025extern struct list_head super_blocks; 1031extern struct list_head super_blocks;
1026extern spinlock_t sb_lock; 1032extern spinlock_t sb_lock;
1027 1033
1034#define sb_entry(list) list_entry((list), struct super_block, s_list)
1028#define S_BIAS (1<<30) 1035#define S_BIAS (1<<30)
1029struct super_block { 1036struct super_block {
1030 struct list_head s_list; /* Keep this first */ 1037 struct list_head s_list; /* Keep this first */
@@ -1058,6 +1065,9 @@ struct super_block {
1058 struct list_head s_more_io; /* parked for more writeback */ 1065 struct list_head s_more_io; /* parked for more writeback */
1059 struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ 1066 struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
1060 struct list_head s_files; 1067 struct list_head s_files;
1068 /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
1069 struct list_head s_dentry_lru; /* unused dentry lru */
1070 int s_nr_dentry_unused; /* # of dentry on lru */
1061 1071
1062 struct block_device *s_bdev; 1072 struct block_device *s_bdev;
1063 struct mtd_info *s_mtd; 1073 struct mtd_info *s_mtd;
@@ -1773,8 +1783,9 @@ static inline void allow_write_access(struct file *file)
1773 atomic_inc(&file->f_path.dentry->d_inode->i_writecount); 1783 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
1774} 1784}
1775extern int do_pipe(int *); 1785extern int do_pipe(int *);
1776extern struct file *create_read_pipe(struct file *f); 1786extern int do_pipe_flags(int *, int);
1777extern struct file *create_write_pipe(void); 1787extern struct file *create_read_pipe(struct file *f, int flags);
1788extern struct file *create_write_pipe(int flags);
1778extern void free_write_pipe(struct file *); 1789extern void free_write_pipe(struct file *);
1779 1790
1780extern struct file *do_filp_open(int dfd, const char *pathname, 1791extern struct file *do_filp_open(int dfd, const char *pathname,
@@ -2006,8 +2017,6 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
2006 2017
2007extern ssize_t simple_read_from_buffer(void __user *to, size_t count, 2018extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
2008 loff_t *ppos, const void *from, size_t available); 2019 loff_t *ppos, const void *from, size_t available);
2009extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
2010 const void *from, size_t available);
2011 2020
2012#ifdef CONFIG_MIGRATION 2021#ifdef CONFIG_MIGRATION
2013extern int buffer_migrate_page(struct address_space *, 2022extern int buffer_migrate_page(struct address_space *,
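
FILE_LOCK_DEFERRED separates "the result will arrive later" from plain success and error codes for asynchronous lock requests. A hedged sketch of an async-capable caller in the style of lockd; vfs_lock_file() and the ->fl_grant() callback are existing VFS interfaces, not part of this hunk:

	#include <linux/fs.h>
	#include <linux/fcntl.h>

	static int example_async_lock(struct file *filp, struct file_lock *fl)
	{
		int err = vfs_lock_file(filp, F_SETLK, fl, NULL);

		if (err == FILE_LOCK_DEFERRED)
			return 0;	/* lock manager calls back via ->fl_grant() */

		return err;		/* 0 on success, -errno on failure */
	}
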
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index d48282197696..265635dc9908 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -104,11 +104,14 @@ struct fuse_file_lock {
104 104
105/** 105/**
106 * INIT request/reply flags 106 * INIT request/reply flags
107 *
108 * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
107 */ 109 */
108#define FUSE_ASYNC_READ (1 << 0) 110#define FUSE_ASYNC_READ (1 << 0)
109#define FUSE_POSIX_LOCKS (1 << 1) 111#define FUSE_POSIX_LOCKS (1 << 1)
110#define FUSE_FILE_OPS (1 << 2) 112#define FUSE_FILE_OPS (1 << 2)
111#define FUSE_ATOMIC_O_TRUNC (1 << 3) 113#define FUSE_ATOMIC_O_TRUNC (1 << 3)
114#define FUSE_EXPORT_SUPPORT (1 << 4)
112#define FUSE_BIG_WRITES (1 << 5) 115#define FUSE_BIG_WRITES (1 << 5)
113 116
114/** 117/**
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e8787417f65a..118216f1bd3c 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -541,7 +541,7 @@ extern dev_t blk_lookup_devt(const char *name, int part);
541extern char *disk_name (struct gendisk *hd, int part, char *buf); 541extern char *disk_name (struct gendisk *hd, int part, char *buf);
542 542
543extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); 543extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
544extern void add_partition(struct gendisk *, int, sector_t, sector_t, int); 544extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int);
545extern void delete_partition(struct gendisk *, int); 545extern void delete_partition(struct gendisk *, int);
546extern void printk_all_partitions(void); 546extern void printk_all_partitions(void);
547 547
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b414be387180..e8003afeffba 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -173,11 +173,24 @@ static inline void arch_free_page(struct page *page, int order) { }
173static inline void arch_alloc_page(struct page *page, int order) { } 173static inline void arch_alloc_page(struct page *page, int order) { }
174#endif 174#endif
175 175
176extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *); 176struct page *
177__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
178 struct zonelist *zonelist, nodemask_t *nodemask);
179
180static inline struct page *
181__alloc_pages(gfp_t gfp_mask, unsigned int order,
182 struct zonelist *zonelist)
183{
184 return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
185}
186
187static inline struct page *
188__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
189 struct zonelist *zonelist, nodemask_t *nodemask)
190{
191 return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
192}
177 193
178extern struct page *
179__alloc_pages_nodemask(gfp_t, unsigned int,
180 struct zonelist *, nodemask_t *nodemask);
181 194
182static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, 195static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
183 unsigned int order) 196 unsigned int order)
@@ -215,6 +228,9 @@ extern struct page *alloc_page_vma(gfp_t gfp_mask,
215extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 228extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
216extern unsigned long get_zeroed_page(gfp_t gfp_mask); 229extern unsigned long get_zeroed_page(gfp_t gfp_mask);
217 230
231void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
232void free_pages_exact(void *virt, size_t size);
233
218#define __get_free_page(gfp_mask) \ 234#define __get_free_page(gfp_mask) \
219 __get_free_pages((gfp_mask),0) 235 __get_free_pages((gfp_mask),0)
220 236
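
alloc_pages_exact()/free_pages_exact() are new helpers for page-granular allocations that are not a power-of-two number of pages long; unlike __get_free_pages(), the unused tail pages are given back. A minimal sketch of a caller:

	#include <linux/gfp.h>

	static void *example_alloc_table(size_t nbytes)
	{
		/* rounded up to whole pages, but not up to a power of two */
		return alloc_pages_exact(nbytes, GFP_KERNEL);
	}

	static void example_free_table(void *table, size_t nbytes)
	{
		free_pages_exact(table, nbytes);
	}
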
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 98be6c5762b9..730a20b83576 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -79,6 +79,19 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
79 WARN_ON(1); 79 WARN_ON(1);
80} 80}
81 81
82static inline int gpio_export(unsigned gpio, bool direction_may_change)
83{
84 /* GPIO can never have been requested or set as {in,out}put */
85 WARN_ON(1);
86 return -EINVAL;
87}
88
89static inline void gpio_unexport(unsigned gpio)
90{
91 /* GPIO can never have been exported */
92 WARN_ON(1);
93}
94
82static inline int gpio_to_irq(unsigned gpio) 95static inline int gpio_to_irq(unsigned gpio)
83{ 96{
84 /* GPIO can never have been requested or set as input */ 97 /* GPIO can never have been requested or set as input */
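
gpio_export()/gpio_unexport() gain stubs for kernels without gpiolib, so drivers can call them unconditionally. A sketch of a driver publishing one GPIO to sysfs; the GPIO number and label are hypothetical, and gpio_request()/gpio_direction_output()/gpio_free() are existing gpio.h calls:

	#include <linux/gpio.h>

	static int example_export_led(unsigned gpio)
	{
		int err;

		err = gpio_request(gpio, "status-led");
		if (err)
			return err;

		err = gpio_direction_output(gpio, 0);
		if (!err)
			/* visible under /sys/class/gpio; direction stays fixed */
			err = gpio_export(gpio, false);

		if (err)
			gpio_free(gpio);
		return err;
	}
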
diff --git a/include/linux/hid.h b/include/linux/hid.h
index fe56b86f2c67..ac4e678a04ed 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -512,7 +512,7 @@ struct hid_descriptor {
512 512
513/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ 513/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
514/* We ignore a few input applications that are not widely used */ 514/* We ignore a few input applications that are not widely used */
515#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001)) 515#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
516 516
517/* HID core API */ 517/* HID core API */
518 518
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a79e80b689d8..9a71d4cc88c8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -8,7 +8,6 @@
8#include <linux/mempolicy.h> 8#include <linux/mempolicy.h>
9#include <linux/shm.h> 9#include <linux/shm.h>
10#include <asm/tlbflush.h> 10#include <asm/tlbflush.h>
11#include <asm/hugetlb.h>
12 11
13struct ctl_table; 12struct ctl_table;
14 13
@@ -17,38 +16,45 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
17 return vma->vm_flags & VM_HUGETLB; 16 return vma->vm_flags & VM_HUGETLB;
18} 17}
19 18
19void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
20int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); 20int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
21int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); 21int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
22int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); 22int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
23int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); 23int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
24int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int); 24int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int);
25void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); 25void unmap_hugepage_range(struct vm_area_struct *,
26void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); 26 unsigned long, unsigned long, struct page *);
27void __unmap_hugepage_range(struct vm_area_struct *,
28 unsigned long, unsigned long, struct page *);
27int hugetlb_prefault(struct address_space *, struct vm_area_struct *); 29int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
28int hugetlb_report_meminfo(char *); 30int hugetlb_report_meminfo(char *);
29int hugetlb_report_node_meminfo(int, char *); 31int hugetlb_report_node_meminfo(int, char *);
30unsigned long hugetlb_total_pages(void); 32unsigned long hugetlb_total_pages(void);
31int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 33int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
32 unsigned long address, int write_access); 34 unsigned long address, int write_access);
33int hugetlb_reserve_pages(struct inode *inode, long from, long to); 35int hugetlb_reserve_pages(struct inode *inode, long from, long to,
36 struct vm_area_struct *vma);
34void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); 37void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
35 38
36extern unsigned long max_huge_pages;
37extern unsigned long sysctl_overcommit_huge_pages;
38extern unsigned long hugepages_treat_as_movable; 39extern unsigned long hugepages_treat_as_movable;
39extern const unsigned long hugetlb_zero, hugetlb_infinity; 40extern const unsigned long hugetlb_zero, hugetlb_infinity;
40extern int sysctl_hugetlb_shm_group; 41extern int sysctl_hugetlb_shm_group;
42extern struct list_head huge_boot_pages;
41 43
42/* arch callbacks */ 44/* arch callbacks */
43 45
44pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr); 46pte_t *huge_pte_alloc(struct mm_struct *mm,
47 unsigned long addr, unsigned long sz);
45pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); 48pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
46int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); 49int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
47struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, 50struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
48 int write); 51 int write);
49struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, 52struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
50 pmd_t *pmd, int write); 53 pmd_t *pmd, int write);
54struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
55 pud_t *pud, int write);
51int pmd_huge(pmd_t pmd); 56int pmd_huge(pmd_t pmd);
57int pud_huge(pud_t pmd);
52void hugetlb_change_protection(struct vm_area_struct *vma, 58void hugetlb_change_protection(struct vm_area_struct *vma,
53 unsigned long address, unsigned long end, pgprot_t newprot); 59 unsigned long address, unsigned long end, pgprot_t newprot);
54 60
@@ -58,6 +64,11 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
58{ 64{
59 return 0; 65 return 0;
60} 66}
67
68static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
69{
70}
71
61static inline unsigned long hugetlb_total_pages(void) 72static inline unsigned long hugetlb_total_pages(void)
62{ 73{
63 return 0; 74 return 0;
@@ -67,12 +78,14 @@ static inline unsigned long hugetlb_total_pages(void)
67#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) 78#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
68#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) 79#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
69#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) 80#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
70#define unmap_hugepage_range(vma, start, end) BUG() 81#define unmap_hugepage_range(vma, start, end, page) BUG()
71#define hugetlb_report_meminfo(buf) 0 82#define hugetlb_report_meminfo(buf) 0
72#define hugetlb_report_node_meminfo(n, buf) 0 83#define hugetlb_report_node_meminfo(n, buf) 0
73#define follow_huge_pmd(mm, addr, pmd, write) NULL 84#define follow_huge_pmd(mm, addr, pmd, write) NULL
74#define prepare_hugepage_range(addr,len) (-EINVAL) 85#define follow_huge_pud(mm, addr, pud, write) NULL
86#define prepare_hugepage_range(file, addr, len) (-EINVAL)
75#define pmd_huge(x) 0 87#define pmd_huge(x) 0
88#define pud_huge(x) 0
76#define is_hugepage_only_range(mm, addr, len) 0 89#define is_hugepage_only_range(mm, addr, len) 0
77#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) 90#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
78#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) 91#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
@@ -93,6 +106,7 @@ struct hugetlbfs_config {
93 umode_t mode; 106 umode_t mode;
94 long nr_blocks; 107 long nr_blocks;
95 long nr_inodes; 108 long nr_inodes;
109 struct hstate *hstate;
96}; 110};
97 111
98struct hugetlbfs_sb_info { 112struct hugetlbfs_sb_info {
@@ -101,6 +115,7 @@ struct hugetlbfs_sb_info {
101 long max_inodes; /* inodes allowed */ 115 long max_inodes; /* inodes allowed */
102 long free_inodes; /* inodes free */ 116 long free_inodes; /* inodes free */
103 spinlock_t stat_lock; 117 spinlock_t stat_lock;
118 struct hstate *hstate;
104}; 119};
105 120
106 121
@@ -125,8 +140,6 @@ struct file *hugetlb_file_setup(const char *name, size_t);
125int hugetlb_get_quota(struct address_space *mapping, long delta); 140int hugetlb_get_quota(struct address_space *mapping, long delta);
126void hugetlb_put_quota(struct address_space *mapping, long delta); 141void hugetlb_put_quota(struct address_space *mapping, long delta);
127 142
128#define BLOCKS_PER_HUGEPAGE (HPAGE_SIZE / 512)
129
130static inline int is_file_hugepages(struct file *file) 143static inline int is_file_hugepages(struct file *file)
131{ 144{
132 if (file->f_op == &hugetlbfs_file_operations) 145 if (file->f_op == &hugetlbfs_file_operations)
@@ -155,4 +168,112 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
155 unsigned long flags); 168 unsigned long flags);
156#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ 169#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
157 170
171#ifdef CONFIG_HUGETLB_PAGE
172
173#define HSTATE_NAME_LEN 32
174/* Defines one hugetlb page size */
175struct hstate {
176 int hugetlb_next_nid;
177 unsigned int order;
178 unsigned long mask;
179 unsigned long max_huge_pages;
180 unsigned long nr_huge_pages;
181 unsigned long free_huge_pages;
182 unsigned long resv_huge_pages;
183 unsigned long surplus_huge_pages;
184 unsigned long nr_overcommit_huge_pages;
185 struct list_head hugepage_freelists[MAX_NUMNODES];
186 unsigned int nr_huge_pages_node[MAX_NUMNODES];
187 unsigned int free_huge_pages_node[MAX_NUMNODES];
188 unsigned int surplus_huge_pages_node[MAX_NUMNODES];
189 char name[HSTATE_NAME_LEN];
190};
191
192struct huge_bootmem_page {
193 struct list_head list;
194 struct hstate *hstate;
195};
196
197/* arch callback */
198int __init alloc_bootmem_huge_page(struct hstate *h);
199
200void __init hugetlb_add_hstate(unsigned order);
201struct hstate *size_to_hstate(unsigned long size);
202
203#ifndef HUGE_MAX_HSTATE
204#define HUGE_MAX_HSTATE 1
205#endif
206
207extern struct hstate hstates[HUGE_MAX_HSTATE];
208extern unsigned int default_hstate_idx;
209
210#define default_hstate (hstates[default_hstate_idx])
211
212static inline struct hstate *hstate_inode(struct inode *i)
213{
214 struct hugetlbfs_sb_info *hsb;
215 hsb = HUGETLBFS_SB(i->i_sb);
216 return hsb->hstate;
217}
218
219static inline struct hstate *hstate_file(struct file *f)
220{
221 return hstate_inode(f->f_dentry->d_inode);
222}
223
224static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
225{
226 return hstate_file(vma->vm_file);
227}
228
229static inline unsigned long huge_page_size(struct hstate *h)
230{
231 return (unsigned long)PAGE_SIZE << h->order;
232}
233
234static inline unsigned long huge_page_mask(struct hstate *h)
235{
236 return h->mask;
237}
238
239static inline unsigned int huge_page_order(struct hstate *h)
240{
241 return h->order;
242}
243
244static inline unsigned huge_page_shift(struct hstate *h)
245{
246 return h->order + PAGE_SHIFT;
247}
248
249static inline unsigned int pages_per_huge_page(struct hstate *h)
250{
251 return 1 << h->order;
252}
253
254static inline unsigned int blocks_per_huge_page(struct hstate *h)
255{
256 return huge_page_size(h) / 512;
257}
258
259#include <asm/hugetlb.h>
260
261static inline struct hstate *page_hstate(struct page *page)
262{
263 return size_to_hstate(PAGE_SIZE << compound_order(page));
264}
265
266#else
267struct hstate {};
268#define alloc_bootmem_huge_page(h) NULL
269#define hstate_file(f) NULL
270#define hstate_vma(v) NULL
271#define hstate_inode(i) NULL
272#define huge_page_size(h) PAGE_SIZE
273#define huge_page_mask(h) PAGE_MASK
274#define huge_page_order(h) 0
275#define huge_page_shift(h) PAGE_SHIFT
276#define pages_per_huge_page(h) 1
277#endif
278
158#endif /* _LINUX_HUGETLB_H */ 279#endif /* _LINUX_HUGETLB_H */
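
With multiple huge page sizes, callers derive the geometry from an hstate instead of the old global HPAGE_* constants. A minimal sketch using only the helpers declared above:

	#include <linux/hugetlb.h>

	static unsigned long example_hugepages_needed(struct vm_area_struct *vma,
						      unsigned long bytes)
	{
		struct hstate *h = hstate_vma(vma);

		/* round 'bytes' up to whole huge pages of this VMA's size */
		return (bytes + huge_page_size(h) - 1) >> huge_page_shift(h);
	}
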
diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h
new file mode 100644
index 000000000000..e10336631c62
--- /dev/null
+++ b/include/linux/i2c/max732x.h
@@ -0,0 +1,19 @@
1#ifndef __LINUX_I2C_MAX732X_H
2#define __LINUX_I2C_MAX732X_H
3
4/* platform data for the MAX732x 8/16-bit I/O expander driver */
5
6struct max732x_platform_data {
7 /* number of the first GPIO */
8 unsigned gpio_base;
9
10 void *context; /* param to setup/teardown */
11
12 int (*setup)(struct i2c_client *client,
13 unsigned gpio, unsigned ngpio,
14 void *context);
15 int (*teardown)(struct i2c_client *client,
16 unsigned gpio, unsigned ngpio,
17 void *context);
18};
19#endif /* __LINUX_I2C_MAX732X_H */
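
A sketch of board-support code registering the expander. The I2C address and gpio_base are invented for the example, i2c_board_info/I2C_BOARD_INFO come from the existing I2C core rather than this header, and the "max7312" name is assumed to match an entry in the max732x driver's id table:

	#include <linux/i2c.h>
	#include <linux/i2c/max732x.h>

	static struct max732x_platform_data example_max732x_pdata = {
		.gpio_base	= 224,		/* first expander GPIO number */
	};

	static struct i2c_board_info example_board_info __initdata = {
		I2C_BOARD_INFO("max7312", 0x20),
		.platform_data	= &example_max732x_pdata,
	};
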
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 4726126f5a59..b846bc44a27e 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -178,6 +178,7 @@ typedef struct hw_regs_s {
178 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ 178 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
179 hwif_chipset_t chipset; 179 hwif_chipset_t chipset;
180 struct device *dev, *parent; 180 struct device *dev, *parent;
181 unsigned long config;
181} hw_regs_t; 182} hw_regs_t;
182 183
183void ide_init_port_data(struct hwif_s *, unsigned int); 184void ide_init_port_data(struct hwif_s *, unsigned int);
@@ -210,7 +211,21 @@ static inline int __ide_default_irq(unsigned long base)
210 return 0; 211 return 0;
211} 212}
212 213
214#if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \
215 defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \
216 || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64)
213#include <asm/ide.h> 217#include <asm/ide.h>
218#else
219#include <asm-generic/ide_iops.h>
220#endif
221
222#ifndef MAX_HWIFS
223#if defined(CONFIG_BLACKFIN) || defined(CONFIG_H8300) || defined(CONFIG_XTENSA)
224# define MAX_HWIFS 1
225#else
226# define MAX_HWIFS 10
227#endif
228#endif
214 229
215#if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED) 230#if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED)
216#undef MAX_HWIFS 231#undef MAX_HWIFS
@@ -307,7 +322,65 @@ struct ide_acpi_drive_link;
307struct ide_acpi_hwif_link; 322struct ide_acpi_hwif_link;
308#endif 323#endif
309 324
310typedef struct ide_drive_s { 325/* ATAPI device flags */
326enum {
327 IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
328 IDE_AFLAG_MEDIA_CHANGED = (1 << 1),
329
330 /* ide-cd */
331 /* Drive cannot lock the door. */
332 IDE_AFLAG_NO_DOORLOCK = (1 << 2),
333 /* Drive cannot eject the disc. */
334 IDE_AFLAG_NO_EJECT = (1 << 3),
335 /* Drive is a pre ATAPI 1.2 drive. */
336 IDE_AFLAG_PRE_ATAPI12 = (1 << 4),
337 /* TOC addresses are in BCD. */
338 IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5),
339 /* TOC track numbers are in BCD. */
340 IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6),
341 /*
342 * Drive does not provide data in multiples of SECTOR_SIZE
343 * when more than one interrupt is needed.
344 */
345 IDE_AFLAG_LIMIT_NFRAMES = (1 << 7),
346 /* Seeking in progress. */
347 IDE_AFLAG_SEEKING = (1 << 8),
348 /* Saved TOC information is current. */
349 IDE_AFLAG_TOC_VALID = (1 << 9),
350 /* We think that the drive door is locked. */
351 IDE_AFLAG_DOOR_LOCKED = (1 << 10),
352 /* SET_CD_SPEED command is unsupported. */
353 IDE_AFLAG_NO_SPEED_SELECT = (1 << 11),
354 IDE_AFLAG_VERTOS_300_SSD = (1 << 12),
355 IDE_AFLAG_VERTOS_600_ESD = (1 << 13),
356 IDE_AFLAG_SANYO_3CD = (1 << 14),
357 IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15),
358 IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16),
359 IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17),
360
361 /* ide-floppy */
362 /* Format in progress */
363 IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 18),
364 /* Avoid commands not supported in Clik drive */
365 IDE_AFLAG_CLIK_DRIVE = (1 << 19),
366 /* Requires BH algorithm for packets */
367 IDE_AFLAG_ZIP_DRIVE = (1 << 20),
368
369 /* ide-tape */
370 IDE_AFLAG_IGNORE_DSC = (1 << 21),
371 /* 0 When the tape position is unknown */
372 IDE_AFLAG_ADDRESS_VALID = (1 << 22),
373 /* Device already opened */
374 IDE_AFLAG_BUSY = (1 << 23),
375 /* Attempt to auto-detect the current user block size */
376 IDE_AFLAG_DETECT_BS = (1 << 24),
377 /* Currently on a filemark */
378 IDE_AFLAG_FILEMARK = (1 << 25),
379 /* 0 = no tape is loaded, so we don't rewind after ejecting */
380 IDE_AFLAG_MEDIUM_PRESENT = (1 << 26)
381};
382
383struct ide_drive_s {
311 char name[4]; /* drive name, such as "hda" */ 384 char name[4]; /* drive name, such as "hda" */
312 char driver_req[10]; /* requests specific driver */ 385 char driver_req[10]; /* requests specific driver */
313 386
@@ -355,7 +428,6 @@ typedef struct ide_drive_s {
355 unsigned nodma : 1; /* disallow DMA */ 428 unsigned nodma : 1; /* disallow DMA */
356 unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ 429 unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */
357 unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ 430 unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */
358 unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */
359 unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */ 431 unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */
360 unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ 432 unsigned sleeping : 1; /* 1=sleeping & sleep field valid */
361 unsigned post_reset : 1; 433 unsigned post_reset : 1;
@@ -400,7 +472,14 @@ typedef struct ide_drive_s {
400 struct list_head list; 472 struct list_head list;
401 struct device gendev; 473 struct device gendev;
402 struct completion gendev_rel_comp; /* to deal with device release() */ 474 struct completion gendev_rel_comp; /* to deal with device release() */
403} ide_drive_t; 475
476 /* callback for packet commands */
477 void (*pc_callback)(struct ide_drive_s *);
478
479 unsigned long atapi_flags;
480};
481
482typedef struct ide_drive_s ide_drive_t;
404 483
405#define to_ide_device(dev)container_of(dev, ide_drive_t, gendev) 484#define to_ide_device(dev)container_of(dev, ide_drive_t, gendev)
406 485
@@ -408,8 +487,28 @@ typedef struct ide_drive_s {
408 ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx)) 487 ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx))
409#define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1) 488#define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1)
410 489
490struct ide_task_s;
411struct ide_port_info; 491struct ide_port_info;
412 492
493struct ide_tp_ops {
494 void (*exec_command)(struct hwif_s *, u8);
495 u8 (*read_status)(struct hwif_s *);
496 u8 (*read_altstatus)(struct hwif_s *);
497 u8 (*read_sff_dma_status)(struct hwif_s *);
498
499 void (*set_irq)(struct hwif_s *, int);
500
501 void (*tf_load)(ide_drive_t *, struct ide_task_s *);
502 void (*tf_read)(ide_drive_t *, struct ide_task_s *);
503
504 void (*input_data)(ide_drive_t *, struct request *, void *,
505 unsigned int);
506 void (*output_data)(ide_drive_t *, struct request *, void *,
507 unsigned int);
508};
509
510extern const struct ide_tp_ops default_tp_ops;
511
413struct ide_port_ops { 512struct ide_port_ops {
414 /* host specific initialization of a device */ 513 /* host specific initialization of a device */
415 void (*init_dev)(ide_drive_t *); 514 void (*init_dev)(ide_drive_t *);
@@ -447,7 +546,7 @@ struct ide_dma_ops {
447 void (*dma_timeout)(struct ide_drive_s *); 546 void (*dma_timeout)(struct ide_drive_s *);
448}; 547};
449 548
450struct ide_task_s; 549struct ide_host;
451 550
452typedef struct hwif_s { 551typedef struct hwif_s {
453 struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ 552 struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
@@ -455,6 +554,8 @@ typedef struct hwif_s {
455 struct hwgroup_s *hwgroup; /* actually (ide_hwgroup_t *) */ 554 struct hwgroup_s *hwgroup; /* actually (ide_hwgroup_t *) */
456 struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ 555 struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
457 556
557 struct ide_host *host;
558
458 char name[6]; /* name of interface, eg. "ide0" */ 559 char name[6]; /* name of interface, eg. "ide0" */
459 560
460 struct ide_io_ports io_ports; 561 struct ide_io_ports io_ports;
@@ -486,22 +587,12 @@ typedef struct hwif_s {
486 587
487 void (*rw_disk)(ide_drive_t *, struct request *); 588 void (*rw_disk)(ide_drive_t *, struct request *);
488 589
590 const struct ide_tp_ops *tp_ops;
489 const struct ide_port_ops *port_ops; 591 const struct ide_port_ops *port_ops;
490 const struct ide_dma_ops *dma_ops; 592 const struct ide_dma_ops *dma_ops;
491 593
492 void (*tf_load)(ide_drive_t *, struct ide_task_s *);
493 void (*tf_read)(ide_drive_t *, struct ide_task_s *);
494
495 void (*input_data)(ide_drive_t *, struct request *, void *, unsigned);
496 void (*output_data)(ide_drive_t *, struct request *, void *, unsigned);
497
498 void (*ide_dma_clear_irq)(ide_drive_t *drive); 594 void (*ide_dma_clear_irq)(ide_drive_t *drive);
499 595
500 void (*OUTB)(u8 addr, unsigned long port);
501 void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port);
502
503 u8 (*INB)(unsigned long port);
504
505 /* dma physical region descriptor table (cpu view) */ 596 /* dma physical region descriptor table (cpu view) */
506 unsigned int *dmatable_cpu; 597 unsigned int *dmatable_cpu;
507 /* dma physical region descriptor table (dma view) */ 598 /* dma physical region descriptor table (dma view) */
@@ -524,8 +615,6 @@ typedef struct hwif_s {
524 int irq; /* our irq number */ 615 int irq; /* our irq number */
525 616
526 unsigned long dma_base; /* base addr for dma ports */ 617 unsigned long dma_base; /* base addr for dma ports */
527 unsigned long dma_command; /* dma command register */
528 unsigned long dma_status; /* dma status register */
529 618
530 unsigned long config_data; /* for use by chipset-specific code */ 619 unsigned long config_data; /* for use by chipset-specific code */
531 unsigned long select_data; /* for use by chipset-specific code */ 620 unsigned long select_data; /* for use by chipset-specific code */
@@ -552,6 +641,14 @@ typedef struct hwif_s {
552#endif 641#endif
553} ____cacheline_internodealigned_in_smp ide_hwif_t; 642} ____cacheline_internodealigned_in_smp ide_hwif_t;
554 643
644struct ide_host {
645 ide_hwif_t *ports[MAX_HWIFS];
646 unsigned int n_ports;
647 struct device *dev[2];
648 unsigned long host_flags;
649 void *host_priv;
650};
651
555/* 652/*
556 * internal ide interrupt handler type 653 * internal ide interrupt handler type
557 */ 654 */
@@ -611,8 +708,6 @@ enum {
611 PC_FLAG_WRITING = (1 << 6), 708 PC_FLAG_WRITING = (1 << 6),
612 /* command timed out */ 709 /* command timed out */
613 PC_FLAG_TIMEDOUT = (1 << 7), 710 PC_FLAG_TIMEDOUT = (1 << 7),
614 PC_FLAG_ZIP_DRIVE = (1 << 8),
615 PC_FLAG_DRQ_INTERRUPT = (1 << 9),
616}; 711};
617 712
618struct ide_atapi_pc { 713struct ide_atapi_pc {
@@ -646,8 +741,6 @@ struct ide_atapi_pc {
646 */ 741 */
647 u8 pc_buf[256]; 742 u8 pc_buf[256];
648 743
649 void (*callback)(ide_drive_t *);
650
651 /* idetape only */ 744 /* idetape only */
652 struct idetape_bh *bh; 745 struct idetape_bh *bh;
653 char *b_data; 746 char *b_data;
@@ -802,18 +895,14 @@ struct ide_driver_s {
802 895
803#define to_ide_driver(drv) container_of(drv, ide_driver_t, gen_driver) 896#define to_ide_driver(drv) container_of(drv, ide_driver_t, gen_driver)
804 897
898int ide_device_get(ide_drive_t *);
899void ide_device_put(ide_drive_t *);
900
805int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long); 901int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long);
806 902
807extern int ide_vlb_clk; 903extern int ide_vlb_clk;
808extern int ide_pci_clk; 904extern int ide_pci_clk;
809 905
810ide_hwif_t *ide_find_port_slot(const struct ide_port_info *);
811
812static inline ide_hwif_t *ide_find_port(void)
813{
814 return ide_find_port_slot(NULL);
815}
816
817extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); 906extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
818int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, 907int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
819 int uptodate, int nr_sectors); 908 int uptodate, int nr_sectors);
@@ -884,6 +973,7 @@ enum {
884 IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | 973 IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE |
885 IDE_TFLAG_IN_HOB_NSECT | 974 IDE_TFLAG_IN_HOB_NSECT |
886 IDE_TFLAG_IN_HOB_LBA, 975 IDE_TFLAG_IN_HOB_LBA,
976 IDE_TFLAG_IN_FEATURE = (1 << 1),
887 IDE_TFLAG_IN_NSECT = (1 << 25), 977 IDE_TFLAG_IN_NSECT = (1 << 25),
888 IDE_TFLAG_IN_LBAL = (1 << 26), 978 IDE_TFLAG_IN_LBAL = (1 << 26),
889 IDE_TFLAG_IN_LBAM = (1 << 27), 979 IDE_TFLAG_IN_LBAM = (1 << 27),
@@ -948,9 +1038,25 @@ typedef struct ide_task_s {
948 1038
949void ide_tf_dump(const char *, struct ide_taskfile *); 1039void ide_tf_dump(const char *, struct ide_taskfile *);
950 1040
1041void ide_exec_command(ide_hwif_t *, u8);
1042u8 ide_read_status(ide_hwif_t *);
1043u8 ide_read_altstatus(ide_hwif_t *);
1044u8 ide_read_sff_dma_status(ide_hwif_t *);
1045
1046void ide_set_irq(ide_hwif_t *, int);
1047
1048void ide_tf_load(ide_drive_t *, ide_task_t *);
1049void ide_tf_read(ide_drive_t *, ide_task_t *);
1050
1051void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int);
1052void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int);
1053
951extern void SELECT_DRIVE(ide_drive_t *); 1054extern void SELECT_DRIVE(ide_drive_t *);
952void SELECT_MASK(ide_drive_t *, int); 1055void SELECT_MASK(ide_drive_t *, int);
953 1056
1057u8 ide_read_error(ide_drive_t *);
1058void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
1059
954extern int drive_is_ready(ide_drive_t *); 1060extern int drive_is_ready(ide_drive_t *);
955 1061
956void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); 1062void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
@@ -1000,12 +1106,15 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o
1000#define ide_pci_register_driver(d) pci_register_driver(d) 1106#define ide_pci_register_driver(d) pci_register_driver(d)
1001#endif 1107#endif
1002 1108
1003void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8 *); 1109void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int,
1110 hw_regs_t *, hw_regs_t **);
1004void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); 1111void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
1005 1112
1006#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 1113#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
1007int ide_pci_set_master(struct pci_dev *, const char *); 1114int ide_pci_set_master(struct pci_dev *, const char *);
1008unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); 1115unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
1116extern const struct ide_dma_ops sff_dma_ops;
1117int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
1009int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); 1118int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
1010#else 1119#else
1011static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, 1120static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
@@ -1015,10 +1124,6 @@ static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
1015} 1124}
1016#endif 1125#endif
1017 1126
1018extern void default_hwif_iops(ide_hwif_t *);
1019extern void default_hwif_mmiops(ide_hwif_t *);
1020extern void default_hwif_transport(ide_hwif_t *);
1021
1022typedef struct ide_pci_enablebit_s { 1127typedef struct ide_pci_enablebit_s {
1023 u8 reg; /* byte pci reg holding the enable-bit */ 1128 u8 reg; /* byte pci reg holding the enable-bit */
1024 u8 mask; /* mask to isolate the enable-bit */ 1129 u8 mask; /* mask to isolate the enable-bit */
@@ -1081,7 +1186,6 @@ enum {
1081 IDE_HFLAG_IO_32BIT = (1 << 24), 1186 IDE_HFLAG_IO_32BIT = (1 << 24),
1082 /* unmask IRQs */ 1187 /* unmask IRQs */
1083 IDE_HFLAG_UNMASK_IRQS = (1 << 25), 1188 IDE_HFLAG_UNMASK_IRQS = (1 << 25),
1084 IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26),
1085 /* serialize ports if DMA is possible (for sl82c105) */ 1189 /* serialize ports if DMA is possible (for sl82c105) */
1086 IDE_HFLAG_SERIALIZE_DMA = (1 << 27), 1190 IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
1087 /* force host out of "simplex" mode */ 1191 /* force host out of "simplex" mode */
@@ -1092,8 +1196,6 @@ enum {
1092 IDE_HFLAG_NO_IO_32BIT = (1 << 30), 1196 IDE_HFLAG_NO_IO_32BIT = (1 << 30),
1093 /* never unmask IRQs */ 1197 /* never unmask IRQs */
1094 IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), 1198 IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31),
1095 /* host uses VDMA (disabled for now) */
1096 IDE_HFLAG_VDMA = 0,
1097}; 1199};
1098 1200
1099#ifdef CONFIG_BLK_DEV_OFFBOARD 1201#ifdef CONFIG_BLK_DEV_OFFBOARD
@@ -1104,12 +1206,13 @@ enum {
1104 1206
1105struct ide_port_info { 1207struct ide_port_info {
1106 char *name; 1208 char *name;
1107 unsigned int (*init_chipset)(struct pci_dev *, const char *); 1209 unsigned int (*init_chipset)(struct pci_dev *);
1108 void (*init_iops)(ide_hwif_t *); 1210 void (*init_iops)(ide_hwif_t *);
1109 void (*init_hwif)(ide_hwif_t *); 1211 void (*init_hwif)(ide_hwif_t *);
1110 int (*init_dma)(ide_hwif_t *, 1212 int (*init_dma)(ide_hwif_t *,
1111 const struct ide_port_info *); 1213 const struct ide_port_info *);
1112 1214
1215 const struct ide_tp_ops *tp_ops;
1113 const struct ide_port_ops *port_ops; 1216 const struct ide_port_ops *port_ops;
1114 const struct ide_dma_ops *dma_ops; 1217 const struct ide_dma_ops *dma_ops;
1115 1218
@@ -1122,8 +1225,10 @@ struct ide_port_info {
1122 u8 udma_mask; 1225 u8 udma_mask;
1123}; 1226};
1124 1227
1125int ide_setup_pci_device(struct pci_dev *, const struct ide_port_info *); 1228int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
1126int ide_setup_pci_devices(struct pci_dev *, struct pci_dev *, const struct ide_port_info *); 1229int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
1230 const struct ide_port_info *, void *);
1231void ide_pci_remove(struct pci_dev *);
1127 1232
1128void ide_map_sg(ide_drive_t *, struct request *); 1233void ide_map_sg(ide_drive_t *, struct request *);
1129void ide_init_sg_cmd(ide_drive_t *, struct request *); 1234void ide_init_sg_cmd(ide_drive_t *, struct request *);
@@ -1163,7 +1268,6 @@ void ide_destroy_dmatable(ide_drive_t *);
1163extern int ide_build_dmatable(ide_drive_t *, struct request *); 1268extern int ide_build_dmatable(ide_drive_t *, struct request *);
1164int ide_allocate_dma_engine(ide_hwif_t *); 1269int ide_allocate_dma_engine(ide_hwif_t *);
1165void ide_release_dma_engine(ide_hwif_t *); 1270void ide_release_dma_engine(ide_hwif_t *);
1166void ide_setup_dma(ide_hwif_t *, unsigned long);
1167 1271
1168void ide_dma_host_set(ide_drive_t *, int); 1272void ide_dma_host_set(ide_drive_t *, int);
1169extern int ide_dma_setup(ide_drive_t *); 1273extern int ide_dma_setup(ide_drive_t *);
@@ -1217,8 +1321,14 @@ void ide_undecoded_slave(ide_drive_t *);
1217 1321
1218void ide_port_apply_params(ide_hwif_t *); 1322void ide_port_apply_params(ide_hwif_t *);
1219 1323
1220int ide_device_add_all(u8 *idx, const struct ide_port_info *); 1324struct ide_host *ide_host_alloc_all(const struct ide_port_info *, hw_regs_t **);
1221int ide_device_add(u8 idx[4], const struct ide_port_info *); 1325struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **);
1326void ide_host_free(struct ide_host *);
1327int ide_host_register(struct ide_host *, const struct ide_port_info *,
1328 hw_regs_t **);
1329int ide_host_add(const struct ide_port_info *, hw_regs_t **,
1330 struct ide_host **);
1331void ide_host_remove(struct ide_host *);
1222int ide_legacy_device_add(const struct ide_port_info *, unsigned long); 1332int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
1223void ide_port_unregister_devices(ide_hwif_t *); 1333void ide_port_unregister_devices(ide_hwif_t *);
1224void ide_port_scan(ide_hwif_t *); 1334void ide_port_scan(ide_hwif_t *);
@@ -1350,33 +1460,4 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive)
1350 1460
1351 return &hwif->drives[(drive->dn ^ 1) & 1]; 1461 return &hwif->drives[(drive->dn ^ 1) & 1];
1352} 1462}
1353
1354static inline void ide_set_irq(ide_drive_t *drive, int on)
1355{
1356 ide_hwif_t *hwif = drive->hwif;
1357
1358 hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2),
1359 hwif->io_ports.ctl_addr);
1360}
1361
1362static inline u8 ide_read_status(ide_drive_t *drive)
1363{
1364 ide_hwif_t *hwif = drive->hwif;
1365
1366 return hwif->INB(hwif->io_ports.status_addr);
1367}
1368
1369static inline u8 ide_read_altstatus(ide_drive_t *drive)
1370{
1371 ide_hwif_t *hwif = drive->hwif;
1372
1373 return hwif->INB(hwif->io_ports.ctl_addr);
1374}
1375
1376static inline u8 ide_read_error(ide_drive_t *drive)
1377{
1378 ide_hwif_t *hwif = drive->hwif;
1379
1380 return hwif->INB(hwif->io_ports.error_addr);
1381}
1382#endif /* _IDE_H */ 1463#endif /* _IDE_H */
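
The ide.h hunks above replace the old ide_find_port()/ide_device_add() flow with an ide_host object owned by the low-level driver. A minimal sketch of how a host driver might use the new interface is below; my_ide_probe, my_port_info and the hws[] layout are illustrative, not taken from this diff, and error paths are shortened.

static int my_ide_probe(struct platform_device *pdev)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	struct ide_host *host;
	int ret;

	memset(&hw, 0, sizeof(hw));
	/* fill hw.io_ports and hw.irq from the platform resources here */

	ret = ide_host_add(&my_port_info, hws, &host);	/* allocate + register */
	if (ret)
		return ret;

	platform_set_drvdata(pdev, host);
	return 0;
}

static int my_ide_remove(struct platform_device *pdev)
{
	ide_host_remove(platform_get_drvdata(pdev));	/* unregister and free */
	return 0;
}
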
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 9a2d762124de..fa035f96f2a3 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/rcupdate.h>
18 19
19#if BITS_PER_LONG == 32 20#if BITS_PER_LONG == 32
20# define IDR_BITS 5 21# define IDR_BITS 5
@@ -51,6 +52,7 @@ struct idr_layer {
51 unsigned long bitmap; /* A zero bit means "space here" */ 52 unsigned long bitmap; /* A zero bit means "space here" */
52 struct idr_layer *ary[1<<IDR_BITS]; 53 struct idr_layer *ary[1<<IDR_BITS];
53 int count; /* When zero, we can release it */ 54 int count; /* When zero, we can release it */
55 struct rcu_head rcu_head;
54}; 56};
55 57
56struct idr { 58struct idr {
@@ -71,6 +73,28 @@ struct idr {
71} 73}
72#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) 74#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
73 75
76/* Actions to be taken after a call to _idr_sub_alloc */
77#define IDR_NEED_TO_GROW -2
78#define IDR_NOMORE_SPACE -3
79
80#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
81
82/**
83 * idr synchronization (stolen from radix-tree.h)
84 *
85 * idr_find() is able to be called locklessly, using RCU. The caller must
86 * ensure calls to this function are made within rcu_read_lock() regions.
87 * Other readers (lock-free or otherwise) and modifications may be running
88 * concurrently.
89 *
90 * It is still required that the caller manage the synchronization and
91 * lifetimes of the items. So if RCU lock-free lookups are used, typically
92 * this would mean that the items have their own locks, or are amenable to
93 * lock-free access; and that the items are freed by RCU (or only freed after
94 * having been deleted from the idr tree *and* a synchronize_rcu() grace
95 * period).
96 */
97
74/* 98/*
75 * This is what we export. 99 * This is what we export.
76 */ 100 */
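
The new comment block documents that idr_find() may now be called locklessly under RCU. A minimal sketch of a lookup that follows that rule (my_idr, id and do_something() are placeholders):

	struct my_obj *obj;

	rcu_read_lock();
	obj = idr_find(&my_idr, id);	/* lockless lookup, protected only by RCU */
	if (obj)
		do_something(obj);	/* obj must be freed via RCU (or after
					 * synchronize_rcu()) for this to be safe */
	rcu_read_unlock();
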
diff --git a/include/linux/init.h b/include/linux/init.h
index 21d658cdfa27..42ae95411a93 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -275,13 +275,7 @@ void __init parse_early_param(void);
275 275
276#define security_initcall(fn) module_init(fn) 276#define security_initcall(fn) module_init(fn)
277 277
278/* These macros create a dummy inline: gcc 2.9x does not count alias 278/* Each module must use one module_init(). */
279 as usage, hence the `unused function' warning when __init functions
280 are declared static. We use the dummy __*_module_inline functions
281 both to kill the warning and check the type of the init/cleanup
282 function. */
283
284/* Each module must use one module_init(), or one no_module_init */
285#define module_init(initfn) \ 279#define module_init(initfn) \
286 static inline initcall_t __inittest(void) \ 280 static inline initcall_t __inittest(void) \
287 { return initfn; } \ 281 { return initfn; } \
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 93c45acf249a..021d8e720c79 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -122,7 +122,7 @@ extern struct group_info init_groups;
122 .state = 0, \ 122 .state = 0, \
123 .stack = &init_thread_info, \ 123 .stack = &init_thread_info, \
124 .usage = ATOMIC_INIT(2), \ 124 .usage = ATOMIC_INIT(2), \
125 .flags = 0, \ 125 .flags = PF_KTHREAD, \
126 .lock_depth = -1, \ 126 .lock_depth = -1, \
127 .prio = MAX_PRIO-20, \ 127 .prio = MAX_PRIO-20, \
128 .static_prio = MAX_PRIO-20, \ 128 .static_prio = MAX_PRIO-20, \
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 742b917e7d1b..bd578578a8b9 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -7,6 +7,8 @@
7#ifndef _LINUX_INOTIFY_H 7#ifndef _LINUX_INOTIFY_H
8#define _LINUX_INOTIFY_H 8#define _LINUX_INOTIFY_H
9 9
10/* For O_CLOEXEC and O_NONBLOCK */
11#include <linux/fcntl.h>
10#include <linux/types.h> 12#include <linux/types.h>
11 13
12/* 14/*
@@ -63,6 +65,10 @@ struct inotify_event {
63 IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \ 65 IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \
64 IN_MOVE_SELF) 66 IN_MOVE_SELF)
65 67
68/* Flags for sys_inotify_init1. */
69#define IN_CLOEXEC O_CLOEXEC
70#define IN_NONBLOCK O_NONBLOCK
71
66#ifdef __KERNEL__ 72#ifdef __KERNEL__
67 73
68#include <linux/dcache.h> 74#include <linux/dcache.h>
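
IN_CLOEXEC and IN_NONBLOCK simply reuse the O_* values so they can be passed to the new inotify_init1() syscall. A user-space sketch, assuming a libc that exposes inotify_init1() and with error handling omitted:

	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
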
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index ea6c18a8b0d4..ea330f9e7100 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -36,6 +36,7 @@ struct ipc_namespace {
36 int msg_ctlmni; 36 int msg_ctlmni;
37 atomic_t msg_bytes; 37 atomic_t msg_bytes;
38 atomic_t msg_hdrs; 38 atomic_t msg_hdrs;
39 int auto_msgmni;
39 40
40 size_t shm_ctlmax; 41 size_t shm_ctlmax;
41 size_t shm_ctlall; 42 size_t shm_ctlall;
@@ -53,7 +54,7 @@ extern atomic_t nr_ipc_ns;
53 54
54extern int register_ipcns_notifier(struct ipc_namespace *); 55extern int register_ipcns_notifier(struct ipc_namespace *);
55extern int cond_register_ipcns_notifier(struct ipc_namespace *); 56extern int cond_register_ipcns_notifier(struct ipc_namespace *);
56extern int unregister_ipcns_notifier(struct ipc_namespace *); 57extern void unregister_ipcns_notifier(struct ipc_namespace *);
57extern int ipcns_notify(unsigned long); 58extern int ipcns_notify(unsigned long);
58 59
59#else /* CONFIG_SYSVIPC */ 60#else /* CONFIG_SYSVIPC */
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 2b1c2e58566e..74bde13224c9 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -11,6 +11,8 @@
11#ifndef _LINUX_TRACE_IRQFLAGS_H 11#ifndef _LINUX_TRACE_IRQFLAGS_H
12#define _LINUX_TRACE_IRQFLAGS_H 12#define _LINUX_TRACE_IRQFLAGS_H
13 13
14#include <linux/typecheck.h>
15
14#ifdef CONFIG_TRACE_IRQFLAGS 16#ifdef CONFIG_TRACE_IRQFLAGS
15 extern void trace_softirqs_on(unsigned long ip); 17 extern void trace_softirqs_on(unsigned long ip);
16 extern void trace_softirqs_off(unsigned long ip); 18 extern void trace_softirqs_off(unsigned long ip);
@@ -58,18 +60,24 @@
58 do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) 60 do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
59#define local_irq_disable() \ 61#define local_irq_disable() \
60 do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) 62 do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
61#define local_irq_save(flags) \ 63#define local_irq_save(flags) \
62 do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0) 64 do { \
65 typecheck(unsigned long, flags); \
66 raw_local_irq_save(flags); \
67 trace_hardirqs_off(); \
68 } while (0)
63 69
64#define local_irq_restore(flags) \ 70
65 do { \ 71#define local_irq_restore(flags) \
66 if (raw_irqs_disabled_flags(flags)) { \ 72 do { \
67 raw_local_irq_restore(flags); \ 73 typecheck(unsigned long, flags); \
68 trace_hardirqs_off(); \ 74 if (raw_irqs_disabled_flags(flags)) { \
69 } else { \ 75 raw_local_irq_restore(flags); \
70 trace_hardirqs_on(); \ 76 trace_hardirqs_off(); \
71 raw_local_irq_restore(flags); \ 77 } else { \
72 } \ 78 trace_hardirqs_on(); \
79 raw_local_irq_restore(flags); \
80 } \
73 } while (0) 81 } while (0)
74#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ 82#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
75/* 83/*
@@ -78,8 +86,16 @@
78 */ 86 */
79# define raw_local_irq_disable() local_irq_disable() 87# define raw_local_irq_disable() local_irq_disable()
80# define raw_local_irq_enable() local_irq_enable() 88# define raw_local_irq_enable() local_irq_enable()
81# define raw_local_irq_save(flags) local_irq_save(flags) 89# define raw_local_irq_save(flags) \
82# define raw_local_irq_restore(flags) local_irq_restore(flags) 90 do { \
91 typecheck(unsigned long, flags); \
92 local_irq_save(flags); \
93 } while (0)
94# define raw_local_irq_restore(flags) \
95 do { \
96 typecheck(unsigned long, flags); \
97 local_irq_restore(flags); \
98 } while (0)
83#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ 99#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
84 100
85#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 101#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
@@ -89,7 +105,11 @@
89 raw_safe_halt(); \ 105 raw_safe_halt(); \
90 } while (0) 106 } while (0)
91 107
92#define local_save_flags(flags) raw_local_save_flags(flags) 108#define local_save_flags(flags) \
109 do { \
110 typecheck(unsigned long, flags); \
111 raw_local_save_flags(flags); \
112 } while (0)
93 113
94#define irqs_disabled() \ 114#define irqs_disabled() \
95({ \ 115({ \
@@ -99,7 +119,11 @@
99 raw_irqs_disabled_flags(_flags); \ 119 raw_irqs_disabled_flags(_flags); \
100}) 120})
101 121
102#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) 122#define irqs_disabled_flags(flags) \
123({ \
124 typecheck(unsigned long, flags); \
125 raw_irqs_disabled_flags(flags); \
126})
103#endif /* CONFIG_X86 */ 127#endif /* CONFIG_X86 */
104 128
105#endif 129#endif
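
With these changes the local_irq_save()/local_irq_restore() family runs its flags argument through typecheck(), so passing anything other than an unsigned long now produces a compile-time warning instead of silently misbehaving. For example:

	unsigned long flags;		/* correct type: compiles cleanly */

	local_irq_save(flags);
	/* ... critical section ... */
	local_irq_restore(flags);

	/* declaring "int flags;" instead would now trigger a
	 * "comparison of distinct pointer types" warning from typecheck() */
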
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 00c1801099fa..57aefa160a92 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -6,6 +6,7 @@
6#define _LINUX_KALLSYMS_H 6#define _LINUX_KALLSYMS_H
7 7
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/kernel.h>
9#include <linux/stddef.h> 10#include <linux/stddef.h>
10 11
11#define KSYM_NAME_LEN 128 12#define KSYM_NAME_LEN 128
@@ -105,18 +106,10 @@ static inline void print_fn_descriptor_symbol(const char *fmt, void *addr)
105 print_symbol(fmt, (unsigned long)addr); 106 print_symbol(fmt, (unsigned long)addr);
106} 107}
107 108
108#ifndef CONFIG_64BIT 109static inline void print_ip_sym(unsigned long ip)
109#define print_ip_sym(ip) \ 110{
110do { \ 111 printk("[<%p>]", (void *) ip);
111 printk("[<%08lx>]", ip); \ 112 print_symbol(" %s\n", ip);
112 print_symbol(" %s\n", ip); \ 113}
113} while(0)
114#else
115#define print_ip_sym(ip) \
116do { \
117 printk("[<%016lx>]", ip); \
118 print_symbol(" %s\n", ip); \
119} while(0)
120#endif
121 114
122#endif /*_LINUX_KALLSYMS_H*/ 115#endif /*_LINUX_KALLSYMS_H*/
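
print_ip_sym() is now a single inline that prints the address with %p, so the separate 32/64-bit macro variants are gone. A typical call, unchanged for callers:

	print_ip_sym((unsigned long)__builtin_return_address(0));
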
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f9cd7a513f9c..fdbbf72ca2eb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -14,6 +14,8 @@
14#include <linux/compiler.h> 14#include <linux/compiler.h>
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <linux/log2.h> 16#include <linux/log2.h>
17#include <linux/typecheck.h>
18#include <linux/ratelimit.h>
17#include <asm/byteorder.h> 19#include <asm/byteorder.h>
18#include <asm/bug.h> 20#include <asm/bug.h>
19 21
@@ -188,11 +190,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
188asmlinkage int printk(const char * fmt, ...) 190asmlinkage int printk(const char * fmt, ...)
189 __attribute__ ((format (printf, 1, 2))) __cold; 191 __attribute__ ((format (printf, 1, 2))) __cold;
190 192
191extern int printk_ratelimit_jiffies; 193extern struct ratelimit_state printk_ratelimit_state;
192extern int printk_ratelimit_burst;
193extern int printk_ratelimit(void); 194extern int printk_ratelimit(void);
194extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst);
195extern int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst);
196extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, 195extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
197 unsigned int interval_msec); 196 unsigned int interval_msec);
198#else 197#else
@@ -203,8 +202,6 @@ static inline int printk(const char *s, ...)
203 __attribute__ ((format (printf, 1, 2))); 202 __attribute__ ((format (printf, 1, 2)));
204static inline int __cold printk(const char *s, ...) { return 0; } 203static inline int __cold printk(const char *s, ...) { return 0; }
205static inline int printk_ratelimit(void) { return 0; } 204static inline int printk_ratelimit(void) { return 0; }
206static inline int __printk_ratelimit(int ratelimit_jiffies, \
207 int ratelimit_burst) { return 0; }
208static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ 205static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
209 unsigned int interval_msec) \ 206 unsigned int interval_msec) \
210 { return false; } 207 { return false; }
@@ -441,26 +438,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
441 const typeof( ((type *)0)->member ) *__mptr = (ptr); \ 438 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
442 (type *)( (char *)__mptr - offsetof(type,member) );}) 439 (type *)( (char *)__mptr - offsetof(type,member) );})
443 440
444/*
445 * Check at compile time that something is of a particular type.
446 * Always evaluates to 1 so you may use it easily in comparisons.
447 */
448#define typecheck(type,x) \
449({ type __dummy; \
450 typeof(x) __dummy2; \
451 (void)(&__dummy == &__dummy2); \
452 1; \
453})
454
455/*
456 * Check at compile time that 'function' is a certain type, or is a pointer
457 * to that type (needs to use typedef for the function type.)
458 */
459#define typecheck_fn(type,function) \
460({ typeof(type) __tmp = function; \
461 (void)__tmp; \
462})
463
464struct sysinfo; 441struct sysinfo;
465extern int do_sysinfo(struct sysinfo *info); 442extern int do_sysinfo(struct sysinfo *info);
466 443
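
printk_ratelimit() is kept but now drives a shared struct ratelimit_state instead of the two exported int knobs. The caller pattern is unchanged:

	if (printk_ratelimit())
		printk(KERN_WARNING "my_drv: dropping packet, queue full\n");
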
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 0509c4ce4857..a1a91577813c 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -19,6 +19,7 @@
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22#include <linux/gfp.h>
22#include <linux/stddef.h> 23#include <linux/stddef.h>
23#include <linux/errno.h> 24#include <linux/errno.h>
24#include <linux/compiler.h> 25#include <linux/compiler.h>
@@ -41,8 +42,8 @@ struct file;
41struct subprocess_info; 42struct subprocess_info;
42 43
43/* Allocate a subprocess_info structure */ 44/* Allocate a subprocess_info structure */
44struct subprocess_info *call_usermodehelper_setup(char *path, 45struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
45 char **argv, char **envp); 46 char **envp, gfp_t gfp_mask);
46 47
47/* Set various pieces of state into the subprocess_info structure */ 48/* Set various pieces of state into the subprocess_info structure */
48void call_usermodehelper_setkeys(struct subprocess_info *info, 49void call_usermodehelper_setkeys(struct subprocess_info *info,
@@ -69,8 +70,9 @@ static inline int
69call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) 70call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
70{ 71{
71 struct subprocess_info *info; 72 struct subprocess_info *info;
73 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
72 74
73 info = call_usermodehelper_setup(path, argv, envp); 75 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
74 if (info == NULL) 76 if (info == NULL)
75 return -ENOMEM; 77 return -ENOMEM;
76 return call_usermodehelper_exec(info, wait); 78 return call_usermodehelper_exec(info, wait);
@@ -81,8 +83,9 @@ call_usermodehelper_keys(char *path, char **argv, char **envp,
81 struct key *session_keyring, enum umh_wait wait) 83 struct key *session_keyring, enum umh_wait wait)
82{ 84{
83 struct subprocess_info *info; 85 struct subprocess_info *info;
86 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
84 87
85 info = call_usermodehelper_setup(path, argv, envp); 88 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
86 if (info == NULL) 89 if (info == NULL)
87 return -ENOMEM; 90 return -ENOMEM;
88 91
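
call_usermodehelper_setup() now takes a gfp_t so UMH_NO_WAIT callers can allocate atomically; the inline wrappers derive the mask from the wait mode. A hedged sketch of a typical caller (the helper path and arguments are invented):

	char *argv[] = { "/sbin/my-hotplug-helper", "add", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	int ret;

	/* the wrapper picks GFP_KERNEL for UMH_WAIT_PROC and passes it to _setup() */
	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
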
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 60f0d418ae32..5437ac0276e2 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -186,6 +186,8 @@ extern struct kobject *kset_find_obj(struct kset *, const char *);
186 186
187/* The global /sys/kernel/ kobject for people to chain off of */ 187/* The global /sys/kernel/ kobject for people to chain off of */
188extern struct kobject *kernel_kobj; 188extern struct kobject *kernel_kobj;
189/* The global /sys/kernel/mm/ kobject for people to chain off of */
190extern struct kobject *mm_kobj;
189/* The global /sys/hypervisor/ kobject for people to chain off of */ 191/* The global /sys/hypervisor/ kobject for people to chain off of */
190extern struct kobject *hypervisor_kobj; 192extern struct kobject *hypervisor_kobj;
191/* The global /sys/power/ kobject for people to chain off of */ 193/* The global /sys/power/ kobject for people to chain off of */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 04a3556bdea6..0be7795655fa 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -157,11 +157,10 @@ struct kretprobe {
157 int nmissed; 157 int nmissed;
158 size_t data_size; 158 size_t data_size;
159 struct hlist_head free_instances; 159 struct hlist_head free_instances;
160 struct hlist_head used_instances; 160 spinlock_t lock;
161}; 161};
162 162
163struct kretprobe_instance { 163struct kretprobe_instance {
164 struct hlist_node uflist; /* either on free list or used list */
165 struct hlist_node hlist; 164 struct hlist_node hlist;
166 struct kretprobe *rp; 165 struct kretprobe *rp;
167 kprobe_opcode_t *ret_addr; 166 kprobe_opcode_t *ret_addr;
@@ -201,7 +200,6 @@ static inline int init_test_probes(void)
201} 200}
202#endif /* CONFIG_KPROBES_SANITY_TEST */ 201#endif /* CONFIG_KPROBES_SANITY_TEST */
203 202
204extern spinlock_t kretprobe_lock;
205extern struct mutex kprobe_mutex; 203extern struct mutex kprobe_mutex;
206extern int arch_prepare_kprobe(struct kprobe *p); 204extern int arch_prepare_kprobe(struct kprobe *p);
207extern void arch_arm_kprobe(struct kprobe *p); 205extern void arch_arm_kprobe(struct kprobe *p);
@@ -214,6 +212,9 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
214 212
215/* Get the kprobe at this addr (if any) - called with preemption disabled */ 213/* Get the kprobe at this addr (if any) - called with preemption disabled */
216struct kprobe *get_kprobe(void *addr); 214struct kprobe *get_kprobe(void *addr);
215void kretprobe_hash_lock(struct task_struct *tsk,
216 struct hlist_head **head, unsigned long *flags);
217void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags);
217struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); 218struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
218 219
219/* kprobe_running() will just return the current_kprobe on this CPU */ 220/* kprobe_running() will just return the current_kprobe on this CPU */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 00dd957e245b..aabc8a13ba71 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -6,7 +6,8 @@
6 6
7struct task_struct *kthread_create(int (*threadfn)(void *data), 7struct task_struct *kthread_create(int (*threadfn)(void *data),
8 void *data, 8 void *data,
9 const char namefmt[], ...); 9 const char namefmt[], ...)
10 __attribute__((format(printf, 3, 4)));
10 11
11/** 12/**
12 * kthread_run - create and wake a thread. 13 * kthread_run - create and wake a thread.
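
kthread_create() gains a printf format attribute, so a mismatched name format string is now caught at compile time. Usage itself is unchanged; my_thread_fn, my_data and cpu below are illustrative:

	struct task_struct *task;

	task = kthread_create(my_thread_fn, my_data, "my_worker/%d", cpu);
	if (!IS_ERR(task))
		wake_up_process(task);
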
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 1d379787f2e7..173febac6656 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -47,7 +47,7 @@ struct lcd_ops {
47 int (*set_contrast)(struct lcd_device *, int contrast); 47 int (*set_contrast)(struct lcd_device *, int contrast);
48 /* Check if given framebuffer device is the one LCD is bound to; 48 /* Check if given framebuffer device is the one LCD is bound to;
49 return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */ 49 return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */
50 int (*check_fb)(struct fb_info *); 50 int (*check_fb)(struct lcd_device *, struct fb_info *);
51}; 51};
52 52
53struct lcd_device { 53struct lcd_device {
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
new file mode 100644
index 000000000000..81b4207deb95
--- /dev/null
+++ b/include/linux/leds-pca9532.h
@@ -0,0 +1,45 @@
1/*
2 * pca9532.h - platform data structure for pca9532 led controller
3 *
4 * Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf
11 *
12 */
13
14#ifndef __LINUX_PCA9532_H
15#define __LINUX_PCA9532_H
16
17#include <linux/leds.h>
18
19enum pca9532_state {
20 PCA9532_OFF = 0x0,
21 PCA9532_ON = 0x1,
22 PCA9532_PWM0 = 0x2,
23 PCA9532_PWM1 = 0x3
24};
25
26enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED,
27 PCA9532_TYPE_N2100_BEEP };
28
29struct pca9532_led {
30 u8 id;
31 struct i2c_client *client;
32 char *name;
33 struct led_classdev ldev;
34 enum pca9532_type type;
35 enum pca9532_state state;
36};
37
38struct pca9532_platform_data {
39 struct pca9532_led leds[16];
40 u8 pwm[2];
41 u8 psc[2];
42};
43
44#endif /* __LINUX_PCA9532_H */
45
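
Board code feeds this driver a pca9532_platform_data describing each of the 16 outputs. A hedged example for one LED plus the N2100-style beeper output (names are made up):

	static struct pca9532_platform_data my_board_pca9532 = {
		.leds = {
			{ .name  = "my-board:red:status",
			  .type  = PCA9532_TYPE_LED,
			  .state = PCA9532_OFF },
			{ .type  = PCA9532_TYPE_N2100_BEEP },
			/* remaining entries default to PCA9532_TYPE_NONE */
		},
		.pwm = { 0, 0 },
		.psc = { 0, 0 },
	};
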
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 519df72e939d..d41ccb56146a 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -48,7 +48,7 @@ struct led_classdev {
48 48
49 struct device *dev; 49 struct device *dev;
50 struct list_head node; /* LED Device list */ 50 struct list_head node; /* LED Device list */
51 char *default_trigger; /* Trigger to use */ 51 const char *default_trigger; /* Trigger to use */
52 52
53#ifdef CONFIG_LEDS_TRIGGERS 53#ifdef CONFIG_LEDS_TRIGGERS
54 /* Protects the trigger data below */ 54 /* Protects the trigger data below */
@@ -118,6 +118,20 @@ extern void ledtrig_ide_activity(void);
118#define ledtrig_ide_activity() do {} while(0) 118#define ledtrig_ide_activity() do {} while(0)
119#endif 119#endif
120 120
121/*
122 * Generic LED platform data for describing LED names and default triggers.
123 */
124struct led_info {
125 const char *name;
126 char *default_trigger;
127 int flags;
128};
129
130struct led_platform_data {
131 int num_leds;
132 struct led_info *leds;
133};
134
121/* For the leds-gpio driver */ 135/* For the leds-gpio driver */
122struct gpio_led { 136struct gpio_led {
123 const char *name; 137 const char *name;
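
The new led_info/led_platform_data pair gives platform code a generic way to hand LED names and default triggers to a driver. Illustrative sketch (board and LED names invented):

	static struct led_info my_board_leds[] = {
		{ .name = "my-board:green:power", .default_trigger = "default-on" },
		{ .name = "my-board:amber:disk",  .default_trigger = "ide-disk" },
	};

	static struct led_platform_data my_board_led_data = {
		.num_leds = ARRAY_SIZE(my_board_leds),
		.leds     = my_board_leds,
	};
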
diff --git a/include/linux/list.h b/include/linux/list.h
index 139ec41d9c2e..453916bc0412 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -61,14 +61,10 @@ extern void __list_add(struct list_head *new,
61 * Insert a new entry after the specified head. 61 * Insert a new entry after the specified head.
62 * This is good for implementing stacks. 62 * This is good for implementing stacks.
63 */ 63 */
64#ifndef CONFIG_DEBUG_LIST
65static inline void list_add(struct list_head *new, struct list_head *head) 64static inline void list_add(struct list_head *new, struct list_head *head)
66{ 65{
67 __list_add(new, head, head->next); 66 __list_add(new, head, head->next);
68} 67}
69#else
70extern void list_add(struct list_head *new, struct list_head *head);
71#endif
72 68
73 69
74/** 70/**
diff --git a/include/linux/major.h b/include/linux/major.h
index 0cb98053537a..53d5fafd85c3 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -53,7 +53,7 @@
53#define STL_SIOMEMMAJOR 28 53#define STL_SIOMEMMAJOR 28
54#define ACSI_MAJOR 28 54#define ACSI_MAJOR 28
55#define AZTECH_CDROM_MAJOR 29 55#define AZTECH_CDROM_MAJOR 29
56#define GRAPHDEV_MAJOR 29 /* SparcLinux & Linux/68k /dev/fb */ 56#define FB_MAJOR 29 /* /dev/fb* framebuffers */
57#define CM206_CDROM_MAJOR 32 57#define CM206_CDROM_MAJOR 32
58#define IDE2_MAJOR 33 58#define IDE2_MAJOR 33
59#define IDE3_MAJOR 34 59#define IDE3_MAJOR 34
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e6608776bc96..fdf3967e1397 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,7 +35,10 @@ extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
35extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 35extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
36 gfp_t gfp_mask); 36 gfp_t gfp_mask);
37extern void mem_cgroup_uncharge_page(struct page *page); 37extern void mem_cgroup_uncharge_page(struct page *page);
38extern void mem_cgroup_uncharge_cache_page(struct page *page);
38extern void mem_cgroup_move_lists(struct page *page, bool active); 39extern void mem_cgroup_move_lists(struct page *page, bool active);
40extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
41
39extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, 42extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
40 struct list_head *dst, 43 struct list_head *dst,
41 unsigned long *scanned, int order, 44 unsigned long *scanned, int order,
@@ -50,9 +53,9 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
50#define mm_match_cgroup(mm, cgroup) \ 53#define mm_match_cgroup(mm, cgroup) \
51 ((cgroup) == mem_cgroup_from_task((mm)->owner)) 54 ((cgroup) == mem_cgroup_from_task((mm)->owner))
52 55
53extern int mem_cgroup_prepare_migration(struct page *page); 56extern int
57mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
54extern void mem_cgroup_end_migration(struct page *page); 58extern void mem_cgroup_end_migration(struct page *page);
55extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
56 59
57/* 60/*
58 * For memory reclaim. 61 * For memory reclaim.
@@ -97,6 +100,15 @@ static inline void mem_cgroup_uncharge_page(struct page *page)
97{ 100{
98} 101}
99 102
103static inline void mem_cgroup_uncharge_cache_page(struct page *page)
104{
105}
106
107static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
108{
109 return 0;
110}
111
100static inline void mem_cgroup_move_lists(struct page *page, bool active) 112static inline void mem_cgroup_move_lists(struct page *page, bool active)
101{ 113{
102} 114}
@@ -112,7 +124,8 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
112 return 1; 124 return 1;
113} 125}
114 126
115static inline int mem_cgroup_prepare_migration(struct page *page) 127static inline int
128mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
116{ 129{
117 return 0; 130 return 0;
118} 131}
@@ -121,11 +134,6 @@ static inline void mem_cgroup_end_migration(struct page *page)
121{ 134{
122} 135}
123 136
124static inline void
125mem_cgroup_page_migration(struct page *page, struct page *newpage)
126{
127}
128
129static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) 137static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
130{ 138{
131 return 0; 139 return 0;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index ea9f5ad9ec8e..763ba81fc0f0 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -13,12 +13,12 @@ struct mem_section;
13#ifdef CONFIG_MEMORY_HOTPLUG 13#ifdef CONFIG_MEMORY_HOTPLUG
14 14
15/* 15/*
16 * Magic number for free bootmem. 16 * Types for free bootmem.
17 * The normal smallest mapcount is -1. Here is smaller value than it. 17 * The normal smallest mapcount is -1. Here is smaller value than it.
18 */ 18 */
19#define SECTION_INFO 0xfffffffe 19#define SECTION_INFO (-1 - 1)
20#define MIX_INFO 0xfffffffd 20#define MIX_SECTION_INFO (-1 - 2)
21#define NODE_INFO 0xfffffffc 21#define NODE_INFO (-1 - 3)
22 22
23/* 23/*
24 * pgdat resizing functions 24 * pgdat resizing functions
@@ -199,6 +199,18 @@ extern int walk_memory_resource(unsigned long start_pfn,
199 unsigned long nr_pages, void *arg, 199 unsigned long nr_pages, void *arg,
200 int (*func)(unsigned long, unsigned long, void *)); 200 int (*func)(unsigned long, unsigned long, void *));
201 201
202#ifdef CONFIG_MEMORY_HOTREMOVE
203
204extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
205
206#else
207static inline int is_mem_section_removable(unsigned long pfn,
208 unsigned long nr_pages)
209{
210 return 0;
211}
212#endif /* CONFIG_MEMORY_HOTREMOVE */
213
202extern int add_memory(int nid, u64 start, u64 size); 214extern int add_memory(int nid, u64 start, u64 size);
203extern int arch_add_memory(int nid, u64 start, u64 size); 215extern int arch_add_memory(int nid, u64 start, u64 size);
204extern int remove_memory(u64 start, u64 size); 216extern int remove_memory(u64 start, u64 size);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 3a39570b81b8..085c903fe0f1 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -59,6 +59,7 @@ enum {
59#include <linux/rbtree.h> 59#include <linux/rbtree.h>
60#include <linux/spinlock.h> 60#include <linux/spinlock.h>
61#include <linux/nodemask.h> 61#include <linux/nodemask.h>
62#include <linux/pagemap.h>
62 63
63struct mm_struct; 64struct mm_struct;
64 65
@@ -220,6 +221,24 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
220extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, 221extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
221 int no_context); 222 int no_context);
222#endif 223#endif
224
225/* Check if a vma is migratable */
226static inline int vma_migratable(struct vm_area_struct *vma)
227{
228 if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
229 return 0;
230 /*
231 * Migration allocates pages in the highest zone. If we cannot
232 * do so then migration (at least from node to node) is not
233 * possible.
234 */
235 if (vma->vm_file &&
236 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
237 < policy_zone)
238 return 0;
239 return 1;
240}
241
223#else 242#else
224 243
225struct mempolicy {}; 244struct mempolicy {};
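
vma_migratable() moves here from migrate.h; it is just a predicate that callers check before trying to migrate a VMA's pages. A minimal sketch of the calling pattern:

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma_migratable(vma))
			continue;	/* skip IO, hugetlb and PFN-mapped areas */
		/* ... isolate and migrate the pages of this VMA ... */
	}
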
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
new file mode 100644
index 000000000000..bb3dd0545928
--- /dev/null
+++ b/include/linux/mfd/core.h
@@ -0,0 +1,55 @@
1#ifndef MFD_CORE_H
2#define MFD_CORE_H
3/*
4 * drivers/mfd/mfd-core.h
5 *
6 * core MFD support
7 * Copyright (c) 2006 Ian Molton
8 * Copyright (c) 2007 Dmitry Baryshkov
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/platform_device.h>
17
18/*
19 * This struct describes the MFD part ("cell").
20 * After registration the copy of this structure will become the platform data
21 * of the resulting platform_device
22 */
23struct mfd_cell {
24 const char *name;
25
26 int (*enable)(struct platform_device *dev);
27 int (*disable)(struct platform_device *dev);
28 int (*suspend)(struct platform_device *dev);
29 int (*resume)(struct platform_device *dev);
30
31 void *driver_data; /* driver-specific data */
32
33 /*
34 * These resources can be specified relative to the parent device.
35 * For accessing the device you should use the resources from the device
36 */
37 int num_resources;
38 const struct resource *resources;
39};
40
41static inline struct mfd_cell *
42mfd_get_cell(struct platform_device *pdev)
43{
44 return (struct mfd_cell *)pdev->dev.platform_data;
45}
46
47extern int mfd_add_devices(
48 struct platform_device *parent,
49 const struct mfd_cell *cells, int n_devs,
50 struct resource *mem_base,
51 int irq_base);
52
53extern void mfd_remove_devices(struct platform_device *parent);
54
55#endif
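
A minimal sketch of how a SoC "parent" driver would describe its sub-devices with mfd_cell and register them; the cell name, resource layout and my_irq_base are invented for illustration.

	static const struct resource my_sub_resources[] = {
		{
			.name  = "my-sub-regs",
			.start = 0x100,		/* relative to the parent's iomem */
			.end   = 0x1ff,
			.flags = IORESOURCE_MEM,
		},
	};

	static const struct mfd_cell my_cells[] = {
		{
			.name          = "my-subdevice",
			.num_resources = ARRAY_SIZE(my_sub_resources),
			.resources     = my_sub_resources,
		},
	};

	static int my_parent_probe(struct platform_device *pdev)
	{
		struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

		/* creates one platform_device per cell; the cell itself becomes
		 * the child's platform data (see mfd_get_cell() above) */
		return mfd_add_devices(pdev, my_cells, ARRAY_SIZE(my_cells),
				       iomem, my_irq_base);
	}
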
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
new file mode 100644
index 000000000000..7cc824a58f7c
--- /dev/null
+++ b/include/linux/mfd/tc6393xb.h
@@ -0,0 +1,49 @@
1/*
2 * Toshiba TC6393XB SoC support
3 *
4 * Copyright(c) 2005-2006 Chris Humbert
5 * Copyright(c) 2005 Dirk Opfer
6 * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
7 * Copyright(c) 2007 Dmitry Baryshkov
8 *
9 * Based on code written by Sharp/Lineo for 2.4 kernels
10 * Based on locomo.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#ifndef TC6393XB_H
18#define TC6393XB_H
19
20/* Also one should provide the CK3P6MI clock */
21struct tc6393xb_platform_data {
22 u16 scr_pll2cr; /* PLL2 Control */
23 u16 scr_gper; /* GP Enable */
24 u32 scr_gpo_doecr; /* GPO Data OE Control */
25 u32 scr_gpo_dsr; /* GPO Data Set */
26
27 int (*enable)(struct platform_device *dev);
28 int (*disable)(struct platform_device *dev);
29 int (*suspend)(struct platform_device *dev);
30 int (*resume)(struct platform_device *dev);
31
32 int irq_base; /* a base for cascaded irq */
33 int gpio_base;
34
35 struct tmio_nand_data *nand_data;
36};
37
38/*
39 * Relative to irq_base
40 */
41#define IRQ_TC6393_NAND 0
42#define IRQ_TC6393_MMC 1
43#define IRQ_TC6393_OHCI 2
44#define IRQ_TC6393_SERIAL 3
45#define IRQ_TC6393_FB 4
46
47#define TC6393XB_NR_IRQS 8
48
49#endif
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
new file mode 100644
index 000000000000..9438d8c9ac1c
--- /dev/null
+++ b/include/linux/mfd/tmio.h
@@ -0,0 +1,17 @@
1#ifndef MFD_TMIO_H
2#define MFD_TMIO_H
3
4/*
5 * data for the NAND controller
6 */
7struct tmio_nand_data {
8 struct nand_bbt_descr *badblock_pattern;
9 struct mtd_partition *partition;
10 unsigned int num_partitions;
11};
12
13#define TMIO_NAND_CONFIG "tmio-nand-config"
14#define TMIO_NAND_CONTROL "tmio-nand-control"
15#define TMIO_NAND_IRQ "tmio-nand"
16
17#endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e10a90a93b5d..03aea612d284 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -3,28 +3,10 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/mempolicy.h> 5#include <linux/mempolicy.h>
6#include <linux/pagemap.h>
7 6
8typedef struct page *new_page_t(struct page *, unsigned long private, int **); 7typedef struct page *new_page_t(struct page *, unsigned long private, int **);
9 8
10#ifdef CONFIG_MIGRATION 9#ifdef CONFIG_MIGRATION
11/* Check if a vma is migratable */
12static inline int vma_migratable(struct vm_area_struct *vma)
13{
14 if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
15 return 0;
16 /*
17 * Migration allocates pages in the highest zone. If we cannot
18 * do so then migration (at least from node to node) is not
19 * possible.
20 */
21 if (vma->vm_file &&
22 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
23 < policy_zone)
24 return 0;
25 return 1;
26}
27
28extern int isolate_lru_page(struct page *p, struct list_head *pagelist); 10extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
29extern int putback_lru_pages(struct list_head *l); 11extern int putback_lru_pages(struct list_head *l);
30extern int migrate_page(struct address_space *, 12extern int migrate_page(struct address_space *,
@@ -39,9 +21,6 @@ extern int migrate_vmas(struct mm_struct *mm,
39 const nodemask_t *from, const nodemask_t *to, 21 const nodemask_t *from, const nodemask_t *to,
40 unsigned long flags); 22 unsigned long flags);
41#else 23#else
42static inline int vma_migratable(struct vm_area_struct *vma)
43 { return 0; }
44
45static inline int isolate_lru_page(struct page *p, struct list_head *list) 24static inline int isolate_lru_page(struct page *p, struct list_head *list)
46 { return -ENOSYS; } 25 { return -ENOSYS; }
47static inline int putback_lru_pages(struct list_head *l) { return 0; } 26static inline int putback_lru_pages(struct list_head *l) { return 0; }
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 81b3dd5206e0..655ea0d1ee14 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -68,6 +68,14 @@ enum {
68 MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21 68 MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21
69}; 69};
70 70
71enum {
72 MLX4_BMME_FLAG_LOCAL_INV = 1 << 6,
73 MLX4_BMME_FLAG_REMOTE_INV = 1 << 7,
74 MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
75 MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
76 MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
77};
78
71enum mlx4_event { 79enum mlx4_event {
72 MLX4_EVENT_TYPE_COMP = 0x00, 80 MLX4_EVENT_TYPE_COMP = 0x00,
73 MLX4_EVENT_TYPE_PATH_MIG = 0x01, 81 MLX4_EVENT_TYPE_PATH_MIG = 0x01,
@@ -184,6 +192,8 @@ struct mlx4_caps {
184 u32 max_msg_sz; 192 u32 max_msg_sz;
185 u32 page_size_cap; 193 u32 page_size_cap;
186 u32 flags; 194 u32 flags;
195 u32 bmme_flags;
196 u32 reserved_lkey;
187 u16 stat_rate_support; 197 u16 stat_rate_support;
188 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 198 u8 port_width_cap[MLX4_MAX_PORTS + 1];
189 int max_gso_sz; 199 int max_gso_sz;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 7f128b266faa..e27082cd650e 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -219,7 +219,7 @@ struct mlx4_wqe_datagram_seg {
219 __be32 reservd[2]; 219 __be32 reservd[2];
220}; 220};
221 221
222struct mlx4_lso_seg { 222struct mlx4_wqe_lso_seg {
223 __be32 mss_hdr_size; 223 __be32 mss_hdr_size;
224 __be32 header[0]; 224 __be32 header[0];
225}; 225};
@@ -233,6 +233,14 @@ struct mlx4_wqe_bind_seg {
233 __be64 length; 233 __be64 length;
234}; 234};
235 235
236enum {
237 MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
238 MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
239 MLX4_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
240 MLX4_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
241 MLX4_WQE_FMR_PERM_ATOMIC = 1 << 31
242};
243
236struct mlx4_wqe_fmr_seg { 244struct mlx4_wqe_fmr_seg {
237 __be32 flags; 245 __be32 flags;
238 __be32 mem_key; 246 __be32 mem_key;
@@ -255,11 +263,11 @@ struct mlx4_wqe_fmr_ext_seg {
255}; 263};
256 264
257struct mlx4_wqe_local_inval_seg { 265struct mlx4_wqe_local_inval_seg {
258 u8 flags; 266 __be32 flags;
259 u8 reserved1[3]; 267 u32 reserved1;
260 __be32 mem_key; 268 __be32 mem_key;
261 u8 reserved2[3]; 269 u32 reserved2[2];
262 u8 guest_id; 270 __be32 guest_id;
263 __be64 pa; 271 __be64 pa;
264}; 272};
265 273
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2128ef7780c6..d87a5a5fe87d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -41,6 +41,9 @@ extern unsigned long mmap_min_addr;
41 41
42#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) 42#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
43 43
44/* to align the pointer to the (next) page boundary */
45#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
46
44/* 47/*
45 * Linux kernel virtual memory manager primitives. 48 * Linux kernel virtual memory manager primitives.
46 * The idea being to have a "virtual" mm in the same way 49 * The idea being to have a "virtual" mm in the same way
@@ -100,6 +103,7 @@ extern unsigned int kobjsize(const void *objp);
100#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ 103#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
101#define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */ 104#define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */
102#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ 105#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
106#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
103#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ 107#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
104#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ 108#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
105#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */ 109#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
@@ -166,12 +170,16 @@ struct vm_operations_struct {
166 void (*open)(struct vm_area_struct * area); 170 void (*open)(struct vm_area_struct * area);
167 void (*close)(struct vm_area_struct * area); 171 void (*close)(struct vm_area_struct * area);
168 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); 172 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
169 unsigned long (*nopfn)(struct vm_area_struct *area,
170 unsigned long address);
171 173
172 /* notification that a previously read-only page is about to become 174 /* notification that a previously read-only page is about to become
173 * writable, if an error is returned it will cause a SIGBUS */ 175 * writable, if an error is returned it will cause a SIGBUS */
174 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); 176 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
177
178 /* called by access_process_vm when get_user_pages() fails, typically
179 * for use by special VMAs that can switch between memory and hardware
180 */
181 int (*access)(struct vm_area_struct *vma, unsigned long addr,
182 void *buf, int len, int write);
175#ifdef CONFIG_NUMA 183#ifdef CONFIG_NUMA
176 /* 184 /*
177 * set_policy() op must add a reference to any non-NULL @new mempolicy 185 * set_policy() op must add a reference to any non-NULL @new mempolicy
@@ -675,13 +683,6 @@ static inline int page_mapped(struct page *page)
675} 683}
676 684
677/* 685/*
678 * Error return values for the *_nopfn functions
679 */
680#define NOPFN_SIGBUS ((unsigned long) -1)
681#define NOPFN_OOM ((unsigned long) -2)
682#define NOPFN_REFAULT ((unsigned long) -3)
683
684/*
685 * Different kinds of faults, as returned by handle_mm_fault(). 686 * Different kinds of faults, as returned by handle_mm_fault().
686 * Used to decide whether a process gets delivered SIGBUS or 687 * Used to decide whether a process gets delivered SIGBUS or
687 * just gets major/minor fault counters bumped up. 688 * just gets major/minor fault counters bumped up.
@@ -772,14 +773,14 @@ struct mm_walk {
772 773
773int walk_page_range(unsigned long addr, unsigned long end, 774int walk_page_range(unsigned long addr, unsigned long end,
774 struct mm_walk *walk); 775 struct mm_walk *walk);
775void free_pgd_range(struct mmu_gather **tlb, unsigned long addr, 776void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
776 unsigned long end, unsigned long floor, unsigned long ceiling); 777 unsigned long end, unsigned long floor, unsigned long ceiling);
777void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
778 unsigned long floor, unsigned long ceiling);
779int copy_page_range(struct mm_struct *dst, struct mm_struct *src, 778int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
780 struct vm_area_struct *vma); 779 struct vm_area_struct *vma);
781void unmap_mapping_range(struct address_space *mapping, 780void unmap_mapping_range(struct address_space *mapping,
782 loff_t const holebegin, loff_t const holelen, int even_cows); 781 loff_t const holebegin, loff_t const holelen, int even_cows);
782int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
783 void *buf, int len, int write);
783 784
784static inline void unmap_shared_mapping_range(struct address_space *mapping, 785static inline void unmap_shared_mapping_range(struct address_space *mapping,
785 loff_t const holebegin, loff_t const holelen) 786 loff_t const holebegin, loff_t const holelen)
@@ -965,9 +966,8 @@ static inline void pgtable_page_dtor(struct page *page)
965 NULL: pte_offset_kernel(pmd, address)) 966 NULL: pte_offset_kernel(pmd, address))
966 967
967extern void free_area_init(unsigned long * zones_size); 968extern void free_area_init(unsigned long * zones_size);
968extern void free_area_init_node(int nid, pg_data_t *pgdat, 969extern void free_area_init_node(int nid, unsigned long * zones_size,
969 unsigned long * zones_size, unsigned long zone_start_pfn, 970 unsigned long zone_start_pfn, unsigned long *zholes_size);
970 unsigned long *zholes_size);
971#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 971#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
972/* 972/*
973 * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its 973 * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
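
The mm.h hunks drop ->nopfn and add ->access to vm_operations_struct, used by access_process_vm() (and hence ptrace/gdb) on VMAs that map device memory; generic_access_phys() is the stock helper for remap_pfn_range()-style mappings. A hedged sketch (my_phys_pfn is a placeholder):

	static struct vm_operations_struct my_phys_vm_ops = {
		.access = generic_access_phys,	/* lets ptrace peek/poke the mapping */
	};

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_ops = &my_phys_vm_ops;
		return remap_pfn_range(vma, vma->vm_start, my_phys_pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
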
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 02a27ae78539..746f975b58ef 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -159,6 +159,17 @@ struct vm_area_struct {
159#endif 159#endif
160}; 160};
161 161
162struct core_thread {
163 struct task_struct *task;
164 struct core_thread *next;
165};
166
167struct core_state {
168 atomic_t nr_threads;
169 struct core_thread dumper;
170 struct completion startup;
171};
172
162struct mm_struct { 173struct mm_struct {
163 struct vm_area_struct * mmap; /* list of VMAs */ 174 struct vm_area_struct * mmap; /* list of VMAs */
164 struct rb_root mm_rb; 175 struct rb_root mm_rb;
@@ -175,7 +186,6 @@ struct mm_struct {
175 atomic_t mm_users; /* How many users with user space? */ 186 atomic_t mm_users; /* How many users with user space? */
176 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ 187 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
177 int map_count; /* number of VMAs */ 188 int map_count; /* number of VMAs */
178 int core_waiters;
179 struct rw_semaphore mmap_sem; 189 struct rw_semaphore mmap_sem;
180 spinlock_t page_table_lock; /* Protects page tables and some counters */ 190 spinlock_t page_table_lock; /* Protects page tables and some counters */
181 191
@@ -219,8 +229,7 @@ struct mm_struct {
219 229
220 unsigned long flags; /* Must use atomic bitops to access the bits */ 230 unsigned long flags; /* Must use atomic bitops to access the bits */
221 231
222 /* coredumping support */ 232 struct core_state *core_state; /* coredumping support */
223 struct completion *core_startup_done, core_done;
224 233
225 /* aio bits */ 234 /* aio bits */
226 rwlock_t ioctx_list_lock; /* aio lock */ 235 rwlock_t ioctx_list_lock; /* aio lock */
diff --git a/include/linux/module.h b/include/linux/module.h
index fce15ebd0e1c..68e09557c951 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -23,7 +23,7 @@
23/* Not Yet Implemented */ 23/* Not Yet Implemented */
24#define MODULE_SUPPORTED_DEVICE(name) 24#define MODULE_SUPPORTED_DEVICE(name)
25 25
26/* v850 toolchain uses a `_' prefix for all user symbols */ 26/* some toolchains use a `_' prefix for all user symbols */
27#ifndef MODULE_SYMBOL_PREFIX 27#ifndef MODULE_SYMBOL_PREFIX
28#define MODULE_SYMBOL_PREFIX "" 28#define MODULE_SYMBOL_PREFIX ""
29#endif 29#endif
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 81cd36b735b0..ba63858056c7 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -2,11 +2,11 @@
2#define _LINUX_MSDOS_FS_H 2#define _LINUX_MSDOS_FS_H
3 3
4#include <linux/magic.h> 4#include <linux/magic.h>
5#include <asm/byteorder.h>
5 6
6/* 7/*
7 * The MS-DOS filesystem constants/structures 8 * The MS-DOS filesystem constants/structures
8 */ 9 */
9#include <asm/byteorder.h>
10 10
11#define SECTOR_SIZE 512 /* sector size (bytes) */ 11#define SECTOR_SIZE 512 /* sector size (bytes) */
12#define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */ 12#define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */
@@ -89,24 +89,22 @@
89#define IS_FSINFO(x) (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \ 89#define IS_FSINFO(x) (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \
90 && le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2) 90 && le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2)
91 91
92struct __fat_dirent {
93 long d_ino;
94 __kernel_off_t d_off;
95 unsigned short d_reclen;
96 char d_name[256]; /* We must not include limits.h! */
97};
98
92/* 99/*
93 * ioctl commands 100 * ioctl commands
94 */ 101 */
95#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2]) 102#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct __fat_dirent[2])
96#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct dirent [2]) 103#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct __fat_dirent[2])
97/* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */ 104/* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */
98#define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) 105#define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32)
99#define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32) 106#define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32)
100 107
101/*
102 * vfat shortname flags
103 */
104#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */
105#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */
106#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */
107#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */
108#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */
109
110struct fat_boot_sector { 108struct fat_boot_sector {
111 __u8 ignored[3]; /* Boot strap short or near jump */ 109 __u8 ignored[3]; /* Boot strap short or near jump */
112 __u8 system_id[8]; /* Name - can be used to special case 110 __u8 system_id[8]; /* Name - can be used to special case
@@ -168,14 +166,6 @@ struct msdos_dir_slot {
168 __u8 name11_12[4]; /* last 2 characters in name */ 166 __u8 name11_12[4]; /* last 2 characters in name */
169}; 167};
170 168
171struct fat_slot_info {
172 loff_t i_pos; /* on-disk position of directory entry */
173 loff_t slot_off; /* offset for slot or de start */
174 int nr_slots; /* number of slots + 1(de) in filename */
175 struct msdos_dir_entry *de;
176 struct buffer_head *bh;
177};
178
179#ifdef __KERNEL__ 169#ifdef __KERNEL__
180 170
181#include <linux/buffer_head.h> 171#include <linux/buffer_head.h>
@@ -184,6 +174,15 @@ struct fat_slot_info {
184#include <linux/fs.h> 174#include <linux/fs.h>
185#include <linux/mutex.h> 175#include <linux/mutex.h>
186 176
177/*
178 * vfat shortname flags
179 */
180#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */
181#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */
182#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */
183#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */
184#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */
185
187struct fat_mount_options { 186struct fat_mount_options {
188 uid_t fs_uid; 187 uid_t fs_uid;
189 gid_t fs_gid; 188 gid_t fs_gid;
@@ -202,10 +201,10 @@ struct fat_mount_options {
202 utf8:1, /* Use of UTF-8 character set (Default) */ 201 utf8:1, /* Use of UTF-8 character set (Default) */
203 unicode_xlate:1, /* create escape sequences for unhandled Unicode */ 202 unicode_xlate:1, /* create escape sequences for unhandled Unicode */
204 numtail:1, /* Does first alias have a numeric '~1' type tail? */ 203 numtail:1, /* Does first alias have a numeric '~1' type tail? */
205 atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */
206 flush:1, /* write things quickly */ 204 flush:1, /* write things quickly */
207 nocase:1, /* Does this need case conversion? 0=need case conversion*/ 205 nocase:1, /* Does this need case conversion? 0=need case conversion*/
208 usefree:1; /* Use free_clusters for FAT32 */ 206 usefree:1, /* Use free_clusters for FAT32 */
207 tz_utc:1; /* Filesystem timestamps are in UTC */
209}; 208};
210 209
211#define FAT_HASH_BITS 8 210#define FAT_HASH_BITS 8
@@ -267,6 +266,14 @@ struct msdos_inode_info {
267 struct inode vfs_inode; 266 struct inode vfs_inode;
268}; 267};
269 268
269struct fat_slot_info {
270 loff_t i_pos; /* on-disk position of directory entry */
271 loff_t slot_off; /* offset for slot or de start */
272 int nr_slots; /* number of slots + 1(de) in filename */
273 struct msdos_dir_entry *de;
274 struct buffer_head *bh;
275};
276
270static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb) 277static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb)
271{ 278{
272 return sb->s_fs_info; 279 return sb->s_fs_info;
@@ -428,8 +435,9 @@ extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
428extern void fat_fs_panic(struct super_block *s, const char *fmt, ...); 435extern void fat_fs_panic(struct super_block *s, const char *fmt, ...);
429extern void fat_clusters_flush(struct super_block *sb); 436extern void fat_clusters_flush(struct super_block *sb);
430extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster); 437extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
431extern int date_dos2unix(unsigned short time, unsigned short date); 438extern int date_dos2unix(unsigned short time, unsigned short date, int tz_utc);
432extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date); 439extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date,
440 int tz_utc);
433extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs); 441extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
434 442
435int fat_cache_init(void); 443int fat_cache_init(void);
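The readdir ioctls above are now typed against the self-contained struct __fat_dirent, so userspace no longer has to match the kernel's internal dirent layout. A hedged userspace sketch (hypothetical program and mount point; see Documentation/filesystems/vfat.txt for the full ioctl contract):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>

int main(void)
{
	struct __fat_dirent de[2];	/* [0] = short (8.3) name, [1] = long name */
	int fd = open("/mnt/vfat", O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VFAT_IOCTL_READDIR_BOTH, de) >= 0)
		printf("short \"%s\", long \"%s\"\n", de[0].d_name, de[1].d_name);
	close(fd);
	return 0;
}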
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index f71201d0f3e7..6316fafe5c2a 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -45,13 +45,13 @@ enum {
45 * @size: how many physical eraseblocks are reserved for this volume 45 * @size: how many physical eraseblocks are reserved for this volume
46 * @used_bytes: how many bytes of data this volume contains 46 * @used_bytes: how many bytes of data this volume contains
47 * @used_ebs: how many physical eraseblocks of this volume actually contain any 47 * @used_ebs: how many physical eraseblocks of this volume actually contain any
48 * data 48 * data
49 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) 49 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
50 * @corrupted: non-zero if the volume is corrupted (static volumes only) 50 * @corrupted: non-zero if the volume is corrupted (static volumes only)
51 * @upd_marker: non-zero if the volume has update marker set 51 * @upd_marker: non-zero if the volume has update marker set
52 * @alignment: volume alignment 52 * @alignment: volume alignment
53 * @usable_leb_size: how many bytes are available in logical eraseblocks of 53 * @usable_leb_size: how many bytes are available in logical eraseblocks of
54 * this volume 54 * this volume
55 * @name_len: volume name length 55 * @name_len: volume name length
56 * @name: volume name 56 * @name: volume name
57 * @cdev: UBI volume character device major and minor numbers 57 * @cdev: UBI volume character device major and minor numbers
@@ -152,6 +152,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum);
152int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); 152int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
153int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); 153int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype);
154int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); 154int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
155int ubi_sync(int ubi_num);
155 156
156/* 157/*
157 * This function is the same as the 'ubi_leb_read()' function, but it does not 158 * This function is the same as the 'ubi_leb_read()' function, but it does not
diff --git a/include/linux/net.h b/include/linux/net.h
index 150a48c68d52..4a9a30f2d68f 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -20,6 +20,7 @@
20 20
21#include <linux/wait.h> 21#include <linux/wait.h>
22#include <linux/socket.h> 22#include <linux/socket.h>
23#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
23#include <asm/socket.h> 24#include <asm/socket.h>
24 25
25struct poll_table_struct; 26struct poll_table_struct;
@@ -46,6 +47,7 @@ struct net;
46#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */ 47#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
47#define SYS_SENDMSG 16 /* sys_sendmsg(2) */ 48#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
48#define SYS_RECVMSG 17 /* sys_recvmsg(2) */ 49#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
50#define SYS_PACCEPT 18 /* sys_paccept(2) */
49 51
50typedef enum { 52typedef enum {
51 SS_FREE = 0, /* not allocated */ 53 SS_FREE = 0, /* not allocated */
@@ -94,6 +96,15 @@ enum sock_type {
94}; 96};
95 97
96#define SOCK_MAX (SOCK_PACKET + 1) 98#define SOCK_MAX (SOCK_PACKET + 1)
 99/* Mask which covers at least up to SOCK_MAX-1. The
100 * remaining bits are used as flags. */
101#define SOCK_TYPE_MASK 0xf
102
103/* Flags for socket, socketpair, paccept */
104#define SOCK_CLOEXEC O_CLOEXEC
105#ifndef SOCK_NONBLOCK
106#define SOCK_NONBLOCK O_NONBLOCK
107#endif
97 108
98#endif /* ARCH_HAS_SOCKET_TYPES */ 109#endif /* ARCH_HAS_SOCKET_TYPES */
99 110
@@ -208,10 +219,12 @@ extern int sock_sendmsg(struct socket *sock, struct msghdr *msg,
208 size_t len); 219 size_t len);
209extern int sock_recvmsg(struct socket *sock, struct msghdr *msg, 220extern int sock_recvmsg(struct socket *sock, struct msghdr *msg,
210 size_t size, int flags); 221 size_t size, int flags);
211extern int sock_map_fd(struct socket *sock); 222extern int sock_map_fd(struct socket *sock, int flags);
212extern struct socket *sockfd_lookup(int fd, int *err); 223extern struct socket *sockfd_lookup(int fd, int *err);
213#define sockfd_put(sock) fput(sock->file) 224#define sockfd_put(sock) fput(sock->file)
214extern int net_ratelimit(void); 225extern int net_ratelimit(void);
226extern long do_accept(int fd, struct sockaddr __user *upeer_sockaddr,
227 int __user *upeer_addrlen, int flags);
215 228
216#define net_random() random32() 229#define net_random() random32()
217#define net_srandom(seed) srandom32((__force u32)seed) 230#define net_srandom(seed) srandom32((__force u32)seed)
@@ -338,8 +351,7 @@ static const struct proto_ops name##_ops = { \
338 351
339#ifdef CONFIG_SYSCTL 352#ifdef CONFIG_SYSCTL
340#include <linux/sysctl.h> 353#include <linux/sysctl.h>
341extern int net_msg_cost; 354extern struct ratelimit_state net_ratelimit_state;
342extern int net_msg_burst;
343#endif 355#endif
344 356
345#endif /* __KERNEL__ */ 357#endif /* __KERNEL__ */
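Since SOCK_TYPE_MASK now separates the type from the new flag bits, userspace can OR SOCK_CLOEXEC and SOCK_NONBLOCK straight into the socket(2) type argument. A small hedged sketch (hypothetical port number; assumes a libc that already exposes the flags):

#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

int demo_make_listener(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),	/* hypothetical port */
	};
	int fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0);

	if (fd < 0)
		return -1;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 16) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* already close-on-exec and non-blocking */
}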
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index a2861d95ecc3..108f47e5fd95 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -12,7 +12,6 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/unistd.h> 14#include <linux/unistd.h>
15#include <linux/dirent.h>
16#include <linux/fs.h> 15#include <linux/fs.h>
17#include <linux/posix_acl.h> 16#include <linux/posix_acl.h>
18#include <linux/mount.h> 17#include <linux/mount.h>
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index bd3d72ddf333..da2698b0fdd1 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -214,6 +214,8 @@ static inline int notifier_to_errno(int ret)
214#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 214#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
215#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, 215#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
216 * not handling interrupts, soon dead */ 216 * not handling interrupts, soon dead */
217#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
218 * lock is dropped */
217 219
 218/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend 220/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
219 * operation in progress 221 * operation in progress
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 0e66b57631fc..c8a768e59640 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -82,9 +82,12 @@ static inline void get_nsproxy(struct nsproxy *ns)
82} 82}
83 83
84#ifdef CONFIG_CGROUP_NS 84#ifdef CONFIG_CGROUP_NS
85int ns_cgroup_clone(struct task_struct *tsk); 85int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid);
86#else 86#else
87static inline int ns_cgroup_clone(struct task_struct *tsk) { return 0; } 87static inline int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid)
88{
89 return 0;
90}
88#endif 91#endif
89 92
90#endif 93#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0d2a4e7012aa..54590a9a103e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -96,7 +96,22 @@ enum pageflags {
96#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 96#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
97 PG_uncached, /* Page has been mapped as uncached */ 97 PG_uncached, /* Page has been mapped as uncached */
98#endif 98#endif
99 __NR_PAGEFLAGS 99 __NR_PAGEFLAGS,
100
101 /* Filesystems */
102 PG_checked = PG_owner_priv_1,
103
104 /* XEN */
105 PG_pinned = PG_owner_priv_1,
106 PG_savepinned = PG_dirty,
107
108 /* SLOB */
109 PG_slob_page = PG_active,
110 PG_slob_free = PG_private,
111
112 /* SLUB */
113 PG_slub_frozen = PG_active,
114 PG_slub_debug = PG_error,
100}; 115};
101 116
102#ifndef __GENERATING_BOUNDS_H 117#ifndef __GENERATING_BOUNDS_H
@@ -155,13 +170,19 @@ PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
155PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) 170PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
156PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) 171PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
157__PAGEFLAG(Slab, slab) 172__PAGEFLAG(Slab, slab)
158PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */ 173PAGEFLAG(Checked, checked) /* Used by some filesystems */
159PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */ 174PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
160PAGEFLAG(SavePinned, dirty); /* Xen */ 175PAGEFLAG(SavePinned, savepinned); /* Xen */
161PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 176PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
162PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) 177PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
163 __SETPAGEFLAG(Private, private) 178 __SETPAGEFLAG(Private, private)
164 179
180__PAGEFLAG(SlobPage, slob_page)
181__PAGEFLAG(SlobFree, slob_free)
182
183__PAGEFLAG(SlubFrozen, slub_frozen)
184__PAGEFLAG(SlubDebug, slub_debug)
185
165/* 186/*
166 * Only test-and-set exist for PG_writeback. The unconditional operators are 187 * Only test-and-set exist for PG_writeback. The unconditional operators are
167 * risky: they bypass page accounting. 188 * risky: they bypass page accounting.
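The aliases above let SLOB, SLUB, Xen and filesystem code stop open-coding PG_active/PG_error and use generated accessors with honest names. A sketch of what the __PAGEFLAG(SlubFrozen, slub_frozen) line buys in practice (illustrative, not from this patch):

#include <linux/mm_types.h>
#include <linux/page-flags.h>

static inline void demo_freeze_slab(struct page *page)
{
	__SetPageSlubFrozen(page);	/* non-atomic set of the aliased PG_active bit */
}

static inline int demo_slab_is_frozen(struct page *page)
{
	return PageSlubFrozen(page);	/* reads the same bit under its SLUB name */
}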
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d2fca802f809..ee1ec2c7723c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -22,7 +22,7 @@
22 22
23static inline void mapping_set_error(struct address_space *mapping, int error) 23static inline void mapping_set_error(struct address_space *mapping, int error)
24{ 24{
25 if (error) { 25 if (unlikely(error)) {
26 if (error == -ENOSPC) 26 if (error == -ENOSPC)
27 set_bit(AS_ENOSPC, &mapping->flags); 27 set_bit(AS_ENOSPC, &mapping->flags);
28 else 28 else
diff --git a/include/linux/parser.h b/include/linux/parser.h
index 7dcd05075756..cc554ca8bc78 100644
--- a/include/linux/parser.h
+++ b/include/linux/parser.h
@@ -14,7 +14,7 @@ struct match_token {
14 const char *pattern; 14 const char *pattern;
15}; 15};
16 16
17typedef struct match_token match_table_t[]; 17typedef const struct match_token match_table_t[];
18 18
19/* Maximum number of arguments that match_token will find in a pattern */ 19/* Maximum number of arguments that match_token will find in a pattern */
20enum {MAX_OPT_ARGS = 3}; 20enum {MAX_OPT_ARGS = 3};
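Constifying match_table_t lets option tables live in read-only data. A hedged sketch of the usual pattern, with made-up tokens:

#include <linux/parser.h>

enum { Opt_ro, Opt_uid, Opt_err };

static const match_table_t demo_tokens = {	/* can now sit in .rodata */
	{ Opt_ro,  "ro" },
	{ Opt_uid, "uid=%u" },
	{ Opt_err, NULL }
};

static int demo_parse_one(char *p)
{
	substring_t args[MAX_OPT_ARGS];

	return match_token(p, demo_tokens, args);	/* returns one of the Opt_* values */
}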
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a6a088e1a804..1d296d31abe0 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -638,7 +638,9 @@ int pci_save_state(struct pci_dev *dev);
638int pci_restore_state(struct pci_dev *dev); 638int pci_restore_state(struct pci_dev *dev);
639int pci_set_power_state(struct pci_dev *dev, pci_power_t state); 639int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
640pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 640pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
641bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
641int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); 642int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
643pci_power_t pci_target_state(struct pci_dev *dev);
642int pci_prepare_to_sleep(struct pci_dev *dev); 644int pci_prepare_to_sleep(struct pci_dev *dev);
643int pci_back_from_sleep(struct pci_dev *dev); 645int pci_back_from_sleep(struct pci_dev *dev);
644 646
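pci_pme_capable() and pci_target_state() give drivers a portable way to ask which sleep state the core would pick and whether PME# works from it. A hedged driver ->suspend sketch (hypothetical device, real helpers declared above):

#include <linux/pci.h>

static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_power_t target = pci_target_state(pdev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	/* Arm wake-up only if the device can signal PME# from the target state. */
	pci_enable_wake(pdev, target, pci_pme_capable(pdev, target));
	return pci_set_power_state(pdev, target);
}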
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d8507eb394cf..c3b1761aba26 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2371,6 +2371,14 @@
2371#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 2371#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
2372#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 2372#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
2373#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 2373#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
2374#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
2375#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
2376#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b
2377#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c
2378#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430
2379#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
2380#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
2381#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
2374#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 2382#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
2375#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 2383#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
2376#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 2384#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
@@ -2392,6 +2400,9 @@
2392#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 2400#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
2393#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 2401#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
2394#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f 2402#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
2403#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
2404#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
2405#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
2395#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 2406#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
2396#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 2407#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
2397#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 2408#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
diff --git a/include/linux/pid.h b/include/linux/pid.h
index c21c7e8124a7..22921ac4cfd9 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -48,7 +48,7 @@ enum pid_type
48 */ 48 */
49 49
50struct upid { 50struct upid {
51 /* Try to keep pid_chain in the same cacheline as nr for find_pid */ 51 /* Try to keep pid_chain in the same cacheline as nr for find_vpid */
52 int nr; 52 int nr;
53 struct pid_namespace *ns; 53 struct pid_namespace *ns;
54 struct hlist_node pid_chain; 54 struct hlist_node pid_chain;
@@ -57,10 +57,10 @@ struct upid {
57struct pid 57struct pid
58{ 58{
59 atomic_t count; 59 atomic_t count;
60 unsigned int level;
60 /* lists of tasks that use this pid */ 61 /* lists of tasks that use this pid */
61 struct hlist_head tasks[PIDTYPE_MAX]; 62 struct hlist_head tasks[PIDTYPE_MAX];
62 struct rcu_head rcu; 63 struct rcu_head rcu;
63 unsigned int level;
64 struct upid numbers[1]; 64 struct upid numbers[1];
65}; 65};
66 66
@@ -105,14 +105,12 @@ extern struct pid_namespace init_pid_ns;
105 * or rcu_read_lock() held. 105 * or rcu_read_lock() held.
106 * 106 *
107 * find_pid_ns() finds the pid in the namespace specified 107 * find_pid_ns() finds the pid in the namespace specified
108 * find_pid() find the pid by its global id, i.e. in the init namespace
109 * find_vpid() finds the pid by its virtual id, i.e. in the current namespace 108 * find_vpid() finds the pid by its virtual id, i.e. in the current namespace
110 * 109 *
111 * see also find_task_by_pid() set in include/linux/sched.h 110 * see also find_task_by_vpid() set in include/linux/sched.h
112 */ 111 */
113extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); 112extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
114extern struct pid *find_vpid(int nr); 113extern struct pid *find_vpid(int nr);
115extern struct pid *find_pid(int nr);
116 114
117/* 115/*
118 * Lookup a PID in the hash table, and return with it's count elevated. 116 * Lookup a PID in the hash table, and return with it's count elevated.
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index caff5283d15c..1af82c4e17d4 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -14,6 +14,8 @@ struct pidmap {
14 14
15#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8) 15#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
16 16
17struct bsd_acct_struct;
18
17struct pid_namespace { 19struct pid_namespace {
18 struct kref kref; 20 struct kref kref;
19 struct pidmap pidmap[PIDMAP_ENTRIES]; 21 struct pidmap pidmap[PIDMAP_ENTRIES];
@@ -25,6 +27,9 @@ struct pid_namespace {
25#ifdef CONFIG_PROC_FS 27#ifdef CONFIG_PROC_FS
26 struct vfsmount *proc_mnt; 28 struct vfsmount *proc_mnt;
27#endif 29#endif
30#ifdef CONFIG_BSD_PROCESS_ACCT
31 struct bsd_acct_struct *bacct;
32#endif
28}; 33};
29 34
30extern struct pid_namespace init_pid_ns; 35extern struct pid_namespace init_pid_ns;
@@ -85,4 +90,7 @@ static inline struct task_struct *task_child_reaper(struct task_struct *tsk)
85 return tsk->nsproxy->pid_ns->child_reaper; 90 return tsk->nsproxy->pid_ns->child_reaper;
86} 91}
87 92
93void pidhash_init(void);
94void pidmap_init(void);
95
88#endif /* _LINUX_PID_NS_H */ 96#endif /* _LINUX_PID_NS_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 4ad9de94449a..4dcce54b6d76 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -22,78 +22,6 @@
22#define _LINUX_PM_H 22#define _LINUX_PM_H
23 23
24#include <linux/list.h> 24#include <linux/list.h>
25#include <asm/atomic.h>
26#include <asm/errno.h>
27
28/*
29 * Power management requests... these are passed to pm_send_all() and friends.
30 *
31 * these functions are old and deprecated, see below.
32 */
33typedef int __bitwise pm_request_t;
34
35#define PM_SUSPEND ((__force pm_request_t) 1) /* enter D1-D3 */
36#define PM_RESUME ((__force pm_request_t) 2) /* enter D0 */
37
38
39/*
40 * Device types... these are passed to pm_register
41 */
42typedef int __bitwise pm_dev_t;
43
44#define PM_UNKNOWN_DEV ((__force pm_dev_t) 0) /* generic */
45#define PM_SYS_DEV ((__force pm_dev_t) 1) /* system device (fan, KB controller, ...) */
46#define PM_PCI_DEV ((__force pm_dev_t) 2) /* PCI device */
47#define PM_USB_DEV ((__force pm_dev_t) 3) /* USB device */
48#define PM_SCSI_DEV ((__force pm_dev_t) 4) /* SCSI device */
49#define PM_ISA_DEV ((__force pm_dev_t) 5) /* ISA device */
50#define PM_MTD_DEV ((__force pm_dev_t) 6) /* Memory Technology Device */
51
52/*
53 * System device hardware ID (PnP) values
54 */
55enum
56{
57 PM_SYS_UNKNOWN = 0x00000000, /* generic */
58 PM_SYS_KBC = 0x41d00303, /* keyboard controller */
59 PM_SYS_COM = 0x41d00500, /* serial port */
60 PM_SYS_IRDA = 0x41d00510, /* IRDA controller */
61 PM_SYS_FDC = 0x41d00700, /* floppy controller */
62 PM_SYS_VGA = 0x41d00900, /* VGA controller */
63 PM_SYS_PCMCIA = 0x41d00e00, /* PCMCIA controller */
64};
65
66/*
67 * Device identifier
68 */
69#define PM_PCI_ID(dev) ((dev)->bus->number << 16 | (dev)->devfn)
70
71/*
72 * Request handler callback
73 */
74struct pm_dev;
75
76typedef int (*pm_callback)(struct pm_dev *dev, pm_request_t rqst, void *data);
77
78/*
79 * Dynamic device information
80 */
81struct pm_dev
82{
83 pm_dev_t type;
84 unsigned long id;
85 pm_callback callback;
86 void *data;
87
88 unsigned long flags;
89 unsigned long state;
90 unsigned long prev_state;
91
92 struct list_head entry;
93};
94
95/* Functions above this comment are list-based old-style power
96 * management. Please avoid using them. */
97 25
98/* 26/*
99 * Callbacks for platform drivers to implement. 27 * Callbacks for platform drivers to implement.
@@ -317,6 +245,21 @@ struct pm_ext_ops {
317 * RECOVER Creation of a hibernation image or restoration of the main 245 * RECOVER Creation of a hibernation image or restoration of the main
318 * memory contents from a hibernation image has failed, call 246 * memory contents from a hibernation image has failed, call
319 * ->thaw() and ->complete() for all devices. 247 * ->thaw() and ->complete() for all devices.
248 *
249 * The following PM_EVENT_ messages are defined for internal use by
250 * kernel subsystems. They are never issued by the PM core.
251 *
252 * USER_SUSPEND Manual selective suspend was issued by userspace.
253 *
254 * USER_RESUME Manual selective resume was issued by userspace.
255 *
256 * REMOTE_WAKEUP Remote-wakeup request was received from the device.
257 *
258 * AUTO_SUSPEND Automatic (device idle) runtime suspend was
259 * initiated by the subsystem.
260 *
261 * AUTO_RESUME Automatic (device needed) runtime resume was
262 * requested by a driver.
320 */ 263 */
321 264
322#define PM_EVENT_ON 0x0000 265#define PM_EVENT_ON 0x0000
@@ -328,9 +271,18 @@ struct pm_ext_ops {
328#define PM_EVENT_THAW 0x0020 271#define PM_EVENT_THAW 0x0020
329#define PM_EVENT_RESTORE 0x0040 272#define PM_EVENT_RESTORE 0x0040
330#define PM_EVENT_RECOVER 0x0080 273#define PM_EVENT_RECOVER 0x0080
274#define PM_EVENT_USER 0x0100
275#define PM_EVENT_REMOTE 0x0200
276#define PM_EVENT_AUTO 0x0400
331 277
332#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) 278#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
279#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND)
280#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME)
281#define PM_EVENT_REMOTE_WAKEUP (PM_EVENT_REMOTE | PM_EVENT_RESUME)
282#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
283#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME)
333 284
285#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
334#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) 286#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
335#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) 287#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
336#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) 288#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
@@ -339,7 +291,16 @@ struct pm_ext_ops {
339#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) 291#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
340#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) 292#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
341#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) 293#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
342#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) 294#define PMSG_USER_SUSPEND ((struct pm_message) \
 295 { .event = PM_EVENT_USER_SUSPEND, })
 296#define PMSG_USER_RESUME ((struct pm_message) \
 297 { .event = PM_EVENT_USER_RESUME, })
 298#define PMSG_REMOTE_RESUME ((struct pm_message) \
 299 { .event = PM_EVENT_REMOTE_WAKEUP, })
 300#define PMSG_AUTO_SUSPEND ((struct pm_message) \
 301 { .event = PM_EVENT_AUTO_SUSPEND, })
 302#define PMSG_AUTO_RESUME ((struct pm_message) \
 303 { .event = PM_EVENT_AUTO_RESUME, })
343 304
344/** 305/**
345 * Device power management states 306 * Device power management states
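Because the new USER/REMOTE/AUTO bits are ORed onto the base suspend/resume events, a subsystem can classify a request with a mask test instead of comparing whole messages. A minimal sketch with hypothetical demo_* callbacks:

#include <linux/device.h>
#include <linux/pm.h>

static int demo_runtime_suspend(struct device *dev) { return 0; }	/* stub */
static int demo_full_suspend(struct device *dev)    { return 0; }	/* stub */

static int demo_bus_suspend(struct device *dev, pm_message_t msg)
{
	if (msg.event & PM_EVENT_AUTO)
		return demo_runtime_suspend(dev);	/* device-idle suspend */
	return demo_full_suspend(dev);			/* system sleep or user request */
}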
diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h
deleted file mode 100644
index 446f4f42b952..000000000000
--- a/include/linux/pm_legacy.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef __LINUX_PM_LEGACY_H__
2#define __LINUX_PM_LEGACY_H__
3
4
5#ifdef CONFIG_PM_LEGACY
6
7/*
8 * Register a device with power management
9 */
10struct pm_dev __deprecated *
11pm_register(pm_dev_t type, unsigned long id, pm_callback callback);
12
13/*
14 * Send a request to all devices
15 */
16int __deprecated pm_send_all(pm_request_t rqst, void *data);
17
18#else /* CONFIG_PM_LEGACY */
19
20static inline struct pm_dev *pm_register(pm_dev_t type,
21 unsigned long id,
22 pm_callback callback)
23{
24 return NULL;
25}
26
27static inline int pm_send_all(pm_request_t rqst, void *data)
28{
29 return 0;
30}
31
32#endif /* CONFIG_PM_LEGACY */
33
34#endif /* __LINUX_PM_LEGACY_H__ */
35
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 15a9eaf4a802..f560d1705afe 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -79,6 +79,7 @@ struct proc_dir_entry {
79 int pde_users; /* number of callers into module in progress */ 79 int pde_users; /* number of callers into module in progress */
80 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ 80 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
81 struct completion *pde_unload_completion; 81 struct completion *pde_unload_completion;
82 struct list_head pde_openers; /* who did ->open, but not ->release */
82}; 83};
83 84
84struct kcore_list { 85struct kcore_list {
@@ -138,7 +139,6 @@ extern int proc_readdir(struct file *, void *, filldir_t);
138extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); 139extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
139 140
140extern const struct file_operations proc_kcore_operations; 141extern const struct file_operations proc_kcore_operations;
141extern const struct file_operations proc_kmsg_operations;
142extern const struct file_operations ppc_htab_operations; 142extern const struct file_operations ppc_htab_operations;
143 143
144extern int pid_ns_prepare_proc(struct pid_namespace *ns); 144extern int pid_ns_prepare_proc(struct pid_namespace *ns);
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 05c1cc736937..7e7087239af5 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -8,8 +8,6 @@
8 8
9#include <asm/errno.h> 9#include <asm/errno.h>
10 10
11extern int prof_on __read_mostly;
12
13#define CPU_PROFILING 1 11#define CPU_PROFILING 1
14#define SCHED_PROFILING 2 12#define SCHED_PROFILING 2
15#define SLEEP_PROFILING 3 13#define SLEEP_PROFILING 3
@@ -19,14 +17,31 @@ struct proc_dir_entry;
19struct pt_regs; 17struct pt_regs;
20struct notifier_block; 18struct notifier_block;
21 19
20#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
21void create_prof_cpu_mask(struct proc_dir_entry *de);
22#else
23static inline void create_prof_cpu_mask(struct proc_dir_entry *de)
24{
25}
26#endif
27
28enum profile_type {
29 PROFILE_TASK_EXIT,
30 PROFILE_MUNMAP
31};
32
33#ifdef CONFIG_PROFILING
34
35extern int prof_on __read_mostly;
36
22/* init basic kernel profiler */ 37/* init basic kernel profiler */
23void __init profile_init(void); 38void __init profile_init(void);
24void profile_tick(int); 39void profile_tick(int type);
25 40
26/* 41/*
27 * Add multiple profiler hits to a given address: 42 * Add multiple profiler hits to a given address:
28 */ 43 */
29void profile_hits(int, void *ip, unsigned int nr_hits); 44void profile_hits(int type, void *ip, unsigned int nr_hits);
30 45
31/* 46/*
32 * Single profiler hit: 47 * Single profiler hit:
@@ -40,19 +55,6 @@ static inline void profile_hit(int type, void *ip)
40 profile_hits(type, ip, 1); 55 profile_hits(type, ip, 1);
41} 56}
42 57
43#ifdef CONFIG_PROC_FS
44void create_prof_cpu_mask(struct proc_dir_entry *);
45#else
46#define create_prof_cpu_mask(x) do { (void)(x); } while (0)
47#endif
48
49enum profile_type {
50 PROFILE_TASK_EXIT,
51 PROFILE_MUNMAP
52};
53
54#ifdef CONFIG_PROFILING
55
56struct task_struct; 58struct task_struct;
57struct mm_struct; 59struct mm_struct;
58 60
@@ -80,6 +82,28 @@ struct pt_regs;
80 82
81#else 83#else
82 84
85#define prof_on 0
86
87static inline void profile_init(void)
88{
89 return;
90}
91
92static inline void profile_tick(int type)
93{
94 return;
95}
96
97static inline void profile_hits(int type, void *ip, unsigned int nr_hits)
98{
99 return;
100}
101
102static inline void profile_hit(int type, void *ip)
103{
104 return;
105}
106
83static inline int task_handoff_register(struct notifier_block * n) 107static inline int task_handoff_register(struct notifier_block * n)
84{ 108{
85 return -ENOSYS; 109 return -ENOSYS;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index dcddfb200947..376a05048bc5 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -41,9 +41,6 @@
41#define __DQUOT_VERSION__ "dquot_6.5.1" 41#define __DQUOT_VERSION__ "dquot_6.5.1"
42#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 42#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1
43 43
44typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
45typedef __u64 qsize_t; /* Type in which we store sizes */
46
47/* Size of blocks in which are counted size limits */ 44/* Size of blocks in which are counted size limits */
48#define QUOTABLOCK_BITS 10 45#define QUOTABLOCK_BITS 10
49#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) 46#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
@@ -138,6 +135,10 @@ struct if_dqinfo {
138#define QUOTA_NL_BHARDWARN 4 /* Block hardlimit reached */ 135#define QUOTA_NL_BHARDWARN 4 /* Block hardlimit reached */
139#define QUOTA_NL_BSOFTLONGWARN 5 /* Block grace time expired */ 136#define QUOTA_NL_BSOFTLONGWARN 5 /* Block grace time expired */
140#define QUOTA_NL_BSOFTWARN 6 /* Block softlimit reached */ 137#define QUOTA_NL_BSOFTWARN 6 /* Block softlimit reached */
138#define QUOTA_NL_IHARDBELOW 7 /* Usage got below inode hardlimit */
139#define QUOTA_NL_ISOFTBELOW 8 /* Usage got below inode softlimit */
140#define QUOTA_NL_BHARDBELOW 9 /* Usage got below block hardlimit */
141#define QUOTA_NL_BSOFTBELOW 10 /* Usage got below block softlimit */
141 142
142enum { 143enum {
143 QUOTA_NL_C_UNSPEC, 144 QUOTA_NL_C_UNSPEC,
@@ -172,6 +173,9 @@ enum {
172 173
173#include <asm/atomic.h> 174#include <asm/atomic.h>
174 175
176typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
177typedef __u64 qsize_t; /* Type in which we store sizes */
178
175extern spinlock_t dq_data_lock; 179extern spinlock_t dq_data_lock;
176 180
177/* Maximal numbers of writes for quota operation (insert/delete/update) 181/* Maximal numbers of writes for quota operation (insert/delete/update)
@@ -223,12 +227,10 @@ struct super_block;
223#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */ 227#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
224 228
225extern void mark_info_dirty(struct super_block *sb, int type); 229extern void mark_info_dirty(struct super_block *sb, int type);
226#define info_dirty(info) test_bit(DQF_INFO_DIRTY_B, &(info)->dqi_flags) 230static inline int info_dirty(struct mem_dqinfo *info)
227#define info_any_dquot_dirty(info) (!list_empty(&(info)->dqi_dirty_list)) 231{
228#define info_any_dirty(info) (info_dirty(info) || info_any_dquot_dirty(info)) 232 return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
229 233}
230#define sb_dqopt(sb) (&(sb)->s_dquot)
231#define sb_dqinfo(sb, type) (sb_dqopt(sb)->info+(type))
232 234
233struct dqstats { 235struct dqstats {
234 int lookups; 236 int lookups;
@@ -337,19 +339,6 @@ struct quota_info {
337 struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ 339 struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
338}; 340};
339 341
340#define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? \
341 (sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED))
342
343#define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \
344 sb_has_quota_enabled(sb, GRPQUOTA))
345
346#define sb_has_quota_suspended(sb, type) \
347 ((type) == USRQUOTA ? (sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \
348 (sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED))
349
350#define sb_any_quota_suspended(sb) (sb_has_quota_suspended(sb, USRQUOTA) | \
351 sb_has_quota_suspended(sb, GRPQUOTA))
352
353int register_quota_format(struct quota_format_type *fmt); 342int register_quota_format(struct quota_format_type *fmt);
354void unregister_quota_format(struct quota_format_type *fmt); 343void unregister_quota_format(struct quota_format_type *fmt);
355 344
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f86702053853..742187f7a05c 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -11,42 +11,85 @@
11#define _LINUX_QUOTAOPS_ 11#define _LINUX_QUOTAOPS_
12 12
13#include <linux/smp_lock.h> 13#include <linux/smp_lock.h>
14
15#include <linux/fs.h> 14#include <linux/fs.h>
16 15
16static inline struct quota_info *sb_dqopt(struct super_block *sb)
17{
18 return &sb->s_dquot;
19}
20
17#if defined(CONFIG_QUOTA) 21#if defined(CONFIG_QUOTA)
18 22
19/* 23/*
20 * declaration of quota_function calls in kernel. 24 * declaration of quota_function calls in kernel.
21 */ 25 */
22extern void sync_dquots(struct super_block *sb, int type); 26void sync_dquots(struct super_block *sb, int type);
23 27
24extern int dquot_initialize(struct inode *inode, int type); 28int dquot_initialize(struct inode *inode, int type);
25extern int dquot_drop(struct inode *inode); 29int dquot_drop(struct inode *inode);
26 30
27extern int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); 31int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
28extern int dquot_alloc_inode(const struct inode *inode, unsigned long number); 32int dquot_alloc_inode(const struct inode *inode, unsigned long number);
29 33
30extern int dquot_free_space(struct inode *inode, qsize_t number); 34int dquot_free_space(struct inode *inode, qsize_t number);
31extern int dquot_free_inode(const struct inode *inode, unsigned long number); 35int dquot_free_inode(const struct inode *inode, unsigned long number);
32 36
33extern int dquot_transfer(struct inode *inode, struct iattr *iattr); 37int dquot_transfer(struct inode *inode, struct iattr *iattr);
34extern int dquot_commit(struct dquot *dquot); 38int dquot_commit(struct dquot *dquot);
35extern int dquot_acquire(struct dquot *dquot); 39int dquot_acquire(struct dquot *dquot);
36extern int dquot_release(struct dquot *dquot); 40int dquot_release(struct dquot *dquot);
37extern int dquot_commit_info(struct super_block *sb, int type); 41int dquot_commit_info(struct super_block *sb, int type);
38extern int dquot_mark_dquot_dirty(struct dquot *dquot); 42int dquot_mark_dquot_dirty(struct dquot *dquot);
39 43
40extern int vfs_quota_on(struct super_block *sb, int type, int format_id, 44int vfs_quota_on(struct super_block *sb, int type, int format_id,
41 char *path, int remount); 45 char *path, int remount);
42extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, 46int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
43 int format_id, int type); 47 int format_id, int type);
44extern int vfs_quota_off(struct super_block *sb, int type, int remount); 48int vfs_quota_off(struct super_block *sb, int type, int remount);
45extern int vfs_quota_sync(struct super_block *sb, int type); 49int vfs_quota_sync(struct super_block *sb, int type);
46extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 50int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
47extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 51int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
48extern int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); 52int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
49extern int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); 53int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
54
55void vfs_dq_drop(struct inode *inode);
56int vfs_dq_transfer(struct inode *inode, struct iattr *iattr);
57int vfs_dq_quota_on_remount(struct super_block *sb);
58
59static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
60{
61 return sb_dqopt(sb)->info + type;
62}
63
64/*
65 * Functions for checking status of quota
66 */
67
68static inline int sb_has_quota_enabled(struct super_block *sb, int type)
69{
70 if (type == USRQUOTA)
71 return sb_dqopt(sb)->flags & DQUOT_USR_ENABLED;
72 return sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED;
73}
74
75static inline int sb_any_quota_enabled(struct super_block *sb)
76{
77 return sb_has_quota_enabled(sb, USRQUOTA) ||
78 sb_has_quota_enabled(sb, GRPQUOTA);
79}
80
81static inline int sb_has_quota_suspended(struct super_block *sb, int type)
82{
83 if (type == USRQUOTA)
84 return sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED;
85 return sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED;
86}
87
88static inline int sb_any_quota_suspended(struct super_block *sb)
89{
90 return sb_has_quota_suspended(sb, USRQUOTA) ||
91 sb_has_quota_suspended(sb, GRPQUOTA);
92}
50 93
51/* 94/*
52 * Operations supported for diskquotas. 95 * Operations supported for diskquotas.
@@ -59,38 +102,16 @@ extern struct quotactl_ops vfs_quotactl_ops;
59 102
60/* It is better to call this function outside of any transaction as it might 103/* It is better to call this function outside of any transaction as it might
61 * need a lot of space in journal for dquot structure allocation. */ 104 * need a lot of space in journal for dquot structure allocation. */
62static inline void DQUOT_INIT(struct inode *inode) 105static inline void vfs_dq_init(struct inode *inode)
63{ 106{
64 BUG_ON(!inode->i_sb); 107 BUG_ON(!inode->i_sb);
65 if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) 108 if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode))
66 inode->i_sb->dq_op->initialize(inode, -1); 109 inode->i_sb->dq_op->initialize(inode, -1);
67} 110}
68 111
69/* The same as with DQUOT_INIT */
70static inline void DQUOT_DROP(struct inode *inode)
71{
72 /* Here we can get arbitrary inode from clear_inode() so we have
73 * to be careful. OTOH we don't need locking as quota operations
74 * are allowed to change only at mount time */
75 if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
76 && inode->i_sb->dq_op->drop) {
77 int cnt;
78 /* Test before calling to rule out calls from proc and such
79 * where we are not allowed to block. Note that this is
80 * actually reliable test even without the lock - the caller
81 * must assure that nobody can come after the DQUOT_DROP and
82 * add quota pointers back anyway */
83 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
84 if (inode->i_dquot[cnt] != NODQUOT)
85 break;
86 if (cnt < MAXQUOTAS)
87 inode->i_sb->dq_op->drop(inode);
88 }
89}
90
91/* The following allocation/freeing/transfer functions *must* be called inside 112/* The following allocation/freeing/transfer functions *must* be called inside
92 * a transaction (deadlocks possible otherwise) */ 113 * a transaction (deadlocks possible otherwise) */
93static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 114static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
94{ 115{
95 if (sb_any_quota_enabled(inode->i_sb)) { 116 if (sb_any_quota_enabled(inode->i_sb)) {
96 /* Used space is updated in alloc_space() */ 117 /* Used space is updated in alloc_space() */
@@ -102,15 +123,15 @@ static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
102 return 0; 123 return 0;
103} 124}
104 125
105static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) 126static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr)
106{ 127{
107 int ret; 128 int ret;
108 if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr))) 129 if (!(ret = vfs_dq_prealloc_space_nodirty(inode, nr)))
109 mark_inode_dirty(inode); 130 mark_inode_dirty(inode);
110 return ret; 131 return ret;
111} 132}
112 133
113static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 134static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
114{ 135{
115 if (sb_any_quota_enabled(inode->i_sb)) { 136 if (sb_any_quota_enabled(inode->i_sb)) {
116 /* Used space is updated in alloc_space() */ 137 /* Used space is updated in alloc_space() */
@@ -122,25 +143,25 @@ static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
122 return 0; 143 return 0;
123} 144}
124 145
125static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) 146static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
126{ 147{
127 int ret; 148 int ret;
128 if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr))) 149 if (!(ret = vfs_dq_alloc_space_nodirty(inode, nr)))
129 mark_inode_dirty(inode); 150 mark_inode_dirty(inode);
130 return ret; 151 return ret;
131} 152}
132 153
133static inline int DQUOT_ALLOC_INODE(struct inode *inode) 154static inline int vfs_dq_alloc_inode(struct inode *inode)
134{ 155{
135 if (sb_any_quota_enabled(inode->i_sb)) { 156 if (sb_any_quota_enabled(inode->i_sb)) {
136 DQUOT_INIT(inode); 157 vfs_dq_init(inode);
137 if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) 158 if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA)
138 return 1; 159 return 1;
139 } 160 }
140 return 0; 161 return 0;
141} 162}
142 163
143static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 164static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
144{ 165{
145 if (sb_any_quota_enabled(inode->i_sb)) 166 if (sb_any_quota_enabled(inode->i_sb))
146 inode->i_sb->dq_op->free_space(inode, nr); 167 inode->i_sb->dq_op->free_space(inode, nr);
@@ -148,35 +169,25 @@ static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
148 inode_sub_bytes(inode, nr); 169 inode_sub_bytes(inode, nr);
149} 170}
150 171
151static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) 172static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
152{ 173{
153 DQUOT_FREE_SPACE_NODIRTY(inode, nr); 174 vfs_dq_free_space_nodirty(inode, nr);
154 mark_inode_dirty(inode); 175 mark_inode_dirty(inode);
155} 176}
156 177
157static inline void DQUOT_FREE_INODE(struct inode *inode) 178static inline void vfs_dq_free_inode(struct inode *inode)
158{ 179{
159 if (sb_any_quota_enabled(inode->i_sb)) 180 if (sb_any_quota_enabled(inode->i_sb))
160 inode->i_sb->dq_op->free_inode(inode, 1); 181 inode->i_sb->dq_op->free_inode(inode, 1);
161} 182}
162 183
163static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
164{
165 if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) {
166 DQUOT_INIT(inode);
167 if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
168 return 1;
169 }
170 return 0;
171}
172
173/* The following two functions cannot be called inside a transaction */ 184/* The following two functions cannot be called inside a transaction */
174static inline void DQUOT_SYNC(struct super_block *sb) 185static inline void vfs_dq_sync(struct super_block *sb)
175{ 186{
176 sync_dquots(sb, -1); 187 sync_dquots(sb, -1);
177} 188}
178 189
179static inline int DQUOT_OFF(struct super_block *sb, int remount) 190static inline int vfs_dq_off(struct super_block *sb, int remount)
180{ 191{
181 int ret = -ENOSYS; 192 int ret = -ENOSYS;
182 193
@@ -185,22 +196,27 @@ static inline int DQUOT_OFF(struct super_block *sb, int remount)
185 return ret; 196 return ret;
186} 197}
187 198
188static inline int DQUOT_ON_REMOUNT(struct super_block *sb) 199#else
200
201static inline int sb_has_quota_enabled(struct super_block *sb, int type)
189{ 202{
190 int cnt; 203 return 0;
191 int ret = 0, err; 204}
192 205
193 if (!sb->s_qcop || !sb->s_qcop->quota_on) 206static inline int sb_any_quota_enabled(struct super_block *sb)
194 return -ENOSYS; 207{
195 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 208 return 0;
196 err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
197 if (err < 0 && !ret)
198 ret = err;
199 }
200 return ret;
201} 209}
202 210
203#else 211static inline int sb_has_quota_suspended(struct super_block *sb, int type)
212{
213 return 0;
214}
215
216static inline int sb_any_quota_suspended(struct super_block *sb)
217{
218 return 0;
219}
204 220
205/* 221/*
206 * NO-OP when quota not configured. 222 * NO-OP when quota not configured.
@@ -208,113 +224,144 @@ static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
208#define sb_dquot_ops (NULL) 224#define sb_dquot_ops (NULL)
209#define sb_quotactl_ops (NULL) 225#define sb_quotactl_ops (NULL)
210 226
211static inline void DQUOT_INIT(struct inode *inode) 227static inline void vfs_dq_init(struct inode *inode)
212{ 228{
213} 229}
214 230
215static inline void DQUOT_DROP(struct inode *inode) 231static inline void vfs_dq_drop(struct inode *inode)
216{ 232{
217} 233}
218 234
219static inline int DQUOT_ALLOC_INODE(struct inode *inode) 235static inline int vfs_dq_alloc_inode(struct inode *inode)
220{ 236{
221 return 0; 237 return 0;
222} 238}
223 239
224static inline void DQUOT_FREE_INODE(struct inode *inode) 240static inline void vfs_dq_free_inode(struct inode *inode)
225{ 241{
226} 242}
227 243
228static inline void DQUOT_SYNC(struct super_block *sb) 244static inline void vfs_dq_sync(struct super_block *sb)
229{ 245{
230} 246}
231 247
232static inline int DQUOT_OFF(struct super_block *sb, int remount) 248static inline int vfs_dq_off(struct super_block *sb, int remount)
233{ 249{
234 return 0; 250 return 0;
235} 251}
236 252
237static inline int DQUOT_ON_REMOUNT(struct super_block *sb) 253static inline int vfs_dq_quota_on_remount(struct super_block *sb)
238{ 254{
239 return 0; 255 return 0;
240} 256}
241 257
242static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) 258static inline int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
243{ 259{
244 return 0; 260 return 0;
245} 261}
246 262
247static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 263static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
248{ 264{
249 inode_add_bytes(inode, nr); 265 inode_add_bytes(inode, nr);
250 return 0; 266 return 0;
251} 267}
252 268
253static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) 269static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr)
254{ 270{
255 DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr); 271 vfs_dq_prealloc_space_nodirty(inode, nr);
256 mark_inode_dirty(inode); 272 mark_inode_dirty(inode);
257 return 0; 273 return 0;
258} 274}
259 275
260static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 276static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
261{ 277{
262 inode_add_bytes(inode, nr); 278 inode_add_bytes(inode, nr);
263 return 0; 279 return 0;
264} 280}
265 281
266static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) 282static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
267{ 283{
268 DQUOT_ALLOC_SPACE_NODIRTY(inode, nr); 284 vfs_dq_alloc_space_nodirty(inode, nr);
269 mark_inode_dirty(inode); 285 mark_inode_dirty(inode);
270 return 0; 286 return 0;
271} 287}
272 288
273static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 289static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
274{ 290{
275 inode_sub_bytes(inode, nr); 291 inode_sub_bytes(inode, nr);
276} 292}
277 293
278static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) 294static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
279{ 295{
280 DQUOT_FREE_SPACE_NODIRTY(inode, nr); 296 vfs_dq_free_space_nodirty(inode, nr);
281 mark_inode_dirty(inode); 297 mark_inode_dirty(inode);
282} 298}
283 299
284#endif /* CONFIG_QUOTA */ 300#endif /* CONFIG_QUOTA */
285 301
286static inline int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) 302static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
287{ 303{
288 return DQUOT_PREALLOC_SPACE_NODIRTY(inode, 304 return vfs_dq_prealloc_space_nodirty(inode,
289 nr << inode->i_sb->s_blocksize_bits); 305 nr << inode->i_sb->s_blocksize_bits);
290} 306}
291 307
292static inline int DQUOT_PREALLOC_BLOCK(struct inode *inode, qsize_t nr) 308static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
293{ 309{
294 return DQUOT_PREALLOC_SPACE(inode, 310 return vfs_dq_prealloc_space(inode,
295 nr << inode->i_sb->s_blocksize_bits); 311 nr << inode->i_sb->s_blocksize_bits);
296} 312}
297 313
298static inline int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) 314static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
299{ 315{
300 return DQUOT_ALLOC_SPACE_NODIRTY(inode, 316 return vfs_dq_alloc_space_nodirty(inode,
301 nr << inode->i_sb->s_blocksize_bits); 317 nr << inode->i_sb->s_blocksize_bits);
302} 318}
303 319
304static inline int DQUOT_ALLOC_BLOCK(struct inode *inode, qsize_t nr) 320static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
305{ 321{
306 return DQUOT_ALLOC_SPACE(inode, 322 return vfs_dq_alloc_space(inode,
307 nr << inode->i_sb->s_blocksize_bits); 323 nr << inode->i_sb->s_blocksize_bits);
308} 324}
309 325
310static inline void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) 326static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
311{ 327{
312 DQUOT_FREE_SPACE_NODIRTY(inode, nr << inode->i_sb->s_blocksize_bits); 328 vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits);
313} 329}
314 330
315static inline void DQUOT_FREE_BLOCK(struct inode *inode, qsize_t nr) 331static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
316{ 332{
317 DQUOT_FREE_SPACE(inode, nr << inode->i_sb->s_blocksize_bits); 333 vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits);
318} 334}
319 335
336/*
337 * Define uppercase equivalents for compatibility with old function names
338 * Can go away when we think all users have been converted (15/04/2008)
339 */
340#define DQUOT_INIT(inode) vfs_dq_init(inode)
341#define DQUOT_DROP(inode) vfs_dq_drop(inode)
342#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \
343 vfs_dq_prealloc_space_nodirty(inode, nr)
344#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr)
345#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \
346 vfs_dq_alloc_space_nodirty(inode, nr)
347#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr)
348#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \
349 vfs_dq_prealloc_block_nodirty(inode, nr)
350#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr)
351#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
352 vfs_dq_alloc_block_nodirty(inode, nr)
353#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr)
354#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode)
355#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \
356 vfs_dq_free_space_nodirty(inode, nr)
357#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr)
358#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \
359 vfs_dq_free_block_nodirty(inode, nr)
360#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr)
361#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode)
362#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr)
363#define DQUOT_SYNC(sb) vfs_dq_sync(sb)
364#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
365#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb)
366
320#endif /* _LINUX_QUOTAOPS_ */ 367#endif /* _LINUX_QUOTAOPS_ */
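For context on the rename above, a minimal sketch of a filesystem call site using the new lowercase helpers; the error convention (nonzero return when the quota would be exceeded) mirrors existing DQUOT_ALLOC_BLOCK users such as ext2, and the function names here are illustrative only.

#include <linux/quotaops.h>

/* Sketch only: charge one block against the inode's quota before
 * allocating it, and undo the charge if the allocation itself fails. */
static int example_alloc_one_block(struct inode *inode)
{
	int err;

	if (vfs_dq_alloc_block(inode, 1))
		return -EDQUOT;

	err = example_do_allocate(inode);	/* hypothetical allocator */
	if (err)
		vfs_dq_free_block(inode, 1);

	return err;
}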
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
new file mode 100644
index 000000000000..18a5b9ba9d40
--- /dev/null
+++ b/include/linux/ratelimit.h
@@ -0,0 +1,27 @@
1#ifndef _LINUX_RATELIMIT_H
2#define _LINUX_RATELIMIT_H
3#include <linux/param.h>
4
5#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
6#define DEFAULT_RATELIMIT_BURST 10
7
8struct ratelimit_state {
9 int interval;
10 int burst;
11 int printed;
12 int missed;
13 unsigned long begin;
14};
15
16#define DEFINE_RATELIMIT_STATE(name, interval, burst) \
17 struct ratelimit_state name = {interval, burst,}
18
19extern int __ratelimit(struct ratelimit_state *rs);
20
21static inline int ratelimit(void)
22{
23 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
24 DEFAULT_RATELIMIT_BURST);
25 return __ratelimit(&rs);
26}
27#endif
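A hedged usage sketch for the new header: DEFINE_RATELIMIT_STATE() plus __ratelimit(), assuming __ratelimit() follows the printk_ratelimit() convention of returning nonzero when the event may proceed (the WARN_ON_RATELIMIT conversion below relies on the same convention).

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Allow at most 10 messages per 5 seconds from this call site. */
static void example_report_error(int err)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&rs))
		printk(KERN_WARNING "example: error %d\n", err);
}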
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index f04b64eca636..0967f03b0705 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -115,16 +115,21 @@ DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
115 115
116static inline void rcu_enter_nohz(void) 116static inline void rcu_enter_nohz(void)
117{ 117{
118 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
119
118 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ 120 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
119 __get_cpu_var(rcu_dyntick_sched).dynticks++; 121 __get_cpu_var(rcu_dyntick_sched).dynticks++;
120 WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1); 122 WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
121} 123}
122 124
123static inline void rcu_exit_nohz(void) 125static inline void rcu_exit_nohz(void)
124{ 126{
127 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
128
125 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ 129 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
126 __get_cpu_var(rcu_dyntick_sched).dynticks++; 130 __get_cpu_var(rcu_dyntick_sched).dynticks++;
127 WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1)); 131 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
132 &rs);
128} 133}
129 134
130#else /* CONFIG_NO_HZ */ 135#else /* CONFIG_NO_HZ */
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 4aacaeecb56f..e9963af16cda 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -526,8 +526,8 @@ struct item_head {
526** p is the array of __u32, i is the index into the array, v is the value 526** p is the array of __u32, i is the index into the array, v is the value
527** to store there. 527** to store there.
528*/ 528*/
529#define get_block_num(p, i) le32_to_cpu(get_unaligned((p) + (i))) 529#define get_block_num(p, i) get_unaligned_le32((p) + (i))
530#define put_block_num(p, i, v) put_unaligned(cpu_to_le32(v), (p) + (i)) 530#define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i))
531 531
532// 532//
533// in old version uniqueness field shows key type 533// in old version uniqueness field shows key type
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 336ee43ed7d8..315517e8bfa1 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -152,7 +152,7 @@ struct reiserfs_journal_list {
152 atomic_t j_nonzerolen; 152 atomic_t j_nonzerolen;
153 atomic_t j_commit_left; 153 atomic_t j_commit_left;
154 atomic_t j_older_commits_done; /* all commits older than this on disk */ 154 atomic_t j_older_commits_done; /* all commits older than this on disk */
155 struct semaphore j_commit_lock; 155 struct mutex j_commit_mutex;
156 unsigned long j_trans_id; 156 unsigned long j_trans_id;
157 time_t j_timestamp; 157 time_t j_timestamp;
158 struct reiserfs_list_bitmap *j_list_bitmap; 158 struct reiserfs_list_bitmap *j_list_bitmap;
@@ -193,8 +193,8 @@ struct reiserfs_journal {
193 struct buffer_head *j_header_bh; 193 struct buffer_head *j_header_bh;
194 194
195 time_t j_trans_start_time; /* time this transaction started */ 195 time_t j_trans_start_time; /* time this transaction started */
196 struct semaphore j_lock; 196 struct mutex j_mutex;
197 struct semaphore j_flush_sem; 197 struct mutex j_flush_mutex;
198 wait_queue_head_t j_join_wait; /* wait for current transaction to finish before starting new one */ 198 wait_queue_head_t j_join_wait; /* wait for current transaction to finish before starting new one */
199 atomic_t j_jlock; /* lock for j_join_wait */ 199 atomic_t j_jlock; /* lock for j_join_wait */
200 int j_list_bitmap_index; /* number of next list bitmap to use */ 200 int j_list_bitmap_index; /* number of next list bitmap to use */
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 6d9e1fca098c..fdeadd9740dc 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -63,9 +63,14 @@ u64 res_counter_read_u64(struct res_counter *counter, int member);
63ssize_t res_counter_read(struct res_counter *counter, int member, 63ssize_t res_counter_read(struct res_counter *counter, int member,
64 const char __user *buf, size_t nbytes, loff_t *pos, 64 const char __user *buf, size_t nbytes, loff_t *pos,
65 int (*read_strategy)(unsigned long long val, char *s)); 65 int (*read_strategy)(unsigned long long val, char *s));
66ssize_t res_counter_write(struct res_counter *counter, int member, 66
67 const char __user *buf, size_t nbytes, loff_t *pos, 67typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);
68 int (*write_strategy)(char *buf, unsigned long long *val)); 68
69int res_counter_memparse_write_strategy(const char *buf,
70 unsigned long long *res);
71
72int res_counter_write(struct res_counter *counter, int member,
73 const char *buffer, write_strategy_fn write_strategy);
69 74
70/* 75/*
71 * the field descriptors. one for each member of res_counter 76 * the field descriptors. one for each member of res_counter
@@ -95,8 +100,10 @@ void res_counter_init(struct res_counter *counter);
95 * counter->limit _locked call expects the counter->lock to be taken 100 * counter->limit _locked call expects the counter->lock to be taken
96 */ 101 */
97 102
98int res_counter_charge_locked(struct res_counter *counter, unsigned long val); 103int __must_check res_counter_charge_locked(struct res_counter *counter,
99int res_counter_charge(struct res_counter *counter, unsigned long val); 104 unsigned long val);
105int __must_check res_counter_charge(struct res_counter *counter,
106 unsigned long val);
100 107
101/* 108/*
102 * uncharge - tell that some portion of the resource is released 109 * uncharge - tell that some portion of the resource is released
@@ -151,4 +158,20 @@ static inline void res_counter_reset_failcnt(struct res_counter *cnt)
151 cnt->failcnt = 0; 158 cnt->failcnt = 0;
152 spin_unlock_irqrestore(&cnt->lock, flags); 159 spin_unlock_irqrestore(&cnt->lock, flags);
153} 160}
161
162static inline int res_counter_set_limit(struct res_counter *cnt,
163 unsigned long long limit)
164{
165 unsigned long flags;
166 int ret = -EBUSY;
167
168 spin_lock_irqsave(&cnt->lock, flags);
169 if (cnt->usage < limit) {
170 cnt->limit = limit;
171 ret = 0;
172 }
173 spin_unlock_irqrestore(&cnt->lock, flags);
174 return ret;
175}
176
154#endif 177#endif
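To show how the new pieces fit together, a sketch of a controller's limit-file write path combining res_counter_memparse_write_strategy() with the new res_counter_set_limit() helper; the function name is hypothetical, and the error code follows the inline above (-EBUSY when current usage already exceeds the requested limit).

#include <linux/res_counter.h>

/* Illustrative only: parse a human-readable limit ("64M", "1G", ...)
 * and apply it atomically under the counter lock. */
static int example_write_limit(struct res_counter *cnt, const char *buf)
{
	unsigned long long limit;
	int ret;

	ret = res_counter_memparse_write_strategy(buf, &limit);
	if (ret)
		return ret;

	return res_counter_set_limit(cnt, limit);
}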
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index f2d0d1527721..b01fe004cb5e 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -115,6 +115,23 @@ extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
115 115
116extern struct class *rtc_class; 116extern struct class *rtc_class;
117 117
118/*
119 * For these RTC methods the device parameter is the physical device
120 * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which
121 * was passed to rtc_device_register(). Its driver_data normally holds
122 * device state, including the rtc_device pointer for the RTC.
123 *
124 * Most of these methods are called with rtc_device.ops_lock held,
125 * through the rtc_*(struct rtc_device *, ...) calls.
126 *
127 * The (current) exceptions are mostly filesystem hooks:
128 * - the proc() hook for procfs
129 * - non-ioctl() chardev hooks: open(), release(), read_callback()
130 * - periodic irq calls: irq_set_state(), irq_set_freq()
131 *
132 * REVISIT those periodic irq calls *do* have ops_lock when they're
133 * issued through ioctl() ...
134 */
118struct rtc_class_ops { 135struct rtc_class_ops {
119 int (*open)(struct device *); 136 int (*open)(struct device *);
120 void (*release)(struct device *); 137 void (*release)(struct device *);
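A small sketch of what the new comment implies for a driver: the device argument is the bus device handed to rtc_device_register(), so its driver_data is where per-chip state lives. All names prefixed foo_ are hypothetical.

#include <linux/rtc.h>

/* Hypothetical I2C/SPI/platform RTC driver state. */
struct foo_rtc {
	struct rtc_device *rtc;
	/* ... register cache, bus locking, etc ... */
};

static int foo_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct foo_rtc *foo = dev_get_drvdata(dev);

	/* Normally reached via rtc_read_time() with ops_lock already held. */
	return foo_read_hw_time(foo, tm);	/* hypothetical helper */
}

static const struct rtc_class_ops foo_rtc_ops = {
	.read_time = foo_rtc_read_time,
};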
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 71fc81360048..e5996984ddd0 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -224,4 +224,42 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
224 */ 224 */
225#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) 225#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
226 226
227
228/*
229 * Mapping sg iterator
230 *
231 * Iterates over sg entries mapping page-by-page. On each successful
232 * iteration, @miter->page points to the mapped page and
233 * @miter->length bytes of data can be accessed at @miter->addr. As
 234 * long as an iteration is enclosed between start and stop, the user
235 * is free to choose control structure and when to stop.
236 *
237 * @miter->consumed is set to @miter->length on each iteration. It
238 * can be adjusted if the user can't consume all the bytes in one go.
239 * Also, a stopped iteration can be resumed by calling next on it.
240 * This is useful when iteration needs to release all resources and
241 * continue later (e.g. at the next interrupt).
242 */
243
244#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
245
246struct sg_mapping_iter {
247 /* the following three fields can be accessed directly */
248 struct page *page; /* currently mapped page */
249 void *addr; /* pointer to the mapped area */
250 size_t length; /* length of the mapped area */
251 size_t consumed; /* number of consumed bytes */
252
253 /* these are internal states, keep away */
254 struct scatterlist *__sg; /* current entry */
255 unsigned int __nents; /* nr of remaining entries */
256 unsigned int __offset; /* offset within sg */
257 unsigned int __flags;
258};
259
260void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
261 unsigned int nents, unsigned int flags);
262bool sg_miter_next(struct sg_mapping_iter *miter);
263void sg_miter_stop(struct sg_mapping_iter *miter);
264
227#endif /* _LINUX_SCATTERLIST_H */ 265#endif /* _LINUX_SCATTERLIST_H */
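A sketch of the start/next/stop pattern the new iterator is meant for, assuming atomic context (hence SG_MITER_ATOMIC) and a caller-supplied buffer; it also shows adjusting miter.consumed when fewer bytes are taken than were mapped.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy up to len bytes from src into the scatterlist, page by page. */
static size_t example_copy_to_sg(struct scatterlist *sgl, unsigned int nents,
				 const char *src, size_t len)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
	while (copied < len && sg_miter_next(&miter)) {
		size_t n = min(miter.length, len - copied);

		memcpy(miter.addr, src + copied, n);
		miter.consumed = n;	/* we may not use the whole mapping */
		copied += n;
	}
	sg_miter_stop(&miter);

	return copied;
}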
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1941d8b5cf11..42036ffe6b00 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -295,10 +295,11 @@ extern void softlockup_tick(void);
295extern void spawn_softlockup_task(void); 295extern void spawn_softlockup_task(void);
296extern void touch_softlockup_watchdog(void); 296extern void touch_softlockup_watchdog(void);
297extern void touch_all_softlockup_watchdogs(void); 297extern void touch_all_softlockup_watchdogs(void);
298extern unsigned long softlockup_thresh; 298extern unsigned int softlockup_panic;
299extern unsigned long sysctl_hung_task_check_count; 299extern unsigned long sysctl_hung_task_check_count;
300extern unsigned long sysctl_hung_task_timeout_secs; 300extern unsigned long sysctl_hung_task_timeout_secs;
301extern unsigned long sysctl_hung_task_warnings; 301extern unsigned long sysctl_hung_task_warnings;
302extern int softlockup_thresh;
302#else 303#else
303static inline void softlockup_tick(void) 304static inline void softlockup_tick(void)
304{ 305{
@@ -505,6 +506,10 @@ struct signal_struct {
505 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 506 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
506 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; 507 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
507 unsigned long inblock, oublock, cinblock, coublock; 508 unsigned long inblock, oublock, cinblock, coublock;
509#ifdef CONFIG_TASK_XACCT
510 u64 rchar, wchar, syscr, syscw;
511#endif
512 struct task_io_accounting ioac;
508 513
509 /* 514 /*
510 * Cumulative ns of scheduled CPU time for dead threads in the 515 * Cumulative ns of scheduled CPU time for dead threads in the
@@ -667,6 +672,10 @@ struct task_delay_info {
667 /* io operations performed */ 672 /* io operations performed */
668 u32 swapin_count; /* total count of the number of swapin block */ 673 u32 swapin_count; /* total count of the number of swapin block */
669 /* io operations performed */ 674 /* io operations performed */
675
676 struct timespec freepages_start, freepages_end;
677 u64 freepages_delay; /* wait for memory reclaim */
678 u32 freepages_count; /* total count of memory reclaim */
670}; 679};
671#endif /* CONFIG_TASK_DELAY_ACCT */ 680#endif /* CONFIG_TASK_DELAY_ACCT */
672 681
@@ -824,7 +833,16 @@ extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
824 struct sched_domain_attr *dattr_new); 833 struct sched_domain_attr *dattr_new);
825extern int arch_reinit_sched_domains(void); 834extern int arch_reinit_sched_domains(void);
826 835
827#endif /* CONFIG_SMP */ 836#else /* CONFIG_SMP */
837
838struct sched_domain_attr;
839
840static inline void
841partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
842 struct sched_domain_attr *dattr_new)
843{
844}
845#endif /* !CONFIG_SMP */
828 846
829struct io_context; /* See blkdev.h */ 847struct io_context; /* See blkdev.h */
830#define NGROUPS_SMALL 32 848#define NGROUPS_SMALL 32
@@ -1247,7 +1265,7 @@ struct task_struct {
1247#if defined(CONFIG_TASK_XACCT) 1265#if defined(CONFIG_TASK_XACCT)
1248 u64 acct_rss_mem1; /* accumulated rss usage */ 1266 u64 acct_rss_mem1; /* accumulated rss usage */
1249 u64 acct_vm_mem1; /* accumulated virtual memory usage */ 1267 u64 acct_vm_mem1; /* accumulated virtual memory usage */
1250 cputime_t acct_stimexpd;/* stime since last update */ 1268 cputime_t acct_timexpd; /* stime + utime since last update */
1251#endif 1269#endif
1252#ifdef CONFIG_CPUSETS 1270#ifdef CONFIG_CPUSETS
1253 nodemask_t mems_allowed; 1271 nodemask_t mems_allowed;
@@ -1486,7 +1504,7 @@ static inline void put_task_struct(struct task_struct *t)
1486#define PF_KSWAPD 0x00040000 /* I am kswapd */ 1504#define PF_KSWAPD 0x00040000 /* I am kswapd */
1487#define PF_SWAPOFF 0x00080000 /* I am in swapoff */ 1505#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
1488#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 1506#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1489#define PF_BORROWED_MM 0x00200000 /* I am a kthread doing use_mm */ 1507#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1490#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1508#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1491#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1509#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1492#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ 1510#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
@@ -1705,19 +1723,13 @@ extern struct pid_namespace init_pid_ns;
1705 * finds a task by its pid in the specified namespace 1723 * finds a task by its pid in the specified namespace
1706 * find_task_by_vpid(): 1724 * find_task_by_vpid():
1707 * finds a task by its virtual pid 1725 * finds a task by its virtual pid
1708 * find_task_by_pid():
1709 * finds a task by its global pid
1710 * 1726 *
1711 * see also find_pid() etc in include/linux/pid.h 1727 * see also find_vpid() etc in include/linux/pid.h
1712 */ 1728 */
1713 1729
1714extern struct task_struct *find_task_by_pid_type_ns(int type, int pid, 1730extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
1715 struct pid_namespace *ns); 1731 struct pid_namespace *ns);
1716 1732
1717static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr)
1718{
1719 return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
1720}
1721extern struct task_struct *find_task_by_vpid(pid_t nr); 1733extern struct task_struct *find_task_by_vpid(pid_t nr);
1722extern struct task_struct *find_task_by_pid_ns(pid_t nr, 1734extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1723 struct pid_namespace *ns); 1735 struct pid_namespace *ns);
@@ -1790,7 +1802,6 @@ extern void force_sig(int, struct task_struct *);
1790extern void force_sig_specific(int, struct task_struct *); 1802extern void force_sig_specific(int, struct task_struct *);
1791extern int send_sig(int, struct task_struct *, int); 1803extern int send_sig(int, struct task_struct *, int);
1792extern void zap_other_threads(struct task_struct *p); 1804extern void zap_other_threads(struct task_struct *p);
1793extern int kill_proc(pid_t, int, int);
1794extern struct sigqueue *sigqueue_alloc(void); 1805extern struct sigqueue *sigqueue_alloc(void);
1795extern void sigqueue_free(struct sigqueue *); 1806extern void sigqueue_free(struct sigqueue *);
1796extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); 1807extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
@@ -1973,6 +1984,13 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
1973 1984
1974#endif 1985#endif
1975 1986
1987static inline int object_is_on_stack(void *obj)
1988{
1989 void *stack = task_stack_page(current);
1990
1991 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
1992}
1993
1976extern void thread_info_cache_init(void); 1994extern void thread_info_cache_init(void);
1977 1995
1978/* set thread flags in other task's structures 1996/* set thread flags in other task's structures
@@ -2037,9 +2055,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
2037 if (!signal_pending(p)) 2055 if (!signal_pending(p))
2038 return 0; 2056 return 0;
2039 2057
2040 if (state & (__TASK_STOPPED | __TASK_TRACED))
2041 return 0;
2042
2043 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); 2058 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2044} 2059}
2045 2060
diff --git a/include/linux/security.h b/include/linux/security.h
index 31c8851ec5d0..f0e9adb22ac2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -102,9 +102,7 @@ extern unsigned long mmap_min_addr;
102#define LSM_SETID_FS 8 102#define LSM_SETID_FS 8
103 103
104/* forward declares to avoid warnings */ 104/* forward declares to avoid warnings */
105struct nfsctl_arg;
106struct sched_param; 105struct sched_param;
107struct swap_info_struct;
108struct request_sock; 106struct request_sock;
109 107
110/* bprm_apply_creds unsafe reasons */ 108/* bprm_apply_creds unsafe reasons */
diff --git a/include/linux/sem.h b/include/linux/sem.h
index c8eaad9e4b72..1b191c176bcd 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -78,6 +78,7 @@ struct seminfo {
78 78
79#ifdef __KERNEL__ 79#ifdef __KERNEL__
80#include <asm/atomic.h> 80#include <asm/atomic.h>
81#include <linux/rcupdate.h>
81 82
82struct task_struct; 83struct task_struct;
83 84
@@ -93,23 +94,19 @@ struct sem_array {
93 time_t sem_otime; /* last semop time */ 94 time_t sem_otime; /* last semop time */
94 time_t sem_ctime; /* last change time */ 95 time_t sem_ctime; /* last change time */
95 struct sem *sem_base; /* ptr to first semaphore in array */ 96 struct sem *sem_base; /* ptr to first semaphore in array */
96 struct sem_queue *sem_pending; /* pending operations to be processed */ 97 struct list_head sem_pending; /* pending operations to be processed */
97 struct sem_queue **sem_pending_last; /* last pending operation */ 98 struct list_head list_id; /* undo requests on this array */
98 struct sem_undo *undo; /* undo requests on this array */
99 unsigned long sem_nsems; /* no. of semaphores in array */ 99 unsigned long sem_nsems; /* no. of semaphores in array */
100}; 100};
101 101
102/* One queue for each sleeping process in the system. */ 102/* One queue for each sleeping process in the system. */
103struct sem_queue { 103struct sem_queue {
104 struct sem_queue * next; /* next entry in the queue */ 104 struct list_head list; /* queue of pending operations */
105 struct sem_queue ** prev; /* previous entry in the queue, *(q->prev) == q */ 105 struct task_struct *sleeper; /* this process */
106 struct task_struct* sleeper; /* this process */ 106 struct sem_undo *undo; /* undo structure */
107 struct sem_undo * undo; /* undo structure */
108 int pid; /* process id of requesting process */ 107 int pid; /* process id of requesting process */
109 int status; /* completion status of operation */ 108 int status; /* completion status of operation */
110 struct sem_array * sma; /* semaphore array for operations */ 109 struct sembuf *sops; /* array of pending operations */
111 int id; /* internal sem id */
112 struct sembuf * sops; /* array of pending operations */
113 int nsops; /* number of operations */ 110 int nsops; /* number of operations */
114 int alter; /* does the operation alter the array? */ 111 int alter; /* does the operation alter the array? */
115}; 112};
@@ -118,8 +115,11 @@ struct sem_queue {
118 * when the process exits. 115 * when the process exits.
119 */ 116 */
120struct sem_undo { 117struct sem_undo {
121 struct sem_undo * proc_next; /* next entry on this process */ 118 struct list_head list_proc; /* per-process list: all undos from one process. */
122 struct sem_undo * id_next; /* next entry on this semaphore set */ 119 /* rcu protected */
120 struct rcu_head rcu; /* rcu struct for sem_undo() */
121 struct sem_undo_list *ulp; /* sem_undo_list for the process */
122 struct list_head list_id; /* per semaphore array list: all undos for one array */
123 int semid; /* semaphore set identifier */ 123 int semid; /* semaphore set identifier */
124 short * semadj; /* array of adjustments, one per semaphore */ 124 short * semadj; /* array of adjustments, one per semaphore */
125}; 125};
@@ -128,9 +128,9 @@ struct sem_undo {
128 * that may be shared among all a CLONE_SYSVSEM task group. 128 * that may be shared among all a CLONE_SYSVSEM task group.
129 */ 129 */
130struct sem_undo_list { 130struct sem_undo_list {
131 atomic_t refcnt; 131 atomic_t refcnt;
132 spinlock_t lock; 132 spinlock_t lock;
133 struct sem_undo *proc_list; 133 struct list_head list_proc;
134}; 134};
135 135
136struct sysv_sem { 136struct sysv_sem {
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 9cae64b00d6b..7415839ac890 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -26,10 +26,8 @@ struct semaphore {
26 .wait_list = LIST_HEAD_INIT((name).wait_list), \ 26 .wait_list = LIST_HEAD_INIT((name).wait_list), \
27} 27}
28 28
29#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ 29#define DECLARE_MUTEX(name) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) 30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
31
32#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
33 31
34static inline void sema_init(struct semaphore *sem, int val) 32static inline void sema_init(struct semaphore *sem, int val)
35{ 33{
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index f3a1c0e45021..3b2f6c04855e 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -59,9 +59,6 @@
59#define PORT_SUNZILOG 38 59#define PORT_SUNZILOG 38
60#define PORT_SUNSAB 39 60#define PORT_SUNSAB 39
61 61
62/* NEC v850. */
63#define PORT_V850E_UART 40
64
65/* DEC */ 62/* DEC */
66#define PORT_DZ 46 63#define PORT_DZ 46
67#define PORT_ZS 47 64#define PORT_ZS 47
diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h
index ea037f28df91..bef0c46d4713 100644
--- a/include/linux/signalfd.h
+++ b/include/linux/signalfd.h
@@ -8,6 +8,12 @@
8#ifndef _LINUX_SIGNALFD_H 8#ifndef _LINUX_SIGNALFD_H
9#define _LINUX_SIGNALFD_H 9#define _LINUX_SIGNALFD_H
10 10
11/* For O_CLOEXEC and O_NONBLOCK */
12#include <linux/fcntl.h>
13
14/* Flags for signalfd4. */
15#define SFD_CLOEXEC O_CLOEXEC
16#define SFD_NONBLOCK O_NONBLOCK
11 17
12struct signalfd_siginfo { 18struct signalfd_siginfo {
13 __u32 ssi_signo; 19 __u32 ssi_signo;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9aa90a6f20e0..41103910f8a2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -180,7 +180,7 @@ size_t ksize(const void *);
180 */ 180 */
181static inline void *kcalloc(size_t n, size_t size, gfp_t flags) 181static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
182{ 182{
183 if (n != 0 && size > ULONG_MAX / n) 183 if (size != 0 && n > ULONG_MAX / size)
184 return NULL; 184 return NULL;
185 return __kmalloc(n * size, flags | __GFP_ZERO); 185 return __kmalloc(n * size, flags | __GFP_ZERO);
186} 186}
diff --git a/include/linux/sm501.h b/include/linux/sm501.h
index 95c1c39ba445..214f93209b8c 100644
--- a/include/linux/sm501.h
+++ b/include/linux/sm501.h
@@ -46,24 +46,6 @@ extern unsigned long sm501_modify_reg(struct device *dev,
46 unsigned long set, 46 unsigned long set,
47 unsigned long clear); 47 unsigned long clear);
48 48
49/* sm501_gpio_set
50 *
51 * set the state of the given GPIO line
52*/
53
54extern void sm501_gpio_set(struct device *dev,
55 unsigned long gpio,
56 unsigned int to,
57 unsigned int dir);
58
59/* sm501_gpio_get
60 *
61 * get the state of the given GPIO line
62*/
63
64extern unsigned long sm501_gpio_get(struct device *dev,
65 unsigned long gpio);
66
67 49
68/* Platform data definitions */ 50/* Platform data definitions */
69 51
@@ -73,6 +55,8 @@ extern unsigned long sm501_gpio_get(struct device *dev,
73#define SM501FB_FLAG_USE_HWACCEL (1<<3) 55#define SM501FB_FLAG_USE_HWACCEL (1<<3)
74#define SM501FB_FLAG_PANEL_NO_FPEN (1<<4) 56#define SM501FB_FLAG_PANEL_NO_FPEN (1<<4)
75#define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5) 57#define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5)
58#define SM501FB_FLAG_PANEL_INV_FPEN (1<<6)
59#define SM501FB_FLAG_PANEL_INV_VBIASEN (1<<7)
76 60
77struct sm501_platdata_fbsub { 61struct sm501_platdata_fbsub {
78 struct fb_videomode *def_mode; 62 struct fb_videomode *def_mode;
@@ -102,11 +86,19 @@ struct sm501_platdata_fb {
102 struct sm501_platdata_fbsub *fb_pnl; 86 struct sm501_platdata_fbsub *fb_pnl;
103}; 87};
104 88
105/* gpio i2c */ 89/* gpio i2c
90 *
91 * Note, we have to pass in the bus number, as the number used will be
92 * passed to the i2c-gpio driver's platform_device.id, subsequently used
93 * to register the i2c bus.
94*/
106 95
107struct sm501_platdata_gpio_i2c { 96struct sm501_platdata_gpio_i2c {
97 unsigned int bus_num;
108 unsigned int pin_sda; 98 unsigned int pin_sda;
109 unsigned int pin_scl; 99 unsigned int pin_scl;
100 int udelay;
101 int timeout;
110}; 102};
111 103
112/* sm501_initdata 104/* sm501_initdata
@@ -129,6 +121,7 @@ struct sm501_reg_init {
129#define SM501_USE_FBACCEL (1<<6) 121#define SM501_USE_FBACCEL (1<<6)
130#define SM501_USE_AC97 (1<<7) 122#define SM501_USE_AC97 (1<<7)
131#define SM501_USE_I2S (1<<8) 123#define SM501_USE_I2S (1<<8)
124#define SM501_USE_GPIO (1<<9)
132 125
133#define SM501_USE_ALL (0xffffffff) 126#define SM501_USE_ALL (0xffffffff)
134 127
@@ -155,6 +148,8 @@ struct sm501_init_gpio {
155 struct sm501_reg_init gpio_ddr_high; 148 struct sm501_reg_init gpio_ddr_high;
156}; 149};
157 150
151#define SM501_FLAG_SUSPEND_OFF (1<<4)
152
158/* sm501_platdata 153/* sm501_platdata
159 * 154 *
160 * This is passed with the platform device to allow the board 155 * This is passed with the platform device to allow the board
@@ -168,6 +163,12 @@ struct sm501_platdata {
168 struct sm501_init_gpio *init_gpiop; 163 struct sm501_init_gpio *init_gpiop;
169 struct sm501_platdata_fb *fb; 164 struct sm501_platdata_fb *fb;
170 165
166 int flags;
167 int gpio_base;
168
169 int (*get_power)(struct device *dev);
170 int (*set_power)(struct device *dev, unsigned int on);
171
171 struct sm501_platdata_gpio_i2c *gpio_i2c; 172 struct sm501_platdata_gpio_i2c *gpio_i2c;
172 unsigned int gpio_i2c_nr; 173 unsigned int gpio_i2c_nr;
173}; 174};
diff --git a/include/linux/smb_fs.h b/include/linux/smb_fs.h
index 2c5cd55f44ff..923cd8a247b1 100644
--- a/include/linux/smb_fs.h
+++ b/include/linux/smb_fs.h
@@ -43,18 +43,13 @@ static inline struct smb_inode_info *SMB_I(struct inode *inode)
43} 43}
44 44
45/* macro names are short for word, double-word, long value (?) */ 45/* macro names are short for word, double-word, long value (?) */
46#define WVAL(buf,pos) \ 46#define WVAL(buf, pos) (get_unaligned_le16((u8 *)(buf) + (pos)))
47 (le16_to_cpu(get_unaligned((__le16 *)((u8 *)(buf) + (pos))))) 47#define DVAL(buf, pos) (get_unaligned_le32((u8 *)(buf) + (pos)))
48#define DVAL(buf,pos) \ 48#define LVAL(buf, pos) (get_unaligned_le64((u8 *)(buf) + (pos)))
49 (le32_to_cpu(get_unaligned((__le32 *)((u8 *)(buf) + (pos))))) 49
50#define LVAL(buf,pos) \ 50#define WSET(buf, pos, val) put_unaligned_le16((val), (u8 *)(buf) + (pos))
51 (le64_to_cpu(get_unaligned((__le64 *)((u8 *)(buf) + (pos))))) 51#define DSET(buf, pos, val) put_unaligned_le32((val), (u8 *)(buf) + (pos))
52#define WSET(buf,pos,val) \ 52#define LSET(buf, pos, val) put_unaligned_le64((val), (u8 *)(buf) + (pos))
53 put_unaligned(cpu_to_le16((u16)(val)), (__le16 *)((u8 *)(buf) + (pos)))
54#define DSET(buf,pos,val) \
55 put_unaligned(cpu_to_le32((u32)(val)), (__le32 *)((u8 *)(buf) + (pos)))
56#define LSET(buf,pos,val) \
57 put_unaligned(cpu_to_le64((u64)(val)), (__le64 *)((u8 *)(buf) + (pos)))
58 53
59/* where to find the base of the SMB packet proper */ 54/* where to find the base of the SMB packet proper */
60#define smb_base(buf) ((u8 *)(((u8 *)(buf))+4)) 55#define smb_base(buf) ((u8 *)(((u8 *)(buf))+4))
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
index 8e0556b8781c..3827b922ba1f 100644
--- a/include/linux/smc91x.h
+++ b/include/linux/smc91x.h
@@ -5,9 +5,19 @@
5#define SMC91X_USE_16BIT (1 << 1) 5#define SMC91X_USE_16BIT (1 << 1)
6#define SMC91X_USE_32BIT (1 << 2) 6#define SMC91X_USE_32BIT (1 << 2)
7 7
8#define SMC91X_NOWAIT (1 << 3)
9
10/* two bits for IO_SHIFT, let's hope later designs will keep this sane */
11#define SMC91X_IO_SHIFT_0 (0 << 4)
12#define SMC91X_IO_SHIFT_1 (1 << 4)
13#define SMC91X_IO_SHIFT_2 (2 << 4)
14#define SMC91X_IO_SHIFT_3 (3 << 4)
15#define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3)
16
17#define SMC91X_USE_DMA (1 << 6)
18
8struct smc91x_platdata { 19struct smc91x_platdata {
9 unsigned long flags; 20 unsigned long flags;
10 unsigned long irq_flags; /* IRQF_... */
11}; 21};
12 22
13#endif /* __SMC91X_H__ */ 23#endif /* __SMC91X_H__ */
diff --git a/include/linux/spi/ds1305.h b/include/linux/spi/ds1305.h
new file mode 100644
index 000000000000..287ec830eab7
--- /dev/null
+++ b/include/linux/spi/ds1305.h
@@ -0,0 +1,35 @@
1#ifndef __LINUX_SPI_DS1305_H
2#define __LINUX_SPI_DS1305_H
3
4/*
5 * One-time configuration for ds1305 and ds1306 RTC chips.
6 *
7 * Put a pointer to this in spi_board_info.platform_data if you want to
8 * be sure that Linux (re)initializes this as needed ... after losing
9 * backup power, and potentially on the first boot.
10 */
11struct ds1305_platform_data {
12
13 /* Trickle charge configuration: it's OK to leave out the MAGIC
14 * bitmask; mask in either DS1 or DS2, and then one of 2K/4k/8K.
15 */
16#define DS1305_TRICKLE_MAGIC 0xa0
17#define DS1305_TRICKLE_DS2 0x08 /* two diodes */
18#define DS1305_TRICKLE_DS1 0x04 /* one diode */
19#define DS1305_TRICKLE_2K 0x01 /* 2 KOhm resistance */
20#define DS1305_TRICKLE_4K 0x02 /* 4 KOhm resistance */
21#define DS1305_TRICKLE_8K 0x03 /* 8 KOhm resistance */
22 u8 trickle;
23
24 /* set only on ds1306 parts */
25 bool is_ds1306;
26
27 /* ds1306 only: enable 1 Hz output */
28 bool en_1hz;
29
30 /* REVISIT: the driver currently expects nINT0 to be wired
31 * as the alarm IRQ. ALM1 may also need to be set up ...
32 */
33};
34
35#endif /* __LINUX_SPI_DS1305_H */
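A board-code sketch of how this platform data would be wired up through spi_board_info; the modalias string, bus/chipselect numbers and trickle-charge settings are illustrative assumptions, not mandated by this header.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/ds1305.h>

/* Hypothetical board: DS1306 with trickle charge via one diode + 4 kOhm. */
static struct ds1305_platform_data example_ds1306_pdata = {
	.trickle   = DS1305_TRICKLE_MAGIC | DS1305_TRICKLE_DS1 | DS1305_TRICKLE_4K,
	.is_ds1306 = true,
	.en_1hz    = false,
};

static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		.modalias      = "rtc-ds1305",	/* assumed driver name */
		.platform_data = &example_ds1306_pdata,
		.max_speed_hz  = 2000000,
		.bus_num       = 1,
		.chip_select   = 0,
	},
};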
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
index 835ddf47d45c..22ef107d7704 100644
--- a/include/linux/spi/mcp23s08.h
+++ b/include/linux/spi/mcp23s08.h
@@ -1,18 +1,25 @@
1 1
2/* FIXME driver should be able to handle all four slaves that 2/* FIXME driver should be able to handle IRQs... */
3 * can be hooked up to each chipselect, as well as IRQs... 3
4 */ 4struct mcp23s08_chip_info {
5 bool is_present; /* true iff populated */
6 u8 pullups; /* BIT(x) means enable pullup x */
7};
5 8
6struct mcp23s08_platform_data { 9struct mcp23s08_platform_data {
7 /* four slaves can share one SPI chipselect */ 10 /* Four slaves (numbered 0..3) can share one SPI chipselect, and
8 u8 slave; 11 * will provide 8..32 GPIOs using 1..4 gpio_chip instances.
12 */
13 struct mcp23s08_chip_info chip[4];
9 14
10 /* number assigned to the first GPIO */ 15 /* "base" is the number of the first GPIO. Dynamic assignment is
16 * not currently supported, and even if there are gaps in chip
17 * addressing the GPIO numbers are sequential .. so for example
18 * if only slaves 0 and 3 are present, their GPIOs range from
19 * base to base+15.
20 */
11 unsigned base; 21 unsigned base;
12 22
13 /* pins with pullups */
14 u8 pullups;
15
16 void *context; /* param to setup/teardown */ 23 void *context; /* param to setup/teardown */
17 24
18 int (*setup)(struct spi_device *spi, 25 int (*setup)(struct spi_device *spi,
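Following the comments above, a hedged board-code sketch of the new per-slave layout: two populated expanders sharing one chipselect, with GPIO numbering starting at an illustrative base.

#include <linux/spi/mcp23s08.h>

/* Illustrative only: slaves 0 and 3 populated, pullups on the first chip's
 * low four pins; the GPIOs then run from 208 to 208+15 as described above. */
static struct mcp23s08_platform_data example_mcp_pdata = {
	.chip[0] = { .is_present = true, .pullups = 0x0f },
	.chip[3] = { .is_present = true, .pullups = 0x00 },
	.base    = 208,
};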
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index b9a76c972084..a9cc29d46653 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -82,7 +82,7 @@ struct spi_device {
82 int irq; 82 int irq;
83 void *controller_state; 83 void *controller_state;
84 void *controller_data; 84 void *controller_data;
85 const char *modalias; 85 char modalias[32];
86 86
87 /* 87 /*
88 * likely need more hooks for more protocol options affecting how 88 * likely need more hooks for more protocol options affecting how
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d311a090fae7..61e5610ad165 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -46,6 +46,7 @@
46 * linux/spinlock.h: builds the final spin_*() APIs. 46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */ 47 */
48 48
49#include <linux/typecheck.h>
49#include <linux/preempt.h> 50#include <linux/preempt.h>
50#include <linux/linkage.h> 51#include <linux/linkage.h>
51#include <linux/compiler.h> 52#include <linux/compiler.h>
@@ -191,23 +192,53 @@ do { \
191 192
192#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 193#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
193 194
194#define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) 195#define spin_lock_irqsave(lock, flags) \
195#define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) 196 do { \
196#define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) 197 typecheck(unsigned long, flags); \
198 flags = _spin_lock_irqsave(lock); \
199 } while (0)
200#define read_lock_irqsave(lock, flags) \
201 do { \
202 typecheck(unsigned long, flags); \
203 flags = _read_lock_irqsave(lock); \
204 } while (0)
205#define write_lock_irqsave(lock, flags) \
206 do { \
207 typecheck(unsigned long, flags); \
208 flags = _write_lock_irqsave(lock); \
209 } while (0)
197 210
198#ifdef CONFIG_DEBUG_LOCK_ALLOC 211#ifdef CONFIG_DEBUG_LOCK_ALLOC
199#define spin_lock_irqsave_nested(lock, flags, subclass) \ 212#define spin_lock_irqsave_nested(lock, flags, subclass) \
200 flags = _spin_lock_irqsave_nested(lock, subclass) 213 do { \
214 typecheck(unsigned long, flags); \
215 flags = _spin_lock_irqsave_nested(lock, subclass); \
216 } while (0)
201#else 217#else
202#define spin_lock_irqsave_nested(lock, flags, subclass) \ 218#define spin_lock_irqsave_nested(lock, flags, subclass) \
203 flags = _spin_lock_irqsave(lock) 219 do { \
220 typecheck(unsigned long, flags); \
221 flags = _spin_lock_irqsave(lock); \
222 } while (0)
204#endif 223#endif
205 224
206#else 225#else
207 226
208#define spin_lock_irqsave(lock, flags) _spin_lock_irqsave(lock, flags) 227#define spin_lock_irqsave(lock, flags) \
209#define read_lock_irqsave(lock, flags) _read_lock_irqsave(lock, flags) 228 do { \
210#define write_lock_irqsave(lock, flags) _write_lock_irqsave(lock, flags) 229 typecheck(unsigned long, flags); \
230 _spin_lock_irqsave(lock, flags); \
231 } while (0)
232#define read_lock_irqsave(lock, flags) \
233 do { \
234 typecheck(unsigned long, flags); \
235 _read_lock_irqsave(lock, flags); \
236 } while (0)
237#define write_lock_irqsave(lock, flags) \
238 do { \
239 typecheck(unsigned long, flags); \
240 _write_lock_irqsave(lock, flags); \
241 } while (0)
211#define spin_lock_irqsave_nested(lock, flags, subclass) \ 242#define spin_lock_irqsave_nested(lock, flags, subclass) \
212 spin_lock_irqsave(lock, flags) 243 spin_lock_irqsave(lock, flags)
213 244
@@ -260,16 +291,25 @@ do { \
260} while (0) 291} while (0)
261#endif 292#endif
262 293
263#define spin_unlock_irqrestore(lock, flags) \ 294#define spin_unlock_irqrestore(lock, flags) \
264 _spin_unlock_irqrestore(lock, flags) 295 do { \
296 typecheck(unsigned long, flags); \
297 _spin_unlock_irqrestore(lock, flags); \
298 } while (0)
265#define spin_unlock_bh(lock) _spin_unlock_bh(lock) 299#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
266 300
267#define read_unlock_irqrestore(lock, flags) \ 301#define read_unlock_irqrestore(lock, flags) \
268 _read_unlock_irqrestore(lock, flags) 302 do { \
303 typecheck(unsigned long, flags); \
304 _read_unlock_irqrestore(lock, flags); \
305 } while (0)
269#define read_unlock_bh(lock) _read_unlock_bh(lock) 306#define read_unlock_bh(lock) _read_unlock_bh(lock)
270 307
271#define write_unlock_irqrestore(lock, flags) \ 308#define write_unlock_irqrestore(lock, flags) \
272 _write_unlock_irqrestore(lock, flags) 309 do { \
310 typecheck(unsigned long, flags); \
311 _write_unlock_irqrestore(lock, flags); \
312 } while (0)
273#define write_unlock_bh(lock) _write_unlock_bh(lock) 313#define write_unlock_bh(lock) _write_unlock_bh(lock)
274 314
275#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) 315#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
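The net effect of the typecheck() additions, sketched below: "flags" must now be an unsigned long, so a mis-typed variable is flagged at compile time instead of being silently truncated on architectures where the saved flags do not fit in an int.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;	/* an "int flags" here now provokes a warning */

	spin_lock_irqsave(&example_lock, flags);
	/* ... work done with local interrupts disabled ... */
	spin_unlock_irqrestore(&example_lock, flags);
}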
diff --git a/include/linux/string.h b/include/linux/string.h
index efdc44593b52..810d80df0a1d 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -111,5 +111,8 @@ extern void argv_free(char **argv);
111 111
112extern bool sysfs_streq(const char *s1, const char *s2); 112extern bool sysfs_streq(const char *s1, const char *s2);
113 113
114extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
115 const void *from, size_t available);
116
114#endif 117#endif
115#endif /* _LINUX_STRING_H_ */ 118#endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0522f368f9d7..d6ff145919ca 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -305,6 +305,7 @@ asmlinkage long sys_fcntl64(unsigned int fd,
305#endif 305#endif
306asmlinkage long sys_dup(unsigned int fildes); 306asmlinkage long sys_dup(unsigned int fildes);
307asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); 307asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
308asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
308asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); 309asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
309asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, 310asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd,
310 unsigned long arg); 311 unsigned long arg);
@@ -409,6 +410,8 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname,
409asmlinkage long sys_bind(int, struct sockaddr __user *, int); 410asmlinkage long sys_bind(int, struct sockaddr __user *, int);
410asmlinkage long sys_connect(int, struct sockaddr __user *, int); 411asmlinkage long sys_connect(int, struct sockaddr __user *, int);
411asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); 412asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *);
413asmlinkage long sys_paccept(int, struct sockaddr __user *, int __user *,
414 const __user sigset_t *, size_t, int);
412asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); 415asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
413asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); 416asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
414asmlinkage long sys_send(int, void __user *, size_t, unsigned); 417asmlinkage long sys_send(int, void __user *, size_t, unsigned);
@@ -428,6 +431,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
428asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, 431asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
429 fd_set __user *exp, struct timeval __user *tvp); 432 fd_set __user *exp, struct timeval __user *tvp);
430asmlinkage long sys_epoll_create(int size); 433asmlinkage long sys_epoll_create(int size);
434asmlinkage long sys_epoll_create1(int flags);
431asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, 435asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
432 struct epoll_event __user *event); 436 struct epoll_event __user *event);
433asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, 437asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
@@ -443,7 +447,7 @@ asmlinkage long sys_newuname(struct new_utsname __user *name);
443 447
444asmlinkage long sys_getrlimit(unsigned int resource, 448asmlinkage long sys_getrlimit(unsigned int resource,
445 struct rlimit __user *rlim); 449 struct rlimit __user *rlim);
446#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64) || defined(CONFIG_V850)) 450#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64))
447asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim); 451asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
448#endif 452#endif
449asmlinkage long sys_setrlimit(unsigned int resource, 453asmlinkage long sys_setrlimit(unsigned int resource,
@@ -543,6 +547,7 @@ asmlinkage long sys_get_mempolicy(int __user *policy,
543 unsigned long addr, unsigned long flags); 547 unsigned long addr, unsigned long flags);
544 548
545asmlinkage long sys_inotify_init(void); 549asmlinkage long sys_inotify_init(void);
550asmlinkage long sys_inotify_init1(int flags);
546asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, 551asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
547 u32 mask); 552 u32 mask);
548asmlinkage long sys_inotify_rm_watch(int fd, u32 wd); 553asmlinkage long sys_inotify_rm_watch(int fd, u32 wd);
@@ -608,12 +613,14 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
608 size_t len); 613 size_t len);
609asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); 614asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
610asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); 615asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask);
616asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags);
611asmlinkage long sys_timerfd_create(int clockid, int flags); 617asmlinkage long sys_timerfd_create(int clockid, int flags);
612asmlinkage long sys_timerfd_settime(int ufd, int flags, 618asmlinkage long sys_timerfd_settime(int ufd, int flags,
613 const struct itimerspec __user *utmr, 619 const struct itimerspec __user *utmr,
614 struct itimerspec __user *otmr); 620 struct itimerspec __user *otmr);
615asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); 621asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
616asmlinkage long sys_eventfd(unsigned int count); 622asmlinkage long sys_eventfd(unsigned int count);
623asmlinkage long sys_eventfd2(unsigned int count, int flags);
617asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); 624asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
618 625
619int kernel_execve(const char *filename, char *const argv[], char *const envp[]); 626int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
index 5d69c0744fff..18269e956a71 100644
--- a/include/linux/taskstats.h
+++ b/include/linux/taskstats.h
@@ -31,7 +31,7 @@
31 */ 31 */
32 32
33 33
34#define TASKSTATS_VERSION 6 34#define TASKSTATS_VERSION 7
35#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN 35#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
36 * in linux/sched.h */ 36 * in linux/sched.h */
37 37
@@ -157,6 +157,10 @@ struct taskstats {
157 __u64 ac_utimescaled; /* utime scaled on frequency etc */ 157 __u64 ac_utimescaled; /* utime scaled on frequency etc */
158 __u64 ac_stimescaled; /* stime scaled on frequency etc */ 158 __u64 ac_stimescaled; /* stime scaled on frequency etc */
159 __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */ 159 __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */
160
161 /* Delay waiting for memory reclaim */
162 __u64 freepages_count;
163 __u64 freepages_delay_total;
160}; 164};
161 165
162 166
diff --git a/include/linux/tick.h b/include/linux/tick.h
index a881c652f7e9..d3c02695dc5d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -49,6 +49,7 @@ struct tick_sched {
49 unsigned long check_clocks; 49 unsigned long check_clocks;
50 enum tick_nohz_mode nohz_mode; 50 enum tick_nohz_mode nohz_mode;
51 ktime_t idle_tick; 51 ktime_t idle_tick;
52 int inidle;
52 int tick_stopped; 53 int tick_stopped;
53 unsigned long idle_jiffies; 54 unsigned long idle_jiffies;
54 unsigned long idle_calls; 55 unsigned long idle_calls;
@@ -105,14 +106,14 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
105#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 106#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
106 107
107# ifdef CONFIG_NO_HZ 108# ifdef CONFIG_NO_HZ
108extern void tick_nohz_stop_sched_tick(void); 109extern void tick_nohz_stop_sched_tick(int inidle);
109extern void tick_nohz_restart_sched_tick(void); 110extern void tick_nohz_restart_sched_tick(void);
110extern void tick_nohz_update_jiffies(void); 111extern void tick_nohz_update_jiffies(void);
111extern ktime_t tick_nohz_get_sleep_length(void); 112extern ktime_t tick_nohz_get_sleep_length(void);
112extern void tick_nohz_stop_idle(int cpu); 113extern void tick_nohz_stop_idle(int cpu);
113extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 114extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
114# else 115# else
115static inline void tick_nohz_stop_sched_tick(void) { } 116static inline void tick_nohz_stop_sched_tick(int inidle) { }
116static inline void tick_nohz_restart_sched_tick(void) { } 117static inline void tick_nohz_restart_sched_tick(void) { }
117static inline void tick_nohz_update_jiffies(void) { } 118static inline void tick_nohz_update_jiffies(void) { }
118static inline ktime_t tick_nohz_get_sleep_length(void) 119static inline ktime_t tick_nohz_get_sleep_length(void)
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
index cf2b10d75731..86cb0501d3e2 100644
--- a/include/linux/timerfd.h
+++ b/include/linux/timerfd.h
@@ -8,9 +8,15 @@
8#ifndef _LINUX_TIMERFD_H 8#ifndef _LINUX_TIMERFD_H
9#define _LINUX_TIMERFD_H 9#define _LINUX_TIMERFD_H
10 10
11/* For O_CLOEXEC and O_NONBLOCK */
12#include <linux/fcntl.h>
11 13
14/* Flags for timerfd_settime. */
12#define TFD_TIMER_ABSTIME (1 << 0) 15#define TFD_TIMER_ABSTIME (1 << 0)
13 16
17/* Flags for timerfd_create. */
18#define TFD_CLOEXEC O_CLOEXEC
19#define TFD_NONBLOCK O_NONBLOCK
14 20
15 21
16#endif /* _LINUX_TIMERFD_H */ 22#endif /* _LINUX_TIMERFD_H */
diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h
new file mode 100644
index 000000000000..eb5b74a575be
--- /dev/null
+++ b/include/linux/typecheck.h
@@ -0,0 +1,24 @@
1#ifndef TYPECHECK_H_INCLUDED
2#define TYPECHECK_H_INCLUDED
3
4/*
5 * Check at compile time that something is of a particular type.
6 * Always evaluates to 1 so you may use it easily in comparisons.
7 */
8#define typecheck(type,x) \
9({ type __dummy; \
10 typeof(x) __dummy2; \
11 (void)(&__dummy == &__dummy2); \
12 1; \
13})
14
15/*
16 * Check at compile time that 'function' is a certain type, or is a pointer
17 * to that type (needs to use typedef for the function type.)
18 */
19#define typecheck_fn(type,function) \
20({ typeof(type) __tmp = function; \
21 (void)__tmp; \
22})
23
24#endif /* TYPECHECK_H_INCLUDED */
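A minimal illustration of the two macros on their own; the names are made up for the example.

#include <linux/typecheck.h>

typedef void (*example_cb_t)(unsigned long data);

/* Warn at build time if callers pass a wrongly-typed callback or argument. */
#define example_invoke(fn, data)		\
({						\
	typecheck_fn(example_cb_t, fn);		\
	typecheck(unsigned long, data);		\
	(fn)(data);				\
})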
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 747c3a49cdc9..c932390c6da0 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -330,7 +330,7 @@ extern int usb_string_id(struct usb_composite_dev *c);
330 dev_vdbg(&(d)->gadget->dev , fmt , ## args) 330 dev_vdbg(&(d)->gadget->dev , fmt , ## args)
331#define ERROR(d, fmt, args...) \ 331#define ERROR(d, fmt, args...) \
332 dev_err(&(d)->gadget->dev , fmt , ## args) 332 dev_err(&(d)->gadget->dev , fmt , ## args)
333#define WARN(d, fmt, args...) \ 333#define WARNING(d, fmt, args...) \
334 dev_warn(&(d)->gadget->dev , fmt , ## args) 334 dev_warn(&(d)->gadget->dev , fmt , ## args)
335#define INFO(d, fmt, args...) \ 335#define INFO(d, fmt, args...) \
336 dev_info(&(d)->gadget->dev , fmt , ## args) 336 dev_info(&(d)->gadget->dev , fmt , ## args)
diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h
index 8eff0b53910b..b3c4a60ceeb3 100644
--- a/include/linux/virtio_9p.h
+++ b/include/linux/virtio_9p.h
@@ -1,5 +1,7 @@
1#ifndef _LINUX_VIRTIO_9P_H 1#ifndef _LINUX_VIRTIO_9P_H
2#define _LINUX_VIRTIO_9P_H 2#define _LINUX_VIRTIO_9P_H
3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */
3#include <linux/virtio_config.h> 5#include <linux/virtio_config.h>
4 6
5/* The ID for virtio console */ 7/* The ID for virtio console */
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index 979524ee75b7..c30c7bfbf39b 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -1,5 +1,7 @@
1#ifndef _LINUX_VIRTIO_BALLOON_H 1#ifndef _LINUX_VIRTIO_BALLOON_H
2#define _LINUX_VIRTIO_BALLOON_H 2#define _LINUX_VIRTIO_BALLOON_H
3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */
3#include <linux/virtio_config.h> 5#include <linux/virtio_config.h>
4 6
5/* The ID for virtio_balloon */ 7/* The ID for virtio_balloon */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 5f79a5f9de79..c1aef85243bf 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -1,5 +1,7 @@
1#ifndef _LINUX_VIRTIO_BLK_H 1#ifndef _LINUX_VIRTIO_BLK_H
2#define _LINUX_VIRTIO_BLK_H 2#define _LINUX_VIRTIO_BLK_H
3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */
3#include <linux/virtio_config.h> 5#include <linux/virtio_config.h>
4 6
5/* The ID for virtio_block */ 7/* The ID for virtio_block */
@@ -11,6 +13,7 @@
11#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */ 13#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */
12#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ 14#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
13#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ 15#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
16#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
14 17
15struct virtio_blk_config 18struct virtio_blk_config
16{ 19{
@@ -26,6 +29,8 @@ struct virtio_blk_config
26 __u8 heads; 29 __u8 heads;
27 __u8 sectors; 30 __u8 sectors;
28 } geometry; 31 } geometry;
32 /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
33 __u32 blk_size;
29} __attribute__((packed)); 34} __attribute__((packed));
30 35
31/* These two define direction. */ 36/* These two define direction. */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index f364bbf63c34..bf8ec283b232 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -1,5 +1,8 @@
1#ifndef _LINUX_VIRTIO_CONFIG_H 1#ifndef _LINUX_VIRTIO_CONFIG_H
2#define _LINUX_VIRTIO_CONFIG_H 2#define _LINUX_VIRTIO_CONFIG_H
3/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
4 * anyone can use the definitions to implement compatible drivers/servers. */
5
3/* Virtio devices use a standardized configuration space to define their 6/* Virtio devices use a standardized configuration space to define their
4 * features and pass configuration information, but each implementation can 7 * features and pass configuration information, but each implementation can
5 * store and access that space differently. */ 8 * store and access that space differently. */
@@ -15,6 +18,12 @@
15/* We've given up on this device. */ 18/* We've given up on this device. */
16#define VIRTIO_CONFIG_S_FAILED 0x80 19#define VIRTIO_CONFIG_S_FAILED 0x80
17 20
21/* Some virtio feature bits (currently bits 28 through 31) are reserved for the
22 * transport being used (eg. virtio_ring), the rest are per-device feature
23 * bits. */
24#define VIRTIO_TRANSPORT_F_START 28
25#define VIRTIO_TRANSPORT_F_END 32
26
18/* Do we get callbacks when the ring is completely used, even if we've 27/* Do we get callbacks when the ring is completely used, even if we've
19 * suppressed them? */ 28 * suppressed them? */
20#define VIRTIO_F_NOTIFY_ON_EMPTY 24 29#define VIRTIO_F_NOTIFY_ON_EMPTY 24
@@ -52,9 +61,10 @@
52 * @get_features: get the array of feature bits for this device. 61 * @get_features: get the array of feature bits for this device.
53 * vdev: the virtio_device 62 * vdev: the virtio_device
54 * Returns the first 32 feature bits (all we currently need). 63 * Returns the first 32 feature bits (all we currently need).
55 * @set_features: confirm what device features we'll be using. 64 * @finalize_features: confirm what device features we'll be using.
56 * vdev: the virtio_device 65 * vdev: the virtio_device
57 * feature: the first 32 feature bits 66 * This gives the final feature bits for the device: it can change
67 * the dev->feature bits if it wants.
58 */ 68 */
59struct virtio_config_ops 69struct virtio_config_ops
60{ 70{
@@ -70,7 +80,7 @@ struct virtio_config_ops
70 void (*callback)(struct virtqueue *)); 80 void (*callback)(struct virtqueue *));
71 void (*del_vq)(struct virtqueue *vq); 81 void (*del_vq)(struct virtqueue *vq);
72 u32 (*get_features)(struct virtio_device *vdev); 82 u32 (*get_features)(struct virtio_device *vdev);
73 void (*set_features)(struct virtio_device *vdev, u32 features); 83 void (*finalize_features)(struct virtio_device *vdev);
74}; 84};
75 85
76/* If driver didn't advertise the feature, it will never appear. */ 86/* If driver didn't advertise the feature, it will never appear. */
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index ed2d4ead7eb7..19a0da0dba41 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -1,6 +1,8 @@
1#ifndef _LINUX_VIRTIO_CONSOLE_H 1#ifndef _LINUX_VIRTIO_CONSOLE_H
2#define _LINUX_VIRTIO_CONSOLE_H 2#define _LINUX_VIRTIO_CONSOLE_H
3#include <linux/virtio_config.h> 3#include <linux/virtio_config.h>
4/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
5 * anyone can use the definitions to implement compatible drivers/servers. */
4 6
5/* The ID for virtio console */ 7/* The ID for virtio console */
6#define VIRTIO_ID_CONSOLE 3 8#define VIRTIO_ID_CONSOLE 3
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 38c0571820fb..5e33761b9b8a 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -1,5 +1,7 @@
1#ifndef _LINUX_VIRTIO_NET_H 1#ifndef _LINUX_VIRTIO_NET_H
2#define _LINUX_VIRTIO_NET_H 2#define _LINUX_VIRTIO_NET_H
3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */
3#include <linux/virtio_config.h> 5#include <linux/virtio_config.h>
4 6
5/* The ID for virtio_net */ 7/* The ID for virtio_net */
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index b3151659cf49..cdef35742932 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -9,9 +9,8 @@
9 * Authors: 9 * Authors:
10 * Anthony Liguori <aliguori@us.ibm.com> 10 * Anthony Liguori <aliguori@us.ibm.com>
11 * 11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or later. 12 * This header is BSD licensed so anyone can use the definitions to implement
13 * See the COPYING file in the top-level directory. 13 * compatible drivers/servers.
14 *
15 */ 14 */
16 15
17#ifndef _LINUX_VIRTIO_PCI_H 16#ifndef _LINUX_VIRTIO_PCI_H
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index abe481ed990e..c4a598fb3826 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -120,6 +120,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
120 void (*notify)(struct virtqueue *vq), 120 void (*notify)(struct virtqueue *vq),
121 void (*callback)(struct virtqueue *vq)); 121 void (*callback)(struct virtqueue *vq));
122void vring_del_virtqueue(struct virtqueue *vq); 122void vring_del_virtqueue(struct virtqueue *vq);
123/* Filter out transport-specific feature bits. */
124void vring_transport_features(struct virtio_device *vdev);
123 125
124irqreturn_t vring_interrupt(int irq, void *_vq); 126irqreturn_t vring_interrupt(int irq, void *_vq);
125#endif /* __KERNEL__ */ 127#endif /* __KERNEL__ */
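A transport built on virtio_ring could implement the new hook roughly as below; vring_transport_features() strips the transport bits (28-31) that the ring implementation does not support. The function name and the trailing step are assumptions for illustration, not taken from the patch:

#include <linux/virtio.h>
#include <linux/virtio_ring.h>

/* Sketch: a virtio_ring-based transport finalizing feature negotiation. */
static void example_finalize_features(struct virtio_device *vdev)
{
	/* Drop any transport feature bits vring does not implement,
	 * leaving the per-device bits untouched. */
	vring_transport_features(vdev);

	/* A real transport would now write the accepted bits back to the
	 * device, e.g. through its configuration registers. */
}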
diff --git a/include/linux/virtio_rng.h b/include/linux/virtio_rng.h
index 331afb6c9f62..1a85dab8a940 100644
--- a/include/linux/virtio_rng.h
+++ b/include/linux/virtio_rng.h
@@ -1,5 +1,7 @@
1#ifndef _LINUX_VIRTIO_RNG_H 1#ifndef _LINUX_VIRTIO_RNG_H
2#define _LINUX_VIRTIO_RNG_H 2#define _LINUX_VIRTIO_RNG_H
3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */
3#include <linux/virtio_config.h> 5#include <linux/virtio_config.h>
4 6
5/* The ID for virtio_rng */ 7/* The ID for virtio_rng */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index e83b69346d23..58334d439516 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -44,6 +44,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
44 NR_VM_EVENT_ITEMS 44 NR_VM_EVENT_ITEMS
45}; 45};
46 46
47extern const struct seq_operations fragmentation_op;
48extern const struct seq_operations pagetypeinfo_op;
49extern const struct seq_operations zoneinfo_op;
50extern const struct seq_operations vmstat_op;
51extern int sysctl_stat_interval;
52
47#ifdef CONFIG_VM_EVENT_COUNTERS 53#ifdef CONFIG_VM_EVENT_COUNTERS
48/* 54/*
49 * Light weight per cpu counter implementation. 55 * Light weight per cpu counter implementation.
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 9448ffbdcbf6..14c0e91be9b5 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -12,6 +12,7 @@
12#include <linux/mutex.h> 12#include <linux/mutex.h>
13#include <linux/console_struct.h> 13#include <linux/console_struct.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/consolemap.h>
15 16
16/* 17/*
17 * Presently, a lot of graphics programs do not restore the contents of 18 * Presently, a lot of graphics programs do not restore the contents of
@@ -54,6 +55,7 @@ void redraw_screen(struct vc_data *vc, int is_switch);
54struct tty_struct; 55struct tty_struct;
55int tioclinux(struct tty_struct *tty, unsigned long arg); 56int tioclinux(struct tty_struct *tty, unsigned long arg);
56 57
58#ifdef CONFIG_CONSOLE_TRANSLATIONS
57/* consolemap.c */ 59/* consolemap.c */
58 60
59struct unimapinit; 61struct unimapinit;
@@ -71,6 +73,23 @@ void con_free_unimap(struct vc_data *vc);
71void con_protect_unimap(struct vc_data *vc, int rdonly); 73void con_protect_unimap(struct vc_data *vc, int rdonly);
72int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); 74int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
73 75
 76#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \
 77 ((vc)->vc_toggle_meta ? 0x80 : 0)])
78#else
79#define con_set_trans_old(arg) (0)
80#define con_get_trans_old(arg) (-EINVAL)
81#define con_set_trans_new(arg) (0)
82#define con_get_trans_new(arg) (-EINVAL)
83#define con_clear_unimap(vc, ui) (0)
84#define con_set_unimap(vc, ct, list) (0)
85#define con_set_default_unimap(vc) (0)
86#define con_copy_unimap(d, s) (0)
87#define con_get_unimap(vc, ct, uct, list) (-EINVAL)
88#define con_free_unimap(vc) do { ; } while (0)
89
90#define vc_translate(vc, c) (c)
91#endif
92
74/* vt.c */ 93/* vt.c */
75int vt_waitactive(int vt); 94int vt_waitactive(int vt);
76void change_console(struct vc_data *new_vc); 95void change_console(struct vc_data *new_vc);
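With the CONFIG_CONSOLE_TRANSLATIONS stubs above, callers can stay free of #ifdefs; when translations are compiled out, vc_translate() degrades to the identity mapping. A hypothetical caller, for illustration only:

#include <linux/vt_kern.h>

/* Sketch: works with or without CONFIG_CONSOLE_TRANSLATIONS. */
static unsigned int example_map_char(struct vc_data *vc, unsigned char c)
{
	return vc_translate(vc, c);	/* identity when translations are off */
}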
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 542526c6e8ef..5c158c477ac7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -179,6 +179,8 @@ __create_workqueue_key(const char *name, int singlethread,
179extern void destroy_workqueue(struct workqueue_struct *wq); 179extern void destroy_workqueue(struct workqueue_struct *wq);
180 180
181extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); 181extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
182extern int queue_work_on(int cpu, struct workqueue_struct *wq,
183 struct work_struct *work);
182extern int queue_delayed_work(struct workqueue_struct *wq, 184extern int queue_delayed_work(struct workqueue_struct *wq,
183 struct delayed_work *work, unsigned long delay); 185 struct delayed_work *work, unsigned long delay);
184extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 186extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
@@ -188,6 +190,7 @@ extern void flush_workqueue(struct workqueue_struct *wq);
188extern void flush_scheduled_work(void); 190extern void flush_scheduled_work(void);
189 191
190extern int schedule_work(struct work_struct *work); 192extern int schedule_work(struct work_struct *work);
193extern int schedule_work_on(int cpu, struct work_struct *work);
191extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); 194extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
192extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, 195extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
193 unsigned long delay); 196 unsigned long delay);
@@ -198,6 +201,8 @@ extern int keventd_up(void);
198extern void init_workqueues(void); 201extern void init_workqueues(void);
199int execute_in_process_context(work_func_t fn, struct execute_work *); 202int execute_in_process_context(work_func_t fn, struct execute_work *);
200 203
204extern int flush_work(struct work_struct *work);
205
201extern int cancel_work_sync(struct work_struct *work); 206extern int cancel_work_sync(struct work_struct *work);
202 207
203/* 208/*
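queue_work_on() and schedule_work_on() queue a work item on a specific CPU, and the new flush_work() waits for a single item instead of draining the whole queue. A minimal usage sketch (the CPU number and names are illustrative, and the target CPU is assumed to be online):

#include <linux/workqueue.h>

static void example_fn(struct work_struct *w)
{
	/* runs in process context on the CPU it was queued to */
}

static DECLARE_WORK(example_work, example_fn);

static void example_usage(void)
{
	schedule_work_on(1, &example_work);	/* pin this item to CPU 1 */
	flush_work(&example_work);		/* wait for this one item only */
}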
diff --git a/include/mtd/ubi-user.h b/include/mtd/ubi-user.h
index a7421f130cc0..ccdc562e444e 100644
--- a/include/mtd/ubi-user.h
+++ b/include/mtd/ubi-user.h
@@ -58,6 +58,13 @@
58 * device should be used. A &struct ubi_rsvol_req object has to be properly 58 * device should be used. A &struct ubi_rsvol_req object has to be properly
59 * filled and a pointer to it has to be passed to the IOCTL. 59 * filled and a pointer to it has to be passed to the IOCTL.
60 * 60 *
61 * UBI volumes re-name
62 * ~~~~~~~~~~~~~~~~~~~
63 *
64 * To re-name several volumes atomically at one go, the %UBI_IOCRNVOL command
65 * of the UBI character device should be used. A &struct ubi_rnvol_req object
66 * has to be properly filled and a pointer to it has to be passed to the IOCTL.
67 *
61 * UBI volume update 68 * UBI volume update
62 * ~~~~~~~~~~~~~~~~~ 69 * ~~~~~~~~~~~~~~~~~
63 * 70 *
@@ -104,6 +111,8 @@
104#define UBI_IOCRMVOL _IOW(UBI_IOC_MAGIC, 1, int32_t) 111#define UBI_IOCRMVOL _IOW(UBI_IOC_MAGIC, 1, int32_t)
105/* Re-size an UBI volume */ 112/* Re-size an UBI volume */
106#define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req) 113#define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req)
114/* Re-name volumes */
115#define UBI_IOCRNVOL _IOW(UBI_IOC_MAGIC, 3, struct ubi_rnvol_req)
107 116
108/* IOCTL commands of the UBI control character device */ 117/* IOCTL commands of the UBI control character device */
109 118
@@ -128,6 +137,9 @@
128/* Maximum MTD device name length supported by UBI */ 137/* Maximum MTD device name length supported by UBI */
129#define MAX_UBI_MTD_NAME_LEN 127 138#define MAX_UBI_MTD_NAME_LEN 127
130 139
140/* Maximum amount of UBI volumes that can be re-named at one go */
141#define UBI_MAX_RNVOL 32
142
131/* 143/*
132 * UBI data type hint constants. 144 * UBI data type hint constants.
133 * 145 *
@@ -176,20 +188,20 @@ enum {
176 * it will be 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages. 188 * it will be 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages.
177 * 189 *
178 * But in rare cases, if this optimizes things, the VID header may be placed to 190 * But in rare cases, if this optimizes things, the VID header may be placed to
179 * a different offset. For example, the boot-loader might do things faster if the 191 * a different offset. For example, the boot-loader might do things faster if
180 * VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. As 192 * the VID header sits at the end of the first 2KiB NAND page with 4 sub-pages.
181 * the boot-loader would not normally need to read EC headers (unless it needs 193 * As the boot-loader would not normally need to read EC headers (unless it
182 * UBI in RW mode), it might be faster to calculate ECC. This is weird example, 194 * needs UBI in RW mode), it might be faster to calculate ECC. This is weird
183 * but it real-life example. So, in this example, @vid_hdr_offer would be 195 * example, but it real-life example. So, in this example, @vid_hdr_offer would
184 * 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes 196 * be 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes
185 * aligned, which is OK, as UBI is clever enough to realize this is 4th sub-page 197 * aligned, which is OK, as UBI is clever enough to realize this is 4th
186 * of the first page and add needed padding. 198 * sub-page of the first page and add needed padding.
187 */ 199 */
188struct ubi_attach_req { 200struct ubi_attach_req {
189 int32_t ubi_num; 201 int32_t ubi_num;
190 int32_t mtd_num; 202 int32_t mtd_num;
191 int32_t vid_hdr_offset; 203 int32_t vid_hdr_offset;
192 uint8_t padding[12]; 204 int8_t padding[12];
193}; 205};
194 206
195/** 207/**
@@ -251,6 +263,48 @@ struct ubi_rsvol_req {
251} __attribute__ ((packed)); 263} __attribute__ ((packed));
252 264
253/** 265/**
266 * struct ubi_rnvol_req - volumes re-name request.
267 * @count: count of volumes to re-name
268 * @padding1: reserved for future, not used, has to be zeroed
269 * @vol_id: ID of the volume to re-name
270 * @name_len: name length
271 * @padding2: reserved for future, not used, has to be zeroed
272 * @name: new volume name
273 *
 274 * UBI allows re-naming up to %32 volumes at one go. The count of volumes to
 275 * re-name is specified in the @count field. The IDs of the volumes to re-name
 276 * and the new names are specified in the @vol_id and @name fields.
277 *
 278 * The UBI volume re-name operation is atomic, which means that should a power
 279 * cut happen, each volume will have either its old or its new name. So one
 280 * possible use-case of this command is an atomic upgrade. Indeed, to upgrade, say, volumes
281 * A and B one may create temporary volumes %A1 and %B1 with the new contents,
282 * then atomically re-name A1->A and B1->B, in which case old %A and %B will
283 * be removed.
284 *
285 * If it is not desirable to remove old A and B, the re-name request has to
286 * contain 4 entries: A1->A, A->A1, B1->B, B->B1, in which case old A1 and B1
287 * become A and B, and old A and B will become A1 and B1.
288 *
 289 * It is also OK to request: A1->A, A->X, B1->B, B->Y, in which case old A1
290 * and B1 become A and B, and old A and B become X and Y.
291 *
 292 * In other words, when re-naming a volume to an existing volume name, the
 293 * existing volume is removed, unless it is also re-named in the same
 294 * re-name request.
295 */
296struct ubi_rnvol_req {
297 int32_t count;
298 int8_t padding1[12];
299 struct {
300 int32_t vol_id;
301 int16_t name_len;
302 int8_t padding2[2];
303 char name[UBI_MAX_VOLUME_NAME + 1];
304 } ents[UBI_MAX_RNVOL];
305} __attribute__ ((packed));
306
307/**
254 * struct ubi_leb_change_req - a data structure used in atomic logical 308 * struct ubi_leb_change_req - a data structure used in atomic logical
255 * eraseblock change requests. 309 * eraseblock change requests.
256 * @lnum: logical eraseblock number to change 310 * @lnum: logical eraseblock number to change
@@ -261,8 +315,8 @@ struct ubi_rsvol_req {
261struct ubi_leb_change_req { 315struct ubi_leb_change_req {
262 int32_t lnum; 316 int32_t lnum;
263 int32_t bytes; 317 int32_t bytes;
264 uint8_t dtype; 318 int8_t dtype;
265 uint8_t padding[7]; 319 int8_t padding[7];
266} __attribute__ ((packed)); 320} __attribute__ ((packed));
267 321
268#endif /* __UBI_USER_H__ */ 322#endif /* __UBI_USER_H__ */
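From userspace, the new rename ioctl might be driven roughly as below. This is a sketch only: the device node, volume ID and error handling are placeholders, not part of the patch.

/* Userspace sketch: atomically rename volume "A1" (here vol_id 1) to "A". */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>	/* header path assumed for an installed tree */

int example_rename(void)
{
	struct ubi_rnvol_req req;
	int fd = open("/dev/ubi0", O_RDWR);	/* UBI device node assumed */
	int ret;

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.count = 1;
	req.ents[0].vol_id = 1;			/* "A1" in this example */
	strcpy(req.ents[0].name, "A");
	req.ents[0].name_len = strlen("A");

	ret = ioctl(fd, UBI_IOCRNVOL, &req);
	close(fd);
	return ret;
}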
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index dfd8bf66ce27..d364fd594ea4 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -262,7 +262,7 @@ static inline int ieee80211_get_radiotap_len(unsigned char *data)
262 struct ieee80211_radiotap_header *hdr = 262 struct ieee80211_radiotap_header *hdr =
263 (struct ieee80211_radiotap_header *)data; 263 (struct ieee80211_radiotap_header *)data;
264 264
265 return le16_to_cpu(get_unaligned(&hdr->it_len)); 265 return get_unaligned_le16(&hdr->it_len);
266} 266}
267 267
268#endif /* IEEE80211_RADIOTAP_H */ 268#endif /* IEEE80211_RADIOTAP_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 22bb2e7bab1a..df7faf09d66f 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -57,7 +57,9 @@ enum rdma_cm_event_type {
57 RDMA_CM_EVENT_DISCONNECTED, 57 RDMA_CM_EVENT_DISCONNECTED,
58 RDMA_CM_EVENT_DEVICE_REMOVAL, 58 RDMA_CM_EVENT_DEVICE_REMOVAL,
59 RDMA_CM_EVENT_MULTICAST_JOIN, 59 RDMA_CM_EVENT_MULTICAST_JOIN,
60 RDMA_CM_EVENT_MULTICAST_ERROR 60 RDMA_CM_EVENT_MULTICAST_ERROR,
61 RDMA_CM_EVENT_ADDR_CHANGE,
62 RDMA_CM_EVENT_TIMEWAIT_EXIT
61}; 63};
62 64
63enum rdma_port_space { 65enum rdma_port_space {
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index ed64862c4e18..1ccf462b433a 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -37,6 +37,7 @@ struct atmel_lcdfb_info {
37 struct fb_info *info; 37 struct fb_info *info;
38 void __iomem *mmio; 38 void __iomem *mmio;
39 unsigned long irq_base; 39 unsigned long irq_base;
40 struct work_struct task;
40 41
41 unsigned int guard_time; 42 unsigned int guard_time;
42 struct platform_device *pdev; 43 struct platform_device *pdev;
diff --git a/include/video/ili9320.h b/include/video/ili9320.h
new file mode 100644
index 000000000000..e5d1622e3f33
--- /dev/null
+++ b/include/video/ili9320.h
@@ -0,0 +1,201 @@
 1/* include/video/ili9320.h
2 *
3 * ILI9320 LCD controller configuration control.
4 *
5 * Copyright 2007 Simtec Electronics
6 * Ben Dooks <ben@simtec.co.uk>
7 *
8 * http://armlinux.simtec.co.uk/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define ILI9320_REG(x) (x)
16
17#define ILI9320_INDEX ILI9320_REG(0x00)
18
19#define ILI9320_OSCILATION ILI9320_REG(0x00)
20#define ILI9320_DRIVER ILI9320_REG(0x01)
21#define ILI9320_DRIVEWAVE ILI9320_REG(0x02)
22#define ILI9320_ENTRYMODE ILI9320_REG(0x03)
23#define ILI9320_RESIZING ILI9320_REG(0x04)
24#define ILI9320_DISPLAY1 ILI9320_REG(0x07)
25#define ILI9320_DISPLAY2 ILI9320_REG(0x08)
26#define ILI9320_DISPLAY3 ILI9320_REG(0x09)
27#define ILI9320_DISPLAY4 ILI9320_REG(0x0A)
28#define ILI9320_RGB_IF1 ILI9320_REG(0x0C)
29#define ILI9320_FRAMEMAKER ILI9320_REG(0x0D)
30#define ILI9320_RGB_IF2 ILI9320_REG(0x0F)
31
32#define ILI9320_POWER1 ILI9320_REG(0x10)
33#define ILI9320_POWER2 ILI9320_REG(0x11)
34#define ILI9320_POWER3 ILI9320_REG(0x12)
35#define ILI9320_POWER4 ILI9320_REG(0x13)
36#define ILI9320_GRAM_HORIZ_ADDR ILI9320_REG(0x20)
37#define ILI9320_GRAM_VERT_ADD ILI9320_REG(0x21)
38#define ILI9320_POWER7 ILI9320_REG(0x29)
39#define ILI9320_FRAME_RATE_COLOUR ILI9320_REG(0x2B)
40
41#define ILI9320_GAMMA1 ILI9320_REG(0x30)
42#define ILI9320_GAMMA2 ILI9320_REG(0x31)
43#define ILI9320_GAMMA3 ILI9320_REG(0x32)
44#define ILI9320_GAMMA4 ILI9320_REG(0x35)
45#define ILI9320_GAMMA5 ILI9320_REG(0x36)
46#define ILI9320_GAMMA6 ILI9320_REG(0x37)
47#define ILI9320_GAMMA7 ILI9320_REG(0x38)
48#define ILI9320_GAMMA8 ILI9320_REG(0x39)
49#define ILI9320_GAMMA9 ILI9320_REG(0x3C)
50#define ILI9320_GAMMA10 ILI9320_REG(0x3D)
51
52#define ILI9320_HORIZ_START ILI9320_REG(0x50)
53#define ILI9320_HORIZ_END ILI9320_REG(0x51)
54#define ILI9320_VERT_START ILI9320_REG(0x52)
55#define ILI9320_VERT_END ILI9320_REG(0x53)
56
57#define ILI9320_DRIVER2 ILI9320_REG(0x60)
58#define ILI9320_BASE_IMAGE ILI9320_REG(0x61)
59#define ILI9320_VERT_SCROLL ILI9320_REG(0x6a)
60
61#define ILI9320_PARTIAL1_POSITION ILI9320_REG(0x80)
62#define ILI9320_PARTIAL1_START ILI9320_REG(0x81)
63#define ILI9320_PARTIAL1_END ILI9320_REG(0x82)
64#define ILI9320_PARTIAL2_POSITION ILI9320_REG(0x83)
65#define ILI9320_PARTIAL2_START ILI9320_REG(0x84)
66#define ILI9320_PARTIAL2_END ILI9320_REG(0x85)
67
68#define ILI9320_INTERFACE1 ILI9320_REG(0x90)
69#define ILI9320_INTERFACE2 ILI9320_REG(0x92)
70#define ILI9320_INTERFACE3 ILI9320_REG(0x93)
71#define ILI9320_INTERFACE4 ILI9320_REG(0x95)
72#define ILI9320_INTERFACE5 ILI9320_REG(0x97)
73#define ILI9320_INTERFACE6 ILI9320_REG(0x98)
74
75/* Register contents definitions. */
76
77#define ILI9320_OSCILATION_OSC (1 << 0)
78
79#define ILI9320_DRIVER_SS (1 << 8)
80#define ILI9320_DRIVER_SM (1 << 10)
81
82#define ILI9320_DRIVEWAVE_EOR (1 << 8)
83#define ILI9320_DRIVEWAVE_BC (1 << 9)
84#define ILI9320_DRIVEWAVE_MUSTSET (1 << 10)
85
86#define ILI9320_ENTRYMODE_AM (1 << 3)
87#define ILI9320_ENTRYMODE_ID(x) ((x) << 4)
88#define ILI9320_ENTRYMODE_ORG (1 << 7)
89#define ILI9320_ENTRYMODE_HWM (1 << 8)
90#define ILI9320_ENTRYMODE_BGR (1 << 12)
91#define ILI9320_ENTRYMODE_DFM (1 << 14)
92#define ILI9320_ENTRYMODE_TRI (1 << 15)
93
94
95#define ILI9320_RESIZING_RSZ(x) ((x) << 0)
96#define ILI9320_RESIZING_RCH(x) ((x) << 4)
97#define ILI9320_RESIZING_RCV(x) ((x) << 8)
98
99
100#define ILI9320_DISPLAY1_D(x) ((x) << 0)
101#define ILI9320_DISPLAY1_CL (1 << 3)
102#define ILI9320_DISPLAY1_DTE (1 << 4)
103#define ILI9320_DISPLAY1_GON (1 << 5)
104#define ILI9320_DISPLAY1_BASEE (1 << 8)
105#define ILI9320_DISPLAY1_PTDE(x) ((x) << 12)
106
107
108#define ILI9320_DISPLAY2_BP(x) ((x) << 0)
109#define ILI9320_DISPLAY2_FP(x) ((x) << 8)
110
111
112#define ILI9320_RGBIF1_RIM_RGB18 (0 << 0)
113#define ILI9320_RGBIF1_RIM_RGB16 (1 << 0)
114#define ILI9320_RGBIF1_RIM_RGB6 (2 << 0)
115
116#define ILI9320_RGBIF1_CLK_INT (0 << 4)
117#define ILI9320_RGBIF1_CLK_RGBIF (1 << 4)
118#define ILI9320_RGBIF1_CLK_VSYNC (2 << 4)
119
120#define ILI9320_RGBIF1_RM (1 << 8)
121
122#define ILI9320_RGBIF1_ENC_FRAMES(x) (((x) - 1)<< 13)
123
124#define ILI9320_RGBIF2_DPL (1 << 0)
125#define ILI9320_RGBIF2_EPL (1 << 1)
126#define ILI9320_RGBIF2_HSPL (1 << 3)
127#define ILI9320_RGBIF2_VSPL (1 << 4)
128
129
130#define ILI9320_POWER1_SLP (1 << 1)
131#define ILI9320_POWER1_DSTB (1 << 2)
132#define ILI9320_POWER1_AP(x) ((x) << 4)
133#define ILI9320_POWER1_APE (1 << 7)
134#define ILI9320_POWER1_BT(x) ((x) << 8)
135#define ILI9320_POWER1_SAP (1 << 12)
136
137
138#define ILI9320_POWER2_VC(x) ((x) << 0)
139#define ILI9320_POWER2_DC0(x) ((x) << 4)
140#define ILI9320_POWER2_DC1(x) ((x) << 8)
141
142
143#define ILI9320_POWER3_VRH(x) ((x) << 0)
144#define ILI9320_POWER3_PON (1 << 4)
145#define ILI9320_POWER3_VCMR (1 << 8)
146
147
148#define ILI9320_POWER4_VREOUT(x) ((x) << 8)
149
150
151#define ILI9320_DRIVER2_SCNL(x) ((x) << 0)
152#define ILI9320_DRIVER2_NL(x) ((x) << 8)
153#define ILI9320_DRIVER2_GS (1 << 15)
154
155
156#define ILI9320_BASEIMAGE_REV (1 << 0)
157#define ILI9320_BASEIMAGE_VLE (1 << 1)
158#define ILI9320_BASEIMAGE_NDL (1 << 2)
159
160
161#define ILI9320_INTERFACE4_RTNE(x) (x)
162#define ILI9320_INTERFACE4_DIVE(x) ((x) << 8)
163
164/* SPI interface definitions */
165
166#define ILI9320_SPI_IDCODE (0x70)
167#define ILI9320_SPI_ID(x) ((x) << 2)
168#define ILI9320_SPI_READ (0x01)
169#define ILI9320_SPI_WRITE (0x00)
170#define ILI9320_SPI_DATA (0x02)
171#define ILI9320_SPI_INDEX (0x00)
172
173/* platform data to pass configuration from lcd */
174
175enum ili9320_suspend {
176 ILI9320_SUSPEND_OFF,
177 ILI9320_SUSPEND_DEEP,
178};
179
180struct ili9320_platdata {
181 unsigned short hsize;
182 unsigned short vsize;
183
184 enum ili9320_suspend suspend;
185
186 /* set the reset line, 0 = reset asserted, 1 = normal */
187 void (*reset)(unsigned int val);
188
189 unsigned short entry_mode;
190 unsigned short display2;
191 unsigned short display3;
192 unsigned short display4;
193 unsigned short rgb_if1;
194 unsigned short rgb_if2;
195 unsigned short interface2;
196 unsigned short interface3;
197 unsigned short interface4;
198 unsigned short interface5;
199 unsigned short interface6;
200};
201
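Board code would typically hand an ili9320_platdata like the following to the driver. The panel geometry, entry-mode bits and reset handling are board-specific assumptions chosen for illustration:

#include <video/ili9320.h>

/* Sketch: board-specific reset hook, 0 = reset asserted, 1 = normal. */
static void example_lcd_reset(unsigned int val)
{
	/* drive the panel reset GPIO here (board specific) */
}

static struct ili9320_platdata example_lcd = {
	.hsize		= 240,
	.vsize		= 320,
	.suspend	= ILI9320_SUSPEND_DEEP,
	.reset		= example_lcd_reset,
	.entry_mode	= ILI9320_ENTRYMODE_ID(3) | ILI9320_ENTRYMODE_BGR,
};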
diff --git a/include/video/neomagic.h b/include/video/neomagic.h
index a9e118a1cd16..38910da0ae59 100644
--- a/include/video/neomagic.h
+++ b/include/video/neomagic.h
@@ -90,23 +90,6 @@
90#define PCI_CHIP_NM2360 0x0006 90#define PCI_CHIP_NM2360 0x0006
91#define PCI_CHIP_NM2380 0x0016 91#define PCI_CHIP_NM2380 0x0016
92 92
93
94struct xtimings {
95 unsigned int pixclock;
96 unsigned int HDisplay;
97 unsigned int HSyncStart;
98 unsigned int HSyncEnd;
99 unsigned int HTotal;
100 unsigned int VDisplay;
101 unsigned int VSyncStart;
102 unsigned int VSyncEnd;
103 unsigned int VTotal;
104 unsigned int sync;
105 int dblscan;
106 int interlaced;
107};
108
109
110/* --------------------------------------------------------------------- */ 93/* --------------------------------------------------------------------- */
111 94
112typedef volatile struct { 95typedef volatile struct {
diff --git a/include/video/platform_lcd.h b/include/video/platform_lcd.h
new file mode 100644
index 000000000000..ad3bdfe743b2
--- /dev/null
+++ b/include/video/platform_lcd.h
@@ -0,0 +1,21 @@
1/* include/video/platform_lcd.h
2 *
3 * Copyright 2008 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Generic platform-device LCD power control interface.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12*/
13
14struct plat_lcd_data;
15struct fb_info;
16
17struct plat_lcd_data {
18 void (*set_power)(struct plat_lcd_data *, unsigned int power);
19 int (*match_fb)(struct plat_lcd_data *, struct fb_info *);
20};
21
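A board would supply the power hook roughly as follows; the GPIO detail is a placeholder, and match_fb is assumed to be optional when only one framebuffer is registered:

#include <video/platform_lcd.h>

/* Sketch: minimal board-side power control for the platform LCD driver. */
static void example_lcd_set_power(struct plat_lcd_data *pd, unsigned int power)
{
	/* gpio_set_value(EXAMPLE_LCD_POWER_GPIO, power); -- board specific */
}

static struct plat_lcd_data example_lcd_power = {
	.set_power	= example_lcd_set_power,
	/* .match_fb left NULL: assumed acceptable with a single framebuffer */
};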
diff --git a/include/video/trident.h b/include/video/trident.h
index 200be2551681..b6ce19d1b61b 100644
--- a/include/video/trident.h
+++ b/include/video/trident.h
@@ -4,9 +4,9 @@
4#endif 4#endif
5 5
6#if TRIDENTFB_DEBUG 6#if TRIDENTFB_DEBUG
7#define debug(f,a...) printk("%s:" f, __FUNCTION__ , ## a);mdelay(1000); 7#define debug(f, a...) printk("%s:" f, __func__ , ## a);
8#else 8#else
9#define debug(f,a...) 9#define debug(f, a...)
10#endif 10#endif
11 11
12#define output(f, a...) pr_info("tridentfb: " f, ## a) 12#define output(f, a...) pr_info("tridentfb: " f, ## a)
@@ -24,7 +24,9 @@
24#define CYBER9397DVD 0x939A 24#define CYBER9397DVD 0x939A
25#define CYBER9520 0x9520 25#define CYBER9520 0x9520
26#define CYBER9525DVD 0x9525 26#define CYBER9525DVD 0x9525
27#define TGUI9440 0x9440
27#define TGUI9660 0x9660 28#define TGUI9660 0x9660
29#define PROVIDIA9685 0x9685
28#define IMAGE975 0x9750 30#define IMAGE975 0x9750
29#define IMAGE985 0x9850 31#define IMAGE985 0x9850
30#define BLADE3D 0x9880 32#define BLADE3D 0x9880
@@ -39,36 +41,11 @@
39#define CYBERBLADEXPm8 0x9910 41#define CYBERBLADEXPm8 0x9910
40#define CYBERBLADEXPm16 0x9930 42#define CYBERBLADEXPm16 0x9930
41 43
42/* acceleration families */
43#define IMAGE 0
44#define BLADE 1
45#define XP 2
46
47#define is_image(id)
48#define is_xp(id) ((id == CYBERBLADEXPAi1) ||\
49 (id == CYBERBLADEXPm8) ||\
50 (id == CYBERBLADEXPm16))
51
52#define is_blade(id) ((id == BLADE3D) ||\
53 (id == CYBERBLADEE4) ||\
54 (id == CYBERBLADEi7) ||\
55 (id == CYBERBLADEi7D) ||\
56 (id == CYBERBLADEi1) ||\
57 (id == CYBERBLADEi1D) ||\
58 (id == CYBERBLADEAi1) ||\
59 (id == CYBERBLADEAi1D))
60
61/* these defines are for 'lcd' variable */ 44/* these defines are for 'lcd' variable */
62#define LCD_STRETCH 0 45#define LCD_STRETCH 0
63#define LCD_CENTER 1 46#define LCD_CENTER 1
64#define LCD_BIOS 2 47#define LCD_BIOS 2
65 48
66/* display types */
67#define DISPLAY_CRT 0
68#define DISPLAY_FP 1
69
70#define flatpanel (displaytype == DISPLAY_FP)
71
72/* General Registers */ 49/* General Registers */
73#define SPR 0x1F /* Software Programming Register (videoram) */ 50#define SPR 0x1F /* Software Programming Register (videoram) */
74 51
@@ -88,33 +65,7 @@
88#define SKey 0x37 65#define SKey 0x37
89#define SPKey 0x57 66#define SPKey 0x57
90 67
91/* 0x3x4 */
92#define CRTHTotal 0x00
93#define CRTHDispEnd 0x01
94#define CRTHBlankStart 0x02
95#define CRTHBlankEnd 0x03
96#define CRTHSyncStart 0x04
97#define CRTHSyncEnd 0x05
98
99#define CRTVTotal 0x06
100#define CRTVDispEnd 0x12
101#define CRTVBlankStart 0x15
102#define CRTVBlankEnd 0x16
103#define CRTVSyncStart 0x10
104#define CRTVSyncEnd 0x11
105
106#define CRTOverflow 0x07
107#define CRTPRowScan 0x08
108#define CRTMaxScanLine 0x09
109#define CRTModeControl 0x17
110#define CRTLineCompare 0x18
111
112/* 3x4 */ 68/* 3x4 */
113#define StartAddrHigh 0x0C
114#define StartAddrLow 0x0D
115#define Offset 0x13
116#define Underline 0x14
117#define CRTCMode 0x17
118#define CRTCModuleTest 0x1E 69#define CRTCModuleTest 0x1E
119#define FIFOControl 0x20 70#define FIFOControl 0x20
120#define LinearAddReg 0x21 71#define LinearAddReg 0x21
@@ -173,3 +124,23 @@
173#define BiosMode 0x5c 124#define BiosMode 0x5c
174#define BiosReg 0x5d 125#define BiosReg 0x5d
175 126
127/* Graphics Engine */
128#define STATUS 0x2120
129#define OLDCMD 0x2124
130#define DRAWFL 0x2128
131#define OLDCLR 0x212C
132#define OLDDST 0x2138
133#define OLDSRC 0x213C
134#define OLDDIM 0x2140
135#define CMD 0x2144
136#define ROP 0x2148
137#define COLOR 0x2160
138#define BGCOLOR 0x2164
139#define SRC1 0x2100
140#define SRC2 0x2104
141#define DST1 0x2108
142#define DST2 0x210C
143
144#define ROP_S 0xCC
145#define ROP_P 0xF0
146#define ROP_X 0x66
diff --git a/init/do_mounts.c b/init/do_mounts.c
index a1de1bf3d6b9..f769fac4f4c0 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -12,6 +12,7 @@
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/initrd.h>
15 16
16#include <linux/nfs_fs.h> 17#include <linux/nfs_fs.h>
17#include <linux/nfs_fs_sb.h> 18#include <linux/nfs_fs_sb.h>
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 46dfd64ae8fb..fedef93b586f 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -10,8 +10,6 @@
10 10
11#include "do_mounts.h" 11#include "do_mounts.h"
12 12
13#define BUILD_CRAMDISK
14
15int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */ 13int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */
16 14
17static int __init prompt_ramdisk(char *str) 15static int __init prompt_ramdisk(char *str)
@@ -162,14 +160,8 @@ int __init rd_load_image(char *from)
162 goto done; 160 goto done;
163 161
164 if (nblocks == 0) { 162 if (nblocks == 0) {
165#ifdef BUILD_CRAMDISK
166 if (crd_load(in_fd, out_fd) == 0) 163 if (crd_load(in_fd, out_fd) == 0)
167 goto successful_load; 164 goto successful_load;
168#else
169 printk(KERN_NOTICE
170 "RAMDISK: Kernel does not support compressed "
171 "RAM disk images\n");
172#endif
173 goto done; 165 goto done;
174 } 166 }
175 167
@@ -267,8 +259,6 @@ int __init rd_load_disk(int n)
267 return rd_load_image("/dev/root"); 259 return rd_load_image("/dev/root");
268} 260}
269 261
270#ifdef BUILD_CRAMDISK
271
272/* 262/*
273 * gzip declarations 263 * gzip declarations
274 */ 264 */
@@ -313,32 +303,11 @@ static int crd_infd, crd_outfd;
313 303
314static int __init fill_inbuf(void); 304static int __init fill_inbuf(void);
315static void __init flush_window(void); 305static void __init flush_window(void);
316static void __init *malloc(size_t size);
317static void __init free(void *where);
318static void __init error(char *m); 306static void __init error(char *m);
319static void __init gzip_mark(void **);
320static void __init gzip_release(void **);
321
322#include "../lib/inflate.c"
323 307
324static void __init *malloc(size_t size) 308#define NO_INFLATE_MALLOC
325{
326 return kmalloc(size, GFP_KERNEL);
327}
328
329static void __init free(void *where)
330{
331 kfree(where);
332}
333
334static void __init gzip_mark(void **ptr)
335{
336}
337
338static void __init gzip_release(void **ptr)
339{
340}
341 309
310#include "../lib/inflate.c"
342 311
343/* =========================================================================== 312/* ===========================================================================
344 * Fill the input buffer. This is called only when the buffer is empty 313 * Fill the input buffer. This is called only when the buffer is empty
@@ -425,5 +394,3 @@ static int __init crd_load(int in_fd, int out_fd)
425 kfree(window); 394 kfree(window);
426 return result; 395 return result;
427} 396}
428
429#endif /* BUILD_CRAMDISK */
diff --git a/init/initramfs.c b/init/initramfs.c
index 8eeeccb328c9..644fc01ad5f0 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -14,16 +14,6 @@ static void __init error(char *x)
14 message = x; 14 message = x;
15} 15}
16 16
17static void __init *malloc(size_t size)
18{
19 return kmalloc(size, GFP_KERNEL);
20}
21
22static void __init free(void *where)
23{
24 kfree(where);
25}
26
27/* link hash */ 17/* link hash */
28 18
29#define N_ALIGN(len) ((((len) + 1) & ~3) + 2) 19#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
@@ -407,18 +397,10 @@ static long bytes_out;
407 397
408static void __init flush_window(void); 398static void __init flush_window(void);
409static void __init error(char *m); 399static void __init error(char *m);
410static void __init gzip_mark(void **);
411static void __init gzip_release(void **);
412 400
413#include "../lib/inflate.c" 401#define NO_INFLATE_MALLOC
414 402
415static void __init gzip_mark(void **ptr) 403#include "../lib/inflate.c"
416{
417}
418
419static void __init gzip_release(void **ptr)
420{
421}
422 404
423/* =========================================================================== 405/* ===========================================================================
424 * Write the output window window[0..outcnt-1] and update crc and bytes_out. 406 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
diff --git a/init/main.c b/init/main.c
index 756eca4b821a..0604cbcaf1e4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -87,8 +87,6 @@ extern void init_IRQ(void);
87extern void fork_init(unsigned long); 87extern void fork_init(unsigned long);
88extern void mca_init(void); 88extern void mca_init(void);
89extern void sbus_init(void); 89extern void sbus_init(void);
90extern void pidhash_init(void);
91extern void pidmap_init(void);
92extern void prio_tree_init(void); 90extern void prio_tree_init(void);
93extern void radix_tree_init(void); 91extern void radix_tree_init(void);
94extern void free_initmem(void); 92extern void free_initmem(void);
@@ -415,6 +413,13 @@ static void __init smp_init(void)
415{ 413{
416 unsigned int cpu; 414 unsigned int cpu;
417 415
416 /*
 417 * Mark the current CPU as active, so that tasks can migrate to it.
 418 * The other CPUs will be handled by cpu_up()/cpu_down().
419 */
420 cpu = smp_processor_id();
421 cpu_set(cpu, cpu_active_map);
422
418 /* FIXME: This should be done in userspace --RR */ 423 /* FIXME: This should be done in userspace --RR */
419 for_each_present_cpu(cpu) { 424 for_each_present_cpu(cpu) {
420 if (num_online_cpus() >= setup_max_cpus) 425 if (num_online_cpus() >= setup_max_cpus)
diff --git a/init/version.c b/init/version.c
index 9d17d70ee02d..52a8b98642b8 100644
--- a/init/version.c
+++ b/init/version.c
@@ -13,10 +13,13 @@
13#include <linux/utsrelease.h> 13#include <linux/utsrelease.h>
14#include <linux/version.h> 14#include <linux/version.h>
15 15
16#ifndef CONFIG_KALLSYMS
16#define version(a) Version_ ## a 17#define version(a) Version_ ## a
17#define version_string(a) version(a) 18#define version_string(a) version(a)
18 19
20extern int version_string(LINUX_VERSION_CODE);
19int version_string(LINUX_VERSION_CODE); 21int version_string(LINUX_VERSION_CODE);
22#endif
20 23
21struct uts_namespace init_uts_ns = { 24struct uts_namespace init_uts_ns = {
22 .kref = { 25 .kref = {
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index d3497465cc0a..69bc85978ba0 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -27,15 +27,17 @@ static void *get_ipc(ctl_table *table)
27} 27}
28 28
29/* 29/*
30 * Routine that is called when a tunable has successfully been changed by 30 * Routine that is called when the file "auto_msgmni" has successfully been
31 * hand and it has a callback routine registered on the ipc namespace notifier 31 * written.
32 * chain: we don't want such tunables to be recomputed anymore upon memory 32 * Two values are allowed:
33 * add/remove or ipc namespace creation/removal. 33 * 0: unregister msgmni's callback routine from the ipc namespace notifier
34 * They can come back to a recomputable state by being set to a <0 value. 34 * chain. This means that msgmni won't be recomputed anymore upon memory
35 * add/remove or ipc namespace creation/removal.
36 * 1: register back the callback routine.
35 */ 37 */
36static void tunable_set_callback(int val) 38static void ipc_auto_callback(int val)
37{ 39{
38 if (val >= 0) 40 if (!val)
39 unregister_ipcns_notifier(current->nsproxy->ipc_ns); 41 unregister_ipcns_notifier(current->nsproxy->ipc_ns);
40 else { 42 else {
41 /* 43 /*
@@ -71,7 +73,12 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
71 rc = proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos); 73 rc = proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos);
72 74
73 if (write && !rc && lenp_bef == *lenp) 75 if (write && !rc && lenp_bef == *lenp)
74 tunable_set_callback(*((int *)(ipc_table.data))); 76 /*
77 * Tunable has successfully been changed by hand. Disable its
78 * automatic adjustment. This simply requires unregistering
79 * the notifiers that trigger recalculation.
80 */
81 unregister_ipcns_notifier(current->nsproxy->ipc_ns);
75 82
76 return rc; 83 return rc;
77} 84}
@@ -87,10 +94,39 @@ static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
87 lenp, ppos); 94 lenp, ppos);
88} 95}
89 96
97static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
98 struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
99{
100 struct ctl_table ipc_table;
101 size_t lenp_bef = *lenp;
102 int oldval;
103 int rc;
104
105 memcpy(&ipc_table, table, sizeof(ipc_table));
106 ipc_table.data = get_ipc(table);
107 oldval = *((int *)(ipc_table.data));
108
109 rc = proc_dointvec_minmax(&ipc_table, write, filp, buffer, lenp, ppos);
110
111 if (write && !rc && lenp_bef == *lenp) {
112 int newval = *((int *)(ipc_table.data));
113 /*
114 * The file "auto_msgmni" has correctly been set.
115 * React by (un)registering the corresponding tunable, if the
116 * value has changed.
117 */
118 if (newval != oldval)
119 ipc_auto_callback(newval);
120 }
121
122 return rc;
123}
124
90#else 125#else
91#define proc_ipc_doulongvec_minmax NULL 126#define proc_ipc_doulongvec_minmax NULL
92#define proc_ipc_dointvec NULL 127#define proc_ipc_dointvec NULL
93#define proc_ipc_callback_dointvec NULL 128#define proc_ipc_callback_dointvec NULL
129#define proc_ipcauto_dointvec_minmax NULL
94#endif 130#endif
95 131
96#ifdef CONFIG_SYSCTL_SYSCALL 132#ifdef CONFIG_SYSCTL_SYSCALL
@@ -142,14 +178,11 @@ static int sysctl_ipc_registered_data(ctl_table *table, int __user *name,
142 rc = sysctl_ipc_data(table, name, nlen, oldval, oldlenp, newval, 178 rc = sysctl_ipc_data(table, name, nlen, oldval, oldlenp, newval,
143 newlen); 179 newlen);
144 180
145 if (newval && newlen && rc > 0) { 181 if (newval && newlen && rc > 0)
146 /* 182 /*
147 * Tunable has successfully been changed from userland 183 * Tunable has successfully been changed from userland
148 */ 184 */
149 int *data = get_ipc(table); 185 unregister_ipcns_notifier(current->nsproxy->ipc_ns);
150
151 tunable_set_callback(*data);
152 }
153 186
154 return rc; 187 return rc;
155} 188}
@@ -158,6 +191,9 @@ static int sysctl_ipc_registered_data(ctl_table *table, int __user *name,
158#define sysctl_ipc_registered_data NULL 191#define sysctl_ipc_registered_data NULL
159#endif 192#endif
160 193
194static int zero;
195static int one = 1;
196
161static struct ctl_table ipc_kern_table[] = { 197static struct ctl_table ipc_kern_table[] = {
162 { 198 {
163 .ctl_name = KERN_SHMMAX, 199 .ctl_name = KERN_SHMMAX,
@@ -222,6 +258,16 @@ static struct ctl_table ipc_kern_table[] = {
222 .proc_handler = proc_ipc_dointvec, 258 .proc_handler = proc_ipc_dointvec,
223 .strategy = sysctl_ipc_data, 259 .strategy = sysctl_ipc_data,
224 }, 260 },
261 {
262 .ctl_name = CTL_UNNUMBERED,
263 .procname = "auto_msgmni",
264 .data = &init_ipc_ns.auto_msgmni,
265 .maxlen = sizeof(int),
266 .mode = 0644,
267 .proc_handler = proc_ipcauto_dointvec_minmax,
268 .extra1 = &zero,
269 .extra2 = &one,
270 },
225 {} 271 {}
226}; 272};
227 273
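From userspace the new tunable could be exercised like this (sketch only; the path follows the kernel.* sysctl table above):

#include <stdio.h>

/* Freeze msgmni at its current value by disabling automatic recomputation;
 * writing "1" re-enables it. */
int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/auto_msgmni", "w");

	if (!f)
		return 1;
	fputs("0\n", f);
	return fclose(f) ? 1 : 0;
}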
diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c
index 70ff09183f7b..b9b31a4f77e1 100644
--- a/ipc/ipcns_notifier.c
+++ b/ipc/ipcns_notifier.c
@@ -55,25 +55,35 @@ static int ipcns_callback(struct notifier_block *self,
55 55
56int register_ipcns_notifier(struct ipc_namespace *ns) 56int register_ipcns_notifier(struct ipc_namespace *ns)
57{ 57{
58 int rc;
59
58 memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); 60 memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
59 ns->ipcns_nb.notifier_call = ipcns_callback; 61 ns->ipcns_nb.notifier_call = ipcns_callback;
60 ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; 62 ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
61 return blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb); 63 rc = blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb);
64 if (!rc)
65 ns->auto_msgmni = 1;
66 return rc;
62} 67}
63 68
64int cond_register_ipcns_notifier(struct ipc_namespace *ns) 69int cond_register_ipcns_notifier(struct ipc_namespace *ns)
65{ 70{
71 int rc;
72
66 memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); 73 memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
67 ns->ipcns_nb.notifier_call = ipcns_callback; 74 ns->ipcns_nb.notifier_call = ipcns_callback;
68 ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; 75 ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
69 return blocking_notifier_chain_cond_register(&ipcns_chain, 76 rc = blocking_notifier_chain_cond_register(&ipcns_chain,
70 &ns->ipcns_nb); 77 &ns->ipcns_nb);
78 if (!rc)
79 ns->auto_msgmni = 1;
80 return rc;
71} 81}
72 82
73int unregister_ipcns_notifier(struct ipc_namespace *ns) 83void unregister_ipcns_notifier(struct ipc_namespace *ns)
74{ 84{
75 return blocking_notifier_chain_unregister(&ipcns_chain, 85 blocking_notifier_chain_unregister(&ipcns_chain, &ns->ipcns_nb);
76 &ns->ipcns_nb); 86 ns->auto_msgmni = 0;
77} 87}
78 88
79int ipcns_notify(unsigned long val) 89int ipcns_notify(unsigned long val)
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 3e84b958186b..1fdc2eb2f6d8 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -314,15 +314,11 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
314* through std routines) 314* through std routines)
315*/ 315*/
316static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, 316static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
317 size_t count, loff_t * off) 317 size_t count, loff_t *off)
318{ 318{
319 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); 319 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
320 char buffer[FILENT_SIZE]; 320 char buffer[FILENT_SIZE];
321 size_t slen; 321 ssize_t ret;
322 loff_t o;
323
324 if (!count)
325 return 0;
326 322
327 spin_lock(&info->lock); 323 spin_lock(&info->lock);
328 snprintf(buffer, sizeof(buffer), 324 snprintf(buffer, sizeof(buffer),
@@ -335,21 +331,14 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
335 pid_vnr(info->notify_owner)); 331 pid_vnr(info->notify_owner));
336 spin_unlock(&info->lock); 332 spin_unlock(&info->lock);
337 buffer[sizeof(buffer)-1] = '\0'; 333 buffer[sizeof(buffer)-1] = '\0';
338 slen = strlen(buffer)+1;
339
340 o = *off;
341 if (o > slen)
342 return 0;
343
344 if (o + count > slen)
345 count = slen - o;
346 334
347 if (copy_to_user(u_data, buffer + o, count)) 335 ret = simple_read_from_buffer(u_data, count, off, buffer,
348 return -EFAULT; 336 strlen(buffer));
337 if (ret <= 0)
338 return ret;
349 339
350 *off = o + count;
351 filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME; 340 filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
352 return count; 341 return ret;
353} 342}
354 343
355static int mqueue_flush_file(struct file *filp, fl_owner_t id) 344static int mqueue_flush_file(struct file *filp, fl_owner_t id)
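simple_read_from_buffer() now does the offset and short-read bookkeeping the old code did by hand (bounds check, copy_to_user(), *off update). A minimal sketch of the helper's usage (names invented for illustration):

#include <linux/fs.h>

/* Sketch: expose a fixed in-kernel string through a read() handler. */
static ssize_t example_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	static const char msg[] = "hello\n";

	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}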
diff --git a/ipc/sem.c b/ipc/sem.c
index e9418df5ff3e..bf1bc36cb7ee 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -272,9 +272,8 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
272 ns->used_sems += nsems; 272 ns->used_sems += nsems;
273 273
274 sma->sem_base = (struct sem *) &sma[1]; 274 sma->sem_base = (struct sem *) &sma[1];
275 /* sma->sem_pending = NULL; */ 275 INIT_LIST_HEAD(&sma->sem_pending);
276 sma->sem_pending_last = &sma->sem_pending; 276 INIT_LIST_HEAD(&sma->list_id);
277 /* sma->undo = NULL; */
278 sma->sem_nsems = nsems; 277 sma->sem_nsems = nsems;
279 sma->sem_ctime = get_seconds(); 278 sma->sem_ctime = get_seconds();
280 sem_unlock(sma); 279 sem_unlock(sma);
@@ -331,38 +330,6 @@ asmlinkage long sys_semget(key_t key, int nsems, int semflg)
331 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); 330 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
332} 331}
333 332
334/* Manage the doubly linked list sma->sem_pending as a FIFO:
335 * insert new queue elements at the tail sma->sem_pending_last.
336 */
337static inline void append_to_queue (struct sem_array * sma,
338 struct sem_queue * q)
339{
340 *(q->prev = sma->sem_pending_last) = q;
341 *(sma->sem_pending_last = &q->next) = NULL;
342}
343
344static inline void prepend_to_queue (struct sem_array * sma,
345 struct sem_queue * q)
346{
347 q->next = sma->sem_pending;
348 *(q->prev = &sma->sem_pending) = q;
349 if (q->next)
350 q->next->prev = &q->next;
351 else /* sma->sem_pending_last == &sma->sem_pending */
352 sma->sem_pending_last = &q->next;
353}
354
355static inline void remove_from_queue (struct sem_array * sma,
356 struct sem_queue * q)
357{
358 *(q->prev) = q->next;
359 if (q->next)
360 q->next->prev = q->prev;
361 else /* sma->sem_pending_last == &q->next */
362 sma->sem_pending_last = q->prev;
363 q->prev = NULL; /* mark as removed */
364}
365
366/* 333/*
367 * Determine whether a sequence of semaphore operations would succeed 334 * Determine whether a sequence of semaphore operations would succeed
368 * all at once. Return 0 if yes, 1 if need to sleep, else return error code. 335 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
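The hand-rolled FIFO removed above is replaced by a plain list_head, so the generic list API covers append, prepend and removal. A sketch of the equivalent operations, using the field names introduced by this patch (the example_* wrappers are illustrative only):

#include <linux/list.h>
#include <linux/sem.h>

/* Sketch: what append_to_queue/prepend_to_queue/remove_from_queue become. */
static void example_enqueue(struct sem_array *sma, struct sem_queue *q,
			    int alter)
{
	if (alter)
		list_add_tail(&q->list, &sma->sem_pending);	/* FIFO append */
	else
		list_add(&q->list, &sma->sem_pending);		/* prepend */
}

static void example_dequeue(struct sem_queue *q)
{
	list_del(&q->list);
}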
@@ -438,16 +405,15 @@ static void update_queue (struct sem_array * sma)
438 int error; 405 int error;
439 struct sem_queue * q; 406 struct sem_queue * q;
440 407
441 q = sma->sem_pending; 408 q = list_entry(sma->sem_pending.next, struct sem_queue, list);
442 while(q) { 409 while (&q->list != &sma->sem_pending) {
443 error = try_atomic_semop(sma, q->sops, q->nsops, 410 error = try_atomic_semop(sma, q->sops, q->nsops,
444 q->undo, q->pid); 411 q->undo, q->pid);
445 412
446 /* Does q->sleeper still need to sleep? */ 413 /* Does q->sleeper still need to sleep? */
447 if (error <= 0) { 414 if (error <= 0) {
448 struct sem_queue *n; 415 struct sem_queue *n;
449 remove_from_queue(sma,q); 416
450 q->status = IN_WAKEUP;
451 /* 417 /*
452 * Continue scanning. The next operation 418 * Continue scanning. The next operation
453 * that must be checked depends on the type of the 419 * that must be checked depends on the type of the
@@ -458,11 +424,26 @@ static void update_queue (struct sem_array * sma)
458 * for semaphore values to become 0. 424 * for semaphore values to become 0.
459 * - if the operation didn't modify the array, 425 * - if the operation didn't modify the array,
460 * then just continue. 426 * then just continue.
427 * The order of list_del() and reading ->next
428 * is crucial: In the former case, the list_del()
429 * must be done first [because we might be the
430 * first entry in ->sem_pending], in the latter
431 * case the list_del() must be done last
432 * [because the list is invalid after the list_del()]
461 */ 433 */
462 if (q->alter) 434 if (q->alter) {
463 n = sma->sem_pending; 435 list_del(&q->list);
464 else 436 n = list_entry(sma->sem_pending.next,
465 n = q->next; 437 struct sem_queue, list);
438 } else {
439 n = list_entry(q->list.next, struct sem_queue,
440 list);
441 list_del(&q->list);
442 }
443
444 /* wake up the waiting thread */
445 q->status = IN_WAKEUP;
446
466 wake_up_process(q->sleeper); 447 wake_up_process(q->sleeper);
467 /* hands-off: q will disappear immediately after 448 /* hands-off: q will disappear immediately after
468 * writing q->status. 449 * writing q->status.
@@ -471,7 +452,7 @@ static void update_queue (struct sem_array * sma)
471 q->status = error; 452 q->status = error;
472 q = n; 453 q = n;
473 } else { 454 } else {
474 q = q->next; 455 q = list_entry(q->list.next, struct sem_queue, list);
475 } 456 }
476 } 457 }
477} 458}
@@ -491,7 +472,7 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
491 struct sem_queue * q; 472 struct sem_queue * q;
492 473
493 semncnt = 0; 474 semncnt = 0;
494 for (q = sma->sem_pending; q; q = q->next) { 475 list_for_each_entry(q, &sma->sem_pending, list) {
495 struct sembuf * sops = q->sops; 476 struct sembuf * sops = q->sops;
496 int nsops = q->nsops; 477 int nsops = q->nsops;
497 int i; 478 int i;
@@ -503,13 +484,14 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
503 } 484 }
504 return semncnt; 485 return semncnt;
505} 486}
487
506static int count_semzcnt (struct sem_array * sma, ushort semnum) 488static int count_semzcnt (struct sem_array * sma, ushort semnum)
507{ 489{
508 int semzcnt; 490 int semzcnt;
509 struct sem_queue * q; 491 struct sem_queue * q;
510 492
511 semzcnt = 0; 493 semzcnt = 0;
512 for (q = sma->sem_pending; q; q = q->next) { 494 list_for_each_entry(q, &sma->sem_pending, list) {
513 struct sembuf * sops = q->sops; 495 struct sembuf * sops = q->sops;
514 int nsops = q->nsops; 496 int nsops = q->nsops;
515 int i; 497 int i;
@@ -522,35 +504,41 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
522 return semzcnt; 504 return semzcnt;
523} 505}
524 506
507void free_un(struct rcu_head *head)
508{
509 struct sem_undo *un = container_of(head, struct sem_undo, rcu);
510 kfree(un);
511}
512
525/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked 513/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
526 * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex 514 * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
527 * remains locked on exit. 515 * remains locked on exit.
528 */ 516 */
529static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) 517static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
530{ 518{
531 struct sem_undo *un; 519 struct sem_undo *un, *tu;
532 struct sem_queue *q; 520 struct sem_queue *q, *tq;
533 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); 521 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
534 522
535 /* Invalidate the existing undo structures for this semaphore set. 523 /* Free the existing undo structures for this semaphore set. */
536 * (They will be freed without any further action in exit_sem() 524 assert_spin_locked(&sma->sem_perm.lock);
537 * or during the next semop.) 525 list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
538 */ 526 list_del(&un->list_id);
539 for (un = sma->undo; un; un = un->id_next) 527 spin_lock(&un->ulp->lock);
540 un->semid = -1; 528 un->semid = -1;
529 list_del_rcu(&un->list_proc);
530 spin_unlock(&un->ulp->lock);
531 call_rcu(&un->rcu, free_un);
532 }
541 533
542 /* Wake up all pending processes and let them fail with EIDRM. */ 534 /* Wake up all pending processes and let them fail with EIDRM. */
543 q = sma->sem_pending; 535 list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
544 while(q) { 536 list_del(&q->list);
545 struct sem_queue *n; 537
546 /* lazy remove_from_queue: we are killing the whole queue */
547 q->prev = NULL;
548 n = q->next;
549 q->status = IN_WAKEUP; 538 q->status = IN_WAKEUP;
550 wake_up_process(q->sleeper); /* doesn't sleep */ 539 wake_up_process(q->sleeper); /* doesn't sleep */
551 smp_wmb(); 540 smp_wmb();
552 q->status = -EIDRM; /* hands-off q */ 541 q->status = -EIDRM; /* hands-off q */
553 q = n;
554 } 542 }
555 543
556 /* Remove the semaphore set from the IDR */ 544 /* Remove the semaphore set from the IDR */
@@ -763,9 +751,12 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
763 751
764 for (i = 0; i < nsems; i++) 752 for (i = 0; i < nsems; i++)
765 sma->sem_base[i].semval = sem_io[i]; 753 sma->sem_base[i].semval = sem_io[i];
766 for (un = sma->undo; un; un = un->id_next) 754
755 assert_spin_locked(&sma->sem_perm.lock);
756 list_for_each_entry(un, &sma->list_id, list_id) {
767 for (i = 0; i < nsems; i++) 757 for (i = 0; i < nsems; i++)
768 un->semadj[i] = 0; 758 un->semadj[i] = 0;
759 }
769 sma->sem_ctime = get_seconds(); 760 sma->sem_ctime = get_seconds();
770 /* maybe some queued-up processes were waiting for this */ 761 /* maybe some queued-up processes were waiting for this */
771 update_queue(sma); 762 update_queue(sma);
@@ -797,12 +788,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
797 { 788 {
798 int val = arg.val; 789 int val = arg.val;
799 struct sem_undo *un; 790 struct sem_undo *un;
791
800 err = -ERANGE; 792 err = -ERANGE;
801 if (val > SEMVMX || val < 0) 793 if (val > SEMVMX || val < 0)
802 goto out_unlock; 794 goto out_unlock;
803 795
804 for (un = sma->undo; un; un = un->id_next) 796 assert_spin_locked(&sma->sem_perm.lock);
797 list_for_each_entry(un, &sma->list_id, list_id)
805 un->semadj[semnum] = 0; 798 un->semadj[semnum] = 0;
799
806 curr->semval = val; 800 curr->semval = val;
807 curr->sempid = task_tgid_vnr(current); 801 curr->sempid = task_tgid_vnr(current);
808 sma->sem_ctime = get_seconds(); 802 sma->sem_ctime = get_seconds();
@@ -952,6 +946,8 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
952 return -ENOMEM; 946 return -ENOMEM;
953 spin_lock_init(&undo_list->lock); 947 spin_lock_init(&undo_list->lock);
954 atomic_set(&undo_list->refcnt, 1); 948 atomic_set(&undo_list->refcnt, 1);
949 INIT_LIST_HEAD(&undo_list->list_proc);
950
955 current->sysvsem.undo_list = undo_list; 951 current->sysvsem.undo_list = undo_list;
956 } 952 }
957 *undo_listp = undo_list; 953 *undo_listp = undo_list;
@@ -960,25 +956,27 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
960 956
961static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) 957static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
962{ 958{
963 struct sem_undo **last, *un; 959 struct sem_undo *walk;
964 960
965 last = &ulp->proc_list; 961 list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
966 un = *last; 962 if (walk->semid == semid)
967 while(un != NULL) { 963 return walk;
968 if(un->semid==semid)
969 break;
970 if(un->semid==-1) {
971 *last=un->proc_next;
972 kfree(un);
973 } else {
974 last=&un->proc_next;
975 }
976 un=*last;
977 } 964 }
978 return un; 965 return NULL;
979} 966}
980 967
981static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) 968/**
969 * find_alloc_undo - Lookup (and if not present create) undo array
970 * @ns: namespace
971 * @semid: semaphore array id
972 *
973 * The function looks up (and if not present creates) the undo structure.
974 * The size of the undo structure depends on the size of the semaphore
975 * array, thus the alloc path is not that straightforward.
 976 * Lifetime rules: sem_undo is rcu-protected; on success, the function
 977 * returns with rcu_read_lock() held.
978 */
979static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
982{ 980{
983 struct sem_array *sma; 981 struct sem_array *sma;
984 struct sem_undo_list *ulp; 982 struct sem_undo_list *ulp;
@@ -990,13 +988,16 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
990 if (error) 988 if (error)
991 return ERR_PTR(error); 989 return ERR_PTR(error);
992 990
991 rcu_read_lock();
993 spin_lock(&ulp->lock); 992 spin_lock(&ulp->lock);
994 un = lookup_undo(ulp, semid); 993 un = lookup_undo(ulp, semid);
995 spin_unlock(&ulp->lock); 994 spin_unlock(&ulp->lock);
996 if (likely(un!=NULL)) 995 if (likely(un!=NULL))
997 goto out; 996 goto out;
997 rcu_read_unlock();
998 998
999 /* no undo structure around - allocate one. */ 999 /* no undo structure around - allocate one. */
1000 /* step 1: figure out the size of the semaphore array */
1000 sma = sem_lock_check(ns, semid); 1001 sma = sem_lock_check(ns, semid);
1001 if (IS_ERR(sma)) 1002 if (IS_ERR(sma))
1002 return ERR_PTR(PTR_ERR(sma)); 1003 return ERR_PTR(PTR_ERR(sma));
@@ -1004,37 +1005,45 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
1004 nsems = sma->sem_nsems; 1005 nsems = sma->sem_nsems;
1005 sem_getref_and_unlock(sma); 1006 sem_getref_and_unlock(sma);
1006 1007
1008 /* step 2: allocate new undo structure */
1007 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); 1009 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1008 if (!new) { 1010 if (!new) {
1009 sem_putref(sma); 1011 sem_putref(sma);
1010 return ERR_PTR(-ENOMEM); 1012 return ERR_PTR(-ENOMEM);
1011 } 1013 }
1012 new->semadj = (short *) &new[1];
1013 new->semid = semid;
1014 1014
1015 spin_lock(&ulp->lock); 1015 /* step 3: Acquire the lock on semaphore array */
1016 un = lookup_undo(ulp, semid);
1017 if (un) {
1018 spin_unlock(&ulp->lock);
1019 kfree(new);
1020 sem_putref(sma);
1021 goto out;
1022 }
1023 sem_lock_and_putref(sma); 1016 sem_lock_and_putref(sma);
1024 if (sma->sem_perm.deleted) { 1017 if (sma->sem_perm.deleted) {
1025 sem_unlock(sma); 1018 sem_unlock(sma);
1026 spin_unlock(&ulp->lock);
1027 kfree(new); 1019 kfree(new);
1028 un = ERR_PTR(-EIDRM); 1020 un = ERR_PTR(-EIDRM);
1029 goto out; 1021 goto out;
1030 } 1022 }
1031 new->proc_next = ulp->proc_list; 1023 spin_lock(&ulp->lock);
1032 ulp->proc_list = new; 1024
1033 new->id_next = sma->undo; 1025 /*
1034 sma->undo = new; 1026 * step 4: check for races: did someone else allocate the undo struct?
1035 sem_unlock(sma); 1027 */
1028 un = lookup_undo(ulp, semid);
1029 if (un) {
1030 kfree(new);
1031 goto success;
1032 }
1033 /* step 5: initialize & link new undo structure */
1034 new->semadj = (short *) &new[1];
1035 new->ulp = ulp;
1036 new->semid = semid;
1037 assert_spin_locked(&ulp->lock);
1038 list_add_rcu(&new->list_proc, &ulp->list_proc);
1039 assert_spin_locked(&sma->sem_perm.lock);
1040 list_add(&new->list_id, &sma->list_id);
1036 un = new; 1041 un = new;
1042
1043success:
1037 spin_unlock(&ulp->lock); 1044 spin_unlock(&ulp->lock);
1045 rcu_read_lock();
1046 sem_unlock(sma);
1038out: 1047out:
1039 return un; 1048 return un;
1040} 1049}
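find_alloc_undo() follows the classic allocate-then-recheck pattern: the GFP_KERNEL allocation may sleep, so it is done with no locks held and the lookup is repeated afterwards to catch a racing insert. A self-contained sketch of the same pattern on a made-up cache (everything named example_* is hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj { int id; struct list_head node; };
static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

static struct example_obj *example_lookup(int id)
{
	struct example_obj *o;

	list_for_each_entry(o, &example_list, node)
		if (o->id == id)
			return o;
	return NULL;
}

static struct example_obj *example_get(int id)
{
	struct example_obj *o, *new;

	spin_lock(&example_lock);
	o = example_lookup(id);
	spin_unlock(&example_lock);
	if (o)
		return o;

	new = kzalloc(sizeof(*new), GFP_KERNEL);	/* may sleep: no lock held */
	if (!new)
		return NULL;

	spin_lock(&example_lock);
	o = example_lookup(id);				/* recheck for a racing insert */
	if (o) {
		kfree(new);				/* somebody else won the race */
	} else {
		new->id = id;
		list_add(&new->node, &example_list);
		o = new;
	}
	spin_unlock(&example_lock);
	return o;
}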
@@ -1090,9 +1099,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
1090 alter = 1; 1099 alter = 1;
1091 } 1100 }
1092 1101
1093retry_undos:
1094 if (undos) { 1102 if (undos) {
1095 un = find_undo(ns, semid); 1103 un = find_alloc_undo(ns, semid);
1096 if (IS_ERR(un)) { 1104 if (IS_ERR(un)) {
1097 error = PTR_ERR(un); 1105 error = PTR_ERR(un);
1098 goto out_free; 1106 goto out_free;
@@ -1102,19 +1110,37 @@ retry_undos:
1102 1110
1103 sma = sem_lock_check(ns, semid); 1111 sma = sem_lock_check(ns, semid);
1104 if (IS_ERR(sma)) { 1112 if (IS_ERR(sma)) {
1113 if (un)
1114 rcu_read_unlock();
1105 error = PTR_ERR(sma); 1115 error = PTR_ERR(sma);
1106 goto out_free; 1116 goto out_free;
1107 } 1117 }
1108 1118
1109 /* 1119 /*
1110 * semid identifiers are not unique - find_undo may have 1120 * semid identifiers are not unique - find_alloc_undo may have
1111 * allocated an undo structure, it was invalidated by an RMID 1121 * allocated an undo structure, it was invalidated by an RMID
1112 * and now a new array has received the same id. Check and retry. 1122 * and now a new array has received the same id. Check and fail.
1123 * This case can be detected by checking un->semid. The existence of
1124 * "un" itself is guaranteed by rcu.
1113 */ 1125 */
1114 if (un && un->semid == -1) { 1126 error = -EIDRM;
1115 sem_unlock(sma); 1127 if (un) {
1116 goto retry_undos; 1128 if (un->semid == -1) {
1129 rcu_read_unlock();
1130 goto out_unlock_free;
1131 } else {
1132 /*
1133 * rcu lock can be released, "un" cannot disappear:
1134 * - sem_lock is acquired, thus IPC_RMID is
1135 * impossible.
1136 * - exit_sem is impossible, it always operates on
1137 * current (or a dead task).
1138 */
1139
1140 rcu_read_unlock();
1141 }
1117 } 1142 }
1143
1118 error = -EFBIG; 1144 error = -EFBIG;
1119 if (max >= sma->sem_nsems) 1145 if (max >= sma->sem_nsems)
1120 goto out_unlock_free; 1146 goto out_unlock_free;
@@ -1138,17 +1164,15 @@ retry_undos:
1138 * task into the pending queue and go to sleep. 1164 * task into the pending queue and go to sleep.
1139 */ 1165 */
1140 1166
1141 queue.sma = sma;
1142 queue.sops = sops; 1167 queue.sops = sops;
1143 queue.nsops = nsops; 1168 queue.nsops = nsops;
1144 queue.undo = un; 1169 queue.undo = un;
1145 queue.pid = task_tgid_vnr(current); 1170 queue.pid = task_tgid_vnr(current);
1146 queue.id = semid;
1147 queue.alter = alter; 1171 queue.alter = alter;
1148 if (alter) 1172 if (alter)
1149 append_to_queue(sma ,&queue); 1173 list_add_tail(&queue.list, &sma->sem_pending);
1150 else 1174 else
1151 prepend_to_queue(sma ,&queue); 1175 list_add(&queue.list, &sma->sem_pending);
1152 1176
1153 queue.status = -EINTR; 1177 queue.status = -EINTR;
1154 queue.sleeper = current; 1178 queue.sleeper = current;
@@ -1174,7 +1198,6 @@ retry_undos:
1174 1198
1175 sma = sem_lock(ns, semid); 1199 sma = sem_lock(ns, semid);
1176 if (IS_ERR(sma)) { 1200 if (IS_ERR(sma)) {
1177 BUG_ON(queue.prev != NULL);
1178 error = -EIDRM; 1201 error = -EIDRM;
1179 goto out_free; 1202 goto out_free;
1180 } 1203 }
@@ -1192,7 +1215,7 @@ retry_undos:
1192 */ 1215 */
1193 if (timeout && jiffies_left == 0) 1216 if (timeout && jiffies_left == 0)
1194 error = -EAGAIN; 1217 error = -EAGAIN;
1195 remove_from_queue(sma,&queue); 1218 list_del(&queue.list);
1196 goto out_unlock_free; 1219 goto out_unlock_free;
1197 1220
1198out_unlock_free: 1221out_unlock_free:
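
The hunks above drop the private append_to_queue()/prepend_to_queue()/remove_from_queue() helpers in favour of a struct list_head embedded in struct sem_queue and the generic list primitives: altering operations go to the tail of sma->sem_pending, non-altering ones to the head, and removal is a plain list_del(). The self-contained sketch below re-implements just enough of the kernel's circular list to show the idea; it is not the <linux/list.h> code.

/*
 * Tiny stand-in for the kernel's doubly linked list, used to show how
 * sem_pending is now manipulated with generic primitives.
 */
#include <stdio.h>

struct list_head {
    struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{                                               /* insert right after head */
    new->next = head->next;
    new->prev = head;
    head->next->prev = new;
    head->next = new;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{                                               /* insert right before head */
    new->next = head;
    new->prev = head->prev;
    head->prev->next = new;
    head->prev = new;
}

static void list_del(struct list_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
}

struct sem_queue {                              /* stripped-down stand-in */
    struct list_head list;
    int alter;
};

int main(void)
{
    struct list_head sem_pending = LIST_HEAD_INIT(sem_pending);
    struct sem_queue q = { .alter = 1 };

    if (q.alter)                                /* altering ops go to the tail */
        list_add_tail(&q.list, &sem_pending);
    else                                        /* read-only ops jump the queue */
        list_add(&q.list, &sem_pending);

    list_del(&q.list);                          /* e.g. on timeout or signal */
    printf("queue empty again: %d\n", sem_pending.next == &sem_pending);
    return 0;
}
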
@@ -1243,56 +1266,62 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
1243 */ 1266 */
1244void exit_sem(struct task_struct *tsk) 1267void exit_sem(struct task_struct *tsk)
1245{ 1268{
1246 struct sem_undo_list *undo_list; 1269 struct sem_undo_list *ulp;
1247 struct sem_undo *u, **up;
1248 struct ipc_namespace *ns;
1249 1270
1250 undo_list = tsk->sysvsem.undo_list; 1271 ulp = tsk->sysvsem.undo_list;
1251 if (!undo_list) 1272 if (!ulp)
1252 return; 1273 return;
1253 tsk->sysvsem.undo_list = NULL; 1274 tsk->sysvsem.undo_list = NULL;
1254 1275
1255 if (!atomic_dec_and_test(&undo_list->refcnt)) 1276 if (!atomic_dec_and_test(&ulp->refcnt))
1256 return; 1277 return;
1257 1278
1258 ns = tsk->nsproxy->ipc_ns; 1279 for (;;) {
1259 /* There's no need to hold the semundo list lock, as current
1260 * is the last task exiting for this undo list.
1261 */
1262 for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
1263 struct sem_array *sma; 1280 struct sem_array *sma;
1264 int nsems, i; 1281 struct sem_undo *un;
1265 struct sem_undo *un, **unp;
1266 int semid; 1282 int semid;
1267 1283 int i;
1268 semid = u->semid;
1269 1284
1270 if(semid == -1) 1285 rcu_read_lock();
1271 continue; 1286 un = list_entry(rcu_dereference(ulp->list_proc.next),
1272 sma = sem_lock(ns, semid); 1287 struct sem_undo, list_proc);
1288 if (&un->list_proc == &ulp->list_proc)
1289 semid = -1;
1290 else
1291 semid = un->semid;
1292 rcu_read_unlock();
1293
1294 if (semid == -1)
1295 break;
1296
1297 sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
1298
1299 /* exit_sem raced with IPC_RMID, nothing to do */
1273 if (IS_ERR(sma)) 1300 if (IS_ERR(sma))
1274 continue; 1301 continue;
1275 1302
1276 if (u->semid == -1) 1303 un = lookup_undo(ulp, semid);
1277 goto next_entry; 1304 if (un == NULL) {
1305 /* exit_sem raced with IPC_RMID+semget() that created
1306 * exactly the same semid. Nothing to do.
1307 */
1308 sem_unlock(sma);
1309 continue;
1310 }
1278 1311
1279 BUG_ON(sem_checkid(sma, u->semid)); 1312 /* remove un from the linked lists */
1313 assert_spin_locked(&sma->sem_perm.lock);
1314 list_del(&un->list_id);
1280 1315
1281 /* remove u from the sma->undo list */ 1316 spin_lock(&ulp->lock);
1282 for (unp = &sma->undo; (un = *unp); unp = &un->id_next) { 1317 list_del_rcu(&un->list_proc);
1283 if (u == un) 1318 spin_unlock(&ulp->lock);
1284 goto found; 1319
1285 } 1320 /* perform adjustments registered in un */
1286 printk ("exit_sem undo list error id=%d\n", u->semid); 1321 for (i = 0; i < sma->sem_nsems; i++) {
1287 goto next_entry;
1288found:
1289 *unp = un->id_next;
1290 /* perform adjustments registered in u */
1291 nsems = sma->sem_nsems;
1292 for (i = 0; i < nsems; i++) {
1293 struct sem * semaphore = &sma->sem_base[i]; 1322 struct sem * semaphore = &sma->sem_base[i];
1294 if (u->semadj[i]) { 1323 if (un->semadj[i]) {
1295 semaphore->semval += u->semadj[i]; 1324 semaphore->semval += un->semadj[i];
1296 /* 1325 /*
1297 * Range checks of the new semaphore value, 1326 * Range checks of the new semaphore value,
1298 * not defined by sus: 1327 * not defined by sus:
@@ -1316,10 +1345,11 @@ found:
1316 sma->sem_otime = get_seconds(); 1345 sma->sem_otime = get_seconds();
1317 /* maybe some queued-up processes were waiting for this */ 1346 /* maybe some queued-up processes were waiting for this */
1318 update_queue(sma); 1347 update_queue(sma);
1319next_entry:
1320 sem_unlock(sma); 1348 sem_unlock(sma);
1349
1350 call_rcu(&un->rcu, free_un);
1321 } 1351 }
1322 kfree(undo_list); 1352 kfree(ulp);
1323} 1353}
1324 1354
1325#ifdef CONFIG_PROC_FS 1355#ifdef CONFIG_PROC_FS
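
At the end of the reworked exit_sem() above, each registered semadj value is added back to its semaphore and the result is clamped, because SUS does not define the overflow behaviour. A small standalone rendition of that adjustment step (SEMVMX is the kernel's 32767 limit; the arrays are stand-ins for sma->sem_base and un->semadj):

#include <stdio.h>

#define SEMVMX 32767

static void apply_undo(int *semval, short *semadj, int nsems)
{
    int i;

    for (i = 0; i < nsems; i++) {
        if (!semadj[i])
            continue;
        semval[i] += semadj[i];
        if (semval[i] < 0)                      /* range checks, not */
            semval[i] = 0;                      /* defined by SUS */
        if (semval[i] > SEMVMX)
            semval[i] = SEMVMX;
    }
}

int main(void)
{
    int semval[2] = { 1, SEMVMX };
    short semadj[2] = { -3, 5 };

    apply_undo(semval, semadj, 2);
    printf("%d %d\n", semval[0], semval[1]);    /* prints: 0 32767 */
    return 0;
}
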
diff --git a/ipc/shm.c b/ipc/shm.c
index 790240cd067f..e77ec698cf40 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -112,23 +112,8 @@ void __init shm_init (void)
112} 112}
113 113
114/* 114/*
115 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
116 * is held to protect access to the idr tree.
117 */
118static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
119 int id)
120{
121 struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);
122
123 if (IS_ERR(ipcp))
124 return (struct shmid_kernel *)ipcp;
125
126 return container_of(ipcp, struct shmid_kernel, shm_perm);
127}
128
129/*
130 * shm_lock_(check_) routines are called in the paths where the rw_mutex 115 * shm_lock_(check_) routines are called in the paths where the rw_mutex
131 * is not held. 116 * is not necessarily held.
132 */ 117 */
133static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) 118static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
134{ 119{
@@ -211,7 +196,7 @@ static void shm_close(struct vm_area_struct *vma)
211 196
212 down_write(&shm_ids(ns).rw_mutex); 197 down_write(&shm_ids(ns).rw_mutex);
213 /* remove from the list of attaches of the shm segment */ 198 /* remove from the list of attaches of the shm segment */
214 shp = shm_lock_down(ns, sfd->id); 199 shp = shm_lock(ns, sfd->id);
215 BUG_ON(IS_ERR(shp)); 200 BUG_ON(IS_ERR(shp));
216 shp->shm_lprid = task_tgid_vnr(current); 201 shp->shm_lprid = task_tgid_vnr(current);
217 shp->shm_dtim = get_seconds(); 202 shp->shm_dtim = get_seconds();
@@ -577,7 +562,8 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
577 562
578 if (is_file_hugepages(shp->shm_file)) { 563 if (is_file_hugepages(shp->shm_file)) {
579 struct address_space *mapping = inode->i_mapping; 564 struct address_space *mapping = inode->i_mapping;
580 *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages; 565 struct hstate *h = hstate_file(shp->shm_file);
566 *rss += pages_per_huge_page(h) * mapping->nrpages;
581 } else { 567 } else {
582 struct shmem_inode_info *info = SHMEM_I(inode); 568 struct shmem_inode_info *info = SHMEM_I(inode);
583 spin_lock(&info->lock); 569 spin_lock(&info->lock);
@@ -931,7 +917,7 @@ invalid:
931 917
932out_nattch: 918out_nattch:
933 down_write(&shm_ids(ns).rw_mutex); 919 down_write(&shm_ids(ns).rw_mutex);
934 shp = shm_lock_down(ns, shmid); 920 shp = shm_lock(ns, shmid);
935 BUG_ON(IS_ERR(shp)); 921 BUG_ON(IS_ERR(shp));
936 shp->shm_nattch--; 922 shp->shm_nattch--;
937 if(shp->shm_nattch == 0 && 923 if(shp->shm_nattch == 0 &&
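
The shm_get_stat() change above computes the RSS contribution of a hugetlbfs-backed segment from that segment's own huge page size via pages_per_huge_page(hstate_file(...)) instead of the single compile-time HPAGE_SIZE, so segments with different huge page sizes are accounted correctly. A toy calculation with assumed sizes (4 KB base pages, 2 MB and 1 GB huge pages):

#include <stdio.h>

static unsigned long pages_per_huge_page(unsigned long huge_sz,
                                         unsigned long page_sz)
{
    return huge_sz / page_sz;
}

int main(void)
{
    unsigned long page_sz = 4096UL;
    unsigned long nrpages = 10;                 /* huge pages in the mapping */

    /* 2 MB huge pages: 512 base pages each */
    printf("%lu\n", pages_per_huge_page(2UL << 20, page_sz) * nrpages);
    /* 1 GB huge pages: 262144 base pages each */
    printf("%lu\n", pages_per_huge_page(1UL << 30, page_sz) * nrpages);
    return 0;
}
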
diff --git a/ipc/util.c b/ipc/util.c
index 3339177b336c..49b3ea615dc5 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -688,10 +688,6 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
688 * Look for an id in the ipc ids idr and lock the associated ipc object. 688 * Look for an id in the ipc ids idr and lock the associated ipc object.
689 * 689 *
690 * The ipc object is locked on exit. 690 * The ipc object is locked on exit.
691 *
692 * This is the routine that should be called when the rw_mutex is not already
693 * held, i.e. idr tree not protected: it protects the idr tree in read mode
694 * during the idr_find().
695 */ 691 */
696 692
697struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) 693struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
@@ -699,18 +695,13 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
699 struct kern_ipc_perm *out; 695 struct kern_ipc_perm *out;
700 int lid = ipcid_to_idx(id); 696 int lid = ipcid_to_idx(id);
701 697
702 down_read(&ids->rw_mutex);
703
704 rcu_read_lock(); 698 rcu_read_lock();
705 out = idr_find(&ids->ipcs_idr, lid); 699 out = idr_find(&ids->ipcs_idr, lid);
706 if (out == NULL) { 700 if (out == NULL) {
707 rcu_read_unlock(); 701 rcu_read_unlock();
708 up_read(&ids->rw_mutex);
709 return ERR_PTR(-EINVAL); 702 return ERR_PTR(-EINVAL);
710 } 703 }
711 704
712 up_read(&ids->rw_mutex);
713
714 spin_lock(&out->lock); 705 spin_lock(&out->lock);
715 706
716 /* ipc_rmid() may have already freed the ID while ipc_lock 707 /* ipc_rmid() may have already freed the ID while ipc_lock
@@ -725,56 +716,6 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
725 return out; 716 return out;
726} 717}
727 718
728/**
729 * ipc_lock_down - Lock an ipc structure with rw_sem held
730 * @ids: IPC identifier set
731 * @id: ipc id to look for
732 *
733 * Look for an id in the ipc ids idr and lock the associated ipc object.
734 *
735 * The ipc object is locked on exit.
736 *
737 * This is the routine that should be called when the rw_mutex is already
738 * held, i.e. idr tree protected.
739 */
740
741struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
742{
743 struct kern_ipc_perm *out;
744 int lid = ipcid_to_idx(id);
745
746 rcu_read_lock();
747 out = idr_find(&ids->ipcs_idr, lid);
748 if (out == NULL) {
749 rcu_read_unlock();
750 return ERR_PTR(-EINVAL);
751 }
752
753 spin_lock(&out->lock);
754
755 /*
756 * No need to verify that the structure is still valid since the
757 * rw_mutex is held.
758 */
759 return out;
760}
761
762struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id)
763{
764 struct kern_ipc_perm *out;
765
766 out = ipc_lock_down(ids, id);
767 if (IS_ERR(out))
768 return out;
769
770 if (ipc_checkid(out, id)) {
771 ipc_unlock(out);
772 return ERR_PTR(-EIDRM);
773 }
774
775 return out;
776}
777
778struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id) 719struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
779{ 720{
780 struct kern_ipc_perm *out; 721 struct kern_ipc_perm *out;
@@ -846,7 +787,7 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
846 int err; 787 int err;
847 788
848 down_write(&ids->rw_mutex); 789 down_write(&ids->rw_mutex);
849 ipcp = ipc_lock_check_down(ids, id); 790 ipcp = ipc_lock_check(ids, id);
850 if (IS_ERR(ipcp)) { 791 if (IS_ERR(ipcp)) {
851 err = PTR_ERR(ipcp); 792 err = PTR_ERR(ipcp);
852 goto out_up; 793 goto out_up;
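
With ipc_lock_down()/ipc_lock_check_down() gone, every lookup goes through ipc_lock(), which relies only on rcu_read_lock() around idr_find() plus the per-object spinlock; holding rw_mutex is no longer required. Failures are reported through the usual ERR_PTR()/IS_ERR()/PTR_ERR() convention, reproduced below in a compilable userspace form (the real macros live in <linux/err.h>; the lookup() here is a made-up stand-in):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
    return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int id)                     /* illustrative lookup */
{
    static int object = 42;

    return id == 1 ? (void *)&object : ERR_PTR(-EINVAL);
}

int main(void)
{
    void *p = lookup(2);

    if (IS_ERR(p))
        printf("lookup failed: %ld\n", PTR_ERR(p));     /* -22 on Linux */
    return 0;
}
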
diff --git a/ipc/util.h b/ipc/util.h
index cdb966aebe07..3646b45a03c9 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -102,11 +102,6 @@ void* ipc_rcu_alloc(int size);
102void ipc_rcu_getref(void *ptr); 102void ipc_rcu_getref(void *ptr);
103void ipc_rcu_putref(void *ptr); 103void ipc_rcu_putref(void *ptr);
104 104
105/*
106 * ipc_lock_down: called with rw_mutex held
107 * ipc_lock: called without that lock held
108 */
109struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *, int);
110struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); 105struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
111 106
112void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); 107void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
@@ -155,7 +150,6 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm)
155 rcu_read_unlock(); 150 rcu_read_unlock();
156} 151}
157 152
158struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id);
159struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id); 153struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
160int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, 154int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
161 struct ipc_ops *ops, struct ipc_params *params); 155 struct ipc_ops *ops, struct ipc_params *params);
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 526128a2e622..382dd5a8b2d7 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@ config HZ
55 default 1000 if HZ_1000 55 default 1000 if HZ_1000
56 56
57config SCHED_HRTICK 57config SCHED_HRTICK
58 def_bool HIGH_RES_TIMERS && X86 58 def_bool HIGH_RES_TIMERS && USE_GENERIC_SMP_HELPERS
diff --git a/kernel/Makefile b/kernel/Makefile
index 985ddb7da4d0..54f69837d35a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ 5obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
6 cpu.o exit.o itimer.o time.o softirq.o resource.o \ 6 cpu.o exit.o itimer.o time.o softirq.o resource.o \
7 sysctl.o capability.o ptrace.o timer.o user.o \ 7 sysctl.o capability.o ptrace.o timer.o user.o \
8 signal.o sys.o kmod.o workqueue.o pid.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o \
@@ -11,6 +11,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o 12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o
13 13
14CFLAGS_REMOVE_sched.o = -mno-spe
15
14ifdef CONFIG_FTRACE 16ifdef CONFIG_FTRACE
15# Do not trace debug files and internal ftrace files 17# Do not trace debug files and internal ftrace files
16CFLAGS_REMOVE_lockdep.o = -pg 18CFLAGS_REMOVE_lockdep.o = -pg
@@ -22,6 +24,7 @@ CFLAGS_REMOVE_sched_clock.o = -pg
22CFLAGS_REMOVE_sched.o = -mno-spe -pg 24CFLAGS_REMOVE_sched.o = -mno-spe -pg
23endif 25endif
24 26
27obj-$(CONFIG_PROFILING) += profile.o
25obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o 28obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
26obj-$(CONFIG_STACKTRACE) += stacktrace.o 29obj-$(CONFIG_STACKTRACE) += stacktrace.o
27obj-y += time/ 30obj-y += time/
diff --git a/kernel/acct.c b/kernel/acct.c
index 91e1cfd734d2..dd68b9059418 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -75,37 +75,39 @@ int acct_parm[3] = {4, 2, 30};
75/* 75/*
76 * External references and all of the globals. 76 * External references and all of the globals.
77 */ 77 */
78static void do_acct_process(struct pid_namespace *ns, struct file *); 78static void do_acct_process(struct bsd_acct_struct *acct,
79 struct pid_namespace *ns, struct file *);
79 80
80/* 81/*
81 * This structure is used so that all the data protected by lock 82 * This structure is used so that all the data protected by lock
82 * can be placed in the same cache line as the lock. This primes 83 * can be placed in the same cache line as the lock. This primes
83 * the cache line to have the data after getting the lock. 84 * the cache line to have the data after getting the lock.
84 */ 85 */
85struct acct_glbs { 86struct bsd_acct_struct {
86 spinlock_t lock;
87 volatile int active; 87 volatile int active;
88 volatile int needcheck; 88 volatile int needcheck;
89 struct file *file; 89 struct file *file;
90 struct pid_namespace *ns; 90 struct pid_namespace *ns;
91 struct timer_list timer; 91 struct timer_list timer;
92 struct list_head list;
92}; 93};
93 94
94static struct acct_glbs acct_globals __cacheline_aligned = 95static DEFINE_SPINLOCK(acct_lock);
95 {__SPIN_LOCK_UNLOCKED(acct_globals.lock)}; 96static LIST_HEAD(acct_list);
96 97
97/* 98/*
98 * Called whenever the timer says to check the free space. 99 * Called whenever the timer says to check the free space.
99 */ 100 */
100static void acct_timeout(unsigned long unused) 101static void acct_timeout(unsigned long x)
101{ 102{
102 acct_globals.needcheck = 1; 103 struct bsd_acct_struct *acct = (struct bsd_acct_struct *)x;
104 acct->needcheck = 1;
103} 105}
104 106
105/* 107/*
106 * Check the amount of free space and suspend/resume accordingly. 108 * Check the amount of free space and suspend/resume accordingly.
107 */ 109 */
108static int check_free_space(struct file *file) 110static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
109{ 111{
110 struct kstatfs sbuf; 112 struct kstatfs sbuf;
111 int res; 113 int res;
@@ -113,11 +115,11 @@ static int check_free_space(struct file *file)
113 sector_t resume; 115 sector_t resume;
114 sector_t suspend; 116 sector_t suspend;
115 117
116 spin_lock(&acct_globals.lock); 118 spin_lock(&acct_lock);
117 res = acct_globals.active; 119 res = acct->active;
118 if (!file || !acct_globals.needcheck) 120 if (!file || !acct->needcheck)
119 goto out; 121 goto out;
120 spin_unlock(&acct_globals.lock); 122 spin_unlock(&acct_lock);
121 123
122 /* May block */ 124 /* May block */
123 if (vfs_statfs(file->f_path.dentry, &sbuf)) 125 if (vfs_statfs(file->f_path.dentry, &sbuf))
@@ -136,35 +138,35 @@ static int check_free_space(struct file *file)
136 act = 0; 138 act = 0;
137 139
138 /* 140 /*
139 * If some joker switched acct_globals.file under us we'd better be 141 * If some joker switched acct->file under us we'd better be
140 * silent and _not_ touch anything. 142 * silent and _not_ touch anything.
141 */ 143 */
142 spin_lock(&acct_globals.lock); 144 spin_lock(&acct_lock);
143 if (file != acct_globals.file) { 145 if (file != acct->file) {
144 if (act) 146 if (act)
145 res = act>0; 147 res = act>0;
146 goto out; 148 goto out;
147 } 149 }
148 150
149 if (acct_globals.active) { 151 if (acct->active) {
150 if (act < 0) { 152 if (act < 0) {
151 acct_globals.active = 0; 153 acct->active = 0;
152 printk(KERN_INFO "Process accounting paused\n"); 154 printk(KERN_INFO "Process accounting paused\n");
153 } 155 }
154 } else { 156 } else {
155 if (act > 0) { 157 if (act > 0) {
156 acct_globals.active = 1; 158 acct->active = 1;
157 printk(KERN_INFO "Process accounting resumed\n"); 159 printk(KERN_INFO "Process accounting resumed\n");
158 } 160 }
159 } 161 }
160 162
161 del_timer(&acct_globals.timer); 163 del_timer(&acct->timer);
162 acct_globals.needcheck = 0; 164 acct->needcheck = 0;
163 acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ; 165 acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
164 add_timer(&acct_globals.timer); 166 add_timer(&acct->timer);
165 res = acct_globals.active; 167 res = acct->active;
166out: 168out:
167 spin_unlock(&acct_globals.lock); 169 spin_unlock(&acct_lock);
168 return res; 170 return res;
169} 171}
170 172
@@ -172,39 +174,41 @@ out:
172 * Close the old accounting file (if currently open) and then replace 174 * Close the old accounting file (if currently open) and then replace
173 * it with file (if non-NULL). 175 * it with file (if non-NULL).
174 * 176 *
175 * NOTE: acct_globals.lock MUST be held on entry and exit. 177 * NOTE: acct_lock MUST be held on entry and exit.
176 */ 178 */
177static void acct_file_reopen(struct file *file) 179static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file,
180 struct pid_namespace *ns)
178{ 181{
179 struct file *old_acct = NULL; 182 struct file *old_acct = NULL;
180 struct pid_namespace *old_ns = NULL; 183 struct pid_namespace *old_ns = NULL;
181 184
182 if (acct_globals.file) { 185 if (acct->file) {
183 old_acct = acct_globals.file; 186 old_acct = acct->file;
184 old_ns = acct_globals.ns; 187 old_ns = acct->ns;
185 del_timer(&acct_globals.timer); 188 del_timer(&acct->timer);
186 acct_globals.active = 0; 189 acct->active = 0;
187 acct_globals.needcheck = 0; 190 acct->needcheck = 0;
188 acct_globals.file = NULL; 191 acct->file = NULL;
192 acct->ns = NULL;
193 list_del(&acct->list);
189 } 194 }
190 if (file) { 195 if (file) {
191 acct_globals.file = file; 196 acct->file = file;
192 acct_globals.ns = get_pid_ns(task_active_pid_ns(current)); 197 acct->ns = ns;
193 acct_globals.needcheck = 0; 198 acct->needcheck = 0;
194 acct_globals.active = 1; 199 acct->active = 1;
200 list_add(&acct->list, &acct_list);
195 /* It's been deleted if it was used before so this is safe */ 201 /* It's been deleted if it was used before so this is safe */
196 init_timer(&acct_globals.timer); 202 setup_timer(&acct->timer, acct_timeout, (unsigned long)acct);
197 acct_globals.timer.function = acct_timeout; 203 acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
198 acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ; 204 add_timer(&acct->timer);
199 add_timer(&acct_globals.timer);
200 } 205 }
201 if (old_acct) { 206 if (old_acct) {
202 mnt_unpin(old_acct->f_path.mnt); 207 mnt_unpin(old_acct->f_path.mnt);
203 spin_unlock(&acct_globals.lock); 208 spin_unlock(&acct_lock);
204 do_acct_process(old_ns, old_acct); 209 do_acct_process(acct, old_ns, old_acct);
205 filp_close(old_acct, NULL); 210 filp_close(old_acct, NULL);
206 put_pid_ns(old_ns); 211 spin_lock(&acct_lock);
207 spin_lock(&acct_globals.lock);
208 } 212 }
209} 213}
210 214
@@ -212,6 +216,8 @@ static int acct_on(char *name)
212{ 216{
213 struct file *file; 217 struct file *file;
214 int error; 218 int error;
219 struct pid_namespace *ns;
220 struct bsd_acct_struct *acct = NULL;
215 221
216 /* Difference from BSD - they don't do O_APPEND */ 222 /* Difference from BSD - they don't do O_APPEND */
217 file = filp_open(name, O_WRONLY|O_APPEND|O_LARGEFILE, 0); 223 file = filp_open(name, O_WRONLY|O_APPEND|O_LARGEFILE, 0);
@@ -228,18 +234,34 @@ static int acct_on(char *name)
228 return -EIO; 234 return -EIO;
229 } 235 }
230 236
237 ns = task_active_pid_ns(current);
238 if (ns->bacct == NULL) {
239 acct = kzalloc(sizeof(struct bsd_acct_struct), GFP_KERNEL);
240 if (acct == NULL) {
241 filp_close(file, NULL);
242 return -ENOMEM;
243 }
244 }
245
231 error = security_acct(file); 246 error = security_acct(file);
232 if (error) { 247 if (error) {
248 kfree(acct);
233 filp_close(file, NULL); 249 filp_close(file, NULL);
234 return error; 250 return error;
235 } 251 }
236 252
237 spin_lock(&acct_globals.lock); 253 spin_lock(&acct_lock);
254 if (ns->bacct == NULL) {
255 ns->bacct = acct;
256 acct = NULL;
257 }
258
238 mnt_pin(file->f_path.mnt); 259 mnt_pin(file->f_path.mnt);
239 acct_file_reopen(file); 260 acct_file_reopen(ns->bacct, file, ns);
240 spin_unlock(&acct_globals.lock); 261 spin_unlock(&acct_lock);
241 262
242 mntput(file->f_path.mnt); /* it's pinned, now give up active reference */ 263 mntput(file->f_path.mnt); /* it's pinned, now give up active reference */
264 kfree(acct);
243 265
244 return 0; 266 return 0;
245} 267}
@@ -269,11 +291,17 @@ asmlinkage long sys_acct(const char __user *name)
269 error = acct_on(tmp); 291 error = acct_on(tmp);
270 putname(tmp); 292 putname(tmp);
271 } else { 293 } else {
294 struct bsd_acct_struct *acct;
295
296 acct = task_active_pid_ns(current)->bacct;
297 if (acct == NULL)
298 return 0;
299
272 error = security_acct(NULL); 300 error = security_acct(NULL);
273 if (!error) { 301 if (!error) {
274 spin_lock(&acct_globals.lock); 302 spin_lock(&acct_lock);
275 acct_file_reopen(NULL); 303 acct_file_reopen(acct, NULL, NULL);
276 spin_unlock(&acct_globals.lock); 304 spin_unlock(&acct_lock);
277 } 305 }
278 } 306 }
279 return error; 307 return error;
@@ -288,10 +316,16 @@ asmlinkage long sys_acct(const char __user *name)
288 */ 316 */
289void acct_auto_close_mnt(struct vfsmount *m) 317void acct_auto_close_mnt(struct vfsmount *m)
290{ 318{
291 spin_lock(&acct_globals.lock); 319 struct bsd_acct_struct *acct;
292 if (acct_globals.file && acct_globals.file->f_path.mnt == m) 320
293 acct_file_reopen(NULL); 321 spin_lock(&acct_lock);
294 spin_unlock(&acct_globals.lock); 322restart:
323 list_for_each_entry(acct, &acct_list, list)
324 if (acct->file && acct->file->f_path.mnt == m) {
325 acct_file_reopen(acct, NULL, NULL);
326 goto restart;
327 }
328 spin_unlock(&acct_lock);
295} 329}
296 330
297/** 331/**
@@ -303,12 +337,31 @@ void acct_auto_close_mnt(struct vfsmount *m)
303 */ 337 */
304void acct_auto_close(struct super_block *sb) 338void acct_auto_close(struct super_block *sb)
305{ 339{
306 spin_lock(&acct_globals.lock); 340 struct bsd_acct_struct *acct;
307 if (acct_globals.file && 341
308 acct_globals.file->f_path.mnt->mnt_sb == sb) { 342 spin_lock(&acct_lock);
309 acct_file_reopen(NULL); 343restart:
344 list_for_each_entry(acct, &acct_list, list)
345 if (acct->file && acct->file->f_path.mnt->mnt_sb == sb) {
346 acct_file_reopen(acct, NULL, NULL);
347 goto restart;
348 }
349 spin_unlock(&acct_lock);
350}
351
352void acct_exit_ns(struct pid_namespace *ns)
353{
354 struct bsd_acct_struct *acct;
355
356 spin_lock(&acct_lock);
357 acct = ns->bacct;
358 if (acct != NULL) {
359 if (acct->file != NULL)
360 acct_file_reopen(acct, NULL, NULL);
361
362 kfree(acct);
310 } 363 }
311 spin_unlock(&acct_globals.lock); 364 spin_unlock(&acct_lock);
312} 365}
313 366
314/* 367/*
@@ -425,7 +478,8 @@ static u32 encode_float(u64 value)
425/* 478/*
426 * do_acct_process does all actual work. Caller holds the reference to file. 479 * do_acct_process does all actual work. Caller holds the reference to file.
427 */ 480 */
428static void do_acct_process(struct pid_namespace *ns, struct file *file) 481static void do_acct_process(struct bsd_acct_struct *acct,
482 struct pid_namespace *ns, struct file *file)
429{ 483{
430 struct pacct_struct *pacct = &current->signal->pacct; 484 struct pacct_struct *pacct = &current->signal->pacct;
431 acct_t ac; 485 acct_t ac;
@@ -440,7 +494,7 @@ static void do_acct_process(struct pid_namespace *ns, struct file *file)
440 * First check to see if there is enough free_space to continue 494 * First check to see if there is enough free_space to continue
441 * the process accounting system. 495 * the process accounting system.
442 */ 496 */
443 if (!check_free_space(file)) 497 if (!check_free_space(acct, file))
444 return; 498 return;
445 499
446 /* 500 /*
@@ -577,34 +631,46 @@ void acct_collect(long exitcode, int group_dead)
577 spin_unlock_irq(&current->sighand->siglock); 631 spin_unlock_irq(&current->sighand->siglock);
578} 632}
579 633
580/** 634static void acct_process_in_ns(struct pid_namespace *ns)
581 * acct_process - now just a wrapper around do_acct_process
582 * @exitcode: task exit code
583 *
584 * handles process accounting for an exiting task
585 */
586void acct_process(void)
587{ 635{
588 struct file *file = NULL; 636 struct file *file = NULL;
589 struct pid_namespace *ns; 637 struct bsd_acct_struct *acct;
590 638
639 acct = ns->bacct;
591 /* 640 /*
592 * accelerate the common fastpath: 641 * accelerate the common fastpath:
593 */ 642 */
594 if (!acct_globals.file) 643 if (!acct || !acct->file)
595 return; 644 return;
596 645
597 spin_lock(&acct_globals.lock); 646 spin_lock(&acct_lock);
598 file = acct_globals.file; 647 file = acct->file;
599 if (unlikely(!file)) { 648 if (unlikely(!file)) {
600 spin_unlock(&acct_globals.lock); 649 spin_unlock(&acct_lock);
601 return; 650 return;
602 } 651 }
603 get_file(file); 652 get_file(file);
604 ns = get_pid_ns(acct_globals.ns); 653 spin_unlock(&acct_lock);
605 spin_unlock(&acct_globals.lock);
606 654
607 do_acct_process(ns, file); 655 do_acct_process(acct, ns, file);
608 fput(file); 656 fput(file);
609 put_pid_ns(ns); 657}
658
659/**
660 * acct_process - now just a wrapper around acct_process_in_ns,
661 * which in turn is a wrapper around do_acct_process.
662 *
663 * handles process accounting for an exiting task
664 */
665void acct_process(void)
666{
667 struct pid_namespace *ns;
668
669 /*
670 * This loop is safe to run locklessly, since current is still
671 * alive and holds its namespace, which in turn holds
672 * its parent.
673 */
674 for (ns = task_active_pid_ns(current); ns != NULL; ns = ns->parent)
675 acct_process_in_ns(ns);
610} 676}
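
The kernel/acct.c rework above turns the single acct_globals file into one optional bsd_acct_struct per pid namespace, and acct_process() now walks the exiting task's namespace chain, writing a record wherever accounting is enabled. A reduced userspace model of that walk (structures trimmed to the fields needed, file names invented):

#include <stdio.h>
#include <stddef.h>

struct bsd_acct_struct {
    const char *file;                           /* NULL: accounting is off here */
};

struct pid_namespace {
    struct bsd_acct_struct *bacct;
    struct pid_namespace *parent;               /* NULL for the initial namespace */
};

static void acct_process_in_ns(struct pid_namespace *ns)
{
    struct bsd_acct_struct *acct = ns->bacct;

    if (!acct || !acct->file)                   /* common fast path: nothing to do */
        return;
    printf("record written to %s\n", acct->file);
}

int main(void)
{
    struct bsd_acct_struct root_acct = { .file = "/var/account/pacct" };
    struct pid_namespace init_ns = { .bacct = &root_acct, .parent = NULL };
    struct pid_namespace child_ns = { .bacct = NULL, .parent = &init_ns };
    struct pid_namespace *ns;

    /* exiting task lives in child_ns; only init_ns has a file open */
    for (ns = &child_ns; ns != NULL; ns = ns->parent)
        acct_process_in_ns(ns);
    return 0;
}
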
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c10e7aae04d7..4699950e65bd 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1476,7 +1476,8 @@ void audit_syscall_entry(int arch, int major,
1476 struct audit_context *context = tsk->audit_context; 1476 struct audit_context *context = tsk->audit_context;
1477 enum audit_state state; 1477 enum audit_state state;
1478 1478
1479 BUG_ON(!context); 1479 if (unlikely(!context))
1480 return;
1480 1481
1481 /* 1482 /*
1482 * This happens only on certain architectures that make system 1483 * This happens only on certain architectures that make system
diff --git a/kernel/capability.c b/kernel/capability.c
index 901e0fdc3fff..0101e847603e 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -115,11 +115,208 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
115 return 0; 115 return 0;
116} 116}
117 117
118#ifndef CONFIG_SECURITY_FILE_CAPABILITIES
119
120/*
121 * Without filesystem capability support, we nominally support one process
122 * setting the capabilities of another
123 */
124static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
125 kernel_cap_t *pIp, kernel_cap_t *pPp)
126{
127 struct task_struct *target;
128 int ret;
129
130 spin_lock(&task_capability_lock);
131 read_lock(&tasklist_lock);
132
133 if (pid && pid != task_pid_vnr(current)) {
134 target = find_task_by_vpid(pid);
135 if (!target) {
136 ret = -ESRCH;
137 goto out;
138 }
139 } else
140 target = current;
141
142 ret = security_capget(target, pEp, pIp, pPp);
143
144out:
145 read_unlock(&tasklist_lock);
146 spin_unlock(&task_capability_lock);
147
148 return ret;
149}
150
151/*
152 * cap_set_pg - set capabilities for all processes in a given process
153 * group. We call this holding task_capability_lock and tasklist_lock.
154 */
155static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
156 kernel_cap_t *inheritable,
157 kernel_cap_t *permitted)
158{
159 struct task_struct *g, *target;
160 int ret = -EPERM;
161 int found = 0;
162 struct pid *pgrp;
163
164 spin_lock(&task_capability_lock);
165 read_lock(&tasklist_lock);
166
167 pgrp = find_vpid(pgrp_nr);
168 do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
169 target = g;
170 while_each_thread(g, target) {
171 if (!security_capset_check(target, effective,
172 inheritable, permitted)) {
173 security_capset_set(target, effective,
174 inheritable, permitted);
175 ret = 0;
176 }
177 found = 1;
178 }
179 } while_each_pid_task(pgrp, PIDTYPE_PGID, g);
180
181 read_unlock(&tasklist_lock);
182 spin_unlock(&task_capability_lock);
183
184 if (!found)
185 ret = 0;
186 return ret;
187}
188
118/* 189/*
119 * For sys_getproccap() and sys_setproccap(), any of the three 190 * cap_set_all - set capabilities for all processes other than init
120 * capability set pointers may be NULL -- indicating that that set is 191 * and self. We call this holding task_capability_lock and tasklist_lock.
121 * uninteresting and/or not to be changed.
122 */ 192 */
193static inline int cap_set_all(kernel_cap_t *effective,
194 kernel_cap_t *inheritable,
195 kernel_cap_t *permitted)
196{
197 struct task_struct *g, *target;
198 int ret = -EPERM;
199 int found = 0;
200
201 spin_lock(&task_capability_lock);
202 read_lock(&tasklist_lock);
203
204 do_each_thread(g, target) {
205 if (target == current
206 || is_container_init(target->group_leader))
207 continue;
208 found = 1;
209 if (security_capset_check(target, effective, inheritable,
210 permitted))
211 continue;
212 ret = 0;
213 security_capset_set(target, effective, inheritable, permitted);
214 } while_each_thread(g, target);
215
216 read_unlock(&tasklist_lock);
217 spin_unlock(&task_capability_lock);
218
219 if (!found)
220 ret = 0;
221
222 return ret;
223}
224
225/*
226 * Given the target pid does not refer to the current process we
227 * need more elaborate support... (This support is not present when
228 * filesystem capabilities are configured.)
229 */
230static inline int do_sys_capset_other_tasks(pid_t pid, kernel_cap_t *effective,
231 kernel_cap_t *inheritable,
232 kernel_cap_t *permitted)
233{
234 struct task_struct *target;
235 int ret;
236
237 if (!capable(CAP_SETPCAP))
238 return -EPERM;
239
240 if (pid == -1) /* all procs other than current and init */
241 return cap_set_all(effective, inheritable, permitted);
242
243 else if (pid < 0) /* all procs in process group */
244 return cap_set_pg(-pid, effective, inheritable, permitted);
245
246 /* target != current */
247 spin_lock(&task_capability_lock);
248 read_lock(&tasklist_lock);
249
250 target = find_task_by_vpid(pid);
251 if (!target)
252 ret = -ESRCH;
253 else {
254 ret = security_capset_check(target, effective, inheritable,
255 permitted);
256
257 /* having verified that the proposed changes are legal,
258 we now put them into effect. */
259 if (!ret)
260 security_capset_set(target, effective, inheritable,
261 permitted);
262 }
263
264 read_unlock(&tasklist_lock);
265 spin_unlock(&task_capability_lock);
266
267 return ret;
268}
269
270#else /* i.e., def CONFIG_SECURITY_FILE_CAPABILITIES */
271
272/*
273 * If we have configured with filesystem capability support, then the
274 * only thing that can change the capabilities of the current process
275 * is the current process. As such, we can't be in this code at the
276 * same time as we are in the process of setting capabilities in this
277 * process. The net result is that we can limit our use of locks to
278 * when we are reading the caps of another process.
279 */
280static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
281 kernel_cap_t *pIp, kernel_cap_t *pPp)
282{
283 int ret;
284
285 if (pid && (pid != task_pid_vnr(current))) {
286 struct task_struct *target;
287
288 spin_lock(&task_capability_lock);
289 read_lock(&tasklist_lock);
290
291 target = find_task_by_vpid(pid);
292 if (!target)
293 ret = -ESRCH;
294 else
295 ret = security_capget(target, pEp, pIp, pPp);
296
297 read_unlock(&tasklist_lock);
298 spin_unlock(&task_capability_lock);
299 } else
300 ret = security_capget(current, pEp, pIp, pPp);
301
302 return ret;
303}
304
305/*
306 * With filesystem capability support configured, the kernel does not
307 * permit the changing of capabilities in one process by another
308 * process. (CAP_SETPCAP has much less broad semantics when configured
309 * this way.)
310 */
311static inline int do_sys_capset_other_tasks(pid_t pid,
312 kernel_cap_t *effective,
313 kernel_cap_t *inheritable,
314 kernel_cap_t *permitted)
315{
316 return -EPERM;
317}
318
319#endif /* i.e., ndef CONFIG_SECURITY_FILE_CAPABILITIES */ 123 320
123 320
124/* 321/*
125 * Atomically modify the effective capabilities returning the original 322 * Atomically modify the effective capabilities returning the original
@@ -155,7 +352,6 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
155{ 352{
156 int ret = 0; 353 int ret = 0;
157 pid_t pid; 354 pid_t pid;
158 struct task_struct *target;
159 unsigned tocopy; 355 unsigned tocopy;
160 kernel_cap_t pE, pI, pP; 356 kernel_cap_t pE, pI, pP;
161 357
@@ -169,23 +365,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
169 if (pid < 0) 365 if (pid < 0)
170 return -EINVAL; 366 return -EINVAL;
171 367
172 spin_lock(&task_capability_lock); 368 ret = cap_get_target_pid(pid, &pE, &pI, &pP);
173 read_lock(&tasklist_lock);
174
175 if (pid && pid != task_pid_vnr(current)) {
176 target = find_task_by_vpid(pid);
177 if (!target) {
178 ret = -ESRCH;
179 goto out;
180 }
181 } else
182 target = current;
183
184 ret = security_capget(target, &pE, &pI, &pP);
185
186out:
187 read_unlock(&tasklist_lock);
188 spin_unlock(&task_capability_lock);
189 369
190 if (!ret) { 370 if (!ret) {
191 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; 371 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
@@ -216,7 +396,6 @@ out:
216 * before modification is attempted and the application 396 * before modification is attempted and the application
217 * fails. 397 * fails.
218 */ 398 */
219
220 if (copy_to_user(dataptr, kdata, tocopy 399 if (copy_to_user(dataptr, kdata, tocopy
221 * sizeof(struct __user_cap_data_struct))) { 400 * sizeof(struct __user_cap_data_struct))) {
222 return -EFAULT; 401 return -EFAULT;
@@ -226,70 +405,8 @@ out:
226 return ret; 405 return ret;
227} 406}
228 407
229/*
230 * cap_set_pg - set capabilities for all processes in a given process
231 * group. We call this holding task_capability_lock and tasklist_lock.
232 */
233static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
234 kernel_cap_t *inheritable,
235 kernel_cap_t *permitted)
236{
237 struct task_struct *g, *target;
238 int ret = -EPERM;
239 int found = 0;
240 struct pid *pgrp;
241
242 pgrp = find_vpid(pgrp_nr);
243 do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
244 target = g;
245 while_each_thread(g, target) {
246 if (!security_capset_check(target, effective,
247 inheritable,
248 permitted)) {
249 security_capset_set(target, effective,
250 inheritable,
251 permitted);
252 ret = 0;
253 }
254 found = 1;
255 }
256 } while_each_pid_task(pgrp, PIDTYPE_PGID, g);
257
258 if (!found)
259 ret = 0;
260 return ret;
261}
262
263/*
264 * cap_set_all - set capabilities for all processes other than init
265 * and self. We call this holding task_capability_lock and tasklist_lock.
266 */
267static inline int cap_set_all(kernel_cap_t *effective,
268 kernel_cap_t *inheritable,
269 kernel_cap_t *permitted)
270{
271 struct task_struct *g, *target;
272 int ret = -EPERM;
273 int found = 0;
274
275 do_each_thread(g, target) {
276 if (target == current || is_container_init(target->group_leader))
277 continue;
278 found = 1;
279 if (security_capset_check(target, effective, inheritable,
280 permitted))
281 continue;
282 ret = 0;
283 security_capset_set(target, effective, inheritable, permitted);
284 } while_each_thread(g, target);
285
286 if (!found)
287 ret = 0;
288 return ret;
289}
290
291/** 408/**
292 * sys_capset - set capabilities for a process or a group of processes 409 * sys_capset - set capabilities for a process or (*) a group of processes
293 * @header: pointer to struct that contains capability version and 410 * @header: pointer to struct that contains capability version and
294 * target pid data 411 * target pid data
295 * @data: pointer to struct that contains the effective, permitted, 412 * @data: pointer to struct that contains the effective, permitted,
@@ -313,7 +430,6 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
313 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; 430 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
314 unsigned i, tocopy; 431 unsigned i, tocopy;
315 kernel_cap_t inheritable, permitted, effective; 432 kernel_cap_t inheritable, permitted, effective;
316 struct task_struct *target;
317 int ret; 433 int ret;
318 pid_t pid; 434 pid_t pid;
319 435
@@ -324,9 +440,6 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
324 if (get_user(pid, &header->pid)) 440 if (get_user(pid, &header->pid))
325 return -EFAULT; 441 return -EFAULT;
326 442
327 if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP))
328 return -EPERM;
329
330 if (copy_from_user(&kdata, data, tocopy 443 if (copy_from_user(&kdata, data, tocopy
331 * sizeof(struct __user_cap_data_struct))) { 444 * sizeof(struct __user_cap_data_struct))) {
332 return -EFAULT; 445 return -EFAULT;
@@ -344,40 +457,31 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
344 i++; 457 i++;
345 } 458 }
346 459
347 spin_lock(&task_capability_lock); 460 if (pid && (pid != task_pid_vnr(current)))
348 read_lock(&tasklist_lock); 461 ret = do_sys_capset_other_tasks(pid, &effective, &inheritable,
349 462 &permitted);
350 if (pid > 0 && pid != task_pid_vnr(current)) { 463 else {
351 target = find_task_by_vpid(pid); 464 /*
352 if (!target) { 465 * This lock is required even when filesystem
353 ret = -ESRCH; 466 * capability support is configured - it protects the
354 goto out; 467 * sys_capget() call from returning incorrect data in
355 } 468 * the case that the targeted process is not the
356 } else 469 * current one.
357 target = current; 470 */
358 471 spin_lock(&task_capability_lock);
359 ret = 0;
360
361 /* having verified that the proposed changes are legal,
362 we now put them into effect. */
363 if (pid < 0) {
364 if (pid == -1) /* all procs other than current and init */
365 ret = cap_set_all(&effective, &inheritable, &permitted);
366 472
367 else /* all procs in process group */ 473 ret = security_capset_check(current, &effective, &inheritable,
368 ret = cap_set_pg(-pid, &effective, &inheritable,
369 &permitted);
370 } else {
371 ret = security_capset_check(target, &effective, &inheritable,
372 &permitted); 474 &permitted);
475 /*
476 * Having verified that the proposed changes are
477 * legal, we now put them into effect.
478 */
373 if (!ret) 479 if (!ret)
374 security_capset_set(target, &effective, &inheritable, 480 security_capset_set(current, &effective, &inheritable,
375 &permitted); 481 &permitted);
482 spin_unlock(&task_capability_lock);
376 } 483 }
377 484
378out:
379 read_unlock(&tasklist_lock);
380 spin_unlock(&task_capability_lock);
381 485
382 return ret; 486 return ret;
383} 487}
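
The capability.c changes above split the "modify another task" path into do_sys_capset_other_tasks(), whose behaviour depends on CONFIG_SECURITY_FILE_CAPABILITIES: with file capabilities it is an unconditional -EPERM, without them the historical pid / -1 / negative-pgrp cases remain. The sketch below mirrors only that dispatch structure; the stub return values and helper bodies are placeholders, not the kernel logic.

#include <stdio.h>
#include <errno.h>

/* #define CONFIG_SECURITY_FILE_CAPABILITIES 1 */

#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
static int do_sys_capset_other_tasks(int pid)
{
    (void)pid;
    return -EPERM;              /* only current may change its own caps */
}
#else
static int do_sys_capset_other_tasks(int pid)
{
    if (pid == -1)
        return 0;               /* all tasks except current and init */
    if (pid < 0)
        return 0;               /* every task in process group -pid */
    return 0;                   /* one specific task */
}
#endif

static int capset(int pid, int current_pid)
{
    if (pid && pid != current_pid)
        return do_sys_capset_other_tasks(pid);
    return 0;                   /* change current's own capabilities */
}

int main(void)
{
    printf("%d\n", capset(1234, 42));
    return 0;
}
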
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 15ac0e1e4f4d..66ec9fd21e0c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -89,11 +89,7 @@ struct cgroupfs_root {
89 /* Hierarchy-specific flags */ 89 /* Hierarchy-specific flags */
90 unsigned long flags; 90 unsigned long flags;
91 91
92 /* The path to use for release notifications. No locking 92 /* The path to use for release notifications. */
93 * between setting and use - so if userspace updates this
94 * while child cgroups exist, you could miss a
95 * notification. We ensure that it's always a valid
96 * NUL-terminated string */
97 char release_agent_path[PATH_MAX]; 93 char release_agent_path[PATH_MAX];
98}; 94};
99 95
@@ -118,7 +114,7 @@ static int root_count;
118 * extra work in the fork/exit path if none of the subsystems need to 114 * extra work in the fork/exit path if none of the subsystems need to
119 * be called. 115 * be called.
120 */ 116 */
121static int need_forkexit_callback; 117static int need_forkexit_callback __read_mostly;
122static int need_mm_owner_callback __read_mostly; 118static int need_mm_owner_callback __read_mostly;
123 119
124/* convenient tests for these bits */ 120/* convenient tests for these bits */
@@ -220,7 +216,7 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
220 * task until after the first call to cgroup_iter_start(). This 216 * task until after the first call to cgroup_iter_start(). This
221 * reduces the fork()/exit() overhead for people who have cgroups 217 * reduces the fork()/exit() overhead for people who have cgroups
222 * compiled into their kernel but not actually in use */ 218 * compiled into their kernel but not actually in use */
223static int use_task_css_set_links; 219static int use_task_css_set_links __read_mostly;
224 220
225/* When we create or destroy a css_set, the operation simply 221/* When we create or destroy a css_set, the operation simply
226 * takes/releases a reference count on all the cgroups referenced 222 * takes/releases a reference count on all the cgroups referenced
@@ -241,17 +237,20 @@ static int use_task_css_set_links;
241 */ 237 */
242static void unlink_css_set(struct css_set *cg) 238static void unlink_css_set(struct css_set *cg)
243{ 239{
240 struct cg_cgroup_link *link;
241 struct cg_cgroup_link *saved_link;
242
244 write_lock(&css_set_lock); 243 write_lock(&css_set_lock);
245 hlist_del(&cg->hlist); 244 hlist_del(&cg->hlist);
246 css_set_count--; 245 css_set_count--;
247 while (!list_empty(&cg->cg_links)) { 246
248 struct cg_cgroup_link *link; 247 list_for_each_entry_safe(link, saved_link, &cg->cg_links,
249 link = list_entry(cg->cg_links.next, 248 cg_link_list) {
250 struct cg_cgroup_link, cg_link_list);
251 list_del(&link->cg_link_list); 249 list_del(&link->cg_link_list);
252 list_del(&link->cgrp_link_list); 250 list_del(&link->cgrp_link_list);
253 kfree(link); 251 kfree(link);
254 } 252 }
253
255 write_unlock(&css_set_lock); 254 write_unlock(&css_set_lock);
256} 255}
257 256
@@ -363,15 +362,14 @@ static struct css_set *find_existing_css_set(
363static int allocate_cg_links(int count, struct list_head *tmp) 362static int allocate_cg_links(int count, struct list_head *tmp)
364{ 363{
365 struct cg_cgroup_link *link; 364 struct cg_cgroup_link *link;
365 struct cg_cgroup_link *saved_link;
366 int i; 366 int i;
367 INIT_LIST_HEAD(tmp); 367 INIT_LIST_HEAD(tmp);
368 for (i = 0; i < count; i++) { 368 for (i = 0; i < count; i++) {
369 link = kmalloc(sizeof(*link), GFP_KERNEL); 369 link = kmalloc(sizeof(*link), GFP_KERNEL);
370 if (!link) { 370 if (!link) {
371 while (!list_empty(tmp)) { 371 list_for_each_entry_safe(link, saved_link, tmp,
372 link = list_entry(tmp->next, 372 cgrp_link_list) {
373 struct cg_cgroup_link,
374 cgrp_link_list);
375 list_del(&link->cgrp_link_list); 373 list_del(&link->cgrp_link_list);
376 kfree(link); 374 kfree(link);
377 } 375 }
@@ -384,11 +382,10 @@ static int allocate_cg_links(int count, struct list_head *tmp)
384 382
385static void free_cg_links(struct list_head *tmp) 383static void free_cg_links(struct list_head *tmp)
386{ 384{
387 while (!list_empty(tmp)) { 385 struct cg_cgroup_link *link;
388 struct cg_cgroup_link *link; 386 struct cg_cgroup_link *saved_link;
389 link = list_entry(tmp->next, 387
390 struct cg_cgroup_link, 388 list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
391 cgrp_link_list);
392 list_del(&link->cgrp_link_list); 389 list_del(&link->cgrp_link_list);
393 kfree(link); 390 kfree(link);
394 } 391 }
@@ -415,11 +412,11 @@ static struct css_set *find_css_set(
415 412
416 /* First see if we already have a cgroup group that matches 413 /* First see if we already have a cgroup group that matches
417 * the desired set */ 414 * the desired set */
418 write_lock(&css_set_lock); 415 read_lock(&css_set_lock);
419 res = find_existing_css_set(oldcg, cgrp, template); 416 res = find_existing_css_set(oldcg, cgrp, template);
420 if (res) 417 if (res)
421 get_css_set(res); 418 get_css_set(res);
422 write_unlock(&css_set_lock); 419 read_unlock(&css_set_lock);
423 420
424 if (res) 421 if (res)
425 return res; 422 return res;
@@ -507,10 +504,6 @@ static struct css_set *find_css_set(
507 * knows that the cgroup won't be removed, as cgroup_rmdir() 504 * knows that the cgroup won't be removed, as cgroup_rmdir()
508 * needs that mutex. 505 * needs that mutex.
509 * 506 *
510 * The cgroup_common_file_write handler for operations that modify
511 * the cgroup hierarchy holds cgroup_mutex across the entire operation,
512 * single threading all such cgroup modifications across the system.
513 *
514 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't 507 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
515 * (usually) take cgroup_mutex. These are the two most performance 508 * (usually) take cgroup_mutex. These are the two most performance
516 * critical pieces of code here. The exception occurs on cgroup_exit(), 509 * critical pieces of code here. The exception occurs on cgroup_exit(),
@@ -1093,6 +1086,8 @@ static void cgroup_kill_sb(struct super_block *sb) {
1093 struct cgroupfs_root *root = sb->s_fs_info; 1086 struct cgroupfs_root *root = sb->s_fs_info;
1094 struct cgroup *cgrp = &root->top_cgroup; 1087 struct cgroup *cgrp = &root->top_cgroup;
1095 int ret; 1088 int ret;
1089 struct cg_cgroup_link *link;
1090 struct cg_cgroup_link *saved_link;
1096 1091
1097 BUG_ON(!root); 1092 BUG_ON(!root);
1098 1093
@@ -1112,10 +1107,9 @@ static void cgroup_kill_sb(struct super_block *sb) {
1112 * root cgroup 1107 * root cgroup
1113 */ 1108 */
1114 write_lock(&css_set_lock); 1109 write_lock(&css_set_lock);
1115 while (!list_empty(&cgrp->css_sets)) { 1110
1116 struct cg_cgroup_link *link; 1111 list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
1117 link = list_entry(cgrp->css_sets.next, 1112 cgrp_link_list) {
1118 struct cg_cgroup_link, cgrp_link_list);
1119 list_del(&link->cg_link_list); 1113 list_del(&link->cg_link_list);
1120 list_del(&link->cgrp_link_list); 1114 list_del(&link->cgrp_link_list);
1121 kfree(link); 1115 kfree(link);
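
Several cgroup.c hunks above (unlink_css_set, allocate_cg_links, free_cg_links, cgroup_kill_sb) convert open-coded while (!list_empty()) loops into list_for_each_entry_safe(), whose defining property is that the next element is fetched before the current one may be freed. The same idea on a plain singly linked list, as a standalone example:

#include <stdio.h>
#include <stdlib.h>

struct link {
    struct link *next;
    int payload;
};

static void free_all(struct link **head)
{
    struct link *cur, *saved_next;

    /* "safe" traversal: grab ->next before freeing the current entry */
    for (cur = *head; cur; cur = saved_next) {
        saved_next = cur->next;
        free(cur);
    }
    *head = NULL;
}

int main(void)
{
    struct link *head = NULL;
    int i;

    for (i = 0; i < 3; i++) {
        struct link *l = malloc(sizeof(*l));

        l->payload = i;
        l->next = head;
        head = l;
    }
    free_all(&head);
    printf("list released, head=%p\n", (void *)head);
    return 0;
}
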
@@ -1281,18 +1275,14 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1281} 1275}
1282 1276
1283/* 1277/*
1284 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with 1278 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
1285 * cgroup_mutex, may take task_lock of task 1279 * held. May take task_lock of task
1286 */ 1280 */
1287static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) 1281static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
1288{ 1282{
1289 pid_t pid;
1290 struct task_struct *tsk; 1283 struct task_struct *tsk;
1291 int ret; 1284 int ret;
1292 1285
1293 if (sscanf(pidbuf, "%d", &pid) != 1)
1294 return -EIO;
1295
1296 if (pid) { 1286 if (pid) {
1297 rcu_read_lock(); 1287 rcu_read_lock();
1298 tsk = find_task_by_vpid(pid); 1288 tsk = find_task_by_vpid(pid);
@@ -1318,6 +1308,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
1318 return ret; 1308 return ret;
1319} 1309}
1320 1310
1311static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
1312{
1313 int ret;
1314 if (!cgroup_lock_live_group(cgrp))
1315 return -ENODEV;
1316 ret = attach_task_by_pid(cgrp, pid);
1317 cgroup_unlock();
1318 return ret;
1319}
1320
1321/* The various types of files and directories in a cgroup file system */ 1321/* The various types of files and directories in a cgroup file system */
1322enum cgroup_filetype { 1322enum cgroup_filetype {
1323 FILE_ROOT, 1323 FILE_ROOT,
@@ -1327,12 +1327,54 @@ enum cgroup_filetype {
1327 FILE_RELEASE_AGENT, 1327 FILE_RELEASE_AGENT,
1328}; 1328};
1329 1329
1330/**
1331 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
1332 * @cgrp: the cgroup to be checked for liveness
1333 *
1334 * On success, returns true; the lock should be later released with
1335 * cgroup_unlock(). On failure returns false with no lock held.
1336 */
1337bool cgroup_lock_live_group(struct cgroup *cgrp)
1338{
1339 mutex_lock(&cgroup_mutex);
1340 if (cgroup_is_removed(cgrp)) {
1341 mutex_unlock(&cgroup_mutex);
1342 return false;
1343 }
1344 return true;
1345}
1346
1347static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
1348 const char *buffer)
1349{
1350 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
1351 if (!cgroup_lock_live_group(cgrp))
1352 return -ENODEV;
1353 strcpy(cgrp->root->release_agent_path, buffer);
1354 cgroup_unlock();
1355 return 0;
1356}
1357
1358static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
1359 struct seq_file *seq)
1360{
1361 if (!cgroup_lock_live_group(cgrp))
1362 return -ENODEV;
1363 seq_puts(seq, cgrp->root->release_agent_path);
1364 seq_putc(seq, '\n');
1365 cgroup_unlock();
1366 return 0;
1367}
1368
1369/* A buffer size big enough for numbers or short strings */
1370#define CGROUP_LOCAL_BUFFER_SIZE 64
1371
1330static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft, 1372static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
1331 struct file *file, 1373 struct file *file,
1332 const char __user *userbuf, 1374 const char __user *userbuf,
1333 size_t nbytes, loff_t *unused_ppos) 1375 size_t nbytes, loff_t *unused_ppos)
1334{ 1376{
1335 char buffer[64]; 1377 char buffer[CGROUP_LOCAL_BUFFER_SIZE];
1336 int retval = 0; 1378 int retval = 0;
1337 char *end; 1379 char *end;
1338 1380
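
cgroup_lock_live_group(), added above, packages "take cgroup_mutex and verify the cgroup has not been removed" so handlers such as cgroup_release_agent_write() can fail early with -ENODEV and only unlock on the success path. A userspace caricature of that calling convention (a pthread mutex instead of cgroup_mutex, a boolean flag instead of cgroup_is_removed(), and snprintf rather than the kernel's strcpy):

#include <pthread.h>
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static pthread_mutex_t group_mutex = PTHREAD_MUTEX_INITIALIZER;

struct group {
    bool removed;
    char release_agent_path[256];
};

static bool lock_live_group(struct group *grp)
{
    pthread_mutex_lock(&group_mutex);
    if (grp->removed) {
        pthread_mutex_unlock(&group_mutex);
        return false;                           /* caller must not unlock */
    }
    return true;                                /* caller unlocks when done */
}

static int release_agent_write(struct group *grp, const char *buffer)
{
    if (!lock_live_group(grp))
        return -ENODEV;
    snprintf(grp->release_agent_path, sizeof(grp->release_agent_path),
             "%s", buffer);
    pthread_mutex_unlock(&group_mutex);
    return 0;
}

int main(void)
{
    struct group grp = { .removed = false };

    printf("%d\n", release_agent_write(&grp, "/sbin/cgroup-release"));
    grp.removed = true;
    printf("%d\n", release_agent_write(&grp, "/bin/true"));    /* -19 on Linux */
    return 0;
}
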
@@ -1361,68 +1403,36 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
1361 return retval; 1403 return retval;
1362} 1404}
1363 1405
1364static ssize_t cgroup_common_file_write(struct cgroup *cgrp, 1406static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
1365 struct cftype *cft, 1407 struct file *file,
1366 struct file *file, 1408 const char __user *userbuf,
1367 const char __user *userbuf, 1409 size_t nbytes, loff_t *unused_ppos)
1368 size_t nbytes, loff_t *unused_ppos)
1369{ 1410{
1370 enum cgroup_filetype type = cft->private; 1411 char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
1371 char *buffer;
1372 int retval = 0; 1412 int retval = 0;
1413 size_t max_bytes = cft->max_write_len;
1414 char *buffer = local_buffer;
1373 1415
1374 if (nbytes >= PATH_MAX) 1416 if (!max_bytes)
1417 max_bytes = sizeof(local_buffer) - 1;
1418 if (nbytes >= max_bytes)
1375 return -E2BIG; 1419 return -E2BIG;
1376 1420 /* Allocate a dynamic buffer if we need one */
1377 /* +1 for nul-terminator */ 1421 if (nbytes >= sizeof(local_buffer)) {
1378 buffer = kmalloc(nbytes + 1, GFP_KERNEL); 1422 buffer = kmalloc(nbytes + 1, GFP_KERNEL);
1379 if (buffer == NULL) 1423 if (buffer == NULL)
1380 return -ENOMEM; 1424 return -ENOMEM;
1381
1382 if (copy_from_user(buffer, userbuf, nbytes)) {
1383 retval = -EFAULT;
1384 goto out1;
1385 } 1425 }
1386 buffer[nbytes] = 0; /* nul-terminate */ 1426 if (nbytes && copy_from_user(buffer, userbuf, nbytes))
1387 strstrip(buffer); /* strip -just- trailing whitespace */ 1427 return -EFAULT;
1388
1389 mutex_lock(&cgroup_mutex);
1390 1428
1391 /* 1429 buffer[nbytes] = 0; /* nul-terminate */
1392 * This was already checked for in cgroup_file_write(), but 1430 strstrip(buffer);
1393 * check again now we're holding cgroup_mutex. 1431 retval = cft->write_string(cgrp, cft, buffer);
1394 */ 1432 if (!retval)
1395 if (cgroup_is_removed(cgrp)) {
1396 retval = -ENODEV;
1397 goto out2;
1398 }
1399
1400 switch (type) {
1401 case FILE_TASKLIST:
1402 retval = attach_task_by_pid(cgrp, buffer);
1403 break;
1404 case FILE_NOTIFY_ON_RELEASE:
1405 clear_bit(CGRP_RELEASABLE, &cgrp->flags);
1406 if (simple_strtoul(buffer, NULL, 10) != 0)
1407 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
1408 else
1409 clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
1410 break;
1411 case FILE_RELEASE_AGENT:
1412 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
1413 strcpy(cgrp->root->release_agent_path, buffer);
1414 break;
1415 default:
1416 retval = -EINVAL;
1417 goto out2;
1418 }
1419
1420 if (retval == 0)
1421 retval = nbytes; 1433 retval = nbytes;
1422out2: 1434 if (buffer != local_buffer)
1423 mutex_unlock(&cgroup_mutex); 1435 kfree(buffer);
1424out1:
1425 kfree(buffer);
1426 return retval; 1436 return retval;
1427} 1437}
1428 1438
@@ -1438,6 +1448,8 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
1438 return cft->write(cgrp, cft, file, buf, nbytes, ppos); 1448 return cft->write(cgrp, cft, file, buf, nbytes, ppos);
1439 if (cft->write_u64 || cft->write_s64) 1449 if (cft->write_u64 || cft->write_s64)
1440 return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos); 1450 return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
1451 if (cft->write_string)
1452 return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
1441 if (cft->trigger) { 1453 if (cft->trigger) {
1442 int ret = cft->trigger(cgrp, (unsigned int)cft->private); 1454 int ret = cft->trigger(cgrp, (unsigned int)cft->private);
1443 return ret ? ret : nbytes; 1455 return ret ? ret : nbytes;
@@ -1450,7 +1462,7 @@ static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
1450 char __user *buf, size_t nbytes, 1462 char __user *buf, size_t nbytes,
1451 loff_t *ppos) 1463 loff_t *ppos)
1452{ 1464{
1453 char tmp[64]; 1465 char tmp[CGROUP_LOCAL_BUFFER_SIZE];
1454 u64 val = cft->read_u64(cgrp, cft); 1466 u64 val = cft->read_u64(cgrp, cft);
1455 int len = sprintf(tmp, "%llu\n", (unsigned long long) val); 1467 int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
1456 1468
@@ -1462,56 +1474,13 @@ static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
1462 char __user *buf, size_t nbytes, 1474 char __user *buf, size_t nbytes,
1463 loff_t *ppos) 1475 loff_t *ppos)
1464{ 1476{
1465 char tmp[64]; 1477 char tmp[CGROUP_LOCAL_BUFFER_SIZE];
1466 s64 val = cft->read_s64(cgrp, cft); 1478 s64 val = cft->read_s64(cgrp, cft);
1467 int len = sprintf(tmp, "%lld\n", (long long) val); 1479 int len = sprintf(tmp, "%lld\n", (long long) val);
1468 1480
1469 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); 1481 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
1470} 1482}
1471 1483
1472static ssize_t cgroup_common_file_read(struct cgroup *cgrp,
1473 struct cftype *cft,
1474 struct file *file,
1475 char __user *buf,
1476 size_t nbytes, loff_t *ppos)
1477{
1478 enum cgroup_filetype type = cft->private;
1479 char *page;
1480 ssize_t retval = 0;
1481 char *s;
1482
1483 if (!(page = (char *)__get_free_page(GFP_KERNEL)))
1484 return -ENOMEM;
1485
1486 s = page;
1487
1488 switch (type) {
1489 case FILE_RELEASE_AGENT:
1490 {
1491 struct cgroupfs_root *root;
1492 size_t n;
1493 mutex_lock(&cgroup_mutex);
1494 root = cgrp->root;
1495 n = strnlen(root->release_agent_path,
1496 sizeof(root->release_agent_path));
1497 n = min(n, (size_t) PAGE_SIZE);
1498 strncpy(s, root->release_agent_path, n);
1499 mutex_unlock(&cgroup_mutex);
1500 s += n;
1501 break;
1502 }
1503 default:
1504 retval = -EINVAL;
1505 goto out;
1506 }
1507 *s++ = '\n';
1508
1509 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1510out:
1511 free_page((unsigned long)page);
1512 return retval;
1513}
1514
1515static ssize_t cgroup_file_read(struct file *file, char __user *buf, 1484static ssize_t cgroup_file_read(struct file *file, char __user *buf,
1516 size_t nbytes, loff_t *ppos) 1485 size_t nbytes, loff_t *ppos)
1517{ 1486{
@@ -1569,6 +1538,7 @@ int cgroup_seqfile_release(struct inode *inode, struct file *file)
1569 1538
1570static struct file_operations cgroup_seqfile_operations = { 1539static struct file_operations cgroup_seqfile_operations = {
1571 .read = seq_read, 1540 .read = seq_read,
1541 .write = cgroup_file_write,
1572 .llseek = seq_lseek, 1542 .llseek = seq_lseek,
1573 .release = cgroup_seqfile_release, 1543 .release = cgroup_seqfile_release,
1574}; 1544};
@@ -1756,15 +1726,11 @@ int cgroup_add_files(struct cgroup *cgrp,
1756int cgroup_task_count(const struct cgroup *cgrp) 1726int cgroup_task_count(const struct cgroup *cgrp)
1757{ 1727{
1758 int count = 0; 1728 int count = 0;
1759 struct list_head *l; 1729 struct cg_cgroup_link *link;
1760 1730
1761 read_lock(&css_set_lock); 1731 read_lock(&css_set_lock);
1762 l = cgrp->css_sets.next; 1732 list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
1763 while (l != &cgrp->css_sets) {
1764 struct cg_cgroup_link *link =
1765 list_entry(l, struct cg_cgroup_link, cgrp_link_list);
1766 count += atomic_read(&link->cg->ref.refcount); 1733 count += atomic_read(&link->cg->ref.refcount);
1767 l = l->next;
1768 } 1734 }
1769 read_unlock(&css_set_lock); 1735 read_unlock(&css_set_lock);
1770 return count; 1736 return count;
@@ -2227,6 +2193,18 @@ static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
2227 return notify_on_release(cgrp); 2193 return notify_on_release(cgrp);
2228} 2194}
2229 2195
2196static int cgroup_write_notify_on_release(struct cgroup *cgrp,
2197 struct cftype *cft,
2198 u64 val)
2199{
2200 clear_bit(CGRP_RELEASABLE, &cgrp->flags);
2201 if (val)
2202 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
2203 else
2204 clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
2205 return 0;
2206}
2207
2230/* 2208/*
2231 * for the common functions, 'private' gives the type of file 2209 * for the common functions, 'private' gives the type of file
2232 */ 2210 */
@@ -2235,7 +2213,7 @@ static struct cftype files[] = {
2235 .name = "tasks", 2213 .name = "tasks",
2236 .open = cgroup_tasks_open, 2214 .open = cgroup_tasks_open,
2237 .read = cgroup_tasks_read, 2215 .read = cgroup_tasks_read,
2238 .write = cgroup_common_file_write, 2216 .write_u64 = cgroup_tasks_write,
2239 .release = cgroup_tasks_release, 2217 .release = cgroup_tasks_release,
2240 .private = FILE_TASKLIST, 2218 .private = FILE_TASKLIST,
2241 }, 2219 },
@@ -2243,15 +2221,16 @@ static struct cftype files[] = {
2243 { 2221 {
2244 .name = "notify_on_release", 2222 .name = "notify_on_release",
2245 .read_u64 = cgroup_read_notify_on_release, 2223 .read_u64 = cgroup_read_notify_on_release,
2246 .write = cgroup_common_file_write, 2224 .write_u64 = cgroup_write_notify_on_release,
2247 .private = FILE_NOTIFY_ON_RELEASE, 2225 .private = FILE_NOTIFY_ON_RELEASE,
2248 }, 2226 },
2249}; 2227};
2250 2228
2251static struct cftype cft_release_agent = { 2229static struct cftype cft_release_agent = {
2252 .name = "release_agent", 2230 .name = "release_agent",
2253 .read = cgroup_common_file_read, 2231 .read_seq_string = cgroup_release_agent_show,
2254 .write = cgroup_common_file_write, 2232 .write_string = cgroup_release_agent_write,
2233 .max_write_len = PATH_MAX,
2255 .private = FILE_RELEASE_AGENT, 2234 .private = FILE_RELEASE_AGENT,
2256}; 2235};
2257 2236
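A minimal sketch of how a controller is expected to use the new ->write_string() hook; the names below (example_write, example_file) are illustrative and not part of this patch. cgroup_write_string() copies the user data, nul-terminates it, strips trailing whitespace and enforces max_write_len, so the handler only sees a clean kernel string:

static int example_write(struct cgroup *cgrp, struct cftype *cft,
			 const char *buf)
{
	/* buf holds at most max_write_len - 1 characters */
	return 0;	/* 0 makes the write return nbytes to userspace */
}

static struct cftype example_file = {
	.name		= "example",
	.write_string	= example_write,
	.max_write_len	= 256,
};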
@@ -2869,16 +2848,17 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
2869 * cgroup_clone - clone the cgroup the given subsystem is attached to 2848 * cgroup_clone - clone the cgroup the given subsystem is attached to
2870 * @tsk: the task to be moved 2849 * @tsk: the task to be moved
2871 * @subsys: the given subsystem 2850 * @subsys: the given subsystem
2851 * @nodename: the name for the new cgroup
2872 * 2852 *
2873 * Duplicate the current cgroup in the hierarchy that the given 2853 * Duplicate the current cgroup in the hierarchy that the given
2874 * subsystem is attached to, and move this task into the new 2854 * subsystem is attached to, and move this task into the new
2875 * child. 2855 * child.
2876 */ 2856 */
2877int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) 2857int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
2858 char *nodename)
2878{ 2859{
2879 struct dentry *dentry; 2860 struct dentry *dentry;
2880 int ret = 0; 2861 int ret = 0;
2881 char nodename[MAX_CGROUP_TYPE_NAMELEN];
2882 struct cgroup *parent, *child; 2862 struct cgroup *parent, *child;
2883 struct inode *inode; 2863 struct inode *inode;
2884 struct css_set *cg; 2864 struct css_set *cg;
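With the name now passed in by the caller, a user such as the ns subsystem can pick its own naming scheme; a hedged sketch (the helper and its buffer size are illustrative):

static int example_clone_for_pid(struct task_struct *task,
				 struct cgroup_subsys *ss, struct pid *pid)
{
	char name[16];

	/* name the child cgroup after the pid as seen in its namespace */
	snprintf(name, sizeof(name), "%d", pid_vnr(pid));
	return cgroup_clone(task, ss, name);
}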
@@ -2903,8 +2883,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
2903 cg = tsk->cgroups; 2883 cg = tsk->cgroups;
2904 parent = task_cgroup(tsk, subsys->subsys_id); 2884 parent = task_cgroup(tsk, subsys->subsys_id);
2905 2885
2906 snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid);
2907
2908 /* Pin the hierarchy */ 2886 /* Pin the hierarchy */
2909 atomic_inc(&parent->root->sb->s_active); 2887 atomic_inc(&parent->root->sb->s_active);
2910 2888
@@ -3078,27 +3056,24 @@ static void cgroup_release_agent(struct work_struct *work)
3078 while (!list_empty(&release_list)) { 3056 while (!list_empty(&release_list)) {
3079 char *argv[3], *envp[3]; 3057 char *argv[3], *envp[3];
3080 int i; 3058 int i;
3081 char *pathbuf; 3059 char *pathbuf = NULL, *agentbuf = NULL;
3082 struct cgroup *cgrp = list_entry(release_list.next, 3060 struct cgroup *cgrp = list_entry(release_list.next,
3083 struct cgroup, 3061 struct cgroup,
3084 release_list); 3062 release_list);
3085 list_del_init(&cgrp->release_list); 3063 list_del_init(&cgrp->release_list);
3086 spin_unlock(&release_list_lock); 3064 spin_unlock(&release_list_lock);
3087 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 3065 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
3088 if (!pathbuf) { 3066 if (!pathbuf)
3089 spin_lock(&release_list_lock); 3067 goto continue_free;
3090 continue; 3068 if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
3091 } 3069 goto continue_free;
3092 3070 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
3093 if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) { 3071 if (!agentbuf)
3094 kfree(pathbuf); 3072 goto continue_free;
3095 spin_lock(&release_list_lock);
3096 continue;
3097 }
3098 3073
3099 i = 0; 3074 i = 0;
3100 argv[i++] = cgrp->root->release_agent_path; 3075 argv[i++] = agentbuf;
3101 argv[i++] = (char *)pathbuf; 3076 argv[i++] = pathbuf;
3102 argv[i] = NULL; 3077 argv[i] = NULL;
3103 3078
3104 i = 0; 3079 i = 0;
@@ -3112,8 +3087,10 @@ static void cgroup_release_agent(struct work_struct *work)
3112 * be a slow process */ 3087 * be a slow process */
3113 mutex_unlock(&cgroup_mutex); 3088 mutex_unlock(&cgroup_mutex);
3114 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); 3089 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
3115 kfree(pathbuf);
3116 mutex_lock(&cgroup_mutex); 3090 mutex_lock(&cgroup_mutex);
3091 continue_free:
3092 kfree(pathbuf);
3093 kfree(agentbuf);
3117 spin_lock(&release_list_lock); 3094 spin_lock(&release_list_lock);
3118 } 3095 }
3119 spin_unlock(&release_list_lock); 3096 spin_unlock(&release_list_lock);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index cfb1d43ab801..10ba5f1004a5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,8 @@ void __init cpu_hotplug_init(void)
64 cpu_hotplug.refcount = 0; 64 cpu_hotplug.refcount = 0;
65} 65}
66 66
67cpumask_t cpu_active_map;
68
67#ifdef CONFIG_HOTPLUG_CPU 69#ifdef CONFIG_HOTPLUG_CPU
68 70
69void get_online_cpus(void) 71void get_online_cpus(void)
@@ -283,6 +285,11 @@ out_allowed:
283 set_cpus_allowed_ptr(current, &old_allowed); 285 set_cpus_allowed_ptr(current, &old_allowed);
284out_release: 286out_release:
285 cpu_hotplug_done(); 287 cpu_hotplug_done();
288 if (!err) {
289 if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
290 hcpu) == NOTIFY_BAD)
291 BUG();
292 }
286 return err; 293 return err;
287} 294}
288 295
@@ -291,11 +298,30 @@ int __ref cpu_down(unsigned int cpu)
291 int err = 0; 298 int err = 0;
292 299
293 cpu_maps_update_begin(); 300 cpu_maps_update_begin();
294 if (cpu_hotplug_disabled) 301
302 if (cpu_hotplug_disabled) {
295 err = -EBUSY; 303 err = -EBUSY;
296 else 304 goto out;
297 err = _cpu_down(cpu, 0); 305 }
306
307 cpu_clear(cpu, cpu_active_map);
308
309 /*
 310	 * Make sure all cpus did the reschedule and are not
 311	 * using a stale version of cpu_active_map.
 312	 * This is not strictly necessary because stop_machine(),
 313	 * which we run further down, already provides the required
 314	 * synchronization. But it's really a side effect and we do not
 315	 * want to depend on the innards of stop_machine() here.
316 */
317 synchronize_sched();
318
319 err = _cpu_down(cpu, 0);
320
321 if (cpu_online(cpu))
322 cpu_set(cpu, cpu_active_map);
298 323
324out:
299 cpu_maps_update_done(); 325 cpu_maps_update_done();
300 return err; 326 return err;
301} 327}
@@ -355,11 +381,18 @@ int __cpuinit cpu_up(unsigned int cpu)
355 } 381 }
356 382
357 cpu_maps_update_begin(); 383 cpu_maps_update_begin();
358 if (cpu_hotplug_disabled) 384
385 if (cpu_hotplug_disabled) {
359 err = -EBUSY; 386 err = -EBUSY;
360 else 387 goto out;
361 err = _cpu_up(cpu, 0); 388 }
389
390 err = _cpu_up(cpu, 0);
362 391
392 if (cpu_online(cpu))
393 cpu_set(cpu, cpu_active_map);
394
395out:
363 cpu_maps_update_done(); 396 cpu_maps_update_done();
364 return err; 397 return err;
365} 398}
@@ -413,7 +446,7 @@ void __ref enable_nonboot_cpus(void)
413 goto out; 446 goto out;
414 447
415 printk("Enabling non-boot CPUs ...\n"); 448 printk("Enabling non-boot CPUs ...\n");
416 for_each_cpu_mask(cpu, frozen_cpus) { 449 for_each_cpu_mask_nr(cpu, frozen_cpus) {
417 error = _cpu_up(cpu, 1); 450 error = _cpu_up(cpu, 1);
418 if (!error) { 451 if (!error) {
419 printk("CPU%d is up\n", cpu); 452 printk("CPU%d is up\n", cpu);
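The new cpu_active_map is consumed elsewhere (scheduler migration paths); a hedged sketch of the intended check, with a made-up helper name:

static int example_pick_migration_target(cpumask_t *allowed)
{
	int cpu;

	/* prefer CPUs that are both allowed and still active */
	for_each_cpu_mask_nr(cpu, *allowed)
		if (cpu_isset(cpu, cpu_active_map))
			return cpu;
	return -1;	/* nothing usable; caller must cope */
}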
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d2cc67dac8b1..91cf85b36dd5 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -227,10 +227,6 @@ static struct cpuset top_cpuset = {
227 * The task_struct fields mems_allowed and mems_generation may only 227 * The task_struct fields mems_allowed and mems_generation may only
228 * be accessed in the context of that task, so require no locks. 228 * be accessed in the context of that task, so require no locks.
229 * 229 *
230 * The cpuset_common_file_write handler for operations that modify
231 * the cpuset hierarchy holds cgroup_mutex across the entire operation,
232 * single threading all such cpuset modifications across the system.
233 *
234 * The cpuset_common_file_read() handlers only hold callback_mutex across 230 * The cpuset_common_file_read() handlers only hold callback_mutex across
235 * small pieces of code, such as when reading out possibly multi-word 231 * small pieces of code, such as when reading out possibly multi-word
236 * cpumasks and nodemasks. 232 * cpumasks and nodemasks.
@@ -369,7 +365,7 @@ void cpuset_update_task_memory_state(void)
369 my_cpusets_mem_gen = top_cpuset.mems_generation; 365 my_cpusets_mem_gen = top_cpuset.mems_generation;
370 } else { 366 } else {
371 rcu_read_lock(); 367 rcu_read_lock();
372 my_cpusets_mem_gen = task_cs(current)->mems_generation; 368 my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
373 rcu_read_unlock(); 369 rcu_read_unlock();
374 } 370 }
375 371
@@ -500,11 +496,16 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
500/* 496/*
501 * rebuild_sched_domains() 497 * rebuild_sched_domains()
502 * 498 *
503 * If the flag 'sched_load_balance' of any cpuset with non-empty 499 * This routine will be called to rebuild the scheduler's dynamic
504 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset 500 * sched domains:
505 * which has that flag enabled, or if any cpuset with a non-empty 501 * - if the flag 'sched_load_balance' of any cpuset with non-empty
506 * 'cpus' is removed, then call this routine to rebuild the 502 * 'cpus' changes,
507 * scheduler's dynamic sched domains. 503 * - or if the 'cpus' allowed changes in any cpuset which has that
504 * flag enabled,
505 * - or if the 'sched_relax_domain_level' of any cpuset which has
506 * that flag enabled and with non-empty 'cpus' changes,
507 * - or if any cpuset with non-empty 'cpus' is removed,
508 * - or if a cpu gets offlined.
508 * 509 *
 509 * This routine builds a partial partition of the system's CPUs 510
 510 * (the set of non-overlapping cpumask_t's in the array 'part' 511
@@ -564,7 +565,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
564 * partition_sched_domains(). 565 * partition_sched_domains().
565 */ 566 */
566 567
567static void rebuild_sched_domains(void) 568void rebuild_sched_domains(void)
568{ 569{
569 struct kfifo *q; /* queue of cpusets to be scanned */ 570 struct kfifo *q; /* queue of cpusets to be scanned */
570 struct cpuset *cp; /* scans q */ 571 struct cpuset *cp; /* scans q */
@@ -609,8 +610,13 @@ static void rebuild_sched_domains(void)
609 while (__kfifo_get(q, (void *)&cp, sizeof(cp))) { 610 while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
610 struct cgroup *cont; 611 struct cgroup *cont;
611 struct cpuset *child; /* scans child cpusets of cp */ 612 struct cpuset *child; /* scans child cpusets of cp */
613
614 if (cpus_empty(cp->cpus_allowed))
615 continue;
616
612 if (is_sched_load_balance(cp)) 617 if (is_sched_load_balance(cp))
613 csa[csn++] = cp; 618 csa[csn++] = cp;
619
614 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { 620 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
615 child = cgroup_cs(cont); 621 child = cgroup_cs(cont);
616 __kfifo_put(q, (void *)&child, sizeof(cp)); 622 __kfifo_put(q, (void *)&child, sizeof(cp));
@@ -703,36 +709,6 @@ done:
703 /* Don't kfree(dattr) -- partition_sched_domains() does that. */ 709 /* Don't kfree(dattr) -- partition_sched_domains() does that. */
704} 710}
705 711
706static inline int started_after_time(struct task_struct *t1,
707 struct timespec *time,
708 struct task_struct *t2)
709{
710 int start_diff = timespec_compare(&t1->start_time, time);
711 if (start_diff > 0) {
712 return 1;
713 } else if (start_diff < 0) {
714 return 0;
715 } else {
716 /*
717 * Arbitrarily, if two processes started at the same
718 * time, we'll say that the lower pointer value
719 * started first. Note that t2 may have exited by now
720 * so this may not be a valid pointer any longer, but
721 * that's fine - it still serves to distinguish
722 * between two tasks started (effectively)
723 * simultaneously.
724 */
725 return t1 > t2;
726 }
727}
728
729static inline int started_after(void *p1, void *p2)
730{
731 struct task_struct *t1 = p1;
732 struct task_struct *t2 = p2;
733 return started_after_time(t1, &t2->start_time, t2);
734}
735
736/** 712/**
737 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's 713 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
738 * @tsk: task to test 714 * @tsk: task to test
@@ -768,15 +744,49 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
768} 744}
769 745
770/** 746/**
747 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
748 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
749 *
750 * Called with cgroup_mutex held
751 *
752 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
753 * calling callback functions for each.
754 *
755 * Return 0 if successful, -errno if not.
756 */
757static int update_tasks_cpumask(struct cpuset *cs)
758{
759 struct cgroup_scanner scan;
760 struct ptr_heap heap;
761 int retval;
762
763 /*
764 * cgroup_scan_tasks() will initialize heap->gt for us.
765 * heap_init() is still needed here for we should not change
766 * cs->cpus_allowed when heap_init() fails.
767 */
768 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
769 if (retval)
770 return retval;
771
772 scan.cg = cs->css.cgroup;
773 scan.test_task = cpuset_test_cpumask;
774 scan.process_task = cpuset_change_cpumask;
775 scan.heap = &heap;
776 retval = cgroup_scan_tasks(&scan);
777
778 heap_free(&heap);
779 return retval;
780}
781
782/**
771 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it 783 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
772 * @cs: the cpuset to consider 784 * @cs: the cpuset to consider
773 * @buf: buffer of cpu numbers written to this cpuset 785 * @buf: buffer of cpu numbers written to this cpuset
774 */ 786 */
775static int update_cpumask(struct cpuset *cs, char *buf) 787static int update_cpumask(struct cpuset *cs, const char *buf)
776{ 788{
777 struct cpuset trialcs; 789 struct cpuset trialcs;
778 struct cgroup_scanner scan;
779 struct ptr_heap heap;
780 int retval; 790 int retval;
781 int is_load_balanced; 791 int is_load_balanced;
782 792
@@ -792,7 +802,6 @@ static int update_cpumask(struct cpuset *cs, char *buf)
792 * that parsing. The validate_change() call ensures that cpusets 802 * that parsing. The validate_change() call ensures that cpusets
793 * with tasks have cpus. 803 * with tasks have cpus.
794 */ 804 */
795 buf = strstrip(buf);
796 if (!*buf) { 805 if (!*buf) {
797 cpus_clear(trialcs.cpus_allowed); 806 cpus_clear(trialcs.cpus_allowed);
798 } else { 807 } else {
@@ -811,10 +820,6 @@ static int update_cpumask(struct cpuset *cs, char *buf)
811 if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) 820 if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
812 return 0; 821 return 0;
813 822
814 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
815 if (retval)
816 return retval;
817
818 is_load_balanced = is_sched_load_balance(&trialcs); 823 is_load_balanced = is_sched_load_balance(&trialcs);
819 824
820 mutex_lock(&callback_mutex); 825 mutex_lock(&callback_mutex);
@@ -825,12 +830,9 @@ static int update_cpumask(struct cpuset *cs, char *buf)
825 * Scan tasks in the cpuset, and update the cpumasks of any 830 * Scan tasks in the cpuset, and update the cpumasks of any
826 * that need an update. 831 * that need an update.
827 */ 832 */
828 scan.cg = cs->css.cgroup; 833 retval = update_tasks_cpumask(cs);
829 scan.test_task = cpuset_test_cpumask; 834 if (retval < 0)
830 scan.process_task = cpuset_change_cpumask; 835 return retval;
831 scan.heap = &heap;
832 cgroup_scan_tasks(&scan);
833 heap_free(&heap);
834 836
835 if (is_load_balanced) 837 if (is_load_balanced)
836 rebuild_sched_domains(); 838 rebuild_sched_domains();
@@ -886,74 +888,25 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
886 mutex_unlock(&callback_mutex); 888 mutex_unlock(&callback_mutex);
887} 889}
888 890
889/*
890 * Handle user request to change the 'mems' memory placement
891 * of a cpuset. Needs to validate the request, update the
892 * cpusets mems_allowed and mems_generation, and for each
893 * task in the cpuset, rebind any vma mempolicies and if
894 * the cpuset is marked 'memory_migrate', migrate the tasks
895 * pages to the new memory.
896 *
897 * Call with cgroup_mutex held. May take callback_mutex during call.
898 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
899 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
900 * their mempolicies to the cpusets new mems_allowed.
901 */
902
903static void *cpuset_being_rebound; 891static void *cpuset_being_rebound;
904 892
905static int update_nodemask(struct cpuset *cs, char *buf) 893/**
894 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
895 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
896 * @oldmem: old mems_allowed of cpuset cs
897 *
898 * Called with cgroup_mutex held
899 * Return 0 if successful, -errno if not.
900 */
901static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
906{ 902{
907 struct cpuset trialcs;
908 nodemask_t oldmem;
909 struct task_struct *p; 903 struct task_struct *p;
910 struct mm_struct **mmarray; 904 struct mm_struct **mmarray;
911 int i, n, ntasks; 905 int i, n, ntasks;
912 int migrate; 906 int migrate;
913 int fudge; 907 int fudge;
914 int retval;
915 struct cgroup_iter it; 908 struct cgroup_iter it;
916 909 int retval;
917 /*
918 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
919 * it's read-only
920 */
921 if (cs == &top_cpuset)
922 return -EACCES;
923
924 trialcs = *cs;
925
926 /*
927 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
928 * Since nodelist_parse() fails on an empty mask, we special case
929 * that parsing. The validate_change() call ensures that cpusets
930 * with tasks have memory.
931 */
932 buf = strstrip(buf);
933 if (!*buf) {
934 nodes_clear(trialcs.mems_allowed);
935 } else {
936 retval = nodelist_parse(buf, trialcs.mems_allowed);
937 if (retval < 0)
938 goto done;
939
940 if (!nodes_subset(trialcs.mems_allowed,
941 node_states[N_HIGH_MEMORY]))
942 return -EINVAL;
943 }
944 oldmem = cs->mems_allowed;
945 if (nodes_equal(oldmem, trialcs.mems_allowed)) {
946 retval = 0; /* Too easy - nothing to do */
947 goto done;
948 }
949 retval = validate_change(cs, &trialcs);
950 if (retval < 0)
951 goto done;
952
953 mutex_lock(&callback_mutex);
954 cs->mems_allowed = trialcs.mems_allowed;
955 cs->mems_generation = cpuset_mems_generation++;
956 mutex_unlock(&callback_mutex);
957 910
958 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ 911 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
959 912
@@ -1020,7 +973,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
1020 973
1021 mpol_rebind_mm(mm, &cs->mems_allowed); 974 mpol_rebind_mm(mm, &cs->mems_allowed);
1022 if (migrate) 975 if (migrate)
1023 cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed); 976 cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1024 mmput(mm); 977 mmput(mm);
1025 } 978 }
1026 979
@@ -1032,6 +985,70 @@ done:
1032 return retval; 985 return retval;
1033} 986}
1034 987
988/*
989 * Handle user request to change the 'mems' memory placement
990 * of a cpuset. Needs to validate the request, update the
 991 * cpuset's mems_allowed and mems_generation, and for each
 992 * task in the cpuset, rebind any vma mempolicies and if
 993 * the cpuset is marked 'memory_migrate', migrate the task's
 994 * pages to the new memory.
 995 *
 996 * Call with cgroup_mutex held. May take callback_mutex during call.
 997 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 998 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 999 * their mempolicies to the cpuset's new mems_allowed.
1000 */
1001static int update_nodemask(struct cpuset *cs, const char *buf)
1002{
1003 struct cpuset trialcs;
1004 nodemask_t oldmem;
1005 int retval;
1006
1007 /*
 1008 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
1009 * it's read-only
1010 */
1011 if (cs == &top_cpuset)
1012 return -EACCES;
1013
1014 trialcs = *cs;
1015
1016 /*
1017 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1018 * Since nodelist_parse() fails on an empty mask, we special case
1019 * that parsing. The validate_change() call ensures that cpusets
1020 * with tasks have memory.
1021 */
1022 if (!*buf) {
1023 nodes_clear(trialcs.mems_allowed);
1024 } else {
1025 retval = nodelist_parse(buf, trialcs.mems_allowed);
1026 if (retval < 0)
1027 goto done;
1028
1029 if (!nodes_subset(trialcs.mems_allowed,
1030 node_states[N_HIGH_MEMORY]))
1031 return -EINVAL;
1032 }
1033 oldmem = cs->mems_allowed;
1034 if (nodes_equal(oldmem, trialcs.mems_allowed)) {
1035 retval = 0; /* Too easy - nothing to do */
1036 goto done;
1037 }
1038 retval = validate_change(cs, &trialcs);
1039 if (retval < 0)
1040 goto done;
1041
1042 mutex_lock(&callback_mutex);
1043 cs->mems_allowed = trialcs.mems_allowed;
1044 cs->mems_generation = cpuset_mems_generation++;
1045 mutex_unlock(&callback_mutex);
1046
1047 retval = update_tasks_nodemask(cs, &oldmem);
1048done:
1049 return retval;
1050}
1051
1035int current_cpuset_is_being_rebound(void) 1052int current_cpuset_is_being_rebound(void)
1036{ 1053{
1037 return task_cs(current) == cpuset_being_rebound; 1054 return task_cs(current) == cpuset_being_rebound;
@@ -1044,7 +1061,8 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
1044 1061
1045 if (val != cs->relax_domain_level) { 1062 if (val != cs->relax_domain_level) {
1046 cs->relax_domain_level = val; 1063 cs->relax_domain_level = val;
1047 rebuild_sched_domains(); 1064 if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
1065 rebuild_sched_domains();
1048 } 1066 }
1049 1067
1050 return 0; 1068 return 0;
@@ -1256,72 +1274,14 @@ typedef enum {
1256 FILE_SPREAD_SLAB, 1274 FILE_SPREAD_SLAB,
1257} cpuset_filetype_t; 1275} cpuset_filetype_t;
1258 1276
1259static ssize_t cpuset_common_file_write(struct cgroup *cont,
1260 struct cftype *cft,
1261 struct file *file,
1262 const char __user *userbuf,
1263 size_t nbytes, loff_t *unused_ppos)
1264{
1265 struct cpuset *cs = cgroup_cs(cont);
1266 cpuset_filetype_t type = cft->private;
1267 char *buffer;
1268 int retval = 0;
1269
1270 /* Crude upper limit on largest legitimate cpulist user might write. */
1271 if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES))
1272 return -E2BIG;
1273
1274 /* +1 for nul-terminator */
1275 buffer = kmalloc(nbytes + 1, GFP_KERNEL);
1276 if (!buffer)
1277 return -ENOMEM;
1278
1279 if (copy_from_user(buffer, userbuf, nbytes)) {
1280 retval = -EFAULT;
1281 goto out1;
1282 }
1283 buffer[nbytes] = 0; /* nul-terminate */
1284
1285 cgroup_lock();
1286
1287 if (cgroup_is_removed(cont)) {
1288 retval = -ENODEV;
1289 goto out2;
1290 }
1291
1292 switch (type) {
1293 case FILE_CPULIST:
1294 retval = update_cpumask(cs, buffer);
1295 break;
1296 case FILE_MEMLIST:
1297 retval = update_nodemask(cs, buffer);
1298 break;
1299 default:
1300 retval = -EINVAL;
1301 goto out2;
1302 }
1303
1304 if (retval == 0)
1305 retval = nbytes;
1306out2:
1307 cgroup_unlock();
1308out1:
1309 kfree(buffer);
1310 return retval;
1311}
1312
1313static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) 1277static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1314{ 1278{
1315 int retval = 0; 1279 int retval = 0;
1316 struct cpuset *cs = cgroup_cs(cgrp); 1280 struct cpuset *cs = cgroup_cs(cgrp);
1317 cpuset_filetype_t type = cft->private; 1281 cpuset_filetype_t type = cft->private;
1318 1282
1319 cgroup_lock(); 1283 if (!cgroup_lock_live_group(cgrp))
1320
1321 if (cgroup_is_removed(cgrp)) {
1322 cgroup_unlock();
1323 return -ENODEV; 1284 return -ENODEV;
1324 }
1325 1285
1326 switch (type) { 1286 switch (type) {
1327 case FILE_CPU_EXCLUSIVE: 1287 case FILE_CPU_EXCLUSIVE:
@@ -1367,12 +1327,9 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1367 struct cpuset *cs = cgroup_cs(cgrp); 1327 struct cpuset *cs = cgroup_cs(cgrp);
1368 cpuset_filetype_t type = cft->private; 1328 cpuset_filetype_t type = cft->private;
1369 1329
1370 cgroup_lock(); 1330 if (!cgroup_lock_live_group(cgrp))
1371
1372 if (cgroup_is_removed(cgrp)) {
1373 cgroup_unlock();
1374 return -ENODEV; 1331 return -ENODEV;
1375 } 1332
1376 switch (type) { 1333 switch (type) {
1377 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 1334 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1378 retval = update_relax_domain_level(cs, val); 1335 retval = update_relax_domain_level(cs, val);
@@ -1386,6 +1343,32 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1386} 1343}
1387 1344
1388/* 1345/*
1346 * Common handling for a write to a "cpus" or "mems" file.
1347 */
1348static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1349 const char *buf)
1350{
1351 int retval = 0;
1352
1353 if (!cgroup_lock_live_group(cgrp))
1354 return -ENODEV;
1355
1356 switch (cft->private) {
1357 case FILE_CPULIST:
1358 retval = update_cpumask(cgroup_cs(cgrp), buf);
1359 break;
1360 case FILE_MEMLIST:
1361 retval = update_nodemask(cgroup_cs(cgrp), buf);
1362 break;
1363 default:
1364 retval = -EINVAL;
1365 break;
1366 }
1367 cgroup_unlock();
1368 return retval;
1369}
1370
1371/*
1389 * These ascii lists should be read in a single call, by using a user 1372 * These ascii lists should be read in a single call, by using a user
1390 * buffer large enough to hold the entire map. If read in smaller 1373 * buffer large enough to hold the entire map. If read in smaller
1391 * chunks, there is no guarantee of atomicity. Since the display format 1374 * chunks, there is no guarantee of atomicity. Since the display format
@@ -1504,14 +1487,16 @@ static struct cftype files[] = {
1504 { 1487 {
1505 .name = "cpus", 1488 .name = "cpus",
1506 .read = cpuset_common_file_read, 1489 .read = cpuset_common_file_read,
1507 .write = cpuset_common_file_write, 1490 .write_string = cpuset_write_resmask,
1491 .max_write_len = (100U + 6 * NR_CPUS),
1508 .private = FILE_CPULIST, 1492 .private = FILE_CPULIST,
1509 }, 1493 },
1510 1494
1511 { 1495 {
1512 .name = "mems", 1496 .name = "mems",
1513 .read = cpuset_common_file_read, 1497 .read = cpuset_common_file_read,
1514 .write = cpuset_common_file_write, 1498 .write_string = cpuset_write_resmask,
1499 .max_write_len = (100U + 6 * MAX_NUMNODES),
1515 .private = FILE_MEMLIST, 1500 .private = FILE_MEMLIST,
1516 }, 1501 },
1517 1502
@@ -1792,7 +1777,7 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1792 scan.scan.heap = NULL; 1777 scan.scan.heap = NULL;
1793 scan.to = to->css.cgroup; 1778 scan.to = to->css.cgroup;
1794 1779
1795 if (cgroup_scan_tasks((struct cgroup_scanner *)&scan)) 1780 if (cgroup_scan_tasks(&scan.scan))
1796 printk(KERN_ERR "move_member_tasks_to_cpuset: " 1781 printk(KERN_ERR "move_member_tasks_to_cpuset: "
1797 "cgroup_scan_tasks failed\n"); 1782 "cgroup_scan_tasks failed\n");
1798} 1783}
@@ -1852,6 +1837,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
1852 struct cpuset *child; /* scans child cpusets of cp */ 1837 struct cpuset *child; /* scans child cpusets of cp */
1853 struct list_head queue; 1838 struct list_head queue;
1854 struct cgroup *cont; 1839 struct cgroup *cont;
1840 nodemask_t oldmems;
1855 1841
1856 INIT_LIST_HEAD(&queue); 1842 INIT_LIST_HEAD(&queue);
1857 1843
@@ -1871,6 +1857,8 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
1871 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) 1857 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
1872 continue; 1858 continue;
1873 1859
1860 oldmems = cp->mems_allowed;
1861
1874 /* Remove offline cpus and mems from this cpuset. */ 1862 /* Remove offline cpus and mems from this cpuset. */
1875 mutex_lock(&callback_mutex); 1863 mutex_lock(&callback_mutex);
1876 cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map); 1864 cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
@@ -1882,6 +1870,10 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
1882 if (cpus_empty(cp->cpus_allowed) || 1870 if (cpus_empty(cp->cpus_allowed) ||
1883 nodes_empty(cp->mems_allowed)) 1871 nodes_empty(cp->mems_allowed))
1884 remove_tasks_in_empty_cpuset(cp); 1872 remove_tasks_in_empty_cpuset(cp);
1873 else {
1874 update_tasks_cpumask(cp);
1875 update_tasks_nodemask(cp, &oldmems);
1876 }
1885 } 1877 }
1886} 1878}
1887 1879
@@ -1974,7 +1966,6 @@ void __init cpuset_init_smp(void)
1974} 1966}
1975 1967
1976/** 1968/**
1977
1978 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. 1969 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
1979 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 1970 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
1980 * @pmask: pointer to cpumask_t variable to receive cpus_allowed set. 1971 * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 10e43fd8b721..b3179dad71be 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -145,8 +145,11 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
145 d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; 145 d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
146 tmp = d->swapin_delay_total + tsk->delays->swapin_delay; 146 tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
147 d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; 147 d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
148 tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
149 d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
148 d->blkio_count += tsk->delays->blkio_count; 150 d->blkio_count += tsk->delays->blkio_count;
149 d->swapin_count += tsk->delays->swapin_count; 151 d->swapin_count += tsk->delays->swapin_count;
152 d->freepages_count += tsk->delays->freepages_count;
150 spin_unlock_irqrestore(&tsk->delays->lock, flags); 153 spin_unlock_irqrestore(&tsk->delays->lock, flags);
151 154
152done: 155done:
@@ -165,3 +168,16 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
165 return ret; 168 return ret;
166} 169}
167 170
171void __delayacct_freepages_start(void)
172{
173 delayacct_start(&current->delays->freepages_start);
174}
175
176void __delayacct_freepages_end(void)
177{
178 delayacct_end(&current->delays->freepages_start,
179 &current->delays->freepages_end,
180 &current->delays->freepages_delay,
181 &current->delays->freepages_count);
182}
183
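The expected call pattern in the reclaim path, assuming the usual conditional wrappers in <linux/delayacct.h> (delayacct_freepages_start/end); the function below only illustrates where the pair goes:

static void example_direct_reclaim(void)
{
	delayacct_freepages_start();
	/* ... synchronous page reclaim runs here ... */
	delayacct_freepages_end();
}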
diff --git a/kernel/exit.c b/kernel/exit.c
index 93d2711b9381..ad933bb29ec7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,7 +85,6 @@ static void __exit_signal(struct task_struct *tsk)
85 BUG_ON(!sig); 85 BUG_ON(!sig);
86 BUG_ON(!atomic_read(&sig->count)); 86 BUG_ON(!atomic_read(&sig->count));
87 87
88 rcu_read_lock();
89 sighand = rcu_dereference(tsk->sighand); 88 sighand = rcu_dereference(tsk->sighand);
90 spin_lock(&sighand->siglock); 89 spin_lock(&sighand->siglock);
91 90
@@ -121,6 +120,18 @@ static void __exit_signal(struct task_struct *tsk)
121 sig->nivcsw += tsk->nivcsw; 120 sig->nivcsw += tsk->nivcsw;
122 sig->inblock += task_io_get_inblock(tsk); 121 sig->inblock += task_io_get_inblock(tsk);
123 sig->oublock += task_io_get_oublock(tsk); 122 sig->oublock += task_io_get_oublock(tsk);
123#ifdef CONFIG_TASK_XACCT
124 sig->rchar += tsk->rchar;
125 sig->wchar += tsk->wchar;
126 sig->syscr += tsk->syscr;
127 sig->syscw += tsk->syscw;
128#endif /* CONFIG_TASK_XACCT */
129#ifdef CONFIG_TASK_IO_ACCOUNTING
130 sig->ioac.read_bytes += tsk->ioac.read_bytes;
131 sig->ioac.write_bytes += tsk->ioac.write_bytes;
132 sig->ioac.cancelled_write_bytes +=
133 tsk->ioac.cancelled_write_bytes;
134#endif /* CONFIG_TASK_IO_ACCOUNTING */
124 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; 135 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
125 sig = NULL; /* Marker for below. */ 136 sig = NULL; /* Marker for below. */
126 } 137 }
@@ -136,7 +147,6 @@ static void __exit_signal(struct task_struct *tsk)
136 tsk->signal = NULL; 147 tsk->signal = NULL;
137 tsk->sighand = NULL; 148 tsk->sighand = NULL;
138 spin_unlock(&sighand->siglock); 149 spin_unlock(&sighand->siglock);
139 rcu_read_unlock();
140 150
141 __cleanup_sighand(sighand); 151 __cleanup_sighand(sighand);
142 clear_tsk_thread_flag(tsk,TIF_SIGPENDING); 152 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
@@ -432,7 +442,7 @@ void daemonize(const char *name, ...)
432 * We don't want to have TIF_FREEZE set if the system-wide hibernation 442 * We don't want to have TIF_FREEZE set if the system-wide hibernation
433 * or suspend transition begins right now. 443 * or suspend transition begins right now.
434 */ 444 */
435 current->flags |= PF_NOFREEZE; 445 current->flags |= (PF_NOFREEZE | PF_KTHREAD);
436 446
437 if (current->nsproxy != &init_nsproxy) { 447 if (current->nsproxy != &init_nsproxy) {
438 get_nsproxy(&init_nsproxy); 448 get_nsproxy(&init_nsproxy);
@@ -666,26 +676,40 @@ assign_new_owner:
666static void exit_mm(struct task_struct * tsk) 676static void exit_mm(struct task_struct * tsk)
667{ 677{
668 struct mm_struct *mm = tsk->mm; 678 struct mm_struct *mm = tsk->mm;
679 struct core_state *core_state;
669 680
670 mm_release(tsk, mm); 681 mm_release(tsk, mm);
671 if (!mm) 682 if (!mm)
672 return; 683 return;
673 /* 684 /*
674 * Serialize with any possible pending coredump. 685 * Serialize with any possible pending coredump.
675 * We must hold mmap_sem around checking core_waiters 686 * We must hold mmap_sem around checking core_state
676 * and clearing tsk->mm. The core-inducing thread 687 * and clearing tsk->mm. The core-inducing thread
677 * will increment core_waiters for each thread in the 688 * will increment ->nr_threads for each thread in the
678 * group with ->mm != NULL. 689 * group with ->mm != NULL.
679 */ 690 */
680 down_read(&mm->mmap_sem); 691 down_read(&mm->mmap_sem);
681 if (mm->core_waiters) { 692 core_state = mm->core_state;
693 if (core_state) {
694 struct core_thread self;
682 up_read(&mm->mmap_sem); 695 up_read(&mm->mmap_sem);
683 down_write(&mm->mmap_sem);
684 if (!--mm->core_waiters)
685 complete(mm->core_startup_done);
686 up_write(&mm->mmap_sem);
687 696
688 wait_for_completion(&mm->core_done); 697 self.task = tsk;
698 self.next = xchg(&core_state->dumper.next, &self);
699 /*
700 * Implies mb(), the result of xchg() must be visible
701 * to core_state->dumper.
702 */
703 if (atomic_dec_and_test(&core_state->nr_threads))
704 complete(&core_state->startup);
705
706 for (;;) {
707 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
708 if (!self.task) /* see coredump_finish() */
709 break;
710 schedule();
711 }
712 __set_task_state(tsk, TASK_RUNNING);
689 down_read(&mm->mmap_sem); 713 down_read(&mm->mmap_sem);
690 } 714 }
691 atomic_inc(&mm->mm_count); 715 atomic_inc(&mm->mm_count);
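For context, a sketch of the dumper-side counterpart that the loop above waits for; this is roughly what coredump_finish() is expected to do (not part of this hunk, details hedged):

static void example_coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * Read ->next and ->task before clearing: once ->task is
		 * NULL the exiting thread may return, and 'curr', which
		 * lives on its stack, can go away.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}
	mm->core_state = NULL;
}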
@@ -1354,6 +1378,21 @@ static int wait_task_zombie(struct task_struct *p, int options,
1354 psig->coublock += 1378 psig->coublock +=
1355 task_io_get_oublock(p) + 1379 task_io_get_oublock(p) +
1356 sig->oublock + sig->coublock; 1380 sig->oublock + sig->coublock;
1381#ifdef CONFIG_TASK_XACCT
1382 psig->rchar += p->rchar + sig->rchar;
1383 psig->wchar += p->wchar + sig->wchar;
1384 psig->syscr += p->syscr + sig->syscr;
1385 psig->syscw += p->syscw + sig->syscw;
1386#endif /* CONFIG_TASK_XACCT */
1387#ifdef CONFIG_TASK_IO_ACCOUNTING
1388 psig->ioac.read_bytes +=
1389 p->ioac.read_bytes + sig->ioac.read_bytes;
1390 psig->ioac.write_bytes +=
1391 p->ioac.write_bytes + sig->ioac.write_bytes;
1392 psig->ioac.cancelled_write_bytes +=
1393 p->ioac.cancelled_write_bytes +
1394 sig->ioac.cancelled_write_bytes;
1395#endif /* CONFIG_TASK_IO_ACCOUNTING */
1357 spin_unlock_irq(&p->parent->sighand->siglock); 1396 spin_unlock_irq(&p->parent->sighand->siglock);
1358 } 1397 }
1359 1398
diff --git a/kernel/fork.c b/kernel/fork.c
index adefc1131f27..b99d73e971a4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -33,6 +33,7 @@
33#include <linux/cpu.h> 33#include <linux/cpu.h>
34#include <linux/cgroup.h> 34#include <linux/cgroup.h>
35#include <linux/security.h> 35#include <linux/security.h>
36#include <linux/hugetlb.h>
36#include <linux/swap.h> 37#include <linux/swap.h>
37#include <linux/syscalls.h> 38#include <linux/syscalls.h>
38#include <linux/jiffies.h> 39#include <linux/jiffies.h>
@@ -92,6 +93,23 @@ int nr_processes(void)
92static struct kmem_cache *task_struct_cachep; 93static struct kmem_cache *task_struct_cachep;
93#endif 94#endif
94 95
96#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
97static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
98{
99#ifdef CONFIG_DEBUG_STACK_USAGE
100 gfp_t mask = GFP_KERNEL | __GFP_ZERO;
101#else
102 gfp_t mask = GFP_KERNEL;
103#endif
104 return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
105}
106
107static inline void free_thread_info(struct thread_info *ti)
108{
109 free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
110}
111#endif
112
95/* SLAB cache for signal_struct structures (tsk->signal) */ 113/* SLAB cache for signal_struct structures (tsk->signal) */
96static struct kmem_cache *signal_cachep; 114static struct kmem_cache *signal_cachep;
97 115
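An architecture that wants its own allocator now defines __HAVE_ARCH_THREAD_INFO_ALLOCATOR and supplies the pair itself; a hypothetical slab-backed override (the cache name is made up):

/* in the arch's <asm/thread_info.h> */
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

/* in arch code */
static struct kmem_cache *example_ti_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	return kmem_cache_alloc(example_ti_cache, GFP_KERNEL);
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(example_ti_cache, ti);
}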
@@ -307,6 +325,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
307 } 325 }
308 326
309 /* 327 /*
328 * Clear hugetlb-related page reserves for children. This only
329 * affects MAP_PRIVATE mappings. Faults generated by the child
330 * are not guaranteed to succeed, even if read-only
331 */
332 if (is_vm_hugetlb_page(tmp))
333 reset_vma_resv_huge_pages(tmp);
334
335 /*
310 * Link in the new vma and copy the page table entries. 336 * Link in the new vma and copy the page table entries.
311 */ 337 */
312 *pprev = tmp; 338 *pprev = tmp;
@@ -374,7 +400,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
374 INIT_LIST_HEAD(&mm->mmlist); 400 INIT_LIST_HEAD(&mm->mmlist);
375 mm->flags = (current->mm) ? current->mm->flags 401 mm->flags = (current->mm) ? current->mm->flags
376 : MMF_DUMP_FILTER_DEFAULT; 402 : MMF_DUMP_FILTER_DEFAULT;
377 mm->core_waiters = 0; 403 mm->core_state = NULL;
378 mm->nr_ptes = 0; 404 mm->nr_ptes = 0;
379 set_mm_counter(mm, file_rss, 0); 405 set_mm_counter(mm, file_rss, 0);
380 set_mm_counter(mm, anon_rss, 0); 406 set_mm_counter(mm, anon_rss, 0);
@@ -448,7 +474,7 @@ EXPORT_SYMBOL_GPL(mmput);
448/** 474/**
449 * get_task_mm - acquire a reference to the task's mm 475 * get_task_mm - acquire a reference to the task's mm
450 * 476 *
451 * Returns %NULL if the task has no mm. Checks PF_BORROWED_MM (meaning 477 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
452 * this kernel workthread has transiently adopted a user mm with use_mm, 478 * this kernel workthread has transiently adopted a user mm with use_mm,
453 * to do its AIO) is not set and if so returns a reference to it, after 479 * to do its AIO) is not set and if so returns a reference to it, after
454 * bumping up the use count. User must release the mm via mmput() 480 * bumping up the use count. User must release the mm via mmput()
@@ -461,7 +487,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
461 task_lock(task); 487 task_lock(task);
462 mm = task->mm; 488 mm = task->mm;
463 if (mm) { 489 if (mm) {
464 if (task->flags & PF_BORROWED_MM) 490 if (task->flags & PF_KTHREAD)
465 mm = NULL; 491 mm = NULL;
466 else 492 else
467 atomic_inc(&mm->mm_users); 493 atomic_inc(&mm->mm_users);
@@ -786,6 +812,12 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
786 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; 812 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
787 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; 813 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
788 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; 814 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
815#ifdef CONFIG_TASK_XACCT
816 sig->rchar = sig->wchar = sig->syscr = sig->syscw = 0;
817#endif
818#ifdef CONFIG_TASK_IO_ACCOUNTING
819 memset(&sig->ioac, 0, sizeof(sig->ioac));
820#endif
789 sig->sum_sched_runtime = 0; 821 sig->sum_sched_runtime = 0;
790 INIT_LIST_HEAD(&sig->cpu_timers[0]); 822 INIT_LIST_HEAD(&sig->cpu_timers[0]);
791 INIT_LIST_HEAD(&sig->cpu_timers[1]); 823 INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -1081,6 +1113,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1081 if (clone_flags & CLONE_THREAD) 1113 if (clone_flags & CLONE_THREAD)
1082 p->tgid = current->tgid; 1114 p->tgid = current->tgid;
1083 1115
1116 if (current->nsproxy != p->nsproxy) {
1117 retval = ns_cgroup_clone(p, pid);
1118 if (retval)
1119 goto bad_fork_free_pid;
1120 }
1121
1084 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; 1122 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1085 /* 1123 /*
1086 * Clear TID on mm_release()? 1124 * Clear TID on mm_release()?
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 77a51be36010..f8914b92b664 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -217,6 +217,17 @@ void enable_irq(unsigned int irq)
217} 217}
218EXPORT_SYMBOL(enable_irq); 218EXPORT_SYMBOL(enable_irq);
219 219
220int set_irq_wake_real(unsigned int irq, unsigned int on)
221{
222 struct irq_desc *desc = irq_desc + irq;
223 int ret = -ENXIO;
224
225 if (desc->chip->set_wake)
226 ret = desc->chip->set_wake(irq, on);
227
228 return ret;
229}
230
220/** 231/**
221 * set_irq_wake - control irq power management wakeup 232 * set_irq_wake - control irq power management wakeup
222 * @irq: interrupt to control 233 * @irq: interrupt to control
@@ -233,30 +244,32 @@ int set_irq_wake(unsigned int irq, unsigned int on)
233{ 244{
234 struct irq_desc *desc = irq_desc + irq; 245 struct irq_desc *desc = irq_desc + irq;
235 unsigned long flags; 246 unsigned long flags;
236 int ret = -ENXIO; 247 int ret = 0;
237 int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
238 248
239 /* wakeup-capable irqs can be shared between drivers that 249 /* wakeup-capable irqs can be shared between drivers that
240 * don't need to have the same sleep mode behaviors. 250 * don't need to have the same sleep mode behaviors.
241 */ 251 */
242 spin_lock_irqsave(&desc->lock, flags); 252 spin_lock_irqsave(&desc->lock, flags);
243 if (on) { 253 if (on) {
244 if (desc->wake_depth++ == 0) 254 if (desc->wake_depth++ == 0) {
245 desc->status |= IRQ_WAKEUP; 255 ret = set_irq_wake_real(irq, on);
246 else 256 if (ret)
247 set_wake = NULL; 257 desc->wake_depth = 0;
258 else
259 desc->status |= IRQ_WAKEUP;
260 }
248 } else { 261 } else {
249 if (desc->wake_depth == 0) { 262 if (desc->wake_depth == 0) {
250 printk(KERN_WARNING "Unbalanced IRQ %d " 263 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
251 "wake disable\n", irq); 264 } else if (--desc->wake_depth == 0) {
252 WARN_ON(1); 265 ret = set_irq_wake_real(irq, on);
253 } else if (--desc->wake_depth == 0) 266 if (ret)
254 desc->status &= ~IRQ_WAKEUP; 267 desc->wake_depth = 1;
255 else 268 else
256 set_wake = NULL; 269 desc->status &= ~IRQ_WAKEUP;
270 }
257 } 271 }
258 if (set_wake) 272
259 ret = desc->chip->set_wake(irq, on);
260 spin_unlock_irqrestore(&desc->lock, flags); 273 spin_unlock_irqrestore(&desc->lock, flags);
261 return ret; 274 return ret;
262} 275}
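Driver-side usage is unchanged; the rework only keeps wake_depth consistent when the chip's ->set_wake() fails. A typical (illustrative) pairing in a platform driver's suspend/resume path, with made-up names:

struct example_priv {
	int irq;
};

static int example_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(priv->irq);	/* set_irq_wake(irq, 1) */
	return 0;
}

static int example_resume(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	if (device_may_wakeup(&pdev->dev))
		disable_irq_wake(priv->irq);	/* set_irq_wake(irq, 0) */
	return 0;
}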
@@ -293,6 +306,30 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
293 desc->handle_irq = NULL; 306 desc->handle_irq = NULL;
294} 307}
295 308
309static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
310 unsigned long flags)
311{
312 int ret;
313
314 if (!chip || !chip->set_type) {
315 /*
316 * IRQF_TRIGGER_* but the PIC does not support multiple
317 * flow-types?
318 */
319 pr_warning("No set_type function for IRQ %d (%s)\n", irq,
320 chip ? (chip->name ? : "unknown") : "unknown");
321 return 0;
322 }
323
324 ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
325
326 if (ret)
327 pr_err("setting flow type for irq %u failed (%pF)\n",
328 irq, chip->set_type);
329
330 return ret;
331}
332
296/* 333/*
297 * Internal function to register an irqaction - typically used to 334 * Internal function to register an irqaction - typically used to
298 * allocate special interrupts that are part of the architecture. 335 * allocate special interrupts that are part of the architecture.
@@ -304,6 +341,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
304 const char *old_name = NULL; 341 const char *old_name = NULL;
305 unsigned long flags; 342 unsigned long flags;
306 int shared = 0; 343 int shared = 0;
344 int ret;
307 345
308 if (irq >= NR_IRQS) 346 if (irq >= NR_IRQS)
309 return -EINVAL; 347 return -EINVAL;
@@ -361,35 +399,23 @@ int setup_irq(unsigned int irq, struct irqaction *new)
361 shared = 1; 399 shared = 1;
362 } 400 }
363 401
364 *p = new;
365
366 /* Exclude IRQ from balancing */
367 if (new->flags & IRQF_NOBALANCING)
368 desc->status |= IRQ_NO_BALANCING;
369
370 if (!shared) { 402 if (!shared) {
371 irq_chip_set_defaults(desc->chip); 403 irq_chip_set_defaults(desc->chip);
372 404
373#if defined(CONFIG_IRQ_PER_CPU)
374 if (new->flags & IRQF_PERCPU)
375 desc->status |= IRQ_PER_CPU;
376#endif
377
378 /* Setup the type (level, edge polarity) if configured: */ 405 /* Setup the type (level, edge polarity) if configured: */
379 if (new->flags & IRQF_TRIGGER_MASK) { 406 if (new->flags & IRQF_TRIGGER_MASK) {
380 if (desc->chip->set_type) 407 ret = __irq_set_trigger(desc->chip, irq, new->flags);
381 desc->chip->set_type(irq, 408
382 new->flags & IRQF_TRIGGER_MASK); 409 if (ret) {
383 else 410 spin_unlock_irqrestore(&desc->lock, flags);
384 /* 411 return ret;
385 * IRQF_TRIGGER_* but the PIC does not support 412 }
386 * multiple flow-types?
387 */
388 printk(KERN_WARNING "No IRQF_TRIGGER set_type "
389 "function for IRQ %d (%s)\n", irq,
390 desc->chip->name);
391 } else 413 } else
392 compat_irq_chip_set_default_handler(desc); 414 compat_irq_chip_set_default_handler(desc);
415#if defined(CONFIG_IRQ_PER_CPU)
416 if (new->flags & IRQF_PERCPU)
417 desc->status |= IRQ_PER_CPU;
418#endif
393 419
394 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | 420 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
395 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); 421 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
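The visible effect for drivers: an IRQF_TRIGGER_* request the PIC cannot honour now fails request_irq() instead of being silently dropped. A hedged sketch with made-up names:

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	ret = request_irq(irq, example_handler, IRQF_TRIGGER_RISING,
			  "example", pdev);
	if (ret)
		dev_err(&pdev->dev, "request_irq(%d) failed: %d\n", irq, ret);
	return ret;
}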
@@ -408,6 +434,13 @@ int setup_irq(unsigned int irq, struct irqaction *new)
408 /* Set default affinity mask once everything is setup */ 434 /* Set default affinity mask once everything is setup */
409 irq_select_affinity(irq); 435 irq_select_affinity(irq);
410 } 436 }
437
438 *p = new;
439
440 /* Exclude IRQ from balancing */
441 if (new->flags & IRQF_NOBALANCING)
442 desc->status |= IRQ_NO_BALANCING;
443
411 /* Reset broken irq detection when installing new handler */ 444 /* Reset broken irq detection when installing new handler */
412 desc->irq_count = 0; 445 desc->irq_count = 0;
413 desc->irqs_unhandled = 0; 446 desc->irqs_unhandled = 0;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 6fc0040f3e3a..38fc10ac7541 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -176,7 +176,7 @@ static unsigned long get_symbol_pos(unsigned long addr,
176 high = kallsyms_num_syms; 176 high = kallsyms_num_syms;
177 177
178 while (high - low > 1) { 178 while (high - low > 1) {
179 mid = (low + high) / 2; 179 mid = low + (high - low) / 2;
180 if (kallsyms_addresses[mid] <= addr) 180 if (kallsyms_addresses[mid] <= addr)
181 low = mid; 181 low = mid;
182 else 182 else
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 90d7af1c1655..2456d1a0befb 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -352,16 +352,17 @@ static inline void register_pm_notifier_callback(void) {}
352 * @path: path to usermode executable 352 * @path: path to usermode executable
353 * @argv: arg vector for process 353 * @argv: arg vector for process
354 * @envp: environment for process 354 * @envp: environment for process
355 * @gfp_mask: gfp mask for memory allocation
355 * 356 *
356 * Returns either %NULL on allocation failure, or a subprocess_info 357 * Returns either %NULL on allocation failure, or a subprocess_info
357 * structure. This should be passed to call_usermodehelper_exec to 358 * structure. This should be passed to call_usermodehelper_exec to
358 * exec the process and free the structure. 359 * exec the process and free the structure.
359 */ 360 */
360struct subprocess_info *call_usermodehelper_setup(char *path, 361struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
361 char **argv, char **envp) 362 char **envp, gfp_t gfp_mask)
362{ 363{
363 struct subprocess_info *sub_info; 364 struct subprocess_info *sub_info;
364 sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC); 365 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
365 if (!sub_info) 366 if (!sub_info)
366 goto out; 367 goto out;
367 368
@@ -417,12 +418,12 @@ int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
417{ 418{
418 struct file *f; 419 struct file *f;
419 420
420 f = create_write_pipe(); 421 f = create_write_pipe(0);
421 if (IS_ERR(f)) 422 if (IS_ERR(f))
422 return PTR_ERR(f); 423 return PTR_ERR(f);
423 *filp = f; 424 *filp = f;
424 425
425 f = create_read_pipe(f); 426 f = create_read_pipe(f, 0);
426 if (IS_ERR(f)) { 427 if (IS_ERR(f)) {
427 free_write_pipe(*filp); 428 free_write_pipe(*filp);
428 return PTR_ERR(f); 429 return PTR_ERR(f);
@@ -494,7 +495,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
494 struct subprocess_info *sub_info; 495 struct subprocess_info *sub_info;
495 int ret; 496 int ret;
496 497
497 sub_info = call_usermodehelper_setup(path, argv, envp); 498 sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
498 if (sub_info == NULL) 499 if (sub_info == NULL)
499 return -ENOMEM; 500 return -ENOMEM;
500 501
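Callers now pick the allocation context for the subprocess_info itself; a minimal sketch of the common setup/exec pairing (names and paths are placeholders):

static int example_run_helper(char *path, char **argv, char **envp)
{
	struct subprocess_info *info;

	/* GFP_KERNEL from process context, GFP_ATOMIC if we cannot sleep */
	info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}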
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1485ca8d0e00..75bc2cd9ebc6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -62,6 +62,7 @@
62 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) 62 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
63#endif 63#endif
64 64
65static int kprobes_initialized;
65static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 66static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
66static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; 67static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
67 68
@@ -69,8 +70,15 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
69static bool kprobe_enabled; 70static bool kprobe_enabled;
70 71
71DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ 72DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
72DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
73static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 73static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
74static struct {
75 spinlock_t lock ____cacheline_aligned;
76} kretprobe_table_locks[KPROBE_TABLE_SIZE];
77
78static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
79{
80 return &(kretprobe_table_locks[hash].lock);
81}
74 82
75/* 83/*
76 * Normally, functions that we'd want to prohibit kprobes in, are marked 84 * Normally, functions that we'd want to prohibit kprobes in, are marked
@@ -368,26 +376,53 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
368 return; 376 return;
369} 377}
370 378
371/* Called with kretprobe_lock held */
372void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, 379void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
373 struct hlist_head *head) 380 struct hlist_head *head)
374{ 381{
382 struct kretprobe *rp = ri->rp;
383
375 /* remove rp inst off the rprobe_inst_table */ 384 /* remove rp inst off the rprobe_inst_table */
376 hlist_del(&ri->hlist); 385 hlist_del(&ri->hlist);
377 if (ri->rp) { 386 INIT_HLIST_NODE(&ri->hlist);
378 /* remove rp inst off the used list */ 387 if (likely(rp)) {
379 hlist_del(&ri->uflist); 388 spin_lock(&rp->lock);
380 /* put rp inst back onto the free list */ 389 hlist_add_head(&ri->hlist, &rp->free_instances);
381 INIT_HLIST_NODE(&ri->uflist); 390 spin_unlock(&rp->lock);
382 hlist_add_head(&ri->uflist, &ri->rp->free_instances);
383 } else 391 } else
384 /* Unregistering */ 392 /* Unregistering */
385 hlist_add_head(&ri->hlist, head); 393 hlist_add_head(&ri->hlist, head);
386} 394}
387 395
388struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk) 396void kretprobe_hash_lock(struct task_struct *tsk,
397 struct hlist_head **head, unsigned long *flags)
389{ 398{
390 return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; 399 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
400 spinlock_t *hlist_lock;
401
402 *head = &kretprobe_inst_table[hash];
403 hlist_lock = kretprobe_table_lock_ptr(hash);
404 spin_lock_irqsave(hlist_lock, *flags);
405}
406
407void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
408{
409 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
410 spin_lock_irqsave(hlist_lock, *flags);
411}
412
413void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
414{
415 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
416 spinlock_t *hlist_lock;
417
418 hlist_lock = kretprobe_table_lock_ptr(hash);
419 spin_unlock_irqrestore(hlist_lock, *flags);
420}
421
422void kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
423{
424 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
425 spin_unlock_irqrestore(hlist_lock, *flags);
391} 426}
392 427
393/* 428/*
@@ -401,17 +436,21 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
401 struct kretprobe_instance *ri; 436 struct kretprobe_instance *ri;
402 struct hlist_head *head, empty_rp; 437 struct hlist_head *head, empty_rp;
403 struct hlist_node *node, *tmp; 438 struct hlist_node *node, *tmp;
404 unsigned long flags = 0; 439 unsigned long hash, flags = 0;
405 440
406 INIT_HLIST_HEAD(&empty_rp); 441 if (unlikely(!kprobes_initialized))
407 spin_lock_irqsave(&kretprobe_lock, flags); 442 /* Early boot. kretprobe_table_locks not yet initialized. */
408 head = kretprobe_inst_table_head(tk); 443 return;
444
445 hash = hash_ptr(tk, KPROBE_HASH_BITS);
446 head = &kretprobe_inst_table[hash];
447 kretprobe_table_lock(hash, &flags);
409 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 448 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
410 if (ri->task == tk) 449 if (ri->task == tk)
411 recycle_rp_inst(ri, &empty_rp); 450 recycle_rp_inst(ri, &empty_rp);
412 } 451 }
413 spin_unlock_irqrestore(&kretprobe_lock, flags); 452 kretprobe_table_unlock(hash, &flags);
414 453 INIT_HLIST_HEAD(&empty_rp);
415 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 454 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
416 hlist_del(&ri->hlist); 455 hlist_del(&ri->hlist);
417 kfree(ri); 456 kfree(ri);
@@ -423,24 +462,29 @@ static inline void free_rp_inst(struct kretprobe *rp)
423 struct kretprobe_instance *ri; 462 struct kretprobe_instance *ri;
424 struct hlist_node *pos, *next; 463 struct hlist_node *pos, *next;
425 464
426 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) { 465 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
427 hlist_del(&ri->uflist); 466 hlist_del(&ri->hlist);
428 kfree(ri); 467 kfree(ri);
429 } 468 }
430} 469}
431 470
432static void __kprobes cleanup_rp_inst(struct kretprobe *rp) 471static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
433{ 472{
434 unsigned long flags; 473 unsigned long flags, hash;
435 struct kretprobe_instance *ri; 474 struct kretprobe_instance *ri;
436 struct hlist_node *pos, *next; 475 struct hlist_node *pos, *next;
476 struct hlist_head *head;
477
437 /* No race here */ 478 /* No race here */
438 spin_lock_irqsave(&kretprobe_lock, flags); 479 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
439 hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) { 480 kretprobe_table_lock(hash, &flags);
440 ri->rp = NULL; 481 head = &kretprobe_inst_table[hash];
441 hlist_del(&ri->uflist); 482 hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
483 if (ri->rp == rp)
484 ri->rp = NULL;
485 }
486 kretprobe_table_unlock(hash, &flags);
442 } 487 }
443 spin_unlock_irqrestore(&kretprobe_lock, flags);
444 free_rp_inst(rp); 488 free_rp_inst(rp);
445} 489}
446 490
@@ -831,32 +875,37 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
831 struct pt_regs *regs) 875 struct pt_regs *regs)
832{ 876{
833 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 877 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
834 unsigned long flags = 0; 878 unsigned long hash, flags = 0;
879 struct kretprobe_instance *ri;
835 880
836 /*TODO: consider to only swap the RA after the last pre_handler fired */ 881 /*TODO: consider to only swap the RA after the last pre_handler fired */
837 spin_lock_irqsave(&kretprobe_lock, flags); 882 hash = hash_ptr(current, KPROBE_HASH_BITS);
883 spin_lock_irqsave(&rp->lock, flags);
838 if (!hlist_empty(&rp->free_instances)) { 884 if (!hlist_empty(&rp->free_instances)) {
839 struct kretprobe_instance *ri;
840
841 ri = hlist_entry(rp->free_instances.first, 885 ri = hlist_entry(rp->free_instances.first,
842 struct kretprobe_instance, uflist); 886 struct kretprobe_instance, hlist);
887 hlist_del(&ri->hlist);
888 spin_unlock_irqrestore(&rp->lock, flags);
889
843 ri->rp = rp; 890 ri->rp = rp;
844 ri->task = current; 891 ri->task = current;
845 892
846 if (rp->entry_handler && rp->entry_handler(ri, regs)) { 893 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
847 spin_unlock_irqrestore(&kretprobe_lock, flags); 894 spin_unlock_irqrestore(&rp->lock, flags);
848 return 0; 895 return 0;
849 } 896 }
850 897
851 arch_prepare_kretprobe(ri, regs); 898 arch_prepare_kretprobe(ri, regs);
852 899
853 /* XXX(hch): why is there no hlist_move_head? */ 900 /* XXX(hch): why is there no hlist_move_head? */
854 hlist_del(&ri->uflist); 901 INIT_HLIST_NODE(&ri->hlist);
855 hlist_add_head(&ri->uflist, &ri->rp->used_instances); 902 kretprobe_table_lock(hash, &flags);
856 hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task)); 903 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
857 } else 904 kretprobe_table_unlock(hash, &flags);
905 } else {
858 rp->nmissed++; 906 rp->nmissed++;
859 spin_unlock_irqrestore(&kretprobe_lock, flags); 907 spin_unlock_irqrestore(&rp->lock, flags);
908 }
860 return 0; 909 return 0;
861} 910}
862 911
@@ -892,7 +941,7 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp,
892 rp->maxactive = NR_CPUS; 941 rp->maxactive = NR_CPUS;
893#endif 942#endif
894 } 943 }
895 INIT_HLIST_HEAD(&rp->used_instances); 944 spin_lock_init(&rp->lock);
896 INIT_HLIST_HEAD(&rp->free_instances); 945 INIT_HLIST_HEAD(&rp->free_instances);
897 for (i = 0; i < rp->maxactive; i++) { 946 for (i = 0; i < rp->maxactive; i++) {
898 inst = kmalloc(sizeof(struct kretprobe_instance) + 947 inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -901,8 +950,8 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp,
901 free_rp_inst(rp); 950 free_rp_inst(rp);
902 return -ENOMEM; 951 return -ENOMEM;
903 } 952 }
904 INIT_HLIST_NODE(&inst->uflist); 953 INIT_HLIST_NODE(&inst->hlist);
905 hlist_add_head(&inst->uflist, &rp->free_instances); 954 hlist_add_head(&inst->hlist, &rp->free_instances);
906 } 955 }
907 956
908 rp->nmissed = 0; 957 rp->nmissed = 0;
@@ -1009,6 +1058,7 @@ static int __init init_kprobes(void)
1009 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1058 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1010 INIT_HLIST_HEAD(&kprobe_table[i]); 1059 INIT_HLIST_HEAD(&kprobe_table[i]);
1011 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 1060 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
1061 spin_lock_init(&(kretprobe_table_locks[i].lock));
1012 } 1062 }
1013 1063
1014 /* 1064 /*
@@ -1050,6 +1100,7 @@ static int __init init_kprobes(void)
1050 err = arch_init_kprobes(); 1100 err = arch_init_kprobes();
1051 if (!err) 1101 if (!err)
1052 err = register_die_notifier(&kprobe_exceptions_nb); 1102 err = register_die_notifier(&kprobe_exceptions_nb);
1103 kprobes_initialized = (err == 0);
1053 1104
1054 if (!err) 1105 if (!err)
1055 init_test_probes(); 1106 init_test_probes();
@@ -1286,13 +1337,8 @@ EXPORT_SYMBOL_GPL(register_jprobe);
1286EXPORT_SYMBOL_GPL(unregister_jprobe); 1337EXPORT_SYMBOL_GPL(unregister_jprobe);
1287EXPORT_SYMBOL_GPL(register_jprobes); 1338EXPORT_SYMBOL_GPL(register_jprobes);
1288EXPORT_SYMBOL_GPL(unregister_jprobes); 1339EXPORT_SYMBOL_GPL(unregister_jprobes);
1289#ifdef CONFIG_KPROBES
1290EXPORT_SYMBOL_GPL(jprobe_return); 1340EXPORT_SYMBOL_GPL(jprobe_return);
1291#endif
1292
1293#ifdef CONFIG_KPROBES
1294EXPORT_SYMBOL_GPL(register_kretprobe); 1341EXPORT_SYMBOL_GPL(register_kretprobe);
1295EXPORT_SYMBOL_GPL(unregister_kretprobe); 1342EXPORT_SYMBOL_GPL(unregister_kretprobe);
1296EXPORT_SYMBOL_GPL(register_kretprobes); 1343EXPORT_SYMBOL_GPL(register_kretprobes);
1297EXPORT_SYMBOL_GPL(unregister_kretprobes); 1344EXPORT_SYMBOL_GPL(unregister_kretprobes);
1298#endif
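
Taken together, the kprobes.c hunks replace the single global kretprobe_lock with a spinlock per kretprobe (guarding its free_instances list) plus a per-bucket lock on the task-hashed instance table, so unrelated return probes no longer contend on one lock. The registration API is unchanged; a minimal sketch of a user, with the probed symbol and maxactive value chosen purely for illustration:

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* runs when the probed function returns; ri->task is the returning task */
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "do_fork",	/* placeholder target */
	.handler	= my_ret_handler,
	.maxactive	= 20,		/* instances preallocated onto free_instances */
};

	/* in module init */
	ret = register_kretprobe(&my_kretprobe);

	/* in module exit */
	unregister_kretprobe(&my_kretprobe);
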
diff --git a/kernel/kthread.c b/kernel/kthread.c
index ac3fb7326641..6111c27491b1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -106,7 +106,7 @@ static void create_kthread(struct kthread_create_info *create)
106 */ 106 */
107 sched_setscheduler(create->result, SCHED_NORMAL, &param); 107 sched_setscheduler(create->result, SCHED_NORMAL, &param);
108 set_user_nice(create->result, KTHREAD_NICE_LEVEL); 108 set_user_nice(create->result, KTHREAD_NICE_LEVEL);
109 set_cpus_allowed(create->result, CPU_MASK_ALL); 109 set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
110 } 110 }
111 complete(&create->done); 111 complete(&create->done);
112} 112}
@@ -233,7 +233,7 @@ int kthreadd(void *unused)
233 set_task_comm(tsk, "kthreadd"); 233 set_task_comm(tsk, "kthreadd");
234 ignore_signals(tsk); 234 ignore_signals(tsk);
235 set_user_nice(tsk, KTHREAD_NICE_LEVEL); 235 set_user_nice(tsk, KTHREAD_NICE_LEVEL);
236 set_cpus_allowed(tsk, CPU_MASK_ALL); 236 set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);
237 237
238 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; 238 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
239 239
diff --git a/kernel/marker.c b/kernel/marker.c
index 1abfb923b761..971da5317903 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -441,7 +441,7 @@ static int remove_marker(const char *name)
441 hlist_del(&e->hlist); 441 hlist_del(&e->hlist);
442 /* Make sure the call_rcu has been executed */ 442 /* Make sure the call_rcu has been executed */
443 if (e->rcu_pending) 443 if (e->rcu_pending)
444 rcu_barrier(); 444 rcu_barrier_sched();
445 kfree(e); 445 kfree(e);
446 return 0; 446 return 0;
447} 447}
@@ -476,7 +476,7 @@ static int marker_set_format(struct marker_entry **entry, const char *format)
476 hlist_del(&(*entry)->hlist); 476 hlist_del(&(*entry)->hlist);
477 /* Make sure the call_rcu has been executed */ 477 /* Make sure the call_rcu has been executed */
478 if ((*entry)->rcu_pending) 478 if ((*entry)->rcu_pending)
479 rcu_barrier(); 479 rcu_barrier_sched();
480 kfree(*entry); 480 kfree(*entry);
481 *entry = e; 481 *entry = e;
482 trace_mark(core_marker_format, "name %s format %s", 482 trace_mark(core_marker_format, "name %s format %s",
@@ -655,7 +655,7 @@ int marker_probe_register(const char *name, const char *format,
655 * make sure it's executed now. 655 * make sure it's executed now.
656 */ 656 */
657 if (entry->rcu_pending) 657 if (entry->rcu_pending)
658 rcu_barrier(); 658 rcu_barrier_sched();
659 old = marker_entry_add_probe(entry, probe, probe_private); 659 old = marker_entry_add_probe(entry, probe, probe_private);
660 if (IS_ERR(old)) { 660 if (IS_ERR(old)) {
661 ret = PTR_ERR(old); 661 ret = PTR_ERR(old);
@@ -670,10 +670,7 @@ int marker_probe_register(const char *name, const char *format,
670 entry->rcu_pending = 1; 670 entry->rcu_pending = 1;
671 /* write rcu_pending before calling the RCU callback */ 671 /* write rcu_pending before calling the RCU callback */
672 smp_wmb(); 672 smp_wmb();
673#ifdef CONFIG_PREEMPT_RCU 673 call_rcu_sched(&entry->rcu, free_old_closure);
674 synchronize_sched(); /* Until we have the call_rcu_sched() */
675#endif
676 call_rcu(&entry->rcu, free_old_closure);
677end: 674end:
678 mutex_unlock(&markers_mutex); 675 mutex_unlock(&markers_mutex);
679 return ret; 676 return ret;
@@ -704,7 +701,7 @@ int marker_probe_unregister(const char *name,
704 if (!entry) 701 if (!entry)
705 goto end; 702 goto end;
706 if (entry->rcu_pending) 703 if (entry->rcu_pending)
707 rcu_barrier(); 704 rcu_barrier_sched();
708 old = marker_entry_remove_probe(entry, probe, probe_private); 705 old = marker_entry_remove_probe(entry, probe, probe_private);
709 mutex_unlock(&markers_mutex); 706 mutex_unlock(&markers_mutex);
710 marker_update_probes(); /* may update entry */ 707 marker_update_probes(); /* may update entry */
@@ -716,10 +713,7 @@ int marker_probe_unregister(const char *name,
716 entry->rcu_pending = 1; 713 entry->rcu_pending = 1;
717 /* write rcu_pending before calling the RCU callback */ 714 /* write rcu_pending before calling the RCU callback */
718 smp_wmb(); 715 smp_wmb();
719#ifdef CONFIG_PREEMPT_RCU 716 call_rcu_sched(&entry->rcu, free_old_closure);
720 synchronize_sched(); /* Until we have the call_rcu_sched() */
721#endif
722 call_rcu(&entry->rcu, free_old_closure);
723 remove_marker(name); /* Ignore busy error message */ 717 remove_marker(name); /* Ignore busy error message */
724 ret = 0; 718 ret = 0;
725end: 719end:
@@ -786,7 +780,7 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
786 goto end; 780 goto end;
787 } 781 }
788 if (entry->rcu_pending) 782 if (entry->rcu_pending)
789 rcu_barrier(); 783 rcu_barrier_sched();
790 old = marker_entry_remove_probe(entry, NULL, probe_private); 784 old = marker_entry_remove_probe(entry, NULL, probe_private);
791 mutex_unlock(&markers_mutex); 785 mutex_unlock(&markers_mutex);
792 marker_update_probes(); /* may update entry */ 786 marker_update_probes(); /* may update entry */
@@ -797,10 +791,7 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
797 entry->rcu_pending = 1; 791 entry->rcu_pending = 1;
798 /* write rcu_pending before calling the RCU callback */ 792 /* write rcu_pending before calling the RCU callback */
799 smp_wmb(); 793 smp_wmb();
800#ifdef CONFIG_PREEMPT_RCU 794 call_rcu_sched(&entry->rcu, free_old_closure);
801 synchronize_sched(); /* Until we have the call_rcu_sched() */
802#endif
803 call_rcu(&entry->rcu, free_old_closure);
804 remove_marker(entry->name); /* Ignore busy error message */ 795 remove_marker(entry->name); /* Ignore busy error message */
805end: 796end:
806 mutex_unlock(&markers_mutex); 797 mutex_unlock(&markers_mutex);
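
The marker.c hunks exist because preemptible RCU's call_rcu() does not wait for preempt-disabled readers, which is the flavour the marker fast path relies on; the code used to work around that with an explicit synchronize_sched() before call_rcu(). With call_rcu_sched() available, both the deferred free and the pre-free rcu_barrier_sched() match the read side directly. The general pattern, with the structure and callback names invented here:

struct my_entry {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void my_entry_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_entry, rcu));
}

	/* defer the free until all preempt-disabled readers have finished */
	call_rcu_sched(&e->rcu, my_entry_free);

	/* before final teardown, wait for any such callbacks still in flight */
	rcu_barrier_sched();
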
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 48d7ed6fc3a4..43c2111cd54d 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -7,6 +7,7 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/cgroup.h> 8#include <linux/cgroup.h>
9#include <linux/fs.h> 9#include <linux/fs.h>
10#include <linux/proc_fs.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <linux/nsproxy.h> 12#include <linux/nsproxy.h>
12 13
@@ -24,9 +25,12 @@ static inline struct ns_cgroup *cgroup_to_ns(
24 struct ns_cgroup, css); 25 struct ns_cgroup, css);
25} 26}
26 27
27int ns_cgroup_clone(struct task_struct *task) 28int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
28{ 29{
29 return cgroup_clone(task, &ns_subsys); 30 char name[PROC_NUMBUF];
31
32 snprintf(name, PROC_NUMBUF, "%d", pid_vnr(pid));
33 return cgroup_clone(task, &ns_subsys, name);
30} 34}
31 35
32/* 36/*
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index adc785146a1c..21575fc46d05 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -157,12 +157,6 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
157 goto out; 157 goto out;
158 } 158 }
159 159
160 err = ns_cgroup_clone(tsk);
161 if (err) {
162 put_nsproxy(new_ns);
163 goto out;
164 }
165
166 tsk->nsproxy = new_ns; 160 tsk->nsproxy = new_ns;
167 161
168out: 162out:
@@ -209,7 +203,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
209 goto out; 203 goto out;
210 } 204 }
211 205
212 err = ns_cgroup_clone(current); 206 err = ns_cgroup_clone(current, task_pid(current));
213 if (err) 207 if (err)
214 put_nsproxy(*new_nsp); 208 put_nsproxy(*new_nsp);
215 209
diff --git a/kernel/panic.c b/kernel/panic.c
index 425567f45b9f..12c5a0a6c89b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -318,6 +318,28 @@ void warn_on_slowpath(const char *file, int line)
318 add_taint(TAINT_WARN); 318 add_taint(TAINT_WARN);
319} 319}
320EXPORT_SYMBOL(warn_on_slowpath); 320EXPORT_SYMBOL(warn_on_slowpath);
321
322
323void warn_slowpath(const char *file, int line, const char *fmt, ...)
324{
325 va_list args;
326 char function[KSYM_SYMBOL_LEN];
327 unsigned long caller = (unsigned long)__builtin_return_address(0);
328 sprint_symbol(function, caller);
329
330 printk(KERN_WARNING "------------[ cut here ]------------\n");
331 printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
332 line, function);
333 va_start(args, fmt);
334 vprintk(fmt, args);
335 va_end(args);
336
337 print_modules();
338 dump_stack();
339 print_oops_end_marker();
340 add_taint(TAINT_WARN);
341}
342EXPORT_SYMBOL(warn_slowpath);
321#endif 343#endif
322 344
323#ifdef CONFIG_CC_STACKPROTECTOR 345#ifdef CONFIG_CC_STACKPROTECTOR
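
warn_slowpath() is the formatted counterpart of warn_on_slowpath(): the same "cut here" banner, caller symbol, module list, stack dump and TAINT_WARN, plus a printf-style message. A sketch of a direct call matching the signature above; the condition and variables are hypothetical, and in practice the call sits behind a WARN()-style macro that supplies __FILE__ and __LINE__:

	if (unlikely(retries > MAX_RETRIES))
		warn_slowpath(__FILE__, __LINE__,
			      "giving up after %d retries\n", retries);
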
diff --git a/kernel/pid.c b/kernel/pid.c
index 30bd5d4b2ac7..064e76afa507 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -309,12 +309,6 @@ struct pid *find_vpid(int nr)
309} 309}
310EXPORT_SYMBOL_GPL(find_vpid); 310EXPORT_SYMBOL_GPL(find_vpid);
311 311
312struct pid *find_pid(int nr)
313{
314 return find_pid_ns(nr, &init_pid_ns);
315}
316EXPORT_SYMBOL_GPL(find_pid);
317
318/* 312/*
319 * attach_pid() must be called with the tasklist_lock write-held. 313 * attach_pid() must be called with the tasklist_lock write-held.
320 */ 314 */
@@ -435,6 +429,7 @@ struct pid *find_get_pid(pid_t nr)
435 429
436 return pid; 430 return pid;
437} 431}
432EXPORT_SYMBOL_GPL(find_get_pid);
438 433
439pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) 434pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
440{ 435{
@@ -482,7 +477,7 @@ EXPORT_SYMBOL(task_session_nr_ns);
482/* 477/*
483 * Used by proc to find the first pid that is greater then or equal to nr. 478 * Used by proc to find the first pid that is greater then or equal to nr.
484 * 479 *
485 * If there is a pid at nr this function is exactly the same as find_pid. 480 * If there is a pid at nr this function is exactly the same as find_pid_ns.
486 */ 481 */
487struct pid *find_ge_pid(int nr, struct pid_namespace *ns) 482struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
488{ 483{
@@ -497,7 +492,6 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
497 492
498 return pid; 493 return pid;
499} 494}
500EXPORT_SYMBOL_GPL(find_get_pid);
501 495
502/* 496/*
503 * The pid hash table is scaled according to the amount of memory in the 497 * The pid hash table is scaled according to the amount of memory in the
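
With find_pid() removed, pid lookups either use find_vpid(), which interprets the number in the caller's pid namespace, or find_pid_ns() with an explicit namespace. A sketch of the usual RCU-protected lookup from number to task_struct, with error handling trimmed:

	struct pid *pid;
	struct task_struct *task = NULL;

	rcu_read_lock();
	pid = find_vpid(nr);			/* nr as current's namespace sees it */
	if (pid)
		task = pid_task(pid, PIDTYPE_PID);
	if (task)
		get_task_struct(task);		/* hold a reference before leaving RCU */
	rcu_read_unlock();
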
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 98702b4b8851..ea567b78d1aa 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -12,6 +12,7 @@
12#include <linux/pid_namespace.h> 12#include <linux/pid_namespace.h>
13#include <linux/syscalls.h> 13#include <linux/syscalls.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/acct.h>
15 16
16#define BITS_PER_PAGE (PAGE_SIZE*8) 17#define BITS_PER_PAGE (PAGE_SIZE*8)
17 18
@@ -71,7 +72,7 @@ static struct pid_namespace *create_pid_namespace(unsigned int level)
71 struct pid_namespace *ns; 72 struct pid_namespace *ns;
72 int i; 73 int i;
73 74
74 ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL); 75 ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
75 if (ns == NULL) 76 if (ns == NULL)
76 goto out; 77 goto out;
77 78
@@ -84,17 +85,13 @@ static struct pid_namespace *create_pid_namespace(unsigned int level)
84 goto out_free_map; 85 goto out_free_map;
85 86
86 kref_init(&ns->kref); 87 kref_init(&ns->kref);
87 ns->last_pid = 0;
88 ns->child_reaper = NULL;
89 ns->level = level; 88 ns->level = level;
90 89
91 set_bit(0, ns->pidmap[0].page); 90 set_bit(0, ns->pidmap[0].page);
92 atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); 91 atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
93 92
94 for (i = 1; i < PIDMAP_ENTRIES; i++) { 93 for (i = 1; i < PIDMAP_ENTRIES; i++)
95 ns->pidmap[i].page = NULL;
96 atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); 94 atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
97 }
98 95
99 return ns; 96 return ns;
100 97
@@ -185,6 +182,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
185 182
186 /* Child reaper for the pid namespace is going away */ 183 /* Child reaper for the pid namespace is going away */
187 pid_ns->child_reaper = NULL; 184 pid_ns->child_reaper = NULL;
185 acct_exit_ns(pid_ns);
188 return; 186 return;
189} 187}
190 188
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index dbd8398ddb0b..9a21681aa80f 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -449,9 +449,6 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
449 spin_unlock_irqrestore(&idr_lock, flags); 449 spin_unlock_irqrestore(&idr_lock, flags);
450 } 450 }
451 sigqueue_free(tmr->sigq); 451 sigqueue_free(tmr->sigq);
452 if (unlikely(tmr->it_process) &&
453 tmr->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
454 put_task_struct(tmr->it_process);
455 kmem_cache_free(posix_timers_cache, tmr); 452 kmem_cache_free(posix_timers_cache, tmr);
456} 453}
457 454
@@ -856,11 +853,10 @@ retry_delete:
856 * This keeps any tasks waiting on the spin lock from thinking 853 * This keeps any tasks waiting on the spin lock from thinking
857 * they got something (see the lock code above). 854 * they got something (see the lock code above).
858 */ 855 */
859 if (timer->it_process) { 856 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
860 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) 857 put_task_struct(timer->it_process);
861 put_task_struct(timer->it_process); 858 timer->it_process = NULL;
862 timer->it_process = NULL; 859
863 }
864 unlock_timer(timer, flags); 860 unlock_timer(timer, flags);
865 release_posix_timer(timer, IT_ID_SET); 861 release_posix_timer(timer, IT_ID_SET);
866 return 0; 862 return 0;
@@ -885,11 +881,10 @@ retry_delete:
885 * This keeps any tasks waiting on the spin lock from thinking 881 * This keeps any tasks waiting on the spin lock from thinking
886 * they got something (see the lock code above). 882 * they got something (see the lock code above).
887 */ 883 */
888 if (timer->it_process) { 884 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
889 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) 885 put_task_struct(timer->it_process);
890 put_task_struct(timer->it_process); 886 timer->it_process = NULL;
891 timer->it_process = NULL; 887
892 }
893 unlock_timer(timer, flags); 888 unlock_timer(timer, flags);
894 release_posix_timer(timer, IT_ID_SET); 889 release_posix_timer(timer, IT_ID_SET);
895} 890}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 59dfdf1e1d20..dcd165f92a88 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -94,6 +94,17 @@ config SUSPEND
94 powered and thus its contents are preserved, such as the 94 powered and thus its contents are preserved, such as the
95 suspend-to-RAM state (e.g. the ACPI S3 state). 95 suspend-to-RAM state (e.g. the ACPI S3 state).
96 96
97config PM_TEST_SUSPEND
98 bool "Test suspend/resume and wakealarm during bootup"
99 depends on SUSPEND && PM_DEBUG && RTC_LIB=y
100 ---help---
101 This option will let you suspend your machine during bootup, and
102 make it wake up a few seconds later using an RTC wakeup alarm.
103 Enable this with a kernel parameter like "test_suspend=mem".
104
105 You probably want to have your system's RTC driver statically
106 linked, ensuring that it's available when this test runs.
107
97config SUSPEND_FREEZER 108config SUSPEND_FREEZER
98 bool "Enable freezer for suspend to RAM/standby" \ 109 bool "Enable freezer for suspend to RAM/standby" \
99 if ARCH_WANTS_FREEZER_CONTROL || BROKEN 110 if ARCH_WANTS_FREEZER_CONTROL || BROKEN
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3398f4651aa1..95bff23ecdaa 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -132,6 +132,61 @@ static inline int suspend_test(int level) { return 0; }
132 132
133#ifdef CONFIG_SUSPEND 133#ifdef CONFIG_SUSPEND
134 134
135#ifdef CONFIG_PM_TEST_SUSPEND
136
137/*
138 * We test the system suspend code by setting an RTC wakealarm a short
139 * time in the future, then suspending. Suspending the devices won't
140 * normally take long ... some systems only need a few milliseconds.
141 *
142 * The time it takes is system-specific though, so when we test this
143 * during system bootup we allow a LOT of time.
144 */
145#define TEST_SUSPEND_SECONDS 5
146
147static unsigned long suspend_test_start_time;
148
149static void suspend_test_start(void)
150{
151 /* FIXME Use better timebase than "jiffies", ideally a clocksource.
152 * What we want is a hardware counter that will work correctly even
153 * during the irqs-are-off stages of the suspend/resume cycle...
154 */
155 suspend_test_start_time = jiffies;
156}
157
158static void suspend_test_finish(const char *label)
159{
160 long nj = jiffies - suspend_test_start_time;
161 unsigned msec;
162
163 msec = jiffies_to_msecs(abs(nj));
164 pr_info("PM: %s took %d.%03d seconds\n", label,
165 msec / 1000, msec % 1000);
166
167 /* Warning on suspend means the RTC alarm period needs to be
168 * larger -- the system was sooo slooowwww to suspend that the
169 * alarm (should have) fired before the system went to sleep!
170 *
171 * Warning on either suspend or resume also means the system
172 * has some performance issues. The stack dump of a WARN_ON
173 * is more likely to get the right attention than a printk...
174 */
175 WARN_ON(msec > (TEST_SUSPEND_SECONDS * 1000));
176}
177
178#else
179
180static void suspend_test_start(void)
181{
182}
183
184static void suspend_test_finish(const char *label)
185{
186}
187
188#endif
189
135/* This is just an arbitrary number */ 190/* This is just an arbitrary number */
136#define FREE_PAGE_NUMBER (100) 191#define FREE_PAGE_NUMBER (100)
137 192
@@ -266,12 +321,13 @@ int suspend_devices_and_enter(suspend_state_t state)
266 goto Close; 321 goto Close;
267 } 322 }
268 suspend_console(); 323 suspend_console();
324 suspend_test_start();
269 error = device_suspend(PMSG_SUSPEND); 325 error = device_suspend(PMSG_SUSPEND);
270 if (error) { 326 if (error) {
271 printk(KERN_ERR "PM: Some devices failed to suspend\n"); 327 printk(KERN_ERR "PM: Some devices failed to suspend\n");
272 goto Recover_platform; 328 goto Recover_platform;
273 } 329 }
274 330 suspend_test_finish("suspend devices");
275 if (suspend_test(TEST_DEVICES)) 331 if (suspend_test(TEST_DEVICES))
276 goto Recover_platform; 332 goto Recover_platform;
277 333
@@ -293,7 +349,9 @@ int suspend_devices_and_enter(suspend_state_t state)
293 if (suspend_ops->finish) 349 if (suspend_ops->finish)
294 suspend_ops->finish(); 350 suspend_ops->finish();
295 Resume_devices: 351 Resume_devices:
352 suspend_test_start();
296 device_resume(PMSG_RESUME); 353 device_resume(PMSG_RESUME);
354 suspend_test_finish("resume devices");
297 resume_console(); 355 resume_console();
298 Close: 356 Close:
299 if (suspend_ops->end) 357 if (suspend_ops->end)
@@ -521,3 +579,137 @@ static int __init pm_init(void)
521} 579}
522 580
523core_initcall(pm_init); 581core_initcall(pm_init);
582
583
584#ifdef CONFIG_PM_TEST_SUSPEND
585
586#include <linux/rtc.h>
587
588/*
589 * To test system suspend, we need a hands-off mechanism to resume the
590 * system. RTCs wake alarms are a common self-contained mechanism.
591 */
592
593static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
594{
595 static char err_readtime[] __initdata =
596 KERN_ERR "PM: can't read %s time, err %d\n";
597 static char err_wakealarm [] __initdata =
598 KERN_ERR "PM: can't set %s wakealarm, err %d\n";
599 static char err_suspend[] __initdata =
600 KERN_ERR "PM: suspend test failed, error %d\n";
601 static char info_test[] __initdata =
602 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
603
604 unsigned long now;
605 struct rtc_wkalrm alm;
606 int status;
607
608 /* this may fail if the RTC hasn't been initialized */
609 status = rtc_read_time(rtc, &alm.time);
610 if (status < 0) {
611 printk(err_readtime, rtc->dev.bus_id, status);
612 return;
613 }
614 rtc_tm_to_time(&alm.time, &now);
615
616 memset(&alm, 0, sizeof alm);
617 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
618 alm.enabled = true;
619
620 status = rtc_set_alarm(rtc, &alm);
621 if (status < 0) {
622 printk(err_wakealarm, rtc->dev.bus_id, status);
623 return;
624 }
625
626 if (state == PM_SUSPEND_MEM) {
627 printk(info_test, pm_states[state]);
628 status = pm_suspend(state);
629 if (status == -ENODEV)
630 state = PM_SUSPEND_STANDBY;
631 }
632 if (state == PM_SUSPEND_STANDBY) {
633 printk(info_test, pm_states[state]);
634 status = pm_suspend(state);
635 }
636 if (status < 0)
637 printk(err_suspend, status);
638}
639
640static int __init has_wakealarm(struct device *dev, void *name_ptr)
641{
642 struct rtc_device *candidate = to_rtc_device(dev);
643
644 if (!candidate->ops->set_alarm)
645 return 0;
646 if (!device_may_wakeup(candidate->dev.parent))
647 return 0;
648
649 *(char **)name_ptr = dev->bus_id;
650 return 1;
651}
652
653/*
654 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
655 * at startup time. They're normally disabled, for faster boot and because
656 * we can't know which states really work on this particular system.
657 */
658static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
659
660static char warn_bad_state[] __initdata =
661 KERN_WARNING "PM: can't test '%s' suspend state\n";
662
663static int __init setup_test_suspend(char *value)
664{
665 unsigned i;
666
667 /* "=mem" ==> "mem" */
668 value++;
669 for (i = 0; i < PM_SUSPEND_MAX; i++) {
670 if (!pm_states[i])
671 continue;
672 if (strcmp(pm_states[i], value) != 0)
673 continue;
674 test_state = (__force suspend_state_t) i;
675 return 0;
676 }
677 printk(warn_bad_state, value);
678 return 0;
679}
680__setup("test_suspend", setup_test_suspend);
681
682static int __init test_suspend(void)
683{
684 static char warn_no_rtc[] __initdata =
685 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
686
687 char *pony = NULL;
688 struct rtc_device *rtc = NULL;
689
690 /* PM is initialized by now; is that state testable? */
691 if (test_state == PM_SUSPEND_ON)
692 goto done;
693 if (!valid_state(test_state)) {
694 printk(warn_bad_state, pm_states[test_state]);
695 goto done;
696 }
697
698 /* RTCs have initialized by now too ... can we use one? */
699 class_find_device(rtc_class, NULL, &pony, has_wakealarm);
700 if (pony)
701 rtc = rtc_class_open(pony);
702 if (!rtc) {
703 printk(warn_no_rtc);
704 goto done;
705 }
706
707 /* go for it */
708 test_wakealarm(rtc, test_state);
709 rtc_class_close(rtc);
710done:
711 return 0;
712}
713late_initcall(test_suspend);
714
715#endif /* CONFIG_PM_TEST_SUSPEND */
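
The boot-time test only finds an RTC whose set_alarm() is implemented and whose parent device is marked wakeup-capable, which is why the Kconfig help text recommends building the RTC driver in. A sketch of the two pieces a platform needs, with the platform_device name assumed:

	/* in the RTC driver's probe(), so device_may_wakeup() succeeds above */
	device_init_wakeup(&pdev->dev, 1);

	/* then boot with:  test_suspend=mem   (or "standby") */
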
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 678ec736076b..72016f051477 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -10,6 +10,7 @@
10#include <linux/pm.h> 10#include <linux/pm.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/reboot.h> 12#include <linux/reboot.h>
13#include <linux/cpumask.h>
13 14
14/* 15/*
15 * When the user hits Sys-Rq o to power down the machine this is the 16 * When the user hits Sys-Rq o to power down the machine this is the
@@ -25,7 +26,8 @@ static DECLARE_WORK(poweroff_work, do_poweroff);
25 26
26static void handle_poweroff(int key, struct tty_struct *tty) 27static void handle_poweroff(int key, struct tty_struct *tty)
27{ 28{
28 schedule_work(&poweroff_work); 29 /* run sysrq poweroff on boot cpu */
30 schedule_work_on(first_cpu(cpu_online_map), &poweroff_work);
29} 31}
30 32
31static struct sysrq_key_op sysrq_poweroff_op = { 33static struct sysrq_key_op sysrq_poweroff_op = {
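
schedule_work() queues onto whichever CPU called it, which for a Sys-Rq keystroke handled in interrupt context may not be the CPU poweroff should run on; schedule_work_on() pins the work to the boot CPU instead. The same pattern in isolation, with the work handler invented here:

static void my_poweroff_fn(struct work_struct *work)
{
	kernel_power_off();
}
static DECLARE_WORK(my_poweroff_work, my_poweroff_fn);

	/* queue onto an explicit CPU rather than the local one */
	schedule_work_on(first_cpu(cpu_online_map), &my_poweroff_work);
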
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5fb87652f214..278946aecaf0 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -149,7 +149,7 @@ static int try_to_freeze_tasks(bool sig_only)
149 unsigned long end_time; 149 unsigned long end_time;
150 unsigned int todo; 150 unsigned int todo;
151 struct timeval start, end; 151 struct timeval start, end;
152 s64 elapsed_csecs64; 152 u64 elapsed_csecs64;
153 unsigned int elapsed_csecs; 153 unsigned int elapsed_csecs;
154 154
155 do_gettimeofday(&start); 155 do_gettimeofday(&start);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5f91a07c4eac..5d2ab836e998 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -205,8 +205,7 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
205 * objects. The main list's elements are of type struct zone_bitmap 205 * objects. The main list's elements are of type struct zone_bitmap
206 * and each of them corresonds to one zone. For each zone bitmap 206 * and each of them corresonds to one zone. For each zone bitmap
207 * object there is a list of objects of type struct bm_block that 207 * object there is a list of objects of type struct bm_block that
208 * represent each blocks of bit chunks in which information is 208 * represent each blocks of bitmap in which information is stored.
209 * stored.
210 * 209 *
211 * struct memory_bitmap contains a pointer to the main list of zone 210 * struct memory_bitmap contains a pointer to the main list of zone
212 * bitmap objects, a struct bm_position used for browsing the bitmap, 211 * bitmap objects, a struct bm_position used for browsing the bitmap,
@@ -224,26 +223,27 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
224 * pfns that correspond to the start and end of the represented zone. 223 * pfns that correspond to the start and end of the represented zone.
225 * 224 *
226 * struct bm_block contains a pointer to the memory page in which 225 * struct bm_block contains a pointer to the memory page in which
227 * information is stored (in the form of a block of bit chunks 226 * information is stored (in the form of a block of bitmap)
228 * of type unsigned long each). It also contains the pfns that 227 * It also contains the pfns that correspond to the start and end of
229 * correspond to the start and end of the represented memory area and 228 * the represented memory area.
230 * the number of bit chunks in the block.
231 */ 229 */
232 230
233#define BM_END_OF_MAP (~0UL) 231#define BM_END_OF_MAP (~0UL)
234 232
235#define BM_CHUNKS_PER_BLOCK (PAGE_SIZE / sizeof(long))
236#define BM_BITS_PER_CHUNK (sizeof(long) << 3)
237#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) 233#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3)
238 234
239struct bm_block { 235struct bm_block {
240 struct bm_block *next; /* next element of the list */ 236 struct bm_block *next; /* next element of the list */
241 unsigned long start_pfn; /* pfn represented by the first bit */ 237 unsigned long start_pfn; /* pfn represented by the first bit */
242 unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ 238 unsigned long end_pfn; /* pfn represented by the last bit plus 1 */
243 unsigned int size; /* number of bit chunks */ 239 unsigned long *data; /* bitmap representing pages */
244 unsigned long *data; /* chunks of bits representing pages */
245}; 240};
246 241
242static inline unsigned long bm_block_bits(struct bm_block *bb)
243{
244 return bb->end_pfn - bb->start_pfn;
245}
246
247struct zone_bitmap { 247struct zone_bitmap {
248 struct zone_bitmap *next; /* next element of the list */ 248 struct zone_bitmap *next; /* next element of the list */
249 unsigned long start_pfn; /* minimal pfn in this zone */ 249 unsigned long start_pfn; /* minimal pfn in this zone */
@@ -257,7 +257,6 @@ struct zone_bitmap {
257struct bm_position { 257struct bm_position {
258 struct zone_bitmap *zone_bm; 258 struct zone_bitmap *zone_bm;
259 struct bm_block *block; 259 struct bm_block *block;
260 int chunk;
261 int bit; 260 int bit;
262}; 261};
263 262
@@ -272,12 +271,6 @@ struct memory_bitmap {
272 271
273/* Functions that operate on memory bitmaps */ 272/* Functions that operate on memory bitmaps */
274 273
275static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
276{
277 bm->cur.chunk = 0;
278 bm->cur.bit = -1;
279}
280
281static void memory_bm_position_reset(struct memory_bitmap *bm) 274static void memory_bm_position_reset(struct memory_bitmap *bm)
282{ 275{
283 struct zone_bitmap *zone_bm; 276 struct zone_bitmap *zone_bm;
@@ -285,7 +278,7 @@ static void memory_bm_position_reset(struct memory_bitmap *bm)
285 zone_bm = bm->zone_bm_list; 278 zone_bm = bm->zone_bm_list;
286 bm->cur.zone_bm = zone_bm; 279 bm->cur.zone_bm = zone_bm;
287 bm->cur.block = zone_bm->bm_blocks; 280 bm->cur.block = zone_bm->bm_blocks;
288 memory_bm_reset_chunk(bm); 281 bm->cur.bit = 0;
289} 282}
290 283
291static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); 284static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
@@ -394,12 +387,10 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
394 bb->start_pfn = pfn; 387 bb->start_pfn = pfn;
395 if (nr >= BM_BITS_PER_BLOCK) { 388 if (nr >= BM_BITS_PER_BLOCK) {
396 pfn += BM_BITS_PER_BLOCK; 389 pfn += BM_BITS_PER_BLOCK;
397 bb->size = BM_CHUNKS_PER_BLOCK;
398 nr -= BM_BITS_PER_BLOCK; 390 nr -= BM_BITS_PER_BLOCK;
399 } else { 391 } else {
400 /* This is executed only once in the loop */ 392 /* This is executed only once in the loop */
401 pfn += nr; 393 pfn += nr;
402 bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
403 } 394 }
404 bb->end_pfn = pfn; 395 bb->end_pfn = pfn;
405 bb = bb->next; 396 bb = bb->next;
@@ -478,8 +469,8 @@ static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
478 } 469 }
479 zone_bm->cur_block = bb; 470 zone_bm->cur_block = bb;
480 pfn -= bb->start_pfn; 471 pfn -= bb->start_pfn;
481 *bit_nr = pfn % BM_BITS_PER_CHUNK; 472 *bit_nr = pfn;
482 *addr = bb->data + pfn / BM_BITS_PER_CHUNK; 473 *addr = bb->data;
483 return 0; 474 return 0;
484} 475}
485 476
@@ -528,36 +519,6 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
528 return test_bit(bit, addr); 519 return test_bit(bit, addr);
529} 520}
530 521
531/* Two auxiliary functions for memory_bm_next_pfn */
532
533/* Find the first set bit in the given chunk, if there is one */
534
535static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
536{
537 bit++;
538 while (bit < BM_BITS_PER_CHUNK) {
539 if (test_bit(bit, chunk_p))
540 return bit;
541
542 bit++;
543 }
544 return -1;
545}
546
547/* Find a chunk containing some bits set in given block of bits */
548
549static inline int next_chunk_in_block(int n, struct bm_block *bb)
550{
551 n++;
552 while (n < bb->size) {
553 if (bb->data[n])
554 return n;
555
556 n++;
557 }
558 return -1;
559}
560
561/** 522/**
562 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit 523 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
563 * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is 524 * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
@@ -571,40 +532,33 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
571{ 532{
572 struct zone_bitmap *zone_bm; 533 struct zone_bitmap *zone_bm;
573 struct bm_block *bb; 534 struct bm_block *bb;
574 int chunk;
575 int bit; 535 int bit;
576 536
577 do { 537 do {
578 bb = bm->cur.block; 538 bb = bm->cur.block;
579 do { 539 do {
580 chunk = bm->cur.chunk;
581 bit = bm->cur.bit; 540 bit = bm->cur.bit;
582 do { 541 bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
583 bit = next_bit_in_chunk(bit, bb->data + chunk); 542 if (bit < bm_block_bits(bb))
584 if (bit >= 0) 543 goto Return_pfn;
585 goto Return_pfn; 544
586
587 chunk = next_chunk_in_block(chunk, bb);
588 bit = -1;
589 } while (chunk >= 0);
590 bb = bb->next; 545 bb = bb->next;
591 bm->cur.block = bb; 546 bm->cur.block = bb;
592 memory_bm_reset_chunk(bm); 547 bm->cur.bit = 0;
593 } while (bb); 548 } while (bb);
594 zone_bm = bm->cur.zone_bm->next; 549 zone_bm = bm->cur.zone_bm->next;
595 if (zone_bm) { 550 if (zone_bm) {
596 bm->cur.zone_bm = zone_bm; 551 bm->cur.zone_bm = zone_bm;
597 bm->cur.block = zone_bm->bm_blocks; 552 bm->cur.block = zone_bm->bm_blocks;
598 memory_bm_reset_chunk(bm); 553 bm->cur.bit = 0;
599 } 554 }
600 } while (zone_bm); 555 } while (zone_bm);
601 memory_bm_position_reset(bm); 556 memory_bm_position_reset(bm);
602 return BM_END_OF_MAP; 557 return BM_END_OF_MAP;
603 558
604 Return_pfn: 559 Return_pfn:
605 bm->cur.chunk = chunk; 560 bm->cur.bit = bit + 1;
606 bm->cur.bit = bit; 561 return bb->start_pfn + bit;
607 return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
608} 562}
609 563
610/** 564/**
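
Dropping the hand-rolled chunk layer lets snapshot.c walk the bitmap with the generic find_next_bit(), which already does the word-at-a-time skipping that the removed next_bit_in_chunk()/next_chunk_in_block() helpers reimplemented. The underlying iteration idiom, shown standalone:

	unsigned long bit, nbits = bb->end_pfn - bb->start_pfn;

	/* visit every set bit in this block's bitmap */
	for (bit = find_next_bit(bb->data, nbits, 0);
	     bit < nbits;
	     bit = find_next_bit(bb->data, nbits, bit + 1)) {
		unsigned long pfn = bb->start_pfn + bit;
		/* ... handle pfn ... */
	}
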
diff --git a/kernel/printk.c b/kernel/printk.c
index 07ad9e7f7a66..a7f7559c5f6c 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -933,7 +933,7 @@ void suspend_console(void)
933{ 933{
934 if (!console_suspend_enabled) 934 if (!console_suspend_enabled)
935 return; 935 return;
936 printk("Suspending console(s)\n"); 936 printk("Suspending console(s) (use no_console_suspend to debug)\n");
937 acquire_console_sem(); 937 acquire_console_sem();
938 console_suspended = 1; 938 console_suspended = 1;
939} 939}
@@ -1308,6 +1308,8 @@ void tty_write_message(struct tty_struct *tty, char *msg)
1308} 1308}
1309 1309
1310#if defined CONFIG_PRINTK 1310#if defined CONFIG_PRINTK
1311
1312DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
1311/* 1313/*
1312 * printk rate limiting, lifted from the networking subsystem. 1314 * printk rate limiting, lifted from the networking subsystem.
1313 * 1315 *
@@ -1315,22 +1317,9 @@ void tty_write_message(struct tty_struct *tty, char *msg)
1315 * every printk_ratelimit_jiffies to make a denial-of-service 1317 * every printk_ratelimit_jiffies to make a denial-of-service
1316 * attack impossible. 1318 * attack impossible.
1317 */ 1319 */
1318int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
1319{
1320 return __ratelimit(ratelimit_jiffies, ratelimit_burst);
1321}
1322EXPORT_SYMBOL(__printk_ratelimit);
1323
1324/* minimum time in jiffies between messages */
1325int printk_ratelimit_jiffies = 5 * HZ;
1326
1327/* number of messages we send before ratelimiting */
1328int printk_ratelimit_burst = 10;
1329
1330int printk_ratelimit(void) 1320int printk_ratelimit(void)
1331{ 1321{
1332 return __printk_ratelimit(printk_ratelimit_jiffies, 1322 return __ratelimit(&printk_ratelimit_state);
1333 printk_ratelimit_burst);
1334} 1323}
1335EXPORT_SYMBOL(printk_ratelimit); 1324EXPORT_SYMBOL(printk_ratelimit);
1336 1325
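
printk_ratelimit() is now a thin wrapper over the generic ratelimit state declared above: at most ten messages per five seconds, tracked in a structure the caller owns. Subsystems that want a private budget instead of sharing printk's can declare their own; a sketch with the state name and limits chosen arbitrarily:

/* allow at most 10 messages every 5 seconds for this driver */
static DEFINE_RATELIMIT_STATE(mydrv_rs, 5 * HZ, 10);

	if (__ratelimit(&mydrv_rs))
		printk(KERN_WARNING "mydrv: dropping malformed packet\n");
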
diff --git a/kernel/profile.c b/kernel/profile.c
index 58926411eb2a..cd26bed4cc26 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -112,8 +112,6 @@ void __init profile_init(void)
112 112
113/* Profile event notifications */ 113/* Profile event notifications */
114 114
115#ifdef CONFIG_PROFILING
116
117static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); 115static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
118static ATOMIC_NOTIFIER_HEAD(task_free_notifier); 116static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
119static BLOCKING_NOTIFIER_HEAD(munmap_notifier); 117static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
@@ -203,8 +201,6 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
203} 201}
204EXPORT_SYMBOL_GPL(unregister_timer_hook); 202EXPORT_SYMBOL_GPL(unregister_timer_hook);
205 203
206#endif /* CONFIG_PROFILING */
207
208 204
209#ifdef CONFIG_SMP 205#ifdef CONFIG_SMP
210/* 206/*
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 16eeeaa9d618..6f8696c502f4 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
106 */ 106 */
107 cpus_and(cpumask, rcp->cpumask, cpu_online_map); 107 cpus_and(cpumask, rcp->cpumask, cpu_online_map);
108 cpu_clear(rdp->cpu, cpumask); 108 cpu_clear(rdp->cpu, cpumask);
109 for_each_cpu_mask(cpu, cpumask) 109 for_each_cpu_mask_nr(cpu, cpumask)
110 smp_send_reschedule(cpu); 110 smp_send_reschedule(cpu);
111 } 111 }
112} 112}
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 6f62b77d93c4..27827931ca0d 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void)
756 756
757 /* Now ask each CPU for acknowledgement of the flip. */ 757 /* Now ask each CPU for acknowledgement of the flip. */
758 758
759 for_each_cpu_mask(cpu, rcu_cpu_online_map) { 759 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
760 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 760 per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
761 dyntick_save_progress_counter(cpu); 761 dyntick_save_progress_counter(cpu);
762 } 762 }
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void)
774 int cpu; 774 int cpu;
775 775
776 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 776 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
777 for_each_cpu_mask(cpu, rcu_cpu_online_map) 777 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
778 if (rcu_try_flip_waitack_needed(cpu) && 778 if (rcu_try_flip_waitack_needed(cpu) &&
779 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 779 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
780 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); 780 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void)
806 /* Check to see if the sum of the "last" counters is zero. */ 806 /* Check to see if the sum of the "last" counters is zero. */
807 807
808 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); 808 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
809 for_each_cpu_mask(cpu, rcu_cpu_online_map) 809 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
810 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; 810 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
811 if (sum != 0) { 811 if (sum != 0) {
812 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); 812 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void)
821 smp_mb(); /* ^^^^^^^^^^^^ */ 821 smp_mb(); /* ^^^^^^^^^^^^ */
822 822
823 /* Call for a memory barrier from each CPU. */ 823 /* Call for a memory barrier from each CPU. */
824 for_each_cpu_mask(cpu, rcu_cpu_online_map) { 824 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
825 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 825 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
826 dyntick_save_progress_counter(cpu); 826 dyntick_save_progress_counter(cpu);
827 } 827 }
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void)
841 int cpu; 841 int cpu;
842 842
843 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 843 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
844 for_each_cpu_mask(cpu, rcu_cpu_online_map) 844 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
845 if (rcu_try_flip_waitmb_needed(cpu) && 845 if (rcu_try_flip_waitmb_needed(cpu) &&
846 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 846 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
847 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); 847 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index d3c61b4ebef2..f275c8eca772 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/res_counter.h> 14#include <linux/res_counter.h>
15#include <linux/uaccess.h> 15#include <linux/uaccess.h>
16#include <linux/mm.h>
16 17
17void res_counter_init(struct res_counter *counter) 18void res_counter_init(struct res_counter *counter)
18{ 19{
@@ -102,44 +103,37 @@ u64 res_counter_read_u64(struct res_counter *counter, int member)
102 return *res_counter_member(counter, member); 103 return *res_counter_member(counter, member);
103} 104}
104 105
105ssize_t res_counter_write(struct res_counter *counter, int member, 106int res_counter_memparse_write_strategy(const char *buf,
106 const char __user *userbuf, size_t nbytes, loff_t *pos, 107 unsigned long long *res)
107 int (*write_strategy)(char *st_buf, unsigned long long *val))
108{ 108{
109 int ret; 109 char *end;
110 char *buf, *end; 110 /* FIXME - make memparse() take const char* args */
111 unsigned long flags; 111 *res = memparse((char *)buf, &end);
112 unsigned long long tmp, *val; 112 if (*end != '\0')
113 113 return -EINVAL;
114 buf = kmalloc(nbytes + 1, GFP_KERNEL);
115 ret = -ENOMEM;
116 if (buf == NULL)
117 goto out;
118 114
119 buf[nbytes] = '\0'; 115 *res = PAGE_ALIGN(*res);
120 ret = -EFAULT; 116 return 0;
121 if (copy_from_user(buf, userbuf, nbytes)) 117}
122 goto out_free;
123 118
124 ret = -EINVAL; 119int res_counter_write(struct res_counter *counter, int member,
120 const char *buf, write_strategy_fn write_strategy)
121{
122 char *end;
123 unsigned long flags;
124 unsigned long long tmp, *val;
125 125
126 strstrip(buf);
127 if (write_strategy) { 126 if (write_strategy) {
128 if (write_strategy(buf, &tmp)) { 127 if (write_strategy(buf, &tmp))
129 goto out_free; 128 return -EINVAL;
130 }
131 } else { 129 } else {
132 tmp = simple_strtoull(buf, &end, 10); 130 tmp = simple_strtoull(buf, &end, 10);
133 if (*end != '\0') 131 if (*end != '\0')
134 goto out_free; 132 return -EINVAL;
135 } 133 }
136 spin_lock_irqsave(&counter->lock, flags); 134 spin_lock_irqsave(&counter->lock, flags);
137 val = res_counter_member(counter, member); 135 val = res_counter_member(counter, member);
138 *val = tmp; 136 *val = tmp;
139 spin_unlock_irqrestore(&counter->lock, flags); 137 spin_unlock_irqrestore(&counter->lock, flags);
140 ret = nbytes; 138 return 0;
141out_free:
142 kfree(buf);
143out:
144 return ret;
145} 139}
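
res_counter_write() no longer copies from user space itself; the cgroup write_string path hands it an already-stripped kernel buffer, and the new res_counter_memparse_write_strategy() accepts memparse() suffixes (K/M/G) and page-aligns the result. A sketch of how a controller's write handler might wire it up, assuming the RES_LIMIT member and the res counter embedded in the memory controller's state:

	/* "buf" arrives stripped, e.g. "64M" */
	ret = res_counter_write(&memcg->res, RES_LIMIT, buf,
				res_counter_memparse_write_strategy);
	if (ret)
		return ret;	/* -EINVAL on trailing garbage */
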
diff --git a/kernel/sched.c b/kernel/sched.c
index b1104ea5d255..0047bd9b96aa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -571,8 +571,10 @@ struct rq {
571#endif 571#endif
572 572
573#ifdef CONFIG_SCHED_HRTICK 573#ifdef CONFIG_SCHED_HRTICK
574 unsigned long hrtick_flags; 574#ifdef CONFIG_SMP
575 ktime_t hrtick_expire; 575 int hrtick_csd_pending;
576 struct call_single_data hrtick_csd;
577#endif
576 struct hrtimer hrtick_timer; 578 struct hrtimer hrtick_timer;
577#endif 579#endif
578 580
@@ -983,13 +985,6 @@ static struct rq *this_rq_lock(void)
983 return rq; 985 return rq;
984} 986}
985 987
986static void __resched_task(struct task_struct *p, int tif_bit);
987
988static inline void resched_task(struct task_struct *p)
989{
990 __resched_task(p, TIF_NEED_RESCHED);
991}
992
993#ifdef CONFIG_SCHED_HRTICK 988#ifdef CONFIG_SCHED_HRTICK
994/* 989/*
995 * Use HR-timers to deliver accurate preemption points. 990 * Use HR-timers to deliver accurate preemption points.
@@ -1001,25 +996,6 @@ static inline void resched_task(struct task_struct *p)
1001 * When we get rescheduled we reprogram the hrtick_timer outside of the 996 * When we get rescheduled we reprogram the hrtick_timer outside of the
1002 * rq->lock. 997 * rq->lock.
1003 */ 998 */
1004static inline void resched_hrt(struct task_struct *p)
1005{
1006 __resched_task(p, TIF_HRTICK_RESCHED);
1007}
1008
1009static inline void resched_rq(struct rq *rq)
1010{
1011 unsigned long flags;
1012
1013 spin_lock_irqsave(&rq->lock, flags);
1014 resched_task(rq->curr);
1015 spin_unlock_irqrestore(&rq->lock, flags);
1016}
1017
1018enum {
1019 HRTICK_SET, /* re-programm hrtick_timer */
1020 HRTICK_RESET, /* not a new slice */
1021 HRTICK_BLOCK, /* stop hrtick operations */
1022};
1023 999
1024/* 1000/*
1025 * Use hrtick when: 1001 * Use hrtick when:
@@ -1030,40 +1006,11 @@ static inline int hrtick_enabled(struct rq *rq)
1030{ 1006{
1031 if (!sched_feat(HRTICK)) 1007 if (!sched_feat(HRTICK))
1032 return 0; 1008 return 0;
1033 if (unlikely(test_bit(HRTICK_BLOCK, &rq->hrtick_flags))) 1009 if (!cpu_active(cpu_of(rq)))
1034 return 0; 1010 return 0;
1035 return hrtimer_is_hres_active(&rq->hrtick_timer); 1011 return hrtimer_is_hres_active(&rq->hrtick_timer);
1036} 1012}
1037 1013
1038/*
1039 * Called to set the hrtick timer state.
1040 *
1041 * called with rq->lock held and irqs disabled
1042 */
1043static void hrtick_start(struct rq *rq, u64 delay, int reset)
1044{
1045 assert_spin_locked(&rq->lock);
1046
1047 /*
1048 * preempt at: now + delay
1049 */
1050 rq->hrtick_expire =
1051 ktime_add_ns(rq->hrtick_timer.base->get_time(), delay);
1052 /*
1053 * indicate we need to program the timer
1054 */
1055 __set_bit(HRTICK_SET, &rq->hrtick_flags);
1056 if (reset)
1057 __set_bit(HRTICK_RESET, &rq->hrtick_flags);
1058
1059 /*
1060 * New slices are called from the schedule path and don't need a
1061 * forced reschedule.
1062 */
1063 if (reset)
1064 resched_hrt(rq->curr);
1065}
1066
1067static void hrtick_clear(struct rq *rq) 1014static void hrtick_clear(struct rq *rq)
1068{ 1015{
1069 if (hrtimer_active(&rq->hrtick_timer)) 1016 if (hrtimer_active(&rq->hrtick_timer))
@@ -1071,32 +1018,6 @@ static void hrtick_clear(struct rq *rq)
1071} 1018}
1072 1019
1073/* 1020/*
1074 * Update the timer from the possible pending state.
1075 */
1076static void hrtick_set(struct rq *rq)
1077{
1078 ktime_t time;
1079 int set, reset;
1080 unsigned long flags;
1081
1082 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1083
1084 spin_lock_irqsave(&rq->lock, flags);
1085 set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags);
1086 reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags);
1087 time = rq->hrtick_expire;
1088 clear_thread_flag(TIF_HRTICK_RESCHED);
1089 spin_unlock_irqrestore(&rq->lock, flags);
1090
1091 if (set) {
1092 hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS);
1093 if (reset && !hrtimer_active(&rq->hrtick_timer))
1094 resched_rq(rq);
1095 } else
1096 hrtick_clear(rq);
1097}
1098
1099/*
1100 * High-resolution timer tick. 1021 * High-resolution timer tick.
1101 * Runs from hardirq context with interrupts disabled. 1022 * Runs from hardirq context with interrupts disabled.
1102 */ 1023 */
@@ -1115,27 +1036,37 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
1115} 1036}
1116 1037
1117#ifdef CONFIG_SMP 1038#ifdef CONFIG_SMP
1118static void hotplug_hrtick_disable(int cpu) 1039/*
1040 * called from hardirq (IPI) context
1041 */
1042static void __hrtick_start(void *arg)
1119{ 1043{
1120 struct rq *rq = cpu_rq(cpu); 1044 struct rq *rq = arg;
1121 unsigned long flags;
1122 1045
1123 spin_lock_irqsave(&rq->lock, flags); 1046 spin_lock(&rq->lock);
1124 rq->hrtick_flags = 0; 1047 hrtimer_restart(&rq->hrtick_timer);
1125 __set_bit(HRTICK_BLOCK, &rq->hrtick_flags); 1048 rq->hrtick_csd_pending = 0;
1126 spin_unlock_irqrestore(&rq->lock, flags); 1049 spin_unlock(&rq->lock);
1127
1128 hrtick_clear(rq);
1129} 1050}
1130 1051
1131static void hotplug_hrtick_enable(int cpu) 1052/*
1053 * Called to set the hrtick timer state.
1054 *
1055 * called with rq->lock held and irqs disabled
1056 */
1057static void hrtick_start(struct rq *rq, u64 delay)
1132{ 1058{
1133 struct rq *rq = cpu_rq(cpu); 1059 struct hrtimer *timer = &rq->hrtick_timer;
1134 unsigned long flags; 1060 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
1135 1061
1136 spin_lock_irqsave(&rq->lock, flags); 1062 timer->expires = time;
1137 __clear_bit(HRTICK_BLOCK, &rq->hrtick_flags); 1063
1138 spin_unlock_irqrestore(&rq->lock, flags); 1064 if (rq == this_rq()) {
1065 hrtimer_restart(timer);
1066 } else if (!rq->hrtick_csd_pending) {
1067 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
1068 rq->hrtick_csd_pending = 1;
1069 }
1139} 1070}
1140 1071
1141static int 1072static int
@@ -1150,16 +1081,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1150 case CPU_DOWN_PREPARE_FROZEN: 1081 case CPU_DOWN_PREPARE_FROZEN:
1151 case CPU_DEAD: 1082 case CPU_DEAD:
1152 case CPU_DEAD_FROZEN: 1083 case CPU_DEAD_FROZEN:
1153 hotplug_hrtick_disable(cpu); 1084 hrtick_clear(cpu_rq(cpu));
1154 return NOTIFY_OK;
1155
1156 case CPU_UP_PREPARE:
1157 case CPU_UP_PREPARE_FROZEN:
1158 case CPU_DOWN_FAILED:
1159 case CPU_DOWN_FAILED_FROZEN:
1160 case CPU_ONLINE:
1161 case CPU_ONLINE_FROZEN:
1162 hotplug_hrtick_enable(cpu);
1163 return NOTIFY_OK; 1085 return NOTIFY_OK;
1164 } 1086 }
1165 1087
@@ -1170,46 +1092,45 @@ static void init_hrtick(void)
1170{ 1092{
1171 hotcpu_notifier(hotplug_hrtick, 0); 1093 hotcpu_notifier(hotplug_hrtick, 0);
1172} 1094}
1173#endif /* CONFIG_SMP */ 1095#else
1096/*
1097 * Called to set the hrtick timer state.
1098 *
1099 * called with rq->lock held and irqs disabled
1100 */
1101static void hrtick_start(struct rq *rq, u64 delay)
1102{
1103 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
1104}
1174 1105
1175static void init_rq_hrtick(struct rq *rq) 1106static void init_hrtick(void)
1176{ 1107{
1177 rq->hrtick_flags = 0;
1178 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1179 rq->hrtick_timer.function = hrtick;
1180 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
1181} 1108}
1109#endif /* CONFIG_SMP */
1182 1110
1183void hrtick_resched(void) 1111static void init_rq_hrtick(struct rq *rq)
1184{ 1112{
1185 struct rq *rq; 1113#ifdef CONFIG_SMP
1186 unsigned long flags; 1114 rq->hrtick_csd_pending = 0;
1187 1115
1188 if (!test_thread_flag(TIF_HRTICK_RESCHED)) 1116 rq->hrtick_csd.flags = 0;
1189 return; 1117 rq->hrtick_csd.func = __hrtick_start;
1118 rq->hrtick_csd.info = rq;
1119#endif
1190 1120
1191 local_irq_save(flags); 1121 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1192 rq = cpu_rq(smp_processor_id()); 1122 rq->hrtick_timer.function = hrtick;
1193 hrtick_set(rq); 1123 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
1194 local_irq_restore(flags);
1195} 1124}
1196#else 1125#else
1197static inline void hrtick_clear(struct rq *rq) 1126static inline void hrtick_clear(struct rq *rq)
1198{ 1127{
1199} 1128}
1200 1129
1201static inline void hrtick_set(struct rq *rq)
1202{
1203}
1204
1205static inline void init_rq_hrtick(struct rq *rq) 1130static inline void init_rq_hrtick(struct rq *rq)
1206{ 1131{
1207} 1132}
1208 1133
1209void hrtick_resched(void)
1210{
1211}
1212
1213static inline void init_hrtick(void) 1134static inline void init_hrtick(void)
1214{ 1135{
1215} 1136}
@@ -1228,16 +1149,16 @@ static inline void init_hrtick(void)
1228#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 1149#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1229#endif 1150#endif
1230 1151
1231static void __resched_task(struct task_struct *p, int tif_bit) 1152static void resched_task(struct task_struct *p)
1232{ 1153{
1233 int cpu; 1154 int cpu;
1234 1155
1235 assert_spin_locked(&task_rq(p)->lock); 1156 assert_spin_locked(&task_rq(p)->lock);
1236 1157
1237 if (unlikely(test_tsk_thread_flag(p, tif_bit))) 1158 if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
1238 return; 1159 return;
1239 1160
1240 set_tsk_thread_flag(p, tif_bit); 1161 set_tsk_thread_flag(p, TIF_NEED_RESCHED);
1241 1162
1242 cpu = task_cpu(p); 1163 cpu = task_cpu(p);
1243 if (cpu == smp_processor_id()) 1164 if (cpu == smp_processor_id())
@@ -1303,10 +1224,10 @@ void wake_up_idle_cpu(int cpu)
1303#endif /* CONFIG_NO_HZ */ 1224#endif /* CONFIG_NO_HZ */
1304 1225
1305#else /* !CONFIG_SMP */ 1226#else /* !CONFIG_SMP */
1306static void __resched_task(struct task_struct *p, int tif_bit) 1227static void resched_task(struct task_struct *p)
1307{ 1228{
1308 assert_spin_locked(&task_rq(p)->lock); 1229 assert_spin_locked(&task_rq(p)->lock);
1309 set_tsk_thread_flag(p, tif_bit); 1230 set_tsk_need_resched(p);
1310} 1231}
1311#endif /* CONFIG_SMP */ 1232#endif /* CONFIG_SMP */
1312 1233
@@ -2108,7 +2029,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
2108 /* Tally up the load of all CPUs in the group */ 2029 /* Tally up the load of all CPUs in the group */
2109 avg_load = 0; 2030 avg_load = 0;
2110 2031
2111 for_each_cpu_mask(i, group->cpumask) { 2032 for_each_cpu_mask_nr(i, group->cpumask) {
2112 /* Bias balancing toward cpus of our domain */ 2033 /* Bias balancing toward cpus of our domain */
2113 if (local_group) 2034 if (local_group)
2114 load = source_load(i, load_idx); 2035 load = source_load(i, load_idx);
@@ -2150,7 +2071,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
2150 /* Traverse only the allowed CPUs */ 2071 /* Traverse only the allowed CPUs */
2151 cpus_and(*tmp, group->cpumask, p->cpus_allowed); 2072 cpus_and(*tmp, group->cpumask, p->cpus_allowed);
2152 2073
2153 for_each_cpu_mask(i, *tmp) { 2074 for_each_cpu_mask_nr(i, *tmp) {
2154 load = weighted_cpuload(i); 2075 load = weighted_cpuload(i);
2155 2076
2156 if (load < min_load || (load == min_load && i == this_cpu)) { 2077 if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2881,7 +2802,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2881 2802
2882 rq = task_rq_lock(p, &flags); 2803 rq = task_rq_lock(p, &flags);
2883 if (!cpu_isset(dest_cpu, p->cpus_allowed) 2804 if (!cpu_isset(dest_cpu, p->cpus_allowed)
2884 || unlikely(cpu_is_offline(dest_cpu))) 2805 || unlikely(!cpu_active(dest_cpu)))
2885 goto out; 2806 goto out;
2886 2807
2887 /* force the process onto the specified CPU */ 2808 /* force the process onto the specified CPU */
@@ -3168,7 +3089,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3168 max_cpu_load = 0; 3089 max_cpu_load = 0;
3169 min_cpu_load = ~0UL; 3090 min_cpu_load = ~0UL;
3170 3091
3171 for_each_cpu_mask(i, group->cpumask) { 3092 for_each_cpu_mask_nr(i, group->cpumask) {
3172 struct rq *rq; 3093 struct rq *rq;
3173 3094
3174 if (!cpu_isset(i, *cpus)) 3095 if (!cpu_isset(i, *cpus))
@@ -3447,7 +3368,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3447 unsigned long max_load = 0; 3368 unsigned long max_load = 0;
3448 int i; 3369 int i;
3449 3370
3450 for_each_cpu_mask(i, group->cpumask) { 3371 for_each_cpu_mask_nr(i, group->cpumask) {
3451 unsigned long wl; 3372 unsigned long wl;
3452 3373
3453 if (!cpu_isset(i, *cpus)) 3374 if (!cpu_isset(i, *cpus))
@@ -3849,7 +3770,7 @@ int select_nohz_load_balancer(int stop_tick)
3849 /* 3770 /*
3850 * If we are going offline and still the leader, give up! 3771 * If we are going offline and still the leader, give up!
3851 */ 3772 */
3852 if (cpu_is_offline(cpu) && 3773 if (!cpu_active(cpu) &&
3853 atomic_read(&nohz.load_balancer) == cpu) { 3774 atomic_read(&nohz.load_balancer) == cpu) {
3854 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) 3775 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3855 BUG(); 3776 BUG();
@@ -3989,7 +3910,7 @@ static void run_rebalance_domains(struct softirq_action *h)
3989 int balance_cpu; 3910 int balance_cpu;
3990 3911
3991 cpu_clear(this_cpu, cpus); 3912 cpu_clear(this_cpu, cpus);
3992 for_each_cpu_mask(balance_cpu, cpus) { 3913 for_each_cpu_mask_nr(balance_cpu, cpus) {
3993 /* 3914 /*
3994 * If this cpu gets work to do, stop the load balancing 3915 * If this cpu gets work to do, stop the load balancing
3995 * work being done for other cpus. Next load 3916 * work being done for other cpus. Next load
@@ -4125,6 +4046,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
4125 cpustat->nice = cputime64_add(cpustat->nice, tmp); 4046 cpustat->nice = cputime64_add(cpustat->nice, tmp);
4126 else 4047 else
4127 cpustat->user = cputime64_add(cpustat->user, tmp); 4048 cpustat->user = cputime64_add(cpustat->user, tmp);
4049 /* Account for user time used */
4050 acct_update_integrals(p);
4128} 4051}
4129 4052
4130/* 4053/*
@@ -4395,7 +4318,7 @@ asmlinkage void __sched schedule(void)
4395 struct task_struct *prev, *next; 4318 struct task_struct *prev, *next;
4396 unsigned long *switch_count; 4319 unsigned long *switch_count;
4397 struct rq *rq; 4320 struct rq *rq;
4398 int cpu, hrtick = sched_feat(HRTICK); 4321 int cpu;
4399 4322
4400need_resched: 4323need_resched:
4401 preempt_disable(); 4324 preempt_disable();
@@ -4410,7 +4333,7 @@ need_resched_nonpreemptible:
4410 4333
4411 schedule_debug(prev); 4334 schedule_debug(prev);
4412 4335
4413 if (hrtick) 4336 if (sched_feat(HRTICK))
4414 hrtick_clear(rq); 4337 hrtick_clear(rq);
4415 4338
4416 /* 4339 /*
@@ -4457,9 +4380,6 @@ need_resched_nonpreemptible:
4457 } else 4380 } else
4458 spin_unlock_irq(&rq->lock); 4381 spin_unlock_irq(&rq->lock);
4459 4382
4460 if (hrtick)
4461 hrtick_set(rq);
4462
4463 if (unlikely(reacquire_kernel_lock(current) < 0)) 4383 if (unlikely(reacquire_kernel_lock(current) < 0))
4464 goto need_resched_nonpreemptible; 4384 goto need_resched_nonpreemptible;
4465 4385
@@ -5876,7 +5796,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5876 struct rq *rq_dest, *rq_src; 5796 struct rq *rq_dest, *rq_src;
5877 int ret = 0, on_rq; 5797 int ret = 0, on_rq;
5878 5798
5879 if (unlikely(cpu_is_offline(dest_cpu))) 5799 if (unlikely(!cpu_active(dest_cpu)))
5880 return ret; 5800 return ret;
5881 5801
5882 rq_src = cpu_rq(src_cpu); 5802 rq_src = cpu_rq(src_cpu);
@@ -6768,7 +6688,8 @@ static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
6768/* Setup the mask of cpus configured for isolated domains */ 6688/* Setup the mask of cpus configured for isolated domains */
6769static int __init isolated_cpu_setup(char *str) 6689static int __init isolated_cpu_setup(char *str)
6770{ 6690{
6771 int ints[NR_CPUS], i; 6691 static int __initdata ints[NR_CPUS];
6692 int i;
6772 6693
6773 str = get_options(str, ARRAY_SIZE(ints), ints); 6694 str = get_options(str, ARRAY_SIZE(ints), ints);
6774 cpus_clear(cpu_isolated_map); 6695 cpus_clear(cpu_isolated_map);
@@ -6802,7 +6723,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6802 6723
6803 cpus_clear(*covered); 6724 cpus_clear(*covered);
6804 6725
6805 for_each_cpu_mask(i, *span) { 6726 for_each_cpu_mask_nr(i, *span) {
6806 struct sched_group *sg; 6727 struct sched_group *sg;
6807 int group = group_fn(i, cpu_map, &sg, tmpmask); 6728 int group = group_fn(i, cpu_map, &sg, tmpmask);
6808 int j; 6729 int j;
@@ -6813,7 +6734,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6813 cpus_clear(sg->cpumask); 6734 cpus_clear(sg->cpumask);
6814 sg->__cpu_power = 0; 6735 sg->__cpu_power = 0;
6815 6736
6816 for_each_cpu_mask(j, *span) { 6737 for_each_cpu_mask_nr(j, *span) {
6817 if (group_fn(j, cpu_map, NULL, tmpmask) != group) 6738 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
6818 continue; 6739 continue;
6819 6740
@@ -7013,7 +6934,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
7013 if (!sg) 6934 if (!sg)
7014 return; 6935 return;
7015 do { 6936 do {
7016 for_each_cpu_mask(j, sg->cpumask) { 6937 for_each_cpu_mask_nr(j, sg->cpumask) {
7017 struct sched_domain *sd; 6938 struct sched_domain *sd;
7018 6939
7019 sd = &per_cpu(phys_domains, j); 6940 sd = &per_cpu(phys_domains, j);
@@ -7038,7 +6959,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
7038{ 6959{
7039 int cpu, i; 6960 int cpu, i;
7040 6961
7041 for_each_cpu_mask(cpu, *cpu_map) { 6962 for_each_cpu_mask_nr(cpu, *cpu_map) {
7042 struct sched_group **sched_group_nodes 6963 struct sched_group **sched_group_nodes
7043 = sched_group_nodes_bycpu[cpu]; 6964 = sched_group_nodes_bycpu[cpu];
7044 6965
@@ -7277,7 +7198,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7277 /* 7198 /*
7278 * Set up domains for cpus specified by the cpu_map. 7199 * Set up domains for cpus specified by the cpu_map.
7279 */ 7200 */
7280 for_each_cpu_mask(i, *cpu_map) { 7201 for_each_cpu_mask_nr(i, *cpu_map) {
7281 struct sched_domain *sd = NULL, *p; 7202 struct sched_domain *sd = NULL, *p;
7282 SCHED_CPUMASK_VAR(nodemask, allmasks); 7203 SCHED_CPUMASK_VAR(nodemask, allmasks);
7283 7204
@@ -7344,7 +7265,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7344 7265
7345#ifdef CONFIG_SCHED_SMT 7266#ifdef CONFIG_SCHED_SMT
7346 /* Set up CPU (sibling) groups */ 7267 /* Set up CPU (sibling) groups */
7347 for_each_cpu_mask(i, *cpu_map) { 7268 for_each_cpu_mask_nr(i, *cpu_map) {
7348 SCHED_CPUMASK_VAR(this_sibling_map, allmasks); 7269 SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
7349 SCHED_CPUMASK_VAR(send_covered, allmasks); 7270 SCHED_CPUMASK_VAR(send_covered, allmasks);
7350 7271
@@ -7361,7 +7282,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7361 7282
7362#ifdef CONFIG_SCHED_MC 7283#ifdef CONFIG_SCHED_MC
7363 /* Set up multi-core groups */ 7284 /* Set up multi-core groups */
7364 for_each_cpu_mask(i, *cpu_map) { 7285 for_each_cpu_mask_nr(i, *cpu_map) {
7365 SCHED_CPUMASK_VAR(this_core_map, allmasks); 7286 SCHED_CPUMASK_VAR(this_core_map, allmasks);
7366 SCHED_CPUMASK_VAR(send_covered, allmasks); 7287 SCHED_CPUMASK_VAR(send_covered, allmasks);
7367 7288
@@ -7428,7 +7349,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7428 goto error; 7349 goto error;
7429 } 7350 }
7430 sched_group_nodes[i] = sg; 7351 sched_group_nodes[i] = sg;
7431 for_each_cpu_mask(j, *nodemask) { 7352 for_each_cpu_mask_nr(j, *nodemask) {
7432 struct sched_domain *sd; 7353 struct sched_domain *sd;
7433 7354
7434 sd = &per_cpu(node_domains, j); 7355 sd = &per_cpu(node_domains, j);
@@ -7474,21 +7395,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7474 7395
7475 /* Calculate CPU power for physical packages and nodes */ 7396 /* Calculate CPU power for physical packages and nodes */
7476#ifdef CONFIG_SCHED_SMT 7397#ifdef CONFIG_SCHED_SMT
7477 for_each_cpu_mask(i, *cpu_map) { 7398 for_each_cpu_mask_nr(i, *cpu_map) {
7478 struct sched_domain *sd = &per_cpu(cpu_domains, i); 7399 struct sched_domain *sd = &per_cpu(cpu_domains, i);
7479 7400
7480 init_sched_groups_power(i, sd); 7401 init_sched_groups_power(i, sd);
7481 } 7402 }
7482#endif 7403#endif
7483#ifdef CONFIG_SCHED_MC 7404#ifdef CONFIG_SCHED_MC
7484 for_each_cpu_mask(i, *cpu_map) { 7405 for_each_cpu_mask_nr(i, *cpu_map) {
7485 struct sched_domain *sd = &per_cpu(core_domains, i); 7406 struct sched_domain *sd = &per_cpu(core_domains, i);
7486 7407
7487 init_sched_groups_power(i, sd); 7408 init_sched_groups_power(i, sd);
7488 } 7409 }
7489#endif 7410#endif
7490 7411
7491 for_each_cpu_mask(i, *cpu_map) { 7412 for_each_cpu_mask_nr(i, *cpu_map) {
7492 struct sched_domain *sd = &per_cpu(phys_domains, i); 7413 struct sched_domain *sd = &per_cpu(phys_domains, i);
7493 7414
7494 init_sched_groups_power(i, sd); 7415 init_sched_groups_power(i, sd);
@@ -7508,7 +7429,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7508#endif 7429#endif
7509 7430
7510 /* Attach the domains */ 7431 /* Attach the domains */
7511 for_each_cpu_mask(i, *cpu_map) { 7432 for_each_cpu_mask_nr(i, *cpu_map) {
7512 struct sched_domain *sd; 7433 struct sched_domain *sd;
7513#ifdef CONFIG_SCHED_SMT 7434#ifdef CONFIG_SCHED_SMT
7514 sd = &per_cpu(cpu_domains, i); 7435 sd = &per_cpu(cpu_domains, i);
@@ -7553,18 +7474,6 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
7553} 7474}
7554 7475
7555/* 7476/*
7556 * Free current domain masks.
7557 * Called after all cpus are attached to NULL domain.
7558 */
7559static void free_sched_domains(void)
7560{
7561 ndoms_cur = 0;
7562 if (doms_cur != &fallback_doms)
7563 kfree(doms_cur);
7564 doms_cur = &fallback_doms;
7565}
7566
7567/*
7568 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 7477 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
7569 * For now this just excludes isolated cpus, but could be used to 7478 * For now this just excludes isolated cpus, but could be used to
7570 * exclude other special cases in the future. 7479 * exclude other special cases in the future.
@@ -7603,7 +7512,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
7603 7512
7604 unregister_sched_domain_sysctl(); 7513 unregister_sched_domain_sysctl();
7605 7514
7606 for_each_cpu_mask(i, *cpu_map) 7515 for_each_cpu_mask_nr(i, *cpu_map)
7607 cpu_attach_domain(NULL, &def_root_domain, i); 7516 cpu_attach_domain(NULL, &def_root_domain, i);
7608 synchronize_sched(); 7517 synchronize_sched();
7609 arch_destroy_sched_domains(cpu_map, &tmpmask); 7518 arch_destroy_sched_domains(cpu_map, &tmpmask);
@@ -7642,7 +7551,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7642 * ownership of it and will kfree it when done with it. If the caller 7551 * ownership of it and will kfree it when done with it. If the caller
7643 * failed the kmalloc call, then it can pass in doms_new == NULL, 7552 * failed the kmalloc call, then it can pass in doms_new == NULL,
7644 * and partition_sched_domains() will fallback to the single partition 7553 * and partition_sched_domains() will fallback to the single partition
7645 * 'fallback_doms'. 7554 * 'fallback_doms', it also forces the domains to be rebuilt.
7646 * 7555 *
7647 * Call with hotplug lock held 7556 * Call with hotplug lock held
7648 */ 7557 */
@@ -7656,12 +7565,8 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
7656 /* always unregister in case we don't destroy any domains */ 7565 /* always unregister in case we don't destroy any domains */
7657 unregister_sched_domain_sysctl(); 7566 unregister_sched_domain_sysctl();
7658 7567
7659 if (doms_new == NULL) { 7568 if (doms_new == NULL)
7660 ndoms_new = 1; 7569 ndoms_new = 0;
7661 doms_new = &fallback_doms;
7662 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
7663 dattr_new = NULL;
7664 }
7665 7570
7666 /* Destroy deleted domains */ 7571 /* Destroy deleted domains */
7667 for (i = 0; i < ndoms_cur; i++) { 7572 for (i = 0; i < ndoms_cur; i++) {
@@ -7676,6 +7581,14 @@ match1:
7676 ; 7581 ;
7677 } 7582 }
7678 7583
7584 if (doms_new == NULL) {
7585 ndoms_cur = 0;
7586 ndoms_new = 1;
7587 doms_new = &fallback_doms;
7588 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
7589 dattr_new = NULL;
7590 }
7591
7679 /* Build new domains */ 7592 /* Build new domains */
7680 for (i = 0; i < ndoms_new; i++) { 7593 for (i = 0; i < ndoms_new; i++) {
7681 for (j = 0; j < ndoms_cur; j++) { 7594 for (j = 0; j < ndoms_cur; j++) {
@@ -7706,17 +7619,10 @@ match2:
7706#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 7619#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
7707int arch_reinit_sched_domains(void) 7620int arch_reinit_sched_domains(void)
7708{ 7621{
7709 int err;
7710
7711 get_online_cpus(); 7622 get_online_cpus();
7712 mutex_lock(&sched_domains_mutex); 7623 rebuild_sched_domains();
7713 detach_destroy_domains(&cpu_online_map);
7714 free_sched_domains();
7715 err = arch_init_sched_domains(&cpu_online_map);
7716 mutex_unlock(&sched_domains_mutex);
7717 put_online_cpus(); 7624 put_online_cpus();
7718 7625 return 0;
7719 return err;
7720} 7626}
7721 7627
7722static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) 7628static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
@@ -7786,59 +7692,49 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
7786} 7692}
7787#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ 7693#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
7788 7694
7695#ifndef CONFIG_CPUSETS
7789/* 7696/*
7790 * Force a reinitialization of the sched domains hierarchy. The domains 7697 * Add online and remove offline CPUs from the scheduler domains.
7791 * and groups cannot be updated in place without racing with the balancing 7698 * When cpusets are enabled they take over this function.
7792 * code, so we temporarily attach all running cpus to the NULL domain
7793 * which will prevent rebalancing while the sched domains are recalculated.
7794 */ 7699 */
7795static int update_sched_domains(struct notifier_block *nfb, 7700static int update_sched_domains(struct notifier_block *nfb,
7796 unsigned long action, void *hcpu) 7701 unsigned long action, void *hcpu)
7797{ 7702{
7703 switch (action) {
7704 case CPU_ONLINE:
7705 case CPU_ONLINE_FROZEN:
7706 case CPU_DEAD:
7707 case CPU_DEAD_FROZEN:
7708 partition_sched_domains(0, NULL, NULL);
7709 return NOTIFY_OK;
7710
7711 default:
7712 return NOTIFY_DONE;
7713 }
7714}
7715#endif
7716
7717static int update_runtime(struct notifier_block *nfb,
7718 unsigned long action, void *hcpu)
7719{
7798 int cpu = (int)(long)hcpu; 7720 int cpu = (int)(long)hcpu;
7799 7721
7800 switch (action) { 7722 switch (action) {
7801 case CPU_DOWN_PREPARE: 7723 case CPU_DOWN_PREPARE:
7802 case CPU_DOWN_PREPARE_FROZEN: 7724 case CPU_DOWN_PREPARE_FROZEN:
7803 disable_runtime(cpu_rq(cpu)); 7725 disable_runtime(cpu_rq(cpu));
7804 /* fall-through */
7805 case CPU_UP_PREPARE:
7806 case CPU_UP_PREPARE_FROZEN:
7807 detach_destroy_domains(&cpu_online_map);
7808 free_sched_domains();
7809 return NOTIFY_OK; 7726 return NOTIFY_OK;
7810 7727
7811
7812 case CPU_DOWN_FAILED: 7728 case CPU_DOWN_FAILED:
7813 case CPU_DOWN_FAILED_FROZEN: 7729 case CPU_DOWN_FAILED_FROZEN:
7814 case CPU_ONLINE: 7730 case CPU_ONLINE:
7815 case CPU_ONLINE_FROZEN: 7731 case CPU_ONLINE_FROZEN:
7816 enable_runtime(cpu_rq(cpu)); 7732 enable_runtime(cpu_rq(cpu));
7817 /* fall-through */ 7733 return NOTIFY_OK;
7818 case CPU_UP_CANCELED: 7734
7819 case CPU_UP_CANCELED_FROZEN:
7820 case CPU_DEAD:
7821 case CPU_DEAD_FROZEN:
7822 /*
7823 * Fall through and re-initialise the domains.
7824 */
7825 break;
7826 default: 7735 default:
7827 return NOTIFY_DONE; 7736 return NOTIFY_DONE;
7828 } 7737 }
7829
7830#ifndef CONFIG_CPUSETS
7831 /*
7832 * Create default domain partitioning if cpusets are disabled.
7833 * Otherwise we let cpusets rebuild the domains based on the
7834 * current setup.
7835 */
7836
7837 /* The hotplug lock is already held by cpu_up/cpu_down */
7838 arch_init_sched_domains(&cpu_online_map);
7839#endif
7840
7841 return NOTIFY_OK;
7842} 7738}
7843 7739
7844void __init sched_init_smp(void) 7740void __init sched_init_smp(void)
@@ -7858,8 +7754,15 @@ void __init sched_init_smp(void)
7858 cpu_set(smp_processor_id(), non_isolated_cpus); 7754 cpu_set(smp_processor_id(), non_isolated_cpus);
7859 mutex_unlock(&sched_domains_mutex); 7755 mutex_unlock(&sched_domains_mutex);
7860 put_online_cpus(); 7756 put_online_cpus();
7757
7758#ifndef CONFIG_CPUSETS
7861 /* XXX: Theoretical race here - CPU may be hotplugged now */ 7759 /* XXX: Theoretical race here - CPU may be hotplugged now */
7862 hotcpu_notifier(update_sched_domains, 0); 7760 hotcpu_notifier(update_sched_domains, 0);
7761#endif
7762
7763 /* RT runtime code needs to handle some hotplug events */
7764 hotcpu_notifier(update_runtime, 0);
7765
7863 init_hrtick(); 7766 init_hrtick();
7864 7767
7865 /* Move init over to a non-isolated CPU */ 7768 /* Move init over to a non-isolated CPU */
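
The sched.c hunks above drop the deferred TIF_HRTICK_RESCHED bookkeeping and program the hrtick timer directly, using a per-runqueue call_single_data plus an hrtick_csd_pending flag so at most one IPI is ever queued toward a remote CPU. Below is a minimal userspace sketch of that pending-flag pattern, assuming a pthread worker stands in for the remote CPU's IPI handler; the names mirror the kernel's, but none of this is kernel code.

/* Illustrative only: one in-flight request, later callers just refresh the expiry. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int csd_pending;        /* stands in for rq->hrtick_csd_pending */
static long long expires_ns;   /* stands in for the hrtimer expiry */
static int stop;

static void *remote_cpu(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!stop) {
                while (!csd_pending && !stop)
                        pthread_cond_wait(&kick, &lock);
                if (stop)
                        break;
                /* "program the timer" with the latest requested expiry */
                printf("arming hrtick for +%lld ns\n", expires_ns);
                csd_pending = 0;        /* allow the next request */
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void hrtick_start(long long delay_ns)
{
        pthread_mutex_lock(&lock);
        expires_ns = delay_ns;          /* always record the newest expiry */
        if (!csd_pending) {             /* queue at most one request */
                csd_pending = 1;
                pthread_cond_signal(&kick);
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, remote_cpu, NULL);
        hrtick_start(10000);
        hrtick_start(20000);            /* may coalesce with the first request */
        usleep(100000);
        pthread_mutex_lock(&lock);
        stop = 1;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
}
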
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2aa987027d6..cf2cd6ce4cb2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
878#ifdef CONFIG_SCHED_HRTICK 878#ifdef CONFIG_SCHED_HRTICK
879static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 879static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
880{ 880{
881 int requeue = rq->curr == p;
882 struct sched_entity *se = &p->se; 881 struct sched_entity *se = &p->se;
883 struct cfs_rq *cfs_rq = cfs_rq_of(se); 882 struct cfs_rq *cfs_rq = cfs_rq_of(se);
884 883
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
899 * Don't schedule slices shorter than 10000ns, that just 898 * Don't schedule slices shorter than 10000ns, that just
900 * doesn't make sense. Rely on vruntime for fairness. 899 * doesn't make sense. Rely on vruntime for fairness.
901 */ 900 */
902 if (!requeue) 901 if (rq->curr != p)
903 delta = max(10000LL, delta); 902 delta = max(10000LL, delta);
904 903
905 hrtick_start(rq, delta, requeue); 904 hrtick_start(rq, delta);
906 } 905 }
907} 906}
908#else /* !CONFIG_SCHED_HRTICK */ 907#else /* !CONFIG_SCHED_HRTICK */
@@ -1004,6 +1003,8 @@ static void yield_task_fair(struct rq *rq)
1004 * not idle and an idle cpu is available. The span of cpus to 1003 * not idle and an idle cpu is available. The span of cpus to
1005 * search starts with cpus closest then further out as needed, 1004 * search starts with cpus closest then further out as needed,
1006 * so we always favor a closer, idle cpu. 1005 * so we always favor a closer, idle cpu.
1006 * Domains may include CPUs that are not usable for migration,
1007 * hence we need to mask them out (cpu_active_map)
1007 * 1008 *
1008 * Returns the CPU we should wake onto. 1009 * Returns the CPU we should wake onto.
1009 */ 1010 */
@@ -1031,7 +1032,8 @@ static int wake_idle(int cpu, struct task_struct *p)
1031 || ((sd->flags & SD_WAKE_IDLE_FAR) 1032 || ((sd->flags & SD_WAKE_IDLE_FAR)
1032 && !task_hot(p, task_rq(p)->clock, sd))) { 1033 && !task_hot(p, task_rq(p)->clock, sd))) {
1033 cpus_and(tmp, sd->span, p->cpus_allowed); 1034 cpus_and(tmp, sd->span, p->cpus_allowed);
1034 for_each_cpu_mask(i, tmp) { 1035 cpus_and(tmp, tmp, cpu_active_map);
1036 for_each_cpu_mask_nr(i, tmp) {
1035 if (idle_cpu(i)) { 1037 if (idle_cpu(i)) {
1036 if (i != task_cpu(p)) { 1038 if (i != task_cpu(p)) {
1037 schedstat_inc(p, 1039 schedstat_inc(p,
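
hrtick_start_fair() above now passes only the remaining slice, and the 10us floor applies whenever the task is not currently running. A small userspace sketch of that arithmetic, with made-up slice values:

#include <stdio.h>
#include <stdint.h>

static int64_t hrtick_delay(int64_t slice_ns, int64_t ran_ns, int is_current)
{
        int64_t delta = slice_ns - ran_ns;

        /* below 10us, rely on vruntime for fairness instead */
        if (!is_current && delta < 10000)
                delta = 10000;
        return delta;
}

int main(void)
{
        printf("%lld\n", (long long)hrtick_delay(4000000, 3995000, 0)); /* 10000 */
        printf("%lld\n", (long long)hrtick_delay(4000000, 1000000, 1)); /* 3000000 */
        return 0;
}
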
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 47ceac9e8552..908c04f9dad0 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
240 240
241 spin_lock(&rt_b->rt_runtime_lock); 241 spin_lock(&rt_b->rt_runtime_lock);
242 rt_period = ktime_to_ns(rt_b->rt_period); 242 rt_period = ktime_to_ns(rt_b->rt_period);
243 for_each_cpu_mask(i, rd->span) { 243 for_each_cpu_mask_nr(i, rd->span) {
244 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 244 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
245 s64 diff; 245 s64 diff;
246 246
@@ -253,7 +253,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
253 253
254 diff = iter->rt_runtime - iter->rt_time; 254 diff = iter->rt_runtime - iter->rt_time;
255 if (diff > 0) { 255 if (diff > 0) {
256 do_div(diff, weight); 256 diff = div_u64((u64)diff, weight);
257 if (rt_rq->rt_runtime + diff > rt_period) 257 if (rt_rq->rt_runtime + diff > rt_period)
258 diff = rt_period - rt_rq->rt_runtime; 258 diff = rt_period - rt_rq->rt_runtime;
259 iter->rt_runtime -= diff; 259 iter->rt_runtime -= diff;
@@ -505,7 +505,9 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
505 rt_rq->rt_nr_running++; 505 rt_rq->rt_nr_running++;
506#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 506#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
507 if (rt_se_prio(rt_se) < rt_rq->highest_prio) { 507 if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
508#ifdef CONFIG_SMP
508 struct rq *rq = rq_of_rt_rq(rt_rq); 509 struct rq *rq = rq_of_rt_rq(rt_rq);
510#endif
509 511
510 rt_rq->highest_prio = rt_se_prio(rt_se); 512 rt_rq->highest_prio = rt_se_prio(rt_se);
511#ifdef CONFIG_SMP 513#ifdef CONFIG_SMP
@@ -599,11 +601,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
599 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) 601 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
600 return; 602 return;
601 603
602 if (rt_se->nr_cpus_allowed == 1) 604 list_add_tail(&rt_se->run_list, queue);
603 list_add(&rt_se->run_list, queue);
604 else
605 list_add_tail(&rt_se->run_list, queue);
606
607 __set_bit(rt_se_prio(rt_se), array->bitmap); 605 __set_bit(rt_se_prio(rt_se), array->bitmap);
608 606
609 inc_rt_tasks(rt_se, rt_rq); 607 inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +686,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
688 * Put task to the end of the run list without the overhead of dequeue 686 * Put task to the end of the run list without the overhead of dequeue
689 * followed by enqueue. 687 * followed by enqueue.
690 */ 688 */
691static 689static void
692void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 690requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
693{ 691{
694 struct rt_prio_array *array = &rt_rq->active;
695
696 if (on_rt_rq(rt_se)) { 692 if (on_rt_rq(rt_se)) {
697 list_del_init(&rt_se->run_list); 693 struct rt_prio_array *array = &rt_rq->active;
698 list_add_tail(&rt_se->run_list, 694 struct list_head *queue = array->queue + rt_se_prio(rt_se);
699 array->queue + rt_se_prio(rt_se)); 695
696 if (head)
697 list_move(&rt_se->run_list, queue);
698 else
699 list_move_tail(&rt_se->run_list, queue);
700 } 700 }
701} 701}
702 702
703static void requeue_task_rt(struct rq *rq, struct task_struct *p) 703static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
704{ 704{
705 struct sched_rt_entity *rt_se = &p->rt; 705 struct sched_rt_entity *rt_se = &p->rt;
706 struct rt_rq *rt_rq; 706 struct rt_rq *rt_rq;
707 707
708 for_each_sched_rt_entity(rt_se) { 708 for_each_sched_rt_entity(rt_se) {
709 rt_rq = rt_rq_of_se(rt_se); 709 rt_rq = rt_rq_of_se(rt_se);
710 requeue_rt_entity(rt_rq, rt_se); 710 requeue_rt_entity(rt_rq, rt_se, head);
711 } 711 }
712} 712}
713 713
714static void yield_task_rt(struct rq *rq) 714static void yield_task_rt(struct rq *rq)
715{ 715{
716 requeue_task_rt(rq, rq->curr); 716 requeue_task_rt(rq, rq->curr, 0);
717} 717}
718 718
719#ifdef CONFIG_SMP 719#ifdef CONFIG_SMP
@@ -753,6 +753,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
753 */ 753 */
754 return task_cpu(p); 754 return task_cpu(p);
755} 755}
756
757static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
758{
759 cpumask_t mask;
760
761 if (rq->curr->rt.nr_cpus_allowed == 1)
762 return;
763
764 if (p->rt.nr_cpus_allowed != 1
765 && cpupri_find(&rq->rd->cpupri, p, &mask))
766 return;
767
768 if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
769 return;
770
771 /*
772 * There appears to be other cpus that can accept
773 * current and none to run 'p', so lets reschedule
774 * to try and push current away:
775 */
776 requeue_task_rt(rq, p, 1);
777 resched_task(rq->curr);
778}
779
756#endif /* CONFIG_SMP */ 780#endif /* CONFIG_SMP */
757 781
758/* 782/*
@@ -778,18 +802,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
778 * to move current somewhere else, making room for our non-migratable 802 * to move current somewhere else, making room for our non-migratable
779 * task. 803 * task.
780 */ 804 */
781 if((p->prio == rq->curr->prio) 805 if (p->prio == rq->curr->prio && !need_resched())
782 && p->rt.nr_cpus_allowed == 1 806 check_preempt_equal_prio(rq, p);
783 && rq->curr->rt.nr_cpus_allowed != 1) {
784 cpumask_t mask;
785
786 if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
787 /*
788 * There appears to be other cpus that can accept
789 * current, so lets reschedule to try and push it away
790 */
791 resched_task(rq->curr);
792 }
793#endif 807#endif
794} 808}
795 809
@@ -922,6 +936,13 @@ static int find_lowest_rq(struct task_struct *task)
922 return -1; /* No targets found */ 936 return -1; /* No targets found */
923 937
924 /* 938 /*
939 * Only consider CPUs that are usable for migration.
940 * I guess we might want to change cpupri_find() to ignore those
941 * in the first place.
942 */
943 cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
944
945 /*
925 * At this point we have built a mask of cpus representing the 946 * At this point we have built a mask of cpus representing the
926 * lowest priority tasks in the system. Now we want to elect 947 * lowest priority tasks in the system. Now we want to elect
927 * the best one based on our affinity and topology. 948 * the best one based on our affinity and topology.
@@ -1107,7 +1128,7 @@ static int pull_rt_task(struct rq *this_rq)
1107 1128
1108 next = pick_next_task_rt(this_rq); 1129 next = pick_next_task_rt(this_rq);
1109 1130
1110 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { 1131 for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
1111 if (this_cpu == cpu) 1132 if (this_cpu == cpu)
1112 continue; 1133 continue;
1113 1134
@@ -1415,7 +1436,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1415 * on the queue: 1436 * on the queue:
1416 */ 1437 */
1417 if (p->rt.run_list.prev != p->rt.run_list.next) { 1438 if (p->rt.run_list.prev != p->rt.run_list.next) {
1418 requeue_task_rt(rq, p); 1439 requeue_task_rt(rq, p, 0);
1419 set_tsk_need_resched(p); 1440 set_tsk_need_resched(p);
1420 } 1441 }
1421} 1442}
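
do_balance_runtime() above switches from do_div() to div_u64() when splitting a neighbour's spare runtime. A userspace sketch of the borrowing arithmetic it performs, with illustrative values only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t rt_period  = 1000000000ULL;    /* 1s period */
        uint64_t my_runtime =  950000000ULL;    /* runtime we already hold */
        int64_t  iter_spare =  300000000LL;     /* neighbour: rt_runtime - rt_time */
        uint64_t weight     = 3;                /* other runqueues in the domain */

        uint64_t diff = (uint64_t)iter_spare / weight;  /* the div_u64() step */
        if (my_runtime + diff > rt_period)              /* never exceed the period */
                diff = rt_period - my_runtime;

        printf("borrow %llu ns\n", (unsigned long long)diff);  /* 50000000 */
        return 0;
}
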
diff --git a/kernel/signal.c b/kernel/signal.c
index 6c0958e52ea7..82c3545596c5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -338,13 +338,9 @@ unblock_all_signals(void)
338 spin_unlock_irqrestore(&current->sighand->siglock, flags); 338 spin_unlock_irqrestore(&current->sighand->siglock, flags);
339} 339}
340 340
341static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) 341static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
342{ 342{
343 struct sigqueue *q, *first = NULL; 343 struct sigqueue *q, *first = NULL;
344 int still_pending = 0;
345
346 if (unlikely(!sigismember(&list->signal, sig)))
347 return 0;
348 344
349 /* 345 /*
350 * Collect the siginfo appropriate to this signal. Check if 346 * Collect the siginfo appropriate to this signal. Check if
@@ -352,33 +348,30 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
352 */ 348 */
353 list_for_each_entry(q, &list->list, list) { 349 list_for_each_entry(q, &list->list, list) {
354 if (q->info.si_signo == sig) { 350 if (q->info.si_signo == sig) {
355 if (first) { 351 if (first)
356 still_pending = 1; 352 goto still_pending;
357 break;
358 }
359 first = q; 353 first = q;
360 } 354 }
361 } 355 }
356
357 sigdelset(&list->signal, sig);
358
362 if (first) { 359 if (first) {
360still_pending:
363 list_del_init(&first->list); 361 list_del_init(&first->list);
364 copy_siginfo(info, &first->info); 362 copy_siginfo(info, &first->info);
365 __sigqueue_free(first); 363 __sigqueue_free(first);
366 if (!still_pending)
367 sigdelset(&list->signal, sig);
368 } else { 364 } else {
369
370 /* Ok, it wasn't in the queue. This must be 365 /* Ok, it wasn't in the queue. This must be
371 a fast-pathed signal or we must have been 366 a fast-pathed signal or we must have been
372 out of queue space. So zero out the info. 367 out of queue space. So zero out the info.
373 */ 368 */
374 sigdelset(&list->signal, sig);
375 info->si_signo = sig; 369 info->si_signo = sig;
376 info->si_errno = 0; 370 info->si_errno = 0;
377 info->si_code = 0; 371 info->si_code = 0;
378 info->si_pid = 0; 372 info->si_pid = 0;
379 info->si_uid = 0; 373 info->si_uid = 0;
380 } 374 }
381 return 1;
382} 375}
383 376
384static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 377static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
@@ -396,8 +389,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
396 } 389 }
397 } 390 }
398 391
399 if (!collect_signal(sig, pending, info)) 392 collect_signal(sig, pending, info);
400 sig = 0;
401 } 393 }
402 394
403 return sig; 395 return sig;
@@ -462,8 +454,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
462 * is to alert stop-signal processing code when another 454 * is to alert stop-signal processing code when another
463 * processor has come along and cleared the flag. 455 * processor has come along and cleared the flag.
464 */ 456 */
465 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) 457 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
466 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
467 } 458 }
468 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 459 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
469 /* 460 /*
@@ -1125,7 +1116,7 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1125 * is probably wrong. Should make it like BSD or SYSV. 1116 * is probably wrong. Should make it like BSD or SYSV.
1126 */ 1117 */
1127 1118
1128static int kill_something_info(int sig, struct siginfo *info, int pid) 1119static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1129{ 1120{
1130 int ret; 1121 int ret;
1131 1122
@@ -1237,17 +1228,6 @@ int kill_pid(struct pid *pid, int sig, int priv)
1237} 1228}
1238EXPORT_SYMBOL(kill_pid); 1229EXPORT_SYMBOL(kill_pid);
1239 1230
1240int
1241kill_proc(pid_t pid, int sig, int priv)
1242{
1243 int ret;
1244
1245 rcu_read_lock();
1246 ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
1247 rcu_read_unlock();
1248 return ret;
1249}
1250
1251/* 1231/*
1252 * These functions support sending signals using preallocated sigqueue 1232 * These functions support sending signals using preallocated sigqueue
1253 * structures. This is needed "because realtime applications cannot 1233 * structures. This is needed "because realtime applications cannot
@@ -1379,10 +1359,9 @@ void do_notify_parent(struct task_struct *tsk, int sig)
1379 1359
1380 info.si_uid = tsk->uid; 1360 info.si_uid = tsk->uid;
1381 1361
1382 /* FIXME: find out whether or not this is supposed to be c*time. */ 1362 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1383 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1384 tsk->signal->utime)); 1363 tsk->signal->utime));
1385 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime, 1364 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1386 tsk->signal->stime)); 1365 tsk->signal->stime));
1387 1366
1388 info.si_status = tsk->exit_code & 0x7f; 1367 info.si_status = tsk->exit_code & 0x7f;
@@ -1450,9 +1429,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1450 1429
1451 info.si_uid = tsk->uid; 1430 info.si_uid = tsk->uid;
1452 1431
1453 /* FIXME: find out whether or not this is supposed to be c*time. */ 1432 info.si_utime = cputime_to_clock_t(tsk->utime);
1454 info.si_utime = cputime_to_jiffies(tsk->utime); 1433 info.si_stime = cputime_to_clock_t(tsk->stime);
1455 info.si_stime = cputime_to_jiffies(tsk->stime);
1456 1434
1457 info.si_code = why; 1435 info.si_code = why;
1458 switch (why) { 1436 switch (why) {
@@ -1491,10 +1469,10 @@ static inline int may_ptrace_stop(void)
1491 * is a deadlock situation, and pointless because our tracer 1469 * is a deadlock situation, and pointless because our tracer
1492 * is dead so don't allow us to stop. 1470 * is dead so don't allow us to stop.
1493 * If SIGKILL was already sent before the caller unlocked 1471 * If SIGKILL was already sent before the caller unlocked
1494 * ->siglock we must see ->core_waiters != 0. Otherwise it 1472 * ->siglock we must see ->core_state != NULL. Otherwise it
1495 * is safe to enter schedule(). 1473 * is safe to enter schedule().
1496 */ 1474 */
1497 if (unlikely(current->mm->core_waiters) && 1475 if (unlikely(current->mm->core_state) &&
1498 unlikely(current->mm == current->parent->mm)) 1476 unlikely(current->mm == current->parent->mm))
1499 return 0; 1477 return 0;
1500 1478
@@ -1507,9 +1485,8 @@ static inline int may_ptrace_stop(void)
1507 */ 1485 */
1508static int sigkill_pending(struct task_struct *tsk) 1486static int sigkill_pending(struct task_struct *tsk)
1509{ 1487{
1510 return ((sigismember(&tsk->pending.signal, SIGKILL) || 1488 return sigismember(&tsk->pending.signal, SIGKILL) ||
1511 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) && 1489 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1512 !unlikely(sigismember(&tsk->blocked, SIGKILL)));
1513} 1490}
1514 1491
1515/* 1492/*
@@ -1525,8 +1502,6 @@ static int sigkill_pending(struct task_struct *tsk)
1525 */ 1502 */
1526static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) 1503static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1527{ 1504{
1528 int killed = 0;
1529
1530 if (arch_ptrace_stop_needed(exit_code, info)) { 1505 if (arch_ptrace_stop_needed(exit_code, info)) {
1531 /* 1506 /*
1532 * The arch code has something special to do before a 1507 * The arch code has something special to do before a
@@ -1542,7 +1517,8 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1542 spin_unlock_irq(&current->sighand->siglock); 1517 spin_unlock_irq(&current->sighand->siglock);
1543 arch_ptrace_stop(exit_code, info); 1518 arch_ptrace_stop(exit_code, info);
1544 spin_lock_irq(&current->sighand->siglock); 1519 spin_lock_irq(&current->sighand->siglock);
1545 killed = sigkill_pending(current); 1520 if (sigkill_pending(current))
1521 return;
1546 } 1522 }
1547 1523
1548 /* 1524 /*
@@ -1559,7 +1535,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1559 __set_current_state(TASK_TRACED); 1535 __set_current_state(TASK_TRACED);
1560 spin_unlock_irq(&current->sighand->siglock); 1536 spin_unlock_irq(&current->sighand->siglock);
1561 read_lock(&tasklist_lock); 1537 read_lock(&tasklist_lock);
1562 if (!unlikely(killed) && may_ptrace_stop()) { 1538 if (may_ptrace_stop()) {
1563 do_notify_parent_cldstop(current, CLD_TRAPPED); 1539 do_notify_parent_cldstop(current, CLD_TRAPPED);
1564 read_unlock(&tasklist_lock); 1540 read_unlock(&tasklist_lock);
1565 schedule(); 1541 schedule();
@@ -1658,8 +1634,7 @@ static int do_signal_stop(int signr)
1658 } else { 1634 } else {
1659 struct task_struct *t; 1635 struct task_struct *t;
1660 1636
1661 if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE)) 1637 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1662 != SIGNAL_STOP_DEQUEUED) ||
1663 unlikely(signal_group_exit(sig))) 1638 unlikely(signal_group_exit(sig)))
1664 return 0; 1639 return 0;
1665 /* 1640 /*
@@ -1920,7 +1895,6 @@ EXPORT_SYMBOL(recalc_sigpending);
1920EXPORT_SYMBOL_GPL(dequeue_signal); 1895EXPORT_SYMBOL_GPL(dequeue_signal);
1921EXPORT_SYMBOL(flush_signals); 1896EXPORT_SYMBOL(flush_signals);
1922EXPORT_SYMBOL(force_sig); 1897EXPORT_SYMBOL(force_sig);
1923EXPORT_SYMBOL(kill_proc);
1924EXPORT_SYMBOL(ptrace_notify); 1898EXPORT_SYMBOL(ptrace_notify);
1925EXPORT_SYMBOL(send_sig); 1899EXPORT_SYMBOL(send_sig);
1926EXPORT_SYMBOL(send_sig_info); 1900EXPORT_SYMBOL(send_sig_info);
@@ -2196,7 +2170,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
2196} 2170}
2197 2171
2198asmlinkage long 2172asmlinkage long
2199sys_kill(int pid, int sig) 2173sys_kill(pid_t pid, int sig)
2200{ 2174{
2201 struct siginfo info; 2175 struct siginfo info;
2202 2176
@@ -2209,7 +2183,7 @@ sys_kill(int pid, int sig)
2209 return kill_something_info(sig, &info, pid); 2183 return kill_something_info(sig, &info, pid);
2210} 2184}
2211 2185
2212static int do_tkill(int tgid, int pid, int sig) 2186static int do_tkill(pid_t tgid, pid_t pid, int sig)
2213{ 2187{
2214 int error; 2188 int error;
2215 struct siginfo info; 2189 struct siginfo info;
@@ -2255,7 +2229,7 @@ static int do_tkill(int tgid, int pid, int sig)
2255 * exists but it's not belonging to the target process anymore. This 2229 * exists but it's not belonging to the target process anymore. This
2256 * method solves the problem of threads exiting and PIDs getting reused. 2230 * method solves the problem of threads exiting and PIDs getting reused.
2257 */ 2231 */
2258asmlinkage long sys_tgkill(int tgid, int pid, int sig) 2232asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2259{ 2233{
2260 /* This is only valid for single tasks */ 2234 /* This is only valid for single tasks */
2261 if (pid <= 0 || tgid <= 0) 2235 if (pid <= 0 || tgid <= 0)
@@ -2268,7 +2242,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2268 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2242 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2269 */ 2243 */
2270asmlinkage long 2244asmlinkage long
2271sys_tkill(int pid, int sig) 2245sys_tkill(pid_t pid, int sig)
2272{ 2246{
2273 /* This is only valid for single tasks */ 2247 /* This is only valid for single tasks */
2274 if (pid <= 0) 2248 if (pid <= 0)
@@ -2278,7 +2252,7 @@ sys_tkill(int pid, int sig)
2278} 2252}
2279 2253
2280asmlinkage long 2254asmlinkage long
2281sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) 2255sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2282{ 2256{
2283 siginfo_t info; 2257 siginfo_t info;
2284 2258
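
do_notify_parent() and do_notify_parent_cldstop() above now fill si_utime/si_stime in clock_t units rather than jiffies. From userspace that unit is sysconf(_SC_CLK_TCK); a minimal sketch of the conversion a parent would do, with a made-up tick count standing in for a real siginfo value:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long tick = sysconf(_SC_CLK_TCK);       /* typically 100 */
        long si_utime = 250;                    /* example value, not from a real siginfo */

        printf("CLK_TCK=%ld, si_utime=%ld ticks => %.2f s\n",
               tick, si_utime, (double)si_utime / tick);
        return 0;
}
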
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 81e2fe0f983a..f6b03d56c2bf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -286,7 +286,7 @@ void irq_exit(void)
286#ifdef CONFIG_NO_HZ 286#ifdef CONFIG_NO_HZ
287 /* Make sure that timer wheel updates are propagated */ 287 /* Make sure that timer wheel updates are propagated */
288 if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) 288 if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
289 tick_nohz_stop_sched_tick(); 289 tick_nohz_stop_sched_tick(0);
290 rcu_irq_exit(); 290 rcu_irq_exit();
291#endif 291#endif
292 preempt_enable_no_resched(); 292 preempt_enable_no_resched();
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index a272d78185eb..7bd8d1aadd5d 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -13,6 +13,7 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/freezer.h> 14#include <linux/freezer.h>
15#include <linux/kthread.h> 15#include <linux/kthread.h>
16#include <linux/lockdep.h>
16#include <linux/notifier.h> 17#include <linux/notifier.h>
17#include <linux/module.h> 18#include <linux/module.h>
18 19
@@ -25,7 +26,22 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp);
25static DEFINE_PER_CPU(struct task_struct *, watchdog_task); 26static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
26 27
27static int __read_mostly did_panic; 28static int __read_mostly did_panic;
28unsigned long __read_mostly softlockup_thresh = 60; 29int __read_mostly softlockup_thresh = 60;
30
31/*
32 * Should we panic (and reboot, if panic_timeout= is set) when a
33 * soft-lockup occurs:
34 */
35unsigned int __read_mostly softlockup_panic =
36 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
37
38static int __init softlockup_panic_setup(char *str)
39{
40 softlockup_panic = simple_strtoul(str, NULL, 0);
41
42 return 1;
43}
44__setup("softlockup_panic=", softlockup_panic_setup);
29 45
30static int 46static int
31softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) 47softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -84,6 +100,14 @@ void softlockup_tick(void)
84 struct pt_regs *regs = get_irq_regs(); 100 struct pt_regs *regs = get_irq_regs();
85 unsigned long now; 101 unsigned long now;
86 102
103 /* Is detection switched off? */
104 if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
105 /* Be sure we don't false trigger if switched back on */
106 if (touch_timestamp)
107 per_cpu(touch_timestamp, this_cpu) = 0;
108 return;
109 }
110
87 if (touch_timestamp == 0) { 111 if (touch_timestamp == 0) {
88 __touch_softlockup_watchdog(); 112 __touch_softlockup_watchdog();
89 return; 113 return;
@@ -92,11 +116,8 @@ void softlockup_tick(void)
92 print_timestamp = per_cpu(print_timestamp, this_cpu); 116 print_timestamp = per_cpu(print_timestamp, this_cpu);
93 117
94 /* report at most once a second */ 118 /* report at most once a second */
95 if ((print_timestamp >= touch_timestamp && 119 if (print_timestamp == touch_timestamp || did_panic)
96 print_timestamp < (touch_timestamp + 1)) ||
97 did_panic || !per_cpu(watchdog_task, this_cpu)) {
98 return; 120 return;
99 }
100 121
101 /* do not print during early bootup: */ 122 /* do not print during early bootup: */
102 if (unlikely(system_state != SYSTEM_RUNNING)) { 123 if (unlikely(system_state != SYSTEM_RUNNING)) {
@@ -106,8 +127,11 @@ void softlockup_tick(void)
106 127
107 now = get_timestamp(this_cpu); 128 now = get_timestamp(this_cpu);
108 129
109 /* Wake up the high-prio watchdog task every second: */ 130 /*
110 if (now > (touch_timestamp + 1)) 131 * Wake up the high-prio watchdog task twice per
132 * threshold timespan.
133 */
134 if (now > touch_timestamp + softlockup_thresh/2)
111 wake_up_process(per_cpu(watchdog_task, this_cpu)); 135 wake_up_process(per_cpu(watchdog_task, this_cpu));
112 136
113 /* Warn about unreasonable delays: */ 137 /* Warn about unreasonable delays: */
@@ -121,11 +145,15 @@ void softlockup_tick(void)
121 this_cpu, now - touch_timestamp, 145 this_cpu, now - touch_timestamp,
122 current->comm, task_pid_nr(current)); 146 current->comm, task_pid_nr(current));
123 print_modules(); 147 print_modules();
148 print_irqtrace_events(current);
124 if (regs) 149 if (regs)
125 show_regs(regs); 150 show_regs(regs);
126 else 151 else
127 dump_stack(); 152 dump_stack();
128 spin_unlock(&print_lock); 153 spin_unlock(&print_lock);
154
155 if (softlockup_panic)
156 panic("softlockup: hung tasks");
129} 157}
130 158
131/* 159/*
@@ -178,6 +206,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now)
178 206
179 t->last_switch_timestamp = now; 207 t->last_switch_timestamp = now;
180 touch_nmi_watchdog(); 208 touch_nmi_watchdog();
209
210 if (softlockup_panic)
211 panic("softlockup: blocked tasks");
181} 212}
182 213
183/* 214/*
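
softlockup_tick() above now wakes the watchdog twice per threshold span, treats a non-positive softlockup_thresh as "detection off", and can panic on a lockup. A rough userspace sketch of that check, using plain second counters in place of cpu_clock() timestamps; the warn threshold used here is an assumption inferred from the surrounding code, not quoted from it:

#include <stdio.h>

static int softlockup_thresh = 60;

static const char *softlockup_check(long now, long touch_timestamp)
{
        if (softlockup_thresh <= 0)
                return "disabled";
        if (now > touch_timestamp + softlockup_thresh / 2)
                printf("  (waking watchdog thread)\n");
        if (now > touch_timestamp + softlockup_thresh)      /* assumed warn bound */
                return "BUG: soft lockup";
        return "ok";
}

int main(void)
{
        printf("%s\n", softlockup_check(100, 90));   /* ok */
        printf("%s\n", softlockup_check(100, 30));   /* BUG: soft lockup */
        softlockup_thresh = -1;
        printf("%s\n", softlockup_check(100, 0));    /* disabled */
        return 0;
}
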
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ba9b2054ecbd..738b411ff2d3 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -33,8 +33,9 @@ static int stopmachine(void *cpu)
33{ 33{
34 int irqs_disabled = 0; 34 int irqs_disabled = 0;
35 int prepared = 0; 35 int prepared = 0;
36 cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
36 37
37 set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); 38 set_cpus_allowed_ptr(current, cpumask);
38 39
39 /* Ack: we are alive */ 40 /* Ack: we are alive */
40 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ 41 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
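
stopmachine() above pins itself to one CPU via set_cpus_allowed_ptr(); the change only avoids putting a full cpumask_t on the stack. The userspace analogue of that pinning looks like the sketch below, with CPU 0 chosen arbitrarily for the example:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);               /* allow only CPU 0 */
        if (sched_setaffinity(0, sizeof(set), &set) != 0) {
                perror("sched_setaffinity");
                return 1;
        }
        printf("pinned to CPU 0\n");
        return 0;
}
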
diff --git a/kernel/sys.c b/kernel/sys.c
index 14e97282eb6c..0c9d3fa1f5ff 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1343,8 +1343,6 @@ EXPORT_SYMBOL(in_egroup_p);
1343 1343
1344DECLARE_RWSEM(uts_sem); 1344DECLARE_RWSEM(uts_sem);
1345 1345
1346EXPORT_SYMBOL(uts_sem);
1347
1348asmlinkage long sys_newuname(struct new_utsname __user * name) 1346asmlinkage long sys_newuname(struct new_utsname __user * name)
1349{ 1347{
1350 int errno = 0; 1348 int errno = 0;
@@ -1795,7 +1793,7 @@ int orderly_poweroff(bool force)
1795 goto out; 1793 goto out;
1796 } 1794 }
1797 1795
1798 info = call_usermodehelper_setup(argv[0], argv, envp); 1796 info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
1799 if (info == NULL) { 1797 if (info == NULL) {
1800 argv_free(argv); 1798 argv_free(argv);
1801 goto out; 1799 goto out;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 0fea0ee12da9..08d6e1bb99ac 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -31,6 +31,7 @@ cond_syscall(sys_socketpair);
31cond_syscall(sys_bind); 31cond_syscall(sys_bind);
32cond_syscall(sys_listen); 32cond_syscall(sys_listen);
33cond_syscall(sys_accept); 33cond_syscall(sys_accept);
34cond_syscall(sys_paccept);
34cond_syscall(sys_connect); 35cond_syscall(sys_connect);
35cond_syscall(sys_getsockname); 36cond_syscall(sys_getsockname);
36cond_syscall(sys_getpeername); 37cond_syscall(sys_getpeername);
@@ -56,6 +57,7 @@ cond_syscall(compat_sys_set_robust_list);
56cond_syscall(sys_get_robust_list); 57cond_syscall(sys_get_robust_list);
57cond_syscall(compat_sys_get_robust_list); 58cond_syscall(compat_sys_get_robust_list);
58cond_syscall(sys_epoll_create); 59cond_syscall(sys_epoll_create);
60cond_syscall(sys_epoll_create1);
59cond_syscall(sys_epoll_ctl); 61cond_syscall(sys_epoll_ctl);
60cond_syscall(sys_epoll_wait); 62cond_syscall(sys_epoll_wait);
61cond_syscall(sys_epoll_pwait); 63cond_syscall(sys_epoll_pwait);
@@ -95,6 +97,7 @@ cond_syscall(sys_keyctl);
95cond_syscall(compat_sys_keyctl); 97cond_syscall(compat_sys_keyctl);
96cond_syscall(compat_sys_socketcall); 98cond_syscall(compat_sys_socketcall);
97cond_syscall(sys_inotify_init); 99cond_syscall(sys_inotify_init);
100cond_syscall(sys_inotify_init1);
98cond_syscall(sys_inotify_add_watch); 101cond_syscall(sys_inotify_add_watch);
99cond_syscall(sys_inotify_rm_watch); 102cond_syscall(sys_inotify_rm_watch);
100cond_syscall(sys_migrate_pages); 103cond_syscall(sys_migrate_pages);
@@ -155,10 +158,13 @@ cond_syscall(sys_ioprio_get);
155 158
156/* New file descriptors */ 159/* New file descriptors */
157cond_syscall(sys_signalfd); 160cond_syscall(sys_signalfd);
161cond_syscall(sys_signalfd4);
158cond_syscall(compat_sys_signalfd); 162cond_syscall(compat_sys_signalfd);
163cond_syscall(compat_sys_signalfd4);
159cond_syscall(sys_timerfd_create); 164cond_syscall(sys_timerfd_create);
160cond_syscall(sys_timerfd_settime); 165cond_syscall(sys_timerfd_settime);
161cond_syscall(sys_timerfd_gettime); 166cond_syscall(sys_timerfd_gettime);
162cond_syscall(compat_sys_timerfd_settime); 167cond_syscall(compat_sys_timerfd_settime);
163cond_syscall(compat_sys_timerfd_gettime); 168cond_syscall(compat_sys_timerfd_gettime);
164cond_syscall(sys_eventfd); 169cond_syscall(sys_eventfd);
170cond_syscall(sys_eventfd2);
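
The new cond_syscall() stubs above cover fd-creating syscall variants that take a flags argument (paccept, epoll_create1, inotify_init1, signalfd4, eventfd2), so close-on-exec and non-blocking behaviour can be requested atomically at creation time. A short userspace example through the glibc wrappers:

#include <stdio.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

int main(void)
{
        int ep = epoll_create1(EPOLL_CLOEXEC);
        int ev = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

        if (ep < 0 || ev < 0) {
                perror("epoll_create1/eventfd");
                return 1;
        }
        printf("epoll fd %d and event fd %d created with CLOEXEC set\n", ep, ev);
        return 0;
}
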
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b859e6b5a767..35a50db9b6ce 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -43,6 +43,7 @@
43#include <linux/limits.h> 43#include <linux/limits.h>
44#include <linux/dcache.h> 44#include <linux/dcache.h>
45#include <linux/syscalls.h> 45#include <linux/syscalls.h>
46#include <linux/vmstat.h>
46#include <linux/nfs_fs.h> 47#include <linux/nfs_fs.h>
47#include <linux/acpi.h> 48#include <linux/acpi.h>
48#include <linux/reboot.h> 49#include <linux/reboot.h>
@@ -80,7 +81,6 @@ extern int sysctl_drop_caches;
80extern int percpu_pagelist_fraction; 81extern int percpu_pagelist_fraction;
81extern int compat_log; 82extern int compat_log;
82extern int maps_protect; 83extern int maps_protect;
83extern int sysctl_stat_interval;
84extern int latencytop_enabled; 84extern int latencytop_enabled;
85extern int sysctl_nr_open_min, sysctl_nr_open_max; 85extern int sysctl_nr_open_min, sysctl_nr_open_max;
86#ifdef CONFIG_RCU_TORTURE_TEST 86#ifdef CONFIG_RCU_TORTURE_TEST
@@ -88,12 +88,13 @@ extern int rcutorture_runnable;
88#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ 88#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
89 89
90/* Constants used for minimum and maximum */ 90/* Constants used for minimum and maximum */
91#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) 91#if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP)
92static int one = 1; 92static int one = 1;
93#endif 93#endif
94 94
95#ifdef CONFIG_DETECT_SOFTLOCKUP 95#ifdef CONFIG_DETECT_SOFTLOCKUP
96static int sixty = 60; 96static int sixty = 60;
97static int neg_one = -1;
97#endif 98#endif
98 99
99#ifdef CONFIG_MMU 100#ifdef CONFIG_MMU
@@ -623,7 +624,7 @@ static struct ctl_table kern_table[] = {
623 { 624 {
624 .ctl_name = KERN_PRINTK_RATELIMIT, 625 .ctl_name = KERN_PRINTK_RATELIMIT,
625 .procname = "printk_ratelimit", 626 .procname = "printk_ratelimit",
626 .data = &printk_ratelimit_jiffies, 627 .data = &printk_ratelimit_state.interval,
627 .maxlen = sizeof(int), 628 .maxlen = sizeof(int),
628 .mode = 0644, 629 .mode = 0644,
629 .proc_handler = &proc_dointvec_jiffies, 630 .proc_handler = &proc_dointvec_jiffies,
@@ -632,7 +633,7 @@ static struct ctl_table kern_table[] = {
632 { 633 {
633 .ctl_name = KERN_PRINTK_RATELIMIT_BURST, 634 .ctl_name = KERN_PRINTK_RATELIMIT_BURST,
634 .procname = "printk_ratelimit_burst", 635 .procname = "printk_ratelimit_burst",
635 .data = &printk_ratelimit_burst, 636 .data = &printk_ratelimit_state.burst,
636 .maxlen = sizeof(int), 637 .maxlen = sizeof(int),
637 .mode = 0644, 638 .mode = 0644,
638 .proc_handler = &proc_dointvec, 639 .proc_handler = &proc_dointvec,
@@ -739,13 +740,24 @@ static struct ctl_table kern_table[] = {
739#ifdef CONFIG_DETECT_SOFTLOCKUP 740#ifdef CONFIG_DETECT_SOFTLOCKUP
740 { 741 {
741 .ctl_name = CTL_UNNUMBERED, 742 .ctl_name = CTL_UNNUMBERED,
743 .procname = "softlockup_panic",
744 .data = &softlockup_panic,
745 .maxlen = sizeof(int),
746 .mode = 0644,
747 .proc_handler = &proc_dointvec_minmax,
748 .strategy = &sysctl_intvec,
749 .extra1 = &zero,
750 .extra2 = &one,
751 },
752 {
753 .ctl_name = CTL_UNNUMBERED,
742 .procname = "softlockup_thresh", 754 .procname = "softlockup_thresh",
743 .data = &softlockup_thresh, 755 .data = &softlockup_thresh,
744 .maxlen = sizeof(unsigned long), 756 .maxlen = sizeof(int),
745 .mode = 0644, 757 .mode = 0644,
746 .proc_handler = &proc_doulongvec_minmax, 758 .proc_handler = &proc_dointvec_minmax,
747 .strategy = &sysctl_intvec, 759 .strategy = &sysctl_intvec,
748 .extra1 = &one, 760 .extra1 = &neg_one,
749 .extra2 = &sixty, 761 .extra2 = &sixty,
750 }, 762 },
751 { 763 {
@@ -947,7 +959,7 @@ static struct ctl_table vm_table[] = {
947#ifdef CONFIG_HUGETLB_PAGE 959#ifdef CONFIG_HUGETLB_PAGE
948 { 960 {
949 .procname = "nr_hugepages", 961 .procname = "nr_hugepages",
950 .data = &max_huge_pages, 962 .data = NULL,
951 .maxlen = sizeof(unsigned long), 963 .maxlen = sizeof(unsigned long),
952 .mode = 0644, 964 .mode = 0644,
953 .proc_handler = &hugetlb_sysctl_handler, 965 .proc_handler = &hugetlb_sysctl_handler,
@@ -973,10 +985,12 @@ static struct ctl_table vm_table[] = {
973 { 985 {
974 .ctl_name = CTL_UNNUMBERED, 986 .ctl_name = CTL_UNNUMBERED,
975 .procname = "nr_overcommit_hugepages", 987 .procname = "nr_overcommit_hugepages",
976 .data = &sysctl_overcommit_huge_pages, 988 .data = NULL,
977 .maxlen = sizeof(sysctl_overcommit_huge_pages), 989 .maxlen = sizeof(unsigned long),
978 .mode = 0644, 990 .mode = 0644,
979 .proc_handler = &hugetlb_overcommit_handler, 991 .proc_handler = &hugetlb_overcommit_handler,
992 .extra1 = (void *)&hugetlb_zero,
993 .extra2 = (void *)&hugetlb_infinity,
980 }, 994 },
981#endif 995#endif
982 { 996 {
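As a quick illustration of the softlockup_panic entry added to kern_table above (a sketch, not part of this patch; the /proc/sys path is assumed to follow the usual procname mapping):

#include <fcntl.h>
#include <unistd.h>

/* Userspace sketch: turn on panic-on-soft-lockup at runtime. */
static int enable_softlockup_panic(void)
{
        int fd = open("/proc/sys/kernel/softlockup_panic", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, "1\n", 2) != 2) {
                close(fd);
                return -1;
        }
        return close(fd);
}

Combined with a non-zero panic_timeout, this gives the automatic-reboot behaviour described in the BOOTPARAM_SOFTLOCKUP_PANIC help text added later in this patch.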
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index c09350d564f2..c35da23ab8fb 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1532,6 +1532,8 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1532 sysctl_check_leaf(namespaces, table, &fail); 1532 sysctl_check_leaf(namespaces, table, &fail);
1533 } 1533 }
1534 sysctl_check_bin_path(table, &fail); 1534 sysctl_check_bin_path(table, &fail);
1535 if (table->mode > 0777)
1536 set_fail(&fail, table, "bogus .mode");
1535 if (fail) { 1537 if (fail) {
1536 set_fail(&fail, table, NULL); 1538 set_fail(&fail, table, NULL);
1537 error = -EINVAL; 1539 error = -EINVAL;
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4a23517169a6..bd6be76303cf 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -35,7 +35,7 @@
35 */ 35 */
36#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS) 36#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS)
37 37
38static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 }; 38static DEFINE_PER_CPU(__u32, taskstats_seqnum);
39static int family_registered; 39static int family_registered;
40struct kmem_cache *taskstats_cache; 40struct kmem_cache *taskstats_cache;
41 41
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
301 return -EINVAL; 301 return -EINVAL;
302 302
303 if (isadd == REGISTER) { 303 if (isadd == REGISTER) {
304 for_each_cpu_mask(cpu, mask) { 304 for_each_cpu_mask_nr(cpu, mask) {
305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
306 cpu_to_node(cpu)); 306 cpu_to_node(cpu));
307 if (!s) 307 if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
320 320
321 /* Deregister or cleanup */ 321 /* Deregister or cleanup */
322cleanup: 322cleanup:
323 for_each_cpu_mask(cpu, mask) { 323 for_each_cpu_mask_nr(cpu, mask) {
324 listeners = &per_cpu(listener_array, cpu); 324 listeners = &per_cpu(listener_array, cpu);
325 down_write(&listeners->sem); 325 down_write(&listeners->sem);
326 list_for_each_entry_safe(s, tmp, &listeners->list, list) { 326 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index b1c2da81b050..093d4acf993b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
145 * Cycle through CPUs to check if the CPUs stay 145 * Cycle through CPUs to check if the CPUs stay
146 * synchronized to each other. 146 * synchronized to each other.
147 */ 147 */
148 int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); 148 int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
149 149
150 if (next_cpu >= NR_CPUS) 150 if (next_cpu >= nr_cpu_ids)
151 next_cpu = first_cpu(cpu_online_map); 151 next_cpu = first_cpu(cpu_online_map);
152 watchdog_timer.expires += WATCHDOG_INTERVAL; 152 watchdog_timer.expires += WATCHDOG_INTERVAL;
153 add_timer_on(&watchdog_timer, next_cpu); 153 add_timer_on(&watchdog_timer, next_cpu);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f48d0f09d32f..31463d370b94 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -399,8 +399,7 @@ again:
399 mask = CPU_MASK_NONE; 399 mask = CPU_MASK_NONE;
400 now = ktime_get(); 400 now = ktime_get();
401 /* Find all expired events */ 401 /* Find all expired events */
402 for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; 402 for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
403 cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
404 td = &per_cpu(tick_cpu_device, cpu); 403 td = &per_cpu(tick_cpu_device, cpu);
405 if (td->evtdev->next_event.tv64 <= now.tv64) 404 if (td->evtdev->next_event.tv64 <= now.tv64)
406 cpu_set(cpu, mask); 405 cpu_set(cpu, mask);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 4f3886562b8c..bf43284d6855 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
135 */ 135 */
136static void tick_setup_device(struct tick_device *td, 136static void tick_setup_device(struct tick_device *td,
137 struct clock_event_device *newdev, int cpu, 137 struct clock_event_device *newdev, int cpu,
138 cpumask_t cpumask) 138 const cpumask_t *cpumask)
139{ 139{
140 ktime_t next_event; 140 ktime_t next_event;
141 void (*handler)(struct clock_event_device *) = NULL; 141 void (*handler)(struct clock_event_device *) = NULL;
@@ -169,8 +169,8 @@ static void tick_setup_device(struct tick_device *td,
169 * When the device is not per cpu, pin the interrupt to the 169 * When the device is not per cpu, pin the interrupt to the
170 * current cpu: 170 * current cpu:
171 */ 171 */
172 if (!cpus_equal(newdev->cpumask, cpumask)) 172 if (!cpus_equal(newdev->cpumask, *cpumask))
173 irq_set_affinity(newdev->irq, cpumask); 173 irq_set_affinity(newdev->irq, *cpumask);
174 174
175 /* 175 /*
176 * When global broadcasting is active, check if the current 176 * When global broadcasting is active, check if the current
@@ -196,20 +196,20 @@ static int tick_check_new_device(struct clock_event_device *newdev)
196 struct tick_device *td; 196 struct tick_device *td;
197 int cpu, ret = NOTIFY_OK; 197 int cpu, ret = NOTIFY_OK;
198 unsigned long flags; 198 unsigned long flags;
199 cpumask_t cpumask; 199 cpumask_of_cpu_ptr_declare(cpumask);
200 200
201 spin_lock_irqsave(&tick_device_lock, flags); 201 spin_lock_irqsave(&tick_device_lock, flags);
202 202
203 cpu = smp_processor_id(); 203 cpu = smp_processor_id();
204 cpumask_of_cpu_ptr_next(cpumask, cpu);
204 if (!cpu_isset(cpu, newdev->cpumask)) 205 if (!cpu_isset(cpu, newdev->cpumask))
205 goto out_bc; 206 goto out_bc;
206 207
207 td = &per_cpu(tick_cpu_device, cpu); 208 td = &per_cpu(tick_cpu_device, cpu);
208 curdev = td->evtdev; 209 curdev = td->evtdev;
209 cpumask = cpumask_of_cpu(cpu);
210 210
211 /* cpu local device ? */ 211 /* cpu local device ? */
212 if (!cpus_equal(newdev->cpumask, cpumask)) { 212 if (!cpus_equal(newdev->cpumask, *cpumask)) {
213 213
214 /* 214 /*
215 * If the cpu affinity of the device interrupt can not 215 * If the cpu affinity of the device interrupt can not
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
222 * If we have a cpu local device already, do not replace it 222 * If we have a cpu local device already, do not replace it
223 * by a non cpu local device 223 * by a non cpu local device
224 */ 224 */
225 if (curdev && cpus_equal(curdev->cpumask, cpumask)) 225 if (curdev && cpus_equal(curdev->cpumask, *cpumask))
226 goto out_bc; 226 goto out_bc;
227 } 227 }
228 228
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index beef7ccdf842..825b4c00fe44 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -140,8 +140,6 @@ void tick_nohz_update_jiffies(void)
140 if (!ts->tick_stopped) 140 if (!ts->tick_stopped)
141 return; 141 return;
142 142
143 touch_softlockup_watchdog();
144
145 cpu_clear(cpu, nohz_cpu_mask); 143 cpu_clear(cpu, nohz_cpu_mask);
146 now = ktime_get(); 144 now = ktime_get();
147 ts->idle_waketime = now; 145 ts->idle_waketime = now;
@@ -149,6 +147,8 @@ void tick_nohz_update_jiffies(void)
149 local_irq_save(flags); 147 local_irq_save(flags);
150 tick_do_update_jiffies64(now); 148 tick_do_update_jiffies64(now);
151 local_irq_restore(flags); 149 local_irq_restore(flags);
150
151 touch_softlockup_watchdog();
152} 152}
153 153
154void tick_nohz_stop_idle(int cpu) 154void tick_nohz_stop_idle(int cpu)
@@ -195,7 +195,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
195 * Called either from the idle loop or from irq_exit() when an idle period was 195 * Called either from the idle loop or from irq_exit() when an idle period was
196 * just interrupted by an interrupt which did not cause a reschedule. 196 * just interrupted by an interrupt which did not cause a reschedule.
197 */ 197 */
198void tick_nohz_stop_sched_tick(void) 198void tick_nohz_stop_sched_tick(int inidle)
199{ 199{
200 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; 200 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
201 struct tick_sched *ts; 201 struct tick_sched *ts;
@@ -224,6 +224,11 @@ void tick_nohz_stop_sched_tick(void)
224 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 224 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
225 goto end; 225 goto end;
226 226
227 if (!inidle && !ts->inidle)
228 goto end;
229
230 ts->inidle = 1;
231
227 if (need_resched()) 232 if (need_resched())
228 goto end; 233 goto end;
229 234
@@ -373,11 +378,14 @@ void tick_nohz_restart_sched_tick(void)
373 local_irq_disable(); 378 local_irq_disable();
374 tick_nohz_stop_idle(cpu); 379 tick_nohz_stop_idle(cpu);
375 380
376 if (!ts->tick_stopped) { 381 if (!ts->inidle || !ts->tick_stopped) {
382 ts->inidle = 0;
377 local_irq_enable(); 383 local_irq_enable();
378 return; 384 return;
379 } 385 }
380 386
387 ts->inidle = 0;
388
381 rcu_exit_nohz(); 389 rcu_exit_nohz();
382 390
383 /* Update jiffies first */ 391 /* Update jiffies first */
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 2301e1e7c606..ce2d723c10e1 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -161,7 +161,7 @@ static void timer_notify(struct pt_regs *regs, int cpu)
161 __trace_special(tr, data, 2, regs->ip, 0); 161 __trace_special(tr, data, 2, regs->ip, 0);
162 162
163 while (i < sample_max_depth) { 163 while (i < sample_max_depth) {
164 frame.next_fp = 0; 164 frame.next_fp = NULL;
165 frame.return_address = 0; 165 frame.return_address = 0;
166 if (!copy_stack_frame(fp, &frame)) 166 if (!copy_stack_frame(fp, &frame))
167 break; 167 break;
@@ -213,7 +213,9 @@ static void start_stack_timers(void)
213 int cpu; 213 int cpu;
214 214
215 for_each_online_cpu(cpu) { 215 for_each_online_cpu(cpu) {
216 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 216 cpumask_of_cpu_ptr(new_mask, cpu);
217
218 set_cpus_allowed_ptr(current, new_mask);
217 start_stack_timer(cpu); 219 start_stack_timer(cpu);
218 } 220 }
219 set_cpus_allowed_ptr(current, &saved_mask); 221 set_cpus_allowed_ptr(current, &saved_mask);
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 4ab1b584961b..3da47ccdc5e5 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -28,14 +28,14 @@
28void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) 28void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
29{ 29{
30 struct timespec uptime, ts; 30 struct timespec uptime, ts;
31 s64 ac_etime; 31 u64 ac_etime;
32 32
33 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); 33 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
34 34
35 /* calculate task elapsed time in timespec */ 35 /* calculate task elapsed time in timespec */
36 do_posix_clock_monotonic_gettime(&uptime); 36 do_posix_clock_monotonic_gettime(&uptime);
37 ts = timespec_sub(uptime, tsk->start_time); 37 ts = timespec_sub(uptime, tsk->start_time);
38 /* rebase elapsed time to usec */ 38 /* rebase elapsed time to usec (should never be negative) */
39 ac_etime = timespec_to_ns(&ts); 39 ac_etime = timespec_to_ns(&ts);
40 do_div(ac_etime, NSEC_PER_USEC); 40 do_div(ac_etime, NSEC_PER_USEC);
41 stats->ac_etime = ac_etime; 41 stats->ac_etime = ac_etime;
@@ -84,9 +84,9 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
84{ 84{
85 struct mm_struct *mm; 85 struct mm_struct *mm;
86 86
87 /* convert pages-jiffies to Mbyte-usec */ 87 /* convert pages-usec to Mbyte-usec */
88 stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; 88 stats->coremem = p->acct_rss_mem1 * PAGE_SIZE / MB;
89 stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; 89 stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE / MB;
90 mm = get_task_mm(p); 90 mm = get_task_mm(p);
91 if (mm) { 91 if (mm) {
92 /* adjust to KB unit */ 92 /* adjust to KB unit */
@@ -118,12 +118,19 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
118void acct_update_integrals(struct task_struct *tsk) 118void acct_update_integrals(struct task_struct *tsk)
119{ 119{
120 if (likely(tsk->mm)) { 120 if (likely(tsk->mm)) {
121 long delta = cputime_to_jiffies( 121 cputime_t time, dtime;
122 cputime_sub(tsk->stime, tsk->acct_stimexpd)); 122 struct timeval value;
123 u64 delta;
124
125 time = tsk->stime + tsk->utime;
126 dtime = cputime_sub(time, tsk->acct_timexpd);
127 jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
128 delta = value.tv_sec;
129 delta = delta * USEC_PER_SEC + value.tv_usec;
123 130
124 if (delta == 0) 131 if (delta == 0)
125 return; 132 return;
126 tsk->acct_stimexpd = tsk->stime; 133 tsk->acct_timexpd = time;
127 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); 134 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
128 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; 135 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
129 } 136 }
@@ -135,7 +142,7 @@ void acct_update_integrals(struct task_struct *tsk)
135 */ 142 */
136void acct_clear_integrals(struct task_struct *tsk) 143void acct_clear_integrals(struct task_struct *tsk)
137{ 144{
138 tsk->acct_stimexpd = 0; 145 tsk->acct_timexpd = 0;
139 tsk->acct_rss_mem1 = 0; 146 tsk->acct_rss_mem1 = 0;
140 tsk->acct_vm_mem1 = 0; 147 tsk->acct_vm_mem1 = 0;
141} 148}
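Worked example for the converted accounting above (illustrative; assumes 4 KiB pages and that MB here means 1024*1024): a task holding 512 resident pages for one second accumulates acct_rss_mem1 = 512 * 1000000 = 512000000 page-usec, so coremem = 512000000 * 4096 / 1048576 = 2000000 Mbyte-usec, i.e. 2 MB of RSS integrated over one second.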
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce7799540c91..ec7e4f62aaff 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,7 +125,7 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
125} 125}
126 126
127static void insert_work(struct cpu_workqueue_struct *cwq, 127static void insert_work(struct cpu_workqueue_struct *cwq,
128 struct work_struct *work, int tail) 128 struct work_struct *work, struct list_head *head)
129{ 129{
130 set_wq_data(work, cwq); 130 set_wq_data(work, cwq);
131 /* 131 /*
@@ -133,21 +133,17 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
133 * result of list_add() below, see try_to_grab_pending(). 133 * result of list_add() below, see try_to_grab_pending().
134 */ 134 */
135 smp_wmb(); 135 smp_wmb();
136 if (tail) 136 list_add_tail(&work->entry, head);
137 list_add_tail(&work->entry, &cwq->worklist);
138 else
139 list_add(&work->entry, &cwq->worklist);
140 wake_up(&cwq->more_work); 137 wake_up(&cwq->more_work);
141} 138}
142 139
143/* Preempt must be disabled. */
144static void __queue_work(struct cpu_workqueue_struct *cwq, 140static void __queue_work(struct cpu_workqueue_struct *cwq,
145 struct work_struct *work) 141 struct work_struct *work)
146{ 142{
147 unsigned long flags; 143 unsigned long flags;
148 144
149 spin_lock_irqsave(&cwq->lock, flags); 145 spin_lock_irqsave(&cwq->lock, flags);
150 insert_work(cwq, work, 1); 146 insert_work(cwq, work, &cwq->worklist);
151 spin_unlock_irqrestore(&cwq->lock, flags); 147 spin_unlock_irqrestore(&cwq->lock, flags);
152} 148}
153 149
@@ -163,17 +159,39 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
163 */ 159 */
164int queue_work(struct workqueue_struct *wq, struct work_struct *work) 160int queue_work(struct workqueue_struct *wq, struct work_struct *work)
165{ 161{
162 int ret;
163
164 ret = queue_work_on(get_cpu(), wq, work);
165 put_cpu();
166
167 return ret;
168}
169EXPORT_SYMBOL_GPL(queue_work);
170
171/**
172 * queue_work_on - queue work on specific cpu
173 * @cpu: CPU number to execute work on
174 * @wq: workqueue to use
175 * @work: work to queue
176 *
177 * Returns 0 if @work was already on a queue, non-zero otherwise.
178 *
179 * We queue the work to a specific CPU, the caller must ensure it
180 * can't go away.
181 */
182int
183queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
184{
166 int ret = 0; 185 int ret = 0;
167 186
168 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { 187 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
169 BUG_ON(!list_empty(&work->entry)); 188 BUG_ON(!list_empty(&work->entry));
170 __queue_work(wq_per_cpu(wq, get_cpu()), work); 189 __queue_work(wq_per_cpu(wq, cpu), work);
171 put_cpu();
172 ret = 1; 190 ret = 1;
173 } 191 }
174 return ret; 192 return ret;
175} 193}
176EXPORT_SYMBOL_GPL(queue_work); 194EXPORT_SYMBOL_GPL(queue_work_on);
177 195
178static void delayed_work_timer_fn(unsigned long __data) 196static void delayed_work_timer_fn(unsigned long __data)
179{ 197{
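A minimal usage sketch of the new queue_work_on() interface (illustrative, not part of the patch; my_wq, my_work and my_work_fn are hypothetical names):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* deferred processing runs on the CPU chosen below */
}
static DECLARE_WORK(my_work, my_work_fn);

static int kick_work_on_cpu(struct workqueue_struct *my_wq, int cpu)
{
        /* Returns 0 if my_work was already pending; as the comment above
         * says, the caller must ensure the target CPU cannot go away. */
        return queue_work_on(cpu, my_wq, &my_work);
}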
@@ -337,14 +355,14 @@ static void wq_barrier_func(struct work_struct *work)
337} 355}
338 356
339static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 357static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
340 struct wq_barrier *barr, int tail) 358 struct wq_barrier *barr, struct list_head *head)
341{ 359{
342 INIT_WORK(&barr->work, wq_barrier_func); 360 INIT_WORK(&barr->work, wq_barrier_func);
343 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); 361 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
344 362
345 init_completion(&barr->done); 363 init_completion(&barr->done);
346 364
347 insert_work(cwq, &barr->work, tail); 365 insert_work(cwq, &barr->work, head);
348} 366}
349 367
350static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) 368static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -364,7 +382,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
364 active = 0; 382 active = 0;
365 spin_lock_irq(&cwq->lock); 383 spin_lock_irq(&cwq->lock);
366 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { 384 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
367 insert_wq_barrier(cwq, &barr, 1); 385 insert_wq_barrier(cwq, &barr, &cwq->worklist);
368 active = 1; 386 active = 1;
369 } 387 }
370 spin_unlock_irq(&cwq->lock); 388 spin_unlock_irq(&cwq->lock);
@@ -397,11 +415,62 @@ void flush_workqueue(struct workqueue_struct *wq)
397 might_sleep(); 415 might_sleep();
398 lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); 416 lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
399 lock_release(&wq->lockdep_map, 1, _THIS_IP_); 417 lock_release(&wq->lockdep_map, 1, _THIS_IP_);
400 for_each_cpu_mask(cpu, *cpu_map) 418 for_each_cpu_mask_nr(cpu, *cpu_map)
401 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 419 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
402} 420}
403EXPORT_SYMBOL_GPL(flush_workqueue); 421EXPORT_SYMBOL_GPL(flush_workqueue);
404 422
423/**
424 * flush_work - block until a work_struct's callback has terminated
425 * @work: the work which is to be flushed
426 *
427 * Returns false if @work has already terminated.
428 *
429 * It is expected that, prior to calling flush_work(), the caller has
430 * arranged for the work to not be requeued, otherwise it doesn't make
431 * sense to use this function.
432 */
433int flush_work(struct work_struct *work)
434{
435 struct cpu_workqueue_struct *cwq;
436 struct list_head *prev;
437 struct wq_barrier barr;
438
439 might_sleep();
440 cwq = get_wq_data(work);
441 if (!cwq)
442 return 0;
443
444 lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
445 lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
446
447 prev = NULL;
448 spin_lock_irq(&cwq->lock);
449 if (!list_empty(&work->entry)) {
450 /*
451 * See the comment near try_to_grab_pending()->smp_rmb().
452 * If it was re-queued under us we are not going to wait.
453 */
454 smp_rmb();
455 if (unlikely(cwq != get_wq_data(work)))
456 goto out;
457 prev = &work->entry;
458 } else {
459 if (cwq->current_work != work)
460 goto out;
461 prev = &cwq->worklist;
462 }
463 insert_wq_barrier(cwq, &barr, prev->next);
464out:
465 spin_unlock_irq(&cwq->lock);
466 if (!prev)
467 return 0;
468
469 wait_for_completion(&barr.done);
470 return 1;
471}
472EXPORT_SYMBOL_GPL(flush_work);
473
405/* 474/*
406 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, 475 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
407 * so this work can't be re-armed in any way. 476 * so this work can't be re-armed in any way.
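A sketch of how the new flush_work() is meant to be called (illustrative only; my_work is a hypothetical work item whose requeueing the caller has already stopped):

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Wait for one specific work item rather than flushing the whole queue. */
static void sync_one_item(struct work_struct *my_work)
{
        if (!flush_work(my_work))
                pr_debug("work had already terminated\n");
}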
@@ -449,7 +518,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
449 518
450 spin_lock_irq(&cwq->lock); 519 spin_lock_irq(&cwq->lock);
451 if (unlikely(cwq->current_work == work)) { 520 if (unlikely(cwq->current_work == work)) {
452 insert_wq_barrier(cwq, &barr, 0); 521 insert_wq_barrier(cwq, &barr, cwq->worklist.next);
453 running = 1; 522 running = 1;
454 } 523 }
455 spin_unlock_irq(&cwq->lock); 524 spin_unlock_irq(&cwq->lock);
@@ -477,7 +546,7 @@ static void wait_on_work(struct work_struct *work)
477 wq = cwq->wq; 546 wq = cwq->wq;
478 cpu_map = wq_cpu_map(wq); 547 cpu_map = wq_cpu_map(wq);
479 548
480 for_each_cpu_mask(cpu, *cpu_map) 549 for_each_cpu_mask_nr(cpu, *cpu_map)
481 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 550 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
482} 551}
483 552
@@ -553,6 +622,19 @@ int schedule_work(struct work_struct *work)
553} 622}
554EXPORT_SYMBOL(schedule_work); 623EXPORT_SYMBOL(schedule_work);
555 624
625/*
626 * schedule_work_on - put work task on a specific cpu
627 * @cpu: cpu to put the work task on
628 * @work: job to be done
629 *
630 * This puts a job on a specific cpu
631 */
632int schedule_work_on(int cpu, struct work_struct *work)
633{
634 return queue_work_on(cpu, keventd_wq, work);
635}
636EXPORT_SYMBOL(schedule_work_on);
637
556/** 638/**
557 * schedule_delayed_work - put work task in global workqueue after delay 639 * schedule_delayed_work - put work task in global workqueue after delay
558 * @dwork: job to be done 640 * @dwork: job to be done
@@ -607,10 +689,10 @@ int schedule_on_each_cpu(work_func_t func)
607 struct work_struct *work = per_cpu_ptr(works, cpu); 689 struct work_struct *work = per_cpu_ptr(works, cpu);
608 690
609 INIT_WORK(work, func); 691 INIT_WORK(work, func);
610 set_bit(WORK_STRUCT_PENDING, work_data_bits(work)); 692 schedule_work_on(cpu, work);
611 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
612 } 693 }
613 flush_workqueue(keventd_wq); 694 for_each_online_cpu(cpu)
695 flush_work(per_cpu_ptr(works, cpu));
614 put_online_cpus(); 696 put_online_cpus();
615 free_percpu(works); 697 free_percpu(works);
616 return 0; 698 return 0;
@@ -747,7 +829,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
747 err = create_workqueue_thread(cwq, singlethread_cpu); 829 err = create_workqueue_thread(cwq, singlethread_cpu);
748 start_workqueue_thread(cwq, -1); 830 start_workqueue_thread(cwq, -1);
749 } else { 831 } else {
750 get_online_cpus(); 832 cpu_maps_update_begin();
751 spin_lock(&workqueue_lock); 833 spin_lock(&workqueue_lock);
752 list_add(&wq->list, &workqueues); 834 list_add(&wq->list, &workqueues);
753 spin_unlock(&workqueue_lock); 835 spin_unlock(&workqueue_lock);
@@ -759,7 +841,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
759 err = create_workqueue_thread(cwq, cpu); 841 err = create_workqueue_thread(cwq, cpu);
760 start_workqueue_thread(cwq, cpu); 842 start_workqueue_thread(cwq, cpu);
761 } 843 }
762 put_online_cpus(); 844 cpu_maps_update_done();
763 } 845 }
764 846
765 if (err) { 847 if (err) {
@@ -773,8 +855,8 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key);
773static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) 855static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
774{ 856{
775 /* 857 /*
776 * Our caller is either destroy_workqueue() or CPU_DEAD, 858 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
777 * get_online_cpus() protects cwq->thread. 859 * cpu_add_remove_lock protects cwq->thread.
778 */ 860 */
779 if (cwq->thread == NULL) 861 if (cwq->thread == NULL)
780 return; 862 return;
@@ -784,7 +866,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
784 866
785 flush_cpu_workqueue(cwq); 867 flush_cpu_workqueue(cwq);
786 /* 868 /*
787 * If the caller is CPU_DEAD and cwq->worklist was not empty, 869 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
788 * a concurrent flush_workqueue() can insert a barrier after us. 870 * a concurrent flush_workqueue() can insert a barrier after us.
789 * However, in that case run_workqueue() won't return and check 871 * However, in that case run_workqueue() won't return and check
790 * kthread_should_stop() until it flushes all work_struct's. 872 * kthread_should_stop() until it flushes all work_struct's.
@@ -808,14 +890,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
808 const cpumask_t *cpu_map = wq_cpu_map(wq); 890 const cpumask_t *cpu_map = wq_cpu_map(wq);
809 int cpu; 891 int cpu;
810 892
811 get_online_cpus(); 893 cpu_maps_update_begin();
812 spin_lock(&workqueue_lock); 894 spin_lock(&workqueue_lock);
813 list_del(&wq->list); 895 list_del(&wq->list);
814 spin_unlock(&workqueue_lock); 896 spin_unlock(&workqueue_lock);
815 897
816 for_each_cpu_mask(cpu, *cpu_map) 898 for_each_cpu_mask_nr(cpu, *cpu_map)
817 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); 899 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
818 put_online_cpus(); 900 cpu_maps_update_done();
819 901
820 free_percpu(wq->cpu_wq); 902 free_percpu(wq->cpu_wq);
821 kfree(wq); 903 kfree(wq);
@@ -829,6 +911,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
829 unsigned int cpu = (unsigned long)hcpu; 911 unsigned int cpu = (unsigned long)hcpu;
830 struct cpu_workqueue_struct *cwq; 912 struct cpu_workqueue_struct *cwq;
831 struct workqueue_struct *wq; 913 struct workqueue_struct *wq;
914 int ret = NOTIFY_OK;
832 915
833 action &= ~CPU_TASKS_FROZEN; 916 action &= ~CPU_TASKS_FROZEN;
834 917
@@ -836,7 +919,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
836 case CPU_UP_PREPARE: 919 case CPU_UP_PREPARE:
837 cpu_set(cpu, cpu_populated_map); 920 cpu_set(cpu, cpu_populated_map);
838 } 921 }
839 922undo:
840 list_for_each_entry(wq, &workqueues, list) { 923 list_for_each_entry(wq, &workqueues, list) {
841 cwq = per_cpu_ptr(wq->cpu_wq, cpu); 924 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
842 925
@@ -846,7 +929,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
846 break; 929 break;
847 printk(KERN_ERR "workqueue [%s] for %i failed\n", 930 printk(KERN_ERR "workqueue [%s] for %i failed\n",
848 wq->name, cpu); 931 wq->name, cpu);
849 return NOTIFY_BAD; 932 action = CPU_UP_CANCELED;
933 ret = NOTIFY_BAD;
934 goto undo;
850 935
851 case CPU_ONLINE: 936 case CPU_ONLINE:
852 start_workqueue_thread(cwq, cpu); 937 start_workqueue_thread(cwq, cpu);
@@ -854,7 +939,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
854 939
855 case CPU_UP_CANCELED: 940 case CPU_UP_CANCELED:
856 start_workqueue_thread(cwq, -1); 941 start_workqueue_thread(cwq, -1);
857 case CPU_DEAD: 942 case CPU_POST_DEAD:
858 cleanup_workqueue_thread(cwq); 943 cleanup_workqueue_thread(cwq);
859 break; 944 break;
860 } 945 }
@@ -862,11 +947,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
862 947
863 switch (action) { 948 switch (action) {
864 case CPU_UP_CANCELED: 949 case CPU_UP_CANCELED:
865 case CPU_DEAD: 950 case CPU_POST_DEAD:
866 cpu_clear(cpu, cpu_populated_map); 951 cpu_clear(cpu, cpu_populated_map);
867 } 952 }
868 953
869 return NOTIFY_OK; 954 return ret;
870} 955}
871 956
872void __init init_workqueues(void) 957void __init init_workqueues(void)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ba106db5a65b..e1d4764435ed 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -150,7 +150,7 @@ config DETECT_SOFTLOCKUP
150 help 150 help
151 Say Y here to enable the kernel to detect "soft lockups", 151 Say Y here to enable the kernel to detect "soft lockups",
152 which are bugs that cause the kernel to loop in kernel 152 which are bugs that cause the kernel to loop in kernel
153 mode for more than 10 seconds, without giving other tasks a 153 mode for more than 60 seconds, without giving other tasks a
154 chance to run. 154 chance to run.
155 155
156 When a soft-lockup is detected, the kernel will print the 156 When a soft-lockup is detected, the kernel will print the
@@ -162,6 +162,30 @@ config DETECT_SOFTLOCKUP
162 can be detected via the NMI-watchdog, on platforms that 162 can be detected via the NMI-watchdog, on platforms that
163 support it.) 163 support it.)
164 164
165config BOOTPARAM_SOFTLOCKUP_PANIC
166 bool "Panic (Reboot) On Soft Lockups"
167 depends on DETECT_SOFTLOCKUP
168 help
169 Say Y here to enable the kernel to panic on "soft lockups",
170 which are bugs that cause the kernel to loop in kernel
171 mode for more than 60 seconds, without giving other tasks a
172 chance to run.
173
174 The panic can be used in combination with panic_timeout,
175 to cause the system to reboot automatically after a
176 lockup has been detected. This feature is useful for
177 high-availability systems that have uptime guarantees and
178 where a lockup must be resolved ASAP.
179
180 Say N if unsure.
181
182config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
183 int
184 depends on DETECT_SOFTLOCKUP
185 range 0 1
186 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
187 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
188
165config SCHED_DEBUG 189config SCHED_DEBUG
166 bool "Collect scheduler debugging info" 190 bool "Collect scheduler debugging info"
167 depends on DEBUG_KERNEL && PROC_FS 191 depends on DEBUG_KERNEL && PROC_FS
@@ -481,6 +505,18 @@ config DEBUG_WRITECOUNT
481 505
482 If unsure, say N. 506 If unsure, say N.
483 507
508config DEBUG_MEMORY_INIT
509 bool "Debug memory initialisation" if EMBEDDED
510 default !EMBEDDED
511 help
512 Enable this for additional checks during memory initialisation.
513 The sanity checks verify aspects of the VM such as the memory model
514 and other information provided by the architecture. Verbose
515 information will be printed at KERN_DEBUG loglevel depending
516 on the mminit_loglevel= command-line option.
517
518 If unsure, say Y
519
484config DEBUG_LIST 520config DEBUG_LIST
485 bool "Debug linked list manipulation" 521 bool "Debug linked list manipulation"
486 depends on DEBUG_KERNEL 522 depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a5..2cfd2721f7ed 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,7 +1,4 @@
1 1
2config HAVE_ARCH_KGDB_SHADOW_INFO
3 bool
4
5config HAVE_ARCH_KGDB 2config HAVE_ARCH_KGDB
6 bool 3 bool
7 4
diff --git a/lib/Makefile b/lib/Makefile
index 818c4d455518..9085ad6fa53d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,7 +18,7 @@ lib-$(CONFIG_SMP) += cpumask.o
18 18
19lib-y += kobject.o kref.o klist.o 19lib-y += kobject.o kref.o klist.o
20 20
21obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 21obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
22 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o 22 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
23 23
24ifeq ($(CONFIG_DEBUG_KOBJECT),y) 24ifeq ($(CONFIG_DEBUG_KOBJECT),y)
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 000000000000..d74257fd0fe7
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
1#include <linux/bcd.h>
2#include <linux/module.h>
3
4unsigned bcd2bin(unsigned char val)
5{
6 return (val & 0x0f) + (val >> 4) * 10;
7}
8EXPORT_SYMBOL(bcd2bin);
9
10unsigned char bin2bcd(unsigned val)
11{
12 return ((val / 10) << 4) + val % 10;
13}
14EXPORT_SYMBOL(bin2bcd);
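For reference, a tiny round-trip with the new helpers (illustrative; values chosen arbitrarily):

#include <linux/bcd.h>

static void bcd_example(void)
{
        unsigned char raw = bin2bcd(59);        /* 59   -> 0x59 */
        unsigned sec = bcd2bin(0x42);           /* 0x42 -> 42   */

        (void)raw;
        (void)sec;
}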
diff --git a/lib/cmdline.c b/lib/cmdline.c
index f596c08d213a..5ba8a942a478 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints)
116/** 116/**
117 * memparse - parse a string with mem suffixes into a number 117 * memparse - parse a string with mem suffixes into a number
118 * @ptr: Where parse begins 118 * @ptr: Where parse begins
119 * @retptr: (output) Pointer to next char after parse completes 119 * @retptr: (output) Optional pointer to next char after parse completes
120 * 120 *
121 * Parses a string into a number. The number stored at @ptr is 121 * Parses a string into a number. The number stored at @ptr is
122 * potentially suffixed with %K (for kilobytes, or 1024 bytes), 122 * potentially suffixed with %K (for kilobytes, or 1024 bytes),
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints)
126 * megabyte, or one gigabyte, respectively. 126 * megabyte, or one gigabyte, respectively.
127 */ 127 */
128 128
129unsigned long long memparse (char *ptr, char **retptr) 129unsigned long long memparse(char *ptr, char **retptr)
130{ 130{
131 unsigned long long ret = simple_strtoull (ptr, retptr, 0); 131 char *endptr; /* local pointer to end of parsed string */
132 132
133 switch (**retptr) { 133 unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
134
135 switch (*endptr) {
134 case 'G': 136 case 'G':
135 case 'g': 137 case 'g':
136 ret <<= 10; 138 ret <<= 10;
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr)
140 case 'K': 142 case 'K':
141 case 'k': 143 case 'k':
142 ret <<= 10; 144 ret <<= 10;
143 (*retptr)++; 145 endptr++;
144 default: 146 default:
145 break; 147 break;
146 } 148 }
149
150 if (retptr)
151 *retptr = endptr;
152
147 return ret; 153 return ret;
148} 154}
149 155
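A sketch of the reworked memparse() behaviour (illustrative; the buffers are local examples): a NULL @retptr is now accepted, and the suffix handling is unchanged.

#include <linux/kernel.h>

static void memparse_example(void)
{
        char sixteen_meg[] = "16M";
        char with_rest[] = "64k rest";
        char *end;
        unsigned long long a, b;

        a = memparse(sixteen_meg, NULL);        /* 16 << 20 == 16777216; retptr may now be NULL */
        b = memparse(with_rest, &end);          /* 65536; end points at " rest" */

        (void)a;
        (void)b;
}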
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
15} 15}
16EXPORT_SYMBOL(__next_cpu); 16EXPORT_SYMBOL(__next_cpu);
17 17
18#if NR_CPUS > 64
19int __next_cpu_nr(int n, const cpumask_t *srcp)
20{
21 return min_t(int, nr_cpu_ids,
22 find_next_bit(srcp->bits, nr_cpu_ids, n+1));
23}
24EXPORT_SYMBOL(__next_cpu_nr);
25#endif
26
18int __any_online_cpu(const cpumask_t *mask) 27int __any_online_cpu(const cpumask_t *mask)
19{ 28{
20 int cpu; 29 int cpu;
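The new __next_cpu_nr() backs the for_each_cpu_mask_nr() iterator used throughout this patch; a minimal sketch of the caller side (illustrative only):

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void show_online_cpus(void)
{
        int cpu;

        /* Scans only up to nr_cpu_ids instead of all NR_CPUS bits. */
        for_each_cpu_mask_nr(cpu, cpu_online_map)
                pr_debug("cpu %d is online\n", cpu);
}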
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 85b18d79be89..f86196390cfd 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -226,15 +226,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
226 226
227static void debug_object_is_on_stack(void *addr, int onstack) 227static void debug_object_is_on_stack(void *addr, int onstack)
228{ 228{
229 void *stack = current->stack;
230 int is_on_stack; 229 int is_on_stack;
231 static int limit; 230 static int limit;
232 231
233 if (limit > 4) 232 if (limit > 4)
234 return; 233 return;
235 234
236 is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE)); 235 is_on_stack = object_is_on_stack(addr);
237
238 if (is_on_stack == onstack) 236 if (is_on_stack == onstack)
239 return; 237 return;
240 238
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f027..3476f8203e97 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
6 * Modified by George Anzinger to reuse immediately and to use 6 * Modified by George Anzinger to reuse immediately and to use
7 * find bit instructions. Also removed _irq on spinlocks. 7 * find bit instructions. Also removed _irq on spinlocks.
8 * 8 *
9 * Modified by Nadia Derbey to make it RCU safe.
10 *
9 * Small id to pointer translation service. 11 * Small id to pointer translation service.
10 * 12 *
11 * It uses a radix tree like structure as a sparse array indexed 13 * It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@
35 37
36static struct kmem_cache *idr_layer_cache; 38static struct kmem_cache *idr_layer_cache;
37 39
38static struct idr_layer *alloc_layer(struct idr *idp) 40static struct idr_layer *get_from_free_list(struct idr *idp)
39{ 41{
40 struct idr_layer *p; 42 struct idr_layer *p;
41 unsigned long flags; 43 unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
50 return(p); 52 return(p);
51} 53}
52 54
55static void idr_layer_rcu_free(struct rcu_head *head)
56{
57 struct idr_layer *layer;
58
59 layer = container_of(head, struct idr_layer, rcu_head);
60 kmem_cache_free(idr_layer_cache, layer);
61}
62
63static inline void free_layer(struct idr_layer *p)
64{
65 call_rcu(&p->rcu_head, idr_layer_rcu_free);
66}
67
53/* only called when idp->lock is held */ 68/* only called when idp->lock is held */
54static void __free_layer(struct idr *idp, struct idr_layer *p) 69static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
55{ 70{
56 p->ary[0] = idp->id_free; 71 p->ary[0] = idp->id_free;
57 idp->id_free = p; 72 idp->id_free = p;
58 idp->id_free_cnt++; 73 idp->id_free_cnt++;
59} 74}
60 75
61static void free_layer(struct idr *idp, struct idr_layer *p) 76static void move_to_free_list(struct idr *idp, struct idr_layer *p)
62{ 77{
63 unsigned long flags; 78 unsigned long flags;
64 79
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
66 * Depends on the return element being zeroed. 81 * Depends on the return element being zeroed.
67 */ 82 */
68 spin_lock_irqsave(&idp->lock, flags); 83 spin_lock_irqsave(&idp->lock, flags);
69 __free_layer(idp, p); 84 __move_to_free_list(idp, p);
70 spin_unlock_irqrestore(&idp->lock, flags); 85 spin_unlock_irqrestore(&idp->lock, flags);
71} 86}
72 87
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
96 * @gfp_mask: memory allocation flags 111 * @gfp_mask: memory allocation flags
97 * 112 *
98 * This function should be called prior to locking and calling the 113 * This function should be called prior to locking and calling the
99 * following function. It preallocates enough memory to satisfy 114 * idr_get_new* functions. It preallocates enough memory to satisfy
100 * the worst possible allocation. 115 * the worst possible allocation.
101 * 116 *
102 * If the system is REALLY out of memory this function returns 0, 117 * If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
109 new = kmem_cache_alloc(idr_layer_cache, gfp_mask); 124 new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
110 if (new == NULL) 125 if (new == NULL)
111 return (0); 126 return (0);
112 free_layer(idp, new); 127 move_to_free_list(idp, new);
113 } 128 }
114 return 1; 129 return 1;
115} 130}
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
143 /* if already at the top layer, we need to grow */ 158 /* if already at the top layer, we need to grow */
144 if (!(p = pa[l])) { 159 if (!(p = pa[l])) {
145 *starting_id = id; 160 *starting_id = id;
146 return -2; 161 return IDR_NEED_TO_GROW;
147 } 162 }
148 163
149 /* If we need to go up one layer, continue the 164 /* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
160 id = ((id >> sh) ^ n ^ m) << sh; 175 id = ((id >> sh) ^ n ^ m) << sh;
161 } 176 }
162 if ((id >= MAX_ID_BIT) || (id < 0)) 177 if ((id >= MAX_ID_BIT) || (id < 0))
163 return -3; 178 return IDR_NOMORE_SPACE;
164 if (l == 0) 179 if (l == 0)
165 break; 180 break;
166 /* 181 /*
167 * Create the layer below if it is missing. 182 * Create the layer below if it is missing.
168 */ 183 */
169 if (!p->ary[m]) { 184 if (!p->ary[m]) {
170 if (!(new = alloc_layer(idp))) 185 new = get_from_free_list(idp);
186 if (!new)
171 return -1; 187 return -1;
172 p->ary[m] = new; 188 rcu_assign_pointer(p->ary[m], new);
173 p->count++; 189 p->count++;
174 } 190 }
175 pa[l--] = p; 191 pa[l--] = p;
@@ -192,7 +208,7 @@ build_up:
192 p = idp->top; 208 p = idp->top;
193 layers = idp->layers; 209 layers = idp->layers;
194 if (unlikely(!p)) { 210 if (unlikely(!p)) {
195 if (!(p = alloc_layer(idp))) 211 if (!(p = get_from_free_list(idp)))
196 return -1; 212 return -1;
197 layers = 1; 213 layers = 1;
198 } 214 }
@@ -204,7 +220,7 @@ build_up:
204 layers++; 220 layers++;
205 if (!p->count) 221 if (!p->count)
206 continue; 222 continue;
207 if (!(new = alloc_layer(idp))) { 223 if (!(new = get_from_free_list(idp))) {
208 /* 224 /*
209 * The allocation failed. If we built part of 225 * The allocation failed. If we built part of
210 * the structure tear it down. 226 * the structure tear it down.
@@ -214,7 +230,7 @@ build_up:
214 p = p->ary[0]; 230 p = p->ary[0];
215 new->ary[0] = NULL; 231 new->ary[0] = NULL;
216 new->bitmap = new->count = 0; 232 new->bitmap = new->count = 0;
217 __free_layer(idp, new); 233 __move_to_free_list(idp, new);
218 } 234 }
219 spin_unlock_irqrestore(&idp->lock, flags); 235 spin_unlock_irqrestore(&idp->lock, flags);
220 return -1; 236 return -1;
@@ -225,10 +241,10 @@ build_up:
225 __set_bit(0, &new->bitmap); 241 __set_bit(0, &new->bitmap);
226 p = new; 242 p = new;
227 } 243 }
228 idp->top = p; 244 rcu_assign_pointer(idp->top, p);
229 idp->layers = layers; 245 idp->layers = layers;
230 v = sub_alloc(idp, &id, pa); 246 v = sub_alloc(idp, &id, pa);
231 if (v == -2) 247 if (v == IDR_NEED_TO_GROW)
232 goto build_up; 248 goto build_up;
233 return(v); 249 return(v);
234} 250}
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
244 * Successfully found an empty slot. Install the user 260 * Successfully found an empty slot. Install the user
245 * pointer and mark the slot full. 261 * pointer and mark the slot full.
246 */ 262 */
247 pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr; 263 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
264 (struct idr_layer *)ptr);
248 pa[0]->count++; 265 pa[0]->count++;
249 idr_mark_full(pa, id); 266 idr_mark_full(pa, id);
250 } 267 }
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
277 * This is a cheap hack until the IDR code can be fixed to 294 * This is a cheap hack until the IDR code can be fixed to
278 * return proper error values. 295 * return proper error values.
279 */ 296 */
280 if (rv < 0) { 297 if (rv < 0)
281 if (rv == -1) 298 return _idr_rc_to_errno(rv);
282 return -EAGAIN;
283 else /* Will be -3 */
284 return -ENOSPC;
285 }
286 *id = rv; 299 *id = rv;
287 return 0; 300 return 0;
288} 301}
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
312 * This is a cheap hack until the IDR code can be fixed to 325 * This is a cheap hack until the IDR code can be fixed to
313 * return proper error values. 326 * return proper error values.
314 */ 327 */
315 if (rv < 0) { 328 if (rv < 0)
316 if (rv == -1) 329 return _idr_rc_to_errno(rv);
317 return -EAGAIN;
318 else /* Will be -3 */
319 return -ENOSPC;
320 }
321 *id = rv; 330 *id = rv;
322 return 0; 331 return 0;
323} 332}
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);
325 334
326static void idr_remove_warning(int id) 335static void idr_remove_warning(int id)
327{ 336{
328 printk("idr_remove called for id=%d which is not allocated.\n", id); 337 printk(KERN_WARNING
338 "idr_remove called for id=%d which is not allocated.\n", id);
329 dump_stack(); 339 dump_stack();
330} 340}
331 341
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
334 struct idr_layer *p = idp->top; 344 struct idr_layer *p = idp->top;
335 struct idr_layer **pa[MAX_LEVEL]; 345 struct idr_layer **pa[MAX_LEVEL];
336 struct idr_layer ***paa = &pa[0]; 346 struct idr_layer ***paa = &pa[0];
347 struct idr_layer *to_free;
337 int n; 348 int n;
338 349
339 *paa = NULL; 350 *paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
349 n = id & IDR_MASK; 360 n = id & IDR_MASK;
350 if (likely(p != NULL && test_bit(n, &p->bitmap))){ 361 if (likely(p != NULL && test_bit(n, &p->bitmap))){
351 __clear_bit(n, &p->bitmap); 362 __clear_bit(n, &p->bitmap);
352 p->ary[n] = NULL; 363 rcu_assign_pointer(p->ary[n], NULL);
364 to_free = NULL;
353 while(*paa && ! --((**paa)->count)){ 365 while(*paa && ! --((**paa)->count)){
354 free_layer(idp, **paa); 366 if (to_free)
367 free_layer(to_free);
368 to_free = **paa;
355 **paa-- = NULL; 369 **paa-- = NULL;
356 } 370 }
357 if (!*paa) 371 if (!*paa)
358 idp->layers = 0; 372 idp->layers = 0;
373 if (to_free)
374 free_layer(to_free);
359 } else 375 } else
360 idr_remove_warning(id); 376 idr_remove_warning(id);
361} 377}
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
368void idr_remove(struct idr *idp, int id) 384void idr_remove(struct idr *idp, int id)
369{ 385{
370 struct idr_layer *p; 386 struct idr_layer *p;
387 struct idr_layer *to_free;
371 388
372 /* Mask off upper bits we don't use for the search. */ 389 /* Mask off upper bits we don't use for the search. */
373 id &= MAX_ID_MASK; 390 id &= MAX_ID_MASK;
374 391
375 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 392 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
376 if (idp->top && idp->top->count == 1 && (idp->layers > 1) && 393 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
377 idp->top->ary[0]) { // We can drop a layer 394 idp->top->ary[0]) {
378 395 /*
396 * Single child at leftmost slot: we can shrink the tree.
397 * This level is not needed anymore since when layers are
398 * inserted, they are inserted at the top of the existing
399 * tree.
400 */
401 to_free = idp->top;
379 p = idp->top->ary[0]; 402 p = idp->top->ary[0];
380 idp->top->bitmap = idp->top->count = 0; 403 rcu_assign_pointer(idp->top, p);
381 free_layer(idp, idp->top);
382 idp->top = p;
383 --idp->layers; 404 --idp->layers;
405 to_free->bitmap = to_free->count = 0;
406 free_layer(to_free);
384 } 407 }
385 while (idp->id_free_cnt >= IDR_FREE_MAX) { 408 while (idp->id_free_cnt >= IDR_FREE_MAX) {
386 p = alloc_layer(idp); 409 p = get_from_free_list(idp);
410 /*
411 * Note: we don't call the rcu callback here, since the only
412 * layers that fall into the freelist are those that have been
413 * preallocated.
414 */
387 kmem_cache_free(idr_layer_cache, p); 415 kmem_cache_free(idr_layer_cache, p);
388 } 416 }
389 return; 417 return;
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)
424 452
425 id += 1 << n; 453 id += 1 << n;
426 while (n < fls(id)) { 454 while (n < fls(id)) {
427 if (p) { 455 if (p)
428 memset(p, 0, sizeof *p); 456 free_layer(p);
429 free_layer(idp, p);
430 }
431 n += IDR_BITS; 457 n += IDR_BITS;
432 p = *--paa; 458 p = *--paa;
433 } 459 }
434 } 460 }
435 idp->top = NULL; 461 rcu_assign_pointer(idp->top, NULL);
436 idp->layers = 0; 462 idp->layers = 0;
437} 463}
438EXPORT_SYMBOL(idr_remove_all); 464EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
444void idr_destroy(struct idr *idp) 470void idr_destroy(struct idr *idp)
445{ 471{
446 while (idp->id_free_cnt) { 472 while (idp->id_free_cnt) {
447 struct idr_layer *p = alloc_layer(idp); 473 struct idr_layer *p = get_from_free_list(idp);
448 kmem_cache_free(idr_layer_cache, p); 474 kmem_cache_free(idr_layer_cache, p);
449 } 475 }
450} 476}
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
459 * return indicates that @id is not valid or you passed %NULL in 485 * return indicates that @id is not valid or you passed %NULL in
460 * idr_get_new(). 486 * idr_get_new().
461 * 487 *
462 * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). 488 * This function can be called under rcu_read_lock(), given that the leaf
489 * pointers lifetimes are correctly managed.
463 */ 490 */
464void *idr_find(struct idr *idp, int id) 491void *idr_find(struct idr *idp, int id)
465{ 492{
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
467 struct idr_layer *p; 494 struct idr_layer *p;
468 495
469 n = idp->layers * IDR_BITS; 496 n = idp->layers * IDR_BITS;
470 p = idp->top; 497 p = rcu_dereference(idp->top);
471 498
472 /* Mask off upper bits we don't use for the search. */ 499 /* Mask off upper bits we don't use for the search. */
473 id &= MAX_ID_MASK; 500 id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
477 504
478 while (n > 0 && p) { 505 while (n > 0 && p) {
479 n -= IDR_BITS; 506 n -= IDR_BITS;
480 p = p->ary[(id >> n) & IDR_MASK]; 507 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
481 } 508 }
482 return((void *)p); 509 return((void *)p);
483} 510}
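With the RCU conversion, a read-side lookup no longer needs the caller's lock; a sketch (my_idr and my_obj_exists are hypothetical, and the leaf objects must themselves be RCU-freed or otherwise pinned, as the updated comment notes):

#include <linux/idr.h>
#include <linux/rcupdate.h>

static DEFINE_IDR(my_idr);

static int my_obj_exists(int id)
{
        int found;

        rcu_read_lock();
        found = idr_find(&my_idr, id) != NULL;
        rcu_read_unlock();
        return found;
}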
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
510 struct idr_layer **paa = &pa[0]; 537 struct idr_layer **paa = &pa[0];
511 538
512 n = idp->layers * IDR_BITS; 539 n = idp->layers * IDR_BITS;
513 p = idp->top; 540 p = rcu_dereference(idp->top);
514 max = 1 << n; 541 max = 1 << n;
515 542
516 id = 0; 543 id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
518 while (n > 0 && p) { 545 while (n > 0 && p) {
519 n -= IDR_BITS; 546 n -= IDR_BITS;
520 *paa++ = p; 547 *paa++ = p;
521 p = p->ary[(id >> n) & IDR_MASK]; 548 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
522 } 549 }
523 550
524 if (p) { 551 if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
548 * A -ENOENT return indicates that @id was not found. 575 * A -ENOENT return indicates that @id was not found.
549 * A -EINVAL return indicates that @id was not within valid constraints. 576 * A -EINVAL return indicates that @id was not within valid constraints.
550 * 577 *
551 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). 578 * The caller must serialize with writers.
552 */ 579 */
553void *idr_replace(struct idr *idp, void *ptr, int id) 580void *idr_replace(struct idr *idp, void *ptr, int id)
554{ 581{
@@ -574,7 +601,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
574 return ERR_PTR(-ENOENT); 601 return ERR_PTR(-ENOENT);
575 602
576 old_p = p->ary[n]; 603 old_p = p->ary[n];
577 p->ary[n] = ptr; 604 rcu_assign_pointer(p->ary[n], ptr);
578 605
579 return old_p; 606 return old_p;
580} 607}
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
694 restart: 721 restart:
695 /* get vacant slot */ 722 /* get vacant slot */
696 t = idr_get_empty_slot(&ida->idr, idr_id, pa); 723 t = idr_get_empty_slot(&ida->idr, idr_id, pa);
697 if (t < 0) { 724 if (t < 0)
698 if (t == -1) 725 return _idr_rc_to_errno(t);
699 return -EAGAIN;
700 else /* will be -3 */
701 return -ENOSPC;
702 }
703 726
704 if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) 727 if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
705 return -ENOSPC; 728 return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
720 return -EAGAIN; 743 return -EAGAIN;
721 744
722 memset(bitmap, 0, sizeof(struct ida_bitmap)); 745 memset(bitmap, 0, sizeof(struct ida_bitmap));
723 pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap; 746 rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
747 (void *)bitmap);
724 pa[0]->count++; 748 pa[0]->count++;
725 } 749 }
726 750
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
749 * allocation. 773 * allocation.
750 */ 774 */
751 if (ida->idr.id_free_cnt || ida->free_bitmap) { 775 if (ida->idr.id_free_cnt || ida->free_bitmap) {
752 struct idr_layer *p = alloc_layer(&ida->idr); 776 struct idr_layer *p = get_from_free_list(&ida->idr);
753 if (p) 777 if (p)
754 kmem_cache_free(idr_layer_cache, p); 778 kmem_cache_free(idr_layer_cache, p);
755 } 779 }
diff --git a/lib/inflate.c b/lib/inflate.c
index 9762294be062..1a8e8a978128 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = {
230#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} 230#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
231#define DUMPBITS(n) {b>>=(n);k-=(n);} 231#define DUMPBITS(n) {b>>=(n);k-=(n);}
232 232
233#ifndef NO_INFLATE_MALLOC
234/* A trivial malloc implementation, adapted from
235 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
236 */
237
238static unsigned long malloc_ptr;
239static int malloc_count;
240
241static void *malloc(int size)
242{
243 void *p;
244
245 if (size < 0)
246 error("Malloc error");
247 if (!malloc_ptr)
248 malloc_ptr = free_mem_ptr;
249
250 malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
251
252 p = (void *)malloc_ptr;
253 malloc_ptr += size;
254
255 if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
256 error("Out of memory");
257
258 malloc_count++;
259 return p;
260}
261
262static void free(void *where)
263{
264 malloc_count--;
265 if (!malloc_count)
266 malloc_ptr = free_mem_ptr;
267}
268#else
269#define malloc(a) kmalloc(a, GFP_KERNEL)
270#define free(a) kfree(a)
271#endif
233 272
234/* 273/*
235 Huffman code decoding is performed using a multi-level table lookup. 274 Huffman code decoding is performed using a multi-level table lookup.
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void)
1045 int e; /* last block flag */ 1084 int e; /* last block flag */
1046 int r; /* result code */ 1085 int r; /* result code */
1047 unsigned h; /* maximum struct huft's malloc'ed */ 1086 unsigned h; /* maximum struct huft's malloc'ed */
1048 void *ptr;
1049 1087
1050 /* initialize window, bit buffer */ 1088 /* initialize window, bit buffer */
1051 wp = 0; 1089 wp = 0;
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void)
1057 h = 0; 1095 h = 0;
1058 do { 1096 do {
1059 hufts = 0; 1097 hufts = 0;
1060 gzip_mark(&ptr); 1098#ifdef ARCH_HAS_DECOMP_WDOG
1061 if ((r = inflate_block(&e)) != 0) { 1099 arch_decomp_wdog();
1062 gzip_release(&ptr); 1100#endif
1063 return r; 1101 r = inflate_block(&e);
1064 } 1102 if (r)
1065 gzip_release(&ptr); 1103 return r;
1066 if (hufts > h) 1104 if (hufts > h)
1067 h = hufts; 1105 h = hufts;
1068 } while (!e); 1106 } while (!e);
diff --git a/lib/kobject.c b/lib/kobject.c
index 744401571ed7..bd732ffebc85 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj)
164 return -ENOENT; 164 return -ENOENT;
165 165
166 if (!kobj->name || !kobj->name[0]) { 166 if (!kobj->name || !kobj->name[0]) {
167 pr_debug("kobject: (%p): attempted to be registered with empty " 167 WARN(1, "kobject: (%p): attempted to be registered with empty "
168 "name!\n", kobj); 168 "name!\n", kobj);
169 WARN_ON(1);
170 return -EINVAL; 169 return -EINVAL;
171 } 170 }
172 171
@@ -583,12 +582,10 @@ static void kobject_release(struct kref *kref)
583void kobject_put(struct kobject *kobj) 582void kobject_put(struct kobject *kobj)
584{ 583{
585 if (kobj) { 584 if (kobj) {
586 if (!kobj->state_initialized) { 585 if (!kobj->state_initialized)
587 printk(KERN_WARNING "kobject: '%s' (%p): is not " 586 WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
588 "initialized, yet kobject_put() is being " 587 "initialized, yet kobject_put() is being "
589 "called.\n", kobject_name(kobj), kobj); 588 "called.\n", kobject_name(kobj), kobj);
590 WARN_ON(1);
591 }
592 kref_put(&kobj->kref, kobject_release); 589 kref_put(&kobj->kref, kobject_release);
593 } 590 }
594} 591}
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 4350ba9655bd..1a39f4e3ae1f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,18 +20,14 @@ void __list_add(struct list_head *new,
20 struct list_head *prev, 20 struct list_head *prev,
21 struct list_head *next) 21 struct list_head *next)
22{ 22{
23 if (unlikely(next->prev != prev)) { 23 WARN(next->prev != prev,
24 printk(KERN_ERR "list_add corruption. next->prev should be " 24 "list_add corruption. next->prev should be "
25 "prev (%p), but was %p. (next=%p).\n", 25 "prev (%p), but was %p. (next=%p).\n",
26 prev, next->prev, next); 26 prev, next->prev, next);
27 BUG(); 27 WARN(prev->next != next,
28 } 28 "list_add corruption. prev->next should be "
29 if (unlikely(prev->next != next)) { 29 "next (%p), but was %p. (prev=%p).\n",
30 printk(KERN_ERR "list_add corruption. prev->next should be " 30 next, prev->next, prev);
31 "next (%p), but was %p. (prev=%p).\n",
32 next, prev->next, prev);
33 BUG();
34 }
35 next->prev = new; 31 next->prev = new;
36 new->next = next; 32 new->next = next;
37 new->prev = prev; 33 new->prev = prev;
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new,
40EXPORT_SYMBOL(__list_add); 36EXPORT_SYMBOL(__list_add);
41 37
42/** 38/**
43 * list_add - add a new entry
44 * @new: new entry to be added
45 * @head: list head to add it after
46 *
47 * Insert a new entry after the specified head.
48 * This is good for implementing stacks.
49 */
50void list_add(struct list_head *new, struct list_head *head)
51{
52 __list_add(new, head, head->next);
53}
54EXPORT_SYMBOL(list_add);
55
56/**
57 * list_del - deletes entry from list. 39 * list_del - deletes entry from list.
58 * @entry: the element to delete from the list. 40 * @entry: the element to delete from the list.
59 * Note: list_empty on entry does not return true after this, the entry is 41 * Note: list_empty on entry does not return true after this, the entry is
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add);
61 */ 43 */
62void list_del(struct list_head *entry) 44void list_del(struct list_head *entry)
63{ 45{
64 if (unlikely(entry->prev->next != entry)) { 46 WARN(entry->prev->next != entry,
65 printk(KERN_ERR "list_del corruption. prev->next should be %p, " 47 "list_del corruption. prev->next should be %p, "
66 "but was %p\n", entry, entry->prev->next); 48 "but was %p\n", entry, entry->prev->next);
67 BUG(); 49 WARN(entry->next->prev != entry,
68 } 50 "list_del corruption. next->prev should be %p, "
69 if (unlikely(entry->next->prev != entry)) { 51 "but was %p\n", entry, entry->next->prev);
70 printk(KERN_ERR "list_del corruption. next->prev should be %p, "
71 "but was %p\n", entry, entry->next->prev);
72 BUG();
73 }
74 __list_del(entry->prev, entry->next); 52 __list_del(entry->prev, entry->next);
75 entry->next = LIST_POISON1; 53 entry->next = LIST_POISON1;
76 entry->prev = LIST_POISON2; 54 entry->prev = LIST_POISON2;
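These list sanity checks are only compiled in when CONFIG_DEBUG_LIST=y (lib/Makefile builds list_debug.o under that option). The behavioural change is that a corrupted list now warns and carries on rather than halting the machine; callers see exactly the same API. A small, invented sketch:

    #include <linux/list.h>

    struct item {
    	struct list_head node;
    };

    static LIST_HEAD(pending);

    static void example(void)
    {
    	static struct item it;

    	/* with CONFIG_DEBUG_LIST=y, a corrupted 'pending' list makes these
    	 * calls emit a WARN() report and continue instead of calling BUG() */
    	list_add(&it.node, &pending);
    	list_del(&it.node);
    }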
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 77f0f9b775a9..5dc6b29c1575 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -138,8 +138,7 @@ match:
138 t += 31 + *ip++; 138 t += 31 + *ip++;
139 } 139 }
140 m_pos = op - 1; 140 m_pos = op - 1;
141 m_pos -= le16_to_cpu(get_unaligned( 141 m_pos -= get_unaligned_le16(ip) >> 2;
142 (const unsigned short *)ip)) >> 2;
143 ip += 2; 142 ip += 2;
144 } else if (t >= 16) { 143 } else if (t >= 16) {
145 m_pos = op; 144 m_pos = op;
@@ -157,8 +156,7 @@ match:
157 } 156 }
158 t += 7 + *ip++; 157 t += 7 + *ip++;
159 } 158 }
160 m_pos -= le16_to_cpu(get_unaligned( 159 m_pos -= get_unaligned_le16(ip) >> 2;
161 (const unsigned short *)ip)) >> 2;
162 ip += 2; 160 ip += 2;
163 if (m_pos == op) 161 if (m_pos == op)
164 goto eof_found; 162 goto eof_found;
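get_unaligned_le16() bundles the unaligned load and the little-endian conversion that the old le16_to_cpu(get_unaligned(...)) pair spelled out. One way to picture the helper, keeping in mind that the real implementations in include/linux/unaligned/ pick the fastest safe variant per architecture, is the following byte-at-a-time sketch:

    static inline u16 get_unaligned_le16_sketch(const void *p)
    {
    	const u8 *b = p;

    	/* little-endian: low byte first, works at any alignment */
    	return b[0] | (b[1] << 8);
    }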
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 485e3040dcd4..35136671b215 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -3,6 +3,9 @@
3 * 3 *
4 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> 4 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
5 * 5 *
 6 * 2008-05-01 rewrote the function to take a ratelimit_state data struct as
 7 * its parameter. Now every user can use their own standalone ratelimit_state.
8 *
6 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
7 * 10 *
8 */ 11 */
@@ -11,41 +14,43 @@
11#include <linux/jiffies.h> 14#include <linux/jiffies.h>
12#include <linux/module.h> 15#include <linux/module.h>
13 16
17static DEFINE_SPINLOCK(ratelimit_lock);
18static unsigned long flags;
19
14/* 20/*
15 * __ratelimit - rate limiting 21 * __ratelimit - rate limiting
16 * @ratelimit_jiffies: minimum time in jiffies between two callbacks 22 * @rs: ratelimit_state data
17 * @ratelimit_burst: number of callbacks we do before ratelimiting
18 * 23 *
19 * This enforces a rate limit: not more than @ratelimit_burst callbacks 24 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
20 * in every ratelimit_jiffies 25 * in every @rs->ratelimit_jiffies
21 */ 26 */
22int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) 27int __ratelimit(struct ratelimit_state *rs)
23{ 28{
24 static DEFINE_SPINLOCK(ratelimit_lock); 29 if (!rs->interval)
25 static unsigned toks = 10 * 5 * HZ; 30 return 1;
26 static unsigned long last_msg;
27 static int missed;
28 unsigned long flags;
29 unsigned long now = jiffies;
30 31
31 spin_lock_irqsave(&ratelimit_lock, flags); 32 spin_lock_irqsave(&ratelimit_lock, flags);
32 toks += now - last_msg; 33 if (!rs->begin)
33 last_msg = now; 34 rs->begin = jiffies;
34 if (toks > (ratelimit_burst * ratelimit_jiffies))
35 toks = ratelimit_burst * ratelimit_jiffies;
36 if (toks >= ratelimit_jiffies) {
37 int lost = missed;
38 35
39 missed = 0; 36 if (time_is_before_jiffies(rs->begin + rs->interval)) {
40 toks -= ratelimit_jiffies; 37 if (rs->missed)
41 spin_unlock_irqrestore(&ratelimit_lock, flags); 38 printk(KERN_WARNING "%s: %d callbacks suppressed\n",
42 if (lost) 39 __func__, rs->missed);
43 printk(KERN_WARNING "%s: %d messages suppressed\n", 40 rs->begin = 0;
44 __func__, lost); 41 rs->printed = 0;
45 return 1; 42 rs->missed = 0;
46 } 43 }
47 missed++; 44 if (rs->burst && rs->burst > rs->printed)
45 goto print;
46
47 rs->missed++;
48 spin_unlock_irqrestore(&ratelimit_lock, flags); 48 spin_unlock_irqrestore(&ratelimit_lock, flags);
49 return 0; 49 return 0;
50
51print:
52 rs->printed++;
53 spin_unlock_irqrestore(&ratelimit_lock, flags);
54 return 1;
50} 55}
51EXPORT_SYMBOL(__ratelimit); 56EXPORT_SYMBOL(__ratelimit);
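After the rewrite each call site carries its own ratelimit_state rather than sharing the old function-local statics. A minimal sketch of a caller, using only the fields the code above reads (interval and burst) and assuming the struct definition lives in the <linux/ratelimit.h> header that accompanies this rewrite; the variable name and message are invented:

    #include <linux/kernel.h>
    #include <linux/ratelimit.h>

    static struct ratelimit_state my_rs = {
    	.interval	= 5 * HZ,	/* window length, in jiffies */
    	.burst		= 10,		/* callbacks allowed per window */
    };

    static void noisy_event(void)
    {
    	if (__ratelimit(&my_rs))
    		printk(KERN_WARNING "noisy event (rate limited)\n");
    }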
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d78..876ba6d5b670 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
295EXPORT_SYMBOL(sg_alloc_table); 295EXPORT_SYMBOL(sg_alloc_table);
296 296
297/** 297/**
298 * sg_miter_start - start mapping iteration over a sg list
299 * @miter: sg mapping iter to be started
300 * @sgl: sg list to iterate over
301 * @nents: number of sg entries
302 *
303 * Description:
304 * Starts mapping iterator @miter.
305 *
306 * Context:
307 * Don't care.
308 */
309void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
310 unsigned int nents, unsigned int flags)
311{
312 memset(miter, 0, sizeof(struct sg_mapping_iter));
313
314 miter->__sg = sgl;
315 miter->__nents = nents;
316 miter->__offset = 0;
317 miter->__flags = flags;
318}
319EXPORT_SYMBOL(sg_miter_start);
320
321/**
322 * sg_miter_next - proceed mapping iterator to the next mapping
323 * @miter: sg mapping iter to proceed
324 *
325 * Description:
326 * Proceeds @miter@ to the next mapping. @miter@ should have been
327 * started using sg_miter_start(). On successful return,
328 * @miter@->page, @miter@->addr and @miter@->length point to the
329 * current mapping.
330 *
331 * Context:
332 * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
333 * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
334 *
335 * Returns:
336 * true if @miter contains the next mapping. false if end of sg
337 * list is reached.
338 */
339bool sg_miter_next(struct sg_mapping_iter *miter)
340{
341 unsigned int off, len;
342
343 /* check for end and drop resources from the last iteration */
344 if (!miter->__nents)
345 return false;
346
347 sg_miter_stop(miter);
348
349 /* get to the next sg if necessary. __offset is adjusted by stop */
350 if (miter->__offset == miter->__sg->length && --miter->__nents) {
351 miter->__sg = sg_next(miter->__sg);
352 miter->__offset = 0;
353 }
354
355 /* map the next page */
356 off = miter->__sg->offset + miter->__offset;
357 len = miter->__sg->length - miter->__offset;
358
359 miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
360 off &= ~PAGE_MASK;
361 miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
362 miter->consumed = miter->length;
363
364 if (miter->__flags & SG_MITER_ATOMIC)
365 miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
366 else
367 miter->addr = kmap(miter->page) + off;
368
369 return true;
370}
371EXPORT_SYMBOL(sg_miter_next);
372
373/**
374 * sg_miter_stop - stop mapping iteration
375 * @miter: sg mapping iter to be stopped
376 *
377 * Description:
378 * Stops mapping iterator @miter. @miter should have been started
379 * started using sg_miter_start(). A stopped iteration can be
380 * resumed by calling sg_miter_next() on it. This is useful when
381 * resources (kmap) need to be released during iteration.
382 *
383 * Context:
384 * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
385 */
386void sg_miter_stop(struct sg_mapping_iter *miter)
387{
388 WARN_ON(miter->consumed > miter->length);
389
390 /* drop resources from the last iteration */
391 if (miter->addr) {
392 miter->__offset += miter->consumed;
393
394 if (miter->__flags & SG_MITER_ATOMIC) {
395 WARN_ON(!irqs_disabled());
396 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
397 } else
 398 kunmap(miter->page);
399
400 miter->page = NULL;
401 miter->addr = NULL;
402 miter->length = 0;
403 miter->consumed = 0;
404 }
405}
406EXPORT_SYMBOL(sg_miter_stop);
407
408/**
298 * sg_copy_buffer - Copy data between a linear buffer and an SG list 409 * sg_copy_buffer - Copy data between a linear buffer and an SG list
299 * @sgl: The SG list 410 * @sgl: The SG list
300 * @nents: Number of SG entries 411 * @nents: Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
309static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, 420static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
310 void *buf, size_t buflen, int to_buffer) 421 void *buf, size_t buflen, int to_buffer)
311{ 422{
312 struct scatterlist *sg; 423 unsigned int offset = 0;
313 size_t buf_off = 0; 424 struct sg_mapping_iter miter;
314 int i; 425
315 426 sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
316 WARN_ON(!irqs_disabled()); 427
317 428 while (sg_miter_next(&miter) && offset < buflen) {
318 for_each_sg(sgl, sg, nents, i) { 429 unsigned int len;
319 struct page *page; 430
320 int n = 0; 431 len = min(miter.length, buflen - offset);
321 unsigned int sg_off = sg->offset; 432
322 unsigned int sg_copy = sg->length; 433 if (to_buffer)
323 434 memcpy(buf + offset, miter.addr, len);
324 if (sg_copy > buflen) 435 else {
325 sg_copy = buflen; 436 memcpy(miter.addr, buf + offset, len);
326 buflen -= sg_copy; 437 flush_kernel_dcache_page(miter.page);
327
328 while (sg_copy > 0) {
329 unsigned int page_copy;
330 void *p;
331
332 page_copy = PAGE_SIZE - sg_off;
333 if (page_copy > sg_copy)
334 page_copy = sg_copy;
335
336 page = nth_page(sg_page(sg), n);
337 p = kmap_atomic(page, KM_BIO_SRC_IRQ);
338
339 if (to_buffer)
340 memcpy(buf + buf_off, p + sg_off, page_copy);
341 else {
342 memcpy(p + sg_off, buf + buf_off, page_copy);
343 flush_kernel_dcache_page(page);
344 }
345
346 kunmap_atomic(p, KM_BIO_SRC_IRQ);
347
348 buf_off += page_copy;
349 sg_off += page_copy;
350 if (sg_off == PAGE_SIZE) {
351 sg_off = 0;
352 n++;
353 }
354 sg_copy -= page_copy;
355 } 438 }
356 439
357 if (!buflen) 440 offset += len;
358 break;
359 } 441 }
360 442
361 return buf_off; 443 sg_miter_stop(&miter);
444
445 return offset;
362} 446}
363 447
364/** 448/**
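sg_copy_buffer() above is the first consumer of the new iterator, but the API is general. A hedged sketch of another user that walks and zeroes an sg list; sgl/nents stand for any scatterlist, and flags of 0 means plain kmap(), so this variant may sleep and must not be used in atomic context:

    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static void wipe_sg(struct scatterlist *sgl, unsigned int nents)
    {
    	struct sg_mapping_iter miter;

    	sg_miter_start(&miter, sgl, nents, 0);
    	while (sg_miter_next(&miter))
    		/* addr/length describe at most one mapped page per step */
    		memset(miter.addr, 0, miter.length);
    	sg_miter_stop(&miter);
    }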
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181e..c4381d9516f6 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
11{ 11{
12 unsigned long preempt_count = preempt_count(); 12 unsigned long preempt_count = preempt_count();
13 int this_cpu = raw_smp_processor_id(); 13 int this_cpu = raw_smp_processor_id();
14 cpumask_t this_mask; 14 cpumask_of_cpu_ptr_declare(this_mask);
15 15
16 if (likely(preempt_count)) 16 if (likely(preempt_count))
17 goto out; 17 goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
23 * Kernel threads bound to a single CPU can safely use 23 * Kernel threads bound to a single CPU can safely use
24 * smp_processor_id(): 24 * smp_processor_id():
25 */ 25 */
26 this_mask = cpumask_of_cpu(this_cpu); 26 cpumask_of_cpu_ptr_next(this_mask, this_cpu);
27 27
28 if (cpus_equal(current->cpus_allowed, this_mask)) 28 if (cpus_equal(current->cpus_allowed, *this_mask))
29 goto out; 29 goto out;
30 30
31 /* 31 /*
diff --git a/mm/Kconfig b/mm/Kconfig
index c4de85285bb4..aa799007a11b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -174,7 +174,7 @@ config SPLIT_PTLOCK_CPUS
174config MIGRATION 174config MIGRATION
175 bool "Page migration" 175 bool "Page migration"
176 def_bool y 176 def_bool y
177 depends on NUMA 177 depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
178 help 178 help
179 Allows the migration of the physical location of pages of processes 179 Allows the migration of the physical location of pages of processes
180 while the virtual addresses are not changed. This is useful for 180 while the virtual addresses are not changed. This is useful for
diff --git a/mm/Makefile b/mm/Makefile
index 18c143b3c46c..06ca2381fef1 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -11,7 +11,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
11 maccess.o page_alloc.o page-writeback.o pdflush.o \ 11 maccess.o page_alloc.o page-writeback.o pdflush.o \
12 readahead.o swap.o truncate.o vmscan.o \ 12 readahead.o swap.o truncate.o vmscan.o \
13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ 13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
14 page_isolation.o $(mmu-y) 14 page_isolation.o mm_init.o $(mmu-y)
15 15
16obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o 16obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
17obj-$(CONFIG_BOUNCE) += bounce.o 17obj-$(CONFIG_BOUNCE) += bounce.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 05f2b4009ccc..843364594e23 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(percpu_depopulate);
35void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) 35void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
36{ 36{
37 int cpu; 37 int cpu;
38 for_each_cpu_mask(cpu, *mask) 38 for_each_cpu_mask_nr(cpu, *mask)
39 percpu_depopulate(__pdata, cpu); 39 percpu_depopulate(__pdata, cpu);
40} 40}
41EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); 41EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
@@ -86,7 +86,7 @@ int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
86 int cpu; 86 int cpu;
87 87
88 cpus_clear(populated); 88 cpus_clear(populated);
89 for_each_cpu_mask(cpu, *mask) 89 for_each_cpu_mask_nr(cpu, *mask)
90 if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { 90 if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
91 __percpu_depopulate_mask(__pdata, &populated); 91 __percpu_depopulate_mask(__pdata, &populated);
92 return -ENOMEM; 92 return -ENOMEM;
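for_each_cpu_mask_nr() is a drop-in replacement for for_each_cpu_mask() that stops scanning at nr_cpu_ids (the number of possible CPUs) instead of walking the full NR_CPUS bitmap, which is cheaper on kernels configured for far more CPUs than the machine has. Loop bodies are untouched, for example:

    int cpu;

    for_each_cpu_mask_nr(cpu, *mask)	/* identical usage, shorter scan */
    	percpu_depopulate(__pdata, cpu);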
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 8d9f60e06f62..4af15d0340ad 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * linux/mm/bootmem.c 2 * bootmem - A boot-time physical memory allocator and configurator
3 * 3 *
4 * Copyright (C) 1999 Ingo Molnar 4 * Copyright (C) 1999 Ingo Molnar
5 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 5 * 1999 Kanoj Sarcar, SGI
6 * 2008 Johannes Weiner
6 * 7 *
7 * simple boot-time physical memory area allocator and 8 * Access to this subsystem has to be serialized externally (which is true
8 * free memory collector. It's used to deal with reserved 9 * for the boot process anyway).
9 * system memory and memory holes as well.
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/pfn.h> 12#include <linux/pfn.h>
@@ -19,15 +19,10 @@
19 19
20#include "internal.h" 20#include "internal.h"
21 21
22/*
23 * Access to this subsystem has to be serialized externally. (this is
24 * true for the boot process anyway)
25 */
26unsigned long max_low_pfn; 22unsigned long max_low_pfn;
27unsigned long min_low_pfn; 23unsigned long min_low_pfn;
28unsigned long max_pfn; 24unsigned long max_pfn;
29 25
30static LIST_HEAD(bdata_list);
31#ifdef CONFIG_CRASH_DUMP 26#ifdef CONFIG_CRASH_DUMP
32/* 27/*
33 * If we have booted due to a crash, max_pfn will be a very low value. We need 28 * If we have booted due to a crash, max_pfn will be a very low value. We need
@@ -36,63 +31,72 @@ static LIST_HEAD(bdata_list);
36unsigned long saved_max_pfn; 31unsigned long saved_max_pfn;
37#endif 32#endif
38 33
39/* return the number of _pages_ that will be allocated for the boot bitmap */ 34bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
40unsigned long __init bootmem_bootmap_pages(unsigned long pages) 35
36static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
37
38static int bootmem_debug;
39
40static int __init bootmem_debug_setup(char *buf)
41{ 41{
42 unsigned long mapsize; 42 bootmem_debug = 1;
43 return 0;
44}
45early_param("bootmem_debug", bootmem_debug_setup);
43 46
44 mapsize = (pages+7)/8; 47#define bdebug(fmt, args...) ({ \
45 mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK; 48 if (unlikely(bootmem_debug)) \
46 mapsize >>= PAGE_SHIFT; 49 printk(KERN_INFO \
50 "bootmem::%s " fmt, \
51 __FUNCTION__, ## args); \
52})
47 53
48 return mapsize; 54static unsigned long __init bootmap_bytes(unsigned long pages)
55{
56 unsigned long bytes = (pages + 7) / 8;
57
58 return ALIGN(bytes, sizeof(long));
49} 59}
50 60
51/* 61/**
52 * link bdata in order 62 * bootmem_bootmap_pages - calculate bitmap size in pages
63 * @pages: number of pages the bitmap has to represent
53 */ 64 */
54static void __init link_bootmem(bootmem_data_t *bdata) 65unsigned long __init bootmem_bootmap_pages(unsigned long pages)
55{ 66{
56 bootmem_data_t *ent; 67 unsigned long bytes = bootmap_bytes(pages);
57 68
58 if (list_empty(&bdata_list)) { 69 return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
59 list_add(&bdata->list, &bdata_list);
60 return;
61 }
62 /* insert in order */
63 list_for_each_entry(ent, &bdata_list, list) {
64 if (bdata->node_boot_start < ent->node_boot_start) {
65 list_add_tail(&bdata->list, &ent->list);
66 return;
67 }
68 }
69 list_add_tail(&bdata->list, &bdata_list);
70} 70}
71 71
72/* 72/*
73 * Given an initialised bdata, it returns the size of the boot bitmap 73 * link bdata in order
74 */ 74 */
75static unsigned long __init get_mapsize(bootmem_data_t *bdata) 75static void __init link_bootmem(bootmem_data_t *bdata)
76{ 76{
77 unsigned long mapsize; 77 struct list_head *iter;
78 unsigned long start = PFN_DOWN(bdata->node_boot_start);
79 unsigned long end = bdata->node_low_pfn;
80 78
81 mapsize = ((end - start) + 7) / 8; 79 list_for_each(iter, &bdata_list) {
82 return ALIGN(mapsize, sizeof(long)); 80 bootmem_data_t *ent;
81
82 ent = list_entry(iter, bootmem_data_t, list);
83 if (bdata->node_min_pfn < ent->node_min_pfn)
84 break;
85 }
86 list_add_tail(&bdata->list, iter);
83} 87}
84 88
85/* 89/*
86 * Called once to set up the allocator itself. 90 * Called once to set up the allocator itself.
87 */ 91 */
88static unsigned long __init init_bootmem_core(pg_data_t *pgdat, 92static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
89 unsigned long mapstart, unsigned long start, unsigned long end) 93 unsigned long mapstart, unsigned long start, unsigned long end)
90{ 94{
91 bootmem_data_t *bdata = pgdat->bdata;
92 unsigned long mapsize; 95 unsigned long mapsize;
93 96
97 mminit_validate_memmodel_limits(&start, &end);
94 bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart)); 98 bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
95 bdata->node_boot_start = PFN_PHYS(start); 99 bdata->node_min_pfn = start;
96 bdata->node_low_pfn = end; 100 bdata->node_low_pfn = end;
97 link_bootmem(bdata); 101 link_bootmem(bdata);
98 102
@@ -100,429 +104,461 @@ static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
100 * Initially all pages are reserved - setup_arch() has to 104 * Initially all pages are reserved - setup_arch() has to
101 * register free RAM areas explicitly. 105 * register free RAM areas explicitly.
102 */ 106 */
103 mapsize = get_mapsize(bdata); 107 mapsize = bootmap_bytes(end - start);
104 memset(bdata->node_bootmem_map, 0xff, mapsize); 108 memset(bdata->node_bootmem_map, 0xff, mapsize);
105 109
110 bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
111 bdata - bootmem_node_data, start, mapstart, end, mapsize);
112
106 return mapsize; 113 return mapsize;
107} 114}
108 115
109/* 116/**
110 * Marks a particular physical memory range as unallocatable. Usable RAM 117 * init_bootmem_node - register a node as boot memory
111 * might be used for boot-time allocations - or it might get added 118 * @pgdat: node to register
112 * to the free page pool later on. 119 * @freepfn: pfn where the bitmap for this node is to be placed
120 * @startpfn: first pfn on the node
121 * @endpfn: first pfn after the node
122 *
123 * Returns the number of bytes needed to hold the bitmap for this node.
113 */ 124 */
114static int __init can_reserve_bootmem_core(bootmem_data_t *bdata, 125unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
115 unsigned long addr, unsigned long size, int flags) 126 unsigned long startpfn, unsigned long endpfn)
116{ 127{
117 unsigned long sidx, eidx; 128 return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
118 unsigned long i; 129}
119 130
120 BUG_ON(!size); 131/**
132 * init_bootmem - register boot memory
133 * @start: pfn where the bitmap is to be placed
134 * @pages: number of available physical pages
135 *
136 * Returns the number of bytes needed to hold the bitmap.
137 */
138unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
139{
140 max_low_pfn = pages;
141 min_low_pfn = start;
142 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
143}
121 144
122 /* out of range, don't hold other */ 145static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
123 if (addr + size < bdata->node_boot_start || 146{
124 PFN_DOWN(addr) > bdata->node_low_pfn) 147 int aligned;
148 struct page *page;
149 unsigned long start, end, pages, count = 0;
150
151 if (!bdata->node_bootmem_map)
125 return 0; 152 return 0;
126 153
154 start = bdata->node_min_pfn;
155 end = bdata->node_low_pfn;
156
127 /* 157 /*
128 * Round up to index to the range. 158 * If the start is aligned to the machines wordsize, we might
159 * be able to free pages in bulks of that order.
129 */ 160 */
130 if (addr > bdata->node_boot_start) 161 aligned = !(start & (BITS_PER_LONG - 1));
131 sidx= PFN_DOWN(addr - bdata->node_boot_start);
132 else
133 sidx = 0;
134 162
135 eidx = PFN_UP(addr + size - bdata->node_boot_start); 163 bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
136 if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start)) 164 bdata - bootmem_node_data, start, end, aligned);
137 eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
138 165
139 for (i = sidx; i < eidx; i++) { 166 while (start < end) {
140 if (test_bit(i, bdata->node_bootmem_map)) { 167 unsigned long *map, idx, vec;
141 if (flags & BOOTMEM_EXCLUSIVE)
142 return -EBUSY;
143 }
144 }
145 168
146 return 0; 169 map = bdata->node_bootmem_map;
170 idx = start - bdata->node_min_pfn;
171 vec = ~map[idx / BITS_PER_LONG];
147 172
148} 173 if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
174 int order = ilog2(BITS_PER_LONG);
149 175
150static void __init reserve_bootmem_core(bootmem_data_t *bdata, 176 __free_pages_bootmem(pfn_to_page(start), order);
151 unsigned long addr, unsigned long size, int flags) 177 count += BITS_PER_LONG;
152{ 178 } else {
153 unsigned long sidx, eidx; 179 unsigned long off = 0;
154 unsigned long i;
155
156 BUG_ON(!size);
157 180
158 /* out of range */ 181 while (vec && off < BITS_PER_LONG) {
159 if (addr + size < bdata->node_boot_start || 182 if (vec & 1) {
160 PFN_DOWN(addr) > bdata->node_low_pfn) 183 page = pfn_to_page(start + off);
161 return; 184 __free_pages_bootmem(page, 0);
185 count++;
186 }
187 vec >>= 1;
188 off++;
189 }
190 }
191 start += BITS_PER_LONG;
192 }
162 193
163 /* 194 page = virt_to_page(bdata->node_bootmem_map);
164 * Round up to index to the range. 195 pages = bdata->node_low_pfn - bdata->node_min_pfn;
165 */ 196 pages = bootmem_bootmap_pages(pages);
166 if (addr > bdata->node_boot_start) 197 count += pages;
167 sidx= PFN_DOWN(addr - bdata->node_boot_start); 198 while (pages--)
168 else 199 __free_pages_bootmem(page++, 0);
169 sidx = 0;
170 200
171 eidx = PFN_UP(addr + size - bdata->node_boot_start); 201 bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
172 if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
173 eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
174 202
175 for (i = sidx; i < eidx; i++) { 203 return count;
176 if (test_and_set_bit(i, bdata->node_bootmem_map)) {
177#ifdef CONFIG_DEBUG_BOOTMEM
178 printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
179#endif
180 }
181 }
182} 204}
183 205
184static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, 206/**
185 unsigned long size) 207 * free_all_bootmem_node - release a node's free pages to the buddy allocator
208 * @pgdat: node to be released
209 *
210 * Returns the number of pages actually released.
211 */
212unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
186{ 213{
187 unsigned long sidx, eidx; 214 register_page_bootmem_info_node(pgdat);
188 unsigned long i; 215 return free_all_bootmem_core(pgdat->bdata);
189 216}
190 BUG_ON(!size);
191 217
192 /* out range */ 218/**
193 if (addr + size < bdata->node_boot_start || 219 * free_all_bootmem - release free pages to the buddy allocator
194 PFN_DOWN(addr) > bdata->node_low_pfn) 220 *
195 return; 221 * Returns the number of pages actually released.
196 /* 222 */
197 * round down end of usable mem, partially free pages are 223unsigned long __init free_all_bootmem(void)
198 * considered reserved. 224{
199 */ 225 return free_all_bootmem_core(NODE_DATA(0)->bdata);
226}
200 227
201 if (addr >= bdata->node_boot_start && addr < bdata->last_success) 228static void __init __free(bootmem_data_t *bdata,
202 bdata->last_success = addr; 229 unsigned long sidx, unsigned long eidx)
230{
231 unsigned long idx;
203 232
204 /* 233 bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
205 * Round up to index to the range. 234 sidx + bdata->node_min_pfn,
206 */ 235 eidx + bdata->node_min_pfn);
207 if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
208 sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
209 else
210 sidx = 0;
211 236
212 eidx = PFN_DOWN(addr + size - bdata->node_boot_start); 237 if (bdata->hint_idx > sidx)
213 if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start)) 238 bdata->hint_idx = sidx;
214 eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
215 239
216 for (i = sidx; i < eidx; i++) { 240 for (idx = sidx; idx < eidx; idx++)
217 if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map))) 241 if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
218 BUG(); 242 BUG();
219 }
220} 243}
221 244
222/* 245static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
223 * We 'merge' subsequent allocations to save space. We might 'lose' 246 unsigned long eidx, int flags)
224 * some fraction of a page if allocations cannot be satisfied due to
225 * size constraints on boxes where there is physical RAM space
226 * fragmentation - in these cases (mostly large memory boxes) this
227 * is not a problem.
228 *
229 * On low memory boxes we get it right in 100% of the cases.
230 *
231 * alignment has to be a power of 2 value.
232 *
233 * NOTE: This function is _not_ reentrant.
234 */
235void * __init
236__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
237 unsigned long align, unsigned long goal, unsigned long limit)
238{ 247{
239 unsigned long areasize, preferred; 248 unsigned long idx;
240 unsigned long i, start = 0, incr, eidx, end_pfn; 249 int exclusive = flags & BOOTMEM_EXCLUSIVE;
241 void *ret; 250
242 unsigned long node_boot_start; 251 bdebug("nid=%td start=%lx end=%lx flags=%x\n",
243 void *node_bootmem_map; 252 bdata - bootmem_node_data,
244 253 sidx + bdata->node_min_pfn,
245 if (!size) { 254 eidx + bdata->node_min_pfn,
246 printk("__alloc_bootmem_core(): zero-sized request\n"); 255 flags);
247 BUG(); 256
248 } 257 for (idx = sidx; idx < eidx; idx++)
249 BUG_ON(align & (align-1)); 258 if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
250 259 if (exclusive) {
251 /* on nodes without memory - bootmem_map is NULL */ 260 __free(bdata, sidx, idx);
252 if (!bdata->node_bootmem_map) 261 return -EBUSY;
253 return NULL; 262 }
263 bdebug("silent double reserve of PFN %lx\n",
264 idx + bdata->node_min_pfn);
265 }
266 return 0;
267}
254 268
255 /* bdata->node_boot_start is supposed to be (12+6)bits alignment on x86_64 ? */ 269static int __init mark_bootmem_node(bootmem_data_t *bdata,
256 node_boot_start = bdata->node_boot_start; 270 unsigned long start, unsigned long end,
257 node_bootmem_map = bdata->node_bootmem_map; 271 int reserve, int flags)
258 if (align) { 272{
259 node_boot_start = ALIGN(bdata->node_boot_start, align); 273 unsigned long sidx, eidx;
260 if (node_boot_start > bdata->node_boot_start)
261 node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
262 PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
263 }
264 274
265 if (limit && node_boot_start >= limit) 275 bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
266 return NULL; 276 bdata - bootmem_node_data, start, end, reserve, flags);
267 277
268 end_pfn = bdata->node_low_pfn; 278 BUG_ON(start < bdata->node_min_pfn);
269 limit = PFN_DOWN(limit); 279 BUG_ON(end > bdata->node_low_pfn);
270 if (limit && end_pfn > limit)
271 end_pfn = limit;
272 280
273 eidx = end_pfn - PFN_DOWN(node_boot_start); 281 sidx = start - bdata->node_min_pfn;
282 eidx = end - bdata->node_min_pfn;
274 283
275 /* 284 if (reserve)
276 * We try to allocate bootmem pages above 'goal' 285 return __reserve(bdata, sidx, eidx, flags);
277 * first, then we try to allocate lower pages. 286 else
278 */ 287 __free(bdata, sidx, eidx);
279 preferred = 0; 288 return 0;
280 if (goal && PFN_DOWN(goal) < end_pfn) { 289}
281 if (goal > node_boot_start)
282 preferred = goal - node_boot_start;
283
284 if (bdata->last_success > node_boot_start &&
285 bdata->last_success - node_boot_start >= preferred)
286 if (!limit || (limit && limit > bdata->last_success))
287 preferred = bdata->last_success - node_boot_start;
288 }
289 290
290 preferred = PFN_DOWN(ALIGN(preferred, align)); 291static int __init mark_bootmem(unsigned long start, unsigned long end,
291 areasize = (size + PAGE_SIZE-1) / PAGE_SIZE; 292 int reserve, int flags)
292 incr = align >> PAGE_SHIFT ? : 1; 293{
294 unsigned long pos;
295 bootmem_data_t *bdata;
293 296
294restart_scan: 297 pos = start;
295 for (i = preferred; i < eidx;) { 298 list_for_each_entry(bdata, &bdata_list, list) {
296 unsigned long j; 299 int err;
300 unsigned long max;
297 301
298 i = find_next_zero_bit(node_bootmem_map, eidx, i); 302 if (pos < bdata->node_min_pfn ||
299 i = ALIGN(i, incr); 303 pos >= bdata->node_low_pfn) {
300 if (i >= eidx) 304 BUG_ON(pos != start);
301 break;
302 if (test_bit(i, node_bootmem_map)) {
303 i += incr;
304 continue; 305 continue;
305 } 306 }
306 for (j = i + 1; j < i + areasize; ++j) {
307 if (j >= eidx)
308 goto fail_block;
309 if (test_bit(j, node_bootmem_map))
310 goto fail_block;
311 }
312 start = i;
313 goto found;
314 fail_block:
315 i = ALIGN(j, incr);
316 if (i == j)
317 i += incr;
318 }
319 307
320 if (preferred > 0) { 308 max = min(bdata->node_low_pfn, end);
321 preferred = 0;
322 goto restart_scan;
323 }
324 return NULL;
325 309
326found: 310 err = mark_bootmem_node(bdata, pos, max, reserve, flags);
327 bdata->last_success = PFN_PHYS(start) + node_boot_start; 311 if (reserve && err) {
328 BUG_ON(start >= eidx); 312 mark_bootmem(start, pos, 0, 0);
329 313 return err;
330 /*
331 * Is the next page of the previous allocation-end the start
332 * of this allocation's buffer? If yes then we can 'merge'
333 * the previous partial page with this allocation.
334 */
335 if (align < PAGE_SIZE &&
336 bdata->last_offset && bdata->last_pos+1 == start) {
337 unsigned long offset, remaining_size;
338 offset = ALIGN(bdata->last_offset, align);
339 BUG_ON(offset > PAGE_SIZE);
340 remaining_size = PAGE_SIZE - offset;
341 if (size < remaining_size) {
342 areasize = 0;
343 /* last_pos unchanged */
344 bdata->last_offset = offset + size;
345 ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
346 offset + node_boot_start);
347 } else {
348 remaining_size = size - remaining_size;
349 areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
350 ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
351 offset + node_boot_start);
352 bdata->last_pos = start + areasize - 1;
353 bdata->last_offset = remaining_size;
354 } 314 }
355 bdata->last_offset &= ~PAGE_MASK;
356 } else {
357 bdata->last_pos = start + areasize - 1;
358 bdata->last_offset = size & ~PAGE_MASK;
359 ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
360 }
361 315
362 /* 316 if (max == end)
363 * Reserve the area now: 317 return 0;
364 */ 318 pos = bdata->node_low_pfn;
365 for (i = start; i < start + areasize; i++) 319 }
366 if (unlikely(test_and_set_bit(i, node_bootmem_map))) 320 BUG();
367 BUG();
368 memset(ret, 0, size);
369 return ret;
370} 321}
371 322
372static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat) 323/**
324 * free_bootmem_node - mark a page range as usable
325 * @pgdat: node the range resides on
326 * @physaddr: starting address of the range
327 * @size: size of the range in bytes
328 *
329 * Partial pages will be considered reserved and left as they are.
330 *
331 * The range must reside completely on the specified node.
332 */
333void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
334 unsigned long size)
373{ 335{
374 struct page *page; 336 unsigned long start, end;
375 unsigned long pfn;
376 bootmem_data_t *bdata = pgdat->bdata;
377 unsigned long i, count, total = 0;
378 unsigned long idx;
379 unsigned long *map;
380 int gofast = 0;
381
382 BUG_ON(!bdata->node_bootmem_map);
383
384 count = 0;
385 /* first extant page of the node */
386 pfn = PFN_DOWN(bdata->node_boot_start);
387 idx = bdata->node_low_pfn - pfn;
388 map = bdata->node_bootmem_map;
389 /* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
390 if (bdata->node_boot_start == 0 ||
391 ffs(bdata->node_boot_start) - PAGE_SHIFT > ffs(BITS_PER_LONG))
392 gofast = 1;
393 for (i = 0; i < idx; ) {
394 unsigned long v = ~map[i / BITS_PER_LONG];
395
396 if (gofast && v == ~0UL) {
397 int order;
398
399 page = pfn_to_page(pfn);
400 count += BITS_PER_LONG;
401 order = ffs(BITS_PER_LONG) - 1;
402 __free_pages_bootmem(page, order);
403 i += BITS_PER_LONG;
404 page += BITS_PER_LONG;
405 } else if (v) {
406 unsigned long m;
407
408 page = pfn_to_page(pfn);
409 for (m = 1; m && i < idx; m<<=1, page++, i++) {
410 if (v & m) {
411 count++;
412 __free_pages_bootmem(page, 0);
413 }
414 }
415 } else {
416 i += BITS_PER_LONG;
417 }
418 pfn += BITS_PER_LONG;
419 }
420 total += count;
421 337
422 /* 338 start = PFN_UP(physaddr);
423 * Now free the allocator bitmap itself, it's not 339 end = PFN_DOWN(physaddr + size);
424 * needed anymore:
425 */
426 page = virt_to_page(bdata->node_bootmem_map);
427 count = 0;
428 idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
429 for (i = 0; i < idx; i++, page++) {
430 __free_pages_bootmem(page, 0);
431 count++;
432 }
433 total += count;
434 bdata->node_bootmem_map = NULL;
435 340
436 return total; 341 mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
437} 342}
438 343
439unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn, 344/**
440 unsigned long startpfn, unsigned long endpfn) 345 * free_bootmem - mark a page range as usable
441{ 346 * @addr: starting address of the range
442 return init_bootmem_core(pgdat, freepfn, startpfn, endpfn); 347 * @size: size of the range in bytes
443} 348 *
444 349 * Partial pages will be considered reserved and left as they are.
445int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, 350 *
446 unsigned long size, int flags) 351 * The range must be contiguous but may span node boundaries.
352 */
353void __init free_bootmem(unsigned long addr, unsigned long size)
447{ 354{
448 int ret; 355 unsigned long start, end;
449 356
450 ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags); 357 start = PFN_UP(addr);
451 if (ret < 0) 358 end = PFN_DOWN(addr + size);
452 return -ENOMEM;
453 reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
454 359
455 return 0; 360 mark_bootmem(start, end, 0, 0);
456} 361}
457 362
458void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, 363/**
459 unsigned long size) 364 * reserve_bootmem_node - mark a page range as reserved
365 * @pgdat: node the range resides on
366 * @physaddr: starting address of the range
367 * @size: size of the range in bytes
368 * @flags: reservation flags (see linux/bootmem.h)
369 *
370 * Partial pages will be reserved.
371 *
372 * The range must reside completely on the specified node.
373 */
374int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
375 unsigned long size, int flags)
460{ 376{
461 free_bootmem_core(pgdat->bdata, physaddr, size); 377 unsigned long start, end;
462}
463 378
464unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) 379 start = PFN_DOWN(physaddr);
465{ 380 end = PFN_UP(physaddr + size);
466 register_page_bootmem_info_node(pgdat);
467 return free_all_bootmem_core(pgdat);
468}
469 381
470unsigned long __init init_bootmem(unsigned long start, unsigned long pages) 382 return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
471{
472 max_low_pfn = pages;
473 min_low_pfn = start;
474 return init_bootmem_core(NODE_DATA(0), start, 0, pages);
475} 383}
476 384
477#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE 385#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
386/**
 387 * reserve_bootmem - mark a page range as reserved
388 * @addr: starting address of the range
389 * @size: size of the range in bytes
390 * @flags: reservation flags (see linux/bootmem.h)
391 *
392 * Partial pages will be reserved.
393 *
394 * The range must be contiguous but may span node boundaries.
395 */
478int __init reserve_bootmem(unsigned long addr, unsigned long size, 396int __init reserve_bootmem(unsigned long addr, unsigned long size,
479 int flags) 397 int flags)
480{ 398{
481 bootmem_data_t *bdata; 399 unsigned long start, end;
482 int ret;
483 400
484 list_for_each_entry(bdata, &bdata_list, list) { 401 start = PFN_DOWN(addr);
485 ret = can_reserve_bootmem_core(bdata, addr, size, flags); 402 end = PFN_UP(addr + size);
486 if (ret < 0)
487 return ret;
488 }
489 list_for_each_entry(bdata, &bdata_list, list)
490 reserve_bootmem_core(bdata, addr, size, flags);
491 403
492 return 0; 404 return mark_bootmem(start, end, 1, flags);
493} 405}
494#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ 406#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
495 407
496void __init free_bootmem(unsigned long addr, unsigned long size) 408static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
409 unsigned long size, unsigned long align,
410 unsigned long goal, unsigned long limit)
497{ 411{
498 bootmem_data_t *bdata; 412 unsigned long fallback = 0;
499 list_for_each_entry(bdata, &bdata_list, list) 413 unsigned long min, max, start, sidx, midx, step;
500 free_bootmem_core(bdata, addr, size);
501}
502 414
503unsigned long __init free_all_bootmem(void) 415 BUG_ON(!size);
504{ 416 BUG_ON(align & (align - 1));
505 return free_all_bootmem_core(NODE_DATA(0)); 417 BUG_ON(limit && goal + size > limit);
418
419 if (!bdata->node_bootmem_map)
420 return NULL;
421
422 bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
423 bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
424 align, goal, limit);
425
426 min = bdata->node_min_pfn;
427 max = bdata->node_low_pfn;
428
429 goal >>= PAGE_SHIFT;
430 limit >>= PAGE_SHIFT;
431
432 if (limit && max > limit)
433 max = limit;
434 if (max <= min)
435 return NULL;
436
437 step = max(align >> PAGE_SHIFT, 1UL);
438
439 if (goal && min < goal && goal < max)
440 start = ALIGN(goal, step);
441 else
442 start = ALIGN(min, step);
443
 444 sidx = start - bdata->node_min_pfn;
445 midx = max - bdata->node_min_pfn;
446
447 if (bdata->hint_idx > sidx) {
448 /*
449 * Handle the valid case of sidx being zero and still
450 * catch the fallback below.
451 */
452 fallback = sidx + 1;
453 sidx = ALIGN(bdata->hint_idx, step);
454 }
455
456 while (1) {
457 int merge;
458 void *region;
459 unsigned long eidx, i, start_off, end_off;
460find_block:
461 sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
462 sidx = ALIGN(sidx, step);
463 eidx = sidx + PFN_UP(size);
464
465 if (sidx >= midx || eidx > midx)
466 break;
467
468 for (i = sidx; i < eidx; i++)
469 if (test_bit(i, bdata->node_bootmem_map)) {
470 sidx = ALIGN(i, step);
471 if (sidx == i)
472 sidx += step;
473 goto find_block;
474 }
475
476 if (bdata->last_end_off &&
477 PFN_DOWN(bdata->last_end_off) + 1 == sidx)
478 start_off = ALIGN(bdata->last_end_off, align);
479 else
480 start_off = PFN_PHYS(sidx);
481
482 merge = PFN_DOWN(start_off) < sidx;
483 end_off = start_off + size;
484
485 bdata->last_end_off = end_off;
486 bdata->hint_idx = PFN_UP(end_off);
487
488 /*
489 * Reserve the area now:
490 */
491 if (__reserve(bdata, PFN_DOWN(start_off) + merge,
492 PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
493 BUG();
494
495 region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
496 start_off);
497 memset(region, 0, size);
498 return region;
499 }
500
501 if (fallback) {
502 sidx = ALIGN(fallback - 1, step);
503 fallback = 0;
504 goto find_block;
505 }
506
507 return NULL;
506} 508}
507 509
508void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, 510static void * __init ___alloc_bootmem_nopanic(unsigned long size,
509 unsigned long goal) 511 unsigned long align,
512 unsigned long goal,
513 unsigned long limit)
510{ 514{
511 bootmem_data_t *bdata; 515 bootmem_data_t *bdata;
512 void *ptr;
513 516
517restart:
514 list_for_each_entry(bdata, &bdata_list, list) { 518 list_for_each_entry(bdata, &bdata_list, list) {
515 ptr = __alloc_bootmem_core(bdata, size, align, goal, 0); 519 void *region;
516 if (ptr) 520
517 return ptr; 521 if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
522 continue;
523 if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
524 break;
525
526 region = alloc_bootmem_core(bdata, size, align, goal, limit);
527 if (region)
528 return region;
529 }
530
531 if (goal) {
532 goal = 0;
533 goto restart;
518 } 534 }
535
519 return NULL; 536 return NULL;
520} 537}
521 538
522void * __init __alloc_bootmem(unsigned long size, unsigned long align, 539/**
523 unsigned long goal) 540 * __alloc_bootmem_nopanic - allocate boot memory without panicking
541 * @size: size of the request in bytes
542 * @align: alignment of the region
543 * @goal: preferred starting address of the region
544 *
545 * The goal is dropped if it can not be satisfied and the allocation will
546 * fall back to memory below @goal.
547 *
548 * Allocation may happen on any node in the system.
549 *
550 * Returns NULL on failure.
551 */
552void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
553 unsigned long goal)
524{ 554{
525 void *mem = __alloc_bootmem_nopanic(size,align,goal); 555 return ___alloc_bootmem_nopanic(size, align, goal, 0);
556}
557
558static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
559 unsigned long goal, unsigned long limit)
560{
561 void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
526 562
527 if (mem) 563 if (mem)
528 return mem; 564 return mem;
@@ -534,78 +570,135 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
534 return NULL; 570 return NULL;
535} 571}
536 572
573/**
574 * __alloc_bootmem - allocate boot memory
575 * @size: size of the request in bytes
576 * @align: alignment of the region
577 * @goal: preferred starting address of the region
578 *
579 * The goal is dropped if it can not be satisfied and the allocation will
580 * fall back to memory below @goal.
581 *
582 * Allocation may happen on any node in the system.
583 *
584 * The function panics if the request can not be satisfied.
585 */
586void * __init __alloc_bootmem(unsigned long size, unsigned long align,
587 unsigned long goal)
588{
589 return ___alloc_bootmem(size, align, goal, 0);
590}
537 591
538void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, 592static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
539 unsigned long align, unsigned long goal) 593 unsigned long size, unsigned long align,
594 unsigned long goal, unsigned long limit)
540{ 595{
541 void *ptr; 596 void *ptr;
542 597
543 ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); 598 ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
544 if (ptr) 599 if (ptr)
545 return ptr; 600 return ptr;
546 601
547 return __alloc_bootmem(size, align, goal); 602 return ___alloc_bootmem(size, align, goal, limit);
603}
604
605/**
606 * __alloc_bootmem_node - allocate boot memory from a specific node
607 * @pgdat: node to allocate from
608 * @size: size of the request in bytes
609 * @align: alignment of the region
610 * @goal: preferred starting address of the region
611 *
612 * The goal is dropped if it can not be satisfied and the allocation will
613 * fall back to memory below @goal.
614 *
615 * Allocation may fall back to any node in the system if the specified node
616 * can not hold the requested memory.
617 *
618 * The function panics if the request can not be satisfied.
619 */
620void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
621 unsigned long align, unsigned long goal)
622{
623 return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
548} 624}
549 625
550#ifdef CONFIG_SPARSEMEM 626#ifdef CONFIG_SPARSEMEM
627/**
628 * alloc_bootmem_section - allocate boot memory from a specific section
629 * @size: size of the request in bytes
630 * @section_nr: sparse map section to allocate from
631 *
632 * Return NULL on failure.
633 */
551void * __init alloc_bootmem_section(unsigned long size, 634void * __init alloc_bootmem_section(unsigned long size,
552 unsigned long section_nr) 635 unsigned long section_nr)
553{ 636{
554 void *ptr; 637 bootmem_data_t *bdata;
555 unsigned long limit, goal, start_nr, end_nr, pfn; 638 unsigned long pfn, goal, limit;
556 struct pglist_data *pgdat;
557 639
558 pfn = section_nr_to_pfn(section_nr); 640 pfn = section_nr_to_pfn(section_nr);
559 goal = PFN_PHYS(pfn); 641 goal = pfn << PAGE_SHIFT;
560 limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1; 642 limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
561 pgdat = NODE_DATA(early_pfn_to_nid(pfn)); 643 bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
562 ptr = __alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
563 limit);
564 644
565 if (!ptr) 645 return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
566 return NULL; 646}
647#endif
567 648
568 start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr))); 649void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
569 end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size)); 650 unsigned long align, unsigned long goal)
570 if (start_nr != section_nr || end_nr != section_nr) { 651{
571 printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n", 652 void *ptr;
572 section_nr);
573 free_bootmem_core(pgdat->bdata, __pa(ptr), size);
574 ptr = NULL;
575 }
576 653
577 return ptr; 654 ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
655 if (ptr)
656 return ptr;
657
658 return __alloc_bootmem_nopanic(size, align, goal);
578} 659}
579#endif
580 660
581#ifndef ARCH_LOW_ADDRESS_LIMIT 661#ifndef ARCH_LOW_ADDRESS_LIMIT
582#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL 662#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
583#endif 663#endif
584 664
665/**
666 * __alloc_bootmem_low - allocate low boot memory
667 * @size: size of the request in bytes
668 * @align: alignment of the region
669 * @goal: preferred starting address of the region
670 *
671 * The goal is dropped if it can not be satisfied and the allocation will
672 * fall back to memory below @goal.
673 *
674 * Allocation may happen on any node in the system.
675 *
676 * The function panics if the request can not be satisfied.
677 */
585void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, 678void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
586 unsigned long goal) 679 unsigned long goal)
587{ 680{
588 bootmem_data_t *bdata; 681 return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
589 void *ptr;
590
591 list_for_each_entry(bdata, &bdata_list, list) {
592 ptr = __alloc_bootmem_core(bdata, size, align, goal,
593 ARCH_LOW_ADDRESS_LIMIT);
594 if (ptr)
595 return ptr;
596 }
597
598 /*
599 * Whoops, we cannot satisfy the allocation request.
600 */
601 printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
602 panic("Out of low memory");
603 return NULL;
604} 682}
605 683
684/**
685 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
686 * @pgdat: node to allocate from
687 * @size: size of the request in bytes
688 * @align: alignment of the region
689 * @goal: preferred starting address of the region
690 *
691 * The goal is dropped if it can not be satisfied and the allocation will
692 * fall back to memory below @goal.
693 *
694 * Allocation may fall back to any node in the system if the specified node
695 * can not hold the requested memory.
696 *
697 * The function panics if the request can not be satisfied.
698 */
606void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, 699void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
607 unsigned long align, unsigned long goal) 700 unsigned long align, unsigned long goal)
608{ 701{
609 return __alloc_bootmem_core(pgdat->bdata, size, align, goal, 702 return ___alloc_bootmem_node(pgdat->bdata, size, align,
610 ARCH_LOW_ADDRESS_LIMIT); 703 goal, ARCH_LOW_ADDRESS_LIMIT);
611} 704}
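The rewritten allocator keeps the old entry points and signatures, so arch setup code does not change. A condensed sketch of the usual boot-time sequence; map_pfn, max_pfn, ram_start, ram_size, kern_start and kern_size are placeholders for values that setup_arch() normally computes:

    void *buf;

    /* place the bitmap at pfn map_pfn, managing pfns [0, max_pfn) */
    init_bootmem(map_pfn, max_pfn);

    free_bootmem(ram_start, ram_size);			/* mark usable RAM */
    reserve_bootmem(kern_start, kern_size, BOOTMEM_DEFAULT);

    buf = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);	/* panics on failure */

    totalram_pages += free_all_bootmem();		/* hand pages to the buddy allocator */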
diff --git a/mm/filemap.c b/mm/filemap.c
index 65d9d9e2b755..2d3ec1ffc66e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -42,9 +42,6 @@
42 42
43#include <asm/mman.h> 43#include <asm/mman.h>
44 44
45static ssize_t
46generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
47 loff_t offset, unsigned long nr_segs);
48 45
49/* 46/*
50 * Shared mappings implemented 30.11.1994. It's not fully working yet, 47 * Shared mappings implemented 30.11.1994. It's not fully working yet,
@@ -118,7 +115,7 @@ void __remove_from_page_cache(struct page *page)
118{ 115{
119 struct address_space *mapping = page->mapping; 116 struct address_space *mapping = page->mapping;
120 117
121 mem_cgroup_uncharge_page(page); 118 mem_cgroup_uncharge_cache_page(page);
122 radix_tree_delete(&mapping->page_tree, page->index); 119 radix_tree_delete(&mapping->page_tree, page->index);
123 page->mapping = NULL; 120 page->mapping = NULL;
124 mapping->nrpages--; 121 mapping->nrpages--;
@@ -477,12 +474,12 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
477 mapping->nrpages++; 474 mapping->nrpages++;
478 __inc_zone_page_state(page, NR_FILE_PAGES); 475 __inc_zone_page_state(page, NR_FILE_PAGES);
479 } else 476 } else
480 mem_cgroup_uncharge_page(page); 477 mem_cgroup_uncharge_cache_page(page);
481 478
482 write_unlock_irq(&mapping->tree_lock); 479 write_unlock_irq(&mapping->tree_lock);
483 radix_tree_preload_end(); 480 radix_tree_preload_end();
484 } else 481 } else
485 mem_cgroup_uncharge_page(page); 482 mem_cgroup_uncharge_cache_page(page);
486out: 483out:
487 return error; 484 return error;
488} 485}
@@ -1200,42 +1197,41 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1200 1197
1201 mapping = filp->f_mapping; 1198 mapping = filp->f_mapping;
1202 inode = mapping->host; 1199 inode = mapping->host;
1203 retval = 0;
1204 if (!count) 1200 if (!count)
1205 goto out; /* skip atime */ 1201 goto out; /* skip atime */
1206 size = i_size_read(inode); 1202 size = i_size_read(inode);
1207 if (pos < size) { 1203 if (pos < size) {
1208 retval = generic_file_direct_IO(READ, iocb, 1204 retval = filemap_write_and_wait(mapping);
1209 iov, pos, nr_segs); 1205 if (!retval) {
1206 retval = mapping->a_ops->direct_IO(READ, iocb,
1207 iov, pos, nr_segs);
1208 }
1210 if (retval > 0) 1209 if (retval > 0)
1211 *ppos = pos + retval; 1210 *ppos = pos + retval;
1212 } 1211 if (retval) {
1213 if (likely(retval != 0)) { 1212 file_accessed(filp);
1214 file_accessed(filp); 1213 goto out;
1215 goto out; 1214 }
1216 } 1215 }
1217 } 1216 }
1218 1217
1219 retval = 0; 1218 for (seg = 0; seg < nr_segs; seg++) {
1220 if (count) { 1219 read_descriptor_t desc;
1221 for (seg = 0; seg < nr_segs; seg++) {
1222 read_descriptor_t desc;
1223 1220
1224 desc.written = 0; 1221 desc.written = 0;
1225 desc.arg.buf = iov[seg].iov_base; 1222 desc.arg.buf = iov[seg].iov_base;
1226 desc.count = iov[seg].iov_len; 1223 desc.count = iov[seg].iov_len;
1227 if (desc.count == 0) 1224 if (desc.count == 0)
1228 continue; 1225 continue;
1229 desc.error = 0; 1226 desc.error = 0;
1230 do_generic_file_read(filp,ppos,&desc,file_read_actor); 1227 do_generic_file_read(filp, ppos, &desc, file_read_actor);
1231 retval += desc.written; 1228 retval += desc.written;
1232 if (desc.error) { 1229 if (desc.error) {
1233 retval = retval ?: desc.error; 1230 retval = retval ?: desc.error;
1234 break; 1231 break;
1235 }
1236 if (desc.count > 0)
1237 break;
1238 } 1232 }
1233 if (desc.count > 0)
1234 break;
1239 } 1235 }
1240out: 1236out:
1241 return retval; 1237 return retval;
@@ -2004,11 +2000,55 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2004 struct address_space *mapping = file->f_mapping; 2000 struct address_space *mapping = file->f_mapping;
2005 struct inode *inode = mapping->host; 2001 struct inode *inode = mapping->host;
2006 ssize_t written; 2002 ssize_t written;
2003 size_t write_len;
2004 pgoff_t end;
2007 2005
2008 if (count != ocount) 2006 if (count != ocount)
2009 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); 2007 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2010 2008
2011 written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs); 2009 /*
2010 * Unmap all mmappings of the file up-front.
2011 *
2012 * This will cause any pte dirty bits to be propagated into the
2013 * pageframes for the subsequent filemap_write_and_wait().
2014 */
2015 write_len = iov_length(iov, *nr_segs);
2016 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2017 if (mapping_mapped(mapping))
2018 unmap_mapping_range(mapping, pos, write_len, 0);
2019
2020 written = filemap_write_and_wait(mapping);
2021 if (written)
2022 goto out;
2023
2024 /*
2025 * After a write we want buffered reads to be sure to go to disk to get
2026 * the new data. We invalidate clean cached page from the region we're
2027 * about to write. We do this *before* the write so that we can return
2028 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
2029 */
2030 if (mapping->nrpages) {
2031 written = invalidate_inode_pages2_range(mapping,
2032 pos >> PAGE_CACHE_SHIFT, end);
2033 if (written)
2034 goto out;
2035 }
2036
2037 written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2038
2039 /*
2040 * Finally, try again to invalidate clean pages which might have been
2041 * cached by non-direct readahead, or faulted in by get_user_pages()
2042 * if the source of the write was an mmap'ed region of the file
2043 * we're writing. Either one is a pretty crazy thing to do,
2044 * so we don't support it 100%. If this invalidation
2045 * fails, tough, the write still worked...
2046 */
2047 if (mapping->nrpages) {
2048 invalidate_inode_pages2_range(mapping,
2049 pos >> PAGE_CACHE_SHIFT, end);
2050 }
2051
2012 if (written > 0) { 2052 if (written > 0) {
2013 loff_t end = pos + written; 2053 loff_t end = pos + written;
2014 if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 2054 if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
@@ -2024,6 +2064,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2024 * i_mutex is held, which protects generic_osync_inode() from 2064 * i_mutex is held, which protects generic_osync_inode() from
2025 * livelocking. AIO O_DIRECT ops attempt to sync metadata here. 2065 * livelocking. AIO O_DIRECT ops attempt to sync metadata here.
2026 */ 2066 */
2067out:
2027 if ((written >= 0 || written == -EIOCBQUEUED) && 2068 if ((written >= 0 || written == -EIOCBQUEUED) &&
2028 ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2069 ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2029 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA); 2070 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
@@ -2511,66 +2552,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2511} 2552}
2512EXPORT_SYMBOL(generic_file_aio_write); 2553EXPORT_SYMBOL(generic_file_aio_write);
2513 2554
2514/*
2515 * Called under i_mutex for writes to S_ISREG files. Returns -EIO if something
2516 * went wrong during pagecache shootdown.
2517 */
2518static ssize_t
2519generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2520 loff_t offset, unsigned long nr_segs)
2521{
2522 struct file *file = iocb->ki_filp;
2523 struct address_space *mapping = file->f_mapping;
2524 ssize_t retval;
2525 size_t write_len;
2526 pgoff_t end = 0; /* silence gcc */
2527
2528 /*
2529 * If it's a write, unmap all mmappings of the file up-front. This
2530 * will cause any pte dirty bits to be propagated into the pageframes
2531 * for the subsequent filemap_write_and_wait().
2532 */
2533 if (rw == WRITE) {
2534 write_len = iov_length(iov, nr_segs);
2535 end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
2536 if (mapping_mapped(mapping))
2537 unmap_mapping_range(mapping, offset, write_len, 0);
2538 }
2539
2540 retval = filemap_write_and_wait(mapping);
2541 if (retval)
2542 goto out;
2543
2544 /*
2545 * After a write we want buffered reads to be sure to go to disk to get
2546 * the new data. We invalidate clean cached page from the region we're
2547 * about to write. We do this *before* the write so that we can return
2548 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
2549 */
2550 if (rw == WRITE && mapping->nrpages) {
2551 retval = invalidate_inode_pages2_range(mapping,
2552 offset >> PAGE_CACHE_SHIFT, end);
2553 if (retval)
2554 goto out;
2555 }
2556
2557 retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
2558
2559 /*
2560 * Finally, try again to invalidate clean pages which might have been
2561 * cached by non-direct readahead, or faulted in by get_user_pages()
2562 * if the source of the write was an mmap'ed region of the file
2563 * we're writing. Either one is a pretty crazy thing to do,
2564 * so we don't support it 100%. If this invalidation
2565 * fails, tough, the write still worked...
2566 */
2567 if (rw == WRITE && mapping->nrpages) {
2568 invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
2569 }
2570out:
2571 return retval;
2572}
2573
2574/** 2555/**
2575 * try_to_release_page() - release old fs-specific metadata on a page 2556 * try_to_release_page() - release old fs-specific metadata on a page
2576 * 2557 *
@@ -2582,9 +2563,8 @@ out:
2582 * Otherwise return zero. 2563 * Otherwise return zero.
2583 * 2564 *
2584 * The @gfp_mask argument specifies whether I/O may be performed to release 2565 * The @gfp_mask argument specifies whether I/O may be performed to release
2585 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT). 2566 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
2586 * 2567 *
2587 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
2588 */ 2568 */
2589int try_to_release_page(struct page *page, gfp_t gfp_mask) 2569int try_to_release_page(struct page *page, gfp_t gfp_mask)
2590{ 2570{
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ab171274ef21..a8bf4ab01f86 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -14,6 +14,8 @@
14#include <linux/mempolicy.h> 14#include <linux/mempolicy.h>
15#include <linux/cpuset.h> 15#include <linux/cpuset.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/bootmem.h>
18#include <linux/sysfs.h>
17 19
18#include <asm/page.h> 20#include <asm/page.h>
19#include <asm/pgtable.h> 21#include <asm/pgtable.h>
@@ -22,30 +24,340 @@
22#include "internal.h" 24#include "internal.h"
23 25
24const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; 26const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
25static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
26static unsigned long surplus_huge_pages;
27static unsigned long nr_overcommit_huge_pages;
28unsigned long max_huge_pages;
29unsigned long sysctl_overcommit_huge_pages;
30static struct list_head hugepage_freelists[MAX_NUMNODES];
31static unsigned int nr_huge_pages_node[MAX_NUMNODES];
32static unsigned int free_huge_pages_node[MAX_NUMNODES];
33static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
34static gfp_t htlb_alloc_mask = GFP_HIGHUSER; 27static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35unsigned long hugepages_treat_as_movable; 28unsigned long hugepages_treat_as_movable;
36static int hugetlb_next_nid; 29
30static int max_hstate;
31unsigned int default_hstate_idx;
32struct hstate hstates[HUGE_MAX_HSTATE];
33
34__initdata LIST_HEAD(huge_boot_pages);
35
36/* for command line parsing */
37static struct hstate * __initdata parsed_hstate;
38static unsigned long __initdata default_hstate_max_huge_pages;
39static unsigned long __initdata default_hstate_size;
40
41#define for_each_hstate(h) \
42 for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
37 43
38/* 44/*
39 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 45 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
40 */ 46 */
41static DEFINE_SPINLOCK(hugetlb_lock); 47static DEFINE_SPINLOCK(hugetlb_lock);
42 48
43static void clear_huge_page(struct page *page, unsigned long addr) 49/*
50 * Region tracking -- allows tracking of reservations and instantiated pages
51 * across the pages in a mapping.
52 *
53 * The region data structures are protected by a combination of the mmap_sem
54 * and the hugetlb_instantiation_mutex. To access or modify a region the caller
55 * must either hold the mmap_sem for write, or the mmap_sem for read and
56 * the hugetlb_instantiation mutex:
57 *
58 * down_write(&mm->mmap_sem);
59 * or
60 * down_read(&mm->mmap_sem);
61 * mutex_lock(&hugetlb_instantiation_mutex);
62 */
63struct file_region {
64 struct list_head link;
65 long from;
66 long to;
67};
68
69static long region_add(struct list_head *head, long f, long t)
70{
71 struct file_region *rg, *nrg, *trg;
72
73 /* Locate the region we are either in or before. */
74 list_for_each_entry(rg, head, link)
75 if (f <= rg->to)
76 break;
77
78 /* Round our left edge to the current segment if it encloses us. */
79 if (f > rg->from)
80 f = rg->from;
81
82 /* Check for and consume any regions we now overlap with. */
83 nrg = rg;
84 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
85 if (&rg->link == head)
86 break;
87 if (rg->from > t)
88 break;
89
90 /* If this area reaches higher then extend our area to
91 * include it completely. If this is not the first area
92 * which we intend to reuse, free it. */
93 if (rg->to > t)
94 t = rg->to;
95 if (rg != nrg) {
96 list_del(&rg->link);
97 kfree(rg);
98 }
99 }
100 nrg->from = f;
101 nrg->to = t;
102 return 0;
103}
104
105static long region_chg(struct list_head *head, long f, long t)
106{
107 struct file_region *rg, *nrg;
108 long chg = 0;
109
110 /* Locate the region we are before or in. */
111 list_for_each_entry(rg, head, link)
112 if (f <= rg->to)
113 break;
114
115 /* If we are below the current region then a new region is required.
116 * Subtle, allocate a new region at the position but make it zero
117 * size such that we can guarantee to record the reservation. */
118 if (&rg->link == head || t < rg->from) {
119 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
120 if (!nrg)
121 return -ENOMEM;
122 nrg->from = f;
123 nrg->to = f;
124 INIT_LIST_HEAD(&nrg->link);
125 list_add(&nrg->link, rg->link.prev);
126
127 return t - f;
128 }
129
130 /* Round our left edge to the current segment if it encloses us. */
131 if (f > rg->from)
132 f = rg->from;
133 chg = t - f;
134
135 /* Check for and consume any regions we now overlap with. */
136 list_for_each_entry(rg, rg->link.prev, link) {
137 if (&rg->link == head)
138 break;
139 if (rg->from > t)
140 return chg;
141
142 * We overlap with this area; if it extends further than
143 * us then we must extend ourselves. Account for its
144 * existing reservation. */
145 if (rg->to > t) {
146 chg += rg->to - t;
147 t = rg->to;
148 }
149 chg -= rg->to - rg->from;
150 }
151 return chg;
152}
153
154static long region_truncate(struct list_head *head, long end)
155{
156 struct file_region *rg, *trg;
157 long chg = 0;
158
159 /* Locate the region we are either in or before. */
160 list_for_each_entry(rg, head, link)
161 if (end <= rg->to)
162 break;
163 if (&rg->link == head)
164 return 0;
165
166 /* If we are in the middle of a region then adjust it. */
167 if (end > rg->from) {
168 chg = rg->to - end;
169 rg->to = end;
170 rg = list_entry(rg->link.next, typeof(*rg), link);
171 }
172
173 /* Drop any remaining regions. */
174 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
175 if (&rg->link == head)
176 break;
177 chg += rg->to - rg->from;
178 list_del(&rg->link);
179 kfree(rg);
180 }
181 return chg;
182}
183
184static long region_count(struct list_head *head, long f, long t)
185{
186 struct file_region *rg;
187 long chg = 0;
188
189 /* Locate each segment we overlap with, and count that overlap. */
190 list_for_each_entry(rg, head, link) {
191 int seg_from;
192 int seg_to;
193
194 if (rg->to <= f)
195 continue;
196 if (rg->from >= t)
197 break;
198
199 seg_from = max(rg->from, f);
200 seg_to = min(rg->to, t);
201
202 chg += seg_to - seg_from;
203 }
204
205 return chg;
206}
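
Taken together, region_chg()/region_add() answer "how many offsets in [f, t) are not yet covered?" and then commit the enlarged region, while region_truncate() and region_count() trim and measure the list. As a behavioral model only (the kernel keeps a sorted interval list, not a bitmap, and region_chg() may additionally pre-allocate a zero-size record), the accounting can be sketched in user space like this:

    /* brute-force model of the region_chg()/region_add() accounting
     * over page offsets 0..N-1; values below are made up */
    #include <stdio.h>

    #define N 16
    static int reserved[N];        /* 1 if this offset already has a reservation */

    /* how many new reservations would [f, t) need? (models region_chg) */
    static long chg(long f, long t)
    {
        long need = 0;
        for (long i = f; i < t; i++)
            if (!reserved[i])
                need++;
        return need;
    }

    /* commit the reservation for [f, t) (models region_add) */
    static void add(long f, long t)
    {
        for (long i = f; i < t; i++)
            reserved[i] = 1;
    }

    int main(void)
    {
        add(0, 2);                                    /* existing region [0, 2) */
        add(5, 7);                                    /* existing region [5, 7) */
        printf("chg(1, 6) = %ld\n", chg(1, 6));       /* prints 3: offsets 2, 3, 4 */
        add(1, 6);
        printf("chg(1, 6) = %ld\n", chg(1, 6));       /* prints 0: fully reserved */
        return 0;
    }

With regions [0, 2) and [5, 7) already present, reserving [1, 6) costs three new pages; repeating the same reservation costs none, which is exactly the idempotence the fault path relies on.
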
207
208/*
209 * Convert the address within this vma to the page offset within
210 * the mapping, in pagecache page units; huge pages here.
211 */
212static pgoff_t vma_hugecache_offset(struct hstate *h,
213 struct vm_area_struct *vma, unsigned long address)
214{
215 return ((address - vma->vm_start) >> huge_page_shift(h)) +
216 (vma->vm_pgoff >> huge_page_order(h));
217}
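
For example, with hypothetical numbers for a 2 MB hstate (huge_page_shift() == 21 and huge_page_order() == 9 on a 4 KB base page), a VMA starting at 0x40000000 with vm_pgoff == 512 maps file offset 2 MB; an address 4 MB into the VMA therefore lands at huge-page index 2 + 1 = 3:

    /* worked example of the vma_hugecache_offset() arithmetic */
    #include <stdio.h>

    int main(void)
    {
        unsigned long huge_shift = 21, huge_order = 9;   /* 2 MB pages */
        unsigned long vm_start = 0x40000000UL, vm_pgoff = 512;
        unsigned long address = 0x40400000UL;            /* 4 MB into the VMA */

        unsigned long idx = ((address - vm_start) >> huge_shift) +
                            (vm_pgoff >> huge_order);
        printf("pagecache index = %lu\n", idx);          /* prints 3 */
        return 0;
    }
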
218
219/*
220 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
221 * bits of the reservation map pointer, which are always clear due to
222 * alignment.
223 */
224#define HPAGE_RESV_OWNER (1UL << 0)
225#define HPAGE_RESV_UNMAPPED (1UL << 1)
226#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
227
228/*
229 * These helpers are used to track how many pages are reserved for
230 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
231 * is guaranteed to have their future faults succeed.
232 *
233 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
234 * the reserve counters are updated with the hugetlb_lock held. It is safe
235 * to reset the VMA at fork() time as it is not in use yet and there is no
236 * chance of the global counters getting corrupted as a result of the values.
237 *
238 * The private mapping reservation is represented in a subtly different
239 * manner to a shared mapping. A shared mapping has a region map associated
240 * with the underlying file, this region map represents the backing file
241 pages which have ever had a reservation assigned; this persists even
242 * after the page is instantiated. A private mapping has a region map
243 * associated with the original mmap which is attached to all VMAs which
244 * reference it, this region map represents those offsets which have consumed
245 * reservation ie. where pages have been instantiated.
246 */
247static unsigned long get_vma_private_data(struct vm_area_struct *vma)
248{
249 return (unsigned long)vma->vm_private_data;
250}
251
252static void set_vma_private_data(struct vm_area_struct *vma,
253 unsigned long value)
254{
255 vma->vm_private_data = (void *)value;
256}
257
258struct resv_map {
259 struct kref refs;
260 struct list_head regions;
261};
262
263struct resv_map *resv_map_alloc(void)
264{
265 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
266 if (!resv_map)
267 return NULL;
268
269 kref_init(&resv_map->refs);
270 INIT_LIST_HEAD(&resv_map->regions);
271
272 return resv_map;
273}
274
275void resv_map_release(struct kref *ref)
276{
277 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
278
279 /* Clear out any active regions before we release the map. */
280 region_truncate(&resv_map->regions, 0);
281 kfree(resv_map);
282}
283
284static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
285{
286 VM_BUG_ON(!is_vm_hugetlb_page(vma));
287 if (!(vma->vm_flags & VM_SHARED))
288 return (struct resv_map *)(get_vma_private_data(vma) &
289 ~HPAGE_RESV_MASK);
290 return 0;
291}
292
293static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
294{
295 VM_BUG_ON(!is_vm_hugetlb_page(vma));
296 VM_BUG_ON(vma->vm_flags & VM_SHARED);
297
298 set_vma_private_data(vma, (get_vma_private_data(vma) &
299 HPAGE_RESV_MASK) | (unsigned long)map);
300}
301
302static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
303{
304 VM_BUG_ON(!is_vm_hugetlb_page(vma));
305 VM_BUG_ON(vma->vm_flags & VM_SHARED);
306
307 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
308}
309
310static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
311{
312 VM_BUG_ON(!is_vm_hugetlb_page(vma));
313
314 return (get_vma_private_data(vma) & flag) != 0;
315}
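
The HPAGE_RESV_* flags can live in vm_private_data only because a kmalloc()'d resv_map is at least pointer-aligned, leaving the two low bits of its address free. The same trick, reduced to a stand-alone sketch with made-up names:

    /* packing flag bits into the low bits of an aligned pointer */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define RESV_OWNER    (1UL << 0)
    #define RESV_UNMAPPED (1UL << 1)
    #define RESV_MASK     (RESV_OWNER | RESV_UNMAPPED)

    int main(void)
    {
        /* heap allocations are at least pointer-aligned, so the two
         * low bits of the address are guaranteed to be zero */
        void *map = malloc(64);
        uintptr_t priv = (uintptr_t)map | RESV_OWNER;

        printf("owner flag set: %lu\n",
               (unsigned long)(priv & RESV_OWNER));              /* 1 */
        printf("pointer recovered: %d\n",
               (void *)(priv & ~RESV_MASK) == map);              /* 1 */
        free(map);
        return 0;
    }
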
316
317/* Decrement the reserved pages in the hugepage pool by one */
318static void decrement_hugepage_resv_vma(struct hstate *h,
319 struct vm_area_struct *vma)
320{
321 if (vma->vm_flags & VM_NORESERVE)
322 return;
323
324 if (vma->vm_flags & VM_SHARED) {
325 /* Shared mappings always use reserves */
326 h->resv_huge_pages--;
327 } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
328 /*
329 * Only the process that called mmap() has reserves for
330 * private mappings.
331 */
332 h->resv_huge_pages--;
333 }
334}
335
336/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
337void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
338{
339 VM_BUG_ON(!is_vm_hugetlb_page(vma));
340 if (!(vma->vm_flags & VM_SHARED))
341 vma->vm_private_data = (void *)0;
342}
343
344/* Returns true if the VMA has associated reserve pages */
345static int vma_has_reserves(struct vm_area_struct *vma)
346{
347 if (vma->vm_flags & VM_SHARED)
348 return 1;
349 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
350 return 1;
351 return 0;
352}
353
354static void clear_huge_page(struct page *page,
355 unsigned long addr, unsigned long sz)
44{ 356{
45 int i; 357 int i;
46 358
47 might_sleep(); 359 might_sleep();
48 for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { 360 for (i = 0; i < sz/PAGE_SIZE; i++) {
49 cond_resched(); 361 cond_resched();
50 clear_user_highpage(page + i, addr + i * PAGE_SIZE); 362 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
51 } 363 }
@@ -55,42 +367,44 @@ static void copy_huge_page(struct page *dst, struct page *src,
55 unsigned long addr, struct vm_area_struct *vma) 367 unsigned long addr, struct vm_area_struct *vma)
56{ 368{
57 int i; 369 int i;
370 struct hstate *h = hstate_vma(vma);
58 371
59 might_sleep(); 372 might_sleep();
60 for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) { 373 for (i = 0; i < pages_per_huge_page(h); i++) {
61 cond_resched(); 374 cond_resched();
62 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); 375 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
63 } 376 }
64} 377}
65 378
66static void enqueue_huge_page(struct page *page) 379static void enqueue_huge_page(struct hstate *h, struct page *page)
67{ 380{
68 int nid = page_to_nid(page); 381 int nid = page_to_nid(page);
69 list_add(&page->lru, &hugepage_freelists[nid]); 382 list_add(&page->lru, &h->hugepage_freelists[nid]);
70 free_huge_pages++; 383 h->free_huge_pages++;
71 free_huge_pages_node[nid]++; 384 h->free_huge_pages_node[nid]++;
72} 385}
73 386
74static struct page *dequeue_huge_page(void) 387static struct page *dequeue_huge_page(struct hstate *h)
75{ 388{
76 int nid; 389 int nid;
77 struct page *page = NULL; 390 struct page *page = NULL;
78 391
79 for (nid = 0; nid < MAX_NUMNODES; ++nid) { 392 for (nid = 0; nid < MAX_NUMNODES; ++nid) {
80 if (!list_empty(&hugepage_freelists[nid])) { 393 if (!list_empty(&h->hugepage_freelists[nid])) {
81 page = list_entry(hugepage_freelists[nid].next, 394 page = list_entry(h->hugepage_freelists[nid].next,
82 struct page, lru); 395 struct page, lru);
83 list_del(&page->lru); 396 list_del(&page->lru);
84 free_huge_pages--; 397 h->free_huge_pages--;
85 free_huge_pages_node[nid]--; 398 h->free_huge_pages_node[nid]--;
86 break; 399 break;
87 } 400 }
88 } 401 }
89 return page; 402 return page;
90} 403}
91 404
92static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma, 405static struct page *dequeue_huge_page_vma(struct hstate *h,
93 unsigned long address) 406 struct vm_area_struct *vma,
407 unsigned long address, int avoid_reserve)
94{ 408{
95 int nid; 409 int nid;
96 struct page *page = NULL; 410 struct page *page = NULL;
@@ -101,18 +415,33 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
101 struct zone *zone; 415 struct zone *zone;
102 struct zoneref *z; 416 struct zoneref *z;
103 417
418 /*
419 * A child process with MAP_PRIVATE mappings created by its parent
420 * has no page reserves. This check ensures that reservations are
421 * not "stolen". The child may still get SIGKILLed
422 */
423 if (!vma_has_reserves(vma) &&
424 h->free_huge_pages - h->resv_huge_pages == 0)
425 return NULL;
426
427 /* If reserves cannot be used, ensure enough pages are in the pool */
428 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
429 return NULL;
430
104 for_each_zone_zonelist_nodemask(zone, z, zonelist, 431 for_each_zone_zonelist_nodemask(zone, z, zonelist,
105 MAX_NR_ZONES - 1, nodemask) { 432 MAX_NR_ZONES - 1, nodemask) {
106 nid = zone_to_nid(zone); 433 nid = zone_to_nid(zone);
107 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) && 434 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
108 !list_empty(&hugepage_freelists[nid])) { 435 !list_empty(&h->hugepage_freelists[nid])) {
109 page = list_entry(hugepage_freelists[nid].next, 436 page = list_entry(h->hugepage_freelists[nid].next,
110 struct page, lru); 437 struct page, lru);
111 list_del(&page->lru); 438 list_del(&page->lru);
112 free_huge_pages--; 439 h->free_huge_pages--;
113 free_huge_pages_node[nid]--; 440 h->free_huge_pages_node[nid]--;
114 if (vma && vma->vm_flags & VM_MAYSHARE) 441
115 resv_huge_pages--; 442 if (!avoid_reserve)
443 decrement_hugepage_resv_vma(h, vma);
444
116 break; 445 break;
117 } 446 }
118 } 447 }
@@ -120,12 +449,13 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
120 return page; 449 return page;
121} 450}
122 451
123static void update_and_free_page(struct page *page) 452static void update_and_free_page(struct hstate *h, struct page *page)
124{ 453{
125 int i; 454 int i;
126 nr_huge_pages--; 455
127 nr_huge_pages_node[page_to_nid(page)]--; 456 h->nr_huge_pages--;
128 for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) { 457 h->nr_huge_pages_node[page_to_nid(page)]--;
458 for (i = 0; i < pages_per_huge_page(h); i++) {
129 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | 459 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
130 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | 460 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
131 1 << PG_private | 1<< PG_writeback); 461 1 << PG_private | 1<< PG_writeback);
@@ -133,11 +463,27 @@ static void update_and_free_page(struct page *page)
133 set_compound_page_dtor(page, NULL); 463 set_compound_page_dtor(page, NULL);
134 set_page_refcounted(page); 464 set_page_refcounted(page);
135 arch_release_hugepage(page); 465 arch_release_hugepage(page);
136 __free_pages(page, HUGETLB_PAGE_ORDER); 466 __free_pages(page, huge_page_order(h));
467}
468
469struct hstate *size_to_hstate(unsigned long size)
470{
471 struct hstate *h;
472
473 for_each_hstate(h) {
474 if (huge_page_size(h) == size)
475 return h;
476 }
477 return NULL;
137} 478}
138 479
139static void free_huge_page(struct page *page) 480static void free_huge_page(struct page *page)
140{ 481{
482 /*
483 * Can't pass hstate in here because it is called from the
484 * compound page destructor.
485 */
486 struct hstate *h = page_hstate(page);
141 int nid = page_to_nid(page); 487 int nid = page_to_nid(page);
142 struct address_space *mapping; 488 struct address_space *mapping;
143 489
@@ -147,12 +493,12 @@ static void free_huge_page(struct page *page)
147 INIT_LIST_HEAD(&page->lru); 493 INIT_LIST_HEAD(&page->lru);
148 494
149 spin_lock(&hugetlb_lock); 495 spin_lock(&hugetlb_lock);
150 if (surplus_huge_pages_node[nid]) { 496 if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
151 update_and_free_page(page); 497 update_and_free_page(h, page);
152 surplus_huge_pages--; 498 h->surplus_huge_pages--;
153 surplus_huge_pages_node[nid]--; 499 h->surplus_huge_pages_node[nid]--;
154 } else { 500 } else {
155 enqueue_huge_page(page); 501 enqueue_huge_page(h, page);
156 } 502 }
157 spin_unlock(&hugetlb_lock); 503 spin_unlock(&hugetlb_lock);
158 if (mapping) 504 if (mapping)
@@ -164,7 +510,7 @@ static void free_huge_page(struct page *page)
164 * balanced by operating on them in a round-robin fashion. 510 * balanced by operating on them in a round-robin fashion.
165 * Returns 1 if an adjustment was made. 511 * Returns 1 if an adjustment was made.
166 */ 512 */
167static int adjust_pool_surplus(int delta) 513static int adjust_pool_surplus(struct hstate *h, int delta)
168{ 514{
169 static int prev_nid; 515 static int prev_nid;
170 int nid = prev_nid; 516 int nid = prev_nid;
@@ -177,15 +523,15 @@ static int adjust_pool_surplus(int delta)
177 nid = first_node(node_online_map); 523 nid = first_node(node_online_map);
178 524
179 /* To shrink on this node, there must be a surplus page */ 525 /* To shrink on this node, there must be a surplus page */
180 if (delta < 0 && !surplus_huge_pages_node[nid]) 526 if (delta < 0 && !h->surplus_huge_pages_node[nid])
181 continue; 527 continue;
182 /* Surplus cannot exceed the total number of pages */ 528 /* Surplus cannot exceed the total number of pages */
183 if (delta > 0 && surplus_huge_pages_node[nid] >= 529 if (delta > 0 && h->surplus_huge_pages_node[nid] >=
184 nr_huge_pages_node[nid]) 530 h->nr_huge_pages_node[nid])
185 continue; 531 continue;
186 532
187 surplus_huge_pages += delta; 533 h->surplus_huge_pages += delta;
188 surplus_huge_pages_node[nid] += delta; 534 h->surplus_huge_pages_node[nid] += delta;
189 ret = 1; 535 ret = 1;
190 break; 536 break;
191 } while (nid != prev_nid); 537 } while (nid != prev_nid);
@@ -194,59 +540,74 @@ static int adjust_pool_surplus(int delta)
194 return ret; 540 return ret;
195} 541}
196 542
197static struct page *alloc_fresh_huge_page_node(int nid) 543static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
544{
545 set_compound_page_dtor(page, free_huge_page);
546 spin_lock(&hugetlb_lock);
547 h->nr_huge_pages++;
548 h->nr_huge_pages_node[nid]++;
549 spin_unlock(&hugetlb_lock);
550 put_page(page); /* free it into the hugepage allocator */
551}
552
553static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
198{ 554{
199 struct page *page; 555 struct page *page;
200 556
557 if (h->order >= MAX_ORDER)
558 return NULL;
559
201 page = alloc_pages_node(nid, 560 page = alloc_pages_node(nid,
202 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| 561 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
203 __GFP_REPEAT|__GFP_NOWARN, 562 __GFP_REPEAT|__GFP_NOWARN,
204 HUGETLB_PAGE_ORDER); 563 huge_page_order(h));
205 if (page) { 564 if (page) {
206 if (arch_prepare_hugepage(page)) { 565 if (arch_prepare_hugepage(page)) {
207 __free_pages(page, HUGETLB_PAGE_ORDER); 566 __free_pages(page, HUGETLB_PAGE_ORDER);
208 return NULL; 567 return NULL;
209 } 568 }
210 set_compound_page_dtor(page, free_huge_page); 569 prep_new_huge_page(h, page, nid);
211 spin_lock(&hugetlb_lock);
212 nr_huge_pages++;
213 nr_huge_pages_node[nid]++;
214 spin_unlock(&hugetlb_lock);
215 put_page(page); /* free it into the hugepage allocator */
216 } 570 }
217 571
218 return page; 572 return page;
219} 573}
220 574
221static int alloc_fresh_huge_page(void) 575/*
576 * Use a helper variable to find the next node and then
577 * copy it back to hugetlb_next_nid afterwards:
578 * otherwise there's a window in which a racer might
579 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
580 * But we don't need to use a spin_lock here: it really
581 * doesn't matter if occasionally a racer chooses the
582 * same nid as we do. Move nid forward in the mask even
583 * if we just successfully allocated a hugepage so that
584 * the next caller gets hugepages on the next node.
585 */
586static int hstate_next_node(struct hstate *h)
587{
588 int next_nid;
589 next_nid = next_node(h->hugetlb_next_nid, node_online_map);
590 if (next_nid == MAX_NUMNODES)
591 next_nid = first_node(node_online_map);
592 h->hugetlb_next_nid = next_nid;
593 return next_nid;
594}
595
596static int alloc_fresh_huge_page(struct hstate *h)
222{ 597{
223 struct page *page; 598 struct page *page;
224 int start_nid; 599 int start_nid;
225 int next_nid; 600 int next_nid;
226 int ret = 0; 601 int ret = 0;
227 602
228 start_nid = hugetlb_next_nid; 603 start_nid = h->hugetlb_next_nid;
229 604
230 do { 605 do {
231 page = alloc_fresh_huge_page_node(hugetlb_next_nid); 606 page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
232 if (page) 607 if (page)
233 ret = 1; 608 ret = 1;
234 /* 609 next_nid = hstate_next_node(h);
235 * Use a helper variable to find the next node and then 610 } while (!page && h->hugetlb_next_nid != start_nid);
236 * copy it back to hugetlb_next_nid afterwards:
237 * otherwise there's a window in which a racer might
238 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
239 * But we don't need to use a spin_lock here: it really
240 * doesn't matter if occasionally a racer chooses the
241 * same nid as we do. Move nid forward in the mask even
242 * if we just successfully allocated a hugepage so that
243 * the next caller gets hugepages on the next node.
244 */
245 next_nid = next_node(hugetlb_next_nid, node_online_map);
246 if (next_nid == MAX_NUMNODES)
247 next_nid = first_node(node_online_map);
248 hugetlb_next_nid = next_nid;
249 } while (!page && hugetlb_next_nid != start_nid);
250 611
251 if (ret) 612 if (ret)
252 count_vm_event(HTLB_BUDDY_PGALLOC); 613 count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -256,12 +617,15 @@ static int alloc_fresh_huge_page(void)
256 return ret; 617 return ret;
257} 618}
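
The walk tries the current hugetlb_next_nid first, advances the cursor after every attempt (successful or not), and gives up once it has wrapped back to the starting node. A user-space model of that loop, with a made-up try_alloc() standing in for alloc_fresh_huge_page_node() and an arbitrary node map:

    #include <stdio.h>

    #define MAX_NUMNODES 4
    static int online[MAX_NUMNODES] = { 1, 1, 0, 1 };   /* node 2 offline */
    static int next_nid = 0;

    /* models next_node()/first_node() wrap-around over online nodes */
    static int next_node(int nid)
    {
        do {
            nid = (nid + 1) % MAX_NUMNODES;
        } while (!online[nid]);
        return nid;
    }

    /* pretend only node 3 currently has free memory for a huge page */
    static int try_alloc(int nid)
    {
        return nid == 3;
    }

    int main(void)
    {
        int start = next_nid, got = 0;

        do {
            if (try_alloc(next_nid))
                got = 1;
            next_nid = next_node(next_nid);   /* advance even on success */
        } while (!got && next_nid != start);

        printf("allocated: %d, next caller starts at node %d\n", got, next_nid);
        return 0;
    }
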
258 619
259static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, 620static struct page *alloc_buddy_huge_page(struct hstate *h,
260 unsigned long address) 621 struct vm_area_struct *vma, unsigned long address)
261{ 622{
262 struct page *page; 623 struct page *page;
263 unsigned int nid; 624 unsigned int nid;
264 625
626 if (h->order >= MAX_ORDER)
627 return NULL;
628
265 /* 629 /*
266 * Assume we will successfully allocate the surplus page to 630 * Assume we will successfully allocate the surplus page to
267 * prevent racing processes from causing the surplus to exceed 631 * prevent racing processes from causing the surplus to exceed
@@ -286,18 +650,18 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
286 * per-node value is checked there. 650 * per-node value is checked there.
287 */ 651 */
288 spin_lock(&hugetlb_lock); 652 spin_lock(&hugetlb_lock);
289 if (surplus_huge_pages >= nr_overcommit_huge_pages) { 653 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
290 spin_unlock(&hugetlb_lock); 654 spin_unlock(&hugetlb_lock);
291 return NULL; 655 return NULL;
292 } else { 656 } else {
293 nr_huge_pages++; 657 h->nr_huge_pages++;
294 surplus_huge_pages++; 658 h->surplus_huge_pages++;
295 } 659 }
296 spin_unlock(&hugetlb_lock); 660 spin_unlock(&hugetlb_lock);
297 661
298 page = alloc_pages(htlb_alloc_mask|__GFP_COMP| 662 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
299 __GFP_REPEAT|__GFP_NOWARN, 663 __GFP_REPEAT|__GFP_NOWARN,
300 HUGETLB_PAGE_ORDER); 664 huge_page_order(h));
301 665
302 spin_lock(&hugetlb_lock); 666 spin_lock(&hugetlb_lock);
303 if (page) { 667 if (page) {
@@ -312,12 +676,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
312 /* 676 /*
313 * We incremented the global counters already 677 * We incremented the global counters already
314 */ 678 */
315 nr_huge_pages_node[nid]++; 679 h->nr_huge_pages_node[nid]++;
316 surplus_huge_pages_node[nid]++; 680 h->surplus_huge_pages_node[nid]++;
317 __count_vm_event(HTLB_BUDDY_PGALLOC); 681 __count_vm_event(HTLB_BUDDY_PGALLOC);
318 } else { 682 } else {
319 nr_huge_pages--; 683 h->nr_huge_pages--;
320 surplus_huge_pages--; 684 h->surplus_huge_pages--;
321 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 685 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
322 } 686 }
323 spin_unlock(&hugetlb_lock); 687 spin_unlock(&hugetlb_lock);
@@ -329,16 +693,16 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
329 * Increase the hugetlb pool such that it can accommodate a reservation 693
330 * of size 'delta'. 694 * of size 'delta'.
331 */ 695 */
332static int gather_surplus_pages(int delta) 696static int gather_surplus_pages(struct hstate *h, int delta)
333{ 697{
334 struct list_head surplus_list; 698 struct list_head surplus_list;
335 struct page *page, *tmp; 699 struct page *page, *tmp;
336 int ret, i; 700 int ret, i;
337 int needed, allocated; 701 int needed, allocated;
338 702
339 needed = (resv_huge_pages + delta) - free_huge_pages; 703 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
340 if (needed <= 0) { 704 if (needed <= 0) {
341 resv_huge_pages += delta; 705 h->resv_huge_pages += delta;
342 return 0; 706 return 0;
343 } 707 }
344 708
@@ -349,7 +713,7 @@ static int gather_surplus_pages(int delta)
349retry: 713retry:
350 spin_unlock(&hugetlb_lock); 714 spin_unlock(&hugetlb_lock);
351 for (i = 0; i < needed; i++) { 715 for (i = 0; i < needed; i++) {
352 page = alloc_buddy_huge_page(NULL, 0); 716 page = alloc_buddy_huge_page(h, NULL, 0);
353 if (!page) { 717 if (!page) {
354 /* 718 /*
355 * We were not able to allocate enough pages to 719 * We were not able to allocate enough pages to
@@ -370,7 +734,8 @@ retry:
370 * because either resv_huge_pages or free_huge_pages may have changed. 734 * because either resv_huge_pages or free_huge_pages may have changed.
371 */ 735 */
372 spin_lock(&hugetlb_lock); 736 spin_lock(&hugetlb_lock);
373 needed = (resv_huge_pages + delta) - (free_huge_pages + allocated); 737 needed = (h->resv_huge_pages + delta) -
738 (h->free_huge_pages + allocated);
374 if (needed > 0) 739 if (needed > 0)
375 goto retry; 740 goto retry;
376 741
@@ -383,7 +748,7 @@ retry:
383 * before they are reserved. 748 * before they are reserved.
384 */ 749 */
385 needed += allocated; 750 needed += allocated;
386 resv_huge_pages += delta; 751 h->resv_huge_pages += delta;
387 ret = 0; 752 ret = 0;
388free: 753free:
389 /* Free the needed pages to the hugetlb pool */ 754 /* Free the needed pages to the hugetlb pool */
@@ -391,7 +756,7 @@ free:
391 if ((--needed) < 0) 756 if ((--needed) < 0)
392 break; 757 break;
393 list_del(&page->lru); 758 list_del(&page->lru);
394 enqueue_huge_page(page); 759 enqueue_huge_page(h, page);
395 } 760 }
396 761
397 /* Free unnecessary surplus pages to the buddy allocator */ 762 /* Free unnecessary surplus pages to the buddy allocator */
@@ -419,7 +784,8 @@ free:
419 * allocated to satisfy the reservation must be explicitly freed if they were 784 * allocated to satisfy the reservation must be explicitly freed if they were
420 * never used. 785 * never used.
421 */ 786 */
422static void return_unused_surplus_pages(unsigned long unused_resv_pages) 787static void return_unused_surplus_pages(struct hstate *h,
788 unsigned long unused_resv_pages)
423{ 789{
424 static int nid = -1; 790 static int nid = -1;
425 struct page *page; 791 struct page *page;
@@ -434,114 +800,231 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
434 unsigned long remaining_iterations = num_online_nodes(); 800 unsigned long remaining_iterations = num_online_nodes();
435 801
436 /* Uncommit the reservation */ 802 /* Uncommit the reservation */
437 resv_huge_pages -= unused_resv_pages; 803 h->resv_huge_pages -= unused_resv_pages;
804
805 /* Cannot return gigantic pages currently */
806 if (h->order >= MAX_ORDER)
807 return;
438 808
439 nr_pages = min(unused_resv_pages, surplus_huge_pages); 809 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
440 810
441 while (remaining_iterations-- && nr_pages) { 811 while (remaining_iterations-- && nr_pages) {
442 nid = next_node(nid, node_online_map); 812 nid = next_node(nid, node_online_map);
443 if (nid == MAX_NUMNODES) 813 if (nid == MAX_NUMNODES)
444 nid = first_node(node_online_map); 814 nid = first_node(node_online_map);
445 815
446 if (!surplus_huge_pages_node[nid]) 816 if (!h->surplus_huge_pages_node[nid])
447 continue; 817 continue;
448 818
449 if (!list_empty(&hugepage_freelists[nid])) { 819 if (!list_empty(&h->hugepage_freelists[nid])) {
450 page = list_entry(hugepage_freelists[nid].next, 820 page = list_entry(h->hugepage_freelists[nid].next,
451 struct page, lru); 821 struct page, lru);
452 list_del(&page->lru); 822 list_del(&page->lru);
453 update_and_free_page(page); 823 update_and_free_page(h, page);
454 free_huge_pages--; 824 h->free_huge_pages--;
455 free_huge_pages_node[nid]--; 825 h->free_huge_pages_node[nid]--;
456 surplus_huge_pages--; 826 h->surplus_huge_pages--;
457 surplus_huge_pages_node[nid]--; 827 h->surplus_huge_pages_node[nid]--;
458 nr_pages--; 828 nr_pages--;
459 remaining_iterations = num_online_nodes(); 829 remaining_iterations = num_online_nodes();
460 } 830 }
461 } 831 }
462} 832}
463 833
834/*
835 * Determine if the huge page at addr within the vma has an associated
836 * reservation. Where it does not we will need to logically increase
837 * reservation and actually increase quota before an allocation can occur.
838 * Where any new reservation would be required the reservation change is
839 * prepared, but not committed. Once the page has been quota'd, allocated
840 * and instantiated, the change should be committed via vma_commit_reservation.
841 * No action is required on failure.
842 */
843static int vma_needs_reservation(struct hstate *h,
844 struct vm_area_struct *vma, unsigned long addr)
845{
846 struct address_space *mapping = vma->vm_file->f_mapping;
847 struct inode *inode = mapping->host;
848
849 if (vma->vm_flags & VM_SHARED) {
850 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
851 return region_chg(&inode->i_mapping->private_list,
852 idx, idx + 1);
853
854 } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
855 return 1;
464 856
465static struct page *alloc_huge_page_shared(struct vm_area_struct *vma, 857 } else {
466 unsigned long addr) 858 int err;
859 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
860 struct resv_map *reservations = vma_resv_map(vma);
861
862 err = region_chg(&reservations->regions, idx, idx + 1);
863 if (err < 0)
864 return err;
865 return 0;
866 }
867}
868static void vma_commit_reservation(struct hstate *h,
869 struct vm_area_struct *vma, unsigned long addr)
467{ 870{
468 struct page *page; 871 struct address_space *mapping = vma->vm_file->f_mapping;
872 struct inode *inode = mapping->host;
469 873
470 spin_lock(&hugetlb_lock); 874 if (vma->vm_flags & VM_SHARED) {
471 page = dequeue_huge_page_vma(vma, addr); 875 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
472 spin_unlock(&hugetlb_lock); 876 region_add(&inode->i_mapping->private_list, idx, idx + 1);
473 return page ? page : ERR_PTR(-VM_FAULT_OOM); 877
878 } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
879 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
880 struct resv_map *reservations = vma_resv_map(vma);
881
882 /* Mark this page used in the map. */
883 region_add(&reservations->regions, idx, idx + 1);
884 }
474} 885}
475 886
476static struct page *alloc_huge_page_private(struct vm_area_struct *vma, 887static struct page *alloc_huge_page(struct vm_area_struct *vma,
477 unsigned long addr) 888 unsigned long addr, int avoid_reserve)
478{ 889{
479 struct page *page = NULL; 890 struct hstate *h = hstate_vma(vma);
891 struct page *page;
892 struct address_space *mapping = vma->vm_file->f_mapping;
893 struct inode *inode = mapping->host;
894 unsigned int chg;
480 895
481 if (hugetlb_get_quota(vma->vm_file->f_mapping, 1)) 896 /*
482 return ERR_PTR(-VM_FAULT_SIGBUS); 897 * Processes that did not create the mapping will have no reserves and
898 * will not have accounted against quota. Check that the quota can be
899 * made before satisfying the allocation.
900 * MAP_NORESERVE mappings may also need pages and quota allocated
901 * if no reserve mapping overlaps.
902 */
903 chg = vma_needs_reservation(h, vma, addr);
904 if (chg < 0)
905 return ERR_PTR(chg);
906 if (chg)
907 if (hugetlb_get_quota(inode->i_mapping, chg))
908 return ERR_PTR(-ENOSPC);
483 909
484 spin_lock(&hugetlb_lock); 910 spin_lock(&hugetlb_lock);
485 if (free_huge_pages > resv_huge_pages) 911 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
486 page = dequeue_huge_page_vma(vma, addr);
487 spin_unlock(&hugetlb_lock); 912 spin_unlock(&hugetlb_lock);
913
488 if (!page) { 914 if (!page) {
489 page = alloc_buddy_huge_page(vma, addr); 915 page = alloc_buddy_huge_page(h, vma, addr);
490 if (!page) { 916 if (!page) {
491 hugetlb_put_quota(vma->vm_file->f_mapping, 1); 917 hugetlb_put_quota(inode->i_mapping, chg);
492 return ERR_PTR(-VM_FAULT_OOM); 918 return ERR_PTR(-VM_FAULT_OOM);
493 } 919 }
494 } 920 }
921
922 set_page_refcounted(page);
923 set_page_private(page, (unsigned long) mapping);
924
925 vma_commit_reservation(h, vma, addr);
926
495 return page; 927 return page;
496} 928}
497 929
498static struct page *alloc_huge_page(struct vm_area_struct *vma, 930__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
499 unsigned long addr)
500{ 931{
501 struct page *page; 932 struct huge_bootmem_page *m;
502 struct address_space *mapping = vma->vm_file->f_mapping; 933 int nr_nodes = nodes_weight(node_online_map);
503 934
504 if (vma->vm_flags & VM_MAYSHARE) 935 while (nr_nodes) {
505 page = alloc_huge_page_shared(vma, addr); 936 void *addr;
506 else 937
507 page = alloc_huge_page_private(vma, addr); 938 addr = __alloc_bootmem_node_nopanic(
939 NODE_DATA(h->hugetlb_next_nid),
940 huge_page_size(h), huge_page_size(h), 0);
941
942 if (addr) {
943 /*
944 * Use the beginning of the huge page to store the
945 * huge_bootmem_page struct (until gather_bootmem
946 * puts them into the mem_map).
947 */
948 m = addr;
949 if (m)
950 goto found;
951 }
952 hstate_next_node(h);
953 nr_nodes--;
954 }
955 return 0;
956
957found:
958 BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
959 /* Put them into a private list first because mem_map is not up yet */
960 list_add(&m->list, &huge_boot_pages);
961 m->hstate = h;
962 return 1;
963}
508 964
509 if (!IS_ERR(page)) { 965/* Put bootmem huge pages into the standard lists after mem_map is up */
510 set_page_refcounted(page); 966static void __init gather_bootmem_prealloc(void)
511 set_page_private(page, (unsigned long) mapping); 967{
968 struct huge_bootmem_page *m;
969
970 list_for_each_entry(m, &huge_boot_pages, list) {
971 struct page *page = virt_to_page(m);
972 struct hstate *h = m->hstate;
973 __ClearPageReserved(page);
974 WARN_ON(page_count(page) != 1);
975 prep_compound_page(page, h->order);
976 prep_new_huge_page(h, page, page_to_nid(page));
512 } 977 }
513 return page;
514} 978}
515 979
516static int __init hugetlb_init(void) 980static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
517{ 981{
518 unsigned long i; 982 unsigned long i;
519 983
520 if (HPAGE_SHIFT == 0) 984 for (i = 0; i < h->max_huge_pages; ++i) {
521 return 0; 985 if (h->order >= MAX_ORDER) {
522 986 if (!alloc_bootmem_huge_page(h))
523 for (i = 0; i < MAX_NUMNODES; ++i) 987 break;
524 INIT_LIST_HEAD(&hugepage_freelists[i]); 988 } else if (!alloc_fresh_huge_page(h))
989 break;
990 }
991 h->max_huge_pages = i;
992}
525 993
526 hugetlb_next_nid = first_node(node_online_map); 994static void __init hugetlb_init_hstates(void)
995{
996 struct hstate *h;
527 997
528 for (i = 0; i < max_huge_pages; ++i) { 998 for_each_hstate(h) {
529 if (!alloc_fresh_huge_page()) 999 /* oversize hugepages were init'ed in early boot */
530 break; 1000 if (h->order < MAX_ORDER)
1001 hugetlb_hstate_alloc_pages(h);
531 } 1002 }
532 max_huge_pages = free_huge_pages = nr_huge_pages = i;
533 printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
534 return 0;
535} 1003}
536module_init(hugetlb_init);
537 1004
538static int __init hugetlb_setup(char *s) 1005static char * __init memfmt(char *buf, unsigned long n)
539{ 1006{
540 if (sscanf(s, "%lu", &max_huge_pages) <= 0) 1007 if (n >= (1UL << 30))
541 max_huge_pages = 0; 1008 sprintf(buf, "%lu GB", n >> 30);
542 return 1; 1009 else if (n >= (1UL << 20))
1010 sprintf(buf, "%lu MB", n >> 20);
1011 else
1012 sprintf(buf, "%lu KB", n >> 10);
1013 return buf;
1014}
1015
1016static void __init report_hugepages(void)
1017{
1018 struct hstate *h;
1019
1020 for_each_hstate(h) {
1021 char buf[32];
1022 printk(KERN_INFO "HugeTLB registered %s page size, "
1023 "pre-allocated %ld pages\n",
1024 memfmt(buf, huge_page_size(h)),
1025 h->free_huge_pages);
1026 }
543} 1027}
544__setup("hugepages=", hugetlb_setup);
545 1028
546static unsigned int cpuset_mems_nr(unsigned int *array) 1029static unsigned int cpuset_mems_nr(unsigned int *array)
547{ 1030{
@@ -556,35 +1039,42 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
556 1039
557#ifdef CONFIG_SYSCTL 1040#ifdef CONFIG_SYSCTL
558#ifdef CONFIG_HIGHMEM 1041#ifdef CONFIG_HIGHMEM
559static void try_to_free_low(unsigned long count) 1042static void try_to_free_low(struct hstate *h, unsigned long count)
560{ 1043{
561 int i; 1044 int i;
562 1045
1046 if (h->order >= MAX_ORDER)
1047 return;
1048
563 for (i = 0; i < MAX_NUMNODES; ++i) { 1049 for (i = 0; i < MAX_NUMNODES; ++i) {
564 struct page *page, *next; 1050 struct page *page, *next;
565 list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) { 1051 struct list_head *freel = &h->hugepage_freelists[i];
566 if (count >= nr_huge_pages) 1052 list_for_each_entry_safe(page, next, freel, lru) {
1053 if (count >= h->nr_huge_pages)
567 return; 1054 return;
568 if (PageHighMem(page)) 1055 if (PageHighMem(page))
569 continue; 1056 continue;
570 list_del(&page->lru); 1057 list_del(&page->lru);
571 update_and_free_page(page); 1058 update_and_free_page(h, page);
572 free_huge_pages--; 1059 h->free_huge_pages--;
573 free_huge_pages_node[page_to_nid(page)]--; 1060 h->free_huge_pages_node[page_to_nid(page)]--;
574 } 1061 }
575 } 1062 }
576} 1063}
577#else 1064#else
578static inline void try_to_free_low(unsigned long count) 1065static inline void try_to_free_low(struct hstate *h, unsigned long count)
579{ 1066{
580} 1067}
581#endif 1068#endif
582 1069
583#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages) 1070#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
584static unsigned long set_max_huge_pages(unsigned long count) 1071static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
585{ 1072{
586 unsigned long min_count, ret; 1073 unsigned long min_count, ret;
587 1074
1075 if (h->order >= MAX_ORDER)
1076 return h->max_huge_pages;
1077
588 /* 1078 /*
589 * Increase the pool size 1079 * Increase the pool size
590 * First take pages out of surplus state. Then make up the 1080 * First take pages out of surplus state. Then make up the
@@ -597,20 +1087,19 @@ static unsigned long set_max_huge_pages(unsigned long count)
597 * within all the constraints specified by the sysctls. 1087 * within all the constraints specified by the sysctls.
598 */ 1088 */
599 spin_lock(&hugetlb_lock); 1089 spin_lock(&hugetlb_lock);
600 while (surplus_huge_pages && count > persistent_huge_pages) { 1090 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
601 if (!adjust_pool_surplus(-1)) 1091 if (!adjust_pool_surplus(h, -1))
602 break; 1092 break;
603 } 1093 }
604 1094
605 while (count > persistent_huge_pages) { 1095 while (count > persistent_huge_pages(h)) {
606 int ret;
607 /* 1096 /*
608 * If this allocation races such that we no longer need the 1097 * If this allocation races such that we no longer need the
609 * page, free_huge_page will handle it by freeing the page 1098 * page, free_huge_page will handle it by freeing the page
610 * and reducing the surplus. 1099 * and reducing the surplus.
611 */ 1100 */
612 spin_unlock(&hugetlb_lock); 1101 spin_unlock(&hugetlb_lock);
613 ret = alloc_fresh_huge_page(); 1102 ret = alloc_fresh_huge_page(h);
614 spin_lock(&hugetlb_lock); 1103 spin_lock(&hugetlb_lock);
615 if (!ret) 1104 if (!ret)
616 goto out; 1105 goto out;
@@ -632,31 +1121,288 @@ static unsigned long set_max_huge_pages(unsigned long count)
632 * and won't grow the pool anywhere else. Not until one of the 1121 * and won't grow the pool anywhere else. Not until one of the
633 * sysctls are changed, or the surplus pages go out of use. 1122 * sysctls are changed, or the surplus pages go out of use.
634 */ 1123 */
635 min_count = resv_huge_pages + nr_huge_pages - free_huge_pages; 1124 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
636 min_count = max(count, min_count); 1125 min_count = max(count, min_count);
637 try_to_free_low(min_count); 1126 try_to_free_low(h, min_count);
638 while (min_count < persistent_huge_pages) { 1127 while (min_count < persistent_huge_pages(h)) {
639 struct page *page = dequeue_huge_page(); 1128 struct page *page = dequeue_huge_page(h);
640 if (!page) 1129 if (!page)
641 break; 1130 break;
642 update_and_free_page(page); 1131 update_and_free_page(h, page);
643 } 1132 }
644 while (count < persistent_huge_pages) { 1133 while (count < persistent_huge_pages(h)) {
645 if (!adjust_pool_surplus(1)) 1134 if (!adjust_pool_surplus(h, 1))
646 break; 1135 break;
647 } 1136 }
648out: 1137out:
649 ret = persistent_huge_pages; 1138 ret = persistent_huge_pages(h);
650 spin_unlock(&hugetlb_lock); 1139 spin_unlock(&hugetlb_lock);
651 return ret; 1140 return ret;
652} 1141}
653 1142
1143#define HSTATE_ATTR_RO(_name) \
1144 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1145
1146#define HSTATE_ATTR(_name) \
1147 static struct kobj_attribute _name##_attr = \
1148 __ATTR(_name, 0644, _name##_show, _name##_store)
1149
1150static struct kobject *hugepages_kobj;
1151static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1152
1153static struct hstate *kobj_to_hstate(struct kobject *kobj)
1154{
1155 int i;
1156 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1157 if (hstate_kobjs[i] == kobj)
1158 return &hstates[i];
1159 BUG();
1160 return NULL;
1161}
1162
1163static ssize_t nr_hugepages_show(struct kobject *kobj,
1164 struct kobj_attribute *attr, char *buf)
1165{
1166 struct hstate *h = kobj_to_hstate(kobj);
1167 return sprintf(buf, "%lu\n", h->nr_huge_pages);
1168}
1169static ssize_t nr_hugepages_store(struct kobject *kobj,
1170 struct kobj_attribute *attr, const char *buf, size_t count)
1171{
1172 int err;
1173 unsigned long input;
1174 struct hstate *h = kobj_to_hstate(kobj);
1175
1176 err = strict_strtoul(buf, 10, &input);
1177 if (err)
1178 return 0;
1179
1180 h->max_huge_pages = set_max_huge_pages(h, input);
1181
1182 return count;
1183}
1184HSTATE_ATTR(nr_hugepages);
1185
1186static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1187 struct kobj_attribute *attr, char *buf)
1188{
1189 struct hstate *h = kobj_to_hstate(kobj);
1190 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1191}
1192static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1193 struct kobj_attribute *attr, const char *buf, size_t count)
1194{
1195 int err;
1196 unsigned long input;
1197 struct hstate *h = kobj_to_hstate(kobj);
1198
1199 err = strict_strtoul(buf, 10, &input);
1200 if (err)
1201 return 0;
1202
1203 spin_lock(&hugetlb_lock);
1204 h->nr_overcommit_huge_pages = input;
1205 spin_unlock(&hugetlb_lock);
1206
1207 return count;
1208}
1209HSTATE_ATTR(nr_overcommit_hugepages);
1210
1211static ssize_t free_hugepages_show(struct kobject *kobj,
1212 struct kobj_attribute *attr, char *buf)
1213{
1214 struct hstate *h = kobj_to_hstate(kobj);
1215 return sprintf(buf, "%lu\n", h->free_huge_pages);
1216}
1217HSTATE_ATTR_RO(free_hugepages);
1218
1219static ssize_t resv_hugepages_show(struct kobject *kobj,
1220 struct kobj_attribute *attr, char *buf)
1221{
1222 struct hstate *h = kobj_to_hstate(kobj);
1223 return sprintf(buf, "%lu\n", h->resv_huge_pages);
1224}
1225HSTATE_ATTR_RO(resv_hugepages);
1226
1227static ssize_t surplus_hugepages_show(struct kobject *kobj,
1228 struct kobj_attribute *attr, char *buf)
1229{
1230 struct hstate *h = kobj_to_hstate(kobj);
1231 return sprintf(buf, "%lu\n", h->surplus_huge_pages);
1232}
1233HSTATE_ATTR_RO(surplus_hugepages);
1234
1235static struct attribute *hstate_attrs[] = {
1236 &nr_hugepages_attr.attr,
1237 &nr_overcommit_hugepages_attr.attr,
1238 &free_hugepages_attr.attr,
1239 &resv_hugepages_attr.attr,
1240 &surplus_hugepages_attr.attr,
1241 NULL,
1242};
1243
1244static struct attribute_group hstate_attr_group = {
1245 .attrs = hstate_attrs,
1246};
1247
1248static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
1249{
1250 int retval;
1251
1252 hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
1253 hugepages_kobj);
1254 if (!hstate_kobjs[h - hstates])
1255 return -ENOMEM;
1256
1257 retval = sysfs_create_group(hstate_kobjs[h - hstates],
1258 &hstate_attr_group);
1259 if (retval)
1260 kobject_put(hstate_kobjs[h - hstates]);
1261
1262 return retval;
1263}
1264
1265static void __init hugetlb_sysfs_init(void)
1266{
1267 struct hstate *h;
1268 int err;
1269
1270 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1271 if (!hugepages_kobj)
1272 return;
1273
1274 for_each_hstate(h) {
1275 err = hugetlb_sysfs_add_hstate(h);
1276 if (err)
1277 printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1278 h->name);
1279 }
1280}
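
With the default hstate registered, the result is one directory per page size under /sys/kernel/mm (mm_kobj), named via the "hugepages-%lukB" format set in hugetlb_add_hstate(). For a 2 MB hstate on x86, for example, the layout would look roughly like:

    /sys/kernel/mm/hugepages/
        hugepages-2048kB/
            nr_hugepages
            nr_overcommit_hugepages
            free_hugepages
            resv_hugepages
            surplus_hugepages
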
1281
1282static void __exit hugetlb_exit(void)
1283{
1284 struct hstate *h;
1285
1286 for_each_hstate(h) {
1287 kobject_put(hstate_kobjs[h - hstates]);
1288 }
1289
1290 kobject_put(hugepages_kobj);
1291}
1292module_exit(hugetlb_exit);
1293
1294static int __init hugetlb_init(void)
1295{
1296 BUILD_BUG_ON(HPAGE_SHIFT == 0);
1297
1298 if (!size_to_hstate(default_hstate_size)) {
1299 default_hstate_size = HPAGE_SIZE;
1300 if (!size_to_hstate(default_hstate_size))
1301 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1302 }
1303 default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1304 if (default_hstate_max_huge_pages)
1305 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1306
1307 hugetlb_init_hstates();
1308
1309 gather_bootmem_prealloc();
1310
1311 report_hugepages();
1312
1313 hugetlb_sysfs_init();
1314
1315 return 0;
1316}
1317module_init(hugetlb_init);
1318
1319/* Should be called on processing a hugepagesz=... option */
1320void __init hugetlb_add_hstate(unsigned order)
1321{
1322 struct hstate *h;
1323 unsigned long i;
1324
1325 if (size_to_hstate(PAGE_SIZE << order)) {
1326 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1327 return;
1328 }
1329 BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1330 BUG_ON(order == 0);
1331 h = &hstates[max_hstate++];
1332 h->order = order;
1333 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1334 h->nr_huge_pages = 0;
1335 h->free_huge_pages = 0;
1336 for (i = 0; i < MAX_NUMNODES; ++i)
1337 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1338 h->hugetlb_next_nid = first_node(node_online_map);
1339 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1340 huge_page_size(h)/1024);
1341
1342 parsed_hstate = h;
1343}
1344
1345static int __init hugetlb_nrpages_setup(char *s)
1346{
1347 unsigned long *mhp;
1348 static unsigned long *last_mhp;
1349
1350 /*
1351 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1352 * so this hugepages= parameter goes to the "default hstate".
1353 */
1354 if (!max_hstate)
1355 mhp = &default_hstate_max_huge_pages;
1356 else
1357 mhp = &parsed_hstate->max_huge_pages;
1358
1359 if (mhp == last_mhp) {
1360 printk(KERN_WARNING "hugepages= specified twice without "
1361 "interleaving hugepagesz=, ignoring\n");
1362 return 1;
1363 }
1364
1365 if (sscanf(s, "%lu", mhp) <= 0)
1366 *mhp = 0;
1367
1368 /*
1369 * Global state is always initialized later in hugetlb_init.
1370 * But we need to allocate >= MAX_ORDER hstates here early to still
1371 * use the bootmem allocator.
1372 */
1373 if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1374 hugetlb_hstate_alloc_pages(parsed_hstate);
1375
1376 last_mhp = mhp;
1377
1378 return 1;
1379}
1380__setup("hugepages=", hugetlb_nrpages_setup);
1381
1382static int __init hugetlb_default_setup(char *s)
1383{
1384 default_hstate_size = memparse(s, &s);
1385 return 1;
1386}
1387__setup("default_hugepagesz=", hugetlb_default_setup);
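
Putting the three options together: each hugepages= value applies to the most recently parsed hugepagesz= (or to the default hstate if none has been seen yet), and default_hugepagesz= selects which size backs the unadorned interfaces. On an architecture whose hugepagesz= handler registers 1 GB and 2 MB hstates (an x86_64-style example; the available sizes depend on the arch code), a boot line such as

    default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512

pre-allocates four 1 GB pages from bootmem (order >= MAX_ORDER) and 512 2 MB pages from the buddy allocator, with the 1 GB hstate acting as the default.
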
1388
654int hugetlb_sysctl_handler(struct ctl_table *table, int write, 1389int hugetlb_sysctl_handler(struct ctl_table *table, int write,
655 struct file *file, void __user *buffer, 1390 struct file *file, void __user *buffer,
656 size_t *length, loff_t *ppos) 1391 size_t *length, loff_t *ppos)
657{ 1392{
1393 struct hstate *h = &default_hstate;
1394 unsigned long tmp;
1395
1396 if (!write)
1397 tmp = h->max_huge_pages;
1398
1399 table->data = &tmp;
1400 table->maxlen = sizeof(unsigned long);
658 proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 1401 proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
659 max_huge_pages = set_max_huge_pages(max_huge_pages); 1402
1403 if (write)
1404 h->max_huge_pages = set_max_huge_pages(h, tmp);
1405
660 return 0; 1406 return 0;
661} 1407}
662 1408
@@ -676,10 +1422,22 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
676 struct file *file, void __user *buffer, 1422 struct file *file, void __user *buffer,
677 size_t *length, loff_t *ppos) 1423 size_t *length, loff_t *ppos)
678{ 1424{
1425 struct hstate *h = &default_hstate;
1426 unsigned long tmp;
1427
1428 if (!write)
1429 tmp = h->nr_overcommit_huge_pages;
1430
1431 table->data = &tmp;
1432 table->maxlen = sizeof(unsigned long);
679 proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 1433 proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
680 spin_lock(&hugetlb_lock); 1434
681 nr_overcommit_huge_pages = sysctl_overcommit_huge_pages; 1435 if (write) {
682 spin_unlock(&hugetlb_lock); 1436 spin_lock(&hugetlb_lock);
1437 h->nr_overcommit_huge_pages = tmp;
1438 spin_unlock(&hugetlb_lock);
1439 }
1440
683 return 0; 1441 return 0;
684} 1442}
685 1443
@@ -687,34 +1445,118 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
687 1445
688int hugetlb_report_meminfo(char *buf) 1446int hugetlb_report_meminfo(char *buf)
689{ 1447{
1448 struct hstate *h = &default_hstate;
690 return sprintf(buf, 1449 return sprintf(buf,
691 "HugePages_Total: %5lu\n" 1450 "HugePages_Total: %5lu\n"
692 "HugePages_Free: %5lu\n" 1451 "HugePages_Free: %5lu\n"
693 "HugePages_Rsvd: %5lu\n" 1452 "HugePages_Rsvd: %5lu\n"
694 "HugePages_Surp: %5lu\n" 1453 "HugePages_Surp: %5lu\n"
695 "Hugepagesize: %5lu kB\n", 1454 "Hugepagesize: %5lu kB\n",
696 nr_huge_pages, 1455 h->nr_huge_pages,
697 free_huge_pages, 1456 h->free_huge_pages,
698 resv_huge_pages, 1457 h->resv_huge_pages,
699 surplus_huge_pages, 1458 h->surplus_huge_pages,
700 HPAGE_SIZE/1024); 1459 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
701} 1460}
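
For the common x86 case (huge_page_order(h) == 9, PAGE_SHIFT == 12), the new expression evaluates to 1 << (9 + 12 - 10) = 2048 kB, the same value the removed HPAGE_SIZE/1024 produced, so the /proc/meminfo output is unchanged for the default hstate.
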
702 1461
703int hugetlb_report_node_meminfo(int nid, char *buf) 1462int hugetlb_report_node_meminfo(int nid, char *buf)
704{ 1463{
1464 struct hstate *h = &default_hstate;
705 return sprintf(buf, 1465 return sprintf(buf,
706 "Node %d HugePages_Total: %5u\n" 1466 "Node %d HugePages_Total: %5u\n"
707 "Node %d HugePages_Free: %5u\n" 1467 "Node %d HugePages_Free: %5u\n"
708 "Node %d HugePages_Surp: %5u\n", 1468 "Node %d HugePages_Surp: %5u\n",
709 nid, nr_huge_pages_node[nid], 1469 nid, h->nr_huge_pages_node[nid],
710 nid, free_huge_pages_node[nid], 1470 nid, h->free_huge_pages_node[nid],
711 nid, surplus_huge_pages_node[nid]); 1471 nid, h->surplus_huge_pages_node[nid]);
712} 1472}
713 1473
714/* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 1474/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
715unsigned long hugetlb_total_pages(void) 1475unsigned long hugetlb_total_pages(void)
716{ 1476{
717 return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE); 1477 struct hstate *h = &default_hstate;
1478 return h->nr_huge_pages * pages_per_huge_page(h);
1479}
1480
1481static int hugetlb_acct_memory(struct hstate *h, long delta)
1482{
1483 int ret = -ENOMEM;
1484
1485 spin_lock(&hugetlb_lock);
1486 /*
1487 * When cpuset is configured, it breaks the strict hugetlb page
1488 * reservation as the accounting is done on a global variable. Such
1489 * reservation is completely rubbish in the presence of cpuset because
1490 * the reservation is not checked against page availability for the
 1491 * current cpuset. The application can still be OOM-killed by the
 1492 * kernel when the cpuset the task runs in has no free hugetlb pages.
 1493 * Attempting to enforce strict accounting with cpuset is almost
 1494 * impossible (or too ugly) because cpusets are so fluid that
 1495 * tasks or memory nodes can be moved between them dynamically.
1496 *
1497 * The change of semantics for shared hugetlb mapping with cpuset is
1498 * undesirable. However, in order to preserve some of the semantics,
 1499 * we fall back to checking against the current free page availability
 1500 * as a best effort, hopefully minimizing the impact of the changed
 1501 * semantics that cpuset introduces.
1502 */
1503 if (delta > 0) {
1504 if (gather_surplus_pages(h, delta) < 0)
1505 goto out;
1506
1507 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
1508 return_unused_surplus_pages(h, delta);
1509 goto out;
1510 }
1511 }
1512
1513 ret = 0;
1514 if (delta < 0)
1515 return_unused_surplus_pages(h, (unsigned long) -delta);
1516
1517out:
1518 spin_unlock(&hugetlb_lock);
1519 return ret;
1520}
1521
1522static void hugetlb_vm_op_open(struct vm_area_struct *vma)
1523{
1524 struct resv_map *reservations = vma_resv_map(vma);
1525
1526 /*
 1527 * This new VMA should share its sibling's reservation map if present.
1528 * The VMA will only ever have a valid reservation map pointer where
1529 * it is being copied for another still existing VMA. As that VMA
 1530 * has a reference to the reservation map it cannot disappear until
1531 * after this open call completes. It is therefore safe to take a
1532 * new reference here without additional locking.
1533 */
1534 if (reservations)
1535 kref_get(&reservations->refs);
1536}
1537
1538static void hugetlb_vm_op_close(struct vm_area_struct *vma)
1539{
1540 struct hstate *h = hstate_vma(vma);
1541 struct resv_map *reservations = vma_resv_map(vma);
1542 unsigned long reserve;
1543 unsigned long start;
1544 unsigned long end;
1545
1546 if (reservations) {
1547 start = vma_hugecache_offset(h, vma, vma->vm_start);
1548 end = vma_hugecache_offset(h, vma, vma->vm_end);
1549
1550 reserve = (end - start) -
1551 region_count(&reservations->regions, start, end);
1552
1553 kref_put(&reservations->refs, resv_map_release);
1554
1555 if (reserve) {
1556 hugetlb_acct_memory(h, -reserve);
1557 hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
1558 }
1559 }
718} 1560}
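
The open/close pair above keeps a MAP_PRIVATE reservation map alive across fork() by reference counting, and only the final close releases whatever reserve was never consumed. Below is a minimal user-space sketch of that kref-style lifetime; the structure and function names are hypothetical stand-ins, not the kernel's kref or resv_map API.

#include <stdio.h>
#include <stdlib.h>

/* Every duplicated VMA takes a reference on the shared reservation map;
 * the release callback runs only when the last reference is dropped. */
struct resv_map_sketch {
        int refs;
        long reserved_pages;
};

static void resv_release(struct resv_map_sketch *map)
{
        printf("last user gone, releasing %ld reserved pages\n", map->reserved_pages);
        free(map);
}

static void resv_get(struct resv_map_sketch *map)
{
        map->refs++;
}

static void resv_put(struct resv_map_sketch *map)
{
        if (--map->refs == 0)
                resv_release(map);
}

int main(void)
{
        struct resv_map_sketch *map = calloc(1, sizeof(*map));

        if (!map)
                return 1;
        map->refs = 1;              /* owner VMA, set up at mmap() time     */
        map->reserved_pages = 4;

        resv_get(map);              /* child VMA created by fork()          */
        resv_put(map);              /* child exits: the map survives        */
        resv_put(map);              /* owner unmaps: release runs only now  */
        return 0;
}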
719 1561
720/* 1562/*
@@ -731,6 +1573,8 @@ static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
731 1573
732struct vm_operations_struct hugetlb_vm_ops = { 1574struct vm_operations_struct hugetlb_vm_ops = {
733 .fault = hugetlb_vm_op_fault, 1575 .fault = hugetlb_vm_op_fault,
1576 .open = hugetlb_vm_op_open,
1577 .close = hugetlb_vm_op_close,
734}; 1578};
735 1579
736static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 1580static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -769,14 +1613,16 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
769 struct page *ptepage; 1613 struct page *ptepage;
770 unsigned long addr; 1614 unsigned long addr;
771 int cow; 1615 int cow;
1616 struct hstate *h = hstate_vma(vma);
1617 unsigned long sz = huge_page_size(h);
772 1618
773 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 1619 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
774 1620
775 for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { 1621 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
776 src_pte = huge_pte_offset(src, addr); 1622 src_pte = huge_pte_offset(src, addr);
777 if (!src_pte) 1623 if (!src_pte)
778 continue; 1624 continue;
779 dst_pte = huge_pte_alloc(dst, addr); 1625 dst_pte = huge_pte_alloc(dst, addr, sz);
780 if (!dst_pte) 1626 if (!dst_pte)
781 goto nomem; 1627 goto nomem;
782 1628
@@ -804,7 +1650,7 @@ nomem:
804} 1650}
805 1651
806void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 1652void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
807 unsigned long end) 1653 unsigned long end, struct page *ref_page)
808{ 1654{
809 struct mm_struct *mm = vma->vm_mm; 1655 struct mm_struct *mm = vma->vm_mm;
810 unsigned long address; 1656 unsigned long address;
@@ -812,6 +1658,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
812 pte_t pte; 1658 pte_t pte;
813 struct page *page; 1659 struct page *page;
814 struct page *tmp; 1660 struct page *tmp;
1661 struct hstate *h = hstate_vma(vma);
1662 unsigned long sz = huge_page_size(h);
1663
815 /* 1664 /*
816 * A page gathering list, protected by per file i_mmap_lock. The 1665 * A page gathering list, protected by per file i_mmap_lock. The
817 * lock is used to avoid list corruption from multiple unmapping 1666 * lock is used to avoid list corruption from multiple unmapping
@@ -820,11 +1669,11 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
820 LIST_HEAD(page_list); 1669 LIST_HEAD(page_list);
821 1670
822 WARN_ON(!is_vm_hugetlb_page(vma)); 1671 WARN_ON(!is_vm_hugetlb_page(vma));
823 BUG_ON(start & ~HPAGE_MASK); 1672 BUG_ON(start & ~huge_page_mask(h));
824 BUG_ON(end & ~HPAGE_MASK); 1673 BUG_ON(end & ~huge_page_mask(h));
825 1674
826 spin_lock(&mm->page_table_lock); 1675 spin_lock(&mm->page_table_lock);
827 for (address = start; address < end; address += HPAGE_SIZE) { 1676 for (address = start; address < end; address += sz) {
828 ptep = huge_pte_offset(mm, address); 1677 ptep = huge_pte_offset(mm, address);
829 if (!ptep) 1678 if (!ptep)
830 continue; 1679 continue;
@@ -832,6 +1681,27 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
832 if (huge_pmd_unshare(mm, &address, ptep)) 1681 if (huge_pmd_unshare(mm, &address, ptep))
833 continue; 1682 continue;
834 1683
1684 /*
1685 * If a reference page is supplied, it is because a specific
1686 * page is being unmapped, not a range. Ensure the page we
1687 * are about to unmap is the actual page of interest.
1688 */
1689 if (ref_page) {
1690 pte = huge_ptep_get(ptep);
1691 if (huge_pte_none(pte))
1692 continue;
1693 page = pte_page(pte);
1694 if (page != ref_page)
1695 continue;
1696
1697 /*
1698 * Mark the VMA as having unmapped its page so that
1699 * future faults in this VMA will fail rather than
1700 * looking like data was lost
1701 */
1702 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
1703 }
1704
835 pte = huge_ptep_get_and_clear(mm, address, ptep); 1705 pte = huge_ptep_get_and_clear(mm, address, ptep);
836 if (huge_pte_none(pte)) 1706 if (huge_pte_none(pte))
837 continue; 1707 continue;
@@ -850,31 +1720,71 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
850} 1720}
851 1721
852void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 1722void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
853 unsigned long end) 1723 unsigned long end, struct page *ref_page)
854{ 1724{
1725 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1726 __unmap_hugepage_range(vma, start, end, ref_page);
1727 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
1728}
1729
1730/*
1731 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 1732 * mapping it owns the reserve page for. The intention is to unmap the page
1733 * from other VMAs and let the children be SIGKILLed if they are faulting the
1734 * same region.
1735 */
1736int unmap_ref_private(struct mm_struct *mm,
1737 struct vm_area_struct *vma,
1738 struct page *page,
1739 unsigned long address)
1740{
1741 struct vm_area_struct *iter_vma;
1742 struct address_space *mapping;
1743 struct prio_tree_iter iter;
1744 pgoff_t pgoff;
1745
855 /* 1746 /*
856 * It is undesirable to test vma->vm_file as it should be non-null 1747 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
857 * for valid hugetlb area. However, vm_file will be NULL in the error 1748 * from page cache lookup which is in HPAGE_SIZE units.
858 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
859 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
860 * to clean up. Since no pte has actually been setup, it is safe to
861 * do nothing in this case.
862 */ 1749 */
863 if (vma->vm_file) { 1750 address = address & huge_page_mask(hstate_vma(vma));
864 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 1751 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
865 __unmap_hugepage_range(vma, start, end); 1752 + (vma->vm_pgoff >> PAGE_SHIFT);
866 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 1753 mapping = (struct address_space *)page_private(page);
1754
1755 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1756 /* Do not unmap the current VMA */
1757 if (iter_vma == vma)
1758 continue;
1759
1760 /*
1761 * Unmap the page from other VMAs without their own reserves.
1762 * They get marked to be SIGKILLed if they fault in these
1763 * areas. This is because a future no-page fault on this VMA
1764 * could insert a zeroed page instead of the data existing
1765 * from the time of fork. This would look like data corruption
1766 */
1767 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
1768 unmap_hugepage_range(iter_vma,
1769 address, address + HPAGE_SIZE,
1770 page);
867 } 1771 }
1772
1773 return 1;
868} 1774}
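
unmap_ref_private() walks the mapping's prio tree in PAGE_SIZE units, so the faulting address is first rounded down to a huge page boundary and only then converted to a base-page offset. A stand-alone check of that arithmetic with invented example values (4 kB base pages, 2 MB huge pages, a made-up VMA):

#include <stdio.h>

#define PAGE_SHIFT 12
#define HPAGE_SIZE (1UL << 21)            /* assumed 2 MB huge page      */
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

int main(void)
{
        unsigned long vm_start = 0x40000000UL;  /* hypothetical VMA start            */
        unsigned long vm_pgoff = 0;             /* file offset, in PAGE_SIZE units   */
        unsigned long address  = 0x40212345UL;  /* faulting address inside the VMA   */
        unsigned long pgoff;

        address &= HPAGE_MASK;                  /* round down to the huge page start */
        pgoff = ((address - vm_start) >> PAGE_SHIFT) + (vm_pgoff >> PAGE_SHIFT);

        printf("aligned address: %#lx\n", address);   /* 0x40200000                   */
        printf("pgoff:           %lu\n", pgoff);      /* 512 base pages = 1 huge page */
        return 0;
}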
869 1775
870static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 1776static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
871 unsigned long address, pte_t *ptep, pte_t pte) 1777 unsigned long address, pte_t *ptep, pte_t pte,
1778 struct page *pagecache_page)
872{ 1779{
1780 struct hstate *h = hstate_vma(vma);
873 struct page *old_page, *new_page; 1781 struct page *old_page, *new_page;
874 int avoidcopy; 1782 int avoidcopy;
1783 int outside_reserve = 0;
875 1784
876 old_page = pte_page(pte); 1785 old_page = pte_page(pte);
877 1786
1787retry_avoidcopy:
878 /* If no-one else is actually using this page, avoid the copy 1788 /* If no-one else is actually using this page, avoid the copy
879 * and just make the page writable */ 1789 * and just make the page writable */
880 avoidcopy = (page_count(old_page) == 1); 1790 avoidcopy = (page_count(old_page) == 1);
@@ -883,11 +1793,43 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
883 return 0; 1793 return 0;
884 } 1794 }
885 1795
1796 /*
1797 * If the process that created a MAP_PRIVATE mapping is about to
1798 * perform a COW due to a shared page count, attempt to satisfy
1799 * the allocation without using the existing reserves. The pagecache
1800 * page is used to determine if the reserve at this address was
1801 * consumed or not. If reserves were used, a partial faulted mapping
1802 * at the time of fork() could consume its reserves on COW instead
1803 * of the full address range.
1804 */
1805 if (!(vma->vm_flags & VM_SHARED) &&
1806 is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
1807 old_page != pagecache_page)
1808 outside_reserve = 1;
1809
886 page_cache_get(old_page); 1810 page_cache_get(old_page);
887 new_page = alloc_huge_page(vma, address); 1811 new_page = alloc_huge_page(vma, address, outside_reserve);
888 1812
889 if (IS_ERR(new_page)) { 1813 if (IS_ERR(new_page)) {
890 page_cache_release(old_page); 1814 page_cache_release(old_page);
1815
1816 /*
1817 * If a process owning a MAP_PRIVATE mapping fails to COW,
1818 * it is due to references held by a child and an insufficient
 1819 * huge page pool. To guarantee the original mapper's
1820 * reliability, unmap the page from child processes. The child
1821 * may get SIGKILLed if it later faults.
1822 */
1823 if (outside_reserve) {
1824 BUG_ON(huge_pte_none(pte));
1825 if (unmap_ref_private(mm, vma, old_page, address)) {
1826 BUG_ON(page_count(old_page) != 1);
1827 BUG_ON(huge_pte_none(pte));
1828 goto retry_avoidcopy;
1829 }
1830 WARN_ON_ONCE(1);
1831 }
1832
891 return -PTR_ERR(new_page); 1833 return -PTR_ERR(new_page);
892 } 1834 }
893 1835
@@ -896,7 +1838,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
896 __SetPageUptodate(new_page); 1838 __SetPageUptodate(new_page);
897 spin_lock(&mm->page_table_lock); 1839 spin_lock(&mm->page_table_lock);
898 1840
899 ptep = huge_pte_offset(mm, address & HPAGE_MASK); 1841 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
900 if (likely(pte_same(huge_ptep_get(ptep), pte))) { 1842 if (likely(pte_same(huge_ptep_get(ptep), pte))) {
901 /* Break COW */ 1843 /* Break COW */
902 huge_ptep_clear_flush(vma, address, ptep); 1844 huge_ptep_clear_flush(vma, address, ptep);
@@ -910,19 +1852,44 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
910 return 0; 1852 return 0;
911} 1853}
912 1854
1855/* Return the pagecache page at a given address within a VMA */
1856static struct page *hugetlbfs_pagecache_page(struct hstate *h,
1857 struct vm_area_struct *vma, unsigned long address)
1858{
1859 struct address_space *mapping;
1860 pgoff_t idx;
1861
1862 mapping = vma->vm_file->f_mapping;
1863 idx = vma_hugecache_offset(h, vma, address);
1864
1865 return find_lock_page(mapping, idx);
1866}
1867
913static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 1868static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
914 unsigned long address, pte_t *ptep, int write_access) 1869 unsigned long address, pte_t *ptep, int write_access)
915{ 1870{
1871 struct hstate *h = hstate_vma(vma);
916 int ret = VM_FAULT_SIGBUS; 1872 int ret = VM_FAULT_SIGBUS;
917 unsigned long idx; 1873 pgoff_t idx;
918 unsigned long size; 1874 unsigned long size;
919 struct page *page; 1875 struct page *page;
920 struct address_space *mapping; 1876 struct address_space *mapping;
921 pte_t new_pte; 1877 pte_t new_pte;
922 1878
1879 /*
1880 * Currently, we are forced to kill the process in the event the
1881 * original mapper has unmapped pages from the child due to a failed
 1883 * COW. Warn that such a situation has occurred as it may not be obvious
1883 */
1884 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
1885 printk(KERN_WARNING
1886 "PID %d killed due to inadequate hugepage pool\n",
1887 current->pid);
1888 return ret;
1889 }
1890
923 mapping = vma->vm_file->f_mapping; 1891 mapping = vma->vm_file->f_mapping;
924 idx = ((address - vma->vm_start) >> HPAGE_SHIFT) 1892 idx = vma_hugecache_offset(h, vma, address);
925 + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
926 1893
927 /* 1894 /*
928 * Use page lock to guard against racing truncation 1895 * Use page lock to guard against racing truncation
@@ -931,15 +1898,15 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
931retry: 1898retry:
932 page = find_lock_page(mapping, idx); 1899 page = find_lock_page(mapping, idx);
933 if (!page) { 1900 if (!page) {
934 size = i_size_read(mapping->host) >> HPAGE_SHIFT; 1901 size = i_size_read(mapping->host) >> huge_page_shift(h);
935 if (idx >= size) 1902 if (idx >= size)
936 goto out; 1903 goto out;
937 page = alloc_huge_page(vma, address); 1904 page = alloc_huge_page(vma, address, 0);
938 if (IS_ERR(page)) { 1905 if (IS_ERR(page)) {
939 ret = -PTR_ERR(page); 1906 ret = -PTR_ERR(page);
940 goto out; 1907 goto out;
941 } 1908 }
942 clear_huge_page(page, address); 1909 clear_huge_page(page, address, huge_page_size(h));
943 __SetPageUptodate(page); 1910 __SetPageUptodate(page);
944 1911
945 if (vma->vm_flags & VM_SHARED) { 1912 if (vma->vm_flags & VM_SHARED) {
@@ -955,14 +1922,14 @@ retry:
955 } 1922 }
956 1923
957 spin_lock(&inode->i_lock); 1924 spin_lock(&inode->i_lock);
958 inode->i_blocks += BLOCKS_PER_HUGEPAGE; 1925 inode->i_blocks += blocks_per_huge_page(h);
959 spin_unlock(&inode->i_lock); 1926 spin_unlock(&inode->i_lock);
960 } else 1927 } else
961 lock_page(page); 1928 lock_page(page);
962 } 1929 }
963 1930
964 spin_lock(&mm->page_table_lock); 1931 spin_lock(&mm->page_table_lock);
965 size = i_size_read(mapping->host) >> HPAGE_SHIFT; 1932 size = i_size_read(mapping->host) >> huge_page_shift(h);
966 if (idx >= size) 1933 if (idx >= size)
967 goto backout; 1934 goto backout;
968 1935
@@ -976,7 +1943,7 @@ retry:
976 1943
977 if (write_access && !(vma->vm_flags & VM_SHARED)) { 1944 if (write_access && !(vma->vm_flags & VM_SHARED)) {
978 /* Optimization, do the COW without a second fault */ 1945 /* Optimization, do the COW without a second fault */
979 ret = hugetlb_cow(mm, vma, address, ptep, new_pte); 1946 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
980 } 1947 }
981 1948
982 spin_unlock(&mm->page_table_lock); 1949 spin_unlock(&mm->page_table_lock);
@@ -998,8 +1965,9 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
998 pte_t entry; 1965 pte_t entry;
999 int ret; 1966 int ret;
1000 static DEFINE_MUTEX(hugetlb_instantiation_mutex); 1967 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
1968 struct hstate *h = hstate_vma(vma);
1001 1969
1002 ptep = huge_pte_alloc(mm, address); 1970 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
1003 if (!ptep) 1971 if (!ptep)
1004 return VM_FAULT_OOM; 1972 return VM_FAULT_OOM;
1005 1973
@@ -1021,14 +1989,30 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1021 spin_lock(&mm->page_table_lock); 1989 spin_lock(&mm->page_table_lock);
1022 /* Check for a racing update before calling hugetlb_cow */ 1990 /* Check for a racing update before calling hugetlb_cow */
1023 if (likely(pte_same(entry, huge_ptep_get(ptep)))) 1991 if (likely(pte_same(entry, huge_ptep_get(ptep))))
1024 if (write_access && !pte_write(entry)) 1992 if (write_access && !pte_write(entry)) {
1025 ret = hugetlb_cow(mm, vma, address, ptep, entry); 1993 struct page *page;
1994 page = hugetlbfs_pagecache_page(h, vma, address);
1995 ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
1996 if (page) {
1997 unlock_page(page);
1998 put_page(page);
1999 }
2000 }
1026 spin_unlock(&mm->page_table_lock); 2001 spin_unlock(&mm->page_table_lock);
1027 mutex_unlock(&hugetlb_instantiation_mutex); 2002 mutex_unlock(&hugetlb_instantiation_mutex);
1028 2003
1029 return ret; 2004 return ret;
1030} 2005}
1031 2006
 2007/* Can be overridden by architectures */
2008__attribute__((weak)) struct page *
2009follow_huge_pud(struct mm_struct *mm, unsigned long address,
2010 pud_t *pud, int write)
2011{
2012 BUG();
2013 return NULL;
2014}
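
follow_huge_pud() gets a weak default that simply BUG()s, so only architectures with PUD-sized huge pages need to supply a real (strong) definition, which then wins at link time. A small stand-alone illustration of the weak-symbol mechanism; the function name below is made up and has nothing to do with the kernel symbol.

#include <stdio.h>

/* This weak definition is used unless another object file in the link
 * supplies a strong definition of the same symbol, exactly as an
 * architecture would override the generic follow_huge_pud() stub. */
__attribute__((weak)) const char *arch_hugepage_backend(void)
{
        return "generic (weak default)";
}

int main(void)
{
        printf("backend: %s\n", arch_hugepage_backend());
        return 0;
}

A BUG()ing default is tolerable here because the stub should only be reachable when pud_huge() reports a huge PUD, which generic code is not expected to do on its own.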
2015
1032int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 2016int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
1033 struct page **pages, struct vm_area_struct **vmas, 2017 struct page **pages, struct vm_area_struct **vmas,
1034 unsigned long *position, int *length, int i, 2018 unsigned long *position, int *length, int i,
@@ -1037,6 +2021,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
1037 unsigned long pfn_offset; 2021 unsigned long pfn_offset;
1038 unsigned long vaddr = *position; 2022 unsigned long vaddr = *position;
1039 int remainder = *length; 2023 int remainder = *length;
2024 struct hstate *h = hstate_vma(vma);
1040 2025
1041 spin_lock(&mm->page_table_lock); 2026 spin_lock(&mm->page_table_lock);
1042 while (vaddr < vma->vm_end && remainder) { 2027 while (vaddr < vma->vm_end && remainder) {
@@ -1048,7 +2033,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
1048 * each hugepage. We have to make sure we get the 2033 * each hugepage. We have to make sure we get the
1049 * first, for the page indexing below to work. 2034 * first, for the page indexing below to work.
1050 */ 2035 */
1051 pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); 2036 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
1052 2037
1053 if (!pte || huge_pte_none(huge_ptep_get(pte)) || 2038 if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
1054 (write && !pte_write(huge_ptep_get(pte)))) { 2039 (write && !pte_write(huge_ptep_get(pte)))) {
@@ -1066,7 +2051,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
1066 break; 2051 break;
1067 } 2052 }
1068 2053
1069 pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT; 2054 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
1070 page = pte_page(huge_ptep_get(pte)); 2055 page = pte_page(huge_ptep_get(pte));
1071same_page: 2056same_page:
1072 if (pages) { 2057 if (pages) {
@@ -1082,7 +2067,7 @@ same_page:
1082 --remainder; 2067 --remainder;
1083 ++i; 2068 ++i;
1084 if (vaddr < vma->vm_end && remainder && 2069 if (vaddr < vma->vm_end && remainder &&
1085 pfn_offset < HPAGE_SIZE/PAGE_SIZE) { 2070 pfn_offset < pages_per_huge_page(h)) {
1086 /* 2071 /*
1087 * We use pfn_offset to avoid touching the pageframes 2072 * We use pfn_offset to avoid touching the pageframes
1088 * of this compound page. 2073 * of this compound page.
@@ -1104,13 +2089,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
1104 unsigned long start = address; 2089 unsigned long start = address;
1105 pte_t *ptep; 2090 pte_t *ptep;
1106 pte_t pte; 2091 pte_t pte;
2092 struct hstate *h = hstate_vma(vma);
1107 2093
1108 BUG_ON(address >= end); 2094 BUG_ON(address >= end);
1109 flush_cache_range(vma, address, end); 2095 flush_cache_range(vma, address, end);
1110 2096
1111 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 2097 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1112 spin_lock(&mm->page_table_lock); 2098 spin_lock(&mm->page_table_lock);
1113 for (; address < end; address += HPAGE_SIZE) { 2099 for (; address < end; address += huge_page_size(h)) {
1114 ptep = huge_pte_offset(mm, address); 2100 ptep = huge_pte_offset(mm, address);
1115 if (!ptep) 2101 if (!ptep)
1116 continue; 2102 continue;
@@ -1128,195 +2114,59 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
1128 flush_tlb_range(vma, start, end); 2114 flush_tlb_range(vma, start, end);
1129} 2115}
1130 2116
1131struct file_region { 2117int hugetlb_reserve_pages(struct inode *inode,
1132 struct list_head link; 2118 long from, long to,
1133 long from; 2119 struct vm_area_struct *vma)
1134 long to;
1135};
1136
1137static long region_add(struct list_head *head, long f, long t)
1138{
1139 struct file_region *rg, *nrg, *trg;
1140
1141 /* Locate the region we are either in or before. */
1142 list_for_each_entry(rg, head, link)
1143 if (f <= rg->to)
1144 break;
1145
1146 /* Round our left edge to the current segment if it encloses us. */
1147 if (f > rg->from)
1148 f = rg->from;
1149
1150 /* Check for and consume any regions we now overlap with. */
1151 nrg = rg;
1152 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
1153 if (&rg->link == head)
1154 break;
1155 if (rg->from > t)
1156 break;
1157
1158 /* If this area reaches higher then extend our area to
1159 * include it completely. If this is not the first area
1160 * which we intend to reuse, free it. */
1161 if (rg->to > t)
1162 t = rg->to;
1163 if (rg != nrg) {
1164 list_del(&rg->link);
1165 kfree(rg);
1166 }
1167 }
1168 nrg->from = f;
1169 nrg->to = t;
1170 return 0;
1171}
1172
1173static long region_chg(struct list_head *head, long f, long t)
1174{
1175 struct file_region *rg, *nrg;
1176 long chg = 0;
1177
1178 /* Locate the region we are before or in. */
1179 list_for_each_entry(rg, head, link)
1180 if (f <= rg->to)
1181 break;
1182
1183 /* If we are below the current region then a new region is required.
1184 * Subtle, allocate a new region at the position but make it zero
1185 * size such that we can guarantee to record the reservation. */
1186 if (&rg->link == head || t < rg->from) {
1187 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
1188 if (!nrg)
1189 return -ENOMEM;
1190 nrg->from = f;
1191 nrg->to = f;
1192 INIT_LIST_HEAD(&nrg->link);
1193 list_add(&nrg->link, rg->link.prev);
1194
1195 return t - f;
1196 }
1197
1198 /* Round our left edge to the current segment if it encloses us. */
1199 if (f > rg->from)
1200 f = rg->from;
1201 chg = t - f;
1202
1203 /* Check for and consume any regions we now overlap with. */
1204 list_for_each_entry(rg, rg->link.prev, link) {
1205 if (&rg->link == head)
1206 break;
1207 if (rg->from > t)
1208 return chg;
1209
1210 /* We overlap with this area, if it extends further than
1211 * us then we must extend ourselves. Account for its
1212 * existing reservation. */
1213 if (rg->to > t) {
1214 chg += rg->to - t;
1215 t = rg->to;
1216 }
1217 chg -= rg->to - rg->from;
1218 }
1219 return chg;
1220}
1221
1222static long region_truncate(struct list_head *head, long end)
1223{ 2120{
1224 struct file_region *rg, *trg; 2121 long ret, chg;
1225 long chg = 0; 2122 struct hstate *h = hstate_inode(inode);
1226 2123
1227 /* Locate the region we are either in or before. */ 2124 if (vma && vma->vm_flags & VM_NORESERVE)
1228 list_for_each_entry(rg, head, link)
1229 if (end <= rg->to)
1230 break;
1231 if (&rg->link == head)
1232 return 0; 2125 return 0;
1233 2126
1234 /* If we are in the middle of a region then adjust it. */
1235 if (end > rg->from) {
1236 chg = rg->to - end;
1237 rg->to = end;
1238 rg = list_entry(rg->link.next, typeof(*rg), link);
1239 }
1240
1241 /* Drop any remaining regions. */
1242 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
1243 if (&rg->link == head)
1244 break;
1245 chg += rg->to - rg->from;
1246 list_del(&rg->link);
1247 kfree(rg);
1248 }
1249 return chg;
1250}
1251
1252static int hugetlb_acct_memory(long delta)
1253{
1254 int ret = -ENOMEM;
1255
1256 spin_lock(&hugetlb_lock);
1257 /* 2127 /*
1258 * When cpuset is configured, it breaks the strict hugetlb page 2128 * Shared mappings base their reservation on the number of pages that
1259 * reservation as the accounting is done on a global variable. Such 2129 * are already allocated on behalf of the file. Private mappings need
1260 * reservation is completely rubbish in the presence of cpuset because 2130 * to reserve the full area even if read-only as mprotect() may be
1261 * the reservation is not checked against page availability for the 2131 * called to make the mapping read-write. Assume !vma is a shm mapping
1262 * current cpuset. Application can still potentially OOM'ed by kernel
1263 * with lack of free htlb page in cpuset that the task is in.
1264 * Attempt to enforce strict accounting with cpuset is almost
1265 * impossible (or too ugly) because cpuset is too fluid that
1266 * task or memory node can be dynamically moved between cpusets.
1267 *
1268 * The change of semantics for shared hugetlb mapping with cpuset is
1269 * undesirable. However, in order to preserve some of the semantics,
1270 * we fall back to check against current free page availability as
1271 * a best attempt and hopefully to minimize the impact of changing
1272 * semantics that cpuset has.
1273 */ 2132 */
1274 if (delta > 0) { 2133 if (!vma || vma->vm_flags & VM_SHARED)
1275 if (gather_surplus_pages(delta) < 0) 2134 chg = region_chg(&inode->i_mapping->private_list, from, to);
1276 goto out; 2135 else {
1277 2136 struct resv_map *resv_map = resv_map_alloc();
1278 if (delta > cpuset_mems_nr(free_huge_pages_node)) { 2137 if (!resv_map)
1279 return_unused_surplus_pages(delta); 2138 return -ENOMEM;
1280 goto out;
1281 }
1282 }
1283
1284 ret = 0;
1285 if (delta < 0)
1286 return_unused_surplus_pages((unsigned long) -delta);
1287 2139
1288out: 2140 chg = to - from;
1289 spin_unlock(&hugetlb_lock);
1290 return ret;
1291}
1292 2141
1293int hugetlb_reserve_pages(struct inode *inode, long from, long to) 2142 set_vma_resv_map(vma, resv_map);
1294{ 2143 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
1295 long ret, chg; 2144 }
1296 2145
1297 chg = region_chg(&inode->i_mapping->private_list, from, to);
1298 if (chg < 0) 2146 if (chg < 0)
1299 return chg; 2147 return chg;
1300 2148
1301 if (hugetlb_get_quota(inode->i_mapping, chg)) 2149 if (hugetlb_get_quota(inode->i_mapping, chg))
1302 return -ENOSPC; 2150 return -ENOSPC;
1303 ret = hugetlb_acct_memory(chg); 2151 ret = hugetlb_acct_memory(h, chg);
1304 if (ret < 0) { 2152 if (ret < 0) {
1305 hugetlb_put_quota(inode->i_mapping, chg); 2153 hugetlb_put_quota(inode->i_mapping, chg);
1306 return ret; 2154 return ret;
1307 } 2155 }
1308 region_add(&inode->i_mapping->private_list, from, to); 2156 if (!vma || vma->vm_flags & VM_SHARED)
2157 region_add(&inode->i_mapping->private_list, from, to);
1309 return 0; 2158 return 0;
1310} 2159}
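
hugetlb_reserve_pages() now charges shared mappings only for pages that are not already covered by the file's region list, while private mappings always reserve the full range and record ownership in a per-VMA resv_map. The toy model below reduces that charge decision to a single pre-existing region; the real region list handles arbitrary numbers of overlapping regions.

#include <stdio.h>

/* Charge for a shared mapping: only the part of [from, to) that is not
 * already covered by the existing reservation [rsv_from, rsv_to). */
static long shared_chg(long rsv_from, long rsv_to, long from, long to)
{
        long covered_from = from > rsv_from ? from : rsv_from;
        long covered_to   = to   < rsv_to   ? to   : rsv_to;
        long covered      = covered_to > covered_from ? covered_to - covered_from : 0;

        return (to - from) - covered;
}

int main(void)
{
        /* The file already has huge pages [0, 10) reserved by an earlier mapping. */
        printf("shared,  map [5, 20): chg = %ld\n", shared_chg(0, 10, 5, 20)); /* 10 */
        printf("private, map [5, 20): chg = %ld\n", 20L - 5L);                 /* 15 */
        return 0;
}

Private mappings cannot share the file-wide count because each one may need its own copy of every page after a COW, which is why the full range is charged even for a read-only mapping that mprotect() could later make writable.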
1311 2160
1312void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 2161void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
1313{ 2162{
2163 struct hstate *h = hstate_inode(inode);
1314 long chg = region_truncate(&inode->i_mapping->private_list, offset); 2164 long chg = region_truncate(&inode->i_mapping->private_list, offset);
1315 2165
1316 spin_lock(&inode->i_lock); 2166 spin_lock(&inode->i_lock);
1317 inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed; 2167 inode->i_blocks -= blocks_per_huge_page(h);
1318 spin_unlock(&inode->i_lock); 2168 spin_unlock(&inode->i_lock);
1319 2169
1320 hugetlb_put_quota(inode->i_mapping, (chg - freed)); 2170 hugetlb_put_quota(inode->i_mapping, (chg - freed));
1321 hugetlb_acct_memory(-(chg - freed)); 2171 hugetlb_acct_memory(h, -(chg - freed));
1322} 2172}
diff --git a/mm/internal.h b/mm/internal.h
index 0034e947e4bc..1f43f7416972 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -13,6 +13,11 @@
13 13
14#include <linux/mm.h> 14#include <linux/mm.h>
15 15
16void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
17 unsigned long floor, unsigned long ceiling);
18
19extern void prep_compound_page(struct page *page, unsigned long order);
20
16static inline void set_page_count(struct page *page, int v) 21static inline void set_page_count(struct page *page, int v)
17{ 22{
18 atomic_set(&page->_count, v); 23 atomic_set(&page->_count, v);
@@ -59,4 +64,60 @@ static inline unsigned long page_order(struct page *page)
59#define __paginginit __init 64#define __paginginit __init
60#endif 65#endif
61 66
67/* Memory initialisation debug and verification */
68enum mminit_level {
69 MMINIT_WARNING,
70 MMINIT_VERIFY,
71 MMINIT_TRACE
72};
73
74#ifdef CONFIG_DEBUG_MEMORY_INIT
75
76extern int mminit_loglevel;
77
78#define mminit_dprintk(level, prefix, fmt, arg...) \
79do { \
80 if (level < mminit_loglevel) { \
81 printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
82 printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
83 } \
84} while (0)
85
86extern void mminit_verify_pageflags_layout(void);
87extern void mminit_verify_page_links(struct page *page,
88 enum zone_type zone, unsigned long nid, unsigned long pfn);
89extern void mminit_verify_zonelist(void);
90
91#else
92
93static inline void mminit_dprintk(enum mminit_level level,
94 const char *prefix, const char *fmt, ...)
95{
96}
97
98static inline void mminit_verify_pageflags_layout(void)
99{
100}
101
102static inline void mminit_verify_page_links(struct page *page,
103 enum zone_type zone, unsigned long nid, unsigned long pfn)
104{
105}
106
107static inline void mminit_verify_zonelist(void)
108{
109}
110#endif /* CONFIG_DEBUG_MEMORY_INIT */
111
112/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
113#if defined(CONFIG_SPARSEMEM)
114extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
115 unsigned long *end_pfn);
116#else
117static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
118 unsigned long *end_pfn)
119{
120}
121#endif /* CONFIG_SPARSEMEM */
122
62#endif 123#endif
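
The mminit_dprintk() macro added to mm/internal.h gates verbose memory-initialisation checks behind a runtime loglevel. Below is a user-space sketch of the same pattern, keeping the kernel's strict "level < mminit_loglevel" test; the fprintf()-based output and the chosen loglevel are illustrative only.

#include <stdio.h>

enum mminit_level { MMINIT_WARNING, MMINIT_VERIFY, MMINIT_TRACE };

static int mminit_loglevel = MMINIT_TRACE;   /* raised for the demo */

#define mminit_dprintk(level, prefix, fmt, ...)                               \
do {                                                                          \
        if ((level) < mminit_loglevel)                                        \
                fprintf(stderr, "mminit::" prefix " " fmt, ##__VA_ARGS__);    \
} while (0)

int main(void)
{
        mminit_dprintk(MMINIT_WARNING, "pageflags", "section width %d too small\n", 3);
        mminit_dprintk(MMINIT_TRACE, "zonelist", "suppressed at this loglevel\n");
        return 0;
}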
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e46451e1d9b7..fba566c51322 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -35,9 +35,9 @@
35 35
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37 37
38struct cgroup_subsys mem_cgroup_subsys; 38struct cgroup_subsys mem_cgroup_subsys __read_mostly;
39static const int MEM_CGROUP_RECLAIM_RETRIES = 5; 39static struct kmem_cache *page_cgroup_cache __read_mostly;
40static struct kmem_cache *page_cgroup_cache; 40#define MEM_CGROUP_RECLAIM_RETRIES 5
41 41
42/* 42/*
43 * Statistics for memory cgroup. 43 * Statistics for memory cgroup.
@@ -166,7 +166,6 @@ struct page_cgroup {
166 struct list_head lru; /* per cgroup LRU list */ 166 struct list_head lru; /* per cgroup LRU list */
167 struct page *page; 167 struct page *page;
168 struct mem_cgroup *mem_cgroup; 168 struct mem_cgroup *mem_cgroup;
169 int ref_cnt; /* cached, mapped, migrating */
170 int flags; 169 int flags;
171}; 170};
172#define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */ 171#define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */
@@ -185,6 +184,7 @@ static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
185enum charge_type { 184enum charge_type {
186 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 185 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
187 MEM_CGROUP_CHARGE_TYPE_MAPPED, 186 MEM_CGROUP_CHARGE_TYPE_MAPPED,
187 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
188}; 188};
189 189
190/* 190/*
@@ -296,7 +296,7 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
296 MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1; 296 MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
297 297
298 mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false); 298 mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
299 list_del_init(&pc->lru); 299 list_del(&pc->lru);
300} 300}
301 301
302static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz, 302static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
@@ -354,6 +354,9 @@ void mem_cgroup_move_lists(struct page *page, bool active)
354 struct mem_cgroup_per_zone *mz; 354 struct mem_cgroup_per_zone *mz;
355 unsigned long flags; 355 unsigned long flags;
356 356
357 if (mem_cgroup_subsys.disabled)
358 return;
359
357 /* 360 /*
358 * We cannot lock_page_cgroup while holding zone's lru_lock, 361 * We cannot lock_page_cgroup while holding zone's lru_lock,
359 * because other holders of lock_page_cgroup can be interrupted 362 * because other holders of lock_page_cgroup can be interrupted
@@ -524,7 +527,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
524 * < 0 if the cgroup is over its limit 527 * < 0 if the cgroup is over its limit
525 */ 528 */
526static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 529static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
527 gfp_t gfp_mask, enum charge_type ctype) 530 gfp_t gfp_mask, enum charge_type ctype,
531 struct mem_cgroup *memcg)
528{ 532{
529 struct mem_cgroup *mem; 533 struct mem_cgroup *mem;
530 struct page_cgroup *pc; 534 struct page_cgroup *pc;
@@ -532,35 +536,8 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
532 unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 536 unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
533 struct mem_cgroup_per_zone *mz; 537 struct mem_cgroup_per_zone *mz;
534 538
535 if (mem_cgroup_subsys.disabled) 539 pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
536 return 0; 540 if (unlikely(pc == NULL))
537
538 /*
539 * Should page_cgroup's go to their own slab?
540 * One could optimize the performance of the charging routine
541 * by saving a bit in the page_flags and using it as a lock
542 * to see if the cgroup page already has a page_cgroup associated
543 * with it
544 */
545retry:
546 lock_page_cgroup(page);
547 pc = page_get_page_cgroup(page);
548 /*
549 * The page_cgroup exists and
550 * the page has already been accounted.
551 */
552 if (pc) {
553 VM_BUG_ON(pc->page != page);
554 VM_BUG_ON(pc->ref_cnt <= 0);
555
556 pc->ref_cnt++;
557 unlock_page_cgroup(page);
558 goto done;
559 }
560 unlock_page_cgroup(page);
561
562 pc = kmem_cache_zalloc(page_cgroup_cache, gfp_mask);
563 if (pc == NULL)
564 goto err; 541 goto err;
565 542
566 /* 543 /*
@@ -569,16 +546,18 @@ retry:
569 * thread group leader migrates. It's possible that mm is not 546 * thread group leader migrates. It's possible that mm is not
570 * set, if so charge the init_mm (happens for pagecache usage). 547 * set, if so charge the init_mm (happens for pagecache usage).
571 */ 548 */
572 if (!mm) 549 if (likely(!memcg)) {
573 mm = &init_mm; 550 rcu_read_lock();
574 551 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
575 rcu_read_lock(); 552 /*
576 mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); 553 * For every charge from the cgroup, increment reference count
577 /* 554 */
578 * For every charge from the cgroup, increment reference count 555 css_get(&mem->css);
579 */ 556 rcu_read_unlock();
580 css_get(&mem->css); 557 } else {
581 rcu_read_unlock(); 558 mem = memcg;
559 css_get(&memcg->css);
560 }
582 561
583 while (res_counter_charge(&mem->res, PAGE_SIZE)) { 562 while (res_counter_charge(&mem->res, PAGE_SIZE)) {
584 if (!(gfp_mask & __GFP_WAIT)) 563 if (!(gfp_mask & __GFP_WAIT))
@@ -603,25 +582,24 @@ retry:
603 } 582 }
604 } 583 }
605 584
606 pc->ref_cnt = 1;
607 pc->mem_cgroup = mem; 585 pc->mem_cgroup = mem;
608 pc->page = page; 586 pc->page = page;
609 pc->flags = PAGE_CGROUP_FLAG_ACTIVE; 587 /*
588 * If a page is accounted as a page cache, insert to inactive list.
589 * If anon, insert to active list.
590 */
610 if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) 591 if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
611 pc->flags = PAGE_CGROUP_FLAG_CACHE; 592 pc->flags = PAGE_CGROUP_FLAG_CACHE;
593 else
594 pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
612 595
613 lock_page_cgroup(page); 596 lock_page_cgroup(page);
614 if (page_get_page_cgroup(page)) { 597 if (unlikely(page_get_page_cgroup(page))) {
615 unlock_page_cgroup(page); 598 unlock_page_cgroup(page);
616 /*
617 * Another charge has been added to this page already.
618 * We take lock_page_cgroup(page) again and read
619 * page->cgroup, increment refcnt.... just retry is OK.
620 */
621 res_counter_uncharge(&mem->res, PAGE_SIZE); 599 res_counter_uncharge(&mem->res, PAGE_SIZE);
622 css_put(&mem->css); 600 css_put(&mem->css);
623 kmem_cache_free(page_cgroup_cache, pc); 601 kmem_cache_free(page_cgroup_cache, pc);
624 goto retry; 602 goto done;
625 } 603 }
626 page_assign_page_cgroup(page, pc); 604 page_assign_page_cgroup(page, pc);
627 605
@@ -642,24 +620,65 @@ err:
642 620
643int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 621int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
644{ 622{
623 if (mem_cgroup_subsys.disabled)
624 return 0;
625
626 /*
627 * If already mapped, we don't have to account.
628 * If page cache, page->mapping has address_space.
 629 * But page->mapping may hold a stale anon_vma pointer; detect that
 630 * with the PageAnon() check. A newly mapped anon page's page->mapping
 631 * is NULL.
632 */
633 if (page_mapped(page) || (page->mapping && !PageAnon(page)))
634 return 0;
635 if (unlikely(!mm))
636 mm = &init_mm;
645 return mem_cgroup_charge_common(page, mm, gfp_mask, 637 return mem_cgroup_charge_common(page, mm, gfp_mask,
646 MEM_CGROUP_CHARGE_TYPE_MAPPED); 638 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
647} 639}
648 640
649int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 641int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
650 gfp_t gfp_mask) 642 gfp_t gfp_mask)
651{ 643{
652 if (!mm) 644 if (mem_cgroup_subsys.disabled)
645 return 0;
646
647 /*
648 * Corner case handling. This is called from add_to_page_cache()
649 * in usual. But some FS (shmem) precharges this page before calling it
650 * and call add_to_page_cache() with GFP_NOWAIT.
651 *
652 * For GFP_NOWAIT case, the page may be pre-charged before calling
653 * add_to_page_cache(). (See shmem.c) check it here and avoid to call
654 * charge twice. (It works but has to pay a bit larger cost.)
655 */
656 if (!(gfp_mask & __GFP_WAIT)) {
657 struct page_cgroup *pc;
658
659 lock_page_cgroup(page);
660 pc = page_get_page_cgroup(page);
661 if (pc) {
662 VM_BUG_ON(pc->page != page);
663 VM_BUG_ON(!pc->mem_cgroup);
664 unlock_page_cgroup(page);
665 return 0;
666 }
667 unlock_page_cgroup(page);
668 }
669
670 if (unlikely(!mm))
653 mm = &init_mm; 671 mm = &init_mm;
672
654 return mem_cgroup_charge_common(page, mm, gfp_mask, 673 return mem_cgroup_charge_common(page, mm, gfp_mask,
655 MEM_CGROUP_CHARGE_TYPE_CACHE); 674 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
656} 675}
657 676
658/* 677/*
659 * Uncharging is always a welcome operation, we never complain, simply 678 * uncharge if !page_mapped(page)
660 * uncharge.
661 */ 679 */
662void mem_cgroup_uncharge_page(struct page *page) 680static void
681__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
663{ 682{
664 struct page_cgroup *pc; 683 struct page_cgroup *pc;
665 struct mem_cgroup *mem; 684 struct mem_cgroup *mem;
@@ -674,98 +693,151 @@ void mem_cgroup_uncharge_page(struct page *page)
674 */ 693 */
675 lock_page_cgroup(page); 694 lock_page_cgroup(page);
676 pc = page_get_page_cgroup(page); 695 pc = page_get_page_cgroup(page);
677 if (!pc) 696 if (unlikely(!pc))
678 goto unlock; 697 goto unlock;
679 698
680 VM_BUG_ON(pc->page != page); 699 VM_BUG_ON(pc->page != page);
681 VM_BUG_ON(pc->ref_cnt <= 0);
682 700
683 if (--(pc->ref_cnt) == 0) { 701 if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
684 mz = page_cgroup_zoneinfo(pc); 702 && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
685 spin_lock_irqsave(&mz->lru_lock, flags); 703 || page_mapped(page)))
686 __mem_cgroup_remove_list(mz, pc); 704 goto unlock;
687 spin_unlock_irqrestore(&mz->lru_lock, flags);
688 705
689 page_assign_page_cgroup(page, NULL); 706 mz = page_cgroup_zoneinfo(pc);
690 unlock_page_cgroup(page); 707 spin_lock_irqsave(&mz->lru_lock, flags);
708 __mem_cgroup_remove_list(mz, pc);
709 spin_unlock_irqrestore(&mz->lru_lock, flags);
691 710
692 mem = pc->mem_cgroup; 711 page_assign_page_cgroup(page, NULL);
693 res_counter_uncharge(&mem->res, PAGE_SIZE); 712 unlock_page_cgroup(page);
694 css_put(&mem->css);
695 713
696 kmem_cache_free(page_cgroup_cache, pc); 714 mem = pc->mem_cgroup;
697 return; 715 res_counter_uncharge(&mem->res, PAGE_SIZE);
698 } 716 css_put(&mem->css);
699 717
718 kmem_cache_free(page_cgroup_cache, pc);
719 return;
700unlock: 720unlock:
701 unlock_page_cgroup(page); 721 unlock_page_cgroup(page);
702} 722}
703 723
724void mem_cgroup_uncharge_page(struct page *page)
725{
726 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
727}
728
729void mem_cgroup_uncharge_cache_page(struct page *page)
730{
731 VM_BUG_ON(page_mapped(page));
732 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
733}
734
704/* 735/*
705 * Returns non-zero if a page (under migration) has valid page_cgroup member. 736 * Before starting migration, account against new page.
706 * Refcnt of page_cgroup is incremented.
707 */ 737 */
708int mem_cgroup_prepare_migration(struct page *page) 738int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
709{ 739{
710 struct page_cgroup *pc; 740 struct page_cgroup *pc;
741 struct mem_cgroup *mem = NULL;
742 enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
743 int ret = 0;
711 744
712 if (mem_cgroup_subsys.disabled) 745 if (mem_cgroup_subsys.disabled)
713 return 0; 746 return 0;
714 747
715 lock_page_cgroup(page); 748 lock_page_cgroup(page);
716 pc = page_get_page_cgroup(page); 749 pc = page_get_page_cgroup(page);
717 if (pc) 750 if (pc) {
718 pc->ref_cnt++; 751 mem = pc->mem_cgroup;
752 css_get(&mem->css);
753 if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
754 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
755 }
719 unlock_page_cgroup(page); 756 unlock_page_cgroup(page);
720 return pc != NULL; 757 if (mem) {
758 ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
759 ctype, mem);
760 css_put(&mem->css);
761 }
762 return ret;
721} 763}
722 764
723void mem_cgroup_end_migration(struct page *page) 765/* remove redundant charge if migration failed*/
766void mem_cgroup_end_migration(struct page *newpage)
724{ 767{
725 mem_cgroup_uncharge_page(page); 768 /*
769 * At success, page->mapping is not NULL.
770 * special rollback care is necessary when
771 * 1. at migration failure. (newpage->mapping is cleared in this case)
772 * 2. the newpage was moved but not remapped again because the task
773 * exits and the newpage is obsolete. In this case, the new page
774 * may be a swapcache. So, we just call mem_cgroup_uncharge_page()
775 * always for avoiding mess. The page_cgroup will be removed if
776 * unnecessary. File cache pages is still on radix-tree. Don't
777 * care it.
778 */
779 if (!newpage->mapping)
780 __mem_cgroup_uncharge_common(newpage,
781 MEM_CGROUP_CHARGE_TYPE_FORCE);
782 else if (PageAnon(newpage))
783 mem_cgroup_uncharge_page(newpage);
726} 784}
727 785
728/* 786/*
729 * We know both *page* and *newpage* are now not-on-LRU and PG_locked. 787 * Try to shrink memory usage under the specified resource controller.
730 * And no race with uncharge() routines because page_cgroup for *page* 788 * This is typically used to reclaim shmem pages, reducing the side
731 * has extra one reference by mem_cgroup_prepare_migration. 789 * effects of shmem page allocation on the mem_cgroups that use shmem.
732 */ 790 */
733void mem_cgroup_page_migration(struct page *page, struct page *newpage) 791int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
734{ 792{
735 struct page_cgroup *pc; 793 struct mem_cgroup *mem;
736 struct mem_cgroup_per_zone *mz; 794 int progress = 0;
737 unsigned long flags; 795 int retry = MEM_CGROUP_RECLAIM_RETRIES;
738 796
739 lock_page_cgroup(page); 797 if (mem_cgroup_subsys.disabled)
740 pc = page_get_page_cgroup(page); 798 return 0;
741 if (!pc) {
742 unlock_page_cgroup(page);
743 return;
744 }
745 799
746 mz = page_cgroup_zoneinfo(pc); 800 rcu_read_lock();
747 spin_lock_irqsave(&mz->lru_lock, flags); 801 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
748 __mem_cgroup_remove_list(mz, pc); 802 css_get(&mem->css);
749 spin_unlock_irqrestore(&mz->lru_lock, flags); 803 rcu_read_unlock();
750 804
751 page_assign_page_cgroup(page, NULL); 805 do {
752 unlock_page_cgroup(page); 806 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
807 } while (!progress && --retry);
753 808
754 pc->page = newpage; 809 css_put(&mem->css);
755 lock_page_cgroup(newpage); 810 if (!retry)
756 page_assign_page_cgroup(newpage, pc); 811 return -ENOMEM;
812 return 0;
813}
757 814
758 mz = page_cgroup_zoneinfo(pc); 815int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
759 spin_lock_irqsave(&mz->lru_lock, flags); 816{
760 __mem_cgroup_add_list(mz, pc); 817
761 spin_unlock_irqrestore(&mz->lru_lock, flags); 818 int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
819 int progress;
820 int ret = 0;
762 821
763 unlock_page_cgroup(newpage); 822 while (res_counter_set_limit(&memcg->res, val)) {
823 if (signal_pending(current)) {
824 ret = -EINTR;
825 break;
826 }
827 if (!retry_count) {
828 ret = -EBUSY;
829 break;
830 }
831 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
832 if (!progress)
833 retry_count--;
834 }
835 return ret;
764} 836}
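
mem_cgroup_resize_limit() lowers the limit inside a bounded retry loop: every failed res_counter_set_limit() attempt triggers reclaim, and only reclaim passes that make no progress consume one of the retries. The toy model below mirrors that control flow with invented usage and reclaim numbers; it is not the res_counter implementation.

#include <stdio.h>

#define RECLAIM_RETRIES 5

static long usage = 800;                         /* pretend pages charged      */

static int try_set_limit(long new_limit)
{
        return usage <= new_limit ? 0 : -1;      /* fails while over the limit */
}

static int reclaim_some(void)
{
        if (usage > 0) {
                usage -= 100;                    /* pretend reclaim frees 100  */
                return 1;                        /* made progress              */
        }
        return 0;
}

int main(void)
{
        int retry_count = RECLAIM_RETRIES;
        long limit = 500;

        while (try_set_limit(limit)) {
                if (!retry_count) {
                        printf("resize failed: -EBUSY\n");
                        return 1;
                }
                if (!reclaim_some())
                        retry_count--;           /* only a fruitless pass burns a retry */
        }
        printf("limit set to %ld (usage now %ld)\n", limit, usage);
        return 0;
}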
765 837
838
766/* 839/*
767 * This routine traverses the page_cgroups in the given list and drops them all. 840 * This routine traverses the page_cgroups in the given list and drops them all.
768 * This routine ignores page_cgroup->ref_cnt.
769 * *And* this routine doesn't reclaim the page itself, just removes the page_cgroup. 841 * *And* this routine doesn't reclaim the page itself, just removes the page_cgroup.
770 */ 842 */
771#define FORCE_UNCHARGE_BATCH (128) 843#define FORCE_UNCHARGE_BATCH (128)
@@ -790,12 +862,20 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
790 page = pc->page; 862 page = pc->page;
791 get_page(page); 863 get_page(page);
792 spin_unlock_irqrestore(&mz->lru_lock, flags); 864 spin_unlock_irqrestore(&mz->lru_lock, flags);
793 mem_cgroup_uncharge_page(page); 865 /*
794 put_page(page); 866 * Check if this page is on LRU. !LRU page can be found
795 if (--count <= 0) { 867 * if it's under page migration.
796 count = FORCE_UNCHARGE_BATCH; 868 */
869 if (PageLRU(page)) {
870 __mem_cgroup_uncharge_common(page,
871 MEM_CGROUP_CHARGE_TYPE_FORCE);
872 put_page(page);
873 if (--count <= 0) {
874 count = FORCE_UNCHARGE_BATCH;
875 cond_resched();
876 }
877 } else
797 cond_resched(); 878 cond_resched();
798 }
799 spin_lock_irqsave(&mz->lru_lock, flags); 879 spin_lock_irqsave(&mz->lru_lock, flags);
800 } 880 }
801 spin_unlock_irqrestore(&mz->lru_lock, flags); 881 spin_unlock_irqrestore(&mz->lru_lock, flags);
@@ -810,9 +890,6 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
810 int ret = -EBUSY; 890 int ret = -EBUSY;
811 int node, zid; 891 int node, zid;
812 892
813 if (mem_cgroup_subsys.disabled)
814 return 0;
815
816 css_get(&mem->css); 893 css_get(&mem->css);
817 /* 894 /*
818 * page reclaim code (kswapd etc..) will move pages between 895 * page reclaim code (kswapd etc..) will move pages between
@@ -838,32 +915,34 @@ out:
838 return ret; 915 return ret;
839} 916}
840 917
841static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
842{
843 *tmp = memparse(buf, &buf);
844 if (*buf != '\0')
845 return -EINVAL;
846
847 /*
848 * Round up the value to the closest page size
849 */
850 *tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
851 return 0;
852}
853
854static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 918static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
855{ 919{
856 return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res, 920 return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
857 cft->private); 921 cft->private);
858} 922}
859 923/*
860static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft, 924 * The user of this function is...
861 struct file *file, const char __user *userbuf, 925 * RES_LIMIT.
862 size_t nbytes, loff_t *ppos) 926 */
927static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
928 const char *buffer)
863{ 929{
864 return res_counter_write(&mem_cgroup_from_cont(cont)->res, 930 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
865 cft->private, userbuf, nbytes, ppos, 931 unsigned long long val;
866 mem_cgroup_write_strategy); 932 int ret;
933
934 switch (cft->private) {
935 case RES_LIMIT:
 936 /* This helper does all the necessary parsing; reuse it */
937 ret = res_counter_memparse_write_strategy(buffer, &val);
938 if (!ret)
939 ret = mem_cgroup_resize_limit(memcg, val);
940 break;
941 default:
942 ret = -EINVAL; /* should be BUG() ? */
943 break;
944 }
945 return ret;
867} 946}
868 947
869static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) 948static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
@@ -940,7 +1019,7 @@ static struct cftype mem_cgroup_files[] = {
940 { 1019 {
941 .name = "limit_in_bytes", 1020 .name = "limit_in_bytes",
942 .private = RES_LIMIT, 1021 .private = RES_LIMIT,
943 .write = mem_cgroup_write, 1022 .write_string = mem_cgroup_write,
944 .read_u64 = mem_cgroup_read, 1023 .read_u64 = mem_cgroup_read,
945 }, 1024 },
946 { 1025 {
@@ -1070,8 +1149,6 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
1070static int mem_cgroup_populate(struct cgroup_subsys *ss, 1149static int mem_cgroup_populate(struct cgroup_subsys *ss,
1071 struct cgroup *cont) 1150 struct cgroup *cont)
1072{ 1151{
1073 if (mem_cgroup_subsys.disabled)
1074 return 0;
1075 return cgroup_add_files(cont, ss, mem_cgroup_files, 1152 return cgroup_add_files(cont, ss, mem_cgroup_files,
1076 ARRAY_SIZE(mem_cgroup_files)); 1153 ARRAY_SIZE(mem_cgroup_files));
1077} 1154}
@@ -1084,9 +1161,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
1084 struct mm_struct *mm; 1161 struct mm_struct *mm;
1085 struct mem_cgroup *mem, *old_mem; 1162 struct mem_cgroup *mem, *old_mem;
1086 1163
1087 if (mem_cgroup_subsys.disabled)
1088 return;
1089
1090 mm = get_task_mm(p); 1164 mm = get_task_mm(p);
1091 if (mm == NULL) 1165 if (mm == NULL)
1092 return; 1166 return;
diff --git a/mm/memory.c b/mm/memory.c
index 2302d228fe04..262e3eb6601a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -61,6 +61,8 @@
61#include <linux/swapops.h> 61#include <linux/swapops.h>
62#include <linux/elf.h> 62#include <linux/elf.h>
63 63
64#include "internal.h"
65
64#ifndef CONFIG_NEED_MULTIPLE_NODES 66#ifndef CONFIG_NEED_MULTIPLE_NODES
65/* use the per-pgdat data instead for discontigmem - mbligh */ 67/* use the per-pgdat data instead for discontigmem - mbligh */
66unsigned long max_mapnr; 68unsigned long max_mapnr;
@@ -211,7 +213,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
211 * 213 *
212 * Must be called with pagetable lock held. 214 * Must be called with pagetable lock held.
213 */ 215 */
214void free_pgd_range(struct mmu_gather **tlb, 216void free_pgd_range(struct mmu_gather *tlb,
215 unsigned long addr, unsigned long end, 217 unsigned long addr, unsigned long end,
216 unsigned long floor, unsigned long ceiling) 218 unsigned long floor, unsigned long ceiling)
217{ 219{
@@ -262,16 +264,16 @@ void free_pgd_range(struct mmu_gather **tlb,
262 return; 264 return;
263 265
264 start = addr; 266 start = addr;
265 pgd = pgd_offset((*tlb)->mm, addr); 267 pgd = pgd_offset(tlb->mm, addr);
266 do { 268 do {
267 next = pgd_addr_end(addr, end); 269 next = pgd_addr_end(addr, end);
268 if (pgd_none_or_clear_bad(pgd)) 270 if (pgd_none_or_clear_bad(pgd))
269 continue; 271 continue;
270 free_pud_range(*tlb, pgd, addr, next, floor, ceiling); 272 free_pud_range(tlb, pgd, addr, next, floor, ceiling);
271 } while (pgd++, addr = next, addr != end); 273 } while (pgd++, addr = next, addr != end);
272} 274}
273 275
274void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma, 276void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
275 unsigned long floor, unsigned long ceiling) 277 unsigned long floor, unsigned long ceiling)
276{ 278{
277 while (vma) { 279 while (vma) {
@@ -899,9 +901,23 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
899 } 901 }
900 902
901 if (unlikely(is_vm_hugetlb_page(vma))) { 903 if (unlikely(is_vm_hugetlb_page(vma))) {
902 unmap_hugepage_range(vma, start, end); 904 /*
903 zap_work -= (end - start) / 905 * It is undesirable to test vma->vm_file as it
904 (HPAGE_SIZE / PAGE_SIZE); 906 * should be non-null for valid hugetlb area.
907 * However, vm_file will be NULL in the error
908 * cleanup path of do_mmap_pgoff. When
909 * hugetlbfs ->mmap method fails,
910 * do_mmap_pgoff() nullifies vma->vm_file
911 * before calling this function to clean up.
912 * Since no pte has actually been setup, it is
913 * safe to do nothing in this case.
914 */
915 if (vma->vm_file) {
916 unmap_hugepage_range(vma, start, end, NULL);
917 zap_work -= (end - start) /
918 pages_per_huge_page(hstate_vma(vma));
919 }
920
905 start = end; 921 start = end;
906 } else 922 } else
907 start = unmap_page_range(*tlbp, vma, 923 start = unmap_page_range(*tlbp, vma,
@@ -982,19 +998,24 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
982 goto no_page_table; 998 goto no_page_table;
983 999
984 pud = pud_offset(pgd, address); 1000 pud = pud_offset(pgd, address);
985 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 1001 if (pud_none(*pud))
986 goto no_page_table; 1002 goto no_page_table;
987 1003 if (pud_huge(*pud)) {
1004 BUG_ON(flags & FOLL_GET);
1005 page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
1006 goto out;
1007 }
1008 if (unlikely(pud_bad(*pud)))
1009 goto no_page_table;
1010
988 pmd = pmd_offset(pud, address); 1011 pmd = pmd_offset(pud, address);
989 if (pmd_none(*pmd)) 1012 if (pmd_none(*pmd))
990 goto no_page_table; 1013 goto no_page_table;
991
992 if (pmd_huge(*pmd)) { 1014 if (pmd_huge(*pmd)) {
993 BUG_ON(flags & FOLL_GET); 1015 BUG_ON(flags & FOLL_GET);
994 page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); 1016 page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
995 goto out; 1017 goto out;
996 } 1018 }
997
998 if (unlikely(pmd_bad(*pmd))) 1019 if (unlikely(pmd_bad(*pmd)))
999 goto no_page_table; 1020 goto no_page_table;
1000 1021
@@ -1058,11 +1079,9 @@ static inline int use_zero_page(struct vm_area_struct *vma)
1058 if (vma->vm_flags & (VM_LOCKED | VM_SHARED)) 1079 if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
1059 return 0; 1080 return 0;
1060 /* 1081 /*
1061 * And if we have a fault or a nopfn routine, it's not an 1082 * And if we have a fault routine, it's not an anonymous region.
1062 * anonymous region.
1063 */ 1083 */
1064 return !vma->vm_ops || 1084 return !vma->vm_ops || !vma->vm_ops->fault;
1065 (!vma->vm_ops->fault && !vma->vm_ops->nopfn);
1066} 1085}
1067 1086
1068int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1087int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
@@ -1338,6 +1357,11 @@ out:
1338 * 1357 *
1339 * This function should only be called from a vm_ops->fault handler, and 1358 * This function should only be called from a vm_ops->fault handler, and
1340 * in that case the handler should return NULL. 1359 * in that case the handler should return NULL.
1360 *
1361 * vma cannot be a COW mapping.
1362 *
1363 * As this is called only for pages that do not currently exist, we
1364 * do not need to flush old virtual caches or the TLB.
1341 */ 1365 */
1342int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 1366int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1343 unsigned long pfn) 1367 unsigned long pfn)
@@ -1548,6 +1572,8 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
1548 unsigned long next; 1572 unsigned long next;
1549 int err; 1573 int err;
1550 1574
1575 BUG_ON(pud_huge(*pud));
1576
1551 pmd = pmd_alloc(mm, pud, addr); 1577 pmd = pmd_alloc(mm, pud, addr);
1552 if (!pmd) 1578 if (!pmd)
1553 return -ENOMEM; 1579 return -ENOMEM;
@@ -2501,59 +2527,6 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2501 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 2527 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2502} 2528}
2503 2529
2504
2505/*
2506 * do_no_pfn() tries to create a new page mapping for a page without
2507 * a struct_page backing it
2508 *
2509 * As this is called only for pages that do not currently exist, we
2510 * do not need to flush old virtual caches or the TLB.
2511 *
2512 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2513 * but allow concurrent faults), and pte mapped but not yet locked.
2514 * We return with mmap_sem still held, but pte unmapped and unlocked.
2515 *
2516 * It is expected that the ->nopfn handler always returns the same pfn
2517 * for a given virtual mapping.
2518 *
2519 * Mark this `noinline' to prevent it from bloating the main pagefault code.
2520 */
2521static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
2522 unsigned long address, pte_t *page_table, pmd_t *pmd,
2523 int write_access)
2524{
2525 spinlock_t *ptl;
2526 pte_t entry;
2527 unsigned long pfn;
2528
2529 pte_unmap(page_table);
2530 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2531 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2532
2533 pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
2534
2535 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2536
2537 if (unlikely(pfn == NOPFN_OOM))
2538 return VM_FAULT_OOM;
2539 else if (unlikely(pfn == NOPFN_SIGBUS))
2540 return VM_FAULT_SIGBUS;
2541 else if (unlikely(pfn == NOPFN_REFAULT))
2542 return 0;
2543
2544 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2545
2546 /* Only go through if we didn't race with anybody else... */
2547 if (pte_none(*page_table)) {
2548 entry = pfn_pte(pfn, vma->vm_page_prot);
2549 if (write_access)
2550 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2551 set_pte_at(mm, address, page_table, entry);
2552 }
2553 pte_unmap_unlock(page_table, ptl);
2554 return 0;
2555}
2556
2557/* 2530/*
2558 * Fault of a previously existing named mapping. Repopulate the pte 2531 * Fault of a previously existing named mapping. Repopulate the pte
2559 * from the encoded file_pte if possible. This enables swappable 2532 * from the encoded file_pte if possible. This enables swappable
@@ -2614,9 +2587,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
2614 if (likely(vma->vm_ops->fault)) 2587 if (likely(vma->vm_ops->fault))
2615 return do_linear_fault(mm, vma, address, 2588 return do_linear_fault(mm, vma, address,
2616 pte, pmd, write_access, entry); 2589 pte, pmd, write_access, entry);
2617 if (unlikely(vma->vm_ops->nopfn))
2618 return do_no_pfn(mm, vma, address, pte,
2619 pmd, write_access);
2620 } 2590 }
2621 return do_anonymous_page(mm, vma, address, 2591 return do_anonymous_page(mm, vma, address,
2622 pte, pmd, write_access); 2592 pte, pmd, write_access);
@@ -2804,6 +2774,86 @@ int in_gate_area_no_task(unsigned long addr)
2804 2774
2805#endif /* __HAVE_ARCH_GATE_AREA */ 2775#endif /* __HAVE_ARCH_GATE_AREA */
2806 2776
2777#ifdef CONFIG_HAVE_IOREMAP_PROT
2778static resource_size_t follow_phys(struct vm_area_struct *vma,
2779 unsigned long address, unsigned int flags,
2780 unsigned long *prot)
2781{
2782 pgd_t *pgd;
2783 pud_t *pud;
2784 pmd_t *pmd;
2785 pte_t *ptep, pte;
2786 spinlock_t *ptl;
2787 resource_size_t phys_addr = 0;
2788 struct mm_struct *mm = vma->vm_mm;
2789
2790 VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
2791
2792 pgd = pgd_offset(mm, address);
2793 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
2794 goto no_page_table;
2795
2796 pud = pud_offset(pgd, address);
2797 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
2798 goto no_page_table;
2799
2800 pmd = pmd_offset(pud, address);
2801 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
2802 goto no_page_table;
2803
2804 /* We cannot handle huge page PFN maps. Luckily they don't exist. */
2805 if (pmd_huge(*pmd))
2806 goto no_page_table;
2807
2808 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
2809 if (!ptep)
2810 goto out;
2811
2812 pte = *ptep;
2813 if (!pte_present(pte))
2814 goto unlock;
2815 if ((flags & FOLL_WRITE) && !pte_write(pte))
2816 goto unlock;
2817 phys_addr = pte_pfn(pte);
2818 phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
2819
2820 *prot = pgprot_val(pte_pgprot(pte));
2821
2822unlock:
2823 pte_unmap_unlock(ptep, ptl);
2824out:
2825 return phys_addr;
2826no_page_table:
2827 return 0;
2828}
2829
2830int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2831 void *buf, int len, int write)
2832{
2833 resource_size_t phys_addr;
2834 unsigned long prot = 0;
2835 void *maddr;
2836 int offset = addr & (PAGE_SIZE-1);
2837
2838 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
2839 return -EINVAL;
2840
2841 phys_addr = follow_phys(vma, addr, write, &prot);
2842
2843 if (!phys_addr)
2844 return -EINVAL;
2845
2846 maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
2847 if (write)
2848 memcpy_toio(maddr + offset, buf, len);
2849 else
2850 memcpy_fromio(buf, maddr + offset, len);
2851 iounmap(maddr);
2852
2853 return len;
2854}
2855#endif
2856
2807/* 2857/*
2808 * Access another process' address space. 2858 * Access another process' address space.
2809 * Source/target buffer must be kernel space, 2859 * Source/target buffer must be kernel space,
@@ -2813,7 +2863,6 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
2813{ 2863{
2814 struct mm_struct *mm; 2864 struct mm_struct *mm;
2815 struct vm_area_struct *vma; 2865 struct vm_area_struct *vma;
2816 struct page *page;
2817 void *old_buf = buf; 2866 void *old_buf = buf;
2818 2867
2819 mm = get_task_mm(tsk); 2868 mm = get_task_mm(tsk);
@@ -2825,28 +2874,44 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
2825 while (len) { 2874 while (len) {
2826 int bytes, ret, offset; 2875 int bytes, ret, offset;
2827 void *maddr; 2876 void *maddr;
2877 struct page *page = NULL;
2828 2878
2829 ret = get_user_pages(tsk, mm, addr, 1, 2879 ret = get_user_pages(tsk, mm, addr, 1,
2830 write, 1, &page, &vma); 2880 write, 1, &page, &vma);
2831 if (ret <= 0) 2881 if (ret <= 0) {
2832 break; 2882 /*
2833 2883 * Check if this is a VM_IO | VM_PFNMAP VMA, which
2834 bytes = len; 2884 * we can access using slightly different code.
2835 offset = addr & (PAGE_SIZE-1); 2885 */
2836 if (bytes > PAGE_SIZE-offset) 2886#ifdef CONFIG_HAVE_IOREMAP_PROT
2837 bytes = PAGE_SIZE-offset; 2887 vma = find_vma(mm, addr);
2838 2888 if (!vma)
2839 maddr = kmap(page); 2889 break;
2840 if (write) { 2890 if (vma->vm_ops && vma->vm_ops->access)
2841 copy_to_user_page(vma, page, addr, 2891 ret = vma->vm_ops->access(vma, addr, buf,
2842 maddr + offset, buf, bytes); 2892 len, write);
2843 set_page_dirty_lock(page); 2893 if (ret <= 0)
2894#endif
2895 break;
2896 bytes = ret;
2844 } else { 2897 } else {
2845 copy_from_user_page(vma, page, addr, 2898 bytes = len;
2846 buf, maddr + offset, bytes); 2899 offset = addr & (PAGE_SIZE-1);
2900 if (bytes > PAGE_SIZE-offset)
2901 bytes = PAGE_SIZE-offset;
2902
2903 maddr = kmap(page);
2904 if (write) {
2905 copy_to_user_page(vma, page, addr,
2906 maddr + offset, buf, bytes);
2907 set_page_dirty_lock(page);
2908 } else {
2909 copy_from_user_page(vma, page, addr,
2910 buf, maddr + offset, bytes);
2911 }
2912 kunmap(page);
2913 page_cache_release(page);
2847 } 2914 }
2848 kunmap(page);
2849 page_cache_release(page);
2850 len -= bytes; 2915 len -= bytes;
2851 buf += bytes; 2916 buf += bytes;
2852 addr += bytes; 2917 addr += bytes;
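
The access_process_vm() hunk above falls back to the new vm_ops->access hook when get_user_pages() fails, which is what happens for VM_IO | VM_PFNMAP mappings that have no struct page behind them; generic_access_phys() then resolves the physical address with follow_phys() and goes through ioremap_prot(). A minimal hedged sketch of how a driver that maps device memory to userspace could opt in; the driver names and the mmap body are illustrative, only generic_access_phys and the ->access field come from this patch:

	#include <linux/mm.h>

	static struct vm_operations_struct example_vm_ops = {
	#ifdef CONFIG_HAVE_IOREMAP_PROT
		.access = generic_access_phys,	/* lets ptrace/gdb read the mapping */
	#endif
	};

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_flags |= VM_IO | VM_PFNMAP;
		vma->vm_ops = &example_vm_ops;
		/* a remap_pfn_range() of the device aperture would follow here */
		return 0;
	}
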
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 833f854eabe5..89fee2dcb039 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -62,9 +62,9 @@ static void release_memory_resource(struct resource *res)
62 62
63#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE 63#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
64#ifndef CONFIG_SPARSEMEM_VMEMMAP 64#ifndef CONFIG_SPARSEMEM_VMEMMAP
65static void get_page_bootmem(unsigned long info, struct page *page, int magic) 65static void get_page_bootmem(unsigned long info, struct page *page, int type)
66{ 66{
67 atomic_set(&page->_mapcount, magic); 67 atomic_set(&page->_mapcount, type);
68 SetPagePrivate(page); 68 SetPagePrivate(page);
69 set_page_private(page, info); 69 set_page_private(page, info);
70 atomic_inc(&page->_count); 70 atomic_inc(&page->_count);
@@ -72,10 +72,10 @@ static void get_page_bootmem(unsigned long info, struct page *page, int magic)
72 72
73void put_page_bootmem(struct page *page) 73void put_page_bootmem(struct page *page)
74{ 74{
75 int magic; 75 int type;
76 76
77 magic = atomic_read(&page->_mapcount); 77 type = atomic_read(&page->_mapcount);
78 BUG_ON(magic >= -1); 78 BUG_ON(type >= -1);
79 79
80 if (atomic_dec_return(&page->_count) == 1) { 80 if (atomic_dec_return(&page->_count) == 1) {
81 ClearPagePrivate(page); 81 ClearPagePrivate(page);
@@ -86,7 +86,7 @@ void put_page_bootmem(struct page *page)
86 86
87} 87}
88 88
89void register_page_bootmem_info_section(unsigned long start_pfn) 89static void register_page_bootmem_info_section(unsigned long start_pfn)
90{ 90{
91 unsigned long *usemap, mapsize, section_nr, i; 91 unsigned long *usemap, mapsize, section_nr, i;
92 struct mem_section *ms; 92 struct mem_section *ms;
@@ -119,7 +119,7 @@ void register_page_bootmem_info_section(unsigned long start_pfn)
119 mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; 119 mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
120 120
121 for (i = 0; i < mapsize; i++, page++) 121 for (i = 0; i < mapsize; i++, page++)
122 get_page_bootmem(section_nr, page, MIX_INFO); 122 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
123 123
124} 124}
125 125
@@ -429,7 +429,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
429 429
430 if (need_zonelists_rebuild) 430 if (need_zonelists_rebuild)
431 build_all_zonelists(); 431 build_all_zonelists();
432 vm_total_pages = nr_free_pagecache_pages(); 432 else
433 vm_total_pages = nr_free_pagecache_pages();
434
433 writeback_set_ratelimit(); 435 writeback_set_ratelimit();
434 436
435 if (onlined_pages) 437 if (onlined_pages)
@@ -455,7 +457,7 @@ static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
455 /* we can use NODE_DATA(nid) from here */ 457 /* we can use NODE_DATA(nid) from here */
456 458
457 /* init node's zones as empty zones, we don't have any present pages.*/ 459 /* init node's zones as empty zones, we don't have any present pages.*/
458 free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size); 460 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
459 461
460 return pgdat; 462 return pgdat;
461} 463}
@@ -521,6 +523,66 @@ EXPORT_SYMBOL_GPL(add_memory);
521 523
522#ifdef CONFIG_MEMORY_HOTREMOVE 524#ifdef CONFIG_MEMORY_HOTREMOVE
523/* 525/*
526 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
527 * set and the size of the free page is given by page_order(). Using this,
528 * the function determines if the pageblock contains only free pages.
 529 * Due to buddy constraints, a free page at least the size of a pageblock will
 530 * be located at the start of the pageblock.
531 */
532static inline int pageblock_free(struct page *page)
533{
534 return PageBuddy(page) && page_order(page) >= pageblock_order;
535}
536
537/* Return the start of the next active pageblock after a given page */
538static struct page *next_active_pageblock(struct page *page)
539{
540 int pageblocks_stride;
541
542 /* Ensure the starting page is pageblock-aligned */
543 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
544
545 /* Move forward by at least 1 * pageblock_nr_pages */
546 pageblocks_stride = 1;
547
548 /* If the entire pageblock is free, move to the end of free page */
549 if (pageblock_free(page))
550 pageblocks_stride += page_order(page) - pageblock_order;
551
552 return page + (pageblocks_stride * pageblock_nr_pages);
553}
554
555/* Checks if this range of memory is likely to be hot-removable. */
556int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
557{
558 int type;
559 struct page *page = pfn_to_page(start_pfn);
560 struct page *end_page = page + nr_pages;
561
562 /* Check the starting page of each pageblock within the range */
563 for (; page < end_page; page = next_active_pageblock(page)) {
564 type = get_pageblock_migratetype(page);
565
566 /*
567 * A pageblock containing MOVABLE or free pages is considered
568 * removable
569 */
570 if (type != MIGRATE_MOVABLE && !pageblock_free(page))
571 return 0;
572
573 /*
574 * A pageblock starting with a PageReserved page is not
575 * considered removable.
576 */
577 if (PageReserved(page))
578 return 0;
579 }
580
581 /* All pageblocks in the memory block are likely to be hot-removable */
582 return 1;
583}
584
585/*
 524 * Confirm all pages in a range [start, end) belong to the same zone.               586
525 */ 587 */
526static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) 588static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
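
The new is_mem_section_removable() walks a pfn range pageblock by pageblock and reports whether everything in it is either MOVABLE or sitting on the buddy free lists, i.e. whether offlining is likely to succeed. A hedged sketch of a caller, for example a sysfs "removable" attribute for one memory section; the wrapper itself is illustrative, while section_nr_to_pfn() and PAGES_PER_SECTION are existing sparsemem symbols:

	#include <linux/kernel.h>
	#include <linux/mmzone.h>
	#include <linux/memory_hotplug.h>

	static ssize_t show_removable(unsigned long section_nr, char *buf)
	{
		unsigned long start_pfn = section_nr_to_pfn(section_nr);
		int removable = is_mem_section_removable(start_pfn,
							 PAGES_PER_SECTION);

		return sprintf(buf, "%d\n", removable);	/* 1 = likely hot-removable */
	}
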
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c94e58b192c3..e550bec20582 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1481,7 +1481,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1481 1481
1482 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 1482 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1483 zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1483 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1484 HPAGE_SHIFT), gfp_flags); 1484 huge_page_shift(hstate_vma(vma))), gfp_flags);
1485 } else { 1485 } else {
1486 zl = policy_zonelist(gfp_flags, *mpol); 1486 zl = policy_zonelist(gfp_flags, *mpol);
1487 if ((*mpol)->mode == MPOL_BIND) 1487 if ((*mpol)->mode == MPOL_BIND)
@@ -2220,9 +2220,12 @@ static void check_huge_range(struct vm_area_struct *vma,
2220{ 2220{
2221 unsigned long addr; 2221 unsigned long addr;
2222 struct page *page; 2222 struct page *page;
2223 struct hstate *h = hstate_vma(vma);
2224 unsigned long sz = huge_page_size(h);
2223 2225
2224 for (addr = start; addr < end; addr += HPAGE_SIZE) { 2226 for (addr = start; addr < end; addr += sz) {
2225 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK); 2227 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2228 addr & huge_page_mask(h));
2226 pte_t pte; 2229 pte_t pte;
2227 2230
2228 if (!ptep) 2231 if (!ptep)
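
Both mempolicy.c hunks replace the global HPAGE_SHIFT/HPAGE_SIZE/HPAGE_MASK constants with values derived from the VMA's hstate, which is what allows several huge page sizes to coexist. A hedged sketch of the per-VMA geometry pattern used throughout this series; count_mapped() is illustrative, the accessors are the real hugetlb.h ones:

	#include <linux/hugetlb.h>

	static unsigned long count_mapped(struct vm_area_struct *vma)
	{
		struct hstate *h = hstate_vma(vma);	/* size class of this mapping */
		unsigned long sz = huge_page_size(h);	/* no longer one global HPAGE_SIZE */
		unsigned long addr, mapped = 0;

		for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
			pte_t *ptep = huge_pte_offset(vma->vm_mm,
						      addr & huge_page_mask(h));
			if (ptep)
				mapped++;	/* a pte slot exists for this huge page */
		}
		return mapped;
	}
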
diff --git a/mm/migrate.c b/mm/migrate.c
index 55bd355d170d..d8c65a65c61d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -30,6 +30,7 @@
30#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
31#include <linux/security.h> 31#include <linux/security.h>
32#include <linux/memcontrol.h> 32#include <linux/memcontrol.h>
33#include <linux/syscalls.h>
33 34
34#include "internal.h" 35#include "internal.h"
35 36
@@ -357,6 +358,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
357 __inc_zone_page_state(newpage, NR_FILE_PAGES); 358 __inc_zone_page_state(newpage, NR_FILE_PAGES);
358 359
359 write_unlock_irq(&mapping->tree_lock); 360 write_unlock_irq(&mapping->tree_lock);
361 if (!PageSwapCache(newpage)) {
362 mem_cgroup_uncharge_cache_page(page);
363 }
360 364
361 return 0; 365 return 0;
362} 366}
@@ -610,7 +614,6 @@ static int move_to_new_page(struct page *newpage, struct page *page)
610 rc = fallback_migrate_page(mapping, newpage, page); 614 rc = fallback_migrate_page(mapping, newpage, page);
611 615
612 if (!rc) { 616 if (!rc) {
613 mem_cgroup_page_migration(page, newpage);
614 remove_migration_ptes(page, newpage); 617 remove_migration_ptes(page, newpage);
615 } else 618 } else
616 newpage->mapping = NULL; 619 newpage->mapping = NULL;
@@ -640,6 +643,14 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
640 /* page was freed from under us. So we are done. */ 643 /* page was freed from under us. So we are done. */
641 goto move_newpage; 644 goto move_newpage;
642 645
646 charge = mem_cgroup_prepare_migration(page, newpage);
647 if (charge == -ENOMEM) {
648 rc = -ENOMEM;
649 goto move_newpage;
650 }
651 /* prepare cgroup just returns 0 or -ENOMEM */
652 BUG_ON(charge);
653
643 rc = -EAGAIN; 654 rc = -EAGAIN;
644 if (TestSetPageLocked(page)) { 655 if (TestSetPageLocked(page)) {
645 if (!force) 656 if (!force)
@@ -691,19 +702,14 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
691 goto rcu_unlock; 702 goto rcu_unlock;
692 } 703 }
693 704
694 charge = mem_cgroup_prepare_migration(page);
695 /* Establish migration ptes or remove ptes */ 705 /* Establish migration ptes or remove ptes */
696 try_to_unmap(page, 1); 706 try_to_unmap(page, 1);
697 707
698 if (!page_mapped(page)) 708 if (!page_mapped(page))
699 rc = move_to_new_page(newpage, page); 709 rc = move_to_new_page(newpage, page);
700 710
701 if (rc) { 711 if (rc)
702 remove_migration_ptes(page, page); 712 remove_migration_ptes(page, page);
703 if (charge)
704 mem_cgroup_end_migration(page);
705 } else if (charge)
706 mem_cgroup_end_migration(newpage);
707rcu_unlock: 713rcu_unlock:
708 if (rcu_locked) 714 if (rcu_locked)
709 rcu_read_unlock(); 715 rcu_read_unlock();
@@ -724,6 +730,8 @@ unlock:
724 } 730 }
725 731
726move_newpage: 732move_newpage:
733 if (!charge)
734 mem_cgroup_end_migration(newpage);
727 /* 735 /*
728 * Move the new page to the LRU. If migration was not successful 736 * Move the new page to the LRU. If migration was not successful
729 * then this will free the page. 737 * then this will free the page.
@@ -1070,7 +1078,6 @@ out2:
1070 mmput(mm); 1078 mmput(mm);
1071 return err; 1079 return err;
1072} 1080}
1073#endif
1074 1081
1075/* 1082/*
1076 * Call migration functions in the vma_ops that may prepare 1083 * Call migration functions in the vma_ops that may prepare
@@ -1092,3 +1099,4 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
1092 } 1099 }
1093 return err; 1100 return err;
1094} 1101}
1102#endif
diff --git a/mm/mm_init.c b/mm/mm_init.c
new file mode 100644
index 000000000000..c6af41ea9994
--- /dev/null
+++ b/mm/mm_init.c
@@ -0,0 +1,152 @@
1/*
2 * mm_init.c - Memory initialisation verification and debugging
3 *
4 * Copyright 2008 IBM Corporation, 2008
5 * Author Mel Gorman <mel@csn.ul.ie>
6 *
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/kobject.h>
11#include <linux/module.h>
12#include "internal.h"
13
14#ifdef CONFIG_DEBUG_MEMORY_INIT
15int __meminitdata mminit_loglevel;
16
17/* The zonelists are simply reported, validation is manual. */
18void mminit_verify_zonelist(void)
19{
20 int nid;
21
22 if (mminit_loglevel < MMINIT_VERIFY)
23 return;
24
25 for_each_online_node(nid) {
26 pg_data_t *pgdat = NODE_DATA(nid);
27 struct zone *zone;
28 struct zoneref *z;
29 struct zonelist *zonelist;
30 int i, listid, zoneid;
31
32 BUG_ON(MAX_ZONELISTS > 2);
33 for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
34
35 /* Identify the zone and nodelist */
36 zoneid = i % MAX_NR_ZONES;
37 listid = i / MAX_NR_ZONES;
38 zonelist = &pgdat->node_zonelists[listid];
39 zone = &pgdat->node_zones[zoneid];
40 if (!populated_zone(zone))
41 continue;
42
43 /* Print information about the zonelist */
44 printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
45 listid > 0 ? "thisnode" : "general", nid,
46 zone->name);
47
48 /* Iterate the zonelist */
49 for_each_zone_zonelist(zone, z, zonelist, zoneid) {
50#ifdef CONFIG_NUMA
51 printk(KERN_CONT "%d:%s ",
52 zone->node, zone->name);
53#else
54 printk(KERN_CONT "0:%s ", zone->name);
55#endif /* CONFIG_NUMA */
56 }
57 printk(KERN_CONT "\n");
58 }
59 }
60}
61
62void __init mminit_verify_pageflags_layout(void)
63{
64 int shift, width;
65 unsigned long or_mask, add_mask;
66
67 shift = 8 * sizeof(unsigned long);
68 width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH;
69 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
70 "Section %d Node %d Zone %d Flags %d\n",
71 SECTIONS_WIDTH,
72 NODES_WIDTH,
73 ZONES_WIDTH,
74 NR_PAGEFLAGS);
75 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
76 "Section %d Node %d Zone %d\n",
77#ifdef SECTIONS_SHIFT
78 SECTIONS_SHIFT,
79#else
80 0,
81#endif
82 NODES_SHIFT,
83 ZONES_SHIFT);
84 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_offsets",
85 "Section %lu Node %lu Zone %lu\n",
86 (unsigned long)SECTIONS_PGSHIFT,
87 (unsigned long)NODES_PGSHIFT,
88 (unsigned long)ZONES_PGSHIFT);
89 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_zoneid",
90 "Zone ID: %lu -> %lu\n",
91 (unsigned long)ZONEID_PGOFF,
92 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT));
93 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
94 "location: %d -> %d unused %d -> %d flags %d -> %d\n",
95 shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
96#ifdef NODE_NOT_IN_PAGE_FLAGS
97 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
98 "Node not in page flags");
99#endif
100
101 if (SECTIONS_WIDTH) {
102 shift -= SECTIONS_WIDTH;
103 BUG_ON(shift != SECTIONS_PGSHIFT);
104 }
105 if (NODES_WIDTH) {
106 shift -= NODES_WIDTH;
107 BUG_ON(shift != NODES_PGSHIFT);
108 }
109 if (ZONES_WIDTH) {
110 shift -= ZONES_WIDTH;
111 BUG_ON(shift != ZONES_PGSHIFT);
112 }
113
114 /* Check for bitmask overlaps */
115 or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
116 (NODES_MASK << NODES_PGSHIFT) |
117 (SECTIONS_MASK << SECTIONS_PGSHIFT);
118 add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
119 (NODES_MASK << NODES_PGSHIFT) +
120 (SECTIONS_MASK << SECTIONS_PGSHIFT);
121 BUG_ON(or_mask != add_mask);
122}
123
124void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone,
125 unsigned long nid, unsigned long pfn)
126{
127 BUG_ON(page_to_nid(page) != nid);
128 BUG_ON(page_zonenum(page) != zone);
129 BUG_ON(page_to_pfn(page) != pfn);
130}
131
132static __init int set_mminit_loglevel(char *str)
133{
134 get_option(&str, &mminit_loglevel);
135 return 0;
136}
137early_param("mminit_loglevel", set_mminit_loglevel);
138#endif /* CONFIG_DEBUG_MEMORY_INIT */
139
140struct kobject *mm_kobj;
141EXPORT_SYMBOL_GPL(mm_kobj);
142
143static int __init mm_sysfs_init(void)
144{
145 mm_kobj = kobject_create_and_add("mm", kernel_kobj);
146 if (!mm_kobj)
147 return -ENOMEM;
148
149 return 0;
150}
151
152__initcall(mm_sysfs_init);
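
The mminit_dprintk() calls that the rest of this patch sprinkles through page_alloc.c are gated by the mminit_loglevel early parameter parsed above, so they cost nothing unless the kernel is booted with mminit_loglevel raised (and mm_init.c itself is only built with CONFIG_DEBUG_MEMORY_INIT=y). A hedged sketch of an additional check hooking into the same reporting channel; report_node_span() is illustrative, the macro and the MMINIT_TRACE level are assumed to come from mm/internal.h:

	#include "internal.h"

	static void __init report_node_span(int nid, unsigned long start_pfn,
					    unsigned long end_pfn)
	{
		/* Only emitted when mminit_loglevel exceeds the message level. */
		mminit_dprintk(MMINIT_TRACE, "node_span",
			       "node %d spans pfns %lu -> %lu\n",
			       nid, start_pfn, end_pfn);
	}
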
diff --git a/mm/mmap.c b/mm/mmap.c
index 1d102b956fd8..5e0cc99e9cd5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -32,6 +32,8 @@
32#include <asm/tlb.h> 32#include <asm/tlb.h>
33#include <asm/mmu_context.h> 33#include <asm/mmu_context.h>
34 34
35#include "internal.h"
36
35#ifndef arch_mmap_check 37#ifndef arch_mmap_check
36#define arch_mmap_check(addr, len, flags) (0) 38#define arch_mmap_check(addr, len, flags) (0)
37#endif 39#endif
@@ -1108,6 +1110,9 @@ munmap_back:
1108 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) 1110 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1109 return -ENOMEM; 1111 return -ENOMEM;
1110 1112
1113 if (flags & MAP_NORESERVE)
1114 vm_flags |= VM_NORESERVE;
1115
1111 if (accountable && (!(flags & MAP_NORESERVE) || 1116 if (accountable && (!(flags & MAP_NORESERVE) ||
1112 sysctl_overcommit_memory == OVERCOMMIT_NEVER)) { 1117 sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
1113 if (vm_flags & VM_SHARED) { 1118 if (vm_flags & VM_SHARED) {
@@ -1763,7 +1768,7 @@ static void unmap_region(struct mm_struct *mm,
1763 update_hiwater_rss(mm); 1768 update_hiwater_rss(mm);
1764 unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); 1769 unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1765 vm_unacct_memory(nr_accounted); 1770 vm_unacct_memory(nr_accounted);
1766 free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS, 1771 free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
1767 next? next->vm_start: 0); 1772 next? next->vm_start: 0);
1768 tlb_finish_mmu(tlb, start, end); 1773 tlb_finish_mmu(tlb, start, end);
1769} 1774}
@@ -1807,7 +1812,8 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1807 struct mempolicy *pol; 1812 struct mempolicy *pol;
1808 struct vm_area_struct *new; 1813 struct vm_area_struct *new;
1809 1814
1810 if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK)) 1815 if (is_vm_hugetlb_page(vma) && (addr &
1816 ~(huge_page_mask(hstate_vma(vma)))))
1811 return -EINVAL; 1817 return -EINVAL;
1812 1818
1813 if (mm->map_count >= sysctl_max_map_count) 1819 if (mm->map_count >= sysctl_max_map_count)
@@ -2063,7 +2069,7 @@ void exit_mmap(struct mm_struct *mm)
2063 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2069 /* Use -1 here to ensure all VMAs in the mm are unmapped */
2064 end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); 2070 end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
2065 vm_unacct_memory(nr_accounted); 2071 vm_unacct_memory(nr_accounted);
2066 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); 2072 free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
2067 tlb_finish_mmu(tlb, 0, end); 2073 tlb_finish_mmu(tlb, 0, end);
2068 2074
2069 /* 2075 /*
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 360d9cc8b38c..abd645a3b0a0 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -153,12 +153,10 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
153 * If we make a private mapping writable we increase our commit; 153 * If we make a private mapping writable we increase our commit;
154 * but (without finer accounting) cannot reduce our commit if we 154 * but (without finer accounting) cannot reduce our commit if we
155 * make it unwritable again. 155 * make it unwritable again.
156 *
157 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
158 * a MAP_NORESERVE private mapping to writable will now reserve.
159 */ 156 */
160 if (newflags & VM_WRITE) { 157 if (newflags & VM_WRITE) {
161 if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) { 158 if (!(oldflags & (VM_ACCOUNT|VM_WRITE|
159 VM_SHARED|VM_NORESERVE))) {
162 charged = nrpages; 160 charged = nrpages;
163 if (security_vm_enough_memory(charged)) 161 if (security_vm_enough_memory(charged))
164 return -ENOMEM; 162 return -ENOMEM;
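
Taken together, the mmap.c and mprotect.c hunks above make the kernel remember MAP_NORESERVE in the VMA as VM_NORESERVE, so a later mprotect() to writable no longer charges commit for a mapping the caller explicitly asked not to reserve. A hedged userspace illustration of the mapping that now carries the flag:

	#include <sys/mman.h>
	#include <stddef.h>

	int main(void)
	{
		size_t len = 16 * 1024 * 1024;
		void *p = mmap(NULL, len, PROT_READ,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		/* With this series, no new reservation is taken here. */
		mprotect(p, len, PROT_READ | PROT_WRITE);
		munmap(p, len);
		return 0;
	}
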
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 79ac4afc908c..6da667274df5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -153,9 +153,9 @@ static unsigned long __meminitdata dma_reserve;
153 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; 153 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
154 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; 154 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
155#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 155#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
156 unsigned long __initdata required_kernelcore; 156 static unsigned long __initdata required_kernelcore;
157 static unsigned long __initdata required_movablecore; 157 static unsigned long __initdata required_movablecore;
158 unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; 158 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
159 159
160 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 160 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
161 int movable_zone; 161 int movable_zone;
@@ -264,7 +264,7 @@ static void free_compound_page(struct page *page)
264 __free_pages_ok(page, compound_order(page)); 264 __free_pages_ok(page, compound_order(page));
265} 265}
266 266
267static void prep_compound_page(struct page *page, unsigned long order) 267void prep_compound_page(struct page *page, unsigned long order)
268{ 268{
269 int i; 269 int i;
270 int nr_pages = 1 << order; 270 int nr_pages = 1 << order;
@@ -432,8 +432,9 @@ static inline void __free_one_page(struct page *page,
432 432
433 buddy = __page_find_buddy(page, page_idx, order); 433 buddy = __page_find_buddy(page, page_idx, order);
434 if (!page_is_buddy(page, buddy, order)) 434 if (!page_is_buddy(page, buddy, order))
435 break; /* Move the buddy up one level. */ 435 break;
436 436
437 /* Our buddy is free, merge with it and move up one order. */
437 list_del(&buddy->lru); 438 list_del(&buddy->lru);
438 zone->free_area[order].nr_free--; 439 zone->free_area[order].nr_free--;
439 rmv_page_order(buddy); 440 rmv_page_order(buddy);
@@ -532,7 +533,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
532/* 533/*
533 * permit the bootmem allocator to evade page validation on high-order frees 534 * permit the bootmem allocator to evade page validation on high-order frees
534 */ 535 */
535void __free_pages_bootmem(struct page *page, unsigned int order) 536void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
536{ 537{
537 if (order == 0) { 538 if (order == 0) {
538 __ClearPageReserved(page); 539 __ClearPageReserved(page);
@@ -673,9 +674,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
 673 * Note that start_page and end_page are not aligned on a pageblock               674
674 * boundary. If alignment is required, use move_freepages_block() 675 * boundary. If alignment is required, use move_freepages_block()
675 */ 676 */
676int move_freepages(struct zone *zone, 677static int move_freepages(struct zone *zone,
677 struct page *start_page, struct page *end_page, 678 struct page *start_page, struct page *end_page,
678 int migratetype) 679 int migratetype)
679{ 680{
680 struct page *page; 681 struct page *page;
681 unsigned long order; 682 unsigned long order;
@@ -714,7 +715,8 @@ int move_freepages(struct zone *zone,
714 return pages_moved; 715 return pages_moved;
715} 716}
716 717
717int move_freepages_block(struct zone *zone, struct page *page, int migratetype) 718static int move_freepages_block(struct zone *zone, struct page *page,
719 int migratetype)
718{ 720{
719 unsigned long start_pfn, end_pfn; 721 unsigned long start_pfn, end_pfn;
720 struct page *start_page, *end_page; 722 struct page *start_page, *end_page;
@@ -1429,7 +1431,7 @@ try_next_zone:
1429/* 1431/*
1430 * This is the 'heart' of the zoned buddy allocator. 1432 * This is the 'heart' of the zoned buddy allocator.
1431 */ 1433 */
1432static struct page * 1434struct page *
1433__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, 1435__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
1434 struct zonelist *zonelist, nodemask_t *nodemask) 1436 struct zonelist *zonelist, nodemask_t *nodemask)
1435{ 1437{
@@ -1632,22 +1634,7 @@ nopage:
1632got_pg: 1634got_pg:
1633 return page; 1635 return page;
1634} 1636}
1635 1637EXPORT_SYMBOL(__alloc_pages_internal);
1636struct page *
1637__alloc_pages(gfp_t gfp_mask, unsigned int order,
1638 struct zonelist *zonelist)
1639{
1640 return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
1641}
1642
1643struct page *
1644__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1645 struct zonelist *zonelist, nodemask_t *nodemask)
1646{
1647 return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
1648}
1649
1650EXPORT_SYMBOL(__alloc_pages);
1651 1638
1652/* 1639/*
1653 * Common helper functions. 1640 * Common helper functions.
@@ -1711,6 +1698,59 @@ void free_pages(unsigned long addr, unsigned int order)
1711 1698
1712EXPORT_SYMBOL(free_pages); 1699EXPORT_SYMBOL(free_pages);
1713 1700
1701/**
             1702 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
1703 * @size: the number of bytes to allocate
1704 * @gfp_mask: GFP flags for the allocation
1705 *
1706 * This function is similar to alloc_pages(), except that it allocates the
1707 * minimum number of pages to satisfy the request. alloc_pages() can only
1708 * allocate memory in power-of-two pages.
1709 *
1710 * This function is also limited by MAX_ORDER.
1711 *
1712 * Memory allocated by this function must be released by free_pages_exact().
1713 */
1714void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
1715{
1716 unsigned int order = get_order(size);
1717 unsigned long addr;
1718
1719 addr = __get_free_pages(gfp_mask, order);
1720 if (addr) {
1721 unsigned long alloc_end = addr + (PAGE_SIZE << order);
1722 unsigned long used = addr + PAGE_ALIGN(size);
1723
1724 split_page(virt_to_page(addr), order);
1725 while (used < alloc_end) {
1726 free_page(used);
1727 used += PAGE_SIZE;
1728 }
1729 }
1730
1731 return (void *)addr;
1732}
1733EXPORT_SYMBOL(alloc_pages_exact);
1734
1735/**
1736 * free_pages_exact - release memory allocated via alloc_pages_exact()
1737 * @virt: the value returned by alloc_pages_exact.
1738 * @size: size of allocation, same value as passed to alloc_pages_exact().
1739 *
1740 * Release the memory allocated by a previous call to alloc_pages_exact.
1741 */
1742void free_pages_exact(void *virt, size_t size)
1743{
1744 unsigned long addr = (unsigned long)virt;
1745 unsigned long end = addr + PAGE_ALIGN(size);
1746
1747 while (addr < end) {
1748 free_page(addr);
1749 addr += PAGE_SIZE;
1750 }
1751}
1752EXPORT_SYMBOL(free_pages_exact);
1753
1714static unsigned int nr_free_zone_pages(int offset) 1754static unsigned int nr_free_zone_pages(int offset)
1715{ 1755{
1716 struct zoneref *z; 1756 struct zoneref *z;
@@ -2352,6 +2392,7 @@ void build_all_zonelists(void)
2352 2392
2353 if (system_state == SYSTEM_BOOTING) { 2393 if (system_state == SYSTEM_BOOTING) {
2354 __build_all_zonelists(NULL); 2394 __build_all_zonelists(NULL);
2395 mminit_verify_zonelist();
2355 cpuset_init_current_mems_allowed(); 2396 cpuset_init_current_mems_allowed();
2356 } else { 2397 } else {
2357 /* we have to stop all cpus to guarantee there is no user 2398 /* we have to stop all cpus to guarantee there is no user
@@ -2534,6 +2575,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2534 } 2575 }
2535 page = pfn_to_page(pfn); 2576 page = pfn_to_page(pfn);
2536 set_page_links(page, zone, nid, pfn); 2577 set_page_links(page, zone, nid, pfn);
2578 mminit_verify_page_links(page, zone, nid, pfn);
2537 init_page_count(page); 2579 init_page_count(page);
2538 reset_page_mapcount(page); 2580 reset_page_mapcount(page);
2539 SetPageReserved(page); 2581 SetPageReserved(page);
@@ -2611,7 +2653,7 @@ static int zone_batchsize(struct zone *zone)
2611 return batch; 2653 return batch;
2612} 2654}
2613 2655
2614inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2656static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2615{ 2657{
2616 struct per_cpu_pages *pcp; 2658 struct per_cpu_pages *pcp;
2617 2659
@@ -2836,6 +2878,12 @@ __meminit int init_currently_empty_zone(struct zone *zone,
2836 2878
2837 zone->zone_start_pfn = zone_start_pfn; 2879 zone->zone_start_pfn = zone_start_pfn;
2838 2880
2881 mminit_dprintk(MMINIT_TRACE, "memmap_init",
2882 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
2883 pgdat->node_id,
2884 (unsigned long)zone_idx(zone),
2885 zone_start_pfn, (zone_start_pfn + size));
2886
2839 zone_init_free_lists(zone); 2887 zone_init_free_lists(zone);
2840 2888
2841 return 0; 2889 return 0;
@@ -2975,7 +3023,8 @@ void __init sparse_memory_present_with_active_regions(int nid)
2975void __init push_node_boundaries(unsigned int nid, 3023void __init push_node_boundaries(unsigned int nid,
2976 unsigned long start_pfn, unsigned long end_pfn) 3024 unsigned long start_pfn, unsigned long end_pfn)
2977{ 3025{
2978 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n", 3026 mminit_dprintk(MMINIT_TRACE, "zoneboundary",
3027 "Entering push_node_boundaries(%u, %lu, %lu)\n",
2979 nid, start_pfn, end_pfn); 3028 nid, start_pfn, end_pfn);
2980 3029
2981 /* Initialise the boundary for this node if necessary */ 3030 /* Initialise the boundary for this node if necessary */
@@ -2993,7 +3042,8 @@ void __init push_node_boundaries(unsigned int nid,
2993static void __meminit account_node_boundary(unsigned int nid, 3042static void __meminit account_node_boundary(unsigned int nid,
2994 unsigned long *start_pfn, unsigned long *end_pfn) 3043 unsigned long *start_pfn, unsigned long *end_pfn)
2995{ 3044{
2996 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", 3045 mminit_dprintk(MMINIT_TRACE, "zoneboundary",
3046 "Entering account_node_boundary(%u, %lu, %lu)\n",
2997 nid, *start_pfn, *end_pfn); 3047 nid, *start_pfn, *end_pfn);
2998 3048
2999 /* Return if boundary information has not been provided */ 3049 /* Return if boundary information has not been provided */
@@ -3050,7 +3100,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
 3050 * assumption is made that zones within a node are ordered by monotonically      3100
 3051 * increasing memory addresses so that the "highest" populated zone is used      3101
3052 */ 3102 */
3053void __init find_usable_zone_for_movable(void) 3103static void __init find_usable_zone_for_movable(void)
3054{ 3104{
3055 int zone_index; 3105 int zone_index;
3056 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 3106 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3076,7 +3126,7 @@ void __init find_usable_zone_for_movable(void)
3076 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 3126 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 3077 * zones within a node are in order of monotonically increasing memory addresses 3127
3078 */ 3128 */
3079void __meminit adjust_zone_range_for_zone_movable(int nid, 3129static void __meminit adjust_zone_range_for_zone_movable(int nid,
3080 unsigned long zone_type, 3130 unsigned long zone_type,
3081 unsigned long node_start_pfn, 3131 unsigned long node_start_pfn,
3082 unsigned long node_end_pfn, 3132 unsigned long node_end_pfn,
@@ -3137,7 +3187,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3137 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 3187 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3138 * then all holes in the requested range will be accounted for. 3188 * then all holes in the requested range will be accounted for.
3139 */ 3189 */
3140unsigned long __meminit __absent_pages_in_range(int nid, 3190static unsigned long __meminit __absent_pages_in_range(int nid,
3141 unsigned long range_start_pfn, 3191 unsigned long range_start_pfn,
3142 unsigned long range_end_pfn) 3192 unsigned long range_end_pfn)
3143{ 3193{
@@ -3368,8 +3418,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3368 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; 3418 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3369 if (realsize >= memmap_pages) { 3419 if (realsize >= memmap_pages) {
3370 realsize -= memmap_pages; 3420 realsize -= memmap_pages;
3371 printk(KERN_DEBUG 3421 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3372 " %s zone: %lu pages used for memmap\n", 3422 "%s zone: %lu pages used for memmap\n",
3373 zone_names[j], memmap_pages); 3423 zone_names[j], memmap_pages);
3374 } else 3424 } else
3375 printk(KERN_WARNING 3425 printk(KERN_WARNING
@@ -3379,7 +3429,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3379 /* Account for reserved pages */ 3429 /* Account for reserved pages */
3380 if (j == 0 && realsize > dma_reserve) { 3430 if (j == 0 && realsize > dma_reserve) {
3381 realsize -= dma_reserve; 3431 realsize -= dma_reserve;
3382 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 3432 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3433 "%s zone: %lu pages reserved\n",
3383 zone_names[0], dma_reserve); 3434 zone_names[0], dma_reserve);
3384 } 3435 }
3385 3436
@@ -3464,10 +3515,11 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3464#endif /* CONFIG_FLAT_NODE_MEM_MAP */ 3515#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3465} 3516}
3466 3517
3467void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat, 3518void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3468 unsigned long *zones_size, unsigned long node_start_pfn, 3519 unsigned long node_start_pfn, unsigned long *zholes_size)
3469 unsigned long *zholes_size)
3470{ 3520{
3521 pg_data_t *pgdat = NODE_DATA(nid);
3522
3471 pgdat->node_id = nid; 3523 pgdat->node_id = nid;
3472 pgdat->node_start_pfn = node_start_pfn; 3524 pgdat->node_start_pfn = node_start_pfn;
3473 calculate_node_totalpages(pgdat, zones_size, zholes_size); 3525 calculate_node_totalpages(pgdat, zones_size, zholes_size);
@@ -3520,10 +3572,13 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3520{ 3572{
3521 int i; 3573 int i;
3522 3574
3523 printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) " 3575 mminit_dprintk(MMINIT_TRACE, "memory_register",
3524 "%d entries of %d used\n", 3576 "Entering add_active_range(%d, %#lx, %#lx) "
3525 nid, start_pfn, end_pfn, 3577 "%d entries of %d used\n",
3526 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 3578 nid, start_pfn, end_pfn,
3579 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3580
3581 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3527 3582
3528 /* Merge with existing active regions if possible */ 3583 /* Merge with existing active regions if possible */
3529 for (i = 0; i < nr_nodemap_entries; i++) { 3584 for (i = 0; i < nr_nodemap_entries; i++) {
@@ -3669,7 +3724,7 @@ static void __init sort_node_map(void)
3669} 3724}
3670 3725
3671/* Find the lowest pfn for a node */ 3726/* Find the lowest pfn for a node */
3672unsigned long __init find_min_pfn_for_node(int nid) 3727static unsigned long __init find_min_pfn_for_node(int nid)
3673{ 3728{
3674 int i; 3729 int i;
3675 unsigned long min_pfn = ULONG_MAX; 3730 unsigned long min_pfn = ULONG_MAX;
@@ -3741,7 +3796,7 @@ static unsigned long __init early_calculate_totalpages(void)
3741 * memory. When they don't, some nodes will have more kernelcore than 3796 * memory. When they don't, some nodes will have more kernelcore than
3742 * others 3797 * others
3743 */ 3798 */
3744void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) 3799static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3745{ 3800{
3746 int i, nid; 3801 int i, nid;
3747 unsigned long usable_startpfn; 3802 unsigned long usable_startpfn;
@@ -3957,10 +4012,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3957 early_node_map[i].end_pfn); 4012 early_node_map[i].end_pfn);
3958 4013
3959 /* Initialise every node */ 4014 /* Initialise every node */
4015 mminit_verify_pageflags_layout();
3960 setup_nr_node_ids(); 4016 setup_nr_node_ids();
3961 for_each_online_node(nid) { 4017 for_each_online_node(nid) {
3962 pg_data_t *pgdat = NODE_DATA(nid); 4018 pg_data_t *pgdat = NODE_DATA(nid);
3963 free_area_init_node(nid, pgdat, NULL, 4019 free_area_init_node(nid, NULL,
3964 find_min_pfn_for_node(nid), NULL); 4020 find_min_pfn_for_node(nid), NULL);
3965 4021
3966 /* Any memory on that node */ 4022 /* Any memory on that node */
@@ -4025,15 +4081,13 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
4025} 4081}
4026 4082
4027#ifndef CONFIG_NEED_MULTIPLE_NODES 4083#ifndef CONFIG_NEED_MULTIPLE_NODES
4028static bootmem_data_t contig_bootmem_data; 4084struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
4029struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
4030
4031EXPORT_SYMBOL(contig_page_data); 4085EXPORT_SYMBOL(contig_page_data);
4032#endif 4086#endif
4033 4087
4034void __init free_area_init(unsigned long *zones_size) 4088void __init free_area_init(unsigned long *zones_size)
4035{ 4089{
4036 free_area_init_node(0, NODE_DATA(0), zones_size, 4090 free_area_init_node(0, zones_size,
4037 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 4091 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4038} 4092}
4039 4093
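
The new alloc_pages_exact() splits a power-of-two allocation and frees the unused tail pages, so a caller that needs, say, five contiguous pages no longer has to round up to eight. A hedged in-kernel usage sketch matching the kerneldoc above; the wrapper function is illustrative:

	#include <linux/mm.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	static int example_alloc(void **out)
	{
		/* Exactly 5 pages of physically contiguous memory. */
		void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		*out = buf;
		return 0;
		/* Released later with free_pages_exact(buf, 5 * PAGE_SIZE);
		 * the size must match the original request. */
	}
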
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 9d834aa4b979..0cbe0c60c6bf 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -130,7 +130,7 @@ static int __pdflush(struct pdflush_work *my_work)
130 * Thread creation: For how long have there been zero 130 * Thread creation: For how long have there been zero
131 * available threads? 131 * available threads?
132 */ 132 */
133 if (jiffies - last_empty_jifs > 1 * HZ) { 133 if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
134 /* unlocked list_empty() test is OK here */ 134 /* unlocked list_empty() test is OK here */
135 if (list_empty(&pdflush_list)) { 135 if (list_empty(&pdflush_list)) {
136 /* unlocked test is OK here */ 136 /* unlocked test is OK here */
@@ -151,7 +151,7 @@ static int __pdflush(struct pdflush_work *my_work)
151 if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS) 151 if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
152 continue; 152 continue;
153 pdf = list_entry(pdflush_list.prev, struct pdflush_work, list); 153 pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
154 if (jiffies - pdf->when_i_went_to_sleep > 1 * HZ) { 154 if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
155 /* Limit exit rate */ 155 /* Limit exit rate */
156 pdf->when_i_went_to_sleep = jiffies; 156 pdf->when_i_went_to_sleep = jiffies;
157 break; /* exeunt */ 157 break; /* exeunt */
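
The pdflush.c change replaces open-coded "jiffies - x > 1 * HZ" comparisons with time_after(), which stays correct across jiffies wraparound because the macro goes through signed arithmetic on the difference. A hedged minimal example of the idiom; the helper name is illustrative:

	#include <linux/jiffies.h>

	static int example_timed_out(unsigned long started)
	{
		/* True once at least one second has elapsed, wraparound-safe. */
		return time_after(jiffies, started + 1 * HZ);
	}
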
diff --git a/mm/rmap.c b/mm/rmap.c
index bf0a5b7cfb8e..abbd29f7c43f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -576,14 +576,8 @@ void page_add_anon_rmap(struct page *page,
576 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 576 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
577 if (atomic_inc_and_test(&page->_mapcount)) 577 if (atomic_inc_and_test(&page->_mapcount))
578 __page_set_anon_rmap(page, vma, address); 578 __page_set_anon_rmap(page, vma, address);
579 else { 579 else
580 __page_check_anon_rmap(page, vma, address); 580 __page_check_anon_rmap(page, vma, address);
581 /*
582 * We unconditionally charged during prepare, we uncharge here
583 * This takes care of balancing the reference counts
584 */
585 mem_cgroup_uncharge_page(page);
586 }
587} 581}
588 582
589/** 583/**
@@ -614,12 +608,6 @@ void page_add_file_rmap(struct page *page)
614{ 608{
615 if (atomic_inc_and_test(&page->_mapcount)) 609 if (atomic_inc_and_test(&page->_mapcount))
616 __inc_zone_page_state(page, NR_FILE_MAPPED); 610 __inc_zone_page_state(page, NR_FILE_MAPPED);
617 else
618 /*
619 * We unconditionally charged during prepare, we uncharge here
620 * This takes care of balancing the reference counts
621 */
622 mem_cgroup_uncharge_page(page);
623} 611}
624 612
625#ifdef CONFIG_DEBUG_VM 613#ifdef CONFIG_DEBUG_VM
diff --git a/mm/shmem.c b/mm/shmem.c
index e2a6ae1a44e9..f92fea94d037 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -922,20 +922,26 @@ found:
922 error = 1; 922 error = 1;
923 if (!inode) 923 if (!inode)
924 goto out; 924 goto out;
925 /* Precharge page while we can wait, compensate afterwards */ 925 /* Precharge page using GFP_KERNEL while we can wait */
926 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); 926 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
927 if (error) 927 if (error)
928 goto out; 928 goto out;
929 error = radix_tree_preload(GFP_KERNEL); 929 error = radix_tree_preload(GFP_KERNEL);
930 if (error) 930 if (error) {
931 goto uncharge; 931 mem_cgroup_uncharge_cache_page(page);
932 goto out;
933 }
932 error = 1; 934 error = 1;
933 935
934 spin_lock(&info->lock); 936 spin_lock(&info->lock);
935 ptr = shmem_swp_entry(info, idx, NULL); 937 ptr = shmem_swp_entry(info, idx, NULL);
936 if (ptr && ptr->val == entry.val) 938 if (ptr && ptr->val == entry.val) {
937 error = add_to_page_cache(page, inode->i_mapping, 939 error = add_to_page_cache(page, inode->i_mapping,
938 idx, GFP_NOWAIT); 940 idx, GFP_NOWAIT);
941 /* does mem_cgroup_uncharge_cache_page on error */
942 } else /* we must compensate for our precharge above */
943 mem_cgroup_uncharge_cache_page(page);
944
939 if (error == -EEXIST) { 945 if (error == -EEXIST) {
940 struct page *filepage = find_get_page(inode->i_mapping, idx); 946 struct page *filepage = find_get_page(inode->i_mapping, idx);
941 error = 1; 947 error = 1;
@@ -961,8 +967,6 @@ found:
961 shmem_swp_unmap(ptr); 967 shmem_swp_unmap(ptr);
962 spin_unlock(&info->lock); 968 spin_unlock(&info->lock);
963 radix_tree_preload_end(); 969 radix_tree_preload_end();
964uncharge:
965 mem_cgroup_uncharge_page(page);
966out: 970out:
967 unlock_page(page); 971 unlock_page(page);
968 page_cache_release(page); 972 page_cache_release(page);
@@ -1311,17 +1315,14 @@ repeat:
1311 shmem_swp_unmap(entry); 1315 shmem_swp_unmap(entry);
1312 spin_unlock(&info->lock); 1316 spin_unlock(&info->lock);
1313 unlock_page(swappage); 1317 unlock_page(swappage);
1318 page_cache_release(swappage);
1314 if (error == -ENOMEM) { 1319 if (error == -ENOMEM) {
1315 /* allow reclaim from this memory cgroup */ 1320 /* allow reclaim from this memory cgroup */
1316 error = mem_cgroup_cache_charge(swappage, 1321 error = mem_cgroup_shrink_usage(current->mm,
1317 current->mm, gfp & ~__GFP_HIGHMEM); 1322 gfp);
1318 if (error) { 1323 if (error)
1319 page_cache_release(swappage);
1320 goto failed; 1324 goto failed;
1321 }
1322 mem_cgroup_uncharge_page(swappage);
1323 } 1325 }
1324 page_cache_release(swappage);
1325 goto repeat; 1326 goto repeat;
1326 } 1327 }
1327 } else if (sgp == SGP_READ && !filepage) { 1328 } else if (sgp == SGP_READ && !filepage) {
@@ -1358,6 +1359,8 @@ repeat:
1358 } 1359 }
1359 1360
1360 if (!filepage) { 1361 if (!filepage) {
1362 int ret;
1363
1361 spin_unlock(&info->lock); 1364 spin_unlock(&info->lock);
1362 filepage = shmem_alloc_page(gfp, info, idx); 1365 filepage = shmem_alloc_page(gfp, info, idx);
1363 if (!filepage) { 1366 if (!filepage) {
@@ -1386,10 +1389,18 @@ repeat:
1386 swap = *entry; 1389 swap = *entry;
1387 shmem_swp_unmap(entry); 1390 shmem_swp_unmap(entry);
1388 } 1391 }
1389 if (error || swap.val || 0 != add_to_page_cache_lru( 1392 ret = error || swap.val;
1390 filepage, mapping, idx, GFP_NOWAIT)) { 1393 if (ret)
1394 mem_cgroup_uncharge_cache_page(filepage);
1395 else
1396 ret = add_to_page_cache_lru(filepage, mapping,
1397 idx, GFP_NOWAIT);
1398 /*
1399 * At add_to_page_cache_lru() failure, uncharge will
1400 * be done automatically.
1401 */
1402 if (ret) {
1391 spin_unlock(&info->lock); 1403 spin_unlock(&info->lock);
1392 mem_cgroup_uncharge_page(filepage);
1393 page_cache_release(filepage); 1404 page_cache_release(filepage);
1394 shmem_unacct_blocks(info->flags, 1); 1405 shmem_unacct_blocks(info->flags, 1);
1395 shmem_free_blocks(inode, 1); 1406 shmem_free_blocks(inode, 1);
@@ -1398,7 +1409,6 @@ repeat:
1398 goto failed; 1409 goto failed;
1399 goto repeat; 1410 goto repeat;
1400 } 1411 }
1401 mem_cgroup_uncharge_page(filepage);
1402 info->flags |= SHMEM_PAGEIN; 1412 info->flags |= SHMEM_PAGEIN;
1403 } 1413 }
1404 1414
@@ -1690,26 +1700,38 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
1690 file_accessed(filp); 1700 file_accessed(filp);
1691} 1701}
1692 1702
1693static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) 1703static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1704 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1694{ 1705{
1695 read_descriptor_t desc; 1706 struct file *filp = iocb->ki_filp;
1707 ssize_t retval;
1708 unsigned long seg;
1709 size_t count;
1710 loff_t *ppos = &iocb->ki_pos;
1696 1711
1697 if ((ssize_t) count < 0) 1712 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1698 return -EINVAL; 1713 if (retval)
1699 if (!access_ok(VERIFY_WRITE, buf, count)) 1714 return retval;
1700 return -EFAULT;
1701 if (!count)
1702 return 0;
1703 1715
1704 desc.written = 0; 1716 for (seg = 0; seg < nr_segs; seg++) {
1705 desc.count = count; 1717 read_descriptor_t desc;
1706 desc.arg.buf = buf;
1707 desc.error = 0;
1708 1718
1709 do_shmem_file_read(filp, ppos, &desc, file_read_actor); 1719 desc.written = 0;
1710 if (desc.written) 1720 desc.arg.buf = iov[seg].iov_base;
1711 return desc.written; 1721 desc.count = iov[seg].iov_len;
1712 return desc.error; 1722 if (desc.count == 0)
1723 continue;
1724 desc.error = 0;
1725 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1726 retval += desc.written;
1727 if (desc.error) {
1728 retval = retval ?: desc.error;
1729 break;
1730 }
1731 if (desc.count > 0)
1732 break;
1733 }
1734 return retval;
1713} 1735}
1714 1736
1715static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 1737static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -2369,8 +2391,9 @@ static const struct file_operations shmem_file_operations = {
2369 .mmap = shmem_mmap, 2391 .mmap = shmem_mmap,
2370#ifdef CONFIG_TMPFS 2392#ifdef CONFIG_TMPFS
2371 .llseek = generic_file_llseek, 2393 .llseek = generic_file_llseek,
2372 .read = shmem_file_read, 2394 .read = do_sync_read,
2373 .write = do_sync_write, 2395 .write = do_sync_write,
2396 .aio_read = shmem_file_aio_read,
2374 .aio_write = generic_file_aio_write, 2397 .aio_write = generic_file_aio_write,
2375 .fsync = simple_sync_file, 2398 .fsync = simple_sync_file,
2376 .splice_read = generic_file_splice_read, 2399 .splice_read = generic_file_splice_read,
diff --git a/mm/slob.c b/mm/slob.c
index a3ad6671adf1..de268eb7ac70 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -130,17 +130,17 @@ static LIST_HEAD(free_slob_large);
130 */ 130 */
131static inline int slob_page(struct slob_page *sp) 131static inline int slob_page(struct slob_page *sp)
132{ 132{
133 return test_bit(PG_active, &sp->flags); 133 return PageSlobPage((struct page *)sp);
134} 134}
135 135
136static inline void set_slob_page(struct slob_page *sp) 136static inline void set_slob_page(struct slob_page *sp)
137{ 137{
138 __set_bit(PG_active, &sp->flags); 138 __SetPageSlobPage((struct page *)sp);
139} 139}
140 140
141static inline void clear_slob_page(struct slob_page *sp) 141static inline void clear_slob_page(struct slob_page *sp)
142{ 142{
143 __clear_bit(PG_active, &sp->flags); 143 __ClearPageSlobPage((struct page *)sp);
144} 144}
145 145
146/* 146/*
@@ -148,19 +148,19 @@ static inline void clear_slob_page(struct slob_page *sp)
148 */ 148 */
149static inline int slob_page_free(struct slob_page *sp) 149static inline int slob_page_free(struct slob_page *sp)
150{ 150{
151 return test_bit(PG_private, &sp->flags); 151 return PageSlobFree((struct page *)sp);
152} 152}
153 153
154static void set_slob_page_free(struct slob_page *sp, struct list_head *list) 154static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
155{ 155{
156 list_add(&sp->list, list); 156 list_add(&sp->list, list);
157 __set_bit(PG_private, &sp->flags); 157 __SetPageSlobFree((struct page *)sp);
158} 158}
159 159
160static inline void clear_slob_page_free(struct slob_page *sp) 160static inline void clear_slob_page_free(struct slob_page *sp)
161{ 161{
162 list_del(&sp->list); 162 list_del(&sp->list);
163 __clear_bit(PG_private, &sp->flags); 163 __ClearPageSlobFree((struct page *)sp);
164} 164}
165 165
166#define SLOB_UNIT sizeof(slob_t) 166#define SLOB_UNIT sizeof(slob_t)
diff --git a/mm/slub.c b/mm/slub.c
index 6d4a49c1ff2f..77c21cf53ff9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -102,44 +102,12 @@
102 * the fast path and disables lockless freelists. 102 * the fast path and disables lockless freelists.
103 */ 103 */
104 104
105#define FROZEN (1 << PG_active)
106
107#ifdef CONFIG_SLUB_DEBUG 105#ifdef CONFIG_SLUB_DEBUG
108#define SLABDEBUG (1 << PG_error) 106#define SLABDEBUG 1
109#else 107#else
110#define SLABDEBUG 0 108#define SLABDEBUG 0
111#endif 109#endif
112 110
113static inline int SlabFrozen(struct page *page)
114{
115 return page->flags & FROZEN;
116}
117
118static inline void SetSlabFrozen(struct page *page)
119{
120 page->flags |= FROZEN;
121}
122
123static inline void ClearSlabFrozen(struct page *page)
124{
125 page->flags &= ~FROZEN;
126}
127
128static inline int SlabDebug(struct page *page)
129{
130 return page->flags & SLABDEBUG;
131}
132
133static inline void SetSlabDebug(struct page *page)
134{
135 page->flags |= SLABDEBUG;
136}
137
138static inline void ClearSlabDebug(struct page *page)
139{
140 page->flags &= ~SLABDEBUG;
141}
142
143/* 111/*
144 * Issues still to be resolved: 112 * Issues still to be resolved:
145 * 113 *
@@ -971,7 +939,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
971 } 939 }
972 940
973 /* Special debug activities for freeing objects */ 941 /* Special debug activities for freeing objects */
974 if (!SlabFrozen(page) && !page->freelist) 942 if (!PageSlubFrozen(page) && !page->freelist)
975 remove_full(s, page); 943 remove_full(s, page);
976 if (s->flags & SLAB_STORE_USER) 944 if (s->flags & SLAB_STORE_USER)
977 set_track(s, object, TRACK_FREE, addr); 945 set_track(s, object, TRACK_FREE, addr);
@@ -1157,7 +1125,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1157 page->flags |= 1 << PG_slab; 1125 page->flags |= 1 << PG_slab;
1158 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | 1126 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1159 SLAB_STORE_USER | SLAB_TRACE)) 1127 SLAB_STORE_USER | SLAB_TRACE))
1160 SetSlabDebug(page); 1128 __SetPageSlubDebug(page);
1161 1129
1162 start = page_address(page); 1130 start = page_address(page);
1163 1131
@@ -1184,14 +1152,14 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1184 int order = compound_order(page); 1152 int order = compound_order(page);
1185 int pages = 1 << order; 1153 int pages = 1 << order;
1186 1154
1187 if (unlikely(SlabDebug(page))) { 1155 if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
1188 void *p; 1156 void *p;
1189 1157
1190 slab_pad_check(s, page); 1158 slab_pad_check(s, page);
1191 for_each_object(p, s, page_address(page), 1159 for_each_object(p, s, page_address(page),
1192 page->objects) 1160 page->objects)
1193 check_object(s, page, p, 0); 1161 check_object(s, page, p, 0);
1194 ClearSlabDebug(page); 1162 __ClearPageSlubDebug(page);
1195 } 1163 }
1196 1164
1197 mod_zone_page_state(page_zone(page), 1165 mod_zone_page_state(page_zone(page),
@@ -1288,7 +1256,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1288 if (slab_trylock(page)) { 1256 if (slab_trylock(page)) {
1289 list_del(&page->lru); 1257 list_del(&page->lru);
1290 n->nr_partial--; 1258 n->nr_partial--;
1291 SetSlabFrozen(page); 1259 __SetPageSlubFrozen(page);
1292 return 1; 1260 return 1;
1293 } 1261 }
1294 return 0; 1262 return 0;
@@ -1398,7 +1366,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1398 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1366 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1399 struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id()); 1367 struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
1400 1368
1401 ClearSlabFrozen(page); 1369 __ClearPageSlubFrozen(page);
1402 if (page->inuse) { 1370 if (page->inuse) {
1403 1371
1404 if (page->freelist) { 1372 if (page->freelist) {
@@ -1406,7 +1374,8 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1406 stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1374 stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1407 } else { 1375 } else {
1408 stat(c, DEACTIVATE_FULL); 1376 stat(c, DEACTIVATE_FULL);
1409 if (SlabDebug(page) && (s->flags & SLAB_STORE_USER)) 1377 if (SLABDEBUG && PageSlubDebug(page) &&
1378 (s->flags & SLAB_STORE_USER))
1410 add_full(n, page); 1379 add_full(n, page);
1411 } 1380 }
1412 slab_unlock(page); 1381 slab_unlock(page);
@@ -1551,7 +1520,7 @@ load_freelist:
1551 object = c->page->freelist; 1520 object = c->page->freelist;
1552 if (unlikely(!object)) 1521 if (unlikely(!object))
1553 goto another_slab; 1522 goto another_slab;
1554 if (unlikely(SlabDebug(c->page))) 1523 if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
1555 goto debug; 1524 goto debug;
1556 1525
1557 c->freelist = object[c->offset]; 1526 c->freelist = object[c->offset];
@@ -1588,7 +1557,7 @@ new_slab:
1588 if (c->page) 1557 if (c->page)
1589 flush_slab(s, c); 1558 flush_slab(s, c);
1590 slab_lock(new); 1559 slab_lock(new);
1591 SetSlabFrozen(new); 1560 __SetPageSlubFrozen(new);
1592 c->page = new; 1561 c->page = new;
1593 goto load_freelist; 1562 goto load_freelist;
1594 } 1563 }
@@ -1674,7 +1643,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
1674 stat(c, FREE_SLOWPATH); 1643 stat(c, FREE_SLOWPATH);
1675 slab_lock(page); 1644 slab_lock(page);
1676 1645
1677 if (unlikely(SlabDebug(page))) 1646 if (unlikely(SLABDEBUG && PageSlubDebug(page)))
1678 goto debug; 1647 goto debug;
1679 1648
1680checks_ok: 1649checks_ok:
@@ -1682,7 +1651,7 @@ checks_ok:
1682 page->freelist = object; 1651 page->freelist = object;
1683 page->inuse--; 1652 page->inuse--;
1684 1653
1685 if (unlikely(SlabFrozen(page))) { 1654 if (unlikely(PageSlubFrozen(page))) {
1686 stat(c, FREE_FROZEN); 1655 stat(c, FREE_FROZEN);
1687 goto out_unlock; 1656 goto out_unlock;
1688 } 1657 }
@@ -3317,12 +3286,12 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3317 s->name, page); 3286 s->name, page);
3318 3287
3319 if (s->flags & DEBUG_DEFAULT_FLAGS) { 3288 if (s->flags & DEBUG_DEFAULT_FLAGS) {
3320 if (!SlabDebug(page)) 3289 if (!PageSlubDebug(page))
3321 printk(KERN_ERR "SLUB %s: SlabDebug not set " 3290 printk(KERN_ERR "SLUB %s: SlubDebug not set "
3322 "on slab 0x%p\n", s->name, page); 3291 "on slab 0x%p\n", s->name, page);
3323 } else { 3292 } else {
3324 if (SlabDebug(page)) 3293 if (PageSlubDebug(page))
3325 printk(KERN_ERR "SLUB %s: SlabDebug set on " 3294 printk(KERN_ERR "SLUB %s: SlubDebug set on "
3326 "slab 0x%p\n", s->name, page); 3295 "slab 0x%p\n", s->name, page);
3327 } 3296 }
3328} 3297}
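/*
 * Illustrative sketch only, not part of the patch: the slob and slub hunks
 * above replace open-coded test_bit()/__set_bit() manipulation of
 * page->flags with named accessors such as PageSlobFree() and
 * __SetPageSlubFrozen().  A minimal userspace analogue of that
 * accessor-generation pattern (the DEFINE_FLAG macro and flag names below
 * are made up, loosely modelled on the kernel's page-flag macros):
 */
#include <stdio.h>

struct page { unsigned long flags; };

/* Generate PageX/SetPageX/ClearPageX helpers for one flag bit. */
#define DEFINE_FLAG(name, bit)                                          \
        static inline int  Page##name(struct page *p)                   \
        { return (p->flags >> (bit)) & 1UL; }                           \
        static inline void SetPage##name(struct page *p)                \
        { p->flags |= 1UL << (bit); }                                   \
        static inline void ClearPage##name(struct page *p)              \
        { p->flags &= ~(1UL << (bit)); }

DEFINE_FLAG(SlobFree, 0)
DEFINE_FLAG(SlobPage, 1)

int main(void)
{
        struct page p = { .flags = 0 };

        SetPageSlobPage(&p);
        SetPageSlobFree(&p);
        ClearPageSlobFree(&p);
        printf("slob_page=%d slob_free=%d\n",
               PageSlobPage(&p), PageSlobFree(&p));
        return 0;
}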
diff --git a/mm/sparse.c b/mm/sparse.c
index 36511c7b5e2c..8ffc08990008 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -12,6 +12,7 @@
12#include <asm/dma.h> 12#include <asm/dma.h>
13#include <asm/pgalloc.h> 13#include <asm/pgalloc.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include "internal.h"
15 16
16/* 17/*
17 * Permanent SPARSEMEM data: 18 * Permanent SPARSEMEM data:
@@ -147,22 +148,41 @@ static inline int sparse_early_nid(struct mem_section *section)
147 return (section->section_mem_map >> SECTION_NID_SHIFT); 148 return (section->section_mem_map >> SECTION_NID_SHIFT);
148} 149}
149 150
150/* Record a memory area against a node. */ 151/* Validate the physical addressing limitations of the model */
151void __init memory_present(int nid, unsigned long start, unsigned long end) 152void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
153 unsigned long *end_pfn)
152{ 154{
153 unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); 155 unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
154 unsigned long pfn;
155 156
156 /* 157 /*
157 * Sanity checks - do not allow an architecture to pass 158 * Sanity checks - do not allow an architecture to pass
158 * in larger pfns than the maximum scope of sparsemem: 159 * in larger pfns than the maximum scope of sparsemem:
159 */ 160 */
160 if (start >= max_arch_pfn) 161 if (*start_pfn > max_sparsemem_pfn) {
161 return; 162 mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
162 if (end >= max_arch_pfn) 163 "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
163 end = max_arch_pfn; 164 *start_pfn, *end_pfn, max_sparsemem_pfn);
165 WARN_ON_ONCE(1);
166 *start_pfn = max_sparsemem_pfn;
167 *end_pfn = max_sparsemem_pfn;
168 }
169
170 if (*end_pfn > max_sparsemem_pfn) {
171 mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
172 "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
173 *start_pfn, *end_pfn, max_sparsemem_pfn);
174 WARN_ON_ONCE(1);
175 *end_pfn = max_sparsemem_pfn;
176 }
177}
178
179/* Record a memory area against a node. */
180void __init memory_present(int nid, unsigned long start, unsigned long end)
181{
182 unsigned long pfn;
164 183
165 start &= PAGE_SECTION_MASK; 184 start &= PAGE_SECTION_MASK;
185 mminit_validate_memmodel_limits(&start, &end);
166 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { 186 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
167 unsigned long section = pfn_to_section_nr(pfn); 187 unsigned long section = pfn_to_section_nr(pfn);
168 struct mem_section *ms; 188 struct mem_section *ms;
@@ -187,6 +207,7 @@ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
187 unsigned long pfn; 207 unsigned long pfn;
188 unsigned long nr_pages = 0; 208 unsigned long nr_pages = 0;
189 209
210 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
190 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 211 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
191 if (nid != early_pfn_to_nid(pfn)) 212 if (nid != early_pfn_to_nid(pfn))
192 continue; 213 continue;
@@ -248,16 +269,92 @@ static unsigned long *__kmalloc_section_usemap(void)
248} 269}
249#endif /* CONFIG_MEMORY_HOTPLUG */ 270#endif /* CONFIG_MEMORY_HOTPLUG */
250 271
272#ifdef CONFIG_MEMORY_HOTREMOVE
273static unsigned long * __init
274sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
275{
276 unsigned long section_nr;
277
278 /*
279 * A page may contain usemaps for other sections preventing the
280 * page being freed and making a section unremovable while
 281 * other sections referencing the usemap remain active. Similarly,
282 * a pgdat can prevent a section being removed. If section A
283 * contains a pgdat and section B contains the usemap, both
284 * sections become inter-dependent. This allocates usemaps
285 * from the same section as the pgdat where possible to avoid
286 * this problem.
287 */
288 section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
289 return alloc_bootmem_section(usemap_size(), section_nr);
290}
291
292static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
293{
294 unsigned long usemap_snr, pgdat_snr;
295 static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
296 static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
297 struct pglist_data *pgdat = NODE_DATA(nid);
298 int usemap_nid;
299
300 usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
301 pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
302 if (usemap_snr == pgdat_snr)
303 return;
304
305 if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
306 /* skip redundant message */
307 return;
308
309 old_usemap_snr = usemap_snr;
310 old_pgdat_snr = pgdat_snr;
311
312 usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
313 if (usemap_nid != nid) {
314 printk(KERN_INFO
315 "node %d must be removed before remove section %ld\n",
316 nid, usemap_snr);
317 return;
318 }
319 /*
320 * There is a circular dependency.
 321 * Some platforms allow un-removable sections because they will just
 322 * gather other removable sections for dynamic partitioning.
 323 * Just report the un-removable section number here.
324 */
325 printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
326 pgdat_snr, nid);
327 printk(KERN_CONT
328 " have a circular dependency on usemap and pgdat allocations\n");
329}
330#else
331static unsigned long * __init
332sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
333{
334 return NULL;
335}
336
337static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
338{
339}
340#endif /* CONFIG_MEMORY_HOTREMOVE */
341
251static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum) 342static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
252{ 343{
253 unsigned long *usemap; 344 unsigned long *usemap;
254 struct mem_section *ms = __nr_to_section(pnum); 345 struct mem_section *ms = __nr_to_section(pnum);
255 int nid = sparse_early_nid(ms); 346 int nid = sparse_early_nid(ms);
256 347
257 usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size()); 348 usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
258 if (usemap) 349 if (usemap)
259 return usemap; 350 return usemap;
260 351
352 usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
353 if (usemap) {
354 check_usemap_section_nr(nid, usemap);
355 return usemap;
356 }
357
261 /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */ 358 /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
262 nid = 0; 359 nid = 0;
263 360
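/*
 * Illustrative sketch only, not part of the patch: the new
 * mminit_validate_memmodel_limits() above clamps a [start_pfn, end_pfn)
 * range so it never exceeds the highest pfn the memory model can address.
 * The clamping logic on its own, with a hypothetical limit:
 */
#include <stdio.h>

#define MAX_SPARSEMEM_PFN  (1UL << 20)  /* made-up limit for the example */

static void validate_limits(unsigned long *start_pfn, unsigned long *end_pfn)
{
        if (*start_pfn > MAX_SPARSEMEM_PFN) {
                fprintf(stderr, "start %lu beyond limit %lu\n",
                        *start_pfn, MAX_SPARSEMEM_PFN);
                *start_pfn = MAX_SPARSEMEM_PFN;
                *end_pfn = MAX_SPARSEMEM_PFN;
        }
        if (*end_pfn > MAX_SPARSEMEM_PFN) {
                fprintf(stderr, "end %lu beyond limit %lu\n",
                        *end_pfn, MAX_SPARSEMEM_PFN);
                *end_pfn = MAX_SPARSEMEM_PFN;
        }
}

int main(void)
{
        unsigned long start = 100, end = 1UL << 22;

        validate_limits(&start, &end);
        printf("clamped range: [%lu, %lu)\n", start, end);
        return 0;
}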
diff --git a/mm/swap.c b/mm/swap.c
index 45c9f25a8a3b..dd89234ee51f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,9 +34,9 @@
34/* How many pages do we try to swap or page in/out together? */ 34/* How many pages do we try to swap or page in/out together? */
35int page_cluster; 35int page_cluster;
36 36
37static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, }; 37static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs);
38static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, }; 38static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs);
39static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, }; 39static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
40 40
41/* 41/*
42 * This path almost never happens for VM activity - pages are normally 42 * This path almost never happens for VM activity - pages are normally
@@ -493,7 +493,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag);
493 */ 493 */
494#define ACCT_THRESHOLD max(16, NR_CPUS * 2) 494#define ACCT_THRESHOLD max(16, NR_CPUS * 2)
495 495
496static DEFINE_PER_CPU(long, committed_space) = 0; 496static DEFINE_PER_CPU(long, committed_space);
497 497
498void vm_acct_memory(long pages) 498void vm_acct_memory(long pages)
499{ 499{
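/*
 * Illustrative sketch only, not part of the patch: the mm/swap.c hunk drops
 * the explicit "= 0" and "= { 0, }" initializers from static per-CPU
 * variables.  Objects with static storage duration are zero-initialized
 * anyway, so the initializer adds nothing, as this standalone example shows:
 */
#include <stdio.h>

static long committed_space;                           /* implicitly zero  */
static struct { int nr; void *pages[14]; } pvec;       /* implicitly zeroed */

int main(void)
{
        printf("committed_space=%ld pvec.nr=%d pages[0]=%p\n",
               committed_space, pvec.nr, pvec.pages[0]);
        return 0;
}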
diff --git a/mm/swapfile.c b/mm/swapfile.c
index bd1bb5920306..2f33edb8bee9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -37,6 +37,7 @@ DEFINE_SPINLOCK(swap_lock);
37unsigned int nr_swapfiles; 37unsigned int nr_swapfiles;
38long total_swap_pages; 38long total_swap_pages;
39static int swap_overflow; 39static int swap_overflow;
40static int least_priority;
40 41
41static const char Bad_file[] = "Bad swap file entry "; 42static const char Bad_file[] = "Bad swap file entry ";
42static const char Unused_file[] = "Unused swap file entry "; 43static const char Unused_file[] = "Unused swap file entry ";
@@ -1260,6 +1261,11 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
1260 /* just pick something that's safe... */ 1261 /* just pick something that's safe... */
1261 swap_list.next = swap_list.head; 1262 swap_list.next = swap_list.head;
1262 } 1263 }
1264 if (p->prio < 0) {
1265 for (i = p->next; i >= 0; i = swap_info[i].next)
1266 swap_info[i].prio = p->prio--;
1267 least_priority++;
1268 }
1263 nr_swap_pages -= p->pages; 1269 nr_swap_pages -= p->pages;
1264 total_swap_pages -= p->pages; 1270 total_swap_pages -= p->pages;
1265 p->flags &= ~SWP_WRITEOK; 1271 p->flags &= ~SWP_WRITEOK;
@@ -1272,9 +1278,14 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
1272 if (err) { 1278 if (err) {
1273 /* re-insert swap space back into swap_list */ 1279 /* re-insert swap space back into swap_list */
1274 spin_lock(&swap_lock); 1280 spin_lock(&swap_lock);
1275 for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next) 1281 if (p->prio < 0)
1282 p->prio = --least_priority;
1283 prev = -1;
1284 for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
1276 if (p->prio >= swap_info[i].prio) 1285 if (p->prio >= swap_info[i].prio)
1277 break; 1286 break;
1287 prev = i;
1288 }
1278 p->next = i; 1289 p->next = i;
1279 if (prev < 0) 1290 if (prev < 0)
1280 swap_list.head = swap_list.next = p - swap_info; 1291 swap_list.head = swap_list.next = p - swap_info;
@@ -1447,7 +1458,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1447 unsigned int type; 1458 unsigned int type;
1448 int i, prev; 1459 int i, prev;
1449 int error; 1460 int error;
1450 static int least_priority;
1451 union swap_header *swap_header = NULL; 1461 union swap_header *swap_header = NULL;
1452 int swap_header_version; 1462 int swap_header_version;
1453 unsigned int nr_good_pages = 0; 1463 unsigned int nr_good_pages = 0;
@@ -1455,7 +1465,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1455 sector_t span; 1465 sector_t span;
1456 unsigned long maxpages = 1; 1466 unsigned long maxpages = 1;
1457 int swapfilesize; 1467 int swapfilesize;
1458 unsigned short *swap_map; 1468 unsigned short *swap_map = NULL;
1459 struct page *page = NULL; 1469 struct page *page = NULL;
1460 struct inode *inode = NULL; 1470 struct inode *inode = NULL;
1461 int did_down = 0; 1471 int did_down = 0;
@@ -1474,22 +1484,10 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1474 } 1484 }
1475 if (type >= nr_swapfiles) 1485 if (type >= nr_swapfiles)
1476 nr_swapfiles = type+1; 1486 nr_swapfiles = type+1;
1487 memset(p, 0, sizeof(*p));
1477 INIT_LIST_HEAD(&p->extent_list); 1488 INIT_LIST_HEAD(&p->extent_list);
1478 p->flags = SWP_USED; 1489 p->flags = SWP_USED;
1479 p->swap_file = NULL;
1480 p->old_block_size = 0;
1481 p->swap_map = NULL;
1482 p->lowest_bit = 0;
1483 p->highest_bit = 0;
1484 p->cluster_nr = 0;
1485 p->inuse_pages = 0;
1486 p->next = -1; 1490 p->next = -1;
1487 if (swap_flags & SWAP_FLAG_PREFER) {
1488 p->prio =
1489 (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
1490 } else {
1491 p->prio = --least_priority;
1492 }
1493 spin_unlock(&swap_lock); 1491 spin_unlock(&swap_lock);
1494 name = getname(specialfile); 1492 name = getname(specialfile);
1495 error = PTR_ERR(name); 1493 error = PTR_ERR(name);
@@ -1632,19 +1630,20 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1632 goto bad_swap; 1630 goto bad_swap;
1633 1631
1634 /* OK, set up the swap map and apply the bad block list */ 1632 /* OK, set up the swap map and apply the bad block list */
1635 if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) { 1633 swap_map = vmalloc(maxpages * sizeof(short));
1634 if (!swap_map) {
1636 error = -ENOMEM; 1635 error = -ENOMEM;
1637 goto bad_swap; 1636 goto bad_swap;
1638 } 1637 }
1639 1638
1640 error = 0; 1639 error = 0;
1641 memset(p->swap_map, 0, maxpages * sizeof(short)); 1640 memset(swap_map, 0, maxpages * sizeof(short));
1642 for (i = 0; i < swap_header->info.nr_badpages; i++) { 1641 for (i = 0; i < swap_header->info.nr_badpages; i++) {
1643 int page_nr = swap_header->info.badpages[i]; 1642 int page_nr = swap_header->info.badpages[i];
1644 if (page_nr <= 0 || page_nr >= swap_header->info.last_page) 1643 if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
1645 error = -EINVAL; 1644 error = -EINVAL;
1646 else 1645 else
1647 p->swap_map[page_nr] = SWAP_MAP_BAD; 1646 swap_map[page_nr] = SWAP_MAP_BAD;
1648 } 1647 }
1649 nr_good_pages = swap_header->info.last_page - 1648 nr_good_pages = swap_header->info.last_page -
1650 swap_header->info.nr_badpages - 1649 swap_header->info.nr_badpages -
@@ -1654,7 +1653,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1654 } 1653 }
1655 1654
1656 if (nr_good_pages) { 1655 if (nr_good_pages) {
1657 p->swap_map[0] = SWAP_MAP_BAD; 1656 swap_map[0] = SWAP_MAP_BAD;
1658 p->max = maxpages; 1657 p->max = maxpages;
1659 p->pages = nr_good_pages; 1658 p->pages = nr_good_pages;
1660 nr_extents = setup_swap_extents(p, &span); 1659 nr_extents = setup_swap_extents(p, &span);
@@ -1672,6 +1671,12 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1672 1671
1673 mutex_lock(&swapon_mutex); 1672 mutex_lock(&swapon_mutex);
1674 spin_lock(&swap_lock); 1673 spin_lock(&swap_lock);
1674 if (swap_flags & SWAP_FLAG_PREFER)
1675 p->prio =
1676 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
1677 else
1678 p->prio = --least_priority;
1679 p->swap_map = swap_map;
1675 p->flags = SWP_ACTIVE; 1680 p->flags = SWP_ACTIVE;
1676 nr_swap_pages += nr_good_pages; 1681 nr_swap_pages += nr_good_pages;
1677 total_swap_pages += nr_good_pages; 1682 total_swap_pages += nr_good_pages;
@@ -1707,12 +1712,8 @@ bad_swap:
1707 destroy_swap_extents(p); 1712 destroy_swap_extents(p);
1708bad_swap_2: 1713bad_swap_2:
1709 spin_lock(&swap_lock); 1714 spin_lock(&swap_lock);
1710 swap_map = p->swap_map;
1711 p->swap_file = NULL; 1715 p->swap_file = NULL;
1712 p->swap_map = NULL;
1713 p->flags = 0; 1716 p->flags = 0;
1714 if (!(swap_flags & SWAP_FLAG_PREFER))
1715 ++least_priority;
1716 spin_unlock(&swap_lock); 1717 spin_unlock(&swap_lock);
1717 vfree(swap_map); 1718 vfree(swap_map);
1718 if (swap_file) 1719 if (swap_file)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6e45b0f3d125..35f293816294 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -931,6 +931,25 @@ static void s_stop(struct seq_file *m, void *p)
931 read_unlock(&vmlist_lock); 931 read_unlock(&vmlist_lock);
932} 932}
933 933
934static void show_numa_info(struct seq_file *m, struct vm_struct *v)
935{
936 if (NUMA_BUILD) {
937 unsigned int nr, *counters = m->private;
938
939 if (!counters)
940 return;
941
942 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
943
944 for (nr = 0; nr < v->nr_pages; nr++)
945 counters[page_to_nid(v->pages[nr])]++;
946
947 for_each_node_state(nr, N_HIGH_MEMORY)
948 if (counters[nr])
949 seq_printf(m, " N%u=%u", nr, counters[nr]);
950 }
951}
952
934static int s_show(struct seq_file *m, void *p) 953static int s_show(struct seq_file *m, void *p)
935{ 954{
936 struct vm_struct *v = p; 955 struct vm_struct *v = p;
@@ -967,6 +986,7 @@ static int s_show(struct seq_file *m, void *p)
967 if (v->flags & VM_VPAGES) 986 if (v->flags & VM_VPAGES)
968 seq_printf(m, " vpages"); 987 seq_printf(m, " vpages");
969 988
989 show_numa_info(m, v);
970 seq_putc(m, '\n'); 990 seq_putc(m, '\n');
971 return 0; 991 return 0;
972} 992}
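/*
 * Illustrative sketch only, not part of the patch: show_numa_info() above
 * walks v->pages[], counts how many pages live on each NUMA node, and
 * prints only the non-zero buckets.  The same counting pattern in plain C,
 * with made-up node ids:
 */
#include <stdio.h>
#include <string.h>

#define NR_NODES 4

int main(void)
{
        int page_node[] = { 0, 0, 2, 1, 2, 2 };  /* node of each "page" */
        unsigned int counters[NR_NODES];
        size_t nr, npages = sizeof(page_node) / sizeof(page_node[0]);

        memset(counters, 0, sizeof(counters));
        for (nr = 0; nr < npages; nr++)
                counters[page_node[nr]]++;

        for (nr = 0; nr < NR_NODES; nr++)
                if (counters[nr])
                        printf(" N%zu=%u", nr, counters[nr]);
        putchar('\n');
        return 0;
}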
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 967d30ccd92b..26672c6cd3ce 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -38,6 +38,7 @@
38#include <linux/kthread.h> 38#include <linux/kthread.h>
39#include <linux/freezer.h> 39#include <linux/freezer.h>
40#include <linux/memcontrol.h> 40#include <linux/memcontrol.h>
41#include <linux/delayacct.h>
41 42
42#include <asm/tlbflush.h> 43#include <asm/tlbflush.h>
43#include <asm/div64.h> 44#include <asm/div64.h>
@@ -1316,6 +1317,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1316 struct zone *zone; 1317 struct zone *zone;
1317 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); 1318 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1318 1319
1320 delayacct_freepages_start();
1321
1319 if (scan_global_lru(sc)) 1322 if (scan_global_lru(sc))
1320 count_vm_event(ALLOCSTALL); 1323 count_vm_event(ALLOCSTALL);
1321 /* 1324 /*
@@ -1396,6 +1399,8 @@ out:
1396 } else 1399 } else
1397 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority); 1400 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1398 1401
1402 delayacct_freepages_end();
1403
1399 return ret; 1404 return ret;
1400} 1405}
1401 1406
diff --git a/mm/vmstat.c b/mm/vmstat.c
index db9eabb2c5b3..b0d08e667ece 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -13,6 +13,7 @@
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/cpu.h> 15#include <linux/cpu.h>
16#include <linux/vmstat.h>
16#include <linux/sched.h> 17#include <linux/sched.h>
17 18
18#ifdef CONFIG_VM_EVENT_COUNTERS 19#ifdef CONFIG_VM_EVENT_COUNTERS
@@ -26,7 +27,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
26 27
27 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); 28 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
28 29
29 for_each_cpu_mask(cpu, *cpumask) { 30 for_each_cpu_mask_nr(cpu, *cpumask) {
30 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); 31 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
31 32
32 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 33 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
diff --git a/net/802/psnap.c b/net/802/psnap.c
index ea4643931446..b3cfe5a14fca 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -31,11 +31,9 @@ static struct llc_sap *snap_sap;
31 */ 31 */
32static struct datalink_proto *find_snap_client(unsigned char *desc) 32static struct datalink_proto *find_snap_client(unsigned char *desc)
33{ 33{
34 struct list_head *entry;
35 struct datalink_proto *proto = NULL, *p; 34 struct datalink_proto *proto = NULL, *p;
36 35
37 list_for_each_rcu(entry, &snap_list) { 36 list_for_each_entry_rcu(p, &snap_list, node) {
38 p = list_entry(entry, struct datalink_proto, node);
39 if (!memcmp(p->type, desc, 5)) { 37 if (!memcmp(p->type, desc, 5)) {
40 proto = p; 38 proto = p;
41 break; 39 break;
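/*
 * Illustrative sketch only, not part of the patch: the hunk above (and the
 * later af_inet/af_inet6 hunks) replaces list_for_each_rcu() plus a manual
 * list_entry() with list_for_each_entry_rcu(), which hands back the
 * containing structure directly.  A userspace model of that intrusive-list
 * iteration shape (no RCU here; container_of and the loop macro are local
 * re-implementations; compile with gcc, which provides typeof):
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next; };

struct proto {
        const char *name;
        struct list_head node;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* entry-based iteration: the loop variable is the containing struct */
#define list_for_each_entry(pos, head, member)                          \
        for (pos = container_of((head)->next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = container_of(pos->member.next, typeof(*pos), member))

int main(void)
{
        struct proto a = { .name = "ipx" }, b = { .name = "atalk" };
        struct list_head head = { &a.node };
        struct proto *p;

        a.node.next = &b.node;
        b.node.next = &head;            /* circular list, like the kernel's */

        list_for_each_entry(p, &head, node)
                printf("%s\n", p->name);
        return 0;
}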
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4507f744f44e..cdf137af7adc 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -1285,7 +1285,7 @@ static int p9_socket_open(struct p9_trans *trans, struct socket *csocket)
1285 int fd, ret; 1285 int fd, ret;
1286 1286
1287 csocket->sk->sk_allocation = GFP_NOIO; 1287 csocket->sk->sk_allocation = GFP_NOIO;
1288 fd = sock_map_fd(csocket); 1288 fd = sock_map_fd(csocket, 0);
1289 if (fd < 0) { 1289 if (fd < 0) {
1290 P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n"); 1290 P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
1291 return fd; 1291 return fd;
diff --git a/net/compat.c b/net/compat.c
index 6e1b03b51933..67fb6a3834a3 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -722,9 +722,10 @@ EXPORT_SYMBOL(compat_mc_getsockopt);
722 722
723/* Argument list sizes for compat_sys_socketcall */ 723/* Argument list sizes for compat_sys_socketcall */
724#define AL(x) ((x) * sizeof(u32)) 724#define AL(x) ((x) * sizeof(u32))
725static unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), 725static unsigned char nas[19]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
726 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), 726 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
727 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)}; 727 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
728 AL(6)};
728#undef AL 729#undef AL
729 730
730asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) 731asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
@@ -737,13 +738,52 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns
737 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 738 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
738} 739}
739 740
741asmlinkage long compat_sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr,
742 int __user *upeer_addrlen,
743 const compat_sigset_t __user *sigmask,
744 compat_size_t sigsetsize, int flags)
745{
746 compat_sigset_t ss32;
747 sigset_t ksigmask, sigsaved;
748 int ret;
749
750 if (sigmask) {
751 if (sigsetsize != sizeof(compat_sigset_t))
752 return -EINVAL;
753 if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
754 return -EFAULT;
755 sigset_from_compat(&ksigmask, &ss32);
756
757 sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
758 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
759 }
760
761 ret = do_accept(fd, upeer_sockaddr, upeer_addrlen, flags);
762
763 if (ret == -ERESTARTNOHAND) {
764 /*
765 * Don't restore the signal mask yet. Let do_signal() deliver
766 * the signal on the way back to userspace, before the signal
767 * mask is restored.
768 */
769 if (sigmask) {
770 memcpy(&current->saved_sigmask, &sigsaved,
771 sizeof(sigsaved));
772 set_restore_sigmask();
773 }
774 } else if (sigmask)
775 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
776
777 return ret;
778}
779
740asmlinkage long compat_sys_socketcall(int call, u32 __user *args) 780asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
741{ 781{
742 int ret; 782 int ret;
743 u32 a[6]; 783 u32 a[6];
744 u32 a0, a1; 784 u32 a0, a1;
745 785
746 if (call < SYS_SOCKET || call > SYS_RECVMSG) 786 if (call < SYS_SOCKET || call > SYS_PACCEPT)
747 return -EINVAL; 787 return -EINVAL;
748 if (copy_from_user(a, args, nas[call])) 788 if (copy_from_user(a, args, nas[call]))
749 return -EFAULT; 789 return -EFAULT;
@@ -764,7 +804,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
764 ret = sys_listen(a0, a1); 804 ret = sys_listen(a0, a1);
765 break; 805 break;
766 case SYS_ACCEPT: 806 case SYS_ACCEPT:
767 ret = sys_accept(a0, compat_ptr(a1), compat_ptr(a[2])); 807 ret = do_accept(a0, compat_ptr(a1), compat_ptr(a[2]), 0);
768 break; 808 break;
769 case SYS_GETSOCKNAME: 809 case SYS_GETSOCKNAME:
770 ret = sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2])); 810 ret = sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2]));
@@ -804,6 +844,10 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
804 case SYS_RECVMSG: 844 case SYS_RECVMSG:
805 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); 845 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
806 break; 846 break;
847 case SYS_PACCEPT:
848 ret = compat_sys_paccept(a0, compat_ptr(a1), compat_ptr(a[2]),
849 compat_ptr(a[3]), a[4], a[5]);
850 break;
807 default: 851 default:
808 ret = -EINVAL; 852 ret = -EINVAL;
809 break; 853 break;
diff --git a/net/core/dev.c b/net/core/dev.c
index ccf97f9f37eb..53af7841018a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2395,7 +2395,7 @@ out:
2395 */ 2395 */
2396 if (!cpus_empty(net_dma.channel_mask)) { 2396 if (!cpus_empty(net_dma.channel_mask)) {
2397 int chan_idx; 2397 int chan_idx;
2398 for_each_cpu_mask(chan_idx, net_dma.channel_mask) { 2398 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
2399 struct dma_chan *chan = net_dma.channels[chan_idx]; 2399 struct dma_chan *chan = net_dma.channels[chan_idx];
2400 if (chan) 2400 if (chan)
2401 dma_async_memcpy_issue_pending(chan); 2401 dma_async_memcpy_issue_pending(chan);
@@ -4530,7 +4530,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
4530 i = 0; 4530 i = 0;
4531 cpu = first_cpu(cpu_online_map); 4531 cpu = first_cpu(cpu_online_map);
4532 4532
4533 for_each_cpu_mask(chan_idx, net_dma->channel_mask) { 4533 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4534 chan = net_dma->channels[chan_idx]; 4534 chan = net_dma->channels[chan_idx];
4535 4535
4536 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) 4536 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a570e2af22cb..f686467ff12b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -67,7 +67,7 @@ static struct ctl_table net_core_table[] = {
67 { 67 {
68 .ctl_name = NET_CORE_MSG_COST, 68 .ctl_name = NET_CORE_MSG_COST,
69 .procname = "message_cost", 69 .procname = "message_cost",
70 .data = &net_msg_cost, 70 .data = &net_ratelimit_state.interval,
71 .maxlen = sizeof(int), 71 .maxlen = sizeof(int),
72 .mode = 0644, 72 .mode = 0644,
73 .proc_handler = &proc_dointvec_jiffies, 73 .proc_handler = &proc_dointvec_jiffies,
@@ -76,7 +76,7 @@ static struct ctl_table net_core_table[] = {
76 { 76 {
77 .ctl_name = NET_CORE_MSG_BURST, 77 .ctl_name = NET_CORE_MSG_BURST,
78 .procname = "message_burst", 78 .procname = "message_burst",
79 .data = &net_msg_burst, 79 .data = &net_ratelimit_state.burst,
80 .maxlen = sizeof(int), 80 .maxlen = sizeof(int),
81 .mode = 0644, 81 .mode = 0644,
82 .proc_handler = &proc_dointvec, 82 .proc_handler = &proc_dointvec,
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index c77aff9c6eb3..8c6b706963ff 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -34,6 +34,7 @@
34#define NET_DMA_DEFAULT_COPYBREAK 4096 34#define NET_DMA_DEFAULT_COPYBREAK 4096
35 35
36int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK; 36int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
37EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
37 38
38/** 39/**
39 * dma_skb_copy_datagram_iovec - Copy a datagram to an iovec. 40 * dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
diff --git a/net/core/utils.c b/net/core/utils.c
index 8031eb59054e..72e0ebe964a0 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -31,17 +31,16 @@
31#include <asm/system.h> 31#include <asm/system.h>
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33 33
34int net_msg_cost __read_mostly = 5*HZ;
35int net_msg_burst __read_mostly = 10;
36int net_msg_warn __read_mostly = 1; 34int net_msg_warn __read_mostly = 1;
37EXPORT_SYMBOL(net_msg_warn); 35EXPORT_SYMBOL(net_msg_warn);
38 36
37DEFINE_RATELIMIT_STATE(net_ratelimit_state, 5 * HZ, 10);
39/* 38/*
40 * All net warning printk()s should be guarded by this function. 39 * All net warning printk()s should be guarded by this function.
41 */ 40 */
42int net_ratelimit(void) 41int net_ratelimit(void)
43{ 42{
44 return __printk_ratelimit(net_msg_cost, net_msg_burst); 43 return __ratelimit(&net_ratelimit_state);
45} 44}
46EXPORT_SYMBOL(net_ratelimit); 45EXPORT_SYMBOL(net_ratelimit);
47 46
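/*
 * Illustrative sketch only, not part of the patch: net_ratelimit() now wraps
 * a shared ratelimit state (5 second interval, burst of 10) instead of
 * carrying its own cost/burst integers.  A very small userspace token-bucket
 * with the same parameters, to show the idea (struct and function names here
 * are made up, not the kernel API):
 */
#include <stdio.h>
#include <time.h>

struct msg_ratelimit {
        time_t begin;
        int interval;   /* seconds */
        int burst;
        int printed;
};

static int my_ratelimit(struct msg_ratelimit *rs)
{
        time_t now = time(NULL);

        if (rs->begin == 0 || now - rs->begin >= rs->interval) {
                rs->begin = now;        /* start a new window */
                rs->printed = 0;
        }
        if (rs->printed < rs->burst) {
                rs->printed++;
                return 1;               /* message allowed */
        }
        return 0;                       /* message suppressed */
}

int main(void)
{
        struct msg_ratelimit rs = { .interval = 5, .burst = 10 };
        int i, allowed = 0;

        for (i = 0; i < 100; i++)
                allowed += my_ratelimit(&rs);
        printf("allowed %d of 100 messages in this window\n", allowed);
        return 0;
}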
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd919d84285f..f440a9f54924 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -264,7 +264,6 @@ static inline int inet_netns_ok(struct net *net, int protocol)
264static int inet_create(struct net *net, struct socket *sock, int protocol) 264static int inet_create(struct net *net, struct socket *sock, int protocol)
265{ 265{
266 struct sock *sk; 266 struct sock *sk;
267 struct list_head *p;
268 struct inet_protosw *answer; 267 struct inet_protosw *answer;
269 struct inet_sock *inet; 268 struct inet_sock *inet;
270 struct proto *answer_prot; 269 struct proto *answer_prot;
@@ -281,13 +280,12 @@ static int inet_create(struct net *net, struct socket *sock, int protocol)
281 sock->state = SS_UNCONNECTED; 280 sock->state = SS_UNCONNECTED;
282 281
283 /* Look for the requested type/protocol pair. */ 282 /* Look for the requested type/protocol pair. */
284 answer = NULL;
285lookup_protocol: 283lookup_protocol:
286 err = -ESOCKTNOSUPPORT; 284 err = -ESOCKTNOSUPPORT;
287 rcu_read_lock(); 285 rcu_read_lock();
288 list_for_each_rcu(p, &inetsw[sock->type]) { 286 list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
289 answer = list_entry(p, struct inet_protosw, list);
290 287
288 err = 0;
291 /* Check the non-wild match. */ 289 /* Check the non-wild match. */
292 if (protocol == answer->protocol) { 290 if (protocol == answer->protocol) {
293 if (protocol != IPPROTO_IP) 291 if (protocol != IPPROTO_IP)
@@ -302,10 +300,9 @@ lookup_protocol:
302 break; 300 break;
303 } 301 }
304 err = -EPROTONOSUPPORT; 302 err = -EPROTONOSUPPORT;
305 answer = NULL;
306 } 303 }
307 304
308 if (unlikely(answer == NULL)) { 305 if (unlikely(err)) {
309 if (try_loading_module < 2) { 306 if (try_loading_module < 2) {
310 rcu_read_unlock(); 307 rcu_read_unlock();
311 /* 308 /*
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3d828bc4b1cf..60461ad7fa6f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -83,7 +83,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol)
83 struct inet_sock *inet; 83 struct inet_sock *inet;
84 struct ipv6_pinfo *np; 84 struct ipv6_pinfo *np;
85 struct sock *sk; 85 struct sock *sk;
86 struct list_head *p;
87 struct inet_protosw *answer; 86 struct inet_protosw *answer;
88 struct proto *answer_prot; 87 struct proto *answer_prot;
89 unsigned char answer_flags; 88 unsigned char answer_flags;
@@ -97,13 +96,12 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol)
97 build_ehash_secret(); 96 build_ehash_secret();
98 97
99 /* Look for the requested type/protocol pair. */ 98 /* Look for the requested type/protocol pair. */
100 answer = NULL;
101lookup_protocol: 99lookup_protocol:
102 err = -ESOCKTNOSUPPORT; 100 err = -ESOCKTNOSUPPORT;
103 rcu_read_lock(); 101 rcu_read_lock();
104 list_for_each_rcu(p, &inetsw6[sock->type]) { 102 list_for_each_entry_rcu(answer, &inetsw6[sock->type], list) {
105 answer = list_entry(p, struct inet_protosw, list);
106 103
104 err = 0;
107 /* Check the non-wild match. */ 105 /* Check the non-wild match. */
108 if (protocol == answer->protocol) { 106 if (protocol == answer->protocol) {
109 if (protocol != IPPROTO_IP) 107 if (protocol != IPPROTO_IP)
@@ -118,10 +116,9 @@ lookup_protocol:
118 break; 116 break;
119 } 117 }
120 err = -EPROTONOSUPPORT; 118 err = -EPROTONOSUPPORT;
121 answer = NULL;
122 } 119 }
123 120
124 if (!answer) { 121 if (err) {
125 if (try_loading_module < 2) { 122 if (try_loading_module < 2) {
126 rcu_read_unlock(); 123 rcu_read_unlock();
127 /* 124 /*
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 265b1b289a32..705959b31e24 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -497,7 +497,7 @@ static void iucv_setmask_up(void)
497 /* Disable all cpu but the first in cpu_irq_cpumask. */ 497 /* Disable all cpu but the first in cpu_irq_cpumask. */
498 cpumask = iucv_irq_cpumask; 498 cpumask = iucv_irq_cpumask;
499 cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); 499 cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
500 for_each_cpu_mask(cpu, cpumask) 500 for_each_cpu_mask_nr(cpu, cpumask)
501 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); 501 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
502} 502}
503 503
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 79bece16aede..dbb79adf8f3c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3910,7 +3910,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
3910 goto out; 3910 goto out;
3911 3911
3912 /* Map the socket to an unused fd that can be returned to the user. */ 3912 /* Map the socket to an unused fd that can be returned to the user. */
3913 retval = sock_map_fd(newsock); 3913 retval = sock_map_fd(newsock, 0);
3914 if (retval < 0) { 3914 if (retval < 0) {
3915 sock_release(newsock); 3915 sock_release(newsock);
3916 goto out; 3916 goto out;
diff --git a/net/socket.c b/net/socket.c
index 1ba57d888981..1310a82cbba7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -63,11 +63,13 @@
63#include <linux/file.h> 63#include <linux/file.h>
64#include <linux/net.h> 64#include <linux/net.h>
65#include <linux/interrupt.h> 65#include <linux/interrupt.h>
66#include <linux/thread_info.h>
66#include <linux/rcupdate.h> 67#include <linux/rcupdate.h>
67#include <linux/netdevice.h> 68#include <linux/netdevice.h>
68#include <linux/proc_fs.h> 69#include <linux/proc_fs.h>
69#include <linux/seq_file.h> 70#include <linux/seq_file.h>
70#include <linux/mutex.h> 71#include <linux/mutex.h>
72#include <linux/thread_info.h>
71#include <linux/wanrouter.h> 73#include <linux/wanrouter.h>
72#include <linux/if_bridge.h> 74#include <linux/if_bridge.h>
73#include <linux/if_frad.h> 75#include <linux/if_frad.h>
@@ -349,11 +351,11 @@ static struct dentry_operations sockfs_dentry_operations = {
349 * but we take care of internal coherence yet. 351 * but we take care of internal coherence yet.
350 */ 352 */
351 353
352static int sock_alloc_fd(struct file **filep) 354static int sock_alloc_fd(struct file **filep, int flags)
353{ 355{
354 int fd; 356 int fd;
355 357
356 fd = get_unused_fd(); 358 fd = get_unused_fd_flags(flags);
357 if (likely(fd >= 0)) { 359 if (likely(fd >= 0)) {
358 struct file *file = get_empty_filp(); 360 struct file *file = get_empty_filp();
359 361
@@ -367,7 +369,7 @@ static int sock_alloc_fd(struct file **filep)
367 return fd; 369 return fd;
368} 370}
369 371
370static int sock_attach_fd(struct socket *sock, struct file *file) 372static int sock_attach_fd(struct socket *sock, struct file *file, int flags)
371{ 373{
372 struct dentry *dentry; 374 struct dentry *dentry;
373 struct qstr name = { .name = "" }; 375 struct qstr name = { .name = "" };
@@ -389,20 +391,20 @@ static int sock_attach_fd(struct socket *sock, struct file *file)
389 init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE, 391 init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE,
390 &socket_file_ops); 392 &socket_file_ops);
391 SOCK_INODE(sock)->i_fop = &socket_file_ops; 393 SOCK_INODE(sock)->i_fop = &socket_file_ops;
392 file->f_flags = O_RDWR; 394 file->f_flags = O_RDWR | (flags & O_NONBLOCK);
393 file->f_pos = 0; 395 file->f_pos = 0;
394 file->private_data = sock; 396 file->private_data = sock;
395 397
396 return 0; 398 return 0;
397} 399}
398 400
399int sock_map_fd(struct socket *sock) 401int sock_map_fd(struct socket *sock, int flags)
400{ 402{
401 struct file *newfile; 403 struct file *newfile;
402 int fd = sock_alloc_fd(&newfile); 404 int fd = sock_alloc_fd(&newfile, flags);
403 405
404 if (likely(fd >= 0)) { 406 if (likely(fd >= 0)) {
405 int err = sock_attach_fd(sock, newfile); 407 int err = sock_attach_fd(sock, newfile, flags);
406 408
407 if (unlikely(err < 0)) { 409 if (unlikely(err < 0)) {
408 put_filp(newfile); 410 put_filp(newfile);
@@ -1218,12 +1220,27 @@ asmlinkage long sys_socket(int family, int type, int protocol)
1218{ 1220{
1219 int retval; 1221 int retval;
1220 struct socket *sock; 1222 struct socket *sock;
1223 int flags;
1224
1225 /* Check the SOCK_* constants for consistency. */
1226 BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
1227 BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
1228 BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
1229 BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);
1230
1231 flags = type & ~SOCK_TYPE_MASK;
1232 if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1233 return -EINVAL;
1234 type &= SOCK_TYPE_MASK;
1235
1236 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
1237 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1221 1238
1222 retval = sock_create(family, type, protocol, &sock); 1239 retval = sock_create(family, type, protocol, &sock);
1223 if (retval < 0) 1240 if (retval < 0)
1224 goto out; 1241 goto out;
1225 1242
1226 retval = sock_map_fd(sock); 1243 retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
1227 if (retval < 0) 1244 if (retval < 0)
1228 goto out_release; 1245 goto out_release;
1229 1246
@@ -1246,6 +1263,15 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
1246 struct socket *sock1, *sock2; 1263 struct socket *sock1, *sock2;
1247 int fd1, fd2, err; 1264 int fd1, fd2, err;
1248 struct file *newfile1, *newfile2; 1265 struct file *newfile1, *newfile2;
1266 int flags;
1267
1268 flags = type & ~SOCK_TYPE_MASK;
1269 if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1270 return -EINVAL;
1271 type &= SOCK_TYPE_MASK;
1272
1273 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
1274 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1249 1275
1250 /* 1276 /*
1251 * Obtain the first socket and check if the underlying protocol 1277 * Obtain the first socket and check if the underlying protocol
@@ -1264,13 +1290,13 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
1264 if (err < 0) 1290 if (err < 0)
1265 goto out_release_both; 1291 goto out_release_both;
1266 1292
1267 fd1 = sock_alloc_fd(&newfile1); 1293 fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC);
1268 if (unlikely(fd1 < 0)) { 1294 if (unlikely(fd1 < 0)) {
1269 err = fd1; 1295 err = fd1;
1270 goto out_release_both; 1296 goto out_release_both;
1271 } 1297 }
1272 1298
1273 fd2 = sock_alloc_fd(&newfile2); 1299 fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC);
1274 if (unlikely(fd2 < 0)) { 1300 if (unlikely(fd2 < 0)) {
1275 err = fd2; 1301 err = fd2;
1276 put_filp(newfile1); 1302 put_filp(newfile1);
@@ -1278,12 +1304,12 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
1278 goto out_release_both; 1304 goto out_release_both;
1279 } 1305 }
1280 1306
1281 err = sock_attach_fd(sock1, newfile1); 1307 err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK);
1282 if (unlikely(err < 0)) { 1308 if (unlikely(err < 0)) {
1283 goto out_fd2; 1309 goto out_fd2;
1284 } 1310 }
1285 1311
1286 err = sock_attach_fd(sock2, newfile2); 1312 err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK);
1287 if (unlikely(err < 0)) { 1313 if (unlikely(err < 0)) {
1288 fput(newfile1); 1314 fput(newfile1);
1289 goto out_fd1; 1315 goto out_fd1;
@@ -1401,14 +1427,20 @@ asmlinkage long sys_listen(int fd, int backlog)
 1401 * clean when we restructure accept also. 1427 * clean when we restructure accept also.
1402 */ 1428 */
1403 1429
1404asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, 1430long do_accept(int fd, struct sockaddr __user *upeer_sockaddr,
1405 int __user *upeer_addrlen) 1431 int __user *upeer_addrlen, int flags)
1406{ 1432{
1407 struct socket *sock, *newsock; 1433 struct socket *sock, *newsock;
1408 struct file *newfile; 1434 struct file *newfile;
1409 int err, len, newfd, fput_needed; 1435 int err, len, newfd, fput_needed;
1410 struct sockaddr_storage address; 1436 struct sockaddr_storage address;
1411 1437
1438 if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1439 return -EINVAL;
1440
1441 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
1442 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1443
1412 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1444 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1413 if (!sock) 1445 if (!sock)
1414 goto out; 1446 goto out;
@@ -1426,14 +1458,14 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
1426 */ 1458 */
1427 __module_get(newsock->ops->owner); 1459 __module_get(newsock->ops->owner);
1428 1460
1429 newfd = sock_alloc_fd(&newfile); 1461 newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC);
1430 if (unlikely(newfd < 0)) { 1462 if (unlikely(newfd < 0)) {
1431 err = newfd; 1463 err = newfd;
1432 sock_release(newsock); 1464 sock_release(newsock);
1433 goto out_put; 1465 goto out_put;
1434 } 1466 }
1435 1467
1436 err = sock_attach_fd(newsock, newfile); 1468 err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK);
1437 if (err < 0) 1469 if (err < 0)
1438 goto out_fd_simple; 1470 goto out_fd_simple;
1439 1471
@@ -1479,6 +1511,66 @@ out_fd:
1479 goto out_put; 1511 goto out_put;
1480} 1512}
1481 1513
1514#ifdef HAVE_SET_RESTORE_SIGMASK
1515asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr,
1516 int __user *upeer_addrlen,
1517 const sigset_t __user *sigmask,
1518 size_t sigsetsize, int flags)
1519{
1520 sigset_t ksigmask, sigsaved;
1521 int ret;
1522
1523 if (sigmask) {
1524 /* XXX: Don't preclude handling different sized sigset_t's. */
1525 if (sigsetsize != sizeof(sigset_t))
1526 return -EINVAL;
1527 if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
1528 return -EFAULT;
1529
1530 sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
1531 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
1532 }
1533
1534 ret = do_accept(fd, upeer_sockaddr, upeer_addrlen, flags);
1535
1536 if (ret < 0 && signal_pending(current)) {
1537 /*
1538 * Don't restore the signal mask yet. Let do_signal() deliver
1539 * the signal on the way back to userspace, before the signal
1540 * mask is restored.
1541 */
1542 if (sigmask) {
1543 memcpy(&current->saved_sigmask, &sigsaved,
1544 sizeof(sigsaved));
1545 set_restore_sigmask();
1546 }
1547 } else if (sigmask)
1548 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1549
1550 return ret;
1551}
1552#else
1553asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr,
1554 int __user *upeer_addrlen,
1555 const sigset_t __user *sigmask,
1556 size_t sigsetsize, int flags)
1557{
1558 /* The platform does not support restoring the signal mask in the
1559 * return path. So we do not allow using paccept() with a signal
1560 * mask. */
1561 if (sigmask)
1562 return -EINVAL;
1563
1564 return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags);
1565}
1566#endif
1567
1568asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
1569 int __user *upeer_addrlen)
1570{
1571 return do_accept(fd, upeer_sockaddr, upeer_addrlen, 0);
1572}
1573
1482/* 1574/*
1483 * Attempt to connect to a socket with the server address. The address 1575 * Attempt to connect to a socket with the server address. The address
1484 * is in user space so we verify it is OK and move it to kernel space. 1576 * is in user space so we verify it is OK and move it to kernel space.
@@ -1999,10 +2091,11 @@ out:
1999 2091
2000/* Argument list sizes for sys_socketcall */ 2092/* Argument list sizes for sys_socketcall */
2001#define AL(x) ((x) * sizeof(unsigned long)) 2093#define AL(x) ((x) * sizeof(unsigned long))
2002static const unsigned char nargs[18]={ 2094static const unsigned char nargs[19]={
2003 AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), 2095 AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
2004 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), 2096 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
2005 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3) 2097 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
2098 AL(6)
2006}; 2099};
2007 2100
2008#undef AL 2101#undef AL
@@ -2021,7 +2114,7 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args)
2021 unsigned long a0, a1; 2114 unsigned long a0, a1;
2022 int err; 2115 int err;
2023 2116
2024 if (call < 1 || call > SYS_RECVMSG) 2117 if (call < 1 || call > SYS_PACCEPT)
2025 return -EINVAL; 2118 return -EINVAL;
2026 2119
2027 /* copy_from_user should be SMP safe. */ 2120 /* copy_from_user should be SMP safe. */
@@ -2050,8 +2143,8 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args)
2050 break; 2143 break;
2051 case SYS_ACCEPT: 2144 case SYS_ACCEPT:
2052 err = 2145 err =
2053 sys_accept(a0, (struct sockaddr __user *)a1, 2146 do_accept(a0, (struct sockaddr __user *)a1,
2054 (int __user *)a[2]); 2147 (int __user *)a[2], 0);
2055 break; 2148 break;
2056 case SYS_GETSOCKNAME: 2149 case SYS_GETSOCKNAME:
2057 err = 2150 err =
@@ -2098,6 +2191,13 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args)
2098 case SYS_RECVMSG: 2191 case SYS_RECVMSG:
2099 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); 2192 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
2100 break; 2193 break;
2194 case SYS_PACCEPT:
2195 err =
2196 sys_paccept(a0, (struct sockaddr __user *)a1,
2197 (int __user *)a[2],
2198 (const sigset_t __user *) a[3],
2199 a[4], a[5]);
2200 break;
2101 default: 2201 default:
2102 err = -EINVAL; 2202 err = -EINVAL;
2103 break; 2203 break;
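/*
 * Illustrative sketch only, not part of the patch: sys_socket() and
 * do_accept() above now accept SOCK_CLOEXEC/SOCK_NONBLOCK bits packed into
 * the high bits of the type argument.  The splitting and validation logic in
 * isolation, with made-up constant values standing in for the real flags:
 */
#include <stdio.h>

#define SOCK_TYPE_MASK 0xf
#define MY_CLOEXEC     02000000   /* hypothetical, mirrors O_CLOEXEC  */
#define MY_NONBLOCK    00004000   /* hypothetical, mirrors O_NONBLOCK */

static int split_type(int *type, int *flags)
{
        *flags = *type & ~SOCK_TYPE_MASK;
        if (*flags & ~(MY_CLOEXEC | MY_NONBLOCK))
                return -1;                      /* unknown flag bit */
        *type &= SOCK_TYPE_MASK;
        return 0;
}

int main(void)
{
        int type = 1 /* e.g. a stream socket */ | MY_CLOEXEC | MY_NONBLOCK;
        int flags;

        if (split_type(&type, &flags) == 0)
                printf("type=%d cloexec=%d nonblock=%d\n", type,
                       !!(flags & MY_CLOEXEC), !!(flags & MY_NONBLOCK));
        return 0;
}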
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5a32cb7c4bb4..835d27413083 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -310,7 +310,8 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
310 switch (m->mode) { 310 switch (m->mode) {
311 case SVC_POOL_PERCPU: 311 case SVC_POOL_PERCPU:
312 { 312 {
313 set_cpus_allowed_ptr(task, &cpumask_of_cpu(node)); 313 cpumask_of_cpu_ptr(cpumask, node);
314 set_cpus_allowed_ptr(task, cpumask);
314 break; 315 break;
315 } 316 }
316 case SVC_POOL_PERNODE: 317 case SVC_POOL_PERNODE:
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 007c1a6708ee..63ada437fc2f 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -35,8 +35,22 @@ net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces)
35 return &namespaces->net_ns->sysctl_table_headers; 35 return &namespaces->net_ns->sysctl_table_headers;
36} 36}
37 37
38/* Return standard mode bits for table entry. */
39static int net_ctl_permissions(struct ctl_table_root *root,
40 struct nsproxy *nsproxy,
41 struct ctl_table *table)
42{
43 /* Allow network administrator to have same access as root. */
44 if (capable(CAP_NET_ADMIN)) {
45 int mode = (table->mode >> 6) & 7;
46 return (mode << 6) | (mode << 3) | mode;
47 }
48 return table->mode;
49}
50
38static struct ctl_table_root net_sysctl_root = { 51static struct ctl_table_root net_sysctl_root = {
39 .lookup = net_ctl_header_lookup, 52 .lookup = net_ctl_header_lookup,
53 .permissions = net_ctl_permissions,
40}; 54};
41 55
42static LIST_HEAD(net_sysctl_ro_tables); 56static LIST_HEAD(net_sysctl_ro_tables);
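/*
 * Illustrative sketch only, not part of the patch: net_ctl_permissions()
 * above takes the owner permission bits of a sysctl table entry and
 * replicates them into the group and other positions when the caller holds
 * CAP_NET_ADMIN.  The bit manipulation on its own:
 */
#include <stdio.h>

static int widen_owner_bits(int table_mode)
{
        int mode = (table_mode >> 6) & 7;        /* owner rwx bits      */

        return (mode << 6) | (mode << 3) | mode; /* copy to group/other */
}

int main(void)
{
        printf("0644 -> %04o\n", widen_owner_bits(0644)); /* prints 0666 */
        printf("0600 -> %04o\n", widen_owner_bits(0600)); /* prints 0666 */
        return 0;
}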
diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
index c972c0f54ce0..f63a663de158 100644
--- a/scripts/Makefile.fwinst
+++ b/scripts/Makefile.fwinst
@@ -17,14 +17,15 @@ include $(srctree)/$(obj)/Makefile
17 17
18include scripts/Makefile.host 18include scripts/Makefile.host
19 19
20mod-fw := $(addprefix $(INSTALL_FW_PATH)/,$(fw-shipped-m)) 20mod-fw := $(fw-shipped-m)
21
22# If CONFIG_FIRMWARE_IN_KERNEL isn't set, then install the 21# If CONFIG_FIRMWARE_IN_KERNEL isn't set, then install the
23# firmware for in-kernel drivers too. 22# firmware for in-kernel drivers too.
24ifndef CONFIG_FIRMWARE_IN_KERNEL 23ifndef CONFIG_FIRMWARE_IN_KERNEL
25mod-fw += $(addprefix $(INSTALL_FW_PATH)/,$(fw-shipped-y)) 24mod-fw += $(fw-shipped-y)
26endif 25endif
27 26
27installed-mod-fw := $(addprefix $(INSTALL_FW_PATH)/,$(mod-fw))
28
28installed-fw := $(addprefix $(INSTALL_FW_PATH)/,$(fw-shipped-all)) 29installed-fw := $(addprefix $(INSTALL_FW_PATH)/,$(fw-shipped-all))
29installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/. 30installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/.
30 31
@@ -49,7 +50,8 @@ PHONY += __fw_install __fw_modinst FORCE
49.PHONY: $(PHONY) 50.PHONY: $(PHONY)
50 51
51__fw_install: $(installed-fw) 52__fw_install: $(installed-fw)
52__fw_modinst: $(mod-fw) 53__fw_modinst: $(installed-mod-fw)
54__fw_modbuild: $(addprefix $(obj)/,$(mod-fw))
53 55
54FORCE: 56FORCE:
55 57
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6971bf078d13..bc6779398229 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -9,7 +9,7 @@ use strict;
9my $P = $0; 9my $P = $0;
10$P =~ s@.*/@@g; 10$P =~ s@.*/@@g;
11 11
12my $V = '0.19'; 12my $V = '0.21';
13 13
14use Getopt::Long qw(:config no_auto_abbrev); 14use Getopt::Long qw(:config no_auto_abbrev);
15 15
@@ -17,7 +17,6 @@ my $quiet = 0;
17my $tree = 1; 17my $tree = 1;
18my $chk_signoff = 1; 18my $chk_signoff = 1;
19my $chk_patch = 1; 19my $chk_patch = 1;
20my $tst_type = 0;
21my $tst_only; 20my $tst_only;
22my $emacs = 0; 21my $emacs = 0;
23my $terse = 0; 22my $terse = 0;
@@ -44,7 +43,6 @@ GetOptions(
44 'summary-file!' => \$summary_file, 43 'summary-file!' => \$summary_file,
45 44
46 'debug=s' => \%debug, 45 'debug=s' => \%debug,
47 'test-type!' => \$tst_type,
48 'test-only=s' => \$tst_only, 46 'test-only=s' => \$tst_only,
49) or exit; 47) or exit;
50 48
@@ -67,6 +65,7 @@ if ($#ARGV < 0) {
67 65
68my $dbg_values = 0; 66my $dbg_values = 0;
69my $dbg_possible = 0; 67my $dbg_possible = 0;
68my $dbg_type = 0;
70for my $key (keys %debug) { 69for my $key (keys %debug) {
71 eval "\${dbg_$key} = '$debug{$key}';" 70 eval "\${dbg_$key} = '$debug{$key}';"
72} 71}
@@ -169,24 +168,23 @@ our @modifierList = (
169); 168);
170 169
171sub build_types { 170sub build_types {
172 my $mods = "(?: \n" . join("|\n ", @modifierList) . "\n)"; 171 my $mods = "(?x: \n" . join("|\n ", @modifierList) . "\n)";
173 my $all = "(?: \n" . join("|\n ", @typeList) . "\n)"; 172 my $all = "(?x: \n" . join("|\n ", @typeList) . "\n)";
173 $Modifier = qr{(?:$Attribute|$Sparse|$mods)};
174 $NonptrType = qr{ 174 $NonptrType = qr{
175 (?:const\s+)? 175 (?:$Modifier\s+|const\s+)*
176 (?:$mods\s+)?
177 (?: 176 (?:
178 (?:typeof|__typeof__)\s*\(\s*\**\s*$Ident\s*\)| 177 (?:typeof|__typeof__)\s*\(\s*\**\s*$Ident\s*\)|
179 (?:${all}\b) 178 (?:${all}\b)
180 ) 179 )
181 (?:\s+$Sparse|\s+const)* 180 (?:\s+$Modifier|\s+const)*
182 }x; 181 }x;
183 $Type = qr{ 182 $Type = qr{
184 $NonptrType 183 $NonptrType
185 (?:\s*\*+\s*const|\s*\*+|(?:\s*\[\s*\])+)? 184 (?:\s*\*+\s*const|\s*\*+|(?:\s*\[\s*\])+)?
186 (?:\s+$Inline|\s+$Sparse|\s+$Attribute|\s+$mods)* 185 (?:\s+$Inline|\s+$Modifier)*
187 }x; 186 }x;
188 $Declare = qr{(?:$Storage\s+)?$Type}; 187 $Declare = qr{(?:$Storage\s+)?$Type};
189 $Modifier = qr{(?:$Attribute|$Sparse|$mods)};
190} 188}
191build_types(); 189build_types();
192 190
@@ -470,7 +468,9 @@ sub ctx_statement_block {
470 } 468 }
471 $off++; 469 $off++;
472 } 470 }
471 # We are truly at the end, so shuffle to the next line.
473 if ($off == $len) { 472 if ($off == $len) {
473 $loff = $len + 1;
474 $line++; 474 $line++;
475 $remain--; 475 $remain--;
476 } 476 }
@@ -631,7 +631,7 @@ sub ctx_locate_comment {
631 my ($first_line, $end_line) = @_; 631 my ($first_line, $end_line) = @_;
632 632
633 # Catch a comment on the end of the line itself. 633 # Catch a comment on the end of the line itself.
634 my ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*$@); 634 my ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*(?:\\\s*)?$@);
635 return $current_comment if (defined $current_comment); 635 return $current_comment if (defined $current_comment);
636 636
637 # Look through the context and try and figure out if there is a 637 # Look through the context and try and figure out if there is a
@@ -689,17 +689,20 @@ sub cat_vet {
689my $av_preprocessor = 0; 689my $av_preprocessor = 0;
690my $av_pending; 690my $av_pending;
691my @av_paren_type; 691my @av_paren_type;
692my $av_pend_colon;
692 693
693sub annotate_reset { 694sub annotate_reset {
694 $av_preprocessor = 0; 695 $av_preprocessor = 0;
695 $av_pending = '_'; 696 $av_pending = '_';
696 @av_paren_type = ('E'); 697 @av_paren_type = ('E');
698 $av_pend_colon = 'O';
697} 699}
698 700
699sub annotate_values { 701sub annotate_values {
700 my ($stream, $type) = @_; 702 my ($stream, $type) = @_;
701 703
702 my $res; 704 my $res;
705 my $var = '_' x length($stream);
703 my $cur = $stream; 706 my $cur = $stream;
704 707
705 print "$stream\n" if ($dbg_values > 1); 708 print "$stream\n" if ($dbg_values > 1);
@@ -715,10 +718,14 @@ sub annotate_values {
715 $av_preprocessor = 0; 718 $av_preprocessor = 0;
716 } 719 }
717 720
718 } elsif ($cur =~ /^($Type)/) { 721 } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\()/) {
719 print "DECLARE($1)\n" if ($dbg_values > 1); 722 print "DECLARE($1)\n" if ($dbg_values > 1);
720 $type = 'T'; 723 $type = 'T';
721 724
725 } elsif ($cur =~ /^($Modifier)\s*/) {
726 print "MODIFIER($1)\n" if ($dbg_values > 1);
727 $type = 'T';
728
722 } elsif ($cur =~ /^(\#\s*define\s*$Ident)(\(?)/o) { 729 } elsif ($cur =~ /^(\#\s*define\s*$Ident)(\(?)/o) {
723 print "DEFINE($1,$2)\n" if ($dbg_values > 1); 730 print "DEFINE($1,$2)\n" if ($dbg_values > 1);
724 $av_preprocessor = 1; 731 $av_preprocessor = 1;
@@ -780,7 +787,12 @@ sub annotate_values {
780 $av_pending = 'N'; 787 $av_pending = 'N';
781 $type = 'N'; 788 $type = 'N';
782 789
783 } elsif ($cur =~/^(return|case|else)/o) { 790 } elsif ($cur =~/^(case)/o) {
791 print "CASE($1)\n" if ($dbg_values > 1);
792 $av_pend_colon = 'C';
793 $type = 'N';
794
795 } elsif ($cur =~/^(return|else|goto)/o) {
784 print "KEYWORD($1)\n" if ($dbg_values > 1); 796 print "KEYWORD($1)\n" if ($dbg_values > 1);
785 $type = 'N'; 797 $type = 'N';
786 798
@@ -800,10 +812,20 @@ sub annotate_values {
800 print "PAREN('$1')\n" if ($dbg_values > 1); 812 print "PAREN('$1')\n" if ($dbg_values > 1);
801 } 813 }
802 814
803 } elsif ($cur =~ /^($Ident)\(/o) { 815 } elsif ($cur =~ /^($Ident)\s*\(/o) {
804 print "FUNC($1)\n" if ($dbg_values > 1); 816 print "FUNC($1)\n" if ($dbg_values > 1);
817 $type = 'V';
805 $av_pending = 'V'; 818 $av_pending = 'V';
806 819
820 } elsif ($cur =~ /^($Ident\s*):/) {
821 if ($type eq 'E') {
822 $av_pend_colon = 'L';
823 } elsif ($type eq 'T') {
824 $av_pend_colon = 'B';
825 }
826 print "IDENT_COLON($1,$type>$av_pend_colon)\n" if ($dbg_values > 1);
827 $type = 'V';
828
807 } elsif ($cur =~ /^($Ident|$Constant)/o) { 829 } elsif ($cur =~ /^($Ident|$Constant)/o) {
808 print "IDENT($1)\n" if ($dbg_values > 1); 830 print "IDENT($1)\n" if ($dbg_values > 1);
809 $type = 'V'; 831 $type = 'V';
@@ -815,11 +837,40 @@ sub annotate_values {
815 } elsif ($cur =~/^(;|{|})/) { 837 } elsif ($cur =~/^(;|{|})/) {
816 print "END($1)\n" if ($dbg_values > 1); 838 print "END($1)\n" if ($dbg_values > 1);
817 $type = 'E'; 839 $type = 'E';
840 $av_pend_colon = 'O';
841
842 } elsif ($cur =~ /^(\?)/o) {
843 print "QUESTION($1)\n" if ($dbg_values > 1);
844 $type = 'N';
818 845
819 } elsif ($cur =~ /^(;|\?|:|\[)/o) { 846 } elsif ($cur =~ /^(:)/o) {
847 print "COLON($1,$av_pend_colon)\n" if ($dbg_values > 1);
848
849 substr($var, length($res), 1, $av_pend_colon);
850 if ($av_pend_colon eq 'C' || $av_pend_colon eq 'L') {
851 $type = 'E';
852 } else {
853 $type = 'N';
854 }
855 $av_pend_colon = 'O';
856
857 } elsif ($cur =~ /^(;|\[)/o) {
820 print "CLOSE($1)\n" if ($dbg_values > 1); 858 print "CLOSE($1)\n" if ($dbg_values > 1);
821 $type = 'N'; 859 $type = 'N';
822 860
861 } elsif ($cur =~ /^(-(?![->])|\+(?!\+)|\*|\&(?!\&))/o) {
862 my $variant;
863
864 print "OPV($1)\n" if ($dbg_values > 1);
865 if ($type eq 'V') {
866 $variant = 'B';
867 } else {
868 $variant = 'U';
869 }
870
871 substr($var, length($res), 1, $variant);
872 $type = 'N';
873
823 } elsif ($cur =~ /^($Operators)/o) { 874 } elsif ($cur =~ /^($Operators)/o) {
824 print "OP($1)\n" if ($dbg_values > 1); 875 print "OP($1)\n" if ($dbg_values > 1);
825 if ($1 ne '++' && $1 ne '--') { 876 if ($1 ne '++' && $1 ne '--') {
@@ -835,17 +886,17 @@ sub annotate_values {
835 } 886 }
836 } 887 }
837 888
838 return $res; 889 return ($res, $var);
839} 890}
840 891
841sub possible { 892sub possible {
842 my ($possible, $line) = @_; 893 my ($possible, $line) = @_;
843 894
844 print "CHECK<$possible> ($line)\n" if ($dbg_possible > 1); 895 print "CHECK<$possible> ($line)\n" if ($dbg_possible > 1);
845 if ($possible !~ /^(?:$Storage|$Type|DEFINE_\S+)$/ && 896 if ($possible !~ /^(?:$Modifier|$Storage|$Type|DEFINE_\S+)$/ &&
846 $possible ne 'goto' && $possible ne 'return' && 897 $possible ne 'goto' && $possible ne 'return' &&
847 $possible ne 'case' && $possible ne 'else' && 898 $possible ne 'case' && $possible ne 'else' &&
848 $possible ne 'asm' && 899 $possible ne 'asm' && $possible ne '__asm__' &&
849 $possible !~ /^(typedef|struct|enum)\b/) { 900 $possible !~ /^(typedef|struct|enum)\b/) {
850 # Check for modifiers. 901 # Check for modifiers.
851 $possible =~ s/\s*$Storage\s*//g; 902 $possible =~ s/\s*$Storage\s*//g;
@@ -854,8 +905,10 @@ sub possible {
854 905
855 } elsif ($possible =~ /\s/) { 906 } elsif ($possible =~ /\s/) {
856 $possible =~ s/\s*$Type\s*//g; 907 $possible =~ s/\s*$Type\s*//g;
857 warn "MODIFIER: $possible ($line)\n" if ($dbg_possible); 908 for my $modifier (split(' ', $possible)) {
858 push(@modifierList, $possible); 909 warn "MODIFIER: $modifier ($possible) ($line)\n" if ($dbg_possible);
910 push(@modifierList, $modifier);
911 }
859 912
860 } else { 913 } else {
861 warn "POSSIBLE: $possible ($line)\n" if ($dbg_possible); 914 warn "POSSIBLE: $possible ($line)\n" if ($dbg_possible);
@@ -1135,7 +1188,9 @@ sub process {
1135 } 1188 }
1136#80 column limit 1189#80 column limit
1137 if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ && 1190 if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
1138 $rawline !~ /^.\s*\*\s*\@$Ident\s/ && $length > 80) 1191 $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
1192 $line !~ /^\+\s*printk\s*\(\s*(?:KERN_\S+\s*)?"[X\t]*"\s*(?:,|\)\s*;)\s*$/ &&
1193 $length > 80)
1139 { 1194 {
1140 WARN("line over 80 characters\n" . $herecurr); 1195 WARN("line over 80 characters\n" . $herecurr);
1141 } 1196 }
@@ -1162,10 +1217,10 @@ sub process {
1162 } 1217 }
1163 1218
1164# Check for potential 'bare' types 1219# Check for potential 'bare' types
1165 my ($stat, $cond); 1220 my ($stat, $cond, $line_nr_next, $remain_next);
1166 if ($realcnt && $line =~ /.\s*\S/) { 1221 if ($realcnt && $line =~ /.\s*\S/) {
1167 ($stat, $cond) = ctx_statement_block($linenr, 1222 ($stat, $cond, $line_nr_next, $remain_next) =
1168 $realcnt, 0); 1223 ctx_statement_block($linenr, $realcnt, 0);
1169 $stat =~ s/\n./\n /g; 1224 $stat =~ s/\n./\n /g;
1170 $cond =~ s/\n./\n /g; 1225 $cond =~ s/\n./\n /g;
1171 1226
@@ -1179,7 +1234,7 @@ sub process {
1179 } elsif ($s =~ /^.\s*$Ident\s*\(/s) { 1234 } elsif ($s =~ /^.\s*$Ident\s*\(/s) {
1180 1235
1181 # declarations always start with types 1236 # declarations always start with types
1182 } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?((?:\s*$Ident)+)\b(?:\s+$Sparse)?\s*\**\s*(?:$Ident|\(\*[^\)]*\))\s*(?:;|=|,|\()/s) { 1237 } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?((?:\s*$Ident)+?)\b(?:\s+$Sparse)?\s*\**\s*(?:$Ident|\(\*[^\)]*\))(?:\s*$Modifier)?\s*(?:;|=|,|\()/s) {
1183 my $type = $1; 1238 my $type = $1;
1184 $type =~ s/\s+/ /g; 1239 $type =~ s/\s+/ /g;
1185 possible($type, "A:" . $s); 1240 possible($type, "A:" . $s);
@@ -1239,6 +1294,10 @@ sub process {
1239 ERROR("switch and case should be at the same indent\n$hereline$err"); 1294 ERROR("switch and case should be at the same indent\n$hereline$err");
1240 } 1295 }
1241 } 1296 }
1297 if ($line =~ /^.\s*(?:case\s*.*|default\s*):/g &&
1298 $line !~ /\G(?:\s*{)?(?:\s*$;*)(?:\s*\\)?\s*$/g) {
1299 ERROR("trailing statements should be on next line\n" . $herecurr);
1300 }
1242 1301
1243# if/while/etc brace do not go on next line, unless defining a do while loop, 1302# if/while/etc brace do not go on next line, unless defining a do while loop,
1244# or if that brace on the next line is for something else 1303# or if that brace on the next line is for something else
@@ -1246,17 +1305,22 @@ sub process {
1246 my $pre_ctx = "$1$2"; 1305 my $pre_ctx = "$1$2";
1247 1306
1248 my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0); 1307 my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
1249 my $ctx_ln = $linenr + $#ctx + 1;
1250 my $ctx_cnt = $realcnt - $#ctx - 1; 1308 my $ctx_cnt = $realcnt - $#ctx - 1;
1251 my $ctx = join("\n", @ctx); 1309 my $ctx = join("\n", @ctx);
1252 1310
1253 ##warn "realcnt<$realcnt> ctx_cnt<$ctx_cnt>\n"; 1311 my $ctx_ln = $linenr;
1312 my $ctx_skip = $realcnt;
1254 1313
1255 # Skip over any removed lines in the context following statement. 1314 while ($ctx_skip > $ctx_cnt || ($ctx_skip == $ctx_cnt &&
1256 while (defined($lines[$ctx_ln - 1]) && $lines[$ctx_ln - 1] =~ /^-/) { 1315 defined $lines[$ctx_ln - 1] &&
1316 $lines[$ctx_ln - 1] =~ /^-/)) {
1317 ##print "SKIP<$ctx_skip> CNT<$ctx_cnt>\n";
1318 $ctx_skip-- if (!defined $lines[$ctx_ln - 1] || $lines[$ctx_ln - 1] !~ /^-/);
1257 $ctx_ln++; 1319 $ctx_ln++;
1258 } 1320 }
1259 ##warn "pre<$pre_ctx>\nline<$line>\nctx<$ctx>\nnext<$lines[$ctx_ln - 1]>\n"; 1321
1322 #print "realcnt<$realcnt> ctx_cnt<$ctx_cnt>\n";
1323 #print "pre<$pre_ctx>\nline<$line>\nctx<$ctx>\nnext<$lines[$ctx_ln - 1]>\n";
1260 1324
1261 if ($ctx !~ /{\s*/ && defined($lines[$ctx_ln -1]) && $lines[$ctx_ln - 1] =~ /^\+\s*{/) { 1325 if ($ctx !~ /{\s*/ && defined($lines[$ctx_ln -1]) && $lines[$ctx_ln - 1] =~ /^\+\s*{/) {
1262 ERROR("that open brace { should be on the previous line\n" . 1326 ERROR("that open brace { should be on the previous line\n" .
@@ -1276,12 +1340,14 @@ sub process {
1276 1340
1277 # Track the 'values' across context and added lines. 1341 # Track the 'values' across context and added lines.
1278 my $opline = $line; $opline =~ s/^./ /; 1342 my $opline = $line; $opline =~ s/^./ /;
1279 my $curr_values = annotate_values($opline . "\n", $prev_values); 1343 my ($curr_values, $curr_vars) =
1344 annotate_values($opline . "\n", $prev_values);
1280 $curr_values = $prev_values . $curr_values; 1345 $curr_values = $prev_values . $curr_values;
1281 if ($dbg_values) { 1346 if ($dbg_values) {
1282 my $outline = $opline; $outline =~ s/\t/ /g; 1347 my $outline = $opline; $outline =~ s/\t/ /g;
1283 print "$linenr > .$outline\n"; 1348 print "$linenr > .$outline\n";
1284 print "$linenr > $curr_values\n"; 1349 print "$linenr > $curr_values\n";
1350 print "$linenr > $curr_vars\n";
1285 } 1351 }
1286 $prev_values = substr($curr_values, -1); 1352 $prev_values = substr($curr_values, -1);
1287 1353
@@ -1289,8 +1355,12 @@ sub process {
1289 if ($line=~/^[^\+]/) {next;} 1355 if ($line=~/^[^\+]/) {next;}
1290 1356
1291# TEST: allow direct testing of the type matcher. 1357# TEST: allow direct testing of the type matcher.
1292 if ($tst_type && $line =~ /^.$Declare$/) { 1358 if ($dbg_type) {
1293 ERROR("TEST: is type $Declare\n" . $herecurr); 1359 if ($line =~ /^.\s*$Declare\s*$/) {
1360 ERROR("TEST: is type\n" . $herecurr);
1361 } elsif ($dbg_type > 1 && $line =~ /^.+($Declare)/) {
1362 ERROR("TEST: is not type ($1 is)\n". $herecurr);
1363 }
1294 next; 1364 next;
1295 } 1365 }
1296 1366
@@ -1365,11 +1435,11 @@ sub process {
1365 ERROR("\"(foo $1 )\" should be \"(foo $1)\"\n" . 1435 ERROR("\"(foo $1 )\" should be \"(foo $1)\"\n" .
1366 $herecurr); 1436 $herecurr);
1367 1437
1368 } elsif ($line =~ m{$NonptrType(\*+)(?:\s+(?:$Attribute|$Sparse))?\s+[A-Za-z\d_]+}) { 1438 } elsif ($line =~ m{\b$NonptrType(\*+)(?:\s+(?:$Attribute|$Sparse))?\s+[A-Za-z\d_]+}) {
1369 ERROR("\"foo$1 bar\" should be \"foo $1bar\"\n" . 1439 ERROR("\"foo$1 bar\" should be \"foo $1bar\"\n" .
1370 $herecurr); 1440 $herecurr);
1371 1441
1372 } elsif ($line =~ m{$NonptrType\s+(\*+)(?!\s+(?:$Attribute|$Sparse))\s+[A-Za-z\d_]+}) { 1442 } elsif ($line =~ m{\b$NonptrType\s+(\*+)(?!\s+(?:$Attribute|$Sparse))\s+[A-Za-z\d_]+}) {
1373 ERROR("\"foo $1 bar\" should be \"foo $1bar\"\n" . 1443 ERROR("\"foo $1 bar\" should be \"foo $1bar\"\n" .
1374 $herecurr); 1444 $herecurr);
1375 } 1445 }
@@ -1421,6 +1491,17 @@ sub process {
1421 ERROR("open brace '{' following $1 goes on the same line\n" . $hereprev); 1491 ERROR("open brace '{' following $1 goes on the same line\n" . $hereprev);
1422 } 1492 }
1423 1493
1494# check for spacing round square brackets; allowed:
1495# 1. with a type on the left -- int [] a;
1496# 2. at the beginning of a line for slice initialisers -- [0..10] = 5,
1497 while ($line =~ /(.*?\s)\[/g) {
1498 my ($where, $prefix) = ($-[1], $1);
1499 if ($prefix !~ /$Type\s+$/ &&
1500 ($where != 0 || $prefix !~ /^.\s+$/)) {
1501 ERROR("space prohibited before open square bracket '['\n" . $herecurr);
1502 }
1503 }
1504
1424# check for spaces between functions and their parentheses. 1505# check for spaces between functions and their parentheses.
1425 while ($line =~ /($Ident)\s+\(/g) { 1506 while ($line =~ /($Ident)\s+\(/g) {
1426 my $name = $1; 1507 my $name = $1;
@@ -1457,7 +1538,8 @@ sub process {
1457 <<=|>>=|<=|>=|==|!=| 1538 <<=|>>=|<=|>=|==|!=|
1458 \+=|-=|\*=|\/=|%=|\^=|\|=|&=| 1539 \+=|-=|\*=|\/=|%=|\^=|\|=|&=|
1459 =>|->|<<|>>|<|>|=|!|~| 1540 =>|->|<<|>>|<|>|=|!|~|
1460 &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|% 1541 &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%|
1542 \?|:
1461 }x; 1543 }x;
1462 my @elements = split(/($ops|;)/, $opline); 1544 my @elements = split(/($ops|;)/, $opline);
1463 my $off = 0; 1545 my $off = 0;
@@ -1504,22 +1586,11 @@ sub process {
1504 my $ptr = substr($blank, 0, $off) . "^"; 1586 my $ptr = substr($blank, 0, $off) . "^";
1505 my $hereptr = "$hereline$ptr\n"; 1587 my $hereptr = "$hereline$ptr\n";
1506 1588
1507 # Classify operators into binary, unary, or 1589 # Pull out the value of this operator.
1508 # definitions (* only) where they have more
1509 # than one mode.
1510 my $op_type = substr($curr_values, $off + 1, 1); 1590 my $op_type = substr($curr_values, $off + 1, 1);
1511 my $op_left = substr($curr_values, $off, 1); 1591
1512 my $is_unary; 1592 # Get the full operator variant.
1513 if ($op_type eq 'T') { 1593 my $opv = $op . substr($curr_vars, $off, 1);
1514 $is_unary = 2;
1515 } elsif ($op_left eq 'V') {
1516 $is_unary = 0;
1517 } else {
1518 $is_unary = 1;
1519 }
1520 #if ($op eq '-' || $op eq '&' || $op eq '*') {
1521 # print "UNARY: <$op_left$op_type $is_unary $a:$op:$c> <$ca:$op:$cc> <$unary_ctx>\n";
1522 #}
1523 1594
1524 # Ignore operators passed as parameters. 1595 # Ignore operators passed as parameters.
1525 if ($op_type ne 'V' && 1596 if ($op_type ne 'V' &&
@@ -1538,8 +1609,10 @@ sub process {
1538 # // is a comment 1609 # // is a comment
1539 } elsif ($op eq '//') { 1610 } elsif ($op eq '//') {
1540 1611
1541 # -> should have no spaces 1612 # No spaces for:
1542 } elsif ($op eq '->') { 1613 # ->
1614 # : when part of a bitfield
1615 } elsif ($op eq '->' || $opv eq ':B') {
1543 if ($ctx =~ /Wx.|.xW/) { 1616 if ($ctx =~ /Wx.|.xW/) {
1544 ERROR("spaces prohibited around that '$op' $at\n" . $hereptr); 1617 ERROR("spaces prohibited around that '$op' $at\n" . $hereptr);
1545 } 1618 }
@@ -1551,18 +1624,19 @@ sub process {
1551 } 1624 }
1552 1625
1553 # '*' as part of a type definition -- reported already. 1626 # '*' as part of a type definition -- reported already.
1554 } elsif ($op eq '*' && $is_unary == 2) { 1627 } elsif ($opv eq '*_') {
1555 #warn "'*' is part of type\n"; 1628 #warn "'*' is part of type\n";
1556 1629
1557 # unary operators should have a space before and 1630 # unary operators should have a space before and
1558 # none after. May be left adjacent to another 1631 # none after. May be left adjacent to another
1559 # unary operator, or a cast 1632 # unary operator, or a cast
1560 } elsif ($op eq '!' || $op eq '~' || 1633 } elsif ($op eq '!' || $op eq '~' ||
1561 ($is_unary && ($op eq '*' || $op eq '-' || $op eq '&'))) { 1634 $opv eq '*U' || $opv eq '-U' ||
1635 $opv eq '&U') {
1562 if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) { 1636 if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
1563 ERROR("space required before that '$op' $at\n" . $hereptr); 1637 ERROR("space required before that '$op' $at\n" . $hereptr);
1564 } 1638 }
1565 if ($op eq '*' && $cc =~/\s*const\b/) { 1639 if ($op eq '*' && $cc =~/\s*const\b/) {
1566 # A unary '*' may be const 1640 # A unary '*' may be const
1567 1641
1568 } elsif ($ctx =~ /.xW/) { 1642 } elsif ($ctx =~ /.xW/) {
@@ -1595,11 +1669,33 @@ sub process {
1595 $hereptr); 1669 $hereptr);
1596 } 1670 }
1597 1671
1672 # A colon needs no spaces before when it is
1673 # terminating a case value or a label.
1674 } elsif ($opv eq ':C' || $opv eq ':L') {
1675 if ($ctx =~ /Wx./) {
1676 ERROR("space prohibited before that '$op' $at\n" . $hereptr);
1677 }
1678
1598 # All the others need spaces both sides. 1679 # All the others need spaces both sides.
1599 } elsif ($ctx !~ /[EWC]x[CWE]/) { 1680 } elsif ($ctx !~ /[EWC]x[CWE]/) {
1681 my $ok = 0;
1682
1600 # Ignore email addresses <foo@bar> 1683 # Ignore email addresses <foo@bar>
1601 if (!($op eq '<' && $cb =~ /$;\S+\@\S+>/) && 1684 if (($op eq '<' &&
1602 !($op eq '>' && $cb =~ /<\S+\@\S+$;/)) { 1685 $cc =~ /^\S+\@\S+>/) ||
1686 ($op eq '>' &&
1687 $ca =~ /<\S+\@\S+$/))
1688 {
1689 $ok = 1;
1690 }
1691
1692 # Ignore ?:
1693 if (($opv eq ':O' && $ca =~ /\?$/) ||
1694 ($op eq '?' && $cc =~ /^:/)) {
1695 $ok = 1;
1696 }
1697
1698 if ($ok == 0) {
1603 ERROR("spaces required around that '$op' $at\n" . $hereptr); 1699 ERROR("spaces required around that '$op' $at\n" . $hereptr);
1604 } 1700 }
1605 } 1701 }
@@ -1670,6 +1766,7 @@ sub process {
1670 my $value = $2; 1766 my $value = $2;
1671 1767
1672 # Flatten any parentheses and braces 1768 # Flatten any parentheses and braces
1769 $value =~ s/\)\(/\) \(/g;
1673 while ($value =~ s/\([^\(\)]*\)/1/) { 1770 while ($value =~ s/\([^\(\)]*\)/1/) {
1674 } 1771 }
1675 1772
@@ -1686,8 +1783,9 @@ sub process {
1686 ERROR("space required before the open parenthesis '('\n" . $herecurr); 1783 ERROR("space required before the open parenthesis '('\n" . $herecurr);
1687 } 1784 }
1688 1785
1689# Check for illegal assignment in if conditional. 1786# Check for illegal assignment in if conditional -- and check for trailing
1690 if ($line =~ /\bif\s*\(/) { 1787# statements after the conditional.
1788 if ($line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
1691 my ($s, $c) = ($stat, $cond); 1789 my ($s, $c) = ($stat, $cond);
1692 1790
1693 if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/) { 1791 if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/) {
@@ -1699,13 +1797,63 @@ sub process {
1699 substr($s, 0, length($c), ''); 1797 substr($s, 0, length($c), '');
1700 $s =~ s/\n.*//g; 1798 $s =~ s/\n.*//g;
1701 $s =~ s/$;//g; # Remove any comments 1799 $s =~ s/$;//g; # Remove any comments
1702 if (length($c) && $s !~ /^\s*({|;|)\s*\\*\s*$/ && 1800 if (length($c) && $s !~ /^\s*{?\s*\\*\s*$/ &&
1703 $c !~ /^.\s*\#\s*if/) 1801 $c !~ /}\s*while\s*/)
1704 { 1802 {
1705 ERROR("trailing statements should be on next line\n" . $herecurr); 1803 ERROR("trailing statements should be on next line\n" . $herecurr);
1706 } 1804 }
1707 } 1805 }
1708 1806
1807# Check relative indent for conditionals and blocks.
1808 if ($line =~ /\b(?:(?:if|while|for)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
1809 my ($s, $c) = ($stat, $cond);
1810
1811 substr($s, 0, length($c), '');
1812
1813 # Make sure we remove the line prefixes as we have
1814 # none on the first line, and are going to readd them
1815 # where necessary.
1816 $s =~ s/\n./\n/gs;
1817
1818 # We want to check the first line inside the block
1819 # starting at the end of the conditional, so remove:
1820 # 1) any blank line termination
1821 # 2) any opening brace { on end of the line
1822 # 3) any do (...) {
1823 my $continuation = 0;
1824 my $check = 0;
1825 $s =~ s/^.*\bdo\b//;
1826 $s =~ s/^\s*{//;
1827 if ($s =~ s/^\s*\\//) {
1828 $continuation = 1;
1829 }
1830 if ($s =~ s/^\s*\n//) {
1831 $check = 1;
1832 }
1833
1834 # Also ignore a loop construct at the end of a
1835 # preprocessor statement.
1836 if (($prevline =~ /^.\s*#\s*define\s/ ||
1837 $prevline =~ /\\\s*$/) && $continuation == 0) {
1838 $check = 0;
1839 }
1840
1841 # Ignore the current line if it is a preprocessor
1842 # line.
1843 if ($s =~ /^\s*#\s*/) {
1844 $check = 0;
1845 }
1846
1847 my (undef, $sindent) = line_stats("+" . $s);
1848
1849 ##print "line<$line> prevline<$prevline> indent<$indent> sindent<$sindent> check<$check> continuation<$continuation> s<$s>\n";
1850
1851 if ($check && (($sindent % 8) != 0 ||
1852 ($sindent <= $indent && $s ne ''))) {
1853 WARN("suspect code indent for conditional statements\n" . $herecurr);
1854 }
1855 }
1856
1709# Check for bitwise tests written as boolean 1857# Check for bitwise tests written as boolean
1710 if ($line =~ / 1858 if ($line =~ /
1711 (?: 1859 (?:
@@ -1777,7 +1925,8 @@ sub process {
1777# multi-statement macros should be enclosed in a do while loop, grab the 1925# multi-statement macros should be enclosed in a do while loop, grab the
1778# first statement and ensure its the whole macro if its not enclosed 1926# first statement and ensure its the whole macro if its not enclosed
1779# in a known good container 1927# in a known good container
1780 if ($line =~ /^.\s*\#\s*define\s*$Ident(\()?/) { 1928 if ($realfile !~ m@/vmlinux.lds.h$@ &&
1929 $line =~ /^.\s*\#\s*define\s*$Ident(\()?/) {
1781 my $ln = $linenr; 1930 my $ln = $linenr;
1782 my $cnt = $realcnt; 1931 my $cnt = $realcnt;
1783 my ($off, $dstat, $dcond, $rest); 1932 my ($off, $dstat, $dcond, $rest);
@@ -1791,30 +1940,26 @@ sub process {
1791 $lines[$ln - 1] =~ /^(?:-|..*\\$)/) 1940 $lines[$ln - 1] =~ /^(?:-|..*\\$)/)
1792 { 1941 {
1793 $ctx .= $rawlines[$ln - 1] . "\n"; 1942 $ctx .= $rawlines[$ln - 1] . "\n";
1943 $cnt-- if ($lines[$ln - 1] !~ /^-/);
1794 $ln++; 1944 $ln++;
1795 $cnt--;
1796 } 1945 }
1797 $ctx .= $rawlines[$ln - 1]; 1946 $ctx .= $rawlines[$ln - 1];
1798 1947
1799 ($dstat, $dcond, $ln, $cnt, $off) = 1948 ($dstat, $dcond, $ln, $cnt, $off) =
1800 ctx_statement_block($linenr, $ln - $linenr + 1, 0); 1949 ctx_statement_block($linenr, $ln - $linenr + 1, 0);
1801 #print "dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\n"; 1950 #print "dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\n";
1802 #print "LINE<$lines[$ln]> len<" . length($lines[$ln]) . "\n"; 1951 #print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n";
1803 1952
1804 # Extract the remainder of the define (if any) and 1953 # Extract the remainder of the define (if any) and
1805 # rip off surrounding spaces, and trailing \'s. 1954 # rip off surrounding spaces, and trailing \'s.
1806 $rest = ''; 1955 $rest = '';
1807 if (defined $lines[$ln - 1] && 1956 while ($off != 0 || ($cnt > 0 && $rest =~ /(?:^|\\)\s*$/)) {
1808 $off > length($lines[$ln - 1])) 1957 #print "ADDING $off <" . substr($lines[$ln - 1], $off) . ">\n";
1809 { 1958 if ($off != 0 || $lines[$ln - 1] !~ /^-/) {
1810 $ln++; 1959 $rest .= substr($lines[$ln - 1], $off) . "\n";
1811 $cnt--; 1960 $cnt--;
1812 $off = 0; 1961 }
1813 }
1814 while ($cnt > 0) {
1815 $rest .= substr($lines[$ln - 1], $off) . "\n";
1816 $ln++; 1962 $ln++;
1817 $cnt--;
1818 $off = 0; 1963 $off = 0;
1819 } 1964 }
1820 $rest =~ s/\\\n.//g; 1965 $rest =~ s/\\\n.//g;
@@ -1827,6 +1972,7 @@ sub process {
1827 } else { 1972 } else {
1828 $dstat =~ s/^.\s*\#\s*define\s+$Ident\s*//; 1973 $dstat =~ s/^.\s*\#\s*define\s+$Ident\s*//;
1829 } 1974 }
1975 $dstat =~ s/$;//g;
1830 $dstat =~ s/\\\n.//g; 1976 $dstat =~ s/\\\n.//g;
1831 $dstat =~ s/^\s*//s; 1977 $dstat =~ s/^\s*//s;
1832 $dstat =~ s/\s*$//s; 1978 $dstat =~ s/\s*$//s;
@@ -1845,6 +1991,7 @@ sub process {
1845 DEFINE_PER_CPU| 1991 DEFINE_PER_CPU|
1846 __typeof__\( 1992 __typeof__\(
1847 }x; 1993 }x;
1994 #print "REST<$rest>\n";
1848 if ($rest ne '') { 1995 if ($rest ne '') {
1849 if ($rest !~ /while\s*\(/ && 1996 if ($rest !~ /while\s*\(/ &&
1850 $dstat !~ /$exceptions/) 1997 $dstat !~ /$exceptions/)
@@ -2001,7 +2148,14 @@ sub process {
2001 if ($prevline =~ /\bif\s*\(([^\)]*)\)/) { 2148 if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
2002 my $expr = $1; 2149 my $expr = $1;
2003 if ($line =~ /\bkfree\(\Q$expr\E\);/) { 2150 if ($line =~ /\bkfree\(\Q$expr\E\);/) {
2004 WARN("kfree(NULL) is safe this check is probabally not required\n" . $hereprev); 2151 WARN("kfree(NULL) is safe this check is probably not required\n" . $hereprev);
2152 }
2153 }
2154# check for needless usb_free_urb() checks
2155 if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
2156 my $expr = $1;
2157 if ($line =~ /\busb_free_urb\(\Q$expr\E\);/) {
2158 WARN("usb_free_urb(NULL) is safe this check is probably not required\n" . $hereprev);
2005 } 2159 }
2006 } 2160 }
2007 2161
@@ -2106,6 +2260,10 @@ sub process {
2106 if ($line =~ /\bsimple_(strto.*?)\s*\(/) { 2260 if ($line =~ /\bsimple_(strto.*?)\s*\(/) {
2107 WARN("consider using strict_$1 in preference to simple_$1\n" . $herecurr); 2261 WARN("consider using strict_$1 in preference to simple_$1\n" . $herecurr);
2108 } 2262 }
2263# check for __initcall(), use device_initcall() explicitly please
2264 if ($line =~ /^.\s*__initcall\s*\(/) {
2265 WARN("please use device_initcall() instead of __initcall()\n" . $herecurr);
2266 }
2109 2267
2110# use of NR_CPUS is usually wrong 2268# use of NR_CPUS is usually wrong
2111# ignore definitions of NR_CPUS and usage to define arrays as likely right 2269# ignore definitions of NR_CPUS and usage to define arrays as likely right
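Among the checkpatch.pl changes above is a new test that flags a statement trailing a case or default label on the same line. The following standalone Perl sketch is an illustration only, not part of the patch: the sample lines (and do_something()) are invented, and the pattern is simplified in that it omits the comment placeholder ($;) the real check also tolerates between the label and the end of the line.

#!/usr/bin/perl
# Standalone approximation of the new case/default trailing-statement
# check; the sample input lines are invented for the demonstration.
use strict;
use warnings;

my @samples = (
	"+\tcase FOO: do_something();",	# should be flagged
	"+\tcase FOO:",			# fine
	"+\tdefault: {",		# fine, an opening brace is allowed
);

for my $line (@samples) {
	if ($line =~ /^.\s*(?:case\s*.*|default\s*):/g &&
	    $line !~ /\G(?:\s*\{)?(?:\s*\\)?\s*$/g) {
		print "ERROR: trailing statements should be on next line: $line\n";
	} else {
		print "ok: $line\n";
	}
}

Run on its own the sketch prints one verdict per sample line; in the patch the equivalent pattern pair feeds checkpatch's ERROR() reporting instead.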
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 340ad6920511..3eca62566d6b 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -26,12 +26,17 @@
26# $& (whole re) matches the complete objdump line with the stack growth 26# $& (whole re) matches the complete objdump line with the stack growth
27# $1 (first bracket) matches the size of the stack growth 27# $1 (first bracket) matches the size of the stack growth
28# 28#
29# $dre is similar, but for dynamic stack reductions:
30# $& (whole re) matches the complete objdump line with the stack growth
31# $1 (first bracket) matches the dynamic amount of the stack growth
32#
29# use anything else and feel the pain ;) 33# use anything else and feel the pain ;)
30my (@stack, $re, $x, $xs); 34my (@stack, $re, $dre, $x, $xs);
31{ 35{
32 my $arch = shift; 36 my $arch = shift;
33 if ($arch eq "") { 37 if ($arch eq "") {
34 $arch = `uname -m`; 38 $arch = `uname -m`;
39 chomp($arch);
35 } 40 }
36 41
37 $x = "[0-9a-f]"; # hex character 42 $x = "[0-9a-f]"; # hex character
@@ -46,9 +51,11 @@ my (@stack, $re, $x, $xs);
46 } elsif ($arch =~ /^i[3456]86$/) { 51 } elsif ($arch =~ /^i[3456]86$/) {
47 #c0105234: 81 ec ac 05 00 00 sub $0x5ac,%esp 52 #c0105234: 81 ec ac 05 00 00 sub $0x5ac,%esp
48 $re = qr/^.*[as][du][db] \$(0x$x{1,8}),\%esp$/o; 53 $re = qr/^.*[as][du][db] \$(0x$x{1,8}),\%esp$/o;
54 $dre = qr/^.*[as][du][db] (%.*),\%esp$/o;
49 } elsif ($arch eq 'x86_64') { 55 } elsif ($arch eq 'x86_64') {
50 # 2f60: 48 81 ec e8 05 00 00 sub $0x5e8,%rsp 56 # 2f60: 48 81 ec e8 05 00 00 sub $0x5e8,%rsp
51 $re = qr/^.*[as][du][db] \$(0x$x{1,8}),\%rsp$/o; 57 $re = qr/^.*[as][du][db] \$(0x$x{1,8}),\%rsp$/o;
58 $dre = qr/^.*[as][du][db] (\%.*),\%rsp$/o;
52 } elsif ($arch eq 'ia64') { 59 } elsif ($arch eq 'ia64') {
53 #e0000000044011fc: 01 0f fc 8c adds r12=-384,r12 60 #e0000000044011fc: 01 0f fc 8c adds r12=-384,r12
54 $re = qr/.*adds.*r12=-(([0-9]{2}|[3-9])[0-9]{2}),r12/o; 61 $re = qr/.*adds.*r12=-(([0-9]{2}|[3-9])[0-9]{2}),r12/o;
@@ -85,7 +92,7 @@ my (@stack, $re, $x, $xs);
85 # 0: 00 e8 38 01 LINK 0x4e0; 92 # 0: 00 e8 38 01 LINK 0x4e0;
86 $re = qr/.*[[:space:]]LINK[[:space:]]*(0x$x{1,8})/o; 93 $re = qr/.*[[:space:]]LINK[[:space:]]*(0x$x{1,8})/o;
87 } else { 94 } else {
88 print("wrong or unknown architecture\n"); 95 print("wrong or unknown architecture \"$arch\"\n");
89 exit 96 exit
90 } 97 }
91} 98}
@@ -141,6 +148,22 @@ while (my $line = <STDIN>) {
141 next if ($size < 100); 148 next if ($size < 100);
142 push @stack, "$intro$size\n"; 149 push @stack, "$intro$size\n";
143 } 150 }
151 elsif (defined $dre && $line =~ m/$dre/) {
152 my $size = "Dynamic ($1)";
153
154 next if $line !~ m/^($xs*)/;
155 my $addr = $1;
156 $addr =~ s/ /0/g;
157 $addr = "0x$addr";
158
159 my $intro = "$addr $func [$file]:";
160 my $padlen = 56 - length($intro);
161 while ($padlen > 0) {
162 $intro .= ' ';
163 $padlen -= 8;
164 }
165 push @stack, "$intro$size\n";
166 }
144} 167}
145 168
146print sort bysize @stack; 169print sort bysize @stack;
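The checkstack.pl change above adds a second pattern, $dre, so that dynamic stack adjustments (register operands, as emitted for alloca() or variable-length arrays) are reported alongside the constant ones. A minimal sketch of the x86_64 case follows; it is illustrative only: the objdump lines are invented, and the hex character class is written out inline instead of being built from the script's $x variable.

#!/usr/bin/perl
# How the constant ($re) and dynamic ($dre) x86_64 patterns classify
# stack pointer adjustments; the sample objdump lines are made up.
use strict;
use warnings;

my $re  = qr/^.*[as][du][db] \$(0x[0-9a-f]{1,8}),\%rsp$/;	# constant
my $dre = qr/^.*[as][du][db] (\%.*),\%rsp$/;			# dynamic

for my $line ("    2f60:\t48 81 ec e8 05 00 00 \tsub \$0x5e8,%rsp",
	      "    2f80:\t48 29 c4 \tsub %rax,%rsp") {
	if ($line =~ m/$re/) {
		printf("constant stack growth: %d bytes\n", hex($1));
	} elsif ($line =~ m/$dre/) {
		print "Dynamic ($1)\n";	# the form checkstack.pl reports
	}
}

A dynamic adjustment cannot be sized from the binary alone, which is why the script reports the register expression verbatim rather than a byte count.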
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index dca5e0dd09bf..4f8a3007e457 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -520,8 +520,7 @@ int main(int argc, char **argv)
520 genksyms_usage(); 520 genksyms_usage();
521 return 1; 521 return 1;
522 } 522 }
523 if ((strcmp(arch, "v850") == 0) || (strcmp(arch, "h8300") == 0) 523 if ((strcmp(arch, "h8300") == 0) || (strcmp(arch, "blackfin") == 0))
524 || (strcmp(arch, "blackfin") == 0))
525 mod_prefix = "_"; 524 mod_prefix = "_";
526 { 525 {
527 extern int yydebug; 526 extern int yydebug;
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 1fcaf3284a6a..4fa1f3ad2513 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -623,7 +623,7 @@ static int do_i2c_entry(const char *filename, struct i2c_device_id *id,
623 return 1; 623 return 1;
624} 624}
625 625
626/* Ignore any prefix, eg. v850 prepends _ */ 626/* Ignore any prefix, eg. some architectures prepend _ */
627static inline int sym_is(const char *symbol, const char *name) 627static inline int sym_is(const char *symbol, const char *name)
628{ 628{
629 const char *match; 629 const char *match;
diff --git a/scripts/mod/mk_elfconfig.c b/scripts/mod/mk_elfconfig.c
index db3881f14c2d..6a96d47bd1e6 100644
--- a/scripts/mod/mk_elfconfig.c
+++ b/scripts/mod/mk_elfconfig.c
@@ -55,7 +55,7 @@ main(int argc, char **argv)
55 else 55 else
56 exit(1); 56 exit(1);
57 57
58 if ((strcmp(argv[1], "v850") == 0) || (strcmp(argv[1], "h8300") == 0) 58 if ((strcmp(argv[1], "h8300") == 0)
59 || (strcmp(argv[1], "blackfin") == 0)) 59 || (strcmp(argv[1], "blackfin") == 0))
60 printf("#define MODULE_SYMBOL_PREFIX \"_\"\n"); 60 printf("#define MODULE_SYMBOL_PREFIX \"_\"\n");
61 else 61 else
diff --git a/security/Kconfig b/security/Kconfig
index 62ed4717d334..559293922a47 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -74,8 +74,7 @@ config SECURITY_NETWORK_XFRM
74 If you are unsure how to answer this question, answer N. 74 If you are unsure how to answer this question, answer N.
75 75
76config SECURITY_FILE_CAPABILITIES 76config SECURITY_FILE_CAPABILITIES
77 bool "File POSIX Capabilities (EXPERIMENTAL)" 77 bool "File POSIX Capabilities"
78 depends on EXPERIMENTAL
79 default n 78 default n
80 help 79 help
81 This enables filesystem capabilities, allowing you to give 80 This enables filesystem capabilities, allowing you to give
diff --git a/security/commoncap.c b/security/commoncap.c
index 0b6537a3672d..4afbece37a08 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -162,8 +162,7 @@ void cap_capset_set (struct task_struct *target, kernel_cap_t *effective,
162 162
163static inline void bprm_clear_caps(struct linux_binprm *bprm) 163static inline void bprm_clear_caps(struct linux_binprm *bprm)
164{ 164{
165 cap_clear(bprm->cap_inheritable); 165 cap_clear(bprm->cap_post_exec_permitted);
166 cap_clear(bprm->cap_permitted);
167 bprm->cap_effective = false; 166 bprm->cap_effective = false;
168} 167}
169 168
@@ -198,6 +197,7 @@ static inline int cap_from_disk(struct vfs_cap_data *caps,
198{ 197{
199 __u32 magic_etc; 198 __u32 magic_etc;
200 unsigned tocopy, i; 199 unsigned tocopy, i;
200 int ret;
201 201
202 if (size < sizeof(magic_etc)) 202 if (size < sizeof(magic_etc))
203 return -EINVAL; 203 return -EINVAL;
@@ -225,19 +225,40 @@ static inline int cap_from_disk(struct vfs_cap_data *caps,
225 bprm->cap_effective = false; 225 bprm->cap_effective = false;
226 } 226 }
227 227
228 for (i = 0; i < tocopy; ++i) { 228 ret = 0;
229 bprm->cap_permitted.cap[i] = 229
230 le32_to_cpu(caps->data[i].permitted); 230 CAP_FOR_EACH_U32(i) {
231 bprm->cap_inheritable.cap[i] = 231 __u32 value_cpu;
232 le32_to_cpu(caps->data[i].inheritable); 232
233 } 233 if (i >= tocopy) {
234 while (i < VFS_CAP_U32) { 234 /*
235 bprm->cap_permitted.cap[i] = 0; 235 * Legacy capability sets have no upper bits
236 bprm->cap_inheritable.cap[i] = 0; 236 */
237 i++; 237 bprm->cap_post_exec_permitted.cap[i] = 0;
238 continue;
239 }
240 /*
241 * pP' = (X & fP) | (pI & fI)
242 */
243 value_cpu = le32_to_cpu(caps->data[i].permitted);
244 bprm->cap_post_exec_permitted.cap[i] =
245 (current->cap_bset.cap[i] & value_cpu) |
246 (current->cap_inheritable.cap[i] &
247 le32_to_cpu(caps->data[i].inheritable));
248 if (value_cpu & ~bprm->cap_post_exec_permitted.cap[i]) {
249 /*
250 * insufficient to execute correctly
251 */
252 ret = -EPERM;
253 }
238 } 254 }
239 255
240 return 0; 256 /*
257 * For legacy apps, with no internal support for recognizing they
258 * do not have enough capabilities, we return an error if they are
259 * missing some "forced" (aka file-permitted) capabilities.
260 */
261 return bprm->cap_effective ? ret : 0;
241} 262}
242 263
243/* Locate any VFS capabilities: */ 264/* Locate any VFS capabilities: */
@@ -269,9 +290,9 @@ static int get_file_caps(struct linux_binprm *bprm)
269 goto out; 290 goto out;
270 291
271 rc = cap_from_disk(&vcaps, bprm, rc); 292 rc = cap_from_disk(&vcaps, bprm, rc);
272 if (rc) 293 if (rc == -EINVAL)
273 printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n", 294 printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
274 __func__, rc, bprm->filename); 295 __func__, rc, bprm->filename);
275 296
276out: 297out:
277 dput(dentry); 298 dput(dentry);
@@ -304,25 +325,24 @@ int cap_bprm_set_security (struct linux_binprm *bprm)
304 int ret; 325 int ret;
305 326
306 ret = get_file_caps(bprm); 327 ret = get_file_caps(bprm);
307 if (ret)
308 printk(KERN_NOTICE "%s: get_file_caps returned %d for %s\n",
309 __func__, ret, bprm->filename);
310
311 /* To support inheritance of root-permissions and suid-root
312 * executables under compatibility mode, we raise all three
313 * capability sets for the file.
314 *
315 * If only the real uid is 0, we only raise the inheritable
316 * and permitted sets of the executable file.
317 */
318 328
319 if (!issecure (SECURE_NOROOT)) { 329 if (!issecure(SECURE_NOROOT)) {
330 /*
331 * To support inheritance of root-permissions and suid-root
332 * executables under compatibility mode, we override the
333 * capability sets for the file.
334 *
335 * If only the real uid is 0, we do not set the effective
336 * bit.
337 */
320 if (bprm->e_uid == 0 || current->uid == 0) { 338 if (bprm->e_uid == 0 || current->uid == 0) {
321 cap_set_full (bprm->cap_inheritable); 339 /* pP' = (cap_bset & ~0) | (pI & ~0) */
322 cap_set_full (bprm->cap_permitted); 340 bprm->cap_post_exec_permitted = cap_combine(
341 current->cap_bset, current->cap_inheritable
342 );
343 bprm->cap_effective = (bprm->e_uid == 0);
344 ret = 0;
323 } 345 }
324 if (bprm->e_uid == 0)
325 bprm->cap_effective = true;
326 } 346 }
327 347
328 return ret; 348 return ret;
@@ -330,17 +350,9 @@ int cap_bprm_set_security (struct linux_binprm *bprm)
330 350
331void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) 351void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
332{ 352{
333 /* Derived from fs/exec.c:compute_creds. */
334 kernel_cap_t new_permitted, working;
335
336 new_permitted = cap_intersect(bprm->cap_permitted,
337 current->cap_bset);
338 working = cap_intersect(bprm->cap_inheritable,
339 current->cap_inheritable);
340 new_permitted = cap_combine(new_permitted, working);
341
342 if (bprm->e_uid != current->uid || bprm->e_gid != current->gid || 353 if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
343 !cap_issubset (new_permitted, current->cap_permitted)) { 354 !cap_issubset(bprm->cap_post_exec_permitted,
355 current->cap_permitted)) {
344 set_dumpable(current->mm, suid_dumpable); 356 set_dumpable(current->mm, suid_dumpable);
345 current->pdeath_signal = 0; 357 current->pdeath_signal = 0;
346 358
@@ -350,9 +362,9 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
350 bprm->e_gid = current->gid; 362 bprm->e_gid = current->gid;
351 } 363 }
352 if (cap_limit_ptraced_target()) { 364 if (cap_limit_ptraced_target()) {
353 new_permitted = 365 bprm->cap_post_exec_permitted = cap_intersect(
354 cap_intersect(new_permitted, 366 bprm->cap_post_exec_permitted,
355 current->cap_permitted); 367 current->cap_permitted);
356 } 368 }
357 } 369 }
358 } 370 }
@@ -364,9 +376,9 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
364 * in the init_task struct. Thus we skip the usual 376 * in the init_task struct. Thus we skip the usual
365 * capability rules */ 377 * capability rules */
366 if (!is_global_init(current)) { 378 if (!is_global_init(current)) {
367 current->cap_permitted = new_permitted; 379 current->cap_permitted = bprm->cap_post_exec_permitted;
368 if (bprm->cap_effective) 380 if (bprm->cap_effective)
369 current->cap_effective = new_permitted; 381 current->cap_effective = bprm->cap_post_exec_permitted;
370 else 382 else
371 cap_clear(current->cap_effective); 383 cap_clear(current->cap_effective);
372 } 384 }
@@ -381,9 +393,7 @@ int cap_bprm_secureexec (struct linux_binprm *bprm)
381 if (current->uid != 0) { 393 if (current->uid != 0) {
382 if (bprm->cap_effective) 394 if (bprm->cap_effective)
383 return 1; 395 return 1;
384 if (!cap_isclear(bprm->cap_permitted)) 396 if (!cap_isclear(bprm->cap_post_exec_permitted))
385 return 1;
386 if (!cap_isclear(bprm->cap_inheritable))
387 return 1; 397 return 1;
388 } 398 }
389 399
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index ddd92cec78ed..7bd296cca041 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -41,6 +41,7 @@ struct dev_whitelist_item {
41 short type; 41 short type;
42 short access; 42 short access;
43 struct list_head list; 43 struct list_head list;
44 struct rcu_head rcu;
44}; 45};
45 46
46struct dev_cgroup { 47struct dev_cgroup {
@@ -59,6 +60,11 @@ static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
59 return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id)); 60 return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
60} 61}
61 62
63static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
64{
65 return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
66}
67
62struct cgroup_subsys devices_subsys; 68struct cgroup_subsys devices_subsys;
63 69
64static int devcgroup_can_attach(struct cgroup_subsys *ss, 70static int devcgroup_can_attach(struct cgroup_subsys *ss,
@@ -128,11 +134,19 @@ static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
128 } 134 }
129 135
130 if (whcopy != NULL) 136 if (whcopy != NULL)
131 list_add_tail(&whcopy->list, &dev_cgroup->whitelist); 137 list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
132 spin_unlock(&dev_cgroup->lock); 138 spin_unlock(&dev_cgroup->lock);
133 return 0; 139 return 0;
134} 140}
135 141
142static void whitelist_item_free(struct rcu_head *rcu)
143{
144 struct dev_whitelist_item *item;
145
146 item = container_of(rcu, struct dev_whitelist_item, rcu);
147 kfree(item);
148}
149
136/* 150/*
137 * called under cgroup_lock() 151 * called under cgroup_lock()
138 * since the list is visible to other tasks, we need the spinlock also 152 * since the list is visible to other tasks, we need the spinlock also
@@ -156,8 +170,8 @@ static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
156remove: 170remove:
157 walk->access &= ~wh->access; 171 walk->access &= ~wh->access;
158 if (!walk->access) { 172 if (!walk->access) {
159 list_del(&walk->list); 173 list_del_rcu(&walk->list);
160 kfree(walk); 174 call_rcu(&walk->rcu, whitelist_item_free);
161 } 175 }
162 } 176 }
163 spin_unlock(&dev_cgroup->lock); 177 spin_unlock(&dev_cgroup->lock);
@@ -188,7 +202,7 @@ static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
188 } 202 }
189 wh->minor = wh->major = ~0; 203 wh->minor = wh->major = ~0;
190 wh->type = DEV_ALL; 204 wh->type = DEV_ALL;
191 wh->access = ACC_MKNOD | ACC_READ | ACC_WRITE; 205 wh->access = ACC_MASK;
192 list_add(&wh->list, &dev_cgroup->whitelist); 206 list_add(&wh->list, &dev_cgroup->whitelist);
193 } else { 207 } else {
194 parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup); 208 parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
@@ -250,11 +264,10 @@ static char type_to_char(short type)
250 264
251static void set_majmin(char *str, unsigned m) 265static void set_majmin(char *str, unsigned m)
252{ 266{
253 memset(str, 0, MAJMINLEN);
254 if (m == ~0) 267 if (m == ~0)
255 sprintf(str, "*"); 268 strcpy(str, "*");
256 else 269 else
257 snprintf(str, MAJMINLEN, "%u", m); 270 sprintf(str, "%u", m);
258} 271}
259 272
260static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft, 273static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
@@ -264,15 +277,15 @@ static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
264 struct dev_whitelist_item *wh; 277 struct dev_whitelist_item *wh;
265 char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN]; 278 char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
266 279
267 spin_lock(&devcgroup->lock); 280 rcu_read_lock();
268 list_for_each_entry(wh, &devcgroup->whitelist, list) { 281 list_for_each_entry_rcu(wh, &devcgroup->whitelist, list) {
269 set_access(acc, wh->access); 282 set_access(acc, wh->access);
270 set_majmin(maj, wh->major); 283 set_majmin(maj, wh->major);
271 set_majmin(min, wh->minor); 284 set_majmin(min, wh->minor);
272 seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type), 285 seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
273 maj, min, acc); 286 maj, min, acc);
274 } 287 }
275 spin_unlock(&devcgroup->lock); 288 rcu_read_unlock();
276 289
277 return 0; 290 return 0;
278} 291}
@@ -312,10 +325,10 @@ static int may_access_whitelist(struct dev_cgroup *c,
312 * when adding a new allow rule to a device whitelist, the rule 325 * when adding a new allow rule to a device whitelist, the rule
313 * must be allowed in the parent device 326 * must be allowed in the parent device
314 */ 327 */
315static int parent_has_perm(struct cgroup *childcg, 328static int parent_has_perm(struct dev_cgroup *childcg,
316 struct dev_whitelist_item *wh) 329 struct dev_whitelist_item *wh)
317{ 330{
318 struct cgroup *pcg = childcg->parent; 331 struct cgroup *pcg = childcg->css.cgroup->parent;
319 struct dev_cgroup *parent; 332 struct dev_cgroup *parent;
320 int ret; 333 int ret;
321 334
@@ -341,39 +354,19 @@ static int parent_has_perm(struct cgroup *childcg,
341 * new access is only allowed if you're in the top-level cgroup, or your 354 * new access is only allowed if you're in the top-level cgroup, or your
342 * parent cgroup has the access you're asking for. 355 * parent cgroup has the access you're asking for.
343 */ 356 */
344static ssize_t devcgroup_access_write(struct cgroup *cgroup, struct cftype *cft, 357static int devcgroup_update_access(struct dev_cgroup *devcgroup,
345 struct file *file, const char __user *userbuf, 358 int filetype, const char *buffer)
346 size_t nbytes, loff_t *ppos)
347{ 359{
348 struct cgroup *cur_cgroup; 360 struct dev_cgroup *cur_devcgroup;
349 struct dev_cgroup *devcgroup, *cur_devcgroup; 361 const char *b;
350 int filetype = cft->private; 362 char *endp;
351 char *buffer, *b;
352 int retval = 0, count; 363 int retval = 0, count;
353 struct dev_whitelist_item wh; 364 struct dev_whitelist_item wh;
354 365
355 if (!capable(CAP_SYS_ADMIN)) 366 if (!capable(CAP_SYS_ADMIN))
356 return -EPERM; 367 return -EPERM;
357 368
358 devcgroup = cgroup_to_devcgroup(cgroup); 369 cur_devcgroup = task_devcgroup(current);
359 cur_cgroup = task_cgroup(current, devices_subsys.subsys_id);
360 cur_devcgroup = cgroup_to_devcgroup(cur_cgroup);
361
362 buffer = kmalloc(nbytes+1, GFP_KERNEL);
363 if (!buffer)
364 return -ENOMEM;
365
366 if (copy_from_user(buffer, userbuf, nbytes)) {
367 retval = -EFAULT;
368 goto out1;
369 }
370 buffer[nbytes] = 0; /* nul-terminate */
371
372 cgroup_lock();
373 if (cgroup_is_removed(cgroup)) {
374 retval = -ENODEV;
375 goto out2;
376 }
377 370
378 memset(&wh, 0, sizeof(wh)); 371 memset(&wh, 0, sizeof(wh));
379 b = buffer; 372 b = buffer;
@@ -392,32 +385,23 @@ static ssize_t devcgroup_access_write(struct cgroup *cgroup, struct cftype *cft,
392 wh.type = DEV_CHAR; 385 wh.type = DEV_CHAR;
393 break; 386 break;
394 default: 387 default:
395 retval = -EINVAL; 388 return -EINVAL;
396 goto out2;
397 } 389 }
398 b++; 390 b++;
399 if (!isspace(*b)) { 391 if (!isspace(*b))
400 retval = -EINVAL; 392 return -EINVAL;
401 goto out2;
402 }
403 b++; 393 b++;
404 if (*b == '*') { 394 if (*b == '*') {
405 wh.major = ~0; 395 wh.major = ~0;
406 b++; 396 b++;
407 } else if (isdigit(*b)) { 397 } else if (isdigit(*b)) {
408 wh.major = 0; 398 wh.major = simple_strtoul(b, &endp, 10);
409 while (isdigit(*b)) { 399 b = endp;
410 wh.major = wh.major*10+(*b-'0');
411 b++;
412 }
413 } else { 400 } else {
414 retval = -EINVAL; 401 return -EINVAL;
415 goto out2;
416 }
417 if (*b != ':') {
418 retval = -EINVAL;
419 goto out2;
420 } 402 }
403 if (*b != ':')
404 return -EINVAL;
421 b++; 405 b++;
422 406
423 /* read minor */ 407 /* read minor */
@@ -425,19 +409,13 @@ static ssize_t devcgroup_access_write(struct cgroup *cgroup, struct cftype *cft,
425 wh.minor = ~0; 409 wh.minor = ~0;
426 b++; 410 b++;
427 } else if (isdigit(*b)) { 411 } else if (isdigit(*b)) {
428 wh.minor = 0; 412 wh.minor = simple_strtoul(b, &endp, 10);
429 while (isdigit(*b)) { 413 b = endp;
430 wh.minor = wh.minor*10+(*b-'0');
431 b++;
432 }
433 } else { 414 } else {
434 retval = -EINVAL; 415 return -EINVAL;
435 goto out2;
436 }
437 if (!isspace(*b)) {
438 retval = -EINVAL;
439 goto out2;
440 } 416 }
417 if (!isspace(*b))
418 return -EINVAL;
441 for (b++, count = 0; count < 3; count++, b++) { 419 for (b++, count = 0; count < 3; count++, b++) {
442 switch (*b) { 420 switch (*b) {
443 case 'r': 421 case 'r':
@@ -454,8 +432,7 @@ static ssize_t devcgroup_access_write(struct cgroup *cgroup, struct cftype *cft,
454 count = 3; 432 count = 3;
455 break; 433 break;
456 default: 434 default:
457 retval = -EINVAL; 435 return -EINVAL;
458 goto out2;
459 } 436 }
460 } 437 }
461 438
@@ -463,38 +440,39 @@ handle:
463 retval = 0; 440 retval = 0;
464 switch (filetype) { 441 switch (filetype) {
465 case DEVCG_ALLOW: 442 case DEVCG_ALLOW:
466 if (!parent_has_perm(cgroup, &wh)) 443 if (!parent_has_perm(devcgroup, &wh))
467 retval = -EPERM; 444 return -EPERM;
468 else 445 return dev_whitelist_add(devcgroup, &wh);
469 retval = dev_whitelist_add(devcgroup, &wh);
470 break;
471 case DEVCG_DENY: 446 case DEVCG_DENY:
472 dev_whitelist_rm(devcgroup, &wh); 447 dev_whitelist_rm(devcgroup, &wh);
473 break; 448 break;
474 default: 449 default:
475 retval = -EINVAL; 450 return -EINVAL;
476 goto out2;
477 } 451 }
452 return 0;
453}
478 454
479 if (retval == 0) 455static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
480 retval = nbytes; 456 const char *buffer)
481 457{
482out2: 458 int retval;
459 if (!cgroup_lock_live_group(cgrp))
460 return -ENODEV;
461 retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
462 cft->private, buffer);
483 cgroup_unlock(); 463 cgroup_unlock();
484out1:
485 kfree(buffer);
486 return retval; 464 return retval;
487} 465}
488 466
489static struct cftype dev_cgroup_files[] = { 467static struct cftype dev_cgroup_files[] = {
490 { 468 {
491 .name = "allow", 469 .name = "allow",
492 .write = devcgroup_access_write, 470 .write_string = devcgroup_access_write,
493 .private = DEVCG_ALLOW, 471 .private = DEVCG_ALLOW,
494 }, 472 },
495 { 473 {
496 .name = "deny", 474 .name = "deny",
497 .write = devcgroup_access_write, 475 .write_string = devcgroup_access_write,
498 .private = DEVCG_DENY, 476 .private = DEVCG_DENY,
499 }, 477 },
500 { 478 {
@@ -535,8 +513,8 @@ int devcgroup_inode_permission(struct inode *inode, int mask)
535 if (!dev_cgroup) 513 if (!dev_cgroup)
536 return 0; 514 return 0;
537 515
538 spin_lock(&dev_cgroup->lock); 516 rcu_read_lock();
539 list_for_each_entry(wh, &dev_cgroup->whitelist, list) { 517 list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
540 if (wh->type & DEV_ALL) 518 if (wh->type & DEV_ALL)
541 goto acc_check; 519 goto acc_check;
542 if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode)) 520 if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
@@ -552,10 +530,10 @@ acc_check:
552 continue; 530 continue;
553 if ((mask & MAY_READ) && !(wh->access & ACC_READ)) 531 if ((mask & MAY_READ) && !(wh->access & ACC_READ))
554 continue; 532 continue;
555 spin_unlock(&dev_cgroup->lock); 533 rcu_read_unlock();
556 return 0; 534 return 0;
557 } 535 }
558 spin_unlock(&dev_cgroup->lock); 536 rcu_read_unlock();
559 537
560 return -EPERM; 538 return -EPERM;
561} 539}
@@ -570,7 +548,7 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
570 if (!dev_cgroup) 548 if (!dev_cgroup)
571 return 0; 549 return 0;
572 550
573 spin_lock(&dev_cgroup->lock); 551 rcu_read_lock();
574 list_for_each_entry(wh, &dev_cgroup->whitelist, list) { 552 list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
575 if (wh->type & DEV_ALL) 553 if (wh->type & DEV_ALL)
576 goto acc_check; 554 goto acc_check;
@@ -585,9 +563,9 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
585acc_check: 563acc_check:
586 if (!(wh->access & ACC_MKNOD)) 564 if (!(wh->access & ACC_MKNOD))
587 continue; 565 continue;
588 spin_unlock(&dev_cgroup->lock); 566 rcu_read_unlock();
589 return 0; 567 return 0;
590 } 568 }
591 spin_unlock(&dev_cgroup->lock); 569 rcu_read_unlock();
592 return -EPERM; 570 return -EPERM;
593} 571}
diff --git a/sound/core/info.c b/sound/core/info.c
index cb5ead3e202d..c67773ad9298 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/time.h> 23#include <linux/time.h>
24#include <linux/mm.h>
24#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
25#include <linux/string.h> 26#include <linux/string.h>
26#include <sound/core.h> 27#include <sound/core.h>
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 33940139844b..d4fafb6eec6c 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -35,47 +35,6 @@ config SOUND_AU1550_AC97
35 tristate "Au1550/Au1200 AC97 Sound" 35 tristate "Au1550/Au1200 AC97 Sound"
36 depends on SOC_AU1550 || SOC_AU1200 36 depends on SOC_AU1550 || SOC_AU1200
37 37
38config SOUND_TRIDENT
39 tristate "Trident 4DWave DX/NX, SiS 7018 or ALi 5451 PCI Audio Core"
40 depends on PCI
41 ---help---
42 Say Y or M if you have a PCI sound card utilizing the Trident
43 4DWave-DX/NX chipset or your mother board chipset has SiS 7018
44 or ALi 5451 built-in. The SiS 7018 PCI Audio Core is embedded
45 in SiS960 Super South Bridge and SiS540/630 Single Chipset.
46 The ALi 5451 PCI Audio Core is embedded in ALi M1535, M1535D,
47 M1535+ or M1535D+ South Bridge.
48
49 Use lspci -n to find out if your sound card or chipset uses
50 Trident 4DWave or SiS 7018. PCI ID 1023:2000 or 1023:2001 stands
51 for Trident 4Dwave. PCI ID 1039:7018 stands for SiS7018. PCI ID
52 10B9:5451 stands for ALi5451.
53
54 This driver supports S/PDIF in/out (record/playback) for ALi 5451
55 embedded in ALi M1535+ and M1535D+. Note that they aren't all
56 enabled by default; you can enable them by saying Y to "/proc file
57 system support" and "Sysctl support", and after the /proc file
58 system has been mounted, executing the command
59
60 command what is enabled
61
62 echo 0>/proc/ALi5451 pcm out is also set to S/PDIF out. (Default).
63
64 echo 1>/proc/ALi5451 use S/PDIF out to output pcm data.
65
66 echo 2>/proc/ALi5451 use S/PDIF out to output non-pcm data.
67 (AC3...).
68
69 echo 3>/proc/ALi5451 record from Ac97 in(MIC, Line in...).
70 (Default).
71
72 echo 4>/proc/ALi5451 no matter Ac97 settings, record from S/PDIF
73 in.
74
75
76 This driver differs slightly from OSS/Free, so PLEASE READ the
77 comments at the top of <file:sound/oss/trident.c>.
78
79config SOUND_MSNDCLAS 38config SOUND_MSNDCLAS
80 tristate "Support for Turtle Beach MultiSound Classic, Tahiti, Monterey" 39 tristate "Support for Turtle Beach MultiSound Classic, Tahiti, Monterey"
81 depends on (m || !STANDALONE) && ISA 40 depends on (m || !STANDALONE) && ISA
diff --git a/sound/oss/Makefile b/sound/oss/Makefile
index 1f86299fae40..c611514f7ff1 100644
--- a/sound/oss/Makefile
+++ b/sound/oss/Makefile
@@ -29,11 +29,8 @@ obj-$(CONFIG_SOUND_MSNDCLAS) += msnd.o msnd_classic.o
29obj-$(CONFIG_SOUND_MSNDPIN) += msnd.o msnd_pinnacle.o 29obj-$(CONFIG_SOUND_MSNDPIN) += msnd.o msnd_pinnacle.o
30obj-$(CONFIG_SOUND_VWSND) += vwsnd.o 30obj-$(CONFIG_SOUND_VWSND) += vwsnd.o
31obj-$(CONFIG_SOUND_AU1550_AC97) += au1550_ac97.o ac97_codec.o 31obj-$(CONFIG_SOUND_AU1550_AC97) += au1550_ac97.o ac97_codec.o
32obj-$(CONFIG_SOUND_TRIDENT) += trident.o ac97_codec.o
33obj-$(CONFIG_SOUND_BCM_CS4297A) += swarm_cs4297a.o 32obj-$(CONFIG_SOUND_BCM_CS4297A) += swarm_cs4297a.o
34 33
35obj-$(CONFIG_SOUND_WM97XX) += ac97_plugin_wm97xx.o
36
37obj-$(CONFIG_DMASOUND) += dmasound/ 34obj-$(CONFIG_DMASOUND) += dmasound/
38 35
39# Declare multi-part drivers. 36# Declare multi-part drivers.
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
deleted file mode 100644
index f43f91ef86c7..000000000000
--- a/sound/oss/trident.c
+++ /dev/null
@@ -1,4654 +0,0 @@
1/*
2 * OSS driver for Linux 2.[46].x for
3 *
4 * Trident 4D-Wave
5 * SiS 7018
6 * ALi 5451
7 * Tvia/IGST CyberPro 5050
8 *
9 * Driver: Alan Cox <alan@redhat.com>
10 *
11 * Built from:
12 * Low level code: <audio@tridentmicro.com> from ALSA
13 * Framework: Thomas Sailer <sailer@ife.ee.ethz.ch>
14 * Extended by: Zach Brown <zab@redhat.com>
15 *
16 * Hacked up by:
17 * Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
18 * Ollie Lho <ollie@sis.com.tw> SiS 7018 Audio Core Support
19 * Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support
20 * Matt Wu <mattwu@acersoftech.com.cn> ALi 5451 Audio Core Support
21 * Peter Wächtler <pwaechtler@loewe-komp.de> CyberPro5050 support
22 * Muli Ben-Yehuda <mulix@mulix.org>
23 *
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
38 *
39 * History
40 * v0.14.10j
41 * January 3 2004 Eugene Teo <eugeneteo@eugeneteo.net>
42 * minor cleanup to use pr_debug instead of TRDBG since it is already
43 * defined in linux/kernel.h.
44 * v0.14.10i
45 * December 29 2003 Muli Ben-Yehuda <mulix@mulix.org>
46 * major cleanup for 2.6, fix a few error patch buglets
47 * with returning without properly cleaning up first,
48 * get rid of lock_kernel().
49 * v0.14.10h
50 * Sept 10 2002 Pascal Schmidt <der.eremit@email.de>
51 * added support for ALi 5451 joystick port
52 * v0.14.10g
53 * Sept 05 2002 Alan Cox <alan@redhat.com>
54 * adapt to new pci joystick attachment interface
55 * v0.14.10f
56 * July 24 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
57 * patch from Eric Lemar (via Ian Soboroff): in suspend and resume,
58 * fix wrong cast from pci_dev* to struct trident_card*.
59 * v0.14.10e
60 * July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
61 * rewrite the DMA buffer allocation/deallcoation functions, to make it
62 * modular and fix a bug where we would call free_pages on memory
63 * obtained with pci_alloc_consistent. Also remove unnecessary #ifdef
64 * CONFIG_PROC_FS and various other cleanups.
65 * v0.14.10d
66 * July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
67 * made several printk(KERN_NOTICE...) into TRDBG(...), to avoid spamming
68 * my syslog with hundreds of messages.
69 * v0.14.10c
70 * July 16 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
71 * Cleaned up Lei Hu's 0.4.10 driver to conform to Documentation/CodingStyle
72 * and the coding style used in the rest of the file.
73 * v0.14.10b
74 * June 23 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
75 * add a missing unlock_set_fmt, remove a superflous lock/unlock pair
76 * with nothing in between.
77 * v0.14.10a
78 * June 21 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
79 * use a debug macro instead of #ifdef CONFIG_DEBUG, trim to 80 columns
80 * per line, use 'do {} while (0)' in statement macros.
81 * v0.14.10
82 * June 6 2002 Lei Hu <Lei_hu@ali.com.tw>
83 * rewrite the part to read/write registers of audio codec for Ali5451
84 * v0.14.9e
85 * January 2 2002 Vojtech Pavlik <vojtech@ucw.cz> added gameport
86 * support to avoid resource conflict with pcigame.c
87 * v0.14.9d
88 * October 8 2001 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
89 * use set_current_state, properly release resources on failure in
90 * trident_probe, get rid of check_region
91 * v0.14.9c
92 * August 10 2001 Peter Wächtler <pwaechtler@loewe-komp.de>
93 * added support for Tvia (formerly Integraphics/IGST) CyberPro5050
 94 *	this chip is often found in set-top boxes (combined video+audio)
95 * v0.14.9b
96 * Switch to static inline not extern inline (gcc 3)
97 * v0.14.9a
98 * Aug 6 2001 Alan Cox
99 * 0.14.9 crashed on rmmod due to a timer/bh left running. Simplified
100 * the existing logic (the BH doesn't help as ac97 is lock_irqsave)
101 * and used del_timer_sync to clean up
102 * Fixed a problem where the ALi change broke my generic card
103 * v0.14.9
104 * Jul 10 2001 Matt Wu
105 * Add H/W Volume Control
106 * v0.14.8a
107 * July 7 2001 Alan Cox
108 * Moved Matt Wu's ac97 register cache into the card structure
109 * v0.14.8
110 * Apr 30 2001 Matt Wu
111 * Set EBUF1 and EBUF2 to still mode
112 * Add dc97/ac97 reset function
113 * Fix power management: ali_restore_regs
114 * unreleased
115 * Mar 09 2001 Matt Wu
116 * Add cache for ac97 access
117 * v0.14.7
118 * Feb 06 2001 Matt Wu
119 * Fix ac97 initialization
 120 *	Fix bug: an extra tail was played at the end of playback
121 * Jan 05 2001 Matt Wu
122 * Implement multi-channels and S/PDIF in support for ALi 1535+
123 * v0.14.6
124 * Nov 1 2000 Ching-Ling Lee
 125 *	Fix a memory leak when switching from 5.1 channels to 2 channels.
126 * Add lock protection into dynamic changing format of data.
127 * Oct 18 2000 Ching-Ling Lee
128 * 5.1-channels support for ALi
129 * June 28 2000 Ching-Ling Lee
 130 *	S/PDIF out/in (playback/record) support for ALi 1535+, selectable by the user via /proc
131 * Simple Power Management support for ALi
132 * v0.14.5 May 23 2000 Ollie Lho
133 * Misc bug fix from the Net
134 * v0.14.4 May 20 2000 Aaron Holtzman
135 * Fix kfree'd memory access in release
136 * Fix race in open while looking for a free virtual channel slot
137 * remove open_wait wq (which appears to be unused)
138 * v0.14.3 May 10 2000 Ollie Lho
139 * fixed a small bug in trident_update_ptr, xmms 1.0.1 no longer uses 100% CPU
140 * v0.14.2 Mar 29 2000 Ching-Ling Lee
141 * Add clear to silence advance in trident_update_ptr
 142 *	fix invalid data at the end of the sound
143 * v0.14.1 Mar 24 2000 Ching-Ling Lee
144 * ALi 5451 support added, playback and recording O.K.
 145 *	ALi 5451 support originally developed and structured based on sonicvibes,
 146 *	and merged into this file at Alan Cox's suggestion.
147 * v0.14 Mar 15 2000 Ollie Lho
148 * 5.1 channel output support with channel binding. What's the Matrix ?
149 * v0.13.1 Mar 10 2000 Ollie Lho
150 * few minor bugs on dual codec support, needs more testing
151 * v0.13 Mar 03 2000 Ollie Lho
152 * new pci_* for 2.4 kernel, back ported to 2.2
153 * v0.12 Feb 23 2000 Ollie Lho
154 * Preliminary Recording support
155 * v0.11.2 Feb 19 2000 Ollie Lho
 156 *	removed incomplete full-duplex support
157 * v0.11.1 Jan 28 2000 Ollie Lho
158 * small bug in setting sample rate for 4d-nx (reported by Aaron)
159 * v0.11 Jan 27 2000 Ollie Lho
160 * DMA bug, scheduler latency, second try
161 * v0.10 Jan 24 2000 Ollie Lho
162 * DMA bug fixed, found kernel scheduling problem
163 * v0.09 Jan 20 2000 Ollie Lho
164 * Clean up of channel register access routine (prepare for channel binding)
165 * v0.08 Jan 14 2000 Ollie Lho
166 * Isolation of AC97 codec code
167 * v0.07 Jan 13 2000 Ollie Lho
168 * Get rid of ugly old low level access routines (e.g. CHRegs.lp****)
169 * v0.06 Jan 11 2000 Ollie Lho
170 * Preliminary support for dual (more ?) AC97 codecs
171 * v0.05 Jan 08 2000 Luca Montecchiani <m.luca@iname.com>
172 * adapt to 2.3.x new __setup/__init call
173 * v0.04 Dec 31 1999 Ollie Lho
174 * Multiple Open, using Middle Loop Interrupt to smooth playback
175 * v0.03 Dec 24 1999 Ollie Lho
176 * mem leak in prog_dmabuf and dealloc_dmabuf removed
177 * v0.02 Dec 15 1999 Ollie Lho
178 * SiS 7018 support added, playback O.K.
 179 *	v0.01 Alan Cox et al.
180 * Initial Release in kernel 2.3.30, does not work
181 *
182 * ToDo
183 * Clean up of low level channel register access code. (done)
184 * Fix the bug on dma buffer management in update_ptr, read/write, drain_dac (done)
185 * Dual AC97 codecs support (done)
186 * Recording support (done)
187 * Mmap support
188 * "Channel Binding" ioctl extension (done)
189 * new pci device driver interface for 2.4 kernel (done)
190 *
191 * Lock order (high->low)
192 * lock - hardware lock
193 * open_mutex - guard opens
194 * sem - guard dmabuf, write re-entry etc
195 */
196
197#include <linux/module.h>
198#include <linux/string.h>
199#include <linux/ctype.h>
200#include <linux/ioport.h>
201#include <linux/sched.h>
202#include <linux/delay.h>
203#include <linux/sound.h>
204#include <linux/slab.h>
205#include <linux/soundcard.h>
206#include <linux/pci.h>
207#include <linux/init.h>
208#include <linux/poll.h>
209#include <linux/spinlock.h>
210#include <linux/ac97_codec.h>
211#include <linux/bitops.h>
212#include <linux/proc_fs.h>
213#include <linux/interrupt.h>
214#include <linux/pm.h>
215#include <linux/gameport.h>
216#include <linux/kernel.h>
217#include <linux/mutex.h>
218#include <linux/mm.h>
219
220#include <asm/uaccess.h>
221#include <asm/io.h>
222#include <asm/dma.h>
223
224#if defined(CONFIG_ALPHA_NAUTILUS) || defined(CONFIG_ALPHA_GENERIC)
225#include <asm/hwrpb.h>
226#endif
227
228#include "trident.h"
229
230#define DRIVER_VERSION "0.14.10j-2.6"
231
232#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
233#define SUPPORT_JOYSTICK 1
234#endif
235
236/* magic numbers to protect our data structures */
237#define TRIDENT_CARD_MAGIC 0x5072696E /* "Prin" */
238#define TRIDENT_STATE_MAGIC 0x63657373 /* "cess" */
239
240#define TRIDENT_DMA_MASK	0x3fffffff	/* DMA buffer mask for pci_alloc_consistent */
241#define ALI_DMA_MASK 0x7fffffff /* ALI Tridents have 31-bit DMA. Wow. */
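/*
 * Note on the masks above: 0x3fffffff limits DMA buffers to the low 1 GB
 * (30 address bits) on the Trident/SiS parts, while the ALi 5451 can
 * address 2 GB (31 bits).  A probe routine would typically hand the
 * appropriate mask to the PCI layer before allocating consistent buffers,
 * roughly (illustrative sketch only):
 *
 *	if (pci_set_dma_mask(pci_dev, TRIDENT_DMA_MASK))
 *		return -ENODEV;
 */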
242
243#define NR_HW_CH 32
244
245/* maximum number of AC97 codecs connected; AC97 2.0 defines 4, but the 7018 and 4D-NX only
246   have 2 SDATA_IN lines (currently) */
247#define NR_AC97 2
248
249/* minor number of /dev/swmodem (temporary, experimental) */
250#define SND_DEV_SWMODEM 7
251
252static const unsigned ali_multi_channels_5_1[] = {
253 /*ALI_SURR_LEFT_CHANNEL, ALI_SURR_RIGHT_CHANNEL, */
254 ALI_CENTER_CHANNEL,
255 ALI_LEF_CHANNEL,
256 ALI_SURR_LEFT_CHANNEL,
257 ALI_SURR_RIGHT_CHANNEL
258};
259
260static const unsigned sample_size[] = { 1, 2, 2, 4 };
261static const unsigned sample_shift[] = { 0, 1, 1, 2 };
262
263static const char invalid_magic[] = KERN_CRIT "trident: invalid magic value in %s\n";
264
265enum {
266 TRIDENT_4D_DX = 0,
267 TRIDENT_4D_NX,
268 SIS_7018,
269 ALI_5451,
270 CYBER5050
271};
272
273static char *card_names[] = {
274 "Trident 4DWave DX",
275 "Trident 4DWave NX",
276 "SiS 7018 PCI Audio",
277 "ALi Audio Accelerator",
278 "Tvia/IGST CyberPro 5050"
279};
280
281static struct pci_device_id trident_pci_tbl[] = {
282 {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX),
283 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, TRIDENT_4D_DX},
284 {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX),
285 0, 0, TRIDENT_4D_NX},
286 {PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018), 0, 0, SIS_7018},
287 {PCI_DEVICE(PCI_VENDOR_ID_ALI, PCI_DEVICE_ID_ALI_5451), 0, 0, ALI_5451},
288 {PCI_DEVICE(PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_5050),
289 0, 0, CYBER5050},
290 {0,}
291};
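/*
 * The 4DWave DX entry above also matches on PCI class (multimedia audio,
 * class mask 0xffff00), presumably so the driver does not bind to a
 * non-audio function that reuses the same vendor/device ID.  The other
 * entries match on vendor/device ID alone (class and class_mask left 0).
 */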
292
293MODULE_DEVICE_TABLE(pci, trident_pci_tbl);
294
295/* "software" or virtual channel, an instance of opened /dev/dsp */
296struct trident_state {
297 unsigned int magic;
298 struct trident_card *card; /* Card info */
299
300 /* file mode */
301 mode_t open_mode;
302
303 /* virtual channel number */
304 int virt;
305
306 struct dmabuf {
307 /* wave sample stuff */
308 unsigned int rate;
309 unsigned char fmt, enable;
310
311 /* hardware channel */
312 struct trident_channel *channel;
313
314 /* OSS buffer management stuff */
315 void *rawbuf;
316 dma_addr_t dma_handle;
317 unsigned buforder;
318 unsigned numfrag;
319 unsigned fragshift;
320
321 /* our buffer acts like a circular ring */
322 unsigned hwptr; /* where dma last started, updated by update_ptr */
323 unsigned swptr; /* where driver last clear/filled, updated by read/write */
 324		int count;	/* bytes to be consumed or generated by the dma machine */
325 unsigned total_bytes; /* total bytes dmaed by hardware */
326
327 unsigned error; /* number of over/underruns */
328 /* put process on wait queue when no more space in buffer */
329 wait_queue_head_t wait;
330
331 /* redundant, but makes calculations easier */
332 unsigned fragsize;
333 unsigned dmasize;
334 unsigned fragsamples;
335
336 /* OSS stuff */
337 unsigned mapped:1;
338 unsigned ready:1;
339 unsigned endcleared:1;
340 unsigned update_flag;
341 unsigned ossfragshift;
342 int ossmaxfrags;
343 unsigned subdivision;
344
345 } dmabuf;
346
347 /* 5.1 channels */
348 struct trident_state *other_states[4];
349 int multi_channels_adjust_count;
350 unsigned chans_num;
351 unsigned long fmt_flag;
352 /* Guard against mmap/write/read races */
353 struct mutex sem;
354
355};
356
357/* hardware channels */
358struct trident_channel {
359 int num; /* channel number */
 360	u32 lba;	/* Loop Begin Address, where dma buffer starts */
 361	u32 eso;	/* End Sample Offset, where dma buffer ends */
362 /* (in the unit of samples) */
363 u32 delta; /* delta value, sample rate / 48k for playback, */
364 /* 48k/sample rate for recording */
365 u16 attribute; /* control where PCM data go and come */
366 u16 fm_vol;
367 u32 control; /* signed/unsigned, 8/16 bits, mono/stereo */
368};
369
370struct trident_pcm_bank_address {
371 u32 start;
372 u32 stop;
373 u32 aint;
374 u32 aint_en;
375};
376
377static struct trident_pcm_bank_address bank_a_addrs = {
378 T4D_START_A,
379 T4D_STOP_A,
380 T4D_AINT_A,
381 T4D_AINTEN_A
382};
383
384static struct trident_pcm_bank_address bank_b_addrs = {
385 T4D_START_B,
386 T4D_STOP_B,
387 T4D_AINT_B,
388 T4D_AINTEN_B
389};
390
391struct trident_pcm_bank {
392 /* register addresses to control bank operations */
393 struct trident_pcm_bank_address *addresses;
394 /* each bank has 32 channels */
395 u32 bitmap; /* channel allocation bitmap */
396 struct trident_channel channels[32];
397};
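/*
 * Channel numbering used throughout this file: hardware channels 0-31
 * live in bank A and 32-63 in bank B, so "channel >> 5" selects the bank
 * and "1 << (channel & 0x1f)" is the per-bank bit used in the allocation
 * bitmap and in the start/stop/interrupt registers.  E.g. hardware
 * channel 35 is bit 3 of bank B.
 */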
398
399struct trident_card {
400 unsigned int magic;
401
402 /* We keep trident cards in a linked list */
403 struct trident_card *next;
404
405 /* single open lock mechanism, only used for recording */
406 struct mutex open_mutex;
407
408 /* The trident has a certain amount of cross channel interaction
409 so we use a single per card lock */
410 spinlock_t lock;
411
412 /* PCI device stuff */
413 struct pci_dev *pci_dev;
414 u16 pci_id;
415 u8 revision;
416
417 /* soundcore stuff */
418 int dev_audio;
419
420 /* structures for abstraction of hardware facilities, codecs, */
421 /* banks and channels */
422 struct ac97_codec *ac97_codec[NR_AC97];
423 struct trident_pcm_bank banks[NR_BANKS];
424 struct trident_state *states[NR_HW_CH];
425
426 /* hardware resources */
427 unsigned long iobase;
428 u32 irq;
429
430 /* Function support */
431 struct trident_channel *(*alloc_pcm_channel) (struct trident_card *);
432 struct trident_channel *(*alloc_rec_pcm_channel) (struct trident_card *);
433 void (*free_pcm_channel) (struct trident_card *, unsigned int chan);
434 void (*address_interrupt) (struct trident_card *);
435
436 /* Added by Matt Wu 01-05-2001 for spdif in */
437 int multi_channel_use_count;
438 int rec_channel_use_count;
439 u16 mixer_regs[64][NR_AC97]; /* Made card local by Alan */
440 int mixer_regs_ready;
441
442 /* Added for hardware volume control */
443 int hwvolctl;
444 struct timer_list timer;
445
446 /* Game port support */
447 struct gameport *gameport;
448};
449
450enum dmabuf_mode {
451 DM_PLAYBACK = 0,
452 DM_RECORD
453};
454
455/* table to map from CHANNELMASK to channel attribute for SiS 7018 */
456static u16 mask2attr[] = {
457 PCM_LR, PCM_LR, SURR_LR, CENTER_LFE,
458 HSET, MIC, MODEM_LINE1, MODEM_LINE2,
459 I2S_LR, SPDIF_LR
460};
461
462/* table to map from channel attribute to CHANNELMASK for SiS 7018 */
463static int attr2mask[] = {
464 DSP_BIND_MODEM1, DSP_BIND_MODEM2, DSP_BIND_FRONT, DSP_BIND_HANDSET,
465 DSP_BIND_I2S, DSP_BIND_CENTER_LFE, DSP_BIND_SURR, DSP_BIND_SPDIF
466};
467
468/* Added by Matt Wu 01-05-2001 for spdif in */
469static int ali_close_multi_channels(void);
470static void ali_delay(struct trident_card *card, int interval);
471static void ali_detect_spdif_rate(struct trident_card *card);
472
473static void ali_ac97_write(struct ac97_codec *codec, u8 reg, u16 val);
474static u16 ali_ac97_read(struct ac97_codec *codec, u8 reg);
475
476static struct trident_card *devs;
477
478static void trident_ac97_set(struct ac97_codec *codec, u8 reg, u16 val);
479static u16 trident_ac97_get(struct ac97_codec *codec, u8 reg);
480
481static int trident_open_mixdev(struct inode *inode, struct file *file);
482static int trident_ioctl_mixdev(struct inode *inode, struct file *file,
483 unsigned int cmd, unsigned long arg);
484
485static void ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val);
486static u16 ali_ac97_get(struct trident_card *card, int secondary, u8 reg);
487static void ali_set_spdif_out_rate(struct trident_card *card, unsigned int rate);
488static void ali_enable_special_channel(struct trident_state *stat);
489static struct trident_channel *ali_alloc_rec_pcm_channel(struct trident_card *card);
490static struct trident_channel *ali_alloc_pcm_channel(struct trident_card *card);
491static void ali_free_pcm_channel(struct trident_card *card, unsigned int channel);
492static int ali_setup_multi_channels(struct trident_card *card, int chan_nums);
493static unsigned int ali_get_spdif_in_rate(struct trident_card *card);
494static void ali_setup_spdif_in(struct trident_card *card);
495static void ali_disable_spdif_in(struct trident_card *card);
496static void ali_disable_special_channel(struct trident_card *card, int ch);
497static void ali_setup_spdif_out(struct trident_card *card, int flag);
498static int ali_write_5_1(struct trident_state *state,
499 const char __user *buffer,
500 int cnt_for_multi_channel, unsigned int *copy_count,
501 unsigned int *state_cnt);
502static int ali_allocate_other_states_resources(struct trident_state *state,
503 int chan_nums);
504static void ali_free_other_states_resources(struct trident_state *state);
505
506#define seek_offset(dma_ptr, buffer, cnt, offset, copy_count) do { \
507 (dma_ptr) += (offset); \
508 (buffer) += (offset); \
509 (cnt) -= (offset); \
510 (copy_count) += (offset); \
511} while (0)
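/*
 * seek_offset() is used when splitting 5.1 writes to step all of the
 * bookkeeping variables past a chunk that has just been copied.  For
 * example, after copying 512 bytes, seek_offset(dma_ptr, buffer, cnt,
 * 512, copy_count) advances the DMA pointer and the user-buffer pointer
 * by 512, shrinks the remaining count by 512 and grows the running copy
 * count by 512.
 */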
512
513static inline int lock_set_fmt(struct trident_state* state)
514{
515 if (test_and_set_bit(0, &state->fmt_flag))
516 return -EFAULT;
517
518 return 0;
519}
520
521static inline void unlock_set_fmt(struct trident_state* state)
522{
523 clear_bit(0, &state->fmt_flag);
524}
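/*
 * lock_set_fmt()/unlock_set_fmt() implement a non-blocking, one-bit
 * "format change in progress" lock on state->fmt_flag: test_and_set_bit()
 * either takes bit 0 atomically or reports that someone else already
 * holds it, in which case the caller bails out with an error instead of
 * sleeping.  The typical pattern in this file is:
 *
 *	if ((ret = lock_set_fmt(state)) < 0)
 *		return ret;
 *	... reprogram sample format / buffers ...
 *	unlock_set_fmt(state);
 */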
525
526static int
527trident_enable_loop_interrupts(struct trident_card *card)
528{
529 u32 global_control;
530
531 global_control = inl(TRID_REG(card, T4D_LFO_GC_CIR));
532
533 switch (card->pci_id) {
534 case PCI_DEVICE_ID_SI_7018:
535 global_control |= (ENDLP_IE | MIDLP_IE | BANK_B_EN);
536 break;
537 case PCI_DEVICE_ID_ALI_5451:
538 case PCI_DEVICE_ID_TRIDENT_4DWAVE_DX:
539 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
540 case PCI_DEVICE_ID_INTERG_5050:
541 global_control |= (ENDLP_IE | MIDLP_IE);
542 break;
543 default:
544 return 0;
545 }
546
547 outl(global_control, TRID_REG(card, T4D_LFO_GC_CIR));
548
549 pr_debug("trident: Enable Loop Interrupts, globctl = 0x%08X\n",
550 inl(TRID_REG(card, T4D_LFO_GC_CIR)));
551
552 return 1;
553}
554
555static int
556trident_disable_loop_interrupts(struct trident_card *card)
557{
558 u32 global_control;
559
560 global_control = inl(TRID_REG(card, T4D_LFO_GC_CIR));
561 global_control &= ~(ENDLP_IE | MIDLP_IE);
562 outl(global_control, TRID_REG(card, T4D_LFO_GC_CIR));
563
564 pr_debug("trident: Disabled Loop Interrupts, globctl = 0x%08X\n",
565 global_control);
566
567 return 1;
568}
569
570static void
571trident_enable_voice_irq(struct trident_card *card, unsigned int channel)
572{
573 unsigned int mask = 1 << (channel & 0x1f);
574 struct trident_pcm_bank *bank = &card->banks[channel >> 5];
575 u32 reg, addr = bank->addresses->aint_en;
576
577 reg = inl(TRID_REG(card, addr));
578 reg |= mask;
579 outl(reg, TRID_REG(card, addr));
580
581#ifdef DEBUG
582 reg = inl(TRID_REG(card, addr));
583 pr_debug("trident: enabled IRQ on channel %d, %s = 0x%08x(addr:%X)\n",
584 channel, addr == T4D_AINTEN_B ? "AINTEN_B" : "AINTEN_A",
585 reg, addr);
586#endif /* DEBUG */
587}
588
589static void
590trident_disable_voice_irq(struct trident_card *card, unsigned int channel)
591{
592 unsigned int mask = 1 << (channel & 0x1f);
593 struct trident_pcm_bank *bank = &card->banks[channel >> 5];
594 u32 reg, addr = bank->addresses->aint_en;
595
596 reg = inl(TRID_REG(card, addr));
597 reg &= ~mask;
598 outl(reg, TRID_REG(card, addr));
599
600 /* Ack the channel in case the interrupt was set before we disable it. */
601 outl(mask, TRID_REG(card, bank->addresses->aint));
602
603#ifdef DEBUG
604 reg = inl(TRID_REG(card, addr));
605 pr_debug("trident: disabled IRQ on channel %d, %s = 0x%08x(addr:%X)\n",
606 channel, addr == T4D_AINTEN_B ? "AINTEN_B" : "AINTEN_A",
607 reg, addr);
608#endif /* DEBUG */
609}
610
611static void
612trident_start_voice(struct trident_card *card, unsigned int channel)
613{
614 unsigned int mask = 1 << (channel & 0x1f);
615 struct trident_pcm_bank *bank = &card->banks[channel >> 5];
616 u32 addr = bank->addresses->start;
617
618#ifdef DEBUG
619 u32 reg;
620#endif /* DEBUG */
621
622 outl(mask, TRID_REG(card, addr));
623
624#ifdef DEBUG
625 reg = inl(TRID_REG(card, addr));
626 pr_debug("trident: start voice on channel %d, %s = 0x%08x(addr:%X)\n",
627 channel, addr == T4D_START_B ? "START_B" : "START_A",
628 reg, addr);
629#endif /* DEBUG */
630}
631
632static void
633trident_stop_voice(struct trident_card *card, unsigned int channel)
634{
635 unsigned int mask = 1 << (channel & 0x1f);
636 struct trident_pcm_bank *bank = &card->banks[channel >> 5];
637 u32 addr = bank->addresses->stop;
638
639#ifdef DEBUG
640 u32 reg;
641#endif /* DEBUG */
642
643 outl(mask, TRID_REG(card, addr));
644
645#ifdef DEBUG
646 reg = inl(TRID_REG(card, addr));
647 pr_debug("trident: stop voice on channel %d, %s = 0x%08x(addr:%X)\n",
648 channel, addr == T4D_STOP_B ? "STOP_B" : "STOP_A",
649 reg, addr);
650#endif /* DEBUG */
651}
652
653static u32
654trident_get_interrupt_mask(struct trident_card *card, unsigned int channel)
655{
656 struct trident_pcm_bank *bank = &card->banks[channel];
657 u32 addr = bank->addresses->aint;
658 return inl(TRID_REG(card, addr));
659}
660
661static int
662trident_check_channel_interrupt(struct trident_card *card, unsigned int channel)
663{
664 unsigned int mask = 1 << (channel & 0x1f);
665 u32 reg = trident_get_interrupt_mask(card, channel >> 5);
666
667#ifdef DEBUG
668 if (reg & mask)
669 pr_debug("trident: channel %d has interrupt, %s = 0x%08x\n",
670 channel, reg == T4D_AINT_B ? "AINT_B" : "AINT_A",
671 reg);
672#endif /* DEBUG */
673 return (reg & mask) ? 1 : 0;
674}
675
676static void
677trident_ack_channel_interrupt(struct trident_card *card, unsigned int channel)
678{
679 unsigned int mask = 1 << (channel & 0x1f);
680 struct trident_pcm_bank *bank = &card->banks[channel >> 5];
681 u32 reg, addr = bank->addresses->aint;
682
683 reg = inl(TRID_REG(card, addr));
684 reg &= mask;
685 outl(reg, TRID_REG(card, addr));
686
687#ifdef DEBUG
688 reg = inl(TRID_REG(card, T4D_AINT_B));
689 pr_debug("trident: Ack channel %d interrupt, AINT_B = 0x%08x\n",
690 channel, reg);
691#endif /* DEBUG */
692}
693
694static struct trident_channel *
695trident_alloc_pcm_channel(struct trident_card *card)
696{
697 struct trident_pcm_bank *bank;
698 int idx;
699
700 bank = &card->banks[BANK_B];
701
702 for (idx = 31; idx >= 0; idx--) {
703 if (!(bank->bitmap & (1 << idx))) {
704 struct trident_channel *channel = &bank->channels[idx];
705 bank->bitmap |= 1 << idx;
706 channel->num = idx + 32;
707 return channel;
708 }
709 }
710
711 /* no more free channels available */
712 printk(KERN_ERR "trident: no more channels available on Bank B.\n");
713 return NULL;
714}
715
716static void
717trident_free_pcm_channel(struct trident_card *card, unsigned int channel)
718{
719 int bank;
720 unsigned char b;
721
722 if (channel < 31 || channel > 63)
723 return;
724
725 if (card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_DX ||
726 card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_NX) {
727 b = inb(TRID_REG(card, T4D_REC_CH));
728 if ((b & ~0x80) == channel)
729 outb(0x0, TRID_REG(card, T4D_REC_CH));
730 }
731
732 bank = channel >> 5;
733 channel = channel & 0x1f;
734
735 card->banks[bank].bitmap &= ~(1 << (channel));
736}
737
738static struct trident_channel *
739cyber_alloc_pcm_channel(struct trident_card *card)
740{
741 struct trident_pcm_bank *bank;
742 int idx;
743
744 /* The cyberpro 5050 has only 32 voices and one bank */
745 /* .. at least they are not documented (if you want to call that
746 * crap documentation), perhaps broken ? */
747
748 bank = &card->banks[BANK_A];
749
750 for (idx = 31; idx >= 0; idx--) {
751 if (!(bank->bitmap & (1 << idx))) {
752 struct trident_channel *channel = &bank->channels[idx];
753 bank->bitmap |= 1 << idx;
754 channel->num = idx;
755 return channel;
756 }
757 }
758
759 /* no more free channels available */
760 printk(KERN_ERR "cyberpro5050: no more channels available on Bank A.\n");
761 return NULL;
762}
763
764static void
765cyber_free_pcm_channel(struct trident_card *card, unsigned int channel)
766{
767 if (channel > 31)
768 return;
769 card->banks[BANK_A].bitmap &= ~(1 << (channel));
770}
771
772static inline void
773cyber_outidx(int port, int idx, int data)
774{
775 outb(idx, port);
776 outb(data, port + 1);
777}
778
779static inline int
780cyber_inidx(int port, int idx)
781{
782 outb(idx, port);
783 return inb(port + 1);
784}
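/*
 * The CyberPro audio core is programmed through a classic index/data
 * port pair: the register index is written to the base port and the data
 * byte is read or written at base + 1.  So, for example,
 * cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE, val) selects the
 * audio-enable register and then writes val to it.
 */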
785
786static int
787cyber_init_ritual(struct trident_card *card)
788{
789 /* some black magic, taken from SDK samples */
790 /* remove this and nothing will work */
791 int portDat;
792 int ret = 0;
793 unsigned long flags;
794
795 /*
796 * Keep interrupts off for the configure - we don't want to
797 * clash with another cyberpro config event
798 */
799
800 spin_lock_irqsave(&card->lock, flags);
801 portDat = cyber_inidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE);
802 /* enable, if it was disabled */
803 if ((portDat & CYBER_BMSK_AUENZ) != CYBER_BMSK_AUENZ_ENABLE) {
804 printk(KERN_INFO "cyberpro5050: enabling audio controller\n");
805 cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE,
806 portDat | CYBER_BMSK_AUENZ_ENABLE);
807 /* check again if hardware is enabled now */
808 portDat = cyber_inidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE);
809 }
810 if ((portDat & CYBER_BMSK_AUENZ) != CYBER_BMSK_AUENZ_ENABLE) {
811 printk(KERN_ERR "cyberpro5050: initAudioAccess: no success\n");
812 ret = -1;
813 } else {
814 cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_IRQ_ENABLE,
815 CYBER_BMSK_AUDIO_INT_ENABLE);
816 cyber_outidx(CYBER_PORT_AUDIO, 0xbf, 0x01);
817 cyber_outidx(CYBER_PORT_AUDIO, 0xba, 0x20);
818 cyber_outidx(CYBER_PORT_AUDIO, 0xbb, 0x08);
819 cyber_outidx(CYBER_PORT_AUDIO, 0xbf, 0x02);
820 cyber_outidx(CYBER_PORT_AUDIO, 0xb3, 0x06);
821 cyber_outidx(CYBER_PORT_AUDIO, 0xbf, 0x00);
822 }
823 spin_unlock_irqrestore(&card->lock, flags);
824 return ret;
825}
826
827/* called with spin lock held */
828
829static int
830trident_load_channel_registers(struct trident_card *card, u32 * data,
831 unsigned int channel)
832{
833 int i;
834
835 if (channel > 63)
836 return 0;
837
838 /* select hardware channel to write */
839 outb(channel, TRID_REG(card, T4D_LFO_GC_CIR));
840
841 /* Output the channel registers, but don't write register
842 three to an ALI chip. */
843 for (i = 0; i < CHANNEL_REGS; i++) {
844 if (i == 3 && card->pci_id == PCI_DEVICE_ID_ALI_5451)
845 continue;
846 outl(data[i], TRID_REG(card, CHANNEL_START + 4 * i));
847 }
848 if (card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
849 card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
850 outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF1));
851 outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF2));
852 }
853 return 1;
854}
855
856/* called with spin lock held */
857static int
858trident_write_voice_regs(struct trident_state *state)
859{
860 unsigned int data[CHANNEL_REGS + 1];
861 struct trident_channel *channel;
862
863 channel = state->dmabuf.channel;
864
865 data[1] = channel->lba;
866 data[4] = channel->control;
867
868 switch (state->card->pci_id) {
869 case PCI_DEVICE_ID_ALI_5451:
870 data[0] = 0; /* Current Sample Offset */
871 data[2] = (channel->eso << 16) | (channel->delta & 0xffff);
872 data[3] = 0;
873 break;
874 case PCI_DEVICE_ID_SI_7018:
875 case PCI_DEVICE_ID_INTERG_5050:
876 data[0] = 0; /* Current Sample Offset */
877 data[2] = (channel->eso << 16) | (channel->delta & 0xffff);
878 data[3] = (channel->attribute << 16) | (channel->fm_vol & 0xffff);
879 break;
880 case PCI_DEVICE_ID_TRIDENT_4DWAVE_DX:
881 data[0] = 0; /* Current Sample Offset */
882 data[2] = (channel->eso << 16) | (channel->delta & 0xffff);
883 data[3] = channel->fm_vol & 0xffff;
884 break;
885 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
886 data[0] = (channel->delta << 24);
887 data[2] = ((channel->delta << 16) & 0xff000000) |
888 (channel->eso & 0x00ffffff);
889 data[3] = channel->fm_vol & 0xffff;
890 break;
891 default:
892 return 0;
893 }
894
895 return trident_load_channel_registers(state->card, data, channel->num);
896}
897
898static int
899compute_rate_play(u32 rate)
900{
901 int delta;
902 /* We special case 44100 and 8000 since rounding with the equation
903 does not give us an accurate enough value. For 11025 and 22050
904 the equation gives us the best answer. All other frequencies will
905 also use the equation. JDW */
906 if (rate == 44100)
907 delta = 0xeb3;
908 else if (rate == 8000)
909 delta = 0x2ab;
910 else if (rate == 48000)
911 delta = 0x1000;
912 else
913 delta = (((rate << 12) + rate) / 48000) & 0x0000ffff;
914 return delta;
915}
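/*
 * The playback delta is the resampling ratio rate/48000 in 4.12 fixed
 * point.  A quick sanity check of the special-cased values: 48000 Hz
 * gives 0x1000 (exactly 1.0), 44100 Hz gives 44100/48000 * 4096 = 3763.2,
 * rounded to 0xeb3, and 8000 Hz gives 8000/48000 * 4096 = 682.7, rounded
 * to 0x2ab.  The generic expression above truncates and would yield
 * 0xeb4 and 0x2aa for those two rates, which is why they are
 * special-cased.
 */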
916
917static int
918compute_rate_rec(u32 rate)
919{
920 int delta;
921
922 if (rate == 44100)
923 delta = 0x116a;
924 else if (rate == 8000)
925 delta = 0x6000;
926 else if (rate == 48000)
927 delta = 0x1000;
928 else
929 delta = ((48000 << 12) / rate) & 0x0000ffff;
930
931 return delta;
932}
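/*
 * The recording delta is the inverse ratio, 48000/rate, again in 4.12
 * fixed point.  E.g. 48000 Hz -> 0x1000 (1.0), 8000 Hz -> 6.0 * 4096 =
 * 0x6000, and 44100 Hz -> 48000/44100 * 4096 = 4458.2, i.e. the
 * special-cased 0x116a.
 */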
933
934/* set playback sample rate */
935static unsigned int
936trident_set_dac_rate(struct trident_state *state, unsigned int rate)
937{
938 struct dmabuf *dmabuf = &state->dmabuf;
939
940 if (rate > 48000)
941 rate = 48000;
942 if (rate < 4000)
943 rate = 4000;
944
945 dmabuf->rate = rate;
946 dmabuf->channel->delta = compute_rate_play(rate);
947
948 trident_write_voice_regs(state);
949
950 pr_debug("trident: called trident_set_dac_rate : rate = %d\n", rate);
951
952 return rate;
953}
954
955/* set recording sample rate */
956static unsigned int
957trident_set_adc_rate(struct trident_state *state, unsigned int rate)
958{
959 struct dmabuf *dmabuf = &state->dmabuf;
960
961 if (rate > 48000)
962 rate = 48000;
963 if (rate < 4000)
964 rate = 4000;
965
966 dmabuf->rate = rate;
967 dmabuf->channel->delta = compute_rate_rec(rate);
968
969 trident_write_voice_regs(state);
970
971 pr_debug("trident: called trident_set_adc_rate : rate = %d\n", rate);
972
973 return rate;
974}
975
976/* prepare channel attributes for playback */
977static void
978trident_play_setup(struct trident_state *state)
979{
980 struct dmabuf *dmabuf = &state->dmabuf;
981 struct trident_channel *channel = dmabuf->channel;
982
983 channel->lba = dmabuf->dma_handle;
984 channel->delta = compute_rate_play(dmabuf->rate);
985
986 channel->eso = dmabuf->dmasize >> sample_shift[dmabuf->fmt];
987 channel->eso -= 1;
988
989 if (state->card->pci_id != PCI_DEVICE_ID_SI_7018) {
990 channel->attribute = 0;
991 if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451) {
992 if ((channel->num == ALI_SPDIF_IN_CHANNEL) ||
993 (channel->num == ALI_PCM_IN_CHANNEL))
994 ali_disable_special_channel(state->card, channel->num);
995 else if ((inl(TRID_REG(state->card, ALI_GLOBAL_CONTROL))
996 & ALI_SPDIF_OUT_CH_ENABLE)
997 && (channel->num == ALI_SPDIF_OUT_CHANNEL)) {
998 ali_set_spdif_out_rate(state->card,
999 state->dmabuf.rate);
1000 state->dmabuf.channel->delta = 0x1000;
1001 }
1002 }
1003 }
1004
1005 channel->fm_vol = 0x0;
1006
1007 channel->control = CHANNEL_LOOP;
1008 if (dmabuf->fmt & TRIDENT_FMT_16BIT) {
1009 /* 16-bits */
1010 channel->control |= CHANNEL_16BITS;
1011 /* signed */
1012 channel->control |= CHANNEL_SIGNED;
1013 }
1014 if (dmabuf->fmt & TRIDENT_FMT_STEREO)
1015 /* stereo */
1016 channel->control |= CHANNEL_STEREO;
1017
1018 pr_debug("trident: trident_play_setup, LBA = 0x%08x, Delta = 0x%08x, "
1019 "ESO = 0x%08x, Control = 0x%08x\n", channel->lba,
1020 channel->delta, channel->eso, channel->control);
1021
1022 trident_write_voice_regs(state);
1023}
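/*
 * ESO is programmed in samples, not bytes: dmasize is shifted down by
 * sample_shift[fmt] and decremented because the hardware counts the last
 * valid sample offset.  For a 32 KB buffer of 16-bit stereo data
 * (shift 2) that is 8192 samples, so ESO = 8191.
 */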
1024
1025/* prepare channel attributes for recording */
1026static void
1027trident_rec_setup(struct trident_state *state)
1028{
1029 u16 w;
1030 u8 bval;
1031
1032 struct trident_card *card = state->card;
1033 struct dmabuf *dmabuf = &state->dmabuf;
1034 struct trident_channel *channel = dmabuf->channel;
1035 unsigned int rate;
1036
1037 /* Enable AC-97 ADC (capture) */
1038 switch (card->pci_id) {
1039 case PCI_DEVICE_ID_ALI_5451:
1040 ali_enable_special_channel(state);
1041 break;
1042 case PCI_DEVICE_ID_SI_7018:
1043 /* for 7018, the ac97 is always in playback/record (duplex) mode */
1044 break;
1045 case PCI_DEVICE_ID_TRIDENT_4DWAVE_DX:
1046 w = inb(TRID_REG(card, DX_ACR2_AC97_COM_STAT));
1047 outb(w | 0x48, TRID_REG(card, DX_ACR2_AC97_COM_STAT));
1048 /* enable and set record channel */
1049 outb(0x80 | channel->num, TRID_REG(card, T4D_REC_CH));
1050 break;
1051 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
1052 w = inw(TRID_REG(card, T4D_MISCINT));
1053 outw(w | 0x1000, TRID_REG(card, T4D_MISCINT));
1054 /* enable and set record channel */
1055 outb(0x80 | channel->num, TRID_REG(card, T4D_REC_CH));
1056 break;
1057 case PCI_DEVICE_ID_INTERG_5050:
1058 /* don't know yet, using special channel 22 in GC1(0xd4)? */
1059 break;
1060 default:
1061 return;
1062 }
1063
1064 channel->lba = dmabuf->dma_handle;
1065 channel->delta = compute_rate_rec(dmabuf->rate);
1066 if ((card->pci_id == PCI_DEVICE_ID_ALI_5451) &&
1067 (channel->num == ALI_SPDIF_IN_CHANNEL)) {
1068 rate = ali_get_spdif_in_rate(card);
1069 if (rate == 0) {
1070 printk(KERN_WARNING "trident: ALi 5451 "
1071 "S/PDIF input setup error!\n");
1072 rate = 48000;
1073 }
1074 bval = inb(TRID_REG(card, ALI_SPDIF_CTRL));
1075 if (bval & 0x10) {
1076 outb(bval, TRID_REG(card, ALI_SPDIF_CTRL));
1077 printk(KERN_WARNING "trident: cleared ALi "
1078 "5451 S/PDIF parity error flag.\n");
1079 }
1080
1081 if (rate != 48000)
1082 channel->delta = ((rate << 12) / dmabuf->rate) & 0x0000ffff;
1083 }
1084
1085 channel->eso = dmabuf->dmasize >> sample_shift[dmabuf->fmt];
1086 channel->eso -= 1;
1087
1088 if (state->card->pci_id != PCI_DEVICE_ID_SI_7018) {
1089 channel->attribute = 0;
1090 }
1091
1092 channel->fm_vol = 0x0;
1093
1094 channel->control = CHANNEL_LOOP;
1095 if (dmabuf->fmt & TRIDENT_FMT_16BIT) {
1096 /* 16-bits */
1097 channel->control |= CHANNEL_16BITS;
1098 /* signed */
1099 channel->control |= CHANNEL_SIGNED;
1100 }
1101 if (dmabuf->fmt & TRIDENT_FMT_STEREO)
1102 /* stereo */
1103 channel->control |= CHANNEL_STEREO;
1104
 1105	pr_debug("trident: trident_rec_setup, LBA = 0x%08x, Delta = 0x%08x, "
1106 "ESO = 0x%08x, Control = 0x%08x\n", channel->lba,
1107 channel->delta, channel->eso, channel->control);
1108
1109 trident_write_voice_regs(state);
1110}
1111
1112/* get current playback/recording dma buffer pointer (byte offset from LBA),
1113 called with spinlock held! */
1114static inline unsigned
1115trident_get_dma_addr(struct trident_state *state)
1116{
1117 struct dmabuf *dmabuf = &state->dmabuf;
1118 u32 cso;
1119
1120 if (!dmabuf->enable)
1121 return 0;
1122
1123 outb(dmabuf->channel->num, TRID_REG(state->card, T4D_LFO_GC_CIR));
1124
1125 switch (state->card->pci_id) {
1126 case PCI_DEVICE_ID_ALI_5451:
1127 case PCI_DEVICE_ID_SI_7018:
1128 case PCI_DEVICE_ID_TRIDENT_4DWAVE_DX:
1129 case PCI_DEVICE_ID_INTERG_5050:
1130 /* 16 bits ESO, CSO for 7018 and DX */
1131 cso = inw(TRID_REG(state->card, CH_DX_CSO_ALPHA_FMS + 2));
1132 break;
1133 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
1134 /* 24 bits ESO, CSO for NX */
1135 cso = inl(TRID_REG(state->card, CH_NX_DELTA_CSO)) & 0x00ffffff;
1136 break;
1137 default:
1138 return 0;
1139 }
1140
1141 pr_debug("trident: trident_get_dma_addr: chip reported channel: %d, "
1142 "cso = 0x%04x\n", dmabuf->channel->num, cso);
1143
1144 /* ESO and CSO are in units of Samples, convert to byte offset */
1145 cso <<= sample_shift[dmabuf->fmt];
1146
1147 return (cso % dmabuf->dmasize);
1148}
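/*
 * CSO also counts in samples, so the byte offset into the ring is
 * cso << sample_shift[fmt], folded back into the buffer with "% dmasize".
 * E.g. CSO = 100 on a 16-bit stereo stream means the DMA engine is 400
 * bytes past the LBA.
 */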
1149
1150/* Stop recording (lock held) */
1151static inline void
1152__stop_adc(struct trident_state *state)
1153{
1154 struct dmabuf *dmabuf = &state->dmabuf;
1155 unsigned int chan_num = dmabuf->channel->num;
1156 struct trident_card *card = state->card;
1157
1158 dmabuf->enable &= ~ADC_RUNNING;
1159 trident_stop_voice(card, chan_num);
1160 trident_disable_voice_irq(card, chan_num);
1161}
1162
1163static void
1164stop_adc(struct trident_state *state)
1165{
1166 struct trident_card *card = state->card;
1167 unsigned long flags;
1168
1169 spin_lock_irqsave(&card->lock, flags);
1170 __stop_adc(state);
1171 spin_unlock_irqrestore(&card->lock, flags);
1172}
1173
1174static void
1175start_adc(struct trident_state *state)
1176{
1177 struct dmabuf *dmabuf = &state->dmabuf;
1178 unsigned int chan_num = dmabuf->channel->num;
1179 struct trident_card *card = state->card;
1180 unsigned long flags;
1181
1182 spin_lock_irqsave(&card->lock, flags);
1183 if ((dmabuf->mapped ||
1184 dmabuf->count < (signed) dmabuf->dmasize) &&
1185 dmabuf->ready) {
1186 dmabuf->enable |= ADC_RUNNING;
1187 trident_enable_voice_irq(card, chan_num);
1188 trident_start_voice(card, chan_num);
1189 }
1190 spin_unlock_irqrestore(&card->lock, flags);
1191}
1192
1193/* stop playback (lock held) */
1194static inline void
1195__stop_dac(struct trident_state *state)
1196{
1197 struct dmabuf *dmabuf = &state->dmabuf;
1198 unsigned int chan_num = dmabuf->channel->num;
1199 struct trident_card *card = state->card;
1200
1201 dmabuf->enable &= ~DAC_RUNNING;
1202 trident_stop_voice(card, chan_num);
1203 if (state->chans_num == 6) {
1204 trident_stop_voice(card, state->other_states[0]->
1205 dmabuf.channel->num);
1206 trident_stop_voice(card, state->other_states[1]->
1207 dmabuf.channel->num);
1208 trident_stop_voice(card, state->other_states[2]->
1209 dmabuf.channel->num);
1210 trident_stop_voice(card, state->other_states[3]->
1211 dmabuf.channel->num);
1212 }
1213 trident_disable_voice_irq(card, chan_num);
1214}
1215
1216static void
1217stop_dac(struct trident_state *state)
1218{
1219 struct trident_card *card = state->card;
1220 unsigned long flags;
1221
1222 spin_lock_irqsave(&card->lock, flags);
1223 __stop_dac(state);
1224 spin_unlock_irqrestore(&card->lock, flags);
1225}
1226
1227static void
1228start_dac(struct trident_state *state)
1229{
1230 struct dmabuf *dmabuf = &state->dmabuf;
1231 unsigned int chan_num = dmabuf->channel->num;
1232 struct trident_card *card = state->card;
1233 unsigned long flags;
1234
1235 spin_lock_irqsave(&card->lock, flags);
1236 if ((dmabuf->mapped || dmabuf->count > 0) && dmabuf->ready) {
1237 dmabuf->enable |= DAC_RUNNING;
1238 trident_enable_voice_irq(card, chan_num);
1239 trident_start_voice(card, chan_num);
1240 if (state->chans_num == 6) {
1241 trident_start_voice(card, state->other_states[0]->
1242 dmabuf.channel->num);
1243 trident_start_voice(card, state->other_states[1]->
1244 dmabuf.channel->num);
1245 trident_start_voice(card, state->other_states[2]->
1246 dmabuf.channel->num);
1247 trident_start_voice(card, state->other_states[3]->
1248 dmabuf.channel->num);
1249 }
1250 }
1251 spin_unlock_irqrestore(&card->lock, flags);
1252}
1253
1254#define DMABUF_DEFAULTORDER (15-PAGE_SHIFT)
1255#define DMABUF_MINORDER 1
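/*
 * With 4 KB pages (PAGE_SHIFT = 12) the default order is 15 - 12 = 3,
 * i.e. a 32 KB DMA buffer; the allocator below falls back order by order
 * down to DMABUF_MINORDER (8 KB) when memory is tight.
 */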
1256
1257/* allocate a DMA buffer of the given page order */
1258static int
1259alloc_dmabuf(struct dmabuf *dmabuf, struct pci_dev *pci_dev, int order)
1260{
1261 void *rawbuf = NULL;
1262 struct page *page, *pend;
1263
1264 if (!(rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order,
1265 &dmabuf->dma_handle)))
1266 return -ENOMEM;
1267
1268 pr_debug("trident: allocated %ld (order = %d) bytes at %p\n",
1269 PAGE_SIZE << order, order, rawbuf);
1270
1271 dmabuf->ready = dmabuf->mapped = 0;
1272 dmabuf->rawbuf = rawbuf;
1273 dmabuf->buforder = order;
1274
1275 /* now mark the pages as reserved; otherwise */
1276 /* remap_pfn_range doesn't do what we want */
1277 pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
1278 for (page = virt_to_page(rawbuf); page <= pend; page++)
1279 SetPageReserved(page);
1280
1281 return 0;
1282}
1283
1284/* allocate the main DMA buffer, playback and recording buffer should be */
1285/* allocated separately */
1286static int
1287alloc_main_dmabuf(struct trident_state *state)
1288{
1289 struct dmabuf *dmabuf = &state->dmabuf;
1290 int order;
1291 int ret = -ENOMEM;
1292
1293 /* alloc as big a chunk as we can, FIXME: is this necessary ?? */
1294 for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) {
1295 if (!(ret = alloc_dmabuf(dmabuf, state->card->pci_dev, order)))
1296 return 0;
1297 /* else try again */
1298 }
1299 return ret;
1300}
1301
1302/* deallocate a DMA buffer */
1303static void
1304dealloc_dmabuf(struct dmabuf *dmabuf, struct pci_dev *pci_dev)
1305{
1306 struct page *page, *pend;
1307
1308 if (dmabuf->rawbuf) {
1309 /* undo marking the pages as reserved */
1310 pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
1311 for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
1312 ClearPageReserved(page);
1313 pci_free_consistent(pci_dev, PAGE_SIZE << dmabuf->buforder,
1314 dmabuf->rawbuf, dmabuf->dma_handle);
1315 dmabuf->rawbuf = NULL;
1316 }
1317 dmabuf->mapped = dmabuf->ready = 0;
1318}
1319
1320static int
1321prog_dmabuf(struct trident_state *state, enum dmabuf_mode rec)
1322{
1323 struct dmabuf *dmabuf = &state->dmabuf;
1324 unsigned bytepersec;
1325 struct trident_state *s = state;
1326 unsigned bufsize, dma_nums;
1327 unsigned long flags;
1328 int ret, i, order;
1329
1330 if ((ret = lock_set_fmt(state)) < 0)
1331 return ret;
1332
1333 if (state->chans_num == 6)
1334 dma_nums = 5;
1335 else
1336 dma_nums = 1;
1337
1338 for (i = 0; i < dma_nums; i++) {
1339 if (i > 0) {
1340 s = state->other_states[i - 1];
1341 dmabuf = &s->dmabuf;
1342 dmabuf->fmt = state->dmabuf.fmt;
1343 dmabuf->rate = state->dmabuf.rate;
1344 }
1345
1346 spin_lock_irqsave(&s->card->lock, flags);
1347 dmabuf->hwptr = dmabuf->swptr = dmabuf->total_bytes = 0;
1348 dmabuf->count = dmabuf->error = 0;
1349 spin_unlock_irqrestore(&s->card->lock, flags);
1350
1351 /* allocate DMA buffer if not allocated yet */
1352 if (!dmabuf->rawbuf) {
1353 if (i == 0) {
1354 if ((ret = alloc_main_dmabuf(state))) {
1355 unlock_set_fmt(state);
1356 return ret;
1357 }
1358 } else {
1359 ret = -ENOMEM;
1360 order = state->dmabuf.buforder - 1;
1361 if (order >= DMABUF_MINORDER) {
1362 ret = alloc_dmabuf(dmabuf,
1363 state->card->pci_dev,
1364 order);
1365 }
1366 if (ret) {
1367 /* release the main DMA buffer */
1368 dealloc_dmabuf(&state->dmabuf, state->card->pci_dev);
1369 /* release the auxiliary DMA buffers */
1370 for (i -= 2; i >= 0; i--)
1371 dealloc_dmabuf(&state->other_states[i]->dmabuf,
1372 state->card->pci_dev);
1373 unlock_set_fmt(state);
1374 return ret;
1375 }
1376 }
1377 }
1378 /* FIXME: figure out all this OSS fragment stuff */
1379 bytepersec = dmabuf->rate << sample_shift[dmabuf->fmt];
1380 bufsize = PAGE_SIZE << dmabuf->buforder;
1381 if (dmabuf->ossfragshift) {
1382 if ((1000 << dmabuf->ossfragshift) < bytepersec)
1383 dmabuf->fragshift = ld2(bytepersec / 1000);
1384 else
1385 dmabuf->fragshift = dmabuf->ossfragshift;
1386 } else {
1387			/* let's hand out reasonably big buffers by default */
1388 dmabuf->fragshift = (dmabuf->buforder + PAGE_SHIFT - 2);
1389 }
1390 dmabuf->numfrag = bufsize >> dmabuf->fragshift;
1391 while (dmabuf->numfrag < 4 && dmabuf->fragshift > 3) {
1392 dmabuf->fragshift--;
1393 dmabuf->numfrag = bufsize >> dmabuf->fragshift;
1394 }
1395 dmabuf->fragsize = 1 << dmabuf->fragshift;
1396 if (dmabuf->ossmaxfrags >= 4 && dmabuf->ossmaxfrags < dmabuf->numfrag)
1397 dmabuf->numfrag = dmabuf->ossmaxfrags;
1398 dmabuf->fragsamples = dmabuf->fragsize >> sample_shift[dmabuf->fmt];
1399 dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
1400
1401 memset(dmabuf->rawbuf, (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80,
1402 dmabuf->dmasize);
1403
1404 spin_lock_irqsave(&s->card->lock, flags);
1405 if (rec == DM_RECORD)
1406 trident_rec_setup(s);
1407 else /* DM_PLAYBACK */
1408 trident_play_setup(s);
1409
1410 spin_unlock_irqrestore(&s->card->lock, flags);
1411
1412 /* set the ready flag for the dma buffer */
1413 dmabuf->ready = 1;
1414
1415 pr_debug("trident: prog_dmabuf(%d), sample rate = %d, "
1416 "format = %d, numfrag = %d, fragsize = %d "
1417 "dmasize = %d\n", dmabuf->channel->num,
1418 dmabuf->rate, dmabuf->fmt, dmabuf->numfrag,
1419 dmabuf->fragsize, dmabuf->dmasize);
1420 }
1421 unlock_set_fmt(state);
1422 return 0;
1423}
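/*
 * Rough example of the fragment arithmetic above, assuming a 32 KB
 * buffer (buforder 3 with 4 KB pages) and no OSS overrides: fragshift
 * defaults to buforder + PAGE_SHIFT - 2 = 13, giving numfrag =
 * 32768 >> 13 = 4 fragments of 8 KB each and dmasize = 32 KB.  At
 * 44100 Hz, 16-bit stereo (bytepersec = 176400) each fragment then holds
 * roughly 46 ms of audio.
 */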
1424
1425
1426static inline int prog_dmabuf_record(struct trident_state* state)
1427{
1428 return prog_dmabuf(state, DM_RECORD);
1429}
1430
1431static inline int prog_dmabuf_playback(struct trident_state* state)
1432{
1433 return prog_dmabuf(state, DM_PLAYBACK);
1434}
1435
1436/* we are doing quantum mechanics here: the buffer can only be empty, half filled or completely filled, i.e.
1437 |------------|------------| or |xxxxxxxxxxxx|------------| or |xxxxxxxxxxxx|xxxxxxxxxxxx|
1438 but we almost always get this
1439 |xxxxxx------|------------| or |xxxxxxxxxxxx|xxxxx-------|
1440 so we have to clear the tail space to "silence"
1441 |xxxxxx000000|------------| or |xxxxxxxxxxxx|xxxxxx000000|
1442*/
1443static void
1444trident_clear_tail(struct trident_state *state)
1445{
1446 struct dmabuf *dmabuf = &state->dmabuf;
1447 unsigned swptr;
1448 unsigned char silence = (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80;
1449 unsigned int len;
1450 unsigned long flags;
1451
1452 spin_lock_irqsave(&state->card->lock, flags);
1453 swptr = dmabuf->swptr;
1454 spin_unlock_irqrestore(&state->card->lock, flags);
1455
1456 if (swptr == 0 || swptr == dmabuf->dmasize / 2 ||
1457 swptr == dmabuf->dmasize)
1458 return;
1459
1460 if (swptr < dmabuf->dmasize / 2)
1461 len = dmabuf->dmasize / 2 - swptr;
1462 else
1463 len = dmabuf->dmasize - swptr;
1464
1465 memset(dmabuf->rawbuf + swptr, silence, len);
1466 if (state->card->pci_id != PCI_DEVICE_ID_ALI_5451) {
1467 spin_lock_irqsave(&state->card->lock, flags);
1468 dmabuf->swptr += len;
1469 dmabuf->count += len;
1470 spin_unlock_irqrestore(&state->card->lock, flags);
1471 }
1472
1473 /* restart the dma machine in case it is halted */
1474 start_dac(state);
1475}
1476
1477static int
1478drain_dac(struct trident_state *state, int nonblock)
1479{
1480 DECLARE_WAITQUEUE(wait, current);
1481 struct dmabuf *dmabuf = &state->dmabuf;
1482 unsigned long flags;
1483 unsigned long tmo;
1484 int count;
1485 unsigned long diff = 0;
1486
1487 if (dmabuf->mapped || !dmabuf->ready)
1488 return 0;
1489
1490 add_wait_queue(&dmabuf->wait, &wait);
1491 for (;;) {
1492 /* It seems that we have to set the current state to TASK_INTERRUPTIBLE
1493 every time to make the process really go to sleep */
1494 set_current_state(TASK_INTERRUPTIBLE);
1495
1496 spin_lock_irqsave(&state->card->lock, flags);
1497 count = dmabuf->count;
1498 spin_unlock_irqrestore(&state->card->lock, flags);
1499
1500 if (count <= 0)
1501 break;
1502
1503 if (signal_pending(current))
1504 break;
1505
1506 if (nonblock) {
1507 remove_wait_queue(&dmabuf->wait, &wait);
1508 set_current_state(TASK_RUNNING);
1509 return -EBUSY;
1510 }
1511
1512 /* No matter how much data is left in the buffer, we have to wait until
1513 CSO == ESO/2 or CSO == ESO when address engine interrupts */
1514 if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
1515 state->card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
1516 diff = dmabuf->swptr - trident_get_dma_addr(state) + dmabuf->dmasize;
1517 diff = diff % (dmabuf->dmasize);
1518 tmo = (diff * HZ) / dmabuf->rate;
1519 } else {
1520 tmo = (dmabuf->dmasize * HZ) / dmabuf->rate;
1521 }
1522 tmo >>= sample_shift[dmabuf->fmt];
1523 if (!schedule_timeout(tmo ? tmo : 1) && tmo) {
1524 break;
1525 }
1526 }
1527 remove_wait_queue(&dmabuf->wait, &wait);
1528 set_current_state(TASK_RUNNING);
1529 if (signal_pending(current))
1530 return -ERESTARTSYS;
1531
1532 return 0;
1533}
1534
1535/* update buffer management pointers, especially, */
1536/* dmabuf->count and dmabuf->hwptr */
1537static void
1538trident_update_ptr(struct trident_state *state)
1539{
1540 struct dmabuf *dmabuf = &state->dmabuf;
1541 unsigned hwptr, swptr;
1542 int clear_cnt = 0;
1543 int diff;
1544 unsigned char silence;
1545 unsigned half_dmasize;
1546
1547 /* update hardware pointer */
1548 hwptr = trident_get_dma_addr(state);
1549 diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
1550 dmabuf->hwptr = hwptr;
1551 dmabuf->total_bytes += diff;
1552
1553 /* error handling and process wake up for ADC */
1554 if (dmabuf->enable == ADC_RUNNING) {
1555 if (dmabuf->mapped) {
1556 dmabuf->count -= diff;
1557 if (dmabuf->count >= (signed) dmabuf->fragsize)
1558 wake_up(&dmabuf->wait);
1559 } else {
1560 dmabuf->count += diff;
1561
1562 if (dmabuf->count < 0 ||
1563 dmabuf->count > dmabuf->dmasize) {
1564 /* buffer underrun or buffer overrun, */
1565 /* we have no way to recover it here, just */
1566 /* stop the machine and let the process */
1567 /* force hwptr and swptr to sync */
1568 __stop_adc(state);
1569 dmabuf->error++;
1570 }
1571 if (dmabuf->count < (signed) dmabuf->dmasize / 2)
1572 wake_up(&dmabuf->wait);
1573 }
1574 }
1575
1576 /* error handling and process wake up for DAC */
1577 if (dmabuf->enable == DAC_RUNNING) {
1578 if (dmabuf->mapped) {
1579 dmabuf->count += diff;
1580 if (dmabuf->count >= (signed) dmabuf->fragsize)
1581 wake_up(&dmabuf->wait);
1582 } else {
1583 dmabuf->count -= diff;
1584
1585 if (dmabuf->count < 0 ||
1586 dmabuf->count > dmabuf->dmasize) {
1587 /* buffer underrun or buffer overrun, we have no way to recover
1588 it here, just stop the machine and let the process force hwptr
1589 and swptr to sync */
1590 __stop_dac(state);
1591 dmabuf->error++;
1592 } else if (!dmabuf->endcleared) {
1593 swptr = dmabuf->swptr;
1594 silence = (dmabuf->fmt & TRIDENT_FMT_16BIT ? 0 : 0x80);
1595 if (dmabuf->update_flag & ALI_ADDRESS_INT_UPDATE) {
1596 /* We must clear end data of 1/2 dmabuf if needed.
1597 According to 1/2 algorithm of Address Engine Interrupt,
1598 check the validation of the data of half dmasize. */
1599 half_dmasize = dmabuf->dmasize / 2;
1600 if ((diff = hwptr - half_dmasize) < 0)
1601 diff = hwptr;
1602 if ((dmabuf->count + diff) < half_dmasize) {
1603					//there is invalid data at the end of the half buffer
1604 if ((clear_cnt = half_dmasize - swptr) < 0)
1605 clear_cnt += half_dmasize;
1606 //clear the invalid data
1607 memset(dmabuf->rawbuf + swptr, silence, clear_cnt);
1608 if (state->chans_num == 6) {
1609 clear_cnt = clear_cnt / 2;
1610 swptr = swptr / 2;
1611 memset(state->other_states[0]->dmabuf.rawbuf + swptr,
1612 silence, clear_cnt);
1613 memset(state->other_states[1]->dmabuf.rawbuf + swptr,
1614 silence, clear_cnt);
1615 memset(state->other_states[2]->dmabuf.rawbuf + swptr,
1616 silence, clear_cnt);
1617 memset(state->other_states[3]->dmabuf.rawbuf + swptr,
1618 silence, clear_cnt);
1619 }
1620 dmabuf->endcleared = 1;
1621 }
1622 } else if (dmabuf->count < (signed) dmabuf->fragsize) {
1623 clear_cnt = dmabuf->fragsize;
1624 if ((swptr + clear_cnt) > dmabuf->dmasize)
1625 clear_cnt = dmabuf->dmasize - swptr;
1626 memset(dmabuf->rawbuf + swptr, silence, clear_cnt);
1627 if (state->chans_num == 6) {
1628 clear_cnt = clear_cnt / 2;
1629 swptr = swptr / 2;
1630 memset(state->other_states[0]->dmabuf.rawbuf + swptr,
1631 silence, clear_cnt);
1632 memset(state->other_states[1]->dmabuf.rawbuf + swptr,
1633 silence, clear_cnt);
1634 memset(state->other_states[2]->dmabuf.rawbuf + swptr,
1635 silence, clear_cnt);
1636 memset(state->other_states[3]->dmabuf.rawbuf + swptr,
1637 silence, clear_cnt);
1638 }
1639 dmabuf->endcleared = 1;
1640 }
1641 }
1642 /* trident_update_ptr is called by interrupt handler or by process via
1643 ioctl/poll, we only wake up the waiting process when we have more
1644 than 1/2 buffer free (always true for interrupt handler) */
1645 if (dmabuf->count < (signed) dmabuf->dmasize / 2)
1646 wake_up(&dmabuf->wait);
1647 }
1648 }
1649 dmabuf->update_flag &= ~ALI_ADDRESS_INT_UPDATE;
1650}
1651
1652static void
1653trident_address_interrupt(struct trident_card *card)
1654{
1655 int i;
1656 struct trident_state *state;
1657 unsigned int channel;
1658
1659 /* Update the pointers for all channels we are running. */
1660 /* FIXME: should read interrupt status only once */
1661 for (i = 0; i < NR_HW_CH; i++) {
1662 channel = 63 - i;
1663 if (trident_check_channel_interrupt(card, channel)) {
1664 trident_ack_channel_interrupt(card, channel);
1665 if ((state = card->states[i]) != NULL) {
1666 trident_update_ptr(state);
1667 } else {
1668 printk(KERN_WARNING "trident: spurious channel "
1669 "irq %d.\n", channel);
1670 trident_stop_voice(card, channel);
1671 trident_disable_voice_irq(card, channel);
1672 }
1673 }
1674 }
1675}
1676
1677static void
1678ali_hwvol_control(struct trident_card *card, int opt)
1679{
1680 u16 dwTemp, volume[2], mute, diff, *pVol[2];
1681
1682 dwTemp = ali_ac97_read(card->ac97_codec[0], 0x02);
1683 mute = dwTemp & 0x8000;
1684 volume[0] = dwTemp & 0x001f;
1685 volume[1] = (dwTemp & 0x1f00) >> 8;
1686 if (volume[0] < volume[1]) {
1687 pVol[0] = &volume[0];
1688 pVol[1] = &volume[1];
1689 } else {
1690 pVol[1] = &volume[0];
1691 pVol[0] = &volume[1];
1692 }
1693 diff = *(pVol[1]) - *(pVol[0]);
1694
1695 if (opt == 1) { // MUTE
1696 dwTemp ^= 0x8000;
1697 ali_ac97_write(card->ac97_codec[0],
1698 0x02, dwTemp);
1699 } else if (opt == 2) { // Down
1700 if (mute)
1701 return;
1702 if (*(pVol[1]) < 0x001f) {
1703 (*pVol[1])++;
1704 *(pVol[0]) = *(pVol[1]) - diff;
1705 }
1706 dwTemp &= 0xe0e0;
1707 dwTemp |= (volume[0]) | (volume[1] << 8);
1708 ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp);
1709 card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
1710 (((32 - volume[1]) * 25 / 8) << 8);
1711 } else if (opt == 4) { // Up
1712 if (mute)
1713 return;
1714 if (*(pVol[0]) > 0) {
1715 (*pVol[0])--;
1716 *(pVol[1]) = *(pVol[0]) + diff;
1717 }
1718 dwTemp &= 0xe0e0;
1719 dwTemp |= (volume[0]) | (volume[1] << 8);
1720 ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp);
1721 card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
1722 (((32 - volume[1]) * 25 / 8) << 8);
1723 } else {
1724 /* Nothing needs doing */
1725 }
1726}
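/*
 * ali_hwvol_control() tweaks AC97 register 0x02 (master volume): bit 15
 * is mute and the two five-bit fields hold the per-channel attenuation
 * (0 = loudest, 0x1f = quietest, nominally 1.5 dB per step), which is
 * why "Down" increments and "Up" decrements the stored values while
 * preserving the left/right difference.  The mixer_state update maps the
 * 0-31 attenuation back to an OSS-style 0-100 percentage via
 * (32 - vol) * 25 / 8, e.g. attenuation 0 -> 100 and 0x1f -> 3.
 */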
1727
1728/*
1729 * Re-enable reporting of vol change after 0.1 seconds
1730 */
1731
1732static void
1733ali_timeout(unsigned long ptr)
1734{
1735 struct trident_card *card = (struct trident_card *) ptr;
1736 u16 temp = 0;
1737
1738 /* Enable GPIO IRQ (MISCINT bit 18h) */
1739 temp = inw(TRID_REG(card, T4D_MISCINT + 2));
1740 temp |= 0x0004;
1741 outw(temp, TRID_REG(card, T4D_MISCINT + 2));
1742}
1743
1744/*
1745 * Set up the timer to clear the vol change notification
1746 */
1747
1748static void
1749ali_set_timer(struct trident_card *card)
1750{
1751 /* Add Timer Routine to Enable GPIO IRQ */
1752 del_timer(&card->timer); /* Never queue twice */
1753 card->timer.function = ali_timeout;
1754 card->timer.data = (unsigned long) card;
1755 card->timer.expires = jiffies + HZ / 10;
1756 add_timer(&card->timer);
1757}
1758
1759/*
1760 * Process a GPIO event
1761 */
1762
1763static void
1764ali_queue_task(struct trident_card *card, int opt)
1765{
1766 u16 temp;
1767
1768 /* Disable GPIO IRQ (MISCINT bit 18h) */
1769 temp = inw(TRID_REG(card, T4D_MISCINT + 2));
1770 temp &= (u16) (~0x0004);
1771 outw(temp, TRID_REG(card, T4D_MISCINT + 2));
1772
1773 /* Adjust the volume */
1774 ali_hwvol_control(card, opt);
1775
1776 /* Set the timer for 1/10th sec */
1777 ali_set_timer(card);
1778}
1779
1780static void
1781cyber_address_interrupt(struct trident_card *card)
1782{
1783 int i, irq_status;
1784 struct trident_state *state;
1785 unsigned int channel;
1786
1787 /* Update the pointers for all channels we are running. */
1788 /* FIXED: read interrupt status only once */
1789 irq_status = inl(TRID_REG(card, T4D_AINT_A));
1790
1791 pr_debug("cyber_address_interrupt: irq_status 0x%X\n", irq_status);
1792
1793 for (i = 0; i < NR_HW_CH; i++) {
1794 channel = 31 - i;
1795 if (irq_status & (1 << channel)) {
1796 /* clear bit by writing a 1, zeroes are ignored */
1797 outl((1 << channel), TRID_REG(card, T4D_AINT_A));
1798
1799 pr_debug("cyber_interrupt: channel %d\n", channel);
1800
1801 if ((state = card->states[i]) != NULL) {
1802 trident_update_ptr(state);
1803 } else {
1804 printk(KERN_WARNING "cyber5050: spurious "
1805 "channel irq %d.\n", channel);
1806 trident_stop_voice(card, channel);
1807 trident_disable_voice_irq(card, channel);
1808 }
1809 }
1810 }
1811}
1812
1813static irqreturn_t
1814trident_interrupt(int irq, void *dev_id)
1815{
1816 struct trident_card *card = (struct trident_card *) dev_id;
1817 u32 event;
1818 u32 gpio;
1819
1820 spin_lock(&card->lock);
1821 event = inl(TRID_REG(card, T4D_MISCINT));
1822
1823 pr_debug("trident: trident_interrupt called, MISCINT = 0x%08x\n",
1824 event);
1825
1826 if (event & ADDRESS_IRQ) {
1827 card->address_interrupt(card);
1828 }
1829
1830 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
1831 /* GPIO IRQ (H/W Volume Control) */
1832 event = inl(TRID_REG(card, T4D_MISCINT));
1833 if (event & (1 << 25)) {
1834 gpio = inl(TRID_REG(card, ALI_GPIO));
1835 if (!timer_pending(&card->timer))
1836 ali_queue_task(card, gpio & 0x07);
1837 }
1838 event = inl(TRID_REG(card, T4D_MISCINT));
1839 outl(event | (ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
1840 TRID_REG(card, T4D_MISCINT));
1841 spin_unlock(&card->lock);
1842 return IRQ_HANDLED;
1843 }
1844
1845 /* manually clear interrupt status, bad hardware design, blame T^2 */
1846 outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
1847 TRID_REG(card, T4D_MISCINT));
1848 spin_unlock(&card->lock);
1849 return IRQ_HANDLED;
1850}
1851
1852/* in this loop, dmabuf.count signifies the amount of data that is waiting */
1853/* to be copied to the user's buffer. it is filled by the dma machine and */
1854/* drained by this loop. */
1855static ssize_t
1856trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
1857{
1858 struct trident_state *state = (struct trident_state *)file->private_data;
1859 struct dmabuf *dmabuf = &state->dmabuf;
1860 ssize_t ret = 0;
1861 unsigned long flags;
1862 unsigned swptr;
1863 int cnt;
1864
1865 pr_debug("trident: trident_read called, count = %zd\n", count);
1866
1867 VALIDATE_STATE(state);
1868
1869 if (dmabuf->mapped)
1870 return -ENXIO;
1871 if (!access_ok(VERIFY_WRITE, buffer, count))
1872 return -EFAULT;
1873
1874 mutex_lock(&state->sem);
1875 if (!dmabuf->ready && (ret = prog_dmabuf_record(state)))
1876 goto out;
1877
1878 while (count > 0) {
1879 spin_lock_irqsave(&state->card->lock, flags);
1880 if (dmabuf->count > (signed) dmabuf->dmasize) {
1881 /* buffer overrun, we are recovering from */
1882 /* sleep_on_timeout, resync hwptr and swptr, */
1883 /* make process flush the buffer */
1884 dmabuf->count = dmabuf->dmasize;
1885 dmabuf->swptr = dmabuf->hwptr;
1886 }
1887 swptr = dmabuf->swptr;
1888 cnt = dmabuf->dmasize - swptr;
1889 if (dmabuf->count < cnt)
1890 cnt = dmabuf->count;
1891 spin_unlock_irqrestore(&state->card->lock, flags);
1892
1893 if (cnt > count)
1894 cnt = count;
1895 if (cnt <= 0) {
1896 unsigned long tmo;
1897 /* buffer is empty, start the dma machine and */
1898 /* wait for data to be recorded */
1899 start_adc(state);
1900 if (file->f_flags & O_NONBLOCK) {
1901 if (!ret)
1902 ret = -EAGAIN;
1903 goto out;
1904 }
1905
1906 mutex_unlock(&state->sem);
1907 /* No matter how much space left in the buffer, */
1908 /* we have to wait until CSO == ESO/2 or CSO == ESO */
1909 /* when address engine interrupts */
1910 tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
1911 tmo >>= sample_shift[dmabuf->fmt];
1912			/* There are two situations in which sleep_on_timeout returns: either
1913			   the interrupt is serviced correctly and the process is woken up by
1914			   the ISR ON TIME, or the timeout expires, which means that either the
1915			   interrupt is NOT serviced correctly (pending interrupt) or it is TOO
1916			   LATE for the process to be scheduled to run (scheduler latency),
1917			   which results in a (potential) buffer overrun. And worse, there is
1918			   NOTHING we can do to prevent it. */
1919 if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
1920				pr_debug("trident: recording schedule timeout, "
1921 "dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
1922 dmabuf->dmasize, dmabuf->fragsize, dmabuf->count,
1923 dmabuf->hwptr, dmabuf->swptr);
1924
1925			/* a buffer overrun; we delay the recovery until the next time the
1926			   while loop begins and we REALLY have space to record */
1927 }
1928 if (signal_pending(current)) {
1929 if (!ret)
1930 ret = -ERESTARTSYS;
1931 goto out;
1932 }
1933 mutex_lock(&state->sem);
1934 if (dmabuf->mapped) {
1935 if (!ret)
1936 ret = -ENXIO;
1937 goto out;
1938 }
1939 continue;
1940 }
1941
1942 if (copy_to_user(buffer, dmabuf->rawbuf + swptr, cnt)) {
1943 if (!ret)
1944 ret = -EFAULT;
1945 goto out;
1946 }
1947
1948 swptr = (swptr + cnt) % dmabuf->dmasize;
1949
1950 spin_lock_irqsave(&state->card->lock, flags);
1951 dmabuf->swptr = swptr;
1952 dmabuf->count -= cnt;
1953 spin_unlock_irqrestore(&state->card->lock, flags);
1954
1955 count -= cnt;
1956 buffer += cnt;
1957 ret += cnt;
1958 start_adc(state);
1959 }
1960out:
1961 mutex_unlock(&state->sem);
1962 return ret;
1963}
1964
1965/* In this loop, dmabuf.count signifies the amount of data that is waiting to be
1966   DMAed to the soundcard. It is drained by the DMA machine and filled by this loop. */
1967
1968static ssize_t
1969trident_write(struct file *file, const char __user *buffer, size_t count, loff_t * ppos)
1970{
1971 struct trident_state *state = (struct trident_state *)file->private_data;
1972 struct dmabuf *dmabuf = &state->dmabuf;
1973 ssize_t ret;
1974 unsigned long flags;
1975 unsigned swptr;
1976 int cnt;
1977 unsigned int state_cnt;
1978 unsigned int copy_count;
1979 int lret; /* for lock_set_fmt */
1980
1981 pr_debug("trident: trident_write called, count = %zd\n", count);
1982
1983 VALIDATE_STATE(state);
1984
1985 /*
1986 * Guard against an mmap or ioctl while writing
1987 */
1988
1989 mutex_lock(&state->sem);
1990
1991 if (dmabuf->mapped) {
1992 ret = -ENXIO;
1993 goto out;
1994 }
1995 if (!dmabuf->ready && (ret = prog_dmabuf_playback(state)))
1996 goto out;
1997
1998 if (!access_ok(VERIFY_READ, buffer, count)) {
1999 ret = -EFAULT;
2000 goto out;
2001 }
2002
2003 ret = 0;
2004
2005 while (count > 0) {
2006 spin_lock_irqsave(&state->card->lock, flags);
2007 if (dmabuf->count < 0) {
2008 /* buffer underrun, we are recovering from */
2009 /* sleep_on_timeout, resync hwptr and swptr */
2010 dmabuf->count = 0;
2011 dmabuf->swptr = dmabuf->hwptr;
2012 }
2013 swptr = dmabuf->swptr;
2014 cnt = dmabuf->dmasize - swptr;
2015 if (dmabuf->count + cnt > dmabuf->dmasize)
2016 cnt = dmabuf->dmasize - dmabuf->count;
2017 spin_unlock_irqrestore(&state->card->lock, flags);
2018
2019 if (cnt > count)
2020 cnt = count;
2021 if (cnt <= 0) {
2022 unsigned long tmo;
2023 /* buffer is full, start the dma machine and */
2024 /* wait for data to be played */
2025 start_dac(state);
2026 if (file->f_flags & O_NONBLOCK) {
2027 if (!ret)
2028 ret = -EAGAIN;
2029 goto out;
2030 }
2031			/* No matter how much data is left in the buffer, */
2032			/* we have to wait until CSO == ESO/2 or CSO == ESO, */
2033			/* i.e. until the address engine interrupts */
2034 lock_set_fmt(state);
2035 tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
2036 tmo >>= sample_shift[dmabuf->fmt];
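			/* as in trident_read(): roughly the jiffies the hardware
			   needs to drain half of the DMA buffer */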
2037 unlock_set_fmt(state);
2038 mutex_unlock(&state->sem);
2039
2040			/* There are two situations in which sleep_on_timeout */
2041			/* returns: either the interrupt is serviced correctly */
2042			/* and the process is woken up by the ISR ON TIME, or */
2043			/* the timeout expires, which means that either the */
2044			/* interrupt is NOT serviced correctly (pending */
2045			/* interrupt) or it is TOO LATE for the process to be */
2046			/* scheduled to run (scheduler latency), which */
2047			/* results in a (potential) buffer underrun. And */
2048			/* worse, there is NOTHING we can do to prevent */
2049			/* it. */
2050 if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
2051				pr_debug("trident: playback schedule "
2052 "timeout, dmasz %u fragsz %u count %i "
2053 "hwptr %u swptr %u\n", dmabuf->dmasize,
2054 dmabuf->fragsize, dmabuf->count,
2055 dmabuf->hwptr, dmabuf->swptr);
2056
2057				/* a buffer underrun; we delay the recovery */
2058				/* until the next time the while loop begins */
2059				/* and we REALLY have data to play */
2060 }
2061 if (signal_pending(current)) {
2062 if (!ret)
2063 ret = -ERESTARTSYS;
2064 goto out_nolock;
2065 }
2066 mutex_lock(&state->sem);
2067 if (dmabuf->mapped) {
2068 if (!ret)
2069 ret = -ENXIO;
2070 goto out;
2071 }
2072 continue;
2073 }
2074 if ((lret = lock_set_fmt(state)) < 0) {
2075 ret = lret;
2076 goto out;
2077 }
2078
2079 if (state->chans_num == 6) {
2080 copy_count = 0;
2081 state_cnt = 0;
2082 if (ali_write_5_1(state, buffer, cnt, &copy_count,
2083 &state_cnt) == -EFAULT) {
2084 if (state_cnt) {
2085 swptr = (swptr + state_cnt) % dmabuf->dmasize;
2086 spin_lock_irqsave(&state->card->lock, flags);
2087 dmabuf->swptr = swptr;
2088 dmabuf->count += state_cnt;
2089 dmabuf->endcleared = 0;
2090 spin_unlock_irqrestore(&state->card->lock, flags);
2091 }
2092 ret += copy_count;
2093 if (!ret)
2094 ret = -EFAULT;
2095 unlock_set_fmt(state);
2096 goto out;
2097 }
2098 } else {
2099 if (copy_from_user(dmabuf->rawbuf + swptr,
2100 buffer, cnt)) {
2101 if (!ret)
2102 ret = -EFAULT;
2103 unlock_set_fmt(state);
2104 goto out;
2105 }
2106 state_cnt = cnt;
2107 }
2108 unlock_set_fmt(state);
2109
2110 swptr = (swptr + state_cnt) % dmabuf->dmasize;
2111
2112 spin_lock_irqsave(&state->card->lock, flags);
2113 dmabuf->swptr = swptr;
2114 dmabuf->count += state_cnt;
2115 dmabuf->endcleared = 0;
2116 spin_unlock_irqrestore(&state->card->lock, flags);
2117
2118 count -= cnt;
2119 buffer += cnt;
2120 ret += cnt;
2121 start_dac(state);
2122 }
2123out:
2124 mutex_unlock(&state->sem);
2125out_nolock:
2126 return ret;
2127}
2128
2129/* No kernel lock - we have our own spinlock */
2130static unsigned int
2131trident_poll(struct file *file, struct poll_table_struct *wait)
2132{
2133 struct trident_state *state = (struct trident_state *)file->private_data;
2134 struct dmabuf *dmabuf = &state->dmabuf;
2135 unsigned long flags;
2136 unsigned int mask = 0;
2137
2138 VALIDATE_STATE(state);
2139
2140 /*
2141 * Guard against a parallel poll and write causing multiple
2142 * prog_dmabuf events
2143 */
2144
2145 mutex_lock(&state->sem);
2146
2147 if (file->f_mode & FMODE_WRITE) {
2148 if (!dmabuf->ready && prog_dmabuf_playback(state)) {
2149 mutex_unlock(&state->sem);
2150 return 0;
2151 }
2152 poll_wait(file, &dmabuf->wait, wait);
2153 }
2154 if (file->f_mode & FMODE_READ) {
2155 if (!dmabuf->ready && prog_dmabuf_record(state)) {
2156 mutex_unlock(&state->sem);
2157 return 0;
2158 }
2159 poll_wait(file, &dmabuf->wait, wait);
2160 }
2161
2162 mutex_unlock(&state->sem);
2163
2164 spin_lock_irqsave(&state->card->lock, flags);
2165 trident_update_ptr(state);
2166 if (file->f_mode & FMODE_READ) {
2167 if (dmabuf->count >= (signed) dmabuf->fragsize)
2168 mask |= POLLIN | POLLRDNORM;
2169 }
2170 if (file->f_mode & FMODE_WRITE) {
2171 if (dmabuf->mapped) {
2172 if (dmabuf->count >= (signed) dmabuf->fragsize)
2173 mask |= POLLOUT | POLLWRNORM;
2174 } else {
2175 if ((signed) dmabuf->dmasize >= dmabuf->count +
2176 (signed) dmabuf->fragsize)
2177 mask |= POLLOUT | POLLWRNORM;
2178 }
2179 }
2180 spin_unlock_irqrestore(&state->card->lock, flags);
2181
2182 return mask;
2183}
2184
2185static int
2186trident_mmap(struct file *file, struct vm_area_struct *vma)
2187{
2188 struct trident_state *state = (struct trident_state *)file->private_data;
2189 struct dmabuf *dmabuf = &state->dmabuf;
2190 int ret = -EINVAL;
2191 unsigned long size;
2192
2193 VALIDATE_STATE(state);
2194
2195 /*
2196	 * Lock against poll, read, write or mmap creating buffers. Also lock
2197 * a read or write against an mmap.
2198 */
2199
2200 mutex_lock(&state->sem);
2201
2202 if (vma->vm_flags & VM_WRITE) {
2203 if ((ret = prog_dmabuf_playback(state)) != 0)
2204 goto out;
2205 } else if (vma->vm_flags & VM_READ) {
2206 if ((ret = prog_dmabuf_record(state)) != 0)
2207 goto out;
2208 } else
2209 goto out;
2210
2211 ret = -EINVAL;
2212 if (vma->vm_pgoff != 0)
2213 goto out;
2214 size = vma->vm_end - vma->vm_start;
2215 if (size > (PAGE_SIZE << dmabuf->buforder))
2216 goto out;
2217 ret = -EAGAIN;
2218 if (remap_pfn_range(vma, vma->vm_start,
2219 virt_to_phys(dmabuf->rawbuf) >> PAGE_SHIFT,
2220 size, vma->vm_page_prot))
2221 goto out;
2222 dmabuf->mapped = 1;
2223 ret = 0;
2224out:
2225 mutex_unlock(&state->sem);
2226 return ret;
2227}
2228
2229static int
2230trident_ioctl(struct inode *inode, struct file *file,
2231 unsigned int cmd, unsigned long arg)
2232{
2233 struct trident_state *state = (struct trident_state *)file->private_data;
2234 struct dmabuf *dmabuf = &state->dmabuf;
2235 unsigned long flags;
2236 audio_buf_info abinfo;
2237 count_info cinfo;
2238 int val, mapped, ret = 0;
2239 struct trident_card *card = state->card;
2240 void __user *argp = (void __user *)arg;
2241 int __user *p = argp;
2242
2243 VALIDATE_STATE(state);
2244
2245
2246 mapped = ((file->f_mode & (FMODE_WRITE | FMODE_READ)) && dmabuf->mapped);
2247
2248 pr_debug("trident: trident_ioctl, command = %2d, arg = 0x%08x\n",
2249 _IOC_NR(cmd), arg ? *p : 0);
2250
2251 switch (cmd) {
2252 case OSS_GETVERSION:
2253 ret = put_user(SOUND_VERSION, p);
2254 break;
2255
2256 case SNDCTL_DSP_RESET:
2257 /* FIXME: spin_lock ? */
2258 if (file->f_mode & FMODE_WRITE) {
2259 stop_dac(state);
2260 synchronize_irq(card->irq);
2261 dmabuf->ready = 0;
2262 dmabuf->swptr = dmabuf->hwptr = 0;
2263 dmabuf->count = dmabuf->total_bytes = 0;
2264 }
2265 if (file->f_mode & FMODE_READ) {
2266 stop_adc(state);
2267 synchronize_irq(card->irq);
2268 dmabuf->ready = 0;
2269 dmabuf->swptr = dmabuf->hwptr = 0;
2270 dmabuf->count = dmabuf->total_bytes = 0;
2271 }
2272 break;
2273
2274 case SNDCTL_DSP_SYNC:
2275 if (file->f_mode & FMODE_WRITE)
2276 ret = drain_dac(state, file->f_flags & O_NONBLOCK);
2277 break;
2278
2279	case SNDCTL_DSP_SPEED:	/* set sample rate */
2280 if (get_user(val, p)) {
2281 ret = -EFAULT;
2282 break;
2283 }
2284 if (val >= 0) {
2285 if (file->f_mode & FMODE_WRITE) {
2286 stop_dac(state);
2287 dmabuf->ready = 0;
2288 spin_lock_irqsave(&state->card->lock, flags);
2289 trident_set_dac_rate(state, val);
2290 spin_unlock_irqrestore(&state->card->lock, flags);
2291 }
2292 if (file->f_mode & FMODE_READ) {
2293 stop_adc(state);
2294 dmabuf->ready = 0;
2295 spin_lock_irqsave(&state->card->lock, flags);
2296 trident_set_adc_rate(state, val);
2297 spin_unlock_irqrestore(&state->card->lock, flags);
2298 }
2299 }
2300 ret = put_user(dmabuf->rate, p);
2301 break;
2302
2303 case SNDCTL_DSP_STEREO: /* set stereo or mono channel */
2304 if (get_user(val, p)) {
2305 ret = -EFAULT;
2306 break;
2307 }
2308 if ((ret = lock_set_fmt(state)) < 0)
2309 return ret;
2310
2311 if (file->f_mode & FMODE_WRITE) {
2312 stop_dac(state);
2313 dmabuf->ready = 0;
2314 if (val)
2315 dmabuf->fmt |= TRIDENT_FMT_STEREO;
2316 else
2317 dmabuf->fmt &= ~TRIDENT_FMT_STEREO;
2318 }
2319 if (file->f_mode & FMODE_READ) {
2320 stop_adc(state);
2321 dmabuf->ready = 0;
2322 if (val)
2323 dmabuf->fmt |= TRIDENT_FMT_STEREO;
2324 else
2325 dmabuf->fmt &= ~TRIDENT_FMT_STEREO;
2326 }
2327 unlock_set_fmt(state);
2328 break;
2329
2330 case SNDCTL_DSP_GETBLKSIZE:
2331 if (file->f_mode & FMODE_WRITE) {
2332 if ((val = prog_dmabuf_playback(state)))
2333 ret = val;
2334 else
2335 ret = put_user(dmabuf->fragsize, p);
2336 break;
2337 }
2338 if (file->f_mode & FMODE_READ) {
2339 if ((val = prog_dmabuf_record(state)))
2340 ret = val;
2341 else
2342 ret = put_user(dmabuf->fragsize, p);
2343 break;
2344 }
2345 /* neither READ nor WRITE? is this even possible? */
2346 ret = -EINVAL;
2347 break;
2348
2349
2350 case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format */
2351 ret = put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
2352 AFMT_U8, p);
2353 break;
2354
2355 case SNDCTL_DSP_SETFMT: /* Select sample format */
2356 if (get_user(val, p)) {
2357 ret = -EFAULT;
2358 break;
2359 }
2360 if ((ret = lock_set_fmt(state)) < 0)
2361 return ret;
2362
2363 if (val != AFMT_QUERY) {
2364 if (file->f_mode & FMODE_WRITE) {
2365 stop_dac(state);
2366 dmabuf->ready = 0;
2367 if (val == AFMT_S16_LE)
2368 dmabuf->fmt |= TRIDENT_FMT_16BIT;
2369 else
2370 dmabuf->fmt &= ~TRIDENT_FMT_16BIT;
2371 }
2372 if (file->f_mode & FMODE_READ) {
2373 stop_adc(state);
2374 dmabuf->ready = 0;
2375 if (val == AFMT_S16_LE)
2376 dmabuf->fmt |= TRIDENT_FMT_16BIT;
2377 else
2378 dmabuf->fmt &= ~TRIDENT_FMT_16BIT;
2379 }
2380 }
2381 unlock_set_fmt(state);
2382 ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
2383 AFMT_U8, p);
2384 break;
2385
2386 case SNDCTL_DSP_CHANNELS:
2387 if (get_user(val, p)) {
2388 ret = -EFAULT;
2389 break;
2390 }
2391 if (val != 0) {
2392 if ((ret = lock_set_fmt(state)) < 0)
2393 return ret;
2394
2395 if (file->f_mode & FMODE_WRITE) {
2396 stop_dac(state);
2397 dmabuf->ready = 0;
2398
2399				/* prevent a memory leak */
2400 if ((state->chans_num > 2) && (state->chans_num != val)) {
2401 ali_free_other_states_resources(state);
2402 state->chans_num = 1;
2403 }
2404
2405 if (val >= 2) {
2406
2407 dmabuf->fmt |= TRIDENT_FMT_STEREO;
2408 if ((val == 6) && (state->card->pci_id == PCI_DEVICE_ID_ALI_5451)) {
2409 if (card->rec_channel_use_count > 0) {
2410					printk(KERN_ERR "trident: recording is in "
2411					       "progress on this card!\n");
2412 ret = -EBUSY;
2413 unlock_set_fmt(state);
2414 break;
2415 }
2416
2417 ret = ali_setup_multi_channels(state->card, 6);
2418 if (ret < 0) {
2419 unlock_set_fmt(state);
2420 break;
2421 }
2422 mutex_lock(&state->card->open_mutex);
2423 ret = ali_allocate_other_states_resources(state, 6);
2424 if (ret < 0) {
2425 mutex_unlock(&state->card->open_mutex);
2426 unlock_set_fmt(state);
2427 break;
2428 }
2429 state->card->multi_channel_use_count++;
2430 mutex_unlock(&state->card->open_mutex);
2431 } else
2432					val = 2;	/* fall back to 2 channels */
2433 } else
2434 dmabuf->fmt &= ~TRIDENT_FMT_STEREO;
2435 state->chans_num = val;
2436 }
2437 if (file->f_mode & FMODE_READ) {
2438 stop_adc(state);
2439 dmabuf->ready = 0;
2440 if (val >= 2) {
2441 if (!((file->f_mode & FMODE_WRITE) &&
2442 (val == 6)))
2443 val = 2;
2444 dmabuf->fmt |= TRIDENT_FMT_STEREO;
2445 } else
2446 dmabuf->fmt &= ~TRIDENT_FMT_STEREO;
2447 state->chans_num = val;
2448 }
2449 unlock_set_fmt(state);
2450 }
2451 ret = put_user(val, p);
2452 break;
2453
2454 case SNDCTL_DSP_POST:
2455 /* Cause the working fragment to be output */
2456 break;
2457
2458 case SNDCTL_DSP_SUBDIVIDE:
2459 if (dmabuf->subdivision) {
2460 ret = -EINVAL;
2461 break;
2462 }
2463 if (get_user(val, p)) {
2464 ret = -EFAULT;
2465 break;
2466 }
2467 if (val != 1 && val != 2 && val != 4) {
2468 ret = -EINVAL;
2469 break;
2470 }
2471 dmabuf->subdivision = val;
2472 break;
2473
2474 case SNDCTL_DSP_SETFRAGMENT:
2475 if (get_user(val, p)) {
2476 ret = -EFAULT;
2477 break;
2478 }
2479
2480 dmabuf->ossfragshift = val & 0xffff;
2481 dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
2482 if (dmabuf->ossfragshift < 4)
2483 dmabuf->ossfragshift = 4;
2484 if (dmabuf->ossfragshift > 15)
2485 dmabuf->ossfragshift = 15;
2486 if (dmabuf->ossmaxfrags < 4)
2487 dmabuf->ossmaxfrags = 4;
2488
2489 break;
2490
2491 case SNDCTL_DSP_GETOSPACE:
2492 if (!(file->f_mode & FMODE_WRITE)) {
2493 ret = -EINVAL;
2494 break;
2495 }
2496 if (!dmabuf->ready && (val = prog_dmabuf_playback(state)) != 0) {
2497 ret = val;
2498 break;
2499 }
2500 spin_lock_irqsave(&state->card->lock, flags);
2501 trident_update_ptr(state);
2502 abinfo.fragsize = dmabuf->fragsize;
2503 abinfo.bytes = dmabuf->dmasize - dmabuf->count;
2504 abinfo.fragstotal = dmabuf->numfrag;
2505 abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
2506 spin_unlock_irqrestore(&state->card->lock, flags);
2507 ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
2508 -EFAULT : 0;
2509 break;
2510
2511 case SNDCTL_DSP_GETISPACE:
2512 if (!(file->f_mode & FMODE_READ)) {
2513 ret = -EINVAL;
2514 break;
2515 }
2516 if (!dmabuf->ready && (val = prog_dmabuf_record(state)) != 0) {
2517 ret = val;
2518 break;
2519 }
2520 spin_lock_irqsave(&state->card->lock, flags);
2521 trident_update_ptr(state);
2522 abinfo.fragsize = dmabuf->fragsize;
2523 abinfo.bytes = dmabuf->count;
2524 abinfo.fragstotal = dmabuf->numfrag;
2525 abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
2526 spin_unlock_irqrestore(&state->card->lock, flags);
2527 ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
2528 -EFAULT : 0;
2529 break;
2530
2531 case SNDCTL_DSP_NONBLOCK:
2532 file->f_flags |= O_NONBLOCK;
2533 break;
2534
2535 case SNDCTL_DSP_GETCAPS:
2536 ret = put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER |
2537 DSP_CAP_MMAP | DSP_CAP_BIND, p);
2538 break;
2539
2540 case SNDCTL_DSP_GETTRIGGER:
2541 val = 0;
2542 if ((file->f_mode & FMODE_READ) && dmabuf->enable)
2543 val |= PCM_ENABLE_INPUT;
2544 if ((file->f_mode & FMODE_WRITE) && dmabuf->enable)
2545 val |= PCM_ENABLE_OUTPUT;
2546 ret = put_user(val, p);
2547 break;
2548
2549 case SNDCTL_DSP_SETTRIGGER:
2550 if (get_user(val, p)) {
2551 ret = -EFAULT;
2552 break;
2553 }
2554 if (file->f_mode & FMODE_READ) {
2555 if (val & PCM_ENABLE_INPUT) {
2556 if (!dmabuf->ready &&
2557 (ret = prog_dmabuf_record(state)))
2558 break;
2559 start_adc(state);
2560 } else
2561 stop_adc(state);
2562 }
2563 if (file->f_mode & FMODE_WRITE) {
2564 if (val & PCM_ENABLE_OUTPUT) {
2565 if (!dmabuf->ready &&
2566 (ret = prog_dmabuf_playback(state)))
2567 break;
2568 start_dac(state);
2569 } else
2570 stop_dac(state);
2571 }
2572 break;
2573
2574 case SNDCTL_DSP_GETIPTR:
2575 if (!(file->f_mode & FMODE_READ)) {
2576 ret = -EINVAL;
2577 break;
2578 }
2579 if (!dmabuf->ready && (val = prog_dmabuf_record(state))
2580 != 0) {
2581 ret = val;
2582 break;
2583 }
2584 spin_lock_irqsave(&state->card->lock, flags);
2585 trident_update_ptr(state);
2586 cinfo.bytes = dmabuf->total_bytes;
2587 cinfo.blocks = dmabuf->count >> dmabuf->fragshift;
2588 cinfo.ptr = dmabuf->hwptr;
2589 if (dmabuf->mapped)
2590 dmabuf->count &= dmabuf->fragsize - 1;
2591 spin_unlock_irqrestore(&state->card->lock, flags);
2592 ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
2593 -EFAULT : 0;
2594 break;
2595
2596 case SNDCTL_DSP_GETOPTR:
2597 if (!(file->f_mode & FMODE_WRITE)) {
2598 ret = -EINVAL;
2599 break;
2600 }
2601 if (!dmabuf->ready && (val = prog_dmabuf_playback(state))
2602 != 0) {
2603 ret = val;
2604 break;
2605 }
2606
2607 spin_lock_irqsave(&state->card->lock, flags);
2608 trident_update_ptr(state);
2609 cinfo.bytes = dmabuf->total_bytes;
2610 cinfo.blocks = dmabuf->count >> dmabuf->fragshift;
2611 cinfo.ptr = dmabuf->hwptr;
2612 if (dmabuf->mapped)
2613 dmabuf->count &= dmabuf->fragsize - 1;
2614 spin_unlock_irqrestore(&state->card->lock, flags);
2615 ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
2616 -EFAULT : 0;
2617 break;
2618
2619 case SNDCTL_DSP_SETDUPLEX:
2620 ret = -EINVAL;
2621 break;
2622
2623 case SNDCTL_DSP_GETODELAY:
2624 if (!(file->f_mode & FMODE_WRITE)) {
2625 ret = -EINVAL;
2626 break;
2627 }
2628 if (!dmabuf->ready && (val = prog_dmabuf_playback(state)) != 0) {
2629 ret = val;
2630 break;
2631 }
2632 spin_lock_irqsave(&state->card->lock, flags);
2633 trident_update_ptr(state);
2634 val = dmabuf->count;
2635 spin_unlock_irqrestore(&state->card->lock, flags);
2636 ret = put_user(val, p);
2637 break;
2638
2639 case SOUND_PCM_READ_RATE:
2640 ret = put_user(dmabuf->rate, p);
2641 break;
2642
2643 case SOUND_PCM_READ_CHANNELS:
2644 ret = put_user((dmabuf->fmt & TRIDENT_FMT_STEREO) ? 2 : 1,
2645 p);
2646 break;
2647
2648 case SOUND_PCM_READ_BITS:
2649 ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
2650 AFMT_U8, p);
2651 break;
2652
2653 case SNDCTL_DSP_GETCHANNELMASK:
2654 ret = put_user(DSP_BIND_FRONT | DSP_BIND_SURR |
2655 DSP_BIND_CENTER_LFE, p);
2656 break;
2657
2658 case SNDCTL_DSP_BIND_CHANNEL:
2659 if (state->card->pci_id != PCI_DEVICE_ID_SI_7018) {
2660 ret = -EINVAL;
2661 break;
2662 }
2663
2664 if (get_user(val, p)) {
2665 ret = -EFAULT;
2666 break;
2667 }
2668 if (val == DSP_BIND_QUERY) {
2669 val = dmabuf->channel->attribute | 0x3c00;
2670 val = attr2mask[val >> 8];
2671 } else {
2672 dmabuf->ready = 0;
2673 if (file->f_mode & FMODE_READ)
2674 dmabuf->channel->attribute = (CHANNEL_REC |
2675 SRC_ENABLE);
2676 if (file->f_mode & FMODE_WRITE)
2677 dmabuf->channel->attribute = (CHANNEL_SPC_PB |
2678 SRC_ENABLE);
2679 dmabuf->channel->attribute |= mask2attr[ffs(val)];
2680 }
2681 ret = put_user(val, p);
2682 break;
2683
2684 case SNDCTL_DSP_MAPINBUF:
2685 case SNDCTL_DSP_MAPOUTBUF:
2686 case SNDCTL_DSP_SETSYNCRO:
2687 case SOUND_PCM_WRITE_FILTER:
2688 case SOUND_PCM_READ_FILTER:
2689 default:
2690 ret = -EINVAL;
2691 break;
2692
2693 }
2694 return ret;
2695}
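/*
 * A minimal user-space sketch (not part of this driver) of how the OSS
 * ioctls handled above are typically exercised; the device path, the
 * parameter values and pcm_buffer are illustrative only:
 *
 *	int fd = open("/dev/dsp", O_WRONLY);
 *	int fmt = AFMT_S16_LE, channels = 2, rate = 44100;
 *	ioctl(fd, SNDCTL_DSP_SETFMT, &fmt);
 *	ioctl(fd, SNDCTL_DSP_CHANNELS, &channels);
 *	ioctl(fd, SNDCTL_DSP_SPEED, &rate);
 *	write(fd, pcm_buffer, sizeof(pcm_buffer));
 *	close(fd);
 */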
2696
2697static int
2698trident_open(struct inode *inode, struct file *file)
2699{
2700 int i = 0;
2701 int minor = iminor(inode);
2702 struct trident_card *card = devs;
2703 struct trident_state *state = NULL;
2704 struct dmabuf *dmabuf = NULL;
2705 unsigned long flags;
2706
2707 /* Added by Matt Wu 01-05-2001 */
2708	/* TODO: there's some redundancy here wrt the check below */
2709 /* for multi_use_count > 0. Should we return -EBUSY or find */
2710 /* a different card? for now, don't break current behaviour */
2711 /* -- mulix */
2712 if (file->f_mode & FMODE_READ) {
2713 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
2714 if (card->multi_channel_use_count > 0)
2715 return -EBUSY;
2716 }
2717 }
2718
2719 /* find an available virtual channel (instance of /dev/dsp) */
2720 while (card != NULL) {
2721 mutex_lock(&card->open_mutex);
2722 if (file->f_mode & FMODE_READ) {
2723 /* Skip opens on cards that are in 6 channel mode */
2724 if (card->multi_channel_use_count > 0) {
2725 mutex_unlock(&card->open_mutex);
2726 card = card->next;
2727 continue;
2728 }
2729 }
2730 for (i = 0; i < NR_HW_CH; i++) {
2731 if (card->states[i] == NULL) {
2732 state = card->states[i] = kzalloc(sizeof(*state), GFP_KERNEL);
2733 if (state == NULL) {
2734 mutex_unlock(&card->open_mutex);
2735 return -ENOMEM;
2736 }
2737 mutex_init(&state->sem);
2738 dmabuf = &state->dmabuf;
2739 goto found_virt;
2740 }
2741 }
2742 mutex_unlock(&card->open_mutex);
2743 card = card->next;
2744 }
2745	/* no more virtual channels available */
2746 if (!state) {
2747 return -ENODEV;
2748 }
2749 found_virt:
2750 /* found a free virtual channel, allocate hardware channels */
2751 if (file->f_mode & FMODE_READ)
2752 dmabuf->channel = card->alloc_rec_pcm_channel(card);
2753 else
2754 dmabuf->channel = card->alloc_pcm_channel(card);
2755
2756 if (dmabuf->channel == NULL) {
2757 kfree(card->states[i]);
2758 card->states[i] = NULL;
2759 return -ENODEV;
2760 }
2761
2762 /* initialize the virtual channel */
2763 state->virt = i;
2764 state->card = card;
2765 state->magic = TRIDENT_STATE_MAGIC;
2766 init_waitqueue_head(&dmabuf->wait);
2767 file->private_data = state;
2768
2769	/* set default sample format. According to the OSS Programmer's */
2770	/* Guide, /dev/dsp should default to unsigned 8-bit, mono, */
2771	/* with a sample rate of 8 kHz, and /dev/dspW accepts 16-bit samples */
2772 if (file->f_mode & FMODE_WRITE) {
2773 dmabuf->fmt &= ~TRIDENT_FMT_MASK;
2774 if ((minor & 0x0f) == SND_DEV_DSP16)
2775 dmabuf->fmt |= TRIDENT_FMT_16BIT;
2776 dmabuf->ossfragshift = 0;
2777 dmabuf->ossmaxfrags = 0;
2778 dmabuf->subdivision = 0;
2779 if (card->pci_id == PCI_DEVICE_ID_SI_7018) {
2780 /* set default channel attribute to normal playback */
2781 dmabuf->channel->attribute = CHANNEL_PB;
2782 }
2783 spin_lock_irqsave(&card->lock, flags);
2784 trident_set_dac_rate(state, 8000);
2785 spin_unlock_irqrestore(&card->lock, flags);
2786 }
2787
2788 if (file->f_mode & FMODE_READ) {
2789	/* FIXME: the Trident 4D can only record signed 16-bit stereo at */
2790	/* 48 kHz; to be dealt with in trident_set_adc_rate() ?? */
2791 dmabuf->fmt &= ~TRIDENT_FMT_MASK;
2792 if ((minor & 0x0f) == SND_DEV_DSP16)
2793 dmabuf->fmt |= TRIDENT_FMT_16BIT;
2794 dmabuf->ossfragshift = 0;
2795 dmabuf->ossmaxfrags = 0;
2796 dmabuf->subdivision = 0;
2797 if (card->pci_id == PCI_DEVICE_ID_SI_7018) {
2798 /* set default channel attribute to 0x8a80, record from
2799 PCM L/R FIFO and mono = (left + right + 1)/2 */
2800 dmabuf->channel->attribute = (CHANNEL_REC | PCM_LR |
2801 MONO_MIX);
2802 }
2803 spin_lock_irqsave(&card->lock, flags);
2804 trident_set_adc_rate(state, 8000);
2805 spin_unlock_irqrestore(&card->lock, flags);
2806
2807 /* Added by Matt Wu 01-05-2001 */
2808 if (card->pci_id == PCI_DEVICE_ID_ALI_5451)
2809 card->rec_channel_use_count++;
2810 }
2811
2812 state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
2813 mutex_unlock(&card->open_mutex);
2814
2815 pr_debug("trident: open virtual channel %d, hard channel %d\n",
2816 state->virt, dmabuf->channel->num);
2817
2818 return nonseekable_open(inode, file);
2819}
2820
2821static int
2822trident_release(struct inode *inode, struct file *file)
2823{
2824 struct trident_state *state = (struct trident_state *)file->private_data;
2825 struct trident_card *card;
2826 struct dmabuf *dmabuf;
2827
2828 VALIDATE_STATE(state);
2829
2830 card = state->card;
2831 dmabuf = &state->dmabuf;
2832
2833 if (file->f_mode & FMODE_WRITE) {
2834 trident_clear_tail(state);
2835 drain_dac(state, file->f_flags & O_NONBLOCK);
2836 }
2837
2838 pr_debug("trident: closing virtual channel %d, hard channel %d\n",
2839 state->virt, dmabuf->channel->num);
2840
2841 /* stop DMA state machine and free DMA buffers/channels */
2842 mutex_lock(&card->open_mutex);
2843
2844 if (file->f_mode & FMODE_WRITE) {
2845 stop_dac(state);
2846 dealloc_dmabuf(&state->dmabuf, state->card->pci_dev);
2847 state->card->free_pcm_channel(state->card, dmabuf->channel->num);
2848
2849 /* Added by Matt Wu */
2850 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
2851 if (state->chans_num > 2) {
2852 if (card->multi_channel_use_count-- < 0)
2853 card->multi_channel_use_count = 0;
2854 if (card->multi_channel_use_count == 0)
2855 ali_close_multi_channels();
2856 ali_free_other_states_resources(state);
2857 }
2858 }
2859 }
2860 if (file->f_mode & FMODE_READ) {
2861 stop_adc(state);
2862 dealloc_dmabuf(&state->dmabuf, state->card->pci_dev);
2863 state->card->free_pcm_channel(state->card, dmabuf->channel->num);
2864
2865 /* Added by Matt Wu */
2866 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
2867 if (card->rec_channel_use_count-- < 0)
2868 card->rec_channel_use_count = 0;
2869 }
2870 }
2871
2872 card->states[state->virt] = NULL;
2873 kfree(state);
2874
2875 /* we're covered by the open_mutex */
2876 mutex_unlock(&card->open_mutex);
2877
2878 return 0;
2879}
2880
2881static const struct file_operations trident_audio_fops = {
2882 .owner = THIS_MODULE,
2883 .llseek = no_llseek,
2884 .read = trident_read,
2885 .write = trident_write,
2886 .poll = trident_poll,
2887 .ioctl = trident_ioctl,
2888 .mmap = trident_mmap,
2889 .open = trident_open,
2890 .release = trident_release,
2891};
2892
2893/* trident specific AC97 functions */
2894/* Write AC97 codec registers */
2895static void
2896trident_ac97_set(struct ac97_codec *codec, u8 reg, u16 val)
2897{
2898 struct trident_card *card = (struct trident_card *)codec->private_data;
2899 unsigned int address, mask, busy;
2900 unsigned short count = 0xffff;
2901 unsigned long flags;
2902 u32 data;
2903
2904 data = ((u32) val) << 16;
2905
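	/* Command word layout: bits 31..16 carry the value to write; the low
	   16 bits are filled in below with the chip-specific busy/secondary
	   flags plus the codec register address (reg & AC97_REG_ADDR). */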
2906 switch (card->pci_id) {
2907 default:
2908 case PCI_DEVICE_ID_SI_7018:
2909 address = SI_AC97_WRITE;
2910 mask = SI_AC97_BUSY_WRITE | SI_AC97_AUDIO_BUSY;
2911 if (codec->id)
2912 mask |= SI_AC97_SECONDARY;
2913 busy = SI_AC97_BUSY_WRITE;
2914 break;
2915 case PCI_DEVICE_ID_TRIDENT_4DWAVE_DX:
2916 address = DX_ACR0_AC97_W;
2917 mask = busy = DX_AC97_BUSY_WRITE;
2918 break;
2919 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
2920 address = NX_ACR1_AC97_W;
2921 mask = NX_AC97_BUSY_WRITE;
2922 if (codec->id)
2923 mask |= NX_AC97_WRITE_SECONDARY;
2924 busy = NX_AC97_BUSY_WRITE;
2925 break;
2926 case PCI_DEVICE_ID_INTERG_5050:
2927 address = SI_AC97_WRITE;
2928 mask = busy = SI_AC97_BUSY_WRITE;
2929 if (codec->id)
2930 mask |= SI_AC97_SECONDARY;
2931 break;
2932 }
2933
2934 spin_lock_irqsave(&card->lock, flags);
2935 do {
2936 if ((inw(TRID_REG(card, address)) & busy) == 0)
2937 break;
2938 } while (--count);
2939
2940 data |= (mask | (reg & AC97_REG_ADDR));
2941
2942 if (count == 0) {
2943 printk(KERN_ERR "trident: AC97 CODEC write timed out.\n");
2944 spin_unlock_irqrestore(&card->lock, flags);
2945 return;
2946 }
2947
2948 outl(data, TRID_REG(card, address));
2949 spin_unlock_irqrestore(&card->lock, flags);
2950}
2951
2952/* Read AC97 codec registers */
2953static u16
2954trident_ac97_get(struct ac97_codec *codec, u8 reg)
2955{
2956 struct trident_card *card = (struct trident_card *)codec->private_data;
2957 unsigned int address, mask, busy;
2958 unsigned short count = 0xffff;
2959 unsigned long flags;
2960 u32 data;
2961
2962 switch (card->pci_id) {
2963 default:
2964 case PCI_DEVICE_ID_SI_7018:
2965 address = SI_AC97_READ;
2966 mask = SI_AC97_BUSY_READ | SI_AC97_AUDIO_BUSY;
2967 if (codec->id)
2968 mask |= SI_AC97_SECONDARY;
2969 busy = SI_AC97_BUSY_READ;
2970 break;
2971 case PCI_DEVICE_ID_TRIDENT_4DWAVE_DX:
2972 address = DX_ACR1_AC97_R;
2973 mask = busy = DX_AC97_BUSY_READ;
2974 break;
2975 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
2976 if (codec->id)
2977 address = NX_ACR3_AC97_R_SECONDARY;
2978 else
2979 address = NX_ACR2_AC97_R_PRIMARY;
2980 mask = NX_AC97_BUSY_READ;
2981 busy = NX_AC97_BUSY_READ | NX_AC97_BUSY_DATA;
2982 break;
2983 case PCI_DEVICE_ID_INTERG_5050:
2984 address = SI_AC97_READ;
2985 mask = busy = SI_AC97_BUSY_READ;
2986 if (codec->id)
2987 mask |= SI_AC97_SECONDARY;
2988 break;
2989 }
2990
2991 data = (mask | (reg & AC97_REG_ADDR));
2992
2993 spin_lock_irqsave(&card->lock, flags);
2994 outl(data, TRID_REG(card, address));
2995 do {
2996 data = inl(TRID_REG(card, address));
2997 if ((data & busy) == 0)
2998 break;
2999 } while (--count);
3000 spin_unlock_irqrestore(&card->lock, flags);
3001
3002 if (count == 0) {
3003 printk(KERN_ERR "trident: AC97 CODEC read timed out.\n");
3004 data = 0;
3005 }
3006 return ((u16) (data >> 16));
3007}
3008
3009/* AC97 mixer register read/write rewritten by hulei for ALi */
3010static int
3011acquirecodecaccess(struct trident_card *card)
3012{
3013 u16 wsemamask = 0x6000; /* bit 14..13 */
3014 u16 wsemabits;
3015 u16 wcontrol;
3016 int block = 0;
3017 int ncount = 25;
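	/* Semaphore bits 14..13 of ALI_AC97_WRITE: 0x4000 means the audio
	   engine owns the codec (success), 0 means it is free and is claimed
	   by writing 0x4000 below; any other value is presumably held by
	   another agent, so we poll and, as a last resort, force the claim. */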
3018 while (1) {
3019 wcontrol = inw(TRID_REG(card, ALI_AC97_WRITE));
3020 wsemabits = wcontrol & wsemamask;
3021
3022 if (wsemabits == 0x4000)
3023			return 1;	/* 0x4000 means audio owns it, so success */
3024 if (ncount-- < 0)
3025 break;
3026 if (wsemabits == 0) {
3027 unlock:
3028 outl(((u32) (wcontrol & 0x1eff) | 0x00004000),
3029 TRID_REG(card, ALI_AC97_WRITE));
3030 continue;
3031 }
3032 udelay(20);
3033 }
3034 if (!block) {
3035		pr_debug("accesscodecsemaphore: trying to unlock\n");
3036 block = 1;
3037 goto unlock;
3038 }
3039 return 0;
3040}
3041
3042static void
3043releasecodecaccess(struct trident_card *card)
3044{
3045 unsigned long wcontrol;
3046 wcontrol = inl(TRID_REG(card, ALI_AC97_WRITE));
3047 outl((wcontrol & 0xffff1eff), TRID_REG(card, ALI_AC97_WRITE));
3048}
3049
3050static int
3051waitforstimertick(struct trident_card *card)
3052{
3053 unsigned long chk1, chk2;
3054 unsigned int wcount = 0xffff;
3055 chk1 = inl(TRID_REG(card, ALI_STIMER));
3056
3057 while (1) {
3058 chk2 = inl(TRID_REG(card, ALI_STIMER));
3059 if ((wcount > 0) && chk1 != chk2)
3060 return 1;
3061 if (wcount <= 0)
3062 break;
3063 udelay(50);
3064 }
3065 return 0;
3066}
3067
3068/* Read AC97 codec registers for ALi*/
3069static u16
3070ali_ac97_get(struct trident_card *card, int secondary, u8 reg)
3071{
3072 unsigned int address, mask;
3073 unsigned int ncount;
3074 unsigned long aud_reg;
3075 u32 data;
3076 u16 wcontrol;
3077 unsigned long flags;
3078
3079 BUG_ON(!card);
3080
3081 address = ALI_AC97_READ;
3082 if (card->revision == ALI_5451_V02) {
3083 address = ALI_AC97_WRITE;
3084 }
3085 mask = ALI_AC97_READ_ACTION | ALI_AC97_AUDIO_BUSY;
3086 if (secondary)
3087 mask |= ALI_AC97_SECONDARY;
3088
3089 spin_lock_irqsave(&card->lock, flags);
3090
3091 if (!acquirecodecaccess(card))
3092		printk(KERN_ERR "ali_ac97_read: failed to acquire codec access\n");
3093
3094 wcontrol = inw(TRID_REG(card, ALI_AC97_WRITE));
3095 wcontrol &= 0xfe00;
3096 wcontrol |= (0x8000 | reg);
3097 outw(wcontrol, TRID_REG(card, ALI_AC97_WRITE));
3098
3099 data = (mask | (reg & AC97_REG_ADDR));
3100
3101 if (!waitforstimertick(card)) {
3102 printk(KERN_ERR "ali_ac97_read: BIT_CLOCK is dead\n");
3103 goto releasecodec;
3104 }
3105
3106 udelay(20);
3107
3108 ncount = 10;
3109
3110 while (1) {
3111 if ((inw(TRID_REG(card, ALI_AC97_WRITE)) & ALI_AC97_BUSY_READ)
3112 != 0)
3113 break;
3114 if (ncount <= 0)
3115 break;
3116 if (ncount-- == 1) {
3117			pr_debug("ali_ac97_read: trying to clear the busy flag\n");
3118 aud_reg = inl(TRID_REG(card, ALI_AC97_WRITE));
3119 outl((aud_reg & 0xffff7fff),
3120 TRID_REG(card, ALI_AC97_WRITE));
3121 }
3122 udelay(10);
3123 }
3124
3125 data = inl(TRID_REG(card, address));
3126
3127 spin_unlock_irqrestore(&card->lock, flags);
3128
3129 return ((u16) (data >> 16));
3130
3131 releasecodec:
3132 releasecodecaccess(card);
3133 spin_unlock_irqrestore(&card->lock, flags);
3134 printk(KERN_ERR "ali_ac97_read: AC97 CODEC read timed out.\n");
3135 return 0;
3136}
3137
3138/* Write AC97 codec registers for hulei*/
3139static void
3140ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val)
3141{
3142 unsigned int address, mask;
3143 unsigned int ncount;
3144 u32 data;
3145 u16 wcontrol;
3146 unsigned long flags;
3147
3148 data = ((u32) val) << 16;
3149
3150 BUG_ON(!card);
3151
3152 address = ALI_AC97_WRITE;
3153 mask = ALI_AC97_WRITE_ACTION | ALI_AC97_AUDIO_BUSY;
3154 if (secondary)
3155 mask |= ALI_AC97_SECONDARY;
3156 if (card->revision == ALI_5451_V02)
3157 mask |= ALI_AC97_WRITE_MIXER_REGISTER;
3158
3159 spin_lock_irqsave(&card->lock, flags);
3160 if (!acquirecodecaccess(card))
3161		printk(KERN_ERR "ali_ac97_write: failed to acquire codec access\n");
3162
3163 wcontrol = inw(TRID_REG(card, ALI_AC97_WRITE));
3164 wcontrol &= 0xff00;
3165	wcontrol |= (0x8100 | reg);	/* bit 8 = 1: reserved on the ALi 1535, */
3166					/* a write on the ALi 1535+ */
3167 outl((data | wcontrol), TRID_REG(card, ALI_AC97_WRITE));
3168
3169 if (!waitforstimertick(card)) {
3170 printk(KERN_ERR "BIT_CLOCK is dead\n");
3171 goto releasecodec;
3172 }
3173
3174 ncount = 10;
3175 while (1) {
3176 wcontrol = inw(TRID_REG(card, ALI_AC97_WRITE));
3177 if (!(wcontrol & 0x8000))
3178 break;
3179 if (ncount <= 0)
3180 break;
3181 if (ncount-- == 1) {
3182			pr_debug("ali_ac97_set: trying to clear the busy flag\n");
3183 outw(wcontrol & 0x7fff,
3184 TRID_REG(card, ALI_AC97_WRITE));
3185 }
3186 udelay(10);
3187 }
3188
3189 releasecodec:
3190 releasecodecaccess(card);
3191 spin_unlock_irqrestore(&card->lock, flags);
3192 return;
3193}
3194
3195static void
3196ali_enable_special_channel(struct trident_state *stat)
3197{
3198 struct trident_card *card = stat->card;
3199 unsigned long s_channels;
3200
3201 s_channels = inl(TRID_REG(card, ALI_GLOBAL_CONTROL));
3202 s_channels |= (1 << stat->dmabuf.channel->num);
3203 outl(s_channels, TRID_REG(card, ALI_GLOBAL_CONTROL));
3204}
3205
3206static u16
3207ali_ac97_read(struct ac97_codec *codec, u8 reg)
3208{
3209 int id;
3210 u16 data;
3211 struct trident_card *card = NULL;
3212
3213 /* Added by Matt Wu */
3214 BUG_ON(!codec);
3215
3216 card = (struct trident_card *) codec->private_data;
3217
3218 if (!card->mixer_regs_ready)
3219 return ali_ac97_get(card, codec->id, reg);
3220
3221 /*
3222	 *	FIXME: we need to stop caching at least some of these registers
3223 */
3224 if (codec->id)
3225 id = 1;
3226 else
3227 id = 0;
3228
3229 data = card->mixer_regs[reg / 2][id];
3230 return data;
3231}
3232
3233static void
3234ali_ac97_write(struct ac97_codec *codec, u8 reg, u16 val)
3235{
3236 int id;
3237 struct trident_card *card;
3238
3239 /* Added by Matt Wu */
3240 BUG_ON(!codec);
3241
3242 card = (struct trident_card *) codec->private_data;
3243
3244 if (!card->mixer_regs_ready) {
3245 ali_ac97_set(card, codec->id, reg, val);
3246 return;
3247 }
3248
3249 if (codec->id)
3250 id = 1;
3251 else
3252 id = 0;
3253
3254 card->mixer_regs[reg / 2][id] = val;
3255 ali_ac97_set(card, codec->id, reg, val);
3256}
3257
3258/*
3259flag: ALI_SPDIF_OUT_TO_SPDIF_OUT
3260 ALI_PCM_TO_SPDIF_OUT
3261*/
3262
3263static void
3264ali_setup_spdif_out(struct trident_card *card, int flag)
3265{
3266 unsigned long spdif;
3267 unsigned char ch;
3268
3269 char temp;
3270 struct pci_dev *pci_dev = NULL;
3271
3272 pci_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533,
3273 pci_dev);
3274 if (pci_dev == NULL)
3275 return;
3276 pci_read_config_byte(pci_dev, 0x61, &temp);
3277 temp |= 0x40;
3278 pci_write_config_byte(pci_dev, 0x61, temp);
3279 pci_read_config_byte(pci_dev, 0x7d, &temp);
3280 temp |= 0x01;
3281 pci_write_config_byte(pci_dev, 0x7d, temp);
3282 pci_read_config_byte(pci_dev, 0x7e, &temp);
3283 temp &= (~0x20);
3284 temp |= 0x10;
3285 pci_write_config_byte(pci_dev, 0x7e, temp);
3286
3287 pci_dev_put(pci_dev);
3288
3289 ch = inb(TRID_REG(card, ALI_SCTRL));
3290 outb(ch | ALI_SPDIF_OUT_ENABLE, TRID_REG(card, ALI_SCTRL));
3291 ch = inb(TRID_REG(card, ALI_SPDIF_CTRL));
3292 outb(ch & ALI_SPDIF_OUT_CH_STATUS, TRID_REG(card, ALI_SPDIF_CTRL));
3293
3294 if (flag & ALI_SPDIF_OUT_TO_SPDIF_OUT) {
3295 spdif = inw(TRID_REG(card, ALI_GLOBAL_CONTROL));
3296 spdif |= ALI_SPDIF_OUT_CH_ENABLE;
3297 spdif &= ALI_SPDIF_OUT_SEL_SPDIF;
3298 outw(spdif, TRID_REG(card, ALI_GLOBAL_CONTROL));
3299 spdif = inw(TRID_REG(card, ALI_SPDIF_CS));
3300 if (flag & ALI_SPDIF_OUT_NON_PCM)
3301 spdif |= 0x0002;
3302 else
3303 spdif &= (~0x0002);
3304 outw(spdif, TRID_REG(card, ALI_SPDIF_CS));
3305 } else {
3306 spdif = inw(TRID_REG(card, ALI_GLOBAL_CONTROL));
3307 spdif |= ALI_SPDIF_OUT_SEL_PCM;
3308 outw(spdif, TRID_REG(card, ALI_GLOBAL_CONTROL));
3309 }
3310}
3311
3312static void
3313ali_disable_special_channel(struct trident_card *card, int ch)
3314{
3315 unsigned long sc;
3316
3317 sc = inl(TRID_REG(card, ALI_GLOBAL_CONTROL));
3318 sc &= ~(1 << ch);
3319 outl(sc, TRID_REG(card, ALI_GLOBAL_CONTROL));
3320}
3321
3322static void
3323ali_disable_spdif_in(struct trident_card *card)
3324{
3325 unsigned long spdif;
3326
3327 spdif = inl(TRID_REG(card, ALI_GLOBAL_CONTROL));
3328 spdif &= (~ALI_SPDIF_IN_SUPPORT);
3329 outl(spdif, TRID_REG(card, ALI_GLOBAL_CONTROL));
3330
3331 ali_disable_special_channel(card, ALI_SPDIF_IN_CHANNEL);
3332}
3333
3334static void
3335ali_setup_spdif_in(struct trident_card *card)
3336{
3337 unsigned long spdif;
3338
3339	/* Set SPDIF IN supported */
3340 spdif = inl(TRID_REG(card, ALI_GLOBAL_CONTROL));
3341 spdif |= ALI_SPDIF_IN_SUPPORT;
3342 outl(spdif, TRID_REG(card, ALI_GLOBAL_CONTROL));
3343
3344	/* Set SPDIF IN recording */
3345 spdif = inl(TRID_REG(card, ALI_GLOBAL_CONTROL));
3346 spdif |= ALI_SPDIF_IN_CH_ENABLE;
3347 outl(spdif, TRID_REG(card, ALI_GLOBAL_CONTROL));
3348
3349 spdif = inb(TRID_REG(card, ALI_SPDIF_CTRL));
3350 spdif |= ALI_SPDIF_IN_CH_STATUS;
3351 outb(spdif, TRID_REG(card, ALI_SPDIF_CTRL));
3352/*
3353 spdif = inb(TRID_REG(card, ALI_SPDIF_CTRL));
3354 spdif |= ALI_SPDIF_IN_FUNC_ENABLE;
3355 outb(spdif, TRID_REG(card, ALI_SPDIF_CTRL));
3356*/
3357}
3358
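/* Busy-wait for roughly 'interval' ticks of the ALi sample timer
 * (ALI_STIMER); the absolute duration depends on that timer's clock,
 * which is not documented here. */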
3359static void
3360ali_delay(struct trident_card *card, int interval)
3361{
3362 unsigned long begintimer, currenttimer;
3363
3364 begintimer = inl(TRID_REG(card, ALI_STIMER));
3365 currenttimer = inl(TRID_REG(card, ALI_STIMER));
3366
3367 while (currenttimer < begintimer + interval)
3368 currenttimer = inl(TRID_REG(card, ALI_STIMER));
3369}
3370
3371static void
3372ali_detect_spdif_rate(struct trident_card *card)
3373{
3374 u16 wval = 0;
3375 u16 count = 0;
3376 u8 bval = 0, R1 = 0, R2 = 0;
3377
3378 bval = inb(TRID_REG(card, ALI_SPDIF_CTRL));
3379 bval |= 0x02;
3380 outb(bval, TRID_REG(card, ALI_SPDIF_CTRL));
3381
3382 bval = inb(TRID_REG(card, ALI_SPDIF_CTRL + 1));
3383 bval |= 0x1F;
3384 outb(bval, TRID_REG(card, ALI_SPDIF_CTRL + 1));
3385
3386 while (((R1 < 0x0B) || (R1 > 0x0E)) && (R1 != 0x12) &&
3387 count <= 50000) {
3388 count++;
3389
3390 ali_delay(card, 6);
3391
3392 bval = inb(TRID_REG(card, ALI_SPDIF_CTRL + 1));
3393 R1 = bval & 0x1F;
3394 }
3395
3396 if (count > 50000) {
3397 printk(KERN_WARNING "trident: Error in "
3398 "ali_detect_spdif_rate!\n");
3399 return;
3400 }
3401
3402 count = 0;
3403
3404 while (count <= 50000) {
3405 count++;
3406
3407 ali_delay(card, 6);
3408
3409 bval = inb(TRID_REG(card, ALI_SPDIF_CTRL + 1));
3410 R2 = bval & 0x1F;
3411
3412 if (R2 != R1)
3413 R1 = R2;
3414 else
3415 break;
3416 }
3417
3418 if (count > 50000) {
3419 printk(KERN_WARNING "trident: Error in "
3420 "ali_detect_spdif_rate!\n");
3421 return;
3422 }
3423
3424 switch (R2) {
3425 case 0x0b:
3426 case 0x0c:
3427 case 0x0d:
3428 case 0x0e:
3429 wval = inw(TRID_REG(card, ALI_SPDIF_CTRL + 2));
3430 wval &= 0xE0F0;
3431 wval |= (u16) 0x09 << 8 | (u16) 0x05;
3432 outw(wval, TRID_REG(card, ALI_SPDIF_CTRL + 2));
3433
3434 bval = inb(TRID_REG(card, ALI_SPDIF_CS + 3)) & 0xF0;
3435 outb(bval | 0x02, TRID_REG(card, ALI_SPDIF_CS + 3));
3436 break;
3437
3438 case 0x12:
3439 wval = inw(TRID_REG(card, ALI_SPDIF_CTRL + 2));
3440 wval &= 0xE0F0;
3441 wval |= (u16) 0x0E << 8 | (u16) 0x08;
3442 outw(wval, TRID_REG(card, ALI_SPDIF_CTRL + 2));
3443
3444 bval = inb(TRID_REG(card, ALI_SPDIF_CS + 3)) & 0xF0;
3445 outb(bval | 0x03, TRID_REG(card, ALI_SPDIF_CS + 3));
3446 break;
3447
3448 default:
3449 break;
3450 }
3451
3452}
3453
3454static unsigned int
3455ali_get_spdif_in_rate(struct trident_card *card)
3456{
3457 u32 dwRate = 0;
3458 u8 bval = 0;
3459
3460 ali_detect_spdif_rate(card);
3461
3462 bval = inb(TRID_REG(card, ALI_SPDIF_CTRL));
3463 bval &= 0x7F;
3464 bval |= 0x40;
3465 outb(bval, TRID_REG(card, ALI_SPDIF_CTRL));
3466
3467 bval = inb(TRID_REG(card, ALI_SPDIF_CS + 3));
3468 bval &= 0x0F;
3469
3470 switch (bval) {
3471 case 0:
3472 dwRate = 44100;
3473 break;
3474 case 1:
3475 dwRate = 48000;
3476 break;
3477 case 2:
3478 dwRate = 32000;
3479 break;
3480 default:
3481		/* unrecognized rate code; dwRate stays 0 */
3482 break;
3483 }
3484
3485 return dwRate;
3486
3487}
3488
3489static int
3490ali_close_multi_channels(void)
3491{
3492 char temp = 0;
3493 struct pci_dev *pci_dev = NULL;
3494
3495 pci_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533,
3496 pci_dev);
3497 if (pci_dev == NULL)
3498 return -1;
3499
3500 pci_read_config_byte(pci_dev, 0x59, &temp);
3501 temp &= ~0x80;
3502 pci_write_config_byte(pci_dev, 0x59, temp);
3503
3504 pci_dev_put(pci_dev);
3505
3506 pci_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101,
3507 NULL);
3508 if (pci_dev == NULL)
3509 return -1;
3510
3511 pci_read_config_byte(pci_dev, 0xB8, &temp);
3512 temp &= ~0x20;
3513 pci_write_config_byte(pci_dev, 0xB8, temp);
3514
3515 pci_dev_put(pci_dev);
3516
3517 return 0;
3518}
3519
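/* Enable 6-channel (5.1) output.  The PCI config-space writes below poke the
 * ALi M1533 (reg 0x59, bit 7) and M7101 (reg 0xB8, bit 5) south-bridge
 * functions; their exact meaning is not documented here, but they appear to
 * route the extra outputs, and ali_close_multi_channels() clears the same
 * bits again. */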
3520static int
3521ali_setup_multi_channels(struct trident_card *card, int chan_nums)
3522{
3523 unsigned long dwValue;
3524 char temp = 0;
3525 struct pci_dev *pci_dev = NULL;
3526
3527 pci_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533,
3528 pci_dev);
3529 if (pci_dev == NULL)
3530 return -1;
3531 pci_read_config_byte(pci_dev, 0x59, &temp);
3532 temp |= 0x80;
3533 pci_write_config_byte(pci_dev, 0x59, temp);
3534
3535 pci_dev_put(pci_dev);
3536
3537 pci_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101,
3538 NULL);
3539 if (pci_dev == NULL)
3540 return -1;
3541 pci_read_config_byte(pci_dev, (int) 0xB8, &temp);
3542 temp |= 0x20;
3543 pci_write_config_byte(pci_dev, (int) 0xB8, (u8) temp);
3544
3545 pci_dev_put(pci_dev);
3546
3547 if (chan_nums == 6) {
3548 dwValue = inl(TRID_REG(card, ALI_SCTRL)) | 0x000f0000;
3549 outl(dwValue, TRID_REG(card, ALI_SCTRL));
3550 mdelay(4);
3551 dwValue = inl(TRID_REG(card, ALI_SCTRL));
3552 if (dwValue & 0x2000000) {
3553 ali_ac97_write(card->ac97_codec[0], 0x02, 8080);
3554 ali_ac97_write(card->ac97_codec[0], 0x36, 0);
3555 ali_ac97_write(card->ac97_codec[0], 0x38, 0);
3556 /*
3557					 * On a board with a single codec you won't get
3558					 * surround sound. On other boards, configure it.
3559 */
3560 if (card->ac97_codec[1] != NULL) {
3561 ali_ac97_write(card->ac97_codec[1], 0x36, 0);
3562 ali_ac97_write(card->ac97_codec[1], 0x38, 0);
3563 ali_ac97_write(card->ac97_codec[1], 0x02, 0x0606);
3564 ali_ac97_write(card->ac97_codec[1], 0x18, 0x0303);
3565 ali_ac97_write(card->ac97_codec[1], 0x74, 0x3);
3566 }
3567 return 1;
3568 }
3569 }
3570 return -EINVAL;
3571}
3572
3573static void
3574ali_free_pcm_channel(struct trident_card *card, unsigned int channel)
3575{
3576 int bank;
3577
3578 if (channel > 31)
3579 return;
3580
3581 bank = channel >> 5;
3582 channel = channel & 0x1f;
3583
3584 card->banks[bank].bitmap &= ~(1 << (channel));
3585}
3586
3587static int
3588ali_allocate_other_states_resources(struct trident_state *state, int chan_nums)
3589{
3590 struct trident_card *card = state->card;
3591 struct trident_state *s;
3592 int i, state_count = 0;
3593 struct trident_pcm_bank *bank;
3594 struct trident_channel *channel;
3595 unsigned long num;
3596
3597 bank = &card->banks[BANK_A];
3598
3599 if (chan_nums != 6)
3600 return 0;
3601
3602 for (i = 0; (i < ALI_CHANNELS) && (state_count != 4); i++) {
3603 if (card->states[i])
3604 continue;
3605
3606 num = ali_multi_channels_5_1[state_count];
3607 if (!(bank->bitmap & (1 << num))) {
3608 bank->bitmap |= 1 << num;
3609 channel = &bank->channels[num];
3610 channel->num = num;
3611 } else {
3612 state_count--;
3613 for (; state_count >= 0; state_count--) {
3614 kfree(state->other_states[state_count]);
3615 num = ali_multi_channels_5_1[state_count];
3616 ali_free_pcm_channel(card, num);
3617 }
3618 return -EBUSY;
3619 }
3620 s = card->states[i] = kzalloc(sizeof(*state), GFP_KERNEL);
3621 if (!s) {
3622 num = ali_multi_channels_5_1[state_count];
3623 ali_free_pcm_channel(card, num);
3624 state_count--;
3625 for (; state_count >= 0; state_count--) {
3626 num = ali_multi_channels_5_1[state_count];
3627 ali_free_pcm_channel(card, num);
3628 kfree(state->other_states[state_count]);
3629 }
3630 return -ENOMEM;
3631 }
3632
3633 s->dmabuf.channel = channel;
3634 s->dmabuf.ossfragshift = s->dmabuf.ossmaxfrags =
3635 s->dmabuf.subdivision = 0;
3636 init_waitqueue_head(&s->dmabuf.wait);
3637 s->magic = card->magic;
3638 s->card = card;
3639 s->virt = i;
3640 ali_enable_special_channel(s);
3641 state->other_states[state_count++] = s;
3642 }
3643
3644 if (state_count != 4) {
3645 state_count--;
3646 for (; state_count >= 0; state_count--) {
3647 kfree(state->other_states[state_count]);
3648 num = ali_multi_channels_5_1[state_count];
3649 ali_free_pcm_channel(card, num);
3650 }
3651 return -EBUSY;
3652 }
3653 return 0;
3654}
3655
3656#ifdef CONFIG_PM
3657/* save registers for ALi Power Management */
3658static struct ali_saved_registers {
3659 unsigned long global_regs[ALI_GLOBAL_REGS];
3660 unsigned long channel_regs[ALI_CHANNELS][ALI_CHANNEL_REGS];
3661 unsigned mixer_regs[ALI_MIXER_REGS];
3662} ali_registers;
3663
3664static void
3665ali_save_regs(struct trident_card *card)
3666{
3667 unsigned long flags;
3668 int i, j;
3669
3670 spin_lock_irqsave(&card->lock, flags);
3671
3672 ali_registers.global_regs[0x2c] = inl(TRID_REG(card, T4D_MISCINT));
3673 //ali_registers.global_regs[0x20] = inl(TRID_REG(card,T4D_START_A));
3674 ali_registers.global_regs[0x21] = inl(TRID_REG(card, T4D_STOP_A));
3675
3676 //disable all IRQ bits
3677 outl(ALI_DISABLE_ALL_IRQ, TRID_REG(card, T4D_MISCINT));
3678
3679 for (i = 1; i < ALI_MIXER_REGS; i++)
3680 ali_registers.mixer_regs[i] = ali_ac97_read(card->ac97_codec[0],
3681 i * 2);
3682
3683 for (i = 0; i < ALI_GLOBAL_REGS; i++) {
3684 if ((i * 4 == T4D_MISCINT) || (i * 4 == T4D_STOP_A))
3685 continue;
3686 ali_registers.global_regs[i] = inl(TRID_REG(card, i * 4));
3687 }
3688
3689 for (i = 0; i < ALI_CHANNELS; i++) {
3690 outb(i, TRID_REG(card, T4D_LFO_GC_CIR));
3691 for (j = 0; j < ALI_CHANNEL_REGS; j++)
3692 ali_registers.channel_regs[i][j] = inl(TRID_REG(card,
3693 j * 4 + 0xe0));
3694 }
3695
3696	//Stop all HW channels
3697 outl(ALI_STOP_ALL_CHANNELS, TRID_REG(card, T4D_STOP_A));
3698
3699 spin_unlock_irqrestore(&card->lock, flags);
3700}
3701
3702static void
3703ali_restore_regs(struct trident_card *card)
3704{
3705 unsigned long flags;
3706 int i, j;
3707
3708 spin_lock_irqsave(&card->lock, flags);
3709
3710 for (i = 1; i < ALI_MIXER_REGS; i++)
3711 ali_ac97_write(card->ac97_codec[0], i * 2,
3712 ali_registers.mixer_regs[i]);
3713
3714 for (i = 0; i < ALI_CHANNELS; i++) {
3715 outb(i, TRID_REG(card, T4D_LFO_GC_CIR));
3716 for (j = 0; j < ALI_CHANNEL_REGS; j++)
3717 outl(ali_registers.channel_regs[i][j],
3718 TRID_REG(card, j * 4 + 0xe0));
3719 }
3720
3721 for (i = 0; i < ALI_GLOBAL_REGS; i++) {
3722 if ((i * 4 == T4D_MISCINT) || (i * 4 == T4D_STOP_A) ||
3723 (i * 4 == T4D_START_A))
3724 continue;
3725 outl(ali_registers.global_regs[i], TRID_REG(card, i * 4));
3726 }
3727
3728	//start HW channels
3729 outl(ali_registers.global_regs[0x20], TRID_REG(card, T4D_START_A));
3730 //restore IRQ enable bits
3731 outl(ali_registers.global_regs[0x2c], TRID_REG(card, T4D_MISCINT));
3732
3733 spin_unlock_irqrestore(&card->lock, flags);
3734}
3735
3736static int
3737trident_suspend(struct pci_dev *dev, pm_message_t unused)
3738{
3739 struct trident_card *card = pci_get_drvdata(dev);
3740
3741 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
3742 ali_save_regs(card);
3743 }
3744 return 0;
3745}
3746
3747static int
3748trident_resume(struct pci_dev *dev)
3749{
3750 struct trident_card *card = pci_get_drvdata(dev);
3751
3752 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
3753 ali_restore_regs(card);
3754 }
3755 return 0;
3756}
3757#endif
3758
3759static struct trident_channel *
3760ali_alloc_pcm_channel(struct trident_card *card)
3761{
3762 struct trident_pcm_bank *bank;
3763 int idx;
3764
3765 bank = &card->banks[BANK_A];
3766
3767 if (inl(TRID_REG(card, ALI_GLOBAL_CONTROL)) &
3768 (ALI_SPDIF_OUT_CH_ENABLE)) {
3769 idx = ALI_SPDIF_OUT_CHANNEL;
3770 if (!(bank->bitmap & (1 << idx))) {
3771 struct trident_channel *channel = &bank->channels[idx];
3772 bank->bitmap |= 1 << idx;
3773 channel->num = idx;
3774 return channel;
3775 }
3776 }
3777
3778 for (idx = ALI_PCM_OUT_CHANNEL_FIRST; idx <= ALI_PCM_OUT_CHANNEL_LAST;
3779 idx++) {
3780 if (!(bank->bitmap & (1 << idx))) {
3781 struct trident_channel *channel = &bank->channels[idx];
3782 bank->bitmap |= 1 << idx;
3783 channel->num = idx;
3784 return channel;
3785 }
3786 }
3787
3788	/* no more free channels available */
3789#if 0
3790 printk(KERN_ERR "ali: no more channels available on Bank A.\n");
3791#endif /* 0 */
3792 return NULL;
3793}
3794
3795static struct trident_channel *
3796ali_alloc_rec_pcm_channel(struct trident_card *card)
3797{
3798 struct trident_pcm_bank *bank;
3799 int idx;
3800
3801 if (inl(TRID_REG(card, ALI_GLOBAL_CONTROL)) & ALI_SPDIF_IN_SUPPORT)
3802 idx = ALI_SPDIF_IN_CHANNEL;
3803 else
3804 idx = ALI_PCM_IN_CHANNEL;
3805
3806 bank = &card->banks[BANK_A];
3807
3808 if (!(bank->bitmap & (1 << idx))) {
3809 struct trident_channel *channel = &bank->channels[idx];
3810 bank->bitmap |= 1 << idx;
3811 channel->num = idx;
3812 return channel;
3813 }
3814
3815	/* no free recordable channels available */
3816#if 0
3817 printk(KERN_ERR "ali: no recordable channels available on Bank A.\n");
3818#endif /* 0 */
3819 return NULL;
3820}
3821
3822static void
3823ali_set_spdif_out_rate(struct trident_card *card, unsigned int rate)
3824{
3825 unsigned char ch_st_sel;
3826 unsigned short status_rate;
3827
3828 switch (rate) {
3829 case 44100:
3830 status_rate = 0;
3831 break;
3832 case 32000:
3833 status_rate = 0x300;
3834 break;
3835 case 48000:
3836 default:
3837 status_rate = 0x200;
3838 break;
3839 }
3840
3841 /* select spdif_out */
3842 ch_st_sel = inb(TRID_REG(card, ALI_SPDIF_CTRL)) & ALI_SPDIF_OUT_CH_STATUS;
3843
3844 ch_st_sel |= 0x80; /* select right */
3845 outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL));
3846 outb(status_rate | 0x20, TRID_REG(card, ALI_SPDIF_CS + 2));
3847
3848 ch_st_sel &= (~0x80); /* select left */
3849 outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL));
3850 outw(status_rate | 0x10, TRID_REG(card, ALI_SPDIF_CS + 2));
3851}
3852
3853static void
3854ali_address_interrupt(struct trident_card *card)
3855{
3856 int i, channel;
3857 struct trident_state *state;
3858 u32 mask, channel_mask;
3859
3860 mask = trident_get_interrupt_mask(card, 0);
3861 for (i = 0; i < NR_HW_CH; i++) {
3862 if ((state = card->states[i]) == NULL)
3863 continue;
3864 channel = state->dmabuf.channel->num;
3865 if ((channel_mask = 1 << channel) & mask) {
3866 mask &= ~channel_mask;
3867 trident_ack_channel_interrupt(card, channel);
3868 udelay(100);
3869 state->dmabuf.update_flag |= ALI_ADDRESS_INT_UPDATE;
3870 trident_update_ptr(state);
3871 }
3872 }
3873 if (mask) {
3874 for (i = 0; i < NR_HW_CH; i++) {
3875 if (mask & (1 << i)) {
3876				printk(KERN_WARNING "ali: spurious channel irq %d.\n", i);
3877 trident_ack_channel_interrupt(card, i);
3878 trident_stop_voice(card, i);
3879 trident_disable_voice_irq(card, i);
3880 }
3881 }
3882 }
3883}
3884
3885/* Updating the counters of the other_states' DMA buffers without lock
3886protection does no harm, because the DMA engines and interrupts of all
3887multi-channel voices follow the master state's DMA, and the counters of the
3888master state's DMA are protected by a spinlock.
3889*/
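/* How a 6-channel (5.1) frame from user space is scattered (as implied by
   the copy loop below): the first two samples of each six-sample frame stay
   in the master state's dmabuf (presumably the front left/right pair), and
   the remaining four go one each to other_states[0..3]; which speaker each
   auxiliary channel drives is determined by the channel setup done in
   ali_setup_multi_channels() and ali_allocate_other_states_resources(). */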
3890static int
3891ali_write_5_1(struct trident_state *state, const char __user *buf,
3892 int cnt_for_multi_channel, unsigned int *copy_count,
3893 unsigned int *state_cnt)
3894{
3895
3896 struct dmabuf *dmabuf = &state->dmabuf;
3897 struct dmabuf *dmabuf_temp;
3898 const char __user *buffer = buf;
3899 unsigned swptr, other_dma_nums, sample_s;
3900 unsigned int i, loop;
3901
3902 other_dma_nums = 4;
3903 sample_s = sample_size[dmabuf->fmt] >> 1;
3904 swptr = dmabuf->swptr;
3905
3906 if ((i = state->multi_channels_adjust_count) > 0) {
3907 if (i == 1) {
3908 if (copy_from_user(dmabuf->rawbuf + swptr,
3909 buffer, sample_s))
3910 return -EFAULT;
3911 seek_offset(swptr, buffer, cnt_for_multi_channel,
3912 sample_s, *copy_count);
3913 i--;
3914 (*state_cnt) += sample_s;
3915 state->multi_channels_adjust_count++;
3916 } else
3917 i = i - (state->chans_num - other_dma_nums);
3918 for (; (i < other_dma_nums) && (cnt_for_multi_channel > 0); i++) {
3919 dmabuf_temp = &state->other_states[i]->dmabuf;
3920 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3921 buffer, sample_s))
3922 return -EFAULT;
3923 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3924 sample_s, *copy_count);
3925 }
3926 if (cnt_for_multi_channel == 0)
3927 state->multi_channels_adjust_count += i;
3928 }
3929 if (cnt_for_multi_channel > 0) {
3930 loop = cnt_for_multi_channel / (state->chans_num * sample_s);
3931 for (i = 0; i < loop; i++) {
3932 if (copy_from_user(dmabuf->rawbuf + swptr, buffer,
3933 sample_s * 2))
3934 return -EFAULT;
3935 seek_offset(swptr, buffer, cnt_for_multi_channel,
3936 sample_s * 2, *copy_count);
3937 (*state_cnt) += (sample_s * 2);
3938
3939 dmabuf_temp = &state->other_states[0]->dmabuf;
3940 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3941 buffer, sample_s))
3942 return -EFAULT;
3943 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3944 sample_s, *copy_count);
3945
3946 dmabuf_temp = &state->other_states[1]->dmabuf;
3947 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3948 buffer, sample_s))
3949 return -EFAULT;
3950 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3951 sample_s, *copy_count);
3952
3953 dmabuf_temp = &state->other_states[2]->dmabuf;
3954 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3955 buffer, sample_s))
3956 return -EFAULT;
3957 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3958 sample_s, *copy_count);
3959
3960 dmabuf_temp = &state->other_states[3]->dmabuf;
3961 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3962 buffer, sample_s))
3963 return -EFAULT;
3964 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3965 sample_s, *copy_count);
3966 }
3967
3968 if (cnt_for_multi_channel > 0) {
3969 state->multi_channels_adjust_count = cnt_for_multi_channel / sample_s;
3970
3971 if (copy_from_user(dmabuf->rawbuf + swptr, buffer, sample_s))
3972 return -EFAULT;
3973 seek_offset(swptr, buffer, cnt_for_multi_channel,
3974 sample_s, *copy_count);
3975 (*state_cnt) += sample_s;
3976
3977 if (cnt_for_multi_channel > 0) {
3978 if (copy_from_user(dmabuf->rawbuf + swptr,
3979 buffer, sample_s))
3980 return -EFAULT;
3981 seek_offset(swptr, buffer, cnt_for_multi_channel,
3982 sample_s, *copy_count);
3983 (*state_cnt) += sample_s;
3984
3985 if (cnt_for_multi_channel > 0) {
3986 int diff = state->chans_num - other_dma_nums;
3987 loop = state->multi_channels_adjust_count - diff;
3988 for (i = 0; i < loop; i++) {
3989 dmabuf_temp = &state->other_states[i]->dmabuf;
3990 if (copy_from_user(dmabuf_temp->rawbuf +
3991 dmabuf_temp->swptr,
3992 buffer, sample_s))
3993 return -EFAULT;
3994 seek_offset(dmabuf_temp->swptr, buffer,
3995 cnt_for_multi_channel,
3996 sample_s, *copy_count);
3997 }
3998 }
3999 }
4000 } else
4001 state->multi_channels_adjust_count = 0;
4002 }
4003 for (i = 0; i < other_dma_nums; i++) {
4004 dmabuf_temp = &state->other_states[i]->dmabuf;
4005 dmabuf_temp->swptr = dmabuf_temp->swptr % dmabuf_temp->dmasize;
4006 }
4007 return *state_cnt;
4008}
4009
4010static void
4011ali_free_other_states_resources(struct trident_state *state)
4012{
4013 int i;
4014 struct trident_card *card = state->card;
4015 struct trident_state *s;
4016 unsigned other_states_count;
4017
4018 other_states_count = state->chans_num - 2; /* except PCM L/R channels */
4019 for (i = 0; i < other_states_count; i++) {
4020 s = state->other_states[i];
4021 dealloc_dmabuf(&s->dmabuf, card->pci_dev);
4022 ali_disable_special_channel(s->card, s->dmabuf.channel->num);
4023 state->card->free_pcm_channel(s->card, s->dmabuf.channel->num);
4024 card->states[s->virt] = NULL;
4025 kfree(s);
4026 }
4027}
4028
4029static struct proc_dir_entry *res;
4030
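/* /proc write hook for the ALi 5451 S/PDIF controls (the "ALi5451" proc
 * entry created in trident_probe).  As the switch below shows, a single
 * character written to the entry selects the routing: '0' routes PCM to
 * S/PDIF out and disables the dedicated S/PDIF out channel, '1' and '2'
 * route the S/PDIF out channel itself as PCM or non-PCM data, '3' disables
 * S/PDIF in (the default), and '4' enables it. */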
4031static int
4032ali_write_proc(struct file *file, const char __user *buffer, unsigned long count, void *data)
4033{
4034 struct trident_card *card = (struct trident_card *) data;
4035 unsigned long flags;
4036 char c;
4037
4038 if (count < 0)
4039 return -EINVAL;
4040 if (count == 0)
4041 return 0;
4042 if (get_user(c, buffer))
4043 return -EFAULT;
4044
4045 spin_lock_irqsave(&card->lock, flags);
4046 switch (c) {
4047 case '0':
4048 ali_setup_spdif_out(card, ALI_PCM_TO_SPDIF_OUT);
4049 ali_disable_special_channel(card, ALI_SPDIF_OUT_CHANNEL);
4050 break;
4051 case '1':
4052 ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
4053 ALI_SPDIF_OUT_PCM);
4054 break;
4055 case '2':
4056 ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
4057 ALI_SPDIF_OUT_NON_PCM);
4058 break;
4059 case '3':
4060 ali_disable_spdif_in(card); //default
4061 break;
4062 case '4':
4063 ali_setup_spdif_in(card);
4064 break;
4065 }
4066 spin_unlock_irqrestore(&card->lock, flags);
4067
4068 return count;
4069}
4070
4071/* OSS /dev/mixer file operation methods */
4072static int
4073trident_open_mixdev(struct inode *inode, struct file *file)
4074{
4075 int i = 0;
4076 int minor = iminor(inode);
4077 struct trident_card *card = devs;
4078
4079 for (card = devs; card != NULL; card = card->next)
4080 for (i = 0; i < NR_AC97; i++)
4081 if (card->ac97_codec[i] != NULL &&
4082 card->ac97_codec[i]->dev_mixer == minor)
4083 goto match;
4084
4085 if (!card) {
4086 return -ENODEV;
4087 }
4088 match:
4089 file->private_data = card->ac97_codec[i];
4090
4091 return nonseekable_open(inode, file);
4092}
4093
4094static int
4095trident_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
4096 unsigned long arg)
4097{
4098 struct ac97_codec *codec = (struct ac97_codec *) file->private_data;
4099
4100 return codec->mixer_ioctl(codec, cmd, arg);
4101}
4102
4103static const struct file_operations trident_mixer_fops = {
4104 .owner = THIS_MODULE,
4105 .llseek = no_llseek,
4106 .ioctl = trident_ioctl_mixdev,
4107 .open = trident_open_mixdev,
4108};
4109
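/* Reset sequence for the ALi 5451, as implemented below: briefly set and
 * clear bit 27 of the M1533 south bridge's config dword at 0x7c, toggle
 * reset bits in the audio function's own config dword at 0x44, then poll
 * AC97_POWER_CONTROL until its low nibble reads 0xf (codec sections ready),
 * giving up quietly after ~2000 polls for codecs that do not report PM
 * status. */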
4110static int
4111ali_reset_5451(struct trident_card *card)
4112{
4113 struct pci_dev *pci_dev = NULL;
4114 unsigned int dwVal;
4115 unsigned short wCount, wReg;
4116
4117 pci_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533,
4118 pci_dev);
4119 if (pci_dev == NULL)
4120 return -1;
4121
4122 pci_read_config_dword(pci_dev, 0x7c, &dwVal);
4123 pci_write_config_dword(pci_dev, 0x7c, dwVal | 0x08000000);
4124 udelay(5000);
4125 pci_read_config_dword(pci_dev, 0x7c, &dwVal);
4126 pci_write_config_dword(pci_dev, 0x7c, dwVal & 0xf7ffffff);
4127 udelay(5000);
4128 pci_dev_put(pci_dev);
4129
4130 pci_dev = card->pci_dev;
4131 if (pci_dev == NULL)
4132 return -1;
4133
4134 pci_read_config_dword(pci_dev, 0x44, &dwVal);
4135 pci_write_config_dword(pci_dev, 0x44, dwVal | 0x000c0000);
4136 udelay(500);
4137 pci_read_config_dword(pci_dev, 0x44, &dwVal);
4138 pci_write_config_dword(pci_dev, 0x44, dwVal & 0xfffbffff);
4139 udelay(5000);
4140
4141	/* TODO: recognize whether we have a PM-capable codec, and only do */
4142	/* the polling below if it is */
4143 wCount = 2000;
4144 while (wCount--) {
4145 wReg = ali_ac97_get(card, 0, AC97_POWER_CONTROL);
4146 if ((wReg & 0x000f) == 0x000f)
4147 return 0;
4148 udelay(5000);
4149 }
4150	/* This is non-fatal if you have a non-PM-capable codec. */
4151 return 0;
4152}
4153
4154/* AC97 codec initialisation. */
4155static int __devinit
4156trident_ac97_init(struct trident_card *card)
4157{
4158 int num_ac97 = 0;
4159 unsigned long ready_2nd = 0;
4160 struct ac97_codec *codec;
4161 int i = 0;
4162
4163	/* initialize the controller side of the AC link, and find out whether a
4164	   secondary codec really exists */
4165 switch (card->pci_id) {
4166 case PCI_DEVICE_ID_ALI_5451:
4167 if (ali_reset_5451(card)) {
4168 printk(KERN_ERR "trident_ac97_init: error "
4169 "resetting 5451.\n");
4170 return -1;
4171 }
4172 outl(0x80000001, TRID_REG(card, ALI_GLOBAL_CONTROL));
4173 outl(0x00000000, TRID_REG(card, T4D_AINTEN_A));
4174 outl(0xffffffff, TRID_REG(card, T4D_AINT_A));
4175 outl(0x00000000, TRID_REG(card, T4D_MUSICVOL_WAVEVOL));
4176 outb(0x10, TRID_REG(card, ALI_MPUR2));
4177 ready_2nd = inl(TRID_REG(card, ALI_SCTRL));
4178 ready_2nd &= 0x3fff;
4179 outl(ready_2nd | PCMOUT | 0x8000, TRID_REG(card, ALI_SCTRL));
4180 ready_2nd = inl(TRID_REG(card, ALI_SCTRL));
4181 ready_2nd &= SI_AC97_SECONDARY_READY;
4182 if (card->revision < ALI_5451_V02)
4183 ready_2nd = 0;
4184 break;
4185 case PCI_DEVICE_ID_SI_7018:
4186 /* disable AC97 GPIO interrupt */
4187 outl(0x00, TRID_REG(card, SI_AC97_GPIO));
4188		/* at power-up the AC link is in cold reset mode, so stop it */
4189 outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT | SECONDARY_ID,
4190 TRID_REG(card, SI_SERIAL_INTF_CTRL));
4191		/* it takes a long time to recover from a cold reset */
4192 /* (especially when you have more than one codec) */
4193 udelay(2000);
4194 ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL));
4195 ready_2nd &= SI_AC97_SECONDARY_READY;
4196 break;
4197 case PCI_DEVICE_ID_TRIDENT_4DWAVE_DX:
4198 /* playback on */
4199 outl(DX_AC97_PLAYBACK, TRID_REG(card, DX_ACR2_AC97_COM_STAT));
4200 break;
4201 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
4202 /* enable AC97 Output Slot 3,4 (PCM Left/Right Playback) */
4203 outl(NX_AC97_PCM_OUTPUT, TRID_REG(card, NX_ACR0_AC97_COM_STAT));
4204 ready_2nd = inl(TRID_REG(card, NX_ACR0_AC97_COM_STAT));
4205 ready_2nd &= NX_AC97_SECONDARY_READY;
4206 break;
4207 case PCI_DEVICE_ID_INTERG_5050:
4208 /* disable AC97 GPIO interrupt */
4209 outl(0x00, TRID_REG(card, SI_AC97_GPIO));
4210		/* at power-up, the AC link is in cold reset mode, so stop it */
4211 outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT,
4212 TRID_REG(card, SI_SERIAL_INTF_CTRL));
4213		/* it takes a long time to recover from a cold reset (especially */
4214 /* when you have more than one codec) */
4215 udelay(2000);
4216 ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL));
4217 ready_2nd &= SI_AC97_SECONDARY_READY;
4218 break;
4219 }
4220
4221 for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++) {
4222 if ((codec = ac97_alloc_codec()) == NULL)
4223 return -ENOMEM;
4224
4225 /* initialize some basic codec information, other fields */
4226 /* will be filled in ac97_probe_codec */
4227 codec->private_data = card;
4228 codec->id = num_ac97;
4229
4230 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
4231 codec->codec_read = ali_ac97_read;
4232 codec->codec_write = ali_ac97_write;
4233 } else {
4234 codec->codec_read = trident_ac97_get;
4235 codec->codec_write = trident_ac97_set;
4236 }
4237
4238 if (ac97_probe_codec(codec) == 0)
4239 break;
4240
4241 codec->dev_mixer = register_sound_mixer(&trident_mixer_fops, -1);
4242 if (codec->dev_mixer < 0) {
4243 printk(KERN_ERR "trident: couldn't register mixer!\n");
4244 ac97_release_codec(codec);
4245 break;
4246 }
4247
4248 card->ac97_codec[num_ac97] = codec;
4249
4250 /* if there is no secondary codec at all, don't probe any more */
4251 if (!ready_2nd)
4252 break;
4253 }
4254
4255 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
4256 for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++) {
4257 if (card->ac97_codec[num_ac97] == NULL)
4258 break;
4259 for (i = 0; i < 64; i++) {
4260 u16 reg = ali_ac97_get(card, num_ac97, i * 2);
4261 card->mixer_regs[i][num_ac97] = reg;
4262 }
4263 }
4264 }
4265 return num_ac97 + 1;
4266}
4267
4268#ifdef SUPPORT_JOYSTICK
4269/* Gameport functions for the card's ADC gameport */
4270
4271static unsigned char trident_game_read(struct gameport *gameport)
4272{
4273 struct trident_card *card = gameport->port_data;
4274
4275 return inb(TRID_REG(card, T4D_GAME_LEG));
4276}
4277
4278static void trident_game_trigger(struct gameport *gameport)
4279{
4280 struct trident_card *card = gameport->port_data;
4281
4282 outb(0xff, TRID_REG(card, T4D_GAME_LEG));
4283}
4284
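/* Cooked gameport read, per the code below: the button state is the
 * inverted top nibble of T4D_GAME_LEG, and the four axes are read as
 * 16-bit words starting at T4D_GAME_AXD; a raw value of 0xffff is treated
 * as "no axis" (-1). */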
4285static int trident_game_cooked_read(struct gameport *gameport,
4286 int *axes, int *buttons)
4287{
4288 struct trident_card *card = gameport->port_data;
4289 int i;
4290
4291 *buttons = (~inb(TRID_REG(card, T4D_GAME_LEG)) >> 4) & 0xf;
4292
4293 for (i = 0; i < 4; i++) {
4294 axes[i] = inw(TRID_REG(card, T4D_GAME_AXD) + i * sizeof (u16));
4295 if (axes[i] == 0xffff)
4296 axes[i] = -1;
4297 }
4298
4299 return 0;
4300}
4301
4302static int trident_game_open(struct gameport *gameport, int mode)
4303{
4304 struct trident_card *card = gameport->port_data;
4305
4306 switch (mode) {
4307 case GAMEPORT_MODE_COOKED:
4308 outb(0x80, TRID_REG(card, T4D_GAME_CR));
4309 msleep(20);
4310 return 0;
4311 case GAMEPORT_MODE_RAW:
4312 outb(0x00, TRID_REG(card, T4D_GAME_CR));
4313 return 0;
4314 default:
4315 return -1;
4316 }
4317
4318 return 0;
4319}
4320
4321static int __devinit trident_register_gameport(struct trident_card *card)
4322{
4323 struct gameport *gp;
4324
4325 card->gameport = gp = gameport_allocate_port();
4326 if (!gp) {
4327 printk(KERN_ERR "trident: can not allocate memory for gameport\n");
4328 return -ENOMEM;
4329 }
4330
4331 gameport_set_name(gp, "Trident 4DWave");
4332 gameport_set_phys(gp, "pci%s/gameport0", pci_name(card->pci_dev));
4333 gp->read = trident_game_read;
4334 gp->trigger = trident_game_trigger;
4335 gp->cooked_read = trident_game_cooked_read;
4336 gp->open = trident_game_open;
4337 gp->fuzz = 64;
4338 gp->port_data = card;
4339
4340 gameport_register_port(gp);
4341
4342 return 0;
4343}
4344
4345static inline void trident_unregister_gameport(struct trident_card *card)
4346{
4347 if (card->gameport)
4348 gameport_unregister_port(card->gameport);
4349}
4350
4351#else
4352static inline int trident_register_gameport(struct trident_card *card) { return -ENOSYS; }
4353static inline void trident_unregister_gameport(struct trident_card *card) { }
4354#endif /* SUPPORT_JOYSTICK */
4355
4356/* install the driver; we do not allocate any hardware channel or DMA buffer */
4357/* now, they are deferred until "ACCESS" time (in prog_dmabuf, called by */
4358/* open/read/write/ioctl/mmap) */
4359static int __devinit
4360trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
4361{
4362 unsigned long iobase;
4363 struct trident_card *card;
4364 u8 bits;
4365 u8 revision;
4366 int i = 0;
4367 u16 temp;
4368 struct pci_dev *pci_dev_m1533 = NULL;
4369 int rc = -ENODEV;
4370 u64 dma_mask;
4371
4372 if (pci_enable_device(pci_dev))
4373 goto out;
4374
4375 if (pci_dev->device == PCI_DEVICE_ID_ALI_5451)
4376 dma_mask = ALI_DMA_MASK;
4377 else
4378 dma_mask = TRIDENT_DMA_MASK;
4379 if (pci_set_dma_mask(pci_dev, dma_mask)) {
4380 printk(KERN_ERR "trident: architecture does not support"
4381 " %s PCI busmaster DMA\n",
4382 pci_dev->device == PCI_DEVICE_ID_ALI_5451 ?
4383 "32-bit" : "30-bit");
4384 goto out;
4385 }
4386 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &revision);
4387
4388 if (pci_id->device == PCI_DEVICE_ID_INTERG_5050)
4389 iobase = pci_resource_start(pci_dev, 1);
4390 else
4391 iobase = pci_resource_start(pci_dev, 0);
4392
4393 if (!request_region(iobase, 256, card_names[pci_id->driver_data])) {
4394 printk(KERN_ERR "trident: can't allocate I/O space at "
4395 "0x%4.4lx\n", iobase);
4396 goto out;
4397 }
4398
4399 rc = -ENOMEM;
4400 if ((card = kzalloc(sizeof(*card), GFP_KERNEL)) == NULL) {
4401 printk(KERN_ERR "trident: out of memory\n");
4402 goto out_release_region;
4403 }
4404
4405 init_timer(&card->timer);
4406 card->iobase = iobase;
4407 card->pci_dev = pci_dev_get(pci_dev);
4408 card->pci_id = pci_id->device;
4409 card->revision = revision;
4410 card->irq = pci_dev->irq;
4411 card->next = devs;
4412 card->magic = TRIDENT_CARD_MAGIC;
4413 card->banks[BANK_A].addresses = &bank_a_addrs;
4414 card->banks[BANK_A].bitmap = 0UL;
4415 card->banks[BANK_B].addresses = &bank_b_addrs;
4416 card->banks[BANK_B].bitmap = 0UL;
4417
4418 mutex_init(&card->open_mutex);
4419 spin_lock_init(&card->lock);
4420 init_timer(&card->timer);
4421
4422 devs = card;
4423
4424 pci_set_master(pci_dev);
4425
4426 printk(KERN_INFO "trident: %s found at IO 0x%04lx, IRQ %d\n",
4427 card_names[pci_id->driver_data], card->iobase, card->irq);
4428
4429 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
4430 /* ALi channel Management */
4431 card->alloc_pcm_channel = ali_alloc_pcm_channel;
4432 card->alloc_rec_pcm_channel = ali_alloc_rec_pcm_channel;
4433 card->free_pcm_channel = ali_free_pcm_channel;
4434
4435 card->address_interrupt = ali_address_interrupt;
4436
4437 /* Added by Matt Wu 01-05-2001 for spdif in */
4438 card->multi_channel_use_count = 0;
4439 card->rec_channel_use_count = 0;
4440
4441 /* ALi SPDIF OUT function */
4442 if (card->revision == ALI_5451_V02) {
4443 ali_setup_spdif_out(card, ALI_PCM_TO_SPDIF_OUT);
4444 res = create_proc_entry("ALi5451", 0, NULL);
4445 if (res) {
4446 res->write_proc = ali_write_proc;
4447 res->data = card;
4448 }
4449 }
4450
4451 /* Add H/W Volume Control By Matt Wu Jul. 06, 2001 */
4452 card->hwvolctl = 0;
4453 pci_dev_m1533 = pci_get_device(PCI_VENDOR_ID_AL,
4454 PCI_DEVICE_ID_AL_M1533,
4455 pci_dev_m1533);
4456 rc = -ENODEV;
4457 if (pci_dev_m1533 == NULL)
4458 goto out_proc_fs;
4459 pci_read_config_byte(pci_dev_m1533, 0x63, &bits);
4460 if (bits & (1 << 5))
4461 card->hwvolctl = 1;
4462 if (card->hwvolctl) {
4463 /* Clear m1533 pci cfg 78h bit 30 to zero, which makes
4464 GPIO11/12/13 work as ACGP_UP/DOWN/MUTE. */
4465 pci_read_config_byte(pci_dev_m1533, 0x7b, &bits);
4466 bits &= 0xbf; /*clear bit 6 */
4467 pci_write_config_byte(pci_dev_m1533, 0x7b, bits);
4468 }
4469 pci_dev_put(pci_dev_m1533);
4470
4471 } else if (card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
4472 card->alloc_pcm_channel = cyber_alloc_pcm_channel;
4473 card->alloc_rec_pcm_channel = cyber_alloc_pcm_channel;
4474 card->free_pcm_channel = cyber_free_pcm_channel;
4475 card->address_interrupt = cyber_address_interrupt;
4476 cyber_init_ritual(card);
4477 } else {
4478 card->alloc_pcm_channel = trident_alloc_pcm_channel;
4479 card->alloc_rec_pcm_channel = trident_alloc_pcm_channel;
4480 card->free_pcm_channel = trident_free_pcm_channel;
4481 card->address_interrupt = trident_address_interrupt;
4482 }
4483
4484 /* claim our irq */
4485 rc = -ENODEV;
4486 if (request_irq(card->irq, &trident_interrupt, IRQF_SHARED,
4487 card_names[pci_id->driver_data], card)) {
4488 printk(KERN_ERR "trident: unable to allocate irq %d\n",
4489 card->irq);
4490 goto out_proc_fs;
4491 }
4492 /* register /dev/dsp */
4493 if ((card->dev_audio = register_sound_dsp(&trident_audio_fops, -1)) < 0) {
4494 printk(KERN_ERR "trident: couldn't register DSP device!\n");
4495 goto out_free_irq;
4496 }
4497 card->mixer_regs_ready = 0;
4498 /* initialize AC97 codec and register /dev/mixer */
4499 if (trident_ac97_init(card) <= 0) {
4500 /* unregister audio devices */
4501 for (i = 0; i < NR_AC97; i++) {
4502 if (card->ac97_codec[i] != NULL) {
4503 struct ac97_codec* codec = card->ac97_codec[i];
4504 unregister_sound_mixer(codec->dev_mixer);
4505 ac97_release_codec(codec);
4506 }
4507 }
4508 goto out_unregister_sound_dsp;
4509 }
4510 card->mixer_regs_ready = 1;
4511 outl(0x00, TRID_REG(card, T4D_MUSICVOL_WAVEVOL));
4512
4513 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
4514 /* Add H/W Volume Control By Matt Wu Jul. 06, 2001 */
4515 if (card->hwvolctl) {
4516 /* Enable GPIO IRQ (MISCINT bit 18h) */
4517 temp = inw(TRID_REG(card, T4D_MISCINT + 2));
4518 temp |= 0x0004;
4519 outw(temp, TRID_REG(card, T4D_MISCINT + 2));
4520
4521			/* Enable H/W Volume Control, GLOBAL CONTROL bit 0 */
4522 temp = inw(TRID_REG(card, ALI_GLOBAL_CONTROL));
4523 temp |= 0x0001;
4524 outw(temp, TRID_REG(card, ALI_GLOBAL_CONTROL));
4525
4526 }
4527 if (card->revision == ALI_5451_V02)
4528 ali_close_multi_channels();
4529 /* edited by HMSEO for GT sound */
4530#if defined(CONFIG_ALPHA_NAUTILUS) || defined(CONFIG_ALPHA_GENERIC)
4531 {
4532 u16 ac97_data;
4533 extern struct hwrpb_struct *hwrpb;
4534
4535 if ((hwrpb->sys_type) == 201) {
4536 printk(KERN_INFO "trident: Running on Alpha system "
4537 "type Nautilus\n");
4538 ac97_data = ali_ac97_get(card, 0, AC97_POWER_CONTROL);
4539 ali_ac97_set(card, 0, AC97_POWER_CONTROL,
4540 ac97_data | ALI_EAPD_POWER_DOWN);
4541 }
4542 }
4543#endif /* CONFIG_ALPHA_NAUTILUS || CONFIG_ALPHA_GENERIC */
4544 /* edited by HMSEO for GT sound */
4545 }
4546 rc = 0;
4547 pci_set_drvdata(pci_dev, card);
4548
4549 /* Enable Address Engine Interrupts */
4550 trident_enable_loop_interrupts(card);
4551
4552 /* Register gameport */
4553 trident_register_gameport(card);
4554
4555out:
4556 return rc;
4557
4558out_unregister_sound_dsp:
4559 unregister_sound_dsp(card->dev_audio);
4560out_free_irq:
4561 free_irq(card->irq, card);
4562out_proc_fs:
4563 pci_dev_put(card->pci_dev);
4564 if (res) {
4565 remove_proc_entry("ALi5451", NULL);
4566 res = NULL;
4567 }
4568 kfree(card);
4569 devs = NULL;
4570out_release_region:
4571 release_region(iobase, 256);
4572 return rc;
4573}
4574
4575static void __devexit
4576trident_remove(struct pci_dev *pci_dev)
4577{
4578 int i;
4579 struct trident_card *card = pci_get_drvdata(pci_dev);
4580
4581 /*
4582 * Kill running timers before unload. We can't have them
4583 * going off after rmmod!
4584 */
4585 if (card->hwvolctl)
4586 del_timer_sync(&card->timer);
4587
4588 /* ALi S/PDIF and Power Management */
4589 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
4590 ali_setup_spdif_out(card, ALI_PCM_TO_SPDIF_OUT);
4591 ali_disable_special_channel(card, ALI_SPDIF_OUT_CHANNEL);
4592 ali_disable_spdif_in(card);
4593 remove_proc_entry("ALi5451", NULL);
4594 }
4595
4596 /* Unregister gameport */
4597 trident_unregister_gameport(card);
4598
4599 /* Kill interrupts, and SP/DIF */
4600 trident_disable_loop_interrupts(card);
4601
4602 /* free hardware resources */
4603 free_irq(card->irq, card);
4604 release_region(card->iobase, 256);
4605
4606 /* unregister audio devices */
4607 for (i = 0; i < NR_AC97; i++)
4608 if (card->ac97_codec[i] != NULL) {
4609 unregister_sound_mixer(card->ac97_codec[i]->dev_mixer);
4610 ac97_release_codec(card->ac97_codec[i]);
4611 }
4612 unregister_sound_dsp(card->dev_audio);
4613
4614 pci_set_drvdata(pci_dev, NULL);
4615 pci_dev_put(card->pci_dev);
4616 kfree(card);
4617}
4618
4619MODULE_AUTHOR("Alan Cox, Aaron Holtzman, Ollie Lho, Ching Ling Lee, Muli Ben-Yehuda");
4620MODULE_DESCRIPTION("Trident 4DWave/SiS 7018/ALi 5451 and Tvia/IGST CyberPro5050 PCI "
4621 "Audio Driver");
4622MODULE_LICENSE("GPL");
4623
4624#define TRIDENT_MODULE_NAME "trident"
4625
4626static struct pci_driver trident_pci_driver = {
4627 .name = TRIDENT_MODULE_NAME,
4628 .id_table = trident_pci_tbl,
4629 .probe = trident_probe,
4630 .remove = __devexit_p(trident_remove),
4631#ifdef CONFIG_PM
4632 .suspend = trident_suspend,
4633 .resume = trident_resume
4634#endif
4635};
4636
4637static int __init
4638trident_init_module(void)
4639{
4640 printk(KERN_INFO "Trident 4DWave/SiS 7018/ALi 5451,Tvia CyberPro "
4641 "5050 PCI Audio, version " DRIVER_VERSION ", " __TIME__ " "
4642 __DATE__ "\n");
4643
4644 return pci_register_driver(&trident_pci_driver);
4645}
4646
4647static void __exit
4648trident_cleanup_module(void)
4649{
4650 pci_unregister_driver(&trident_pci_driver);
4651}
4652
4653module_init(trident_init_module);
4654module_exit(trident_cleanup_module);
diff --git a/sound/oss/trident.h b/sound/oss/trident.h
deleted file mode 100644
index ff30a1d7c2f1..000000000000
--- a/sound/oss/trident.h
+++ /dev/null
@@ -1,358 +0,0 @@
1#ifndef __TRID4DWAVE_H
2#define __TRID4DWAVE_H
3
4/*
5 * audio@tridentmicro.com
6 * Fri Feb 19 15:55:28 MST 1999
7 * Definitions for Trident 4DWave DX/NX chips
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 */
25
26/* PCI vendor and device ID */
27#ifndef PCI_VENDOR_ID_TRIDENT
28#define PCI_VENDOR_ID_TRIDENT 0x1023
29#endif
30
31#ifndef PCI_VENDOR_ID_SI
32#define PCI_VENDOR_ID_SI 0x1039
33#endif
34
35#ifndef PCI_VENDOR_ID_ALI
36#define PCI_VENDOR_ID_ALI 0x10b9
37#endif
38
39#ifndef PCI_DEVICE_ID_TRIDENT_4DWAVE_DX
40#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
41#endif
42
43#ifndef PCI_DEVICE_ID_TRIDENT_4DWAVE_NX
44#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001
45#endif
46
47#ifndef PCI_DEVICE_ID_SI_7018
48#define PCI_DEVICE_ID_SI_7018 0x7018
49#endif
50
51#ifndef PCI_DEVICE_ID_ALI_5451
52#define PCI_DEVICE_ID_ALI_5451 0x5451
53#endif
54
55#ifndef PCI_DEVICE_ID_ALI_1533
56#define PCI_DEVICE_ID_ALI_1533 0x1533
57#endif
58
59#define CHANNEL_REGS 5
60#define CHANNEL_START 0xe0 // The first bytes of the contiguous register space.
61
62#define BANK_A 0
63#define BANK_B 1
64#define NR_BANKS 2
65
66#define TRIDENT_FMT_STEREO 0x01
67#define TRIDENT_FMT_16BIT 0x02
68#define TRIDENT_FMT_MASK 0x03
69
70#define DAC_RUNNING 0x01
71#define ADC_RUNNING 0x02
72
73/* Register Addresses */
74
75/* operational registers common to DX, NX, 7018 */
76enum trident_op_registers {
77 T4D_GAME_CR = 0x30, T4D_GAME_LEG = 0x31,
78 T4D_GAME_AXD = 0x34,
79 T4D_REC_CH = 0x70,
80 T4D_START_A = 0x80, T4D_STOP_A = 0x84,
81 T4D_DLY_A = 0x88, T4D_SIGN_CSO_A = 0x8c,
82 T4D_CSPF_A = 0x90, T4D_CEBC_A = 0x94,
83 T4D_AINT_A = 0x98, T4D_EINT_A = 0x9c,
84 T4D_LFO_GC_CIR = 0xa0, T4D_AINTEN_A = 0xa4,
85 T4D_MUSICVOL_WAVEVOL = 0xa8, T4D_SBDELTA_DELTA_R = 0xac,
86 T4D_MISCINT = 0xb0, T4D_START_B = 0xb4,
87 T4D_STOP_B = 0xb8, T4D_CSPF_B = 0xbc,
88 T4D_SBBL_SBCL = 0xc0, T4D_SBCTRL_SBE2R_SBDD = 0xc4,
89 T4D_STIMER = 0xc8, T4D_LFO_B_I2S_DELTA = 0xcc,
90 T4D_AINT_B = 0xd8, T4D_AINTEN_B = 0xdc,
91 ALI_MPUR2 = 0x22, ALI_GPIO = 0x7c,
92 ALI_EBUF1 = 0xf4,
93 ALI_EBUF2 = 0xf8
94};
95
96enum ali_op_registers {
97 ALI_SCTRL = 0x48,
98 ALI_GLOBAL_CONTROL = 0xd4,
99 ALI_STIMER = 0xc8,
100 ALI_SPDIF_CS = 0x70,
101 ALI_SPDIF_CTRL = 0x74
102};
103
104enum ali_registers_number {
105 ALI_GLOBAL_REGS = 56,
106 ALI_CHANNEL_REGS = 8,
107 ALI_MIXER_REGS = 20
108};
109
110enum ali_sctrl_control_bit {
111 ALI_SPDIF_OUT_ENABLE = 0x20
112};
113
114enum ali_global_control_bit {
115 ALI_SPDIF_OUT_SEL_PCM = 0x00000400,
116 ALI_SPDIF_IN_SUPPORT = 0x00000800,
117 ALI_SPDIF_OUT_CH_ENABLE = 0x00008000,
118 ALI_SPDIF_IN_CH_ENABLE = 0x00080000,
119 ALI_PCM_IN_DISABLE = 0x7fffffff,
120 ALI_PCM_IN_ENABLE = 0x80000000,
121 ALI_SPDIF_IN_CH_DISABLE = 0xfff7ffff,
122 ALI_SPDIF_OUT_CH_DISABLE = 0xffff7fff,
123 ALI_SPDIF_OUT_SEL_SPDIF = 0xfffffbff
124
125};
126
127enum ali_spdif_control_bit {
128 ALI_SPDIF_IN_FUNC_ENABLE = 0x02,
129 ALI_SPDIF_IN_CH_STATUS = 0x40,
130 ALI_SPDIF_OUT_CH_STATUS = 0xbf
131
132};
133
134enum ali_control_all {
135 ALI_DISABLE_ALL_IRQ = 0,
136 ALI_CHANNELS = 32,
137 ALI_STOP_ALL_CHANNELS = 0xffffffff,
138 ALI_MULTI_CHANNELS_START_STOP = 0x07800000
139};
140
141enum ali_EMOD_control_bit {
142 ALI_EMOD_DEC = 0x00000000,
143 ALI_EMOD_INC = 0x10000000,
144 ALI_EMOD_Delay = 0x20000000,
145 ALI_EMOD_Still = 0x30000000
146};
147
148enum ali_pcm_in_channel_num {
149 ALI_NORMAL_CHANNEL = 0,
150 ALI_SPDIF_OUT_CHANNEL = 15,
151 ALI_SPDIF_IN_CHANNEL = 19,
152 ALI_LEF_CHANNEL = 23,
153 ALI_CENTER_CHANNEL = 24,
154 ALI_SURR_RIGHT_CHANNEL = 25,
155 ALI_SURR_LEFT_CHANNEL = 26,
156 ALI_PCM_IN_CHANNEL = 31
157};
158
159enum ali_pcm_out_channel_num {
160 ALI_PCM_OUT_CHANNEL_FIRST = 0,
161 ALI_PCM_OUT_CHANNEL_LAST = 31
162};
163
164enum ali_ac97_power_control_bit {
165 ALI_EAPD_POWER_DOWN = 0x8000
166};
167
168enum ali_update_ptr_flags {
169 ALI_ADDRESS_INT_UPDATE = 0x01
170};
171
172enum ali_revision {
173 ALI_5451_V02 = 0x02
174};
175
176enum ali_spdif_out_control {
177 ALI_PCM_TO_SPDIF_OUT = 0,
178 ALI_SPDIF_OUT_TO_SPDIF_OUT = 1,
179 ALI_SPDIF_OUT_PCM = 0,
180 ALI_SPDIF_OUT_NON_PCM = 2
181};
182
183/* S/PDIF Operational Registers for 4D-NX */
184enum nx_spdif_registers {
185 NX_SPCTRL_SPCSO = 0x24, NX_SPLBA = 0x28,
186 NX_SPESO = 0x2c, NX_SPCSTATUS = 0x64
187};
188
189/* OP registers to access each hardware channel */
190enum channel_registers {
191 CH_DX_CSO_ALPHA_FMS = 0xe0, CH_DX_ESO_DELTA = 0xe8,
192 CH_DX_FMC_RVOL_CVOL = 0xec,
193 CH_NX_DELTA_CSO = 0xe0, CH_NX_DELTA_ESO = 0xe8,
194 CH_NX_ALPHA_FMS_FMC_RVOL_CVOL = 0xec,
195 CH_LBA = 0xe4,
196 CH_GVSEL_PAN_VOL_CTRL_EC = 0xf0
197};
198
199/* registers to read/write/control AC97 codec */
200enum dx_ac97_registers {
201 DX_ACR0_AC97_W = 0x40, DX_ACR1_AC97_R = 0x44,
202 DX_ACR2_AC97_COM_STAT = 0x48
203};
204
205enum nx_ac97_registers {
206 NX_ACR0_AC97_COM_STAT = 0x40, NX_ACR1_AC97_W = 0x44,
207 NX_ACR2_AC97_R_PRIMARY = 0x48, NX_ACR3_AC97_R_SECONDARY = 0x4c
208};
209
210enum si_ac97_registers {
211 SI_AC97_WRITE = 0x40, SI_AC97_READ = 0x44,
212 SI_SERIAL_INTF_CTRL = 0x48, SI_AC97_GPIO = 0x4c
213};
214
215enum ali_ac97_registers {
216 ALI_AC97_WRITE = 0x40, ALI_AC97_READ = 0x44
217};
218
219/* Bit mask for operational registers */
220#define AC97_REG_ADDR 0x000000ff
221
222enum ali_ac97_bits {
223 ALI_AC97_BUSY_WRITE = 0x8000, ALI_AC97_BUSY_READ = 0x8000,
224 ALI_AC97_WRITE_ACTION = 0x8000, ALI_AC97_READ_ACTION = 0x8000,
225 ALI_AC97_AUDIO_BUSY = 0x4000, ALI_AC97_SECONDARY = 0x0080,
226 ALI_AC97_READ_MIXER_REGISTER = 0xfeff,
227 ALI_AC97_WRITE_MIXER_REGISTER = 0x0100
228};
229
230enum sis7018_ac97_bits {
231 SI_AC97_BUSY_WRITE = 0x8000, SI_AC97_BUSY_READ = 0x8000,
232 SI_AC97_AUDIO_BUSY = 0x4000, SI_AC97_MODEM_BUSY = 0x2000,
233 SI_AC97_SECONDARY = 0x0080
234};
235
236enum trident_dx_ac97_bits {
237 DX_AC97_BUSY_WRITE = 0x8000, DX_AC97_BUSY_READ = 0x8000,
238 DX_AC97_READY = 0x0010, DX_AC97_RECORD = 0x0008,
239 DX_AC97_PLAYBACK = 0x0002
240};
241
242enum trident_nx_ac97_bits {
243 /* ACR1-3 */
244 NX_AC97_BUSY_WRITE = 0x0800, NX_AC97_BUSY_READ = 0x0800,
245 NX_AC97_BUSY_DATA = 0x0400, NX_AC97_WRITE_SECONDARY = 0x0100,
246 /* ACR0 */
247 NX_AC97_SECONDARY_READY = 0x0040, NX_AC97_SECONDARY_RECORD = 0x0020,
248 NX_AC97_SURROUND_OUTPUT = 0x0010,
249 NX_AC97_PRIMARY_READY = 0x0008, NX_AC97_PRIMARY_RECORD = 0x0004,
250 NX_AC97_PCM_OUTPUT = 0x0002,
251 NX_AC97_WARM_RESET = 0x0001
252};
253
254enum serial_intf_ctrl_bits {
255 WARM_REST = 0x00000001, COLD_RESET = 0x00000002,
256 I2S_CLOCK = 0x00000004, PCM_SEC_AC97= 0x00000008,
257 AC97_DBL_RATE = 0x00000010, SPDIF_EN = 0x00000020,
258 I2S_OUTPUT_EN = 0x00000040, I2S_INPUT_EN = 0x00000080,
259 PCMIN = 0x00000100, LINE1IN = 0x00000200,
260 MICIN = 0x00000400, LINE2IN = 0x00000800,
261 HEAD_SET_IN = 0x00001000, GPIOIN = 0x00002000,
262 /* 7018 spec says id = 01 but the demo board routed to 10
263 SECONDARY_ID= 0x00004000, */
264 SECONDARY_ID= 0x00004000,
265 PCMOUT = 0x00010000, SURROUT = 0x00020000,
266 CENTEROUT = 0x00040000, LFEOUT = 0x00080000,
267 LINE1OUT = 0x00100000, LINE2OUT = 0x00200000,
268 GPIOOUT = 0x00400000,
269 SI_AC97_PRIMARY_READY = 0x01000000,
270 SI_AC97_SECONDARY_READY = 0x02000000,
271};
272
273enum global_control_bits {
274 CHANNLE_IDX = 0x0000003f, PB_RESET = 0x00000100,
275 PAUSE_ENG = 0x00000200,
276 OVERRUN_IE = 0x00000400, UNDERRUN_IE = 0x00000800,
277 ENDLP_IE = 0x00001000, MIDLP_IE = 0x00002000,
278 ETOG_IE = 0x00004000,
279 EDROP_IE = 0x00008000, BANK_B_EN = 0x00010000
280};
281
282enum channel_control_bits {
283 CHANNEL_LOOP = 0x00001000, CHANNEL_SIGNED = 0x00002000,
284 CHANNEL_STEREO = 0x00004000, CHANNEL_16BITS = 0x00008000,
285};
286
287enum channel_attribute {
288 /* playback/record select */
289 CHANNEL_PB = 0x0000, CHANNEL_SPC_PB = 0x4000,
290 CHANNEL_REC = 0x8000, CHANNEL_REC_PB = 0xc000,
291 /* playback destination/record source select */
292 MODEM_LINE1 = 0x0000, MODEM_LINE2 = 0x0400,
293 PCM_LR = 0x0800, HSET = 0x0c00,
294 I2S_LR = 0x1000, CENTER_LFE = 0x1400,
295 SURR_LR = 0x1800, SPDIF_LR = 0x1c00,
296 MIC = 0x1400,
297	/* misc stuff */
298 MONO_LEFT = 0x0000, MONO_RIGHT = 0x0100,
299 MONO_MIX = 0x0200, SRC_ENABLE = 0x0080,
300};
301
302enum miscint_bits {
303 PB_UNDERRUN_IRO = 0x00000001, REC_OVERRUN_IRQ = 0x00000002,
304 SB_IRQ = 0x00000004, MPU401_IRQ = 0x00000008,
305 OPL3_IRQ = 0x00000010, ADDRESS_IRQ = 0x00000020,
306 ENVELOPE_IRQ = 0x00000040, ST_IRQ = 0x00000080,
307 PB_UNDERRUN = 0x00000100, REC_OVERRUN = 0x00000200,
308 MIXER_UNDERFLOW = 0x00000400, MIXER_OVERFLOW = 0x00000800,
309 ST_TARGET_REACHED = 0x00008000, PB_24K_MODE = 0x00010000,
310 ST_IRQ_EN = 0x00800000, ACGPIO_IRQ = 0x01000000
311};
312
313#define TRID_REG( trident, x ) ( (trident) -> iobase + (x) )
314
315#define CYBER_PORT_AUDIO 0x3CE
316#define CYBER_IDX_AUDIO_ENABLE 0x7B
317#define CYBER_BMSK_AUDIO_INT_ENABLE 0x09
318#define CYBER_BMSK_AUENZ 0x01
319#define CYBER_BMSK_AUENZ_ENABLE 0x00
320#define CYBER_IDX_IRQ_ENABLE 0x12
321
322#define VALIDATE_MAGIC(FOO,MAG) \
323({ \
324 if (!(FOO) || (FOO)->magic != MAG) { \
325 printk(invalid_magic,__func__); \
326 return -ENXIO; \
327 } \
328})
329
330#define VALIDATE_STATE(a) VALIDATE_MAGIC(a,TRIDENT_STATE_MAGIC)
331#define VALIDATE_CARD(a) VALIDATE_MAGIC(a,TRIDENT_CARD_MAGIC)
332
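/* ld2(x) computes the integer base-2 logarithm of x (the index of the
 * highest set bit) with a small binary search; ld2(0) and ld2(1) both
 * return 0. */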
333static inline unsigned ld2(unsigned int x)
334{
335 unsigned r = 0;
336
337 if (x >= 0x10000) {
338 x >>= 16;
339 r += 16;
340 }
341 if (x >= 0x100) {
342 x >>= 8;
343 r += 8;
344 }
345 if (x >= 0x10) {
346 x >>= 4;
347 r += 4;
348 }
349 if (x >= 4) {
350 x >>= 2;
351 r += 2;
352 }
353 if (x >= 2)
354 r++;
355 return r;
356}
357
358#endif /* __TRID4DWAVE_H */
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 12f6ac99b04c..9212c37a33b8 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -48,6 +48,7 @@ config SND_PXA2XX_SOC_POODLE
 config SND_PXA2XX_SOC_TOSA
 	tristate "SoC AC97 Audio support for Tosa"
 	depends on SND_PXA2XX_SOC && MACH_TOSA
+	depends on MFD_TC6393XB
 	select SND_PXA2XX_SOC_AC97
 	select SND_SOC_WM9712
 	help
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index b6edb61a3a30..fe6cca9c9e76 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
+#include <linux/gpio.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -28,7 +29,7 @@
 #include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
-#include <asm/hardware/tmio.h>
+#include <asm/arch/tosa.h>
 #include <asm/arch/pxa-regs.h>
 #include <asm/arch/hardware.h>
 #include <asm/arch/audio.h>
@@ -137,10 +138,7 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol,
 static int tosa_hp_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *k, int event)
 {
-	if (SND_SOC_DAPM_EVENT_ON(event))
-		set_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE);
-	else
-		reset_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE);
+	gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 1 :0);
 	return 0;
 }
 
@@ -254,16 +252,28 @@ static int __init tosa_init(void)
 	if (!machine_is_tosa())
 		return -ENODEV;
 
+	ret = gpio_request(TOSA_GPIO_L_MUTE, "Headphone Jack");
+	if (ret)
+		return ret;
+	gpio_direction_output(TOSA_GPIO_L_MUTE, 0);
+
 	tosa_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!tosa_snd_device)
-		return -ENOMEM;
+	if (!tosa_snd_device) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
 
 	platform_set_drvdata(tosa_snd_device, &tosa_snd_devdata);
 	tosa_snd_devdata.dev = &tosa_snd_device->dev;
 	ret = platform_device_add(tosa_snd_device);
 
-	if (ret)
-		platform_device_put(tosa_snd_device);
+	if (!ret)
+		return 0;
+
+	platform_device_put(tosa_snd_device);
+
+err_alloc:
+	gpio_free(TOSA_GPIO_L_MUTE);
 
 	return ret;
 }
@@ -271,6 +281,7 @@ static int __init tosa_init(void)
 static void __exit tosa_exit(void)
 {
 	platform_device_unregister(tosa_snd_device);
+	gpio_free(TOSA_GPIO_L_MUTE);
 }
 
 module_init(tosa_init);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 904d7b7bd780..a845890b6800 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -902,7 +902,7 @@ static const struct file_operations kvm_vcpu_fops = {
  */
 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 {
-	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu);
+	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
 	if (fd < 0)
 		kvm_put_kvm(vcpu->kvm);
 	return fd;
@@ -1261,7 +1261,7 @@ static int kvm_dev_ioctl_create_vm(void)
 	kvm = kvm_create_vm();
 	if (IS_ERR(kvm))
 		return PTR_ERR(kvm);
-	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm);
+	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
 	if (fd < 0)
 		kvm_put_kvm(kvm);
 